Diffstat (limited to 'drivers/net/ethernet/intel')
78 files changed, 4838 insertions, 3387 deletions
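Most of the mechanical churn across these drivers comes from the extended ethtool ring parameter API: the ->get_ringparam() and ->set_ringparam() callbacks now take a struct kernel_ethtool_ringparam and a struct netlink_ext_ack in addition to the legacy struct ethtool_ringparam, and every Intel driver below is converted to the new signature. A minimal sketch of the converted callback pair for a hypothetical "foo" driver (struct foo_adapter and the FOO_* ring limits are illustrative, not taken from this diff):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

#define FOO_MIN_RING	64	/* illustrative descriptor limits */
#define FOO_MAX_RING	4096

struct foo_adapter {		/* illustrative private state */
	u32 tx_desc_count;
	u32 rx_desc_count;
};

static void foo_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = FOO_MAX_RING;
	ring->tx_max_pending = FOO_MAX_RING;
	ring->rx_pending = adapter->rx_desc_count;
	ring->tx_pending = adapter->tx_desc_count;
}

static int foo_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kernel_ring,
			     struct netlink_ext_ack *extack)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
		/* extack carries a readable reason back over netlink */
		NL_SET_ERR_MSG_MOD(extack, "mini/jumbo rings not supported");
		return -EINVAL;
	}
	if (ring->tx_pending < FOO_MIN_RING || ring->rx_pending < FOO_MIN_RING)
		return -EINVAL;

	adapter->tx_desc_count = ring->tx_pending;
	adapter->rx_desc_count = ring->rx_pending;
	return 0;
}

The extack argument is what lets the netlink ethtool path report a human-readable failure reason; NL_SET_ERR_MSG_MOD() is a no-op when extack is NULL, as on the legacy ioctl path, so drivers can set it unconditionally.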
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 0bf3d47bb90d..4a8013f20152 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2557,7 +2557,9 @@ static int e100_set_eeprom(struct net_device *netdev, } static void e100_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct nic *nic = netdev_priv(netdev); struct param_range *rfds = &nic->params.rfds; @@ -2570,7 +2572,9 @@ static void e100_get_ringparam(struct net_device *netdev, } static int e100_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct nic *nic = netdev_priv(netdev); struct param_range *rfds = &nic->params.rfds; diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 0a57172dfcbc..32803b0cf1e8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -539,7 +539,9 @@ static void e1000_get_drvinfo(struct net_device *netdev, } static void e1000_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -556,7 +558,9 @@ static void e1000_get_ringparam(struct net_device *netdev, } static int e1000_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 8515e00d1b40..b80ae9a82224 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -655,7 +655,9 @@ static void e1000_get_drvinfo(struct net_device *netdev, } static void e1000_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -666,7 +668,9 @@ static void e1000_get_ringparam(struct net_device *netdev, } static int e1000_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_ring *temp_tx = NULL, *temp_rx = NULL; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 44e2dc8328a2..635a95927e93 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3614,10 +3614,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) return -EINVAL; - /* flags reserved for future extensions - must be zero */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; diff --git 
a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 0d37f011d0ce..d53369e30040 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -502,7 +502,9 @@ static void fm10k_set_msglevel(struct net_device *netdev, u32 data) } static void fm10k_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct fm10k_intfc *interface = netdev_priv(netdev); @@ -517,7 +519,9 @@ static void fm10k_get_ringparam(struct net_device *netdev, } static int fm10k_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_ring *temp_ring; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 291e61ac3e44..2c1b1da1220e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); return; } + if (vsi->type != I40E_VSI_MAIN && + vsi->type != I40E_VSI_FDIR && + vsi->type != I40E_VSI_VMDQ2) { + dev_info(&pf->pdev->dev, + "vsi %d type %d descriptor rings not available\n", + vsi_seid, vsi->type); + return; + } if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) { dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid); return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 513ba6974355..091f36adbbe1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1916,7 +1916,9 @@ static void i40e_get_drvinfo(struct net_device *netdev, } static void i40e_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -1944,7 +1946,9 @@ static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index) } static int i40e_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct i40e_ring *tx_rings = NULL, *rx_rings = NULL; struct i40e_netdev_priv *np = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 09b1d5aed1c9..61e5789d78db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1205,10 +1205,6 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work); - /* Reserved for future extensions. 
*/ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: pf->ptp_tx = false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 80ae264c99ba..2ea4deb8fc44 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1949,6 +1949,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, } /** + * i40e_sync_vf_state + * @vf: pointer to the VF info + * @state: VF state + * + * Called from a VF message to synchronize the service with a potential + * VF reset state + **/ +static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state) +{ + int i; + + /* When handling some messages, it needs VF state to be set. + * It is possible that this flag is cleared during VF reset, + * so there is a need to wait until the end of the reset to + * handle the request message correctly. + */ + for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) { + if (test_bit(state, &vf->vf_states)) + return true; + usleep_range(10000, 20000); + } + + return test_bit(state, &vf->vf_states); +} + +/** * i40e_vc_get_version_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2008,7 +2034,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) size_t len = 0; int ret; - if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -2131,7 +2157,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) bool allmulti = false; bool alluni = false; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err_out; } @@ -2219,7 +2245,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) struct i40e_vsi *vsi; u16 num_qps_all = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2368,7 +2394,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2540,7 +2566,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2590,7 +2616,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) u8 cur_pairs = vf->num_queue_pairs; struct i40e_pf *pf = vf->pf; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) return -EINVAL; if (req_pairs > I40E_MAX_VF_QUEUES) { @@ -2635,7 +2661,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) memset(&stats, 0, sizeof(struct i40e_eth_stats)); - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2752,7 +2778,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 
ret = I40E_ERR_PARAM; goto error_param; @@ -2824,7 +2850,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2968,7 +2994,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -3088,9 +3114,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) struct i40e_vsi *vsi = NULL; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || - (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { + vrk->key_len != I40E_HKEY_ARRAY_SIZE) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3119,9 +3145,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; u16 i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || - (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { + vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3154,7 +3180,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int len = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3190,7 +3216,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) struct i40e_hw *hw = &pf->hw; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3215,7 +3241,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; struct i40e_vsi *vsi; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3241,7 +3267,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; struct i40e_vsi *vsi; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3468,7 +3494,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i, ret; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3599,7 +3625,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i, ret; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err_out; } @@ -3708,7 +3734,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; u64 speed = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3797,11 +3823,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) /* set this flag only after 
making sure all inputs are sane */ vf->adq_enabled = true; - /* num_req_queues is set when user changes number of queues via ethtool - * and this causes issue for default VSI(which depends on this variable) - * when ADq is enabled, hence reset it. - */ - vf->num_req_queues = 0; /* reset the VF in order to allocate resources */ i40e_vc_reset_vf(vf, true); @@ -3824,7 +3845,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 091e32c1bb46..49575a640a84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -18,6 +18,8 @@ #define I40E_MAX_VF_PROMISC_FLAGS 3 +#define I40E_VF_STATE_WAIT_COUNT 20 + /* Various queue ctrls */ enum i40e_queue_ctrl { I40E_QUEUE_CTRL_UNKNOWN = 0, diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 75635bd57cf6..59806d1f7e79 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -55,7 +55,8 @@ enum iavf_vsi_state_t { struct iavf_vsi { struct iavf_adapter *back; struct net_device *netdev; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)]; u16 seid; u16 id; DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__); @@ -137,14 +138,24 @@ struct iavf_q_vector { struct iavf_mac_filter { struct list_head list; u8 macaddr[ETH_ALEN]; - bool is_new_mac; /* filter is new, wait for PF decision */ - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ + struct { + u8 is_new_mac:1; /* filter is new, wait for PF decision */ + u8 remove:1; /* filter needs to be removed */ + u8 add:1; /* filter needs to be added */ + u8 is_primary:1; /* filter is a default VF MAC */ + u8 padding:4; + }; +}; + +#define IAVF_VLAN(vid, tpid) ((struct iavf_vlan){ vid, tpid }) +struct iavf_vlan { + u16 vid; + u16 tpid; }; struct iavf_vlan_filter { struct list_head list; - u16 vlan; + struct iavf_vlan vlan; bool remove; /* filter needs to be removed */ bool add; /* filter needs to be added */ }; @@ -177,6 +188,8 @@ enum iavf_state_t { __IAVF_REMOVE, /* driver is being unloaded */ __IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ __IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ + __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS, + __IAVF_INIT_CONFIG_ADAPTER, __IAVF_INIT_SW, /* got resources, setting up structs */ __IAVF_INIT_FAILED, /* init failed, restarting procedure */ __IAVF_RESETTING, /* in reset */ @@ -274,37 +287,47 @@ struct iavf_adapter { /* duplicates for common code */ #define IAVF_FLAG_DCB_ENABLED 0 /* flags for admin queue service task */ - u32 aq_required; -#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT(0) -#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT(1) -#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) -#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) -#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) -#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) -#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) -#define IAVF_FLAG_AQ_MAP_VECTORS BIT(7) -#define IAVF_FLAG_AQ_HANDLE_RESET BIT(8) -#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ -#define IAVF_FLAG_AQ_GET_CONFIG BIT(10) + 
u64 aq_required; +#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT_ULL(0) +#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT_ULL(1) +#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT_ULL(2) +#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT_ULL(3) +#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT_ULL(4) +#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT_ULL(5) +#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT_ULL(6) +#define IAVF_FLAG_AQ_MAP_VECTORS BIT_ULL(7) +#define IAVF_FLAG_AQ_HANDLE_RESET BIT_ULL(8) +#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */ +#define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10) /* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ -#define IAVF_FLAG_AQ_GET_HENA BIT(11) -#define IAVF_FLAG_AQ_SET_HENA BIT(12) -#define IAVF_FLAG_AQ_SET_RSS_KEY BIT(13) -#define IAVF_FLAG_AQ_SET_RSS_LUT BIT(14) -#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT(15) -#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT(16) -#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) -#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) -#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) -#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) -#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT(21) -#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) -#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) -#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) -#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25) -#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26) -#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27) -#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28) +#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11) +#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12) +#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) +#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) +#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15) +#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16) +#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17) +#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18) +#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19) +#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20) +#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21) +#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT_ULL(22) +#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT_ULL(23) +#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT_ULL(24) +#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT_ULL(25) +#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT_ULL(26) +#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT_ULL(27) +#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT_ULL(28) +#define IAVF_FLAG_AQ_REQUEST_STATS BIT_ULL(29) +#define IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS BIT_ULL(30) +#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING BIT_ULL(31) +#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING BIT_ULL(32) +#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING BIT_ULL(33) +#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING BIT_ULL(34) +#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION BIT_ULL(35) +#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION BIT_ULL(36) +#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37) +#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38) /* OS defined structs */ struct net_device *netdev; @@ -344,6 +367,14 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_RSS_PF))) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_VLAN) +#define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_VLAN_V2) +#define VLAN_V2_FILTERING_ALLOWED(_a) \ + (VLAN_V2_ALLOWED((_a)) && \ + ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \ + (_a)->vlan_v2_caps.filtering.filtering_support.inner)) +#define VLAN_FILTERING_ALLOWED(_a) \ + (VLAN_ALLOWED((_a)) || VLAN_V2_FILTERING_ALLOWED((_a))) #define 
ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_CAP_ADV_LINK_SPEED) #define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ @@ -355,6 +386,7 @@ struct iavf_adapter { struct virtchnl_version_info pf_version; #define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ ((_a)->pf_version.minor == 1)) + struct virtchnl_vlan_caps vlan_v2_caps; u16 msg_enable; struct iavf_eth_stats current_stats; struct iavf_vsi vsi; @@ -443,7 +475,9 @@ static inline void iavf_change_state(struct iavf_adapter *adapter, int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); +int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter); void iavf_schedule_reset(struct iavf_adapter *adapter); +void iavf_schedule_request_stats(struct iavf_adapter *adapter); void iavf_reset(struct iavf_adapter *adapter); void iavf_set_ethtool_ops(struct net_device *netdev); void iavf_update_stats(struct iavf_adapter *adapter); @@ -460,6 +494,9 @@ int iavf_send_api_ver(struct iavf_adapter *adapter); int iavf_verify_api_ver(struct iavf_adapter *adapter); int iavf_send_vf_config_msg(struct iavf_adapter *adapter); int iavf_get_vf_config(struct iavf_adapter *adapter); +int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter); +int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter); +void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter); void iavf_irq_enable(struct iavf_adapter *adapter, bool flush); void iavf_configure_queues(struct iavf_adapter *adapter); void iavf_deconfigure_queues(struct iavf_adapter *adapter); @@ -495,10 +532,19 @@ void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter); void iavf_del_cloud_filter(struct iavf_adapter *adapter); +void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid); +void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid); +void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid); +void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid); +void +iavf_set_vlan_offload_features(struct iavf_adapter *adapter, + netdev_features_t prev_features, + netdev_features_t features); void iavf_add_fdir_filter(struct iavf_adapter *adapter); void iavf_del_fdir_filter(struct iavf_adapter *adapter); void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter); void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter); struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, const u8 *macaddr); +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs); #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 144a77679359..3bb56714beb0 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -331,9 +331,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev, **/ static int iavf_get_sset_count(struct net_device *netdev, int sset) { + /* Report the maximum number queues, even if not every queue is + * currently configured. Since allocation of queues is in pairs, + * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set + * at device creation and never changes. 
+ */ + if (sset == ETH_SS_STATS) return IAVF_STATS_LEN + - (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES); + (IAVF_QUEUE_STATS_LEN * 2 * + netdev->real_num_tx_queues); else if (sset == ETH_SS_PRIV_FLAGS) return IAVF_PRIV_FLAGS_STR_LEN; else return -EINVAL; @@ -354,20 +361,24 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); unsigned int i; + /* Explicitly request stats refresh */ + iavf_schedule_request_stats(adapter); + iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); rcu_read_lock(); - for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) { + /* As num_active_queues describes both tx and rx queues, we can use + * it to iterate over rings' stats. + */ + for (i = 0; i < adapter->num_active_queues; i++) { struct iavf_ring *ring; - /* Avoid accessing un-allocated queues */ - ring = (i < adapter->num_active_queues ? - &adapter->tx_rings[i] : NULL); + /* Tx rings stats */ + ring = &adapter->tx_rings[i]; iavf_add_queue_stats(&data, ring); - /* Avoid accessing un-allocated queues */ - ring = (i < adapter->num_active_queues ? - &adapter->rx_rings[i] : NULL); + /* Rx rings stats */ + ring = &adapter->rx_rings[i]; iavf_add_queue_stats(&data, ring); } rcu_read_unlock(); @@ -404,10 +415,10 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data) iavf_add_stat_strings(&data, iavf_gstrings_stats); - /* Queues are always allocated in pairs, so we just use num_tx_queues - * for both Tx and Rx queues. + /* Queues are always allocated in pairs, so we just use + * real_num_tx_queues for both Tx and Rx queues. */ - for (i = 0; i < netdev->num_tx_queues; i++) { + for (i = 0; i < netdev->real_num_tx_queues; i++) { iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, "tx", i); iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, @@ -580,12 +591,16 @@ static void iavf_get_drvinfo(struct net_device *netdev, * iavf_get_ringparam - Get ring parameters * @netdev: network interface device structure * @ring: ethtool ringparam structure + * @kernel_ring: ethtool external ringparam structure + * @extack: netlink extended ACK report struct * * Returns current ring parameters. TX and RX rings are reported separately, * but the number of rings is not reported. **/ static void iavf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct iavf_adapter *adapter = netdev_priv(netdev); @@ -599,12 +614,16 @@ static void iavf_get_ringparam(struct net_device *netdev, * iavf_set_ringparam - Set ring parameters * @netdev: network interface device structure * @ring: ethtool ringparam structure + * @kernel_ring: ethtool external ringparam structure + * @extack: netlink extended ACK report struct * * Sets ring parameters. TX and RX rings are controlled separately, but the * number of rings is not specified, so all rings get the same settings. 
**/ static int iavf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct iavf_adapter *adapter = netdev_priv(netdev); u32 new_rx_count, new_tx_count; @@ -612,23 +631,44 @@ static int iavf_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_tx_count = clamp_t(u32, ring->tx_pending, - IAVF_MIN_TXD, - IAVF_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (ring->tx_pending > IAVF_MAX_TXD || + ring->tx_pending < IAVF_MIN_TXD || + ring->rx_pending > IAVF_MAX_RXD || + ring->rx_pending < IAVF_MIN_RXD) { + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", + ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD, + IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (new_tx_count != ring->tx_pending) + netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n", + new_tx_count); - new_rx_count = clamp_t(u32, ring->rx_pending, - IAVF_MIN_RXD, - IAVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (new_rx_count != ring->rx_pending) + netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", + new_rx_count); /* if nothing to do return success */ if ((new_tx_count == adapter->tx_desc_count) && - (new_rx_count == adapter->rx_desc_count)) + (new_rx_count == adapter->rx_desc_count)) { + netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); return 0; + } + + if (new_tx_count != adapter->tx_desc_count) { + netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n", + adapter->tx_desc_count, new_tx_count); + adapter->tx_desc_count = new_tx_count; + } - adapter->tx_desc_count = new_tx_count; - adapter->rx_desc_count = new_rx_count; + if (new_rx_count != adapter->rx_desc_count) { + netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n", + adapter->rx_desc_count, new_rx_count); + adapter->rx_desc_count = new_rx_count; + } if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; @@ -723,12 +763,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, * * Change the ITR settings for a specific queue. 
**/ -static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, - struct ethtool_coalesce *ec, int queue) +static int iavf_set_itr_per_queue(struct iavf_adapter *adapter, + struct ethtool_coalesce *ec, int queue) { struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; struct iavf_q_vector *q_vector; + u16 itr_setting; + + itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->rx_coalesce_usecs != itr_setting && + ec->use_adaptive_rx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); + return -EINVAL; + } + + itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->tx_coalesce_usecs != itr_setting && + ec->use_adaptive_tx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); + return -EINVAL; + } rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); @@ -751,6 +810,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, * the Tx and Rx ITR values based on the values we have entered * into the q_vector, no need to write the values now. */ + return 0; } /** @@ -792,9 +852,11 @@ static int __iavf_set_coalesce(struct net_device *netdev, */ if (queue < 0) { for (i = 0; i < adapter->num_active_queues; i++) - iavf_set_itr_per_queue(adapter, ec, i); + if (iavf_set_itr_per_queue(adapter, ec, i)) + return -EINVAL; } else if (queue < adapter->num_active_queues) { - iavf_set_itr_per_queue(adapter, ec, queue); + if (iavf_set_itr_per_queue(adapter, ec, queue)) + return -EINVAL; } else { netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", adapter->num_active_queues - 1); @@ -1877,7 +1939,7 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, * @key: hash key * @hfunc: hash function to use * - * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. 
**/ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, @@ -1886,19 +1948,21 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + /* Only support toeplitz hash function */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (!indir) + + if (!key && !indir) return 0; if (key) memcpy(adapter->rss_key, key, adapter->rss_key_size); - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - adapter->rss_lut[i] = (u8)(indir[i]); + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = (u8)(indir[i]); + } return iavf_config_rss(adapter); } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 336e6bf95e48..0fbac51b7cca 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -147,7 +147,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, * * Returns 0 on success, negative on failure **/ -static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) { unsigned int wait, delay = 10; @@ -175,6 +175,19 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) } /** + * iavf_schedule_request_stats - Set the flags and schedule statistics request + * @adapter: board private structure + * + * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly + * request and refresh ethtool stats + **/ +void iavf_schedule_request_stats(struct iavf_adapter *adapter) +{ + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +} + +/** * iavf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: queue number that is timing out @@ -450,14 +463,14 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-TxRx-%d", basename, rx_int_idx++); + "iavf-%s-TxRx-%u", basename, rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-rx-%d", basename, rx_int_idx++); + "iavf-%s-rx-%u", basename, rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-tx-%d", basename, tx_int_idx++); + "iavf-%s-tx-%u", basename, tx_int_idx++); } else { /* skip this unused q_vector */ continue; @@ -633,14 +646,17 @@ static void iavf_configure_rx(struct iavf_adapter *adapter) * mac_vlan_list_lock. **/ static struct -iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, + struct iavf_vlan vlan) { struct iavf_vlan_filter *f; list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (vlan == f->vlan) + if (f->vlan.vid == vlan.vid && + f->vlan.tpid == vlan.tpid) return f; } + return NULL; } @@ -652,7 +668,8 @@ iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) * Returns ptr to the filter object or NULL when no memory available. 
**/ static struct -iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, + struct iavf_vlan vlan) { struct iavf_vlan_filter *f = NULL; @@ -681,7 +698,7 @@ clearout: * @adapter: board private structure * @vlan: VLAN tag **/ -static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) +static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan) { struct iavf_vlan_filter *f; @@ -704,13 +721,58 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) **/ static void iavf_restore_filters(struct iavf_adapter *adapter) { + u16 vid; + /* re-add all VLAN filters */ - if (VLAN_ALLOWED(adapter)) { - u16 vid; + for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID) + iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q)); - for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID) - iavf_add_vlan(adapter, vid); - } + for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID) + iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD)); +} + +/** + * iavf_get_num_vlans_added - get number of VLANs added + * @adapter: board private structure + */ +static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter) +{ + return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) + + bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID); +} + +/** + * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF + * @adapter: board private structure + * + * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN, + * do not impose a limit as that maintains current behavior and for + * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF. + **/ +static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter) +{ + /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has + * never been a limit on the VF driver side + */ + if (VLAN_ALLOWED(adapter)) + return VLAN_N_VID; + else if (VLAN_V2_ALLOWED(adapter)) + return adapter->vlan_v2_caps.filtering.max_filters; + + return 0; +} + +/** + * iavf_max_vlans_added - check if maximum VLANs allowed already exist + * @adapter: board private structure + **/ +static bool iavf_max_vlans_added(struct iavf_adapter *adapter) +{ + if (iavf_get_num_vlans_added(adapter) < + iavf_get_max_vlans_allowed(adapter)) + return false; + + return true; } /** @@ -724,13 +786,23 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (!VLAN_ALLOWED(adapter)) + if (!VLAN_FILTERING_ALLOWED(adapter)) return -EIO; - if (iavf_add_vlan(adapter, vid) == NULL) + if (iavf_max_vlans_added(adapter)) { + netdev_err(netdev, "Max allowed VLAN filters %u. 
Remove existing VLANs or disable filtering via Ethtool if supported.\n", + iavf_get_max_vlans_allowed(adapter)); + return -EIO; + } + + if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)))) return -ENOMEM; - set_bit(vid, adapter->vsi.active_vlans); + if (proto == cpu_to_be16(ETH_P_8021Q)) + set_bit(vid, adapter->vsi.active_cvlans); + else + set_bit(vid, adapter->vsi.active_svlans); + return 0; } @@ -745,11 +817,11 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (!VLAN_ALLOWED(adapter)) - return -EIO; - - iavf_del_vlan(adapter, vid); - clear_bit(vid, adapter->vsi.active_vlans); + iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))); + if (proto == cpu_to_be16(ETH_P_8021Q)) + clear_bit(vid, adapter->vsi.active_cvlans); + else + clear_bit(vid, adapter->vsi.active_svlans); return 0; } @@ -1144,6 +1216,86 @@ static void iavf_free_queues(struct iavf_adapter *adapter) } /** + * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload + * @adapter: board private structure + * + * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or + * stripped in certain descriptor fields. Instead of checking the offload + * capability bits in the hot path, cache the location the ring specific + * flags. + */ +void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_active_queues; i++) { + struct iavf_ring *tx_ring = &adapter->tx_rings[i]; + struct iavf_ring *rx_ring = &adapter->rx_rings[i]; + + /* prevent multiple L2TAG bits being set after VFR */ + tx_ring->flags &= + ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 | + IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2); + rx_ring->flags &= + ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 | + IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2); + + if (VLAN_ALLOWED(adapter)) { + tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + } else if (VLAN_V2_ALLOWED(adapter)) { + struct virtchnl_vlan_supported_caps *stripping_support; + struct virtchnl_vlan_supported_caps *insertion_support; + + stripping_support = + &adapter->vlan_v2_caps.offloads.stripping_support; + insertion_support = + &adapter->vlan_v2_caps.offloads.insertion_support; + + if (stripping_support->outer) { + if (stripping_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + rx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (stripping_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2) + rx_ring->flags |= + IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2; + } else if (stripping_support->inner) { + if (stripping_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + rx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (stripping_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2) + rx_ring->flags |= + IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2; + } + + if (insertion_support->outer) { + if (insertion_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + tx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (insertion_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2) + tx_ring->flags |= + IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2; + } else if (insertion_support->inner) { + if (insertion_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + tx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (insertion_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2) + tx_ring->flags |= + IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2; + } + } + } +} + +/** * iavf_alloc_queues - Allocate memory 
for all rings * @adapter: board private structure to initialize * @@ -1204,6 +1356,8 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter) adapter->num_active_queues = num_active_queues; + iavf_set_queue_vlan_tag_loc(adapter); + return 0; err_out: @@ -1576,6 +1730,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) { if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) return iavf_send_vf_config_msg(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS) + return iavf_send_vf_offload_vlan_v2_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { iavf_disable_queues(adapter); return 0; @@ -1709,10 +1865,133 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_del_adv_rss_cfg(adapter); return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) { + iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) { + iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) { + iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) { + iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) { + iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) { + iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) { + iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) { + iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD); + return 0; + } + + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { + iavf_request_stats(adapter); + return 0; + } + return -EAGAIN; } /** + * iavf_set_vlan_offload_features - set VLAN offload configuration + * @adapter: board private structure + * @prev_features: previous features used for comparison + * @features: updated features used for configuration + * + * Set the aq_required bit(s) based on the requested features passed in to + * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule + * the watchdog if any changes are requested to expedite the request via + * virtchnl. + **/ +void +iavf_set_vlan_offload_features(struct iavf_adapter *adapter, + netdev_features_t prev_features, + netdev_features_t features) +{ + bool enable_stripping = true, enable_insertion = true; + u16 vlan_ethertype = 0; + u64 aq_required = 0; + + /* keep cases separate because one ethertype for offloads can be + * disabled at the same time as another is disabled, so check for an + * enabled ethertype first, then check for disabled. Default to + * ETH_P_8021Q so an ethertype is specified if disabling insertion and + * stripping. 
+ */ + if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) + vlan_ethertype = ETH_P_8021AD; + else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) + vlan_ethertype = ETH_P_8021Q; + else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) + vlan_ethertype = ETH_P_8021AD; + else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) + vlan_ethertype = ETH_P_8021Q; + else + vlan_ethertype = ETH_P_8021Q; + + if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) + enable_stripping = false; + if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) + enable_insertion = false; + + if (VLAN_ALLOWED(adapter)) { + /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN + * stripping via virtchnl. VLAN insertion can be toggled on the + * netdev, but it doesn't require a virtchnl message + */ + if (enable_stripping) + aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + + } else if (VLAN_V2_ALLOWED(adapter)) { + switch (vlan_ethertype) { + case ETH_P_8021Q: + if (enable_stripping) + aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; + + if (enable_insertion) + aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; + break; + case ETH_P_8021AD: + if (enable_stripping) + aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; + + if (enable_insertion) + aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; + break; + } + } + + if (aq_required) { + adapter->aq_required |= aq_required; + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + } +} + +/** * iavf_startup - first step of driver startup * @adapter: board private structure * @@ -1814,6 +2093,59 @@ err: } /** + * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES + * @adapter: board private structure + */ +int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter) +{ + int i, num_req_queues = adapter->num_req_queues; + struct iavf_vsi *vsi = &adapter->vsi; + + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + if (!adapter->vsi_res) { + dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); + return -ENODEV; + } + + if (num_req_queues && + num_req_queues > adapter->vsi_res->num_queue_pairs) { + /* Problem. The PF gave us fewer queues than what we had + * negotiated in our request. Need a reset to see if we can't + * get back to a working state. 
+ */ + dev_err(&adapter->pdev->dev, + "Requested %d queues, but PF only gave us %d.\n", + num_req_queues, + adapter->vsi_res->num_queue_pairs); + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; + iavf_schedule_reset(adapter); + + return -EAGAIN; + } + adapter->num_req_queues = 0; + adapter->vsi.id = adapter->vsi_res->vsi_id; + + adapter->vsi.back = adapter; + adapter->vsi.base_vector = 1; + adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; + vsi->netdev = adapter->netdev; + vsi->qs_handle = adapter->vsi_res->qset_handle; + if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + adapter->rss_key_size = adapter->vf_res->rss_key_size; + adapter->rss_lut_size = adapter->vf_res->rss_lut_size; + } else { + adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; + adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; + } + + return 0; +} + +/** * iavf_init_get_resources - third step of driver startup * @adapter: board private structure * @@ -1824,7 +2156,6 @@ err: **/ static void iavf_init_get_resources(struct iavf_adapter *adapter) { - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; int err; @@ -1842,7 +2173,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter) err = iavf_get_vf_config(adapter); if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { err = iavf_send_vf_config_msg(adapter); - goto err; + goto err_alloc; } else if (err == IAVF_ERR_PARAM) { /* We only get ERR_PARAM if the device is in a very bad * state or if we've been disabled for previous bad @@ -1857,9 +2188,83 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter) goto err_alloc; } - err = iavf_process_config(adapter); + err = iavf_parse_vf_resource_msg(adapter); if (err) goto err_alloc; + + err = iavf_send_vf_offload_vlan_v2_msg(adapter); + if (err == -EOPNOTSUPP) { + /* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2, so + * go directly to finishing initialization + */ + iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); + return; + } else if (err) { + dev_err(&pdev->dev, "Unable to send offload vlan v2 request (%d)\n", + err); + goto err_alloc; + } + + /* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the + * state accordingly + */ + iavf_change_state(adapter, __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS); + return; + +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + +/** + * iavf_init_get_offload_vlan_v2_caps - part of driver startup + * @adapter: board private structure + * + * Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the + * VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2. If VIRTCHNL_VF_OFFLOAD_VLAN_V2 is + * not negotiated, then this state will never be entered. 
+ **/ +static void iavf_init_get_offload_vlan_v2_caps(struct iavf_adapter *adapter) +{ + int ret; + + WARN_ON(adapter->state != __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS); + + memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps)); + + ret = iavf_get_vf_vlan_v2_caps(adapter); + if (ret) { + if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK) + iavf_send_vf_offload_vlan_v2_msg(adapter); + goto err; + } + + iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); + return; +err: + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + +/** + * iavf_init_config_adapter - last part of driver startup + * @adapter: board private structure + * + * After all the supported capabilities are negotiated, then the + * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization. + */ +static void iavf_init_config_adapter(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err; + + WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER); + + if (iavf_process_config(adapter)) + goto err; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; @@ -1942,6 +2347,10 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter) else iavf_init_rss(adapter); + if (VLAN_V2_ALLOWED(adapter)) + /* request initial VLAN offload settings */ + iavf_set_vlan_offload_features(adapter, 0, netdev->features); + return; err_mem: iavf_free_rss(adapter); @@ -1949,9 +2358,6 @@ err_register: iavf_free_misc_irq(adapter); err_sw_init: iavf_reset_interrupt_capability(adapter); -err_alloc: - kfree(adapter->vf_res); - adapter->vf_res = NULL; err: iavf_change_state(adapter, __IAVF_INIT_FAILED); } @@ -2000,6 +2406,18 @@ static void iavf_watchdog_task(struct work_struct *work) queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; + case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS: + iavf_init_get_offload_vlan_v2_caps(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(1)); + return; + case __IAVF_INIT_CONFIG_ADAPTER: + iavf_init_config_adapter(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(1)); + return; case __IAVF_INIT_FAILED: if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { dev_err(&adapter->pdev->dev, @@ -2033,6 +2451,7 @@ static void iavf_watchdog_task(struct work_struct *work) } adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; + mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); @@ -2052,10 +2471,13 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_send_api_ver(adapter); } } else { + int ret = iavf_process_aq_command(adapter); + /* An error will be returned if no commands were * processed; use this opportunity to update stats + * if the error isn't -ENOTSUPP */ - if (iavf_process_aq_command(adapter) && + if (ret && ret != -EOPNOTSUPP && adapter->state == __IAVF_RUNNING) iavf_request_stats(adapter); } @@ -2063,16 +2485,14 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_detect_recover_hung(&adapter->vsi); break; case __IAVF_REMOVE: - mutex_unlock(&adapter->crit_lock); - return; default: + mutex_unlock(&adapter->crit_lock); return; } /* check for hw reset */ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) { - iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags |= IAVF_FLAG_RESET_PENDING; adapter->aq_required 
= 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; @@ -2173,7 +2593,6 @@ static void iavf_reset_task(struct work_struct *work) struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *f, *ftmp; - struct iavf_vlan_filter *vlf; struct iavf_cloud_filter *cf; u32 reg_val; int i = 0, err; @@ -2236,6 +2655,7 @@ static void iavf_reset_task(struct work_struct *work) } pci_set_master(adapter->pdev); + pci_restore_msi_state(adapter->pdev); if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", @@ -2254,6 +2674,7 @@ continue_reset: (adapter->state == __IAVF_RESETTING)); if (running) { + netdev->flags &= ~IFF_UP; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -2295,6 +2716,13 @@ continue_reset: } adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; + /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been + * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here, + * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until + * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have + * been successfully sent and negotiated + */ + adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -2313,11 +2741,6 @@ continue_reset: list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; } - /* re-add all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->add = true; - } - spin_unlock_bh(&adapter->mac_vlan_list_lock); /* check if TCs are running and re-add all cloud filters */ @@ -2331,7 +2754,6 @@ continue_reset: spin_unlock_bh(&adapter->cloud_filter_list_lock); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter); @@ -2365,7 +2787,7 @@ continue_reset: * to __IAVF_RUNNING */ iavf_up_complete(adapter); - + netdev->flags |= IFF_UP; iavf_irq_enable(adapter, true); } else { iavf_change_state(adapter, __IAVF_DOWN); @@ -2378,8 +2800,10 @@ continue_reset: reset_err: mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); - if (running) + if (running) { iavf_change_state(adapter, __IAVF_RUNNING); + netdev->flags |= IFF_UP; + } dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } @@ -2901,7 +3325,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2911,7 +3335,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2946,7 +3370,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", match.mask->vlan_id); - return IAVF_ERR_CONFIG; + return -EINVAL; } } vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); @@ -2970,7 +3394,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2980,13 +3404,13 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { 
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", be32_to_cpu(match.mask->src)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); - return IAVF_ERR_CONFIG; + return -EINVAL; } if (match.key->dst) { vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); @@ -3007,7 +3431,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, if (ipv6_addr_any(&match.mask->dst)) { dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", IPV6_ADDR_ANY); - return IAVF_ERR_CONFIG; + return -EINVAL; } /* src and dest IPv6 address should not be LOOPBACK @@ -3017,7 +3441,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, ipv6_addr_loopback(&match.key->src)) { dev_err(&adapter->pdev->dev, "ipv6 addr should not be loopback\n"); - return IAVF_ERR_CONFIG; + return -EINVAL; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) @@ -3042,7 +3466,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", be16_to_cpu(match.mask->src)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -3052,7 +3476,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", be16_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } if (match.key->dst) { @@ -3419,6 +3843,8 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) { struct iavf_adapter *adapter = netdev_priv(netdev); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (CLIENT_ENABLED(adapter)) { iavf_notify_client_l2_params(&adapter->vsi); @@ -3430,6 +3856,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } +#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_STAG_TX) + /** * iavf_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted @@ -3441,20 +3872,11 @@ static int iavf_set_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - /* Don't allow changing VLAN_RX flag when adapter is not capable - * of VLAN offload - */ - if (!VLAN_ALLOWED(adapter)) { - if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) - return -EINVAL; - } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { - if (features & NETIF_F_HW_VLAN_CTAG_RX) - adapter->aq_required |= - IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; - else - adapter->aq_required |= - IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - } + /* trigger update on any VLAN feature change */ + if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^ + (features & NETIF_VLAN_OFFLOAD_FEATURES)) + iavf_set_vlan_offload_features(adapter, netdev->features, + features); return 0; } @@ -3518,6 +3940,228 @@ out_err: }
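The two helpers added below derive netdev feature bits from the negotiated capabilities in two passes: which VLAN offloads the user may toggle (merged into hw_features) and which are enabled by default (merged into features). A small user-space sketch of that split; the constants are stand-ins, not the real NETIF_F_* or VIRTCHNL_* values:

#include <stdio.h>

#define CAP_TOGGLE   0x1u /* stand-in for VIRTCHNL_VLAN_TOGGLE */
#define CAP_8100     0x2u /* stand-in for VIRTCHNL_VLAN_ETHERTYPE_8100 */
#define FEAT_CTAG_RX 0x1u /* stand-in for NETIF_F_HW_VLAN_CTAG_RX */

/* toggleable: the PF must both support the ethertype and allow toggling */
static unsigned int toggleable_features(unsigned int caps)
{
	return (caps & CAP_TOGGLE) && (caps & CAP_8100) ? FEAT_CTAG_RX : 0;
}

/* default-on: follows the negotiated ethertype_init, toggleable or not */
static unsigned int default_features(unsigned int caps, unsigned int init)
{
	return (caps & CAP_8100) && (init & CAP_8100) ? FEAT_CTAG_RX : 0;
}

int main(void)
{
	unsigned int caps = CAP_8100; /* supported, but not toggleable */

	printf("toggleable=%#x default=%#x\n",
	       toggleable_features(caps), default_features(caps, CAP_8100));
	return 0;
}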
/** + * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off + * @adapter: board private structure + * + * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 + * was negotiated, determine the VLAN features that can be toggled on and off. + **/ +static netdev_features_t +iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter) +{ + netdev_features_t hw_features = 0; + + if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags) + return hw_features; + + /* Enable VLAN features if supported */ + if (VLAN_ALLOWED(adapter)) { + hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX); + } else if (VLAN_V2_ALLOWED(adapter)) { + struct virtchnl_vlan_caps *vlan_v2_caps = + &adapter->vlan_v2_caps; + struct virtchnl_vlan_supported_caps *stripping_support = + &vlan_v2_caps->offloads.stripping_support; + struct virtchnl_vlan_supported_caps *insertion_support = + &vlan_v2_caps->offloads.insertion_support; + + if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED && + stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) { + if (stripping_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100) + hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + if (stripping_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8) + hw_features |= NETIF_F_HW_VLAN_STAG_RX; + } else if (stripping_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED && + stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) { + if (stripping_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100) + hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + } + + if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED && + insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) { + if (insertion_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100) + hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + if (insertion_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8) + hw_features |= NETIF_F_HW_VLAN_STAG_TX; + } else if (insertion_support->inner && + insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) { + if (insertion_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100) + hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + } + } + + return hw_features; +} + +/** + * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features + * @adapter: board private structure + * + * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 + * was negotiated, determine the VLAN features that are enabled by default. 
+ **/ +static netdev_features_t +iavf_get_netdev_vlan_features(struct iavf_adapter *adapter) +{ + netdev_features_t features = 0; + + if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags) + return features; + + if (VLAN_ALLOWED(adapter)) { + features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; + } else if (VLAN_V2_ALLOWED(adapter)) { + struct virtchnl_vlan_caps *vlan_v2_caps = + &adapter->vlan_v2_caps; + struct virtchnl_vlan_supported_caps *filtering_support = + &vlan_v2_caps->filtering.filtering_support; + struct virtchnl_vlan_supported_caps *stripping_support = + &vlan_v2_caps->offloads.stripping_support; + struct virtchnl_vlan_supported_caps *insertion_support = + &vlan_v2_caps->offloads.insertion_support; + u32 ethertype_init; + + /* give priority to outer stripping and don't support both outer + * and inner stripping + */ + ethertype_init = vlan_v2_caps->offloads.ethertype_init; + if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { + if (stripping_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_RX; + else if (stripping_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_RX; + } else if (stripping_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED) { + if (stripping_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_RX; + } + + /* give priority to outer insertion and don't support both outer + * and inner insertion + */ + if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { + if (insertion_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else if (insertion_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_TX; + } else if (insertion_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED) { + if (insertion_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_TX; + } + + /* give priority to outer filtering and don't bother if both + * outer and inner filtering are enabled + */ + ethertype_init = vlan_v2_caps->filtering.ethertype_init; + if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { + if (filtering_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (filtering_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_FILTER; + } else if (filtering_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED) { + if (filtering_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (filtering_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_FILTER; + } + } + + return features; +} + +#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \ + (!(((requested) & (feature_bit)) && \ + !((allowed) & (feature_bit)))) + +/** + * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support + * @adapter: board private structure + * @requested_features: stack requested NETDEV 
features + **/ +static netdev_features_t +iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter, + netdev_features_t requested_features) +{ + netdev_features_t allowed_features; + + allowed_features = iavf_get_netdev_vlan_hw_features(adapter) | + iavf_get_netdev_vlan_features(adapter); + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_CTAG_TX)) + requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_CTAG_RX)) + requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_STAG_TX)) + requested_features &= ~NETIF_F_HW_VLAN_STAG_TX; + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_STAG_RX)) + requested_features &= ~NETIF_F_HW_VLAN_STAG_RX; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_CTAG_FILTER)) + requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_STAG_FILTER)) + requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER; + + if ((requested_features & + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && + (requested_features & + (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) && + adapter->vlan_v2_caps.offloads.ethertype_match == + VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) { + netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); + requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX); + } + + return requested_features; +} + +/** * iavf_fix_features - fix up the netdev feature bits * @netdev: our net device * @features: desired feature bits @@ -3529,13 +4173,7 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (adapter->vf_res && - !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) - features &= ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER); - - return features; + return iavf_fix_netdev_vlan_features(adapter, features); } static const struct net_device_ops iavf_netdev_ops = { @@ -3587,39 +4225,11 @@ static int iavf_check_reset_complete(struct iavf_hw *hw) int iavf_process_config(struct iavf_adapter *adapter) { struct virtchnl_vf_resource *vfres = adapter->vf_res; - int i, num_req_queues = adapter->num_req_queues; + netdev_features_t hw_vlan_features, vlan_features; struct net_device *netdev = adapter->netdev; - struct iavf_vsi *vsi = &adapter->vsi; netdev_features_t hw_enc_features; netdev_features_t hw_features; - /* got VF config message back from PF, now we can parse it */ - for (i = 0; i < vfres->num_vsis; i++) { - if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) - adapter->vsi_res = &vfres->vsi_res[i]; - } - if (!adapter->vsi_res) { - dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); - return -ENODEV; - } - - if (num_req_queues && - num_req_queues > adapter->vsi_res->num_queue_pairs) { - /* Problem. The PF gave us fewer queues than what we had - * negotiated in our request. Need a reset to see if we can't - * get back to a working state. 
- */ - dev_err(&adapter->pdev->dev, - "Requested %d queues, but PF only gave us %d.\n", - num_req_queues, - adapter->vsi_res->num_queue_pairs); - adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; - adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; - iavf_schedule_reset(adapter); - return -ENODEV; - } - adapter->num_req_queues = 0; - hw_enc_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -3663,19 +4273,19 @@ int iavf_process_config(struct iavf_adapter *adapter) */ hw_features = hw_enc_features; - /* Enable VLAN features if supported */ - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) - hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX); + /* get HW VLAN features that can be toggled */ + hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter); + /* Enable cloud filter if ADQ is supported */ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) hw_features |= NETIF_F_HW_TC; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) hw_features |= NETIF_F_GSO_UDP_L4; - netdev->hw_features |= hw_features; + netdev->hw_features |= hw_features | hw_vlan_features; + vlan_features = iavf_get_netdev_vlan_features(adapter); - netdev->features |= hw_features; + netdev->features |= hw_features | vlan_features; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; @@ -3700,21 +4310,6 @@ int iavf_process_config(struct iavf_adapter *adapter) netdev->features &= ~NETIF_F_GSO; } - adapter->vsi.id = adapter->vsi_res->vsi_id; - - adapter->vsi.back = adapter; - adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; - vsi->netdev = adapter->netdev; - vsi->qs_handle = adapter->vsi_res->qset_handle; - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { - adapter->rss_key_size = vfres->rss_key_size; - adapter->rss_lut_size = vfres->rss_lut_size; - } else { - adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; - adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; - } - return 0; } @@ -3984,6 +4579,7 @@ static void iavf_remove(struct pci_dev *pdev) if (iavf_lock_timeout(&adapter->crit_lock, 5000)) dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); + dev_info(&adapter->pdev->dev, "Removing device\n"); /* Shut down all the garbage mashers on the detention level */ iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 3525eab8e9f9..a0b1c18a3273 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -865,6 +865,9 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring, if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) && + vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); napi_gro_receive(&q_vector->napi, skb); } @@ -1468,7 +1471,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) struct iavf_rx_buffer *rx_buffer; union iavf_rx_desc *rx_desc; unsigned int size; - u16 vlan_tag; + u16 vlan_tag = 0; u8 rx_ptype; u64 qword; @@ -1551,9 +1554,13 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* populate checksum, VLAN, and protocol */ iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - - vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? 
- le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) && + rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) + vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1); + if (rx_desc->wb.qword2.ext_status & + cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) && + rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) + vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2); iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); iavf_receive_skb(rx_ring, skb, vlan_tag); @@ -1766,7 +1773,7 @@ tx_only: if (likely(napi_complete_done(napi, work_done))) iavf_update_enable_itr(vsi, q_vector); - return min(work_done, budget - 1); + return min_t(int, work_done, budget - 1); } /** @@ -1781,46 +1788,29 @@ tx_only: * Returns an error code indicating the frame should be dropped upon error and * otherwise returns 0 to indicate the flags have been set properly. **/ -static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb, - struct iavf_ring *tx_ring, - u32 *flags) +static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb, + struct iavf_ring *tx_ring, u32 *flags) { - __be16 protocol = skb->protocol; u32 tx_flags = 0; - if (protocol == htons(ETH_P_8021Q) && - !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { - /* When HW VLAN acceleration is turned off by the user the - * stack sets the protocol to 8021q so that the driver - * can take any steps required to support the SW only - * VLAN handling. In our case the driver doesn't need - * to take any further steps so just set the protocol - * to the encapsulated ethertype. - */ - skb->protocol = vlan_get_protocol(skb); - goto out; - } - /* if we have a HW VLAN tag being added, default to the HW one */ - if (skb_vlan_tag_present(skb)) { - tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IAVF_TX_FLAGS_HW_VLAN; - /* else if it is a SW VLAN, check the next protocol and store the tag */ - } else if (protocol == htons(ETH_P_8021Q)) { - struct vlan_hdr *vhdr, _vhdr; - - vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); - if (!vhdr) - return -EINVAL; + /* stack will only request hardware VLAN insertion offload for protocols + * that the driver supports and has enabled + */ + if (!skb_vlan_tag_present(skb)) + return; - protocol = vhdr->h_vlan_encapsulated_proto; - tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IAVF_TX_FLAGS_SW_VLAN; + tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT; + if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) { + tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN; + } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { + tx_flags |= IAVF_TX_FLAGS_HW_VLAN; + } else { + dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n"); + return; } -out: *flags = tx_flags; - return 0; } /** @@ -2440,8 +2430,13 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, first->gso_segs = 1; /* prepare the xmit flags */ - if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) - goto out_drop; + iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags); + if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { + cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 << + IAVF_TXD_CTX_QW1_CMD_SHIFT; + cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> + IAVF_TX_FLAGS_VLAN_SHIFT; + } /* obtain protocol of skb */ protocol = vlan_get_protocol(skb);
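In the Tx path above, the VLAN tag travels in the upper 16 bits of the per-packet tx_flags word; the IAVF_TX_FLAGS_VLAN_* masks and shifts in iavf_txrx.h (next file) define that layout, with the 3-bit PCP priority occupying bits 31:29. A runnable sketch of the packing, using the same mask and shift values shown below:

#include <stdio.h>
#include <stdint.h>

#define TX_FLAGS_VLAN_MASK       0xffff0000u
#define TX_FLAGS_VLAN_PRIO_MASK  0xe0000000u
#define TX_FLAGS_VLAN_PRIO_SHIFT 29
#define TX_FLAGS_VLAN_SHIFT      16

int main(void)
{
	uint16_t tci = (5u << 13) | 100; /* PCP 5, VID 100 */
	uint32_t tx_flags = (uint32_t)tci << TX_FLAGS_VLAN_SHIFT;

	/* recover the VID and priority the way the descriptor setup does */
	printf("vid=%u prio=%u\n",
	       ((tx_flags & TX_FLAGS_VLAN_MASK) >> TX_FLAGS_VLAN_SHIFT) & 0xfffu,
	       (tx_flags & TX_FLAGS_VLAN_PRIO_MASK) >> TX_FLAGS_VLAN_PRIO_SHIFT);
	return 0;
}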
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index e5b9ba42dd00..2624bf6d009e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -243,19 +243,20 @@ static inline unsigned int iavf_txd_use_count(unsigned int size) #define DESC_NEEDED (MAX_SKB_FRAGS + 6) #define IAVF_MIN_DESC_PENDING 4 -#define IAVF_TX_FLAGS_HW_VLAN BIT(1) -#define IAVF_TX_FLAGS_SW_VLAN BIT(2) -#define IAVF_TX_FLAGS_TSO BIT(3) -#define IAVF_TX_FLAGS_IPV4 BIT(4) -#define IAVF_TX_FLAGS_IPV6 BIT(5) -#define IAVF_TX_FLAGS_FCCRC BIT(6) -#define IAVF_TX_FLAGS_FSO BIT(7) -#define IAVF_TX_FLAGS_FD_SB BIT(9) -#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10) -#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 -#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29 -#define IAVF_TX_FLAGS_VLAN_SHIFT 16 +#define IAVF_TX_FLAGS_HW_VLAN BIT(1) +#define IAVF_TX_FLAGS_SW_VLAN BIT(2) +#define IAVF_TX_FLAGS_TSO BIT(3) +#define IAVF_TX_FLAGS_IPV4 BIT(4) +#define IAVF_TX_FLAGS_IPV6 BIT(5) +#define IAVF_TX_FLAGS_FCCRC BIT(6) +#define IAVF_TX_FLAGS_FSO BIT(7) +#define IAVF_TX_FLAGS_FD_SB BIT(9) +#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10) +#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(11) +#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define IAVF_TX_FLAGS_VLAN_SHIFT 16 struct iavf_tx_buffer { struct iavf_tx_desc *next_to_watch; @@ -362,6 +363,9 @@ struct iavf_ring { u16 flags; #define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0) #define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) +#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3) +#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4) +#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5) /* stats structs */ struct iavf_queue_stats stats; diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 8c3f0f77cb57..5ee1d118fd30 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -137,6 +137,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP | + VIRTCHNL_VF_OFFLOAD_VLAN_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | VIRTCHNL_VF_OFFLOAD_ADQ | @@ -155,6 +156,19 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) NULL, 0); } +int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter) +{ + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; + + if (!VLAN_V2_ALLOWED(adapter)) + return -EOPNOTSUPP; + + adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS; + + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, + NULL, 0); +} + /** * iavf_validate_num_queues * @adapter: adapter structure @@ -235,6 +249,45 @@ out: return err; } +int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) +{ + struct iavf_hw *hw = &adapter->hw; + struct iavf_arq_event_info event; + enum virtchnl_ops op; + enum iavf_status err; + u16 len; + + len = sizeof(struct virtchnl_vlan_caps); + event.buf_len = len; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) { + err = -ENOMEM; + goto out; + } + + while (1) { + /* When the AQ is empty, iavf_clean_arq_element will return + * nonzero and this loop will terminate. 
+ */ + err = iavf_clean_arq_element(hw, &event, NULL); + if (err) + goto out_alloc; + op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) + break; + } + + err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); + if (err) + goto out_alloc; + + memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len)); +out_alloc: + kfree(event.msg_buf); +out: + return err; +} + /** * iavf_configure_queues * @adapter: adapter structure @@ -589,7 +642,6 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter) **/ void iavf_add_vlans(struct iavf_adapter *adapter) { - struct virtchnl_vlan_filter_list *vvfl; int len, i = 0, count = 0; struct iavf_vlan_filter *f; bool more = false; @@ -607,48 +659,105 @@ void iavf_add_vlans(struct iavf_adapter *adapter) if (f->add) count++; } - if (!count) { + if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } - adapter->current_op = VIRTCHNL_OP_ADD_VLAN; - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - if (len > IAVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_vlan_filter_list)) / - sizeof(u16); - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - more = true; - } - vvfl = kzalloc(len, GFP_ATOMIC); - if (!vvfl) { + if (VLAN_ALLOWED(adapter)) { + struct virtchnl_vlan_filter_list *vvfl; + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + len = sizeof(*vvfl) + (count * sizeof(u16)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / + sizeof(u16); + len = sizeof(*vvfl) + (count * sizeof(u16)); + more = true; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + vvfl->vlan_id[i] = f->vlan.vid; + i++; + f->add = false; + if (i == count) + break; + } + } + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - vvfl->vsi_id = adapter->vsi_res->vsi_id; - vvfl->num_elements = count; - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->add) { - vvfl->vlan_id[i] = f->vlan; - i++; - f->add = false; - if (i == count) - break; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); + kfree(vvfl); + } else { + struct virtchnl_vlan_filter_list_v2 *vvfl_v2; + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2; + + len = sizeof(*vvfl_v2) + ((count - 1) * + sizeof(struct virtchnl_vlan_filter)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) / + sizeof(struct virtchnl_vlan_filter); + len = sizeof(*vvfl_v2) + + ((count - 1) * + sizeof(struct virtchnl_vlan_filter)); + more = true; } - } - if (!more) - adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; - spin_unlock_bh(&adapter->mac_vlan_list_lock); + vvfl_v2 = kzalloc(len, GFP_ATOMIC); + if (!vvfl_v2) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl_v2->vport_id = adapter->vsi_res->vsi_id; + vvfl_v2->num_elements = count; + 
list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + struct virtchnl_vlan_supported_caps *filtering_support = + &adapter->vlan_v2_caps.filtering.filtering_support; + struct virtchnl_vlan *vlan; + + /* give priority to outer if it's enabled */ + if (filtering_support->outer) + vlan = &vvfl_v2->filters[i].outer; + else + vlan = &vvfl_v2->filters[i].inner; + + vlan->tci = f->vlan.vid; + vlan->tpid = f->vlan.tpid; + + i++; + f->add = false; + if (i == count) + break; + } + } + + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; - iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); - kfree(vvfl); + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2, + (u8 *)vvfl_v2, len); + kfree(vvfl_v2); + } } /** @@ -659,7 +768,6 @@ void iavf_add_vlans(struct iavf_adapter *adapter) **/ void iavf_del_vlans(struct iavf_adapter *adapter) { - struct virtchnl_vlan_filter_list *vvfl; struct iavf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; @@ -673,53 +781,123 @@ void iavf_del_vlans(struct iavf_adapter *adapter) spin_lock_bh(&adapter->mac_vlan_list_lock); - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->remove) + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + /* since VLAN filtering is not allowed, we don't want to send + * a VLAN delete request because it will most likely fail and + * create unnecessary errors/noise, so just free the VLAN + * filters marked for removal to enable bailing out before + * sending a virtchnl message + */ + if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) { + list_del(&f->list); + kfree(f); + } else if (f->remove) { count++; + } } - if (!count) { + if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } - adapter->current_op = VIRTCHNL_OP_DEL_VLAN; - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - if (len > IAVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_vlan_filter_list)) / - sizeof(u16); - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - more = true; - } - vvfl = kzalloc(len, GFP_ATOMIC); - if (!vvfl) { + if (VLAN_ALLOWED(adapter)) { + struct virtchnl_vlan_filter_list *vvfl; + + adapter->current_op = VIRTCHNL_OP_DEL_VLAN; + + len = sizeof(*vvfl) + (count * sizeof(u16)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / + sizeof(u16); + len = sizeof(*vvfl) + (count * sizeof(u16)); + more = true; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + vvfl->vlan_id[i] = f->vlan.vid; + i++; + list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - vvfl->vsi_id = adapter->vsi_res->vsi_id; - vvfl->num_elements = count; - list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { - if (f->remove) { - vvfl->vlan_id[i] = f->vlan; - i++; - 
list_del(&f->list); - kfree(f); - if (i == count) - break + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); + kfree(vvfl); + } else { + struct virtchnl_vlan_filter_list_v2 *vvfl_v2; + + adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2; + + len = sizeof(*vvfl_v2) + + ((count - 1) * sizeof(struct virtchnl_vlan_filter)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - + sizeof(*vvfl_v2)) / + sizeof(struct virtchnl_vlan_filter); + len = sizeof(*vvfl_v2) + + ((count - 1) * + sizeof(struct virtchnl_vlan_filter)); + more = true; } - } - if (!more) - adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; - spin_unlock_bh(&adapter->mac_vlan_list_lock); + vvfl_v2 = kzalloc(len, GFP_ATOMIC); + if (!vvfl_v2) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl_v2->vport_id = adapter->vsi_res->vsi_id; + vvfl_v2->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + struct virtchnl_vlan_supported_caps *filtering_support = + &adapter->vlan_v2_caps.filtering.filtering_support; + struct virtchnl_vlan *vlan; + + /* give priority to outer if it's enabled */ + if (filtering_support->outer) + vlan = &vvfl_v2->filters[i].outer; + else + vlan = &vvfl_v2->filters[i].inner; + + vlan->tci = f->vlan.vid; + vlan->tpid = f->vlan.tpid; + + list_del(&f->list); + kfree(f); + i++; + if (i == count) + break; + } + } + + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); - iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); - kfree(vvfl); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2, + (u8 *)vvfl_v2, len); + kfree(vvfl_v2); + } } /** @@ -752,15 +930,23 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) if (flags & FLAG_VF_MULTICAST_PROMISC) { adapter->flags |= IAVF_FLAG_ALLMULTI_ON; adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; - dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n", + adapter->netdev->name); } if (!flags) { + if (adapter->flags & IAVF_FLAG_PROMISC_ON) { + adapter->flags &= ~IAVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC; + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } + + if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) { + adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI; + dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n", + adapter->netdev->name); + } } - adapter->flags &= ~(IAVF_FLAG_PROMISC_ON | - IAVF_FLAG_ALLMULTI_ON); - adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC | - IAVF_FLAG_AQ_RELEASE_ALLMULTI); - dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; @@ -784,6 +970,8 @@ void iavf_request_stats(struct iavf_adapter *adapter) /* no error message, this isn't crucial */ return; } + + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS; adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ @@ -936,6 +1124,204 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); }
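The helper defined next maps a VLAN TPID to the virtchnl ethertype capability bit, returning 0 for anything other than 0x8100 (802.1Q) and 0x88a8 (802.1ad). A runnable user-space mirror of that mapping; the bit values are stand-ins for the virtchnl constants:

#include <stdio.h>

#define VC_ETHERTYPE_8100 0x1u /* stand-in for VIRTCHNL_VLAN_ETHERTYPE_8100 */
#define VC_ETHERTYPE_88A8 0x2u /* stand-in for VIRTCHNL_VLAN_ETHERTYPE_88A8 */

static unsigned int tpid_to_vc_ethertype(unsigned short tpid)
{
	switch (tpid) {
	case 0x8100: /* ETH_P_8021Q */
		return VC_ETHERTYPE_8100;
	case 0x88a8: /* ETH_P_8021AD */
		return VC_ETHERTYPE_88A8;
	}

	return 0; /* unsupported TPID */
}

int main(void)
{
	printf("%#x %#x %#x\n", tpid_to_vc_ethertype(0x8100),
	       tpid_to_vc_ethertype(0x88a8), tpid_to_vc_ethertype(0x9100));
	return 0;
}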
+/** + * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype + * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) + */ +static u32 iavf_tpid_to_vc_ethertype(u16 tpid) +{ + switch (tpid) { + case ETH_P_8021Q: + return VIRTCHNL_VLAN_ETHERTYPE_8100; + case ETH_P_8021AD: + return VIRTCHNL_VLAN_ETHERTYPE_88A8; + } + + return 0; +} + +/** + * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message + * @adapter: adapter structure + * @msg: message structure used for updating offloads over virtchnl to update + * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) + * @offload_op: opcode used to determine which support structure to check + */ +static int +iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter, + struct virtchnl_vlan_setting *msg, u16 tpid, + enum virtchnl_ops offload_op) +{ + struct virtchnl_vlan_supported_caps *offload_support; + u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid); + + /* reference the correct offload support structure */ + switch (offload_op) { + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: + offload_support = + &adapter->vlan_v2_caps.offloads.stripping_support; + break; + case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: + case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: + offload_support = + &adapter->vlan_v2_caps.offloads.insertion_support; + break; + default: + dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n", + offload_op); + return -EINVAL; + } + + /* make sure ethertype is supported */ + if (offload_support->outer & vc_ethertype && + offload_support->outer & VIRTCHNL_VLAN_TOGGLE) { + msg->outer_ethertype_setting = vc_ethertype; + } else if (offload_support->inner & vc_ethertype && + offload_support->inner & VIRTCHNL_VLAN_TOGGLE) { + msg->inner_ethertype_setting = vc_ethertype; + } else { + dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n", + offload_op, tpid); + return -EINVAL; + } + + return 0; +} + +/** + * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request + * @adapter: adapter structure + * @tpid: VLAN TPID + * @offload_op: opcode used to determine which AQ required bit to clear + */ +static void +iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid, + enum virtchnl_ops offload_op) +{ + switch (offload_op) { + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; + break; + case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; + break; + case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; + break; + default: + dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n", + offload_op); + } +} + +/** + * 
iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl + * @adapter: adapter structure + * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8) + * @offload_op: offload_op used to make the request over virtchnl + */ +static void +iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, + enum virtchnl_ops offload_op) +{ + struct virtchnl_vlan_setting *msg; + int len = sizeof(*msg); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n", + offload_op, adapter->current_op); + return; + } + + adapter->current_op = offload_op; + + msg = kzalloc(len, GFP_KERNEL); + if (!msg) + return; + + msg->vport_id = adapter->vsi_res->vsi_id; + + /* always clear to prevent unsupported and endless requests */ + iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op); + + /* only send valid offload requests */ + if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op)) + iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len); + else + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + kfree(msg); +} + +/** + * iavf_enable_vlan_stripping_v2 - enable VLAN stripping + * @adapter: adapter structure + * @tpid: VLAN TPID used to enable VLAN stripping + */ +void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2); +} + +/** + * iavf_disable_vlan_stripping_v2 - disable VLAN stripping + * @adapter: adapter structure + * @tpid: VLAN TPID used to disable VLAN stripping + */ +void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2); +} + +/** + * iavf_enable_vlan_insertion_v2 - enable VLAN insertion + * @adapter: adapter structure + * @tpid: VLAN TPID used to enable VLAN insertion + */ +void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2); +} + +/** + * iavf_disable_vlan_insertion_v2 - disable VLAN insertion + * @adapter: adapter structure + * @tpid: VLAN TPID used to disable VLAN insertion + */ +void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); +} + #define IAVF_MAX_SPEED_STRLEN 13 /** @@ -1005,7 +1391,7 @@ print_link_msg: } else if (link_speed_mbps == SPEED_UNKNOWN) { snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); } else { - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s", + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", link_speed_mbps, "Mbps"); } @@ -1510,7 +1896,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: - dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n"); + dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); @@ -1722,8 +2108,67 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } spin_lock_bh(&adapter->mac_vlan_list_lock); iavf_add_filter(adapter, adapter->hw.mac.addr); + + if (VLAN_ALLOWED(adapter)) { + if (!list_empty(&adapter->vlan_filter_list)) { + struct iavf_vlan_filter *vlf; + + /* re-add all VLAN filters 
over virtchnl */ + list_for_each_entry(vlf, + &adapter->vlan_filter_list, + list) + vlf->add = true; + + adapter->aq_required |= + IAVF_FLAG_AQ_ADD_VLAN_FILTER; + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + iavf_parse_vf_resource_msg(adapter); + + /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the + * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish + * configuration + */ + if (VLAN_V2_ALLOWED(adapter)) + break; + /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2 + * wasn't successfully negotiated with the PF + */ + } + fallthrough; + case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: { + if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) + memcpy(&adapter->vlan_v2_caps, msg, + min_t(u16, msglen, + sizeof(adapter->vlan_v2_caps))); + iavf_process_config(adapter); + + /* unlock crit_lock before acquiring rtnl_lock as other + * processes holding rtnl_lock could be waiting for the same + * crit_lock + */ + mutex_unlock(&adapter->crit_lock); + /* VLAN capabilities can change during VFR, so make sure to + * update the netdev features with the new capabilities + */ + rtnl_lock(); + netdev_update_features(netdev); + rtnl_unlock(); + if (iavf_lock_timeout(&adapter->crit_lock, 10000)) + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", + __FUNCTION__); + + /* Request VLAN offload settings */ + if (VLAN_V2_ALLOWED(adapter)) + iavf_set_vlan_offload_features(adapter, 0, + netdev->features); + + iavf_set_queue_vlan_tag_loc(adapter); + } break; case VIRTCHNL_OP_ENABLE_QUEUES: diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index b2db39ee5f85..5505bd658a9b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -503,6 +503,7 @@ struct ice_pf { struct pci_dev *pdev; struct devlink_region *nvm_region; + struct devlink_region *sram_region; struct devlink_region *devcaps_region; /* devlink port data */ @@ -552,6 +553,7 @@ struct ice_pf { spinlock_t aq_wait_lock; struct hlist_head aq_wait_list; wait_queue_head_t aq_wait_queue; + bool fw_emp_reset_disabled; wait_queue_head_t reset_wait_queue; @@ -576,6 +578,7 @@ struct ice_pf { struct ice_hw_port_stats stats_prev; struct ice_hw hw; u8 stat_prev_loaded:1; /* has previous stats been loaded */ + u8 rdma_mode; u16 dcbx_cap; u32 tx_timeout_count; unsigned long tx_timeout_last_recovery; @@ -847,7 +850,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup); int ice_plug_aux_dev(struct ice_pf *pf); void ice_unplug_aux_dev(struct ice_pf *pf); int ice_init_rdma(struct ice_pf *pf); -const char *ice_stat_str(enum ice_status stat_err); const char *ice_aq_str(enum ice_aq_err aq_err); bool ice_is_wol_supported(struct ice_hw *hw); int diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 4eef3488d86f..ad1dcfa5ff65 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -117,6 +117,8 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_NET_VER 0x004C #define ICE_AQC_CAPS_PENDING_NET_VER 0x004D #define ICE_AQC_CAPS_RDMA 0x0051 +#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 +#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 u8 major_ver; @@ -1408,6 +1410,11 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */ #define ICE_AQC_NVM_ACTIV_SEL_MASK ICE_M(0x7, 3) #define ICE_AQC_NVM_FLASH_ONLY BIT(7) +#define 
ICE_AQC_NVM_RESET_LVL_M ICE_M(0x3, 0) /* Write reply only */ +#define ICE_AQC_NVM_POR_FLAG 0 +#define ICE_AQC_NVM_PERST_FLAG 1 +#define ICE_AQC_NVM_EMPR_FLAG 2 +#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */ __le16 module_typeid; __le16 length; #define ICE_AQC_NVM_ERASE_LEN 0xFFFF diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 1efc635cc0f5..44bdd0ed1629 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -759,7 +759,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_channel *ch = ring->ch; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; u16 pf_q; u8 tc; @@ -804,9 +804,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, ring->q_handle, 1, qg_buf, buf_len, NULL); if (status) { - dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n", - ice_stat_str(status)); - return -ENODEV; + dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n", + status); + return status; } /* Add Tx Queue TEID into the VSI Tx ring from the @@ -929,7 +929,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, struct ice_pf *pf = vsi->back; struct ice_q_vector *q_vector; struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; u32 val; /* clear cause_ena bit for disabled queues */ @@ -953,18 +953,18 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, rel_vmvf_num, NULL); /* if the disable queue command was exercised during an - * active reset flow, ICE_ERR_RESET_ONGOING is returned. + * active reset flow, -EBUSY is returned. * This is not an error as the reset operation disables * queues at the hardware level anyway. */ - if (status == ICE_ERR_RESET_ONGOING) { + if (status == -EBUSY) { dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n"); - } else if (status == ICE_ERR_DOES_NOT_EXIST) { + } else if (status == -ENOENT) { dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n"); } else if (status) { - dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n", - ice_stat_str(status)); - return -ENODEV; + dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n", + status); + return status; } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index b3066d0fea8b..157add1268d9 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2,7 +2,6 @@ /* Copyright (c) 2018, Intel Corporation. */ #include "ice_common.h" -#include "ice_lib.h" #include "ice_sched.h" #include "ice_adminq_cmd.h" #include "ice_flow.h" @@ -16,10 +15,10 @@ * This function sets the MAC type of the adapter based on the * vendor ID and device ID stored in the HW structure. */ -static enum ice_status ice_set_mac_type(struct ice_hw *hw) +static int ice_set_mac_type(struct ice_hw *hw) { if (hw->vendor_id != PCI_VENDOR_ID_INTEL) - return ICE_ERR_DEVICE_NOT_SUPPORTED; + return -ENODEV; switch (hw->device_id) { case ICE_DEV_ID_E810C_BACKPLANE: @@ -99,7 +98,7 @@ bool ice_is_e810t(struct ice_hw *hw) * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port * configuration, flow director filters, etc.). 
*/ -enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) +int ice_clear_pf_cfg(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -123,21 +122,21 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) * ice_discover_dev_caps is expected to be called before this function is * called. */ -static enum ice_status +static int ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_manage_mac_read_resp *resp; struct ice_aqc_manage_mac_read *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; u16 flags; u8 i; cmd = &desc.params.mac_read; if (buf_size < sizeof(*resp)) - return ICE_ERR_BUF_TOO_SHORT; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); @@ -150,7 +149,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); - return ICE_ERR_CFG; + return -EIO; } /* A single port can report up to two (LAN and WoL) addresses */ @@ -176,7 +175,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, * * Returns the various PHY capabilities supported on the Port (0x0600) */ -enum ice_status +int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *pcaps, struct ice_sq_cd *cd) @@ -184,18 +183,18 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps *cmd; u16 pcaps_size = sizeof(*pcaps); struct ice_aq_desc desc; - enum ice_status status; struct ice_hw *hw; + int status; cmd = &desc.params.get_phy; if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; if (report_mode == ICE_AQC_REPORT_DFLT_CFG && !ice_fw_supports_report_dflt_cfg(hw)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); @@ -252,7 +251,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, * returns error (ENOENT), then no cage present. If no cage present, then * connection type is backplane or BASE-T. */ -static enum ice_status +static int ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, struct ice_sq_cd *cd) { @@ -418,7 +417,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) * * Get Link Status (0x607). Returns the link status of the adapter. 
*/ -enum ice_status +int ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd) { @@ -429,12 +428,12 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_fc_info *hw_fc_info; bool tx_pause, rx_pause; struct ice_aq_desc desc; - enum ice_status status; struct ice_hw *hw; u16 cmd_flags; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; li_old = &pi->phy.link_info_old; hw_media_type = &pi->phy.media_type; @@ -556,7 +555,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, * * Set MAC configuration (0x0603) */ -enum ice_status +int ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_cfg *cmd; @@ -565,7 +564,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) cmd = &desc.params.set_mac_cfg; if (max_frame_size == 0) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); @@ -580,17 +579,17 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) * ice_init_fltr_mgmt_struct - initializes filter management list and locks * @hw: pointer to the HW struct */ -static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) +static int ice_init_fltr_mgmt_struct(struct ice_hw *hw) { struct ice_switch_info *sw; - enum ice_status status; + int status; hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*hw->switch_info), GFP_KERNEL); sw = hw->switch_info; if (!sw) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; INIT_LIST_HEAD(&sw->vsi_list_map_head); sw->prof_res_bm_init = 0; @@ -666,17 +665,17 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) * ice_get_fw_log_cfg - get FW logging configuration * @hw: pointer to the HW struct */ -static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) +static int ice_get_fw_log_cfg(struct ice_hw *hw) { struct ice_aq_desc desc; - enum ice_status status; __le16 *config; + int status; u16 size; size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX; config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL); if (!config) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); @@ -738,15 +737,15 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) * messages from FW to SW. Interrupts are typically disabled during the device's * initialization phase. 
*/ -static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) +static int ice_cfg_fw_log(struct ice_hw *hw, bool enable) { struct ice_aqc_fw_logging *cmd; - enum ice_status status = 0; u16 i, chgs = 0, len = 0; struct ice_aq_desc desc; __le16 *data = NULL; u8 actv_evnts = 0; void *buf = NULL; + int status = 0; if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) return 0; @@ -790,7 +789,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) sizeof(*data), GFP_KERNEL); if (!data) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } val = i << ICE_AQC_FW_LOG_ID_S; @@ -904,12 +903,12 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ -enum ice_status ice_init_hw(struct ice_hw *hw) +int ice_init_hw(struct ice_hw *hw) { struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; u16 mac_buf_len; void *mac_buf; + int status; /* Set MAC type based on DeviceID */ status = ice_set_mac_type(hw); @@ -956,7 +955,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*hw->port_info), GFP_KERNEL); if (!hw->port_info) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_cqinit; } @@ -985,7 +984,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_sched; } @@ -1006,7 +1005,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) /* need a valid SW entry point to build a Tx tree */ if (!hw->sw_entry_point_layer) { ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); - status = ICE_ERR_CFG; + status = -EIO; goto err_unroll_sched; } INIT_LIST_HEAD(&hw->agg_list); @@ -1026,7 +1025,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); if (!mac_buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_fltr_mgmt_struct; } @@ -1096,7 +1095,7 @@ void ice_deinit_hw(struct ice_hw *hw) * ice_check_reset - Check to see if a global reset is complete * @hw: pointer to the hardware structure */ -enum ice_status ice_check_reset(struct ice_hw *hw) +int ice_check_reset(struct ice_hw *hw) { u32 cnt, reg = 0, grst_timeout, uld_mask; @@ -1116,7 +1115,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) if (cnt == grst_timeout) { ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); - return ICE_ERR_RESET_FAILED; + return -EIO; } #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ @@ -1143,7 +1142,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. 
GLNVM_ULD = 0x%x\n", reg); - return ICE_ERR_RESET_FAILED; + return -EIO; } return 0; @@ -1156,7 +1155,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) * If a global reset has been triggered, this function checks * for its completion and then issues the PF reset */ -static enum ice_status ice_pf_reset(struct ice_hw *hw) +static int ice_pf_reset(struct ice_hw *hw) { u32 cnt, reg; @@ -1169,7 +1168,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { /* poll on global reset currently in progress until done */ if (ice_check_reset(hw)) - return ICE_ERR_RESET_FAILED; + return -EIO; return 0; } @@ -1194,7 +1193,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); - return ICE_ERR_RESET_FAILED; + return -EIO; } return 0; @@ -1212,7 +1211,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) * This has to be cleared using ice_clear_pxe_mode again, once the AQ * interface has been restored in the rebuild flow. */ -enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) +int ice_reset(struct ice_hw *hw, enum ice_reset_req req) { u32 val = 0; @@ -1228,7 +1227,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) val = GLGEN_RTRIG_GLOBR_M; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } val |= rd32(hw, GLGEN_RTRIG); @@ -1247,16 +1246,16 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) * * Copies rxq context from dense structure to HW register space */ -static enum ice_status +static int ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) { u8 i; if (!ice_rxq_ctx) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (rxq_index > QRX_CTRL_MAX_INDEX) - return ICE_ERR_PARAM; + return -EINVAL; /* Copy each dword separately to HW */ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { @@ -1306,14 +1305,14 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * it to HW register space and enables the hardware to prefetch descriptors * instead of only fetching them on demand */ -enum ice_status +int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index) { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; if (!rlan_ctx) - return ICE_ERR_BAD_PTR; + return -EINVAL; rlan_ctx->prefena = 1; @@ -1369,9 +1368,8 @@ static int ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { - return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw), - (struct ice_aq_desc *)desc, - buf, buf_size, cd)); + return ice_sq_send_cmd(hw, ice_get_sbq(hw), + (struct ice_aq_desc *)desc, buf, buf_size, cd); } /** @@ -1453,17 +1451,17 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode) * Retry sending the FW Admin Queue command, multiple times, to the FW Admin * Queue if the EBUSY AQ error is returned. 
*/ -static enum ice_status +static int ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aq_desc desc_cpy; - enum ice_status status; bool is_cmd_for_retry; u8 *buf_cpy = NULL; u8 idx = 0; u16 opcode; + int status; opcode = le16_to_cpu(desc->opcode); is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); @@ -1473,7 +1471,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (buf) { buf_cpy = kzalloc(buf_size, GFP_KERNEL); if (!buf_cpy) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } memcpy(&desc_cpy, desc, sizeof(desc_cpy)); @@ -1510,13 +1508,13 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, * * Helper function to send FW Admin Queue commands to the FW Admin Queue. */ -enum ice_status +int ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd = &desc->params.res_owner; bool lock_acquired = false; - enum ice_status status; + int status; /* When a package download is in process (i.e. when the firmware's * Global Configuration Lock resource is held), only the Download @@ -1555,11 +1553,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, * * Get the firmware version (0x0001) from the admin queue commands */ -enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) +int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) { struct ice_aqc_get_ver *resp; struct ice_aq_desc desc; - enum ice_status status; + int status; resp = &desc.params.get_ver; @@ -1590,7 +1588,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) * * Send the driver version (0x0002) to the firmware */ -enum ice_status +int ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd) { @@ -1601,7 +1599,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, cmd = &desc.params.driver_ver; if (!dv) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); @@ -1627,7 +1625,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well (0x0003). */ -enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) +int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) { struct ice_aqc_q_shutdown *cmd; struct ice_aq_desc desc; @@ -1654,12 +1652,12 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * Requests common resource using the admin queue commands (0x0008). 
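 *
 * A caller-side sketch, via the ice_acquire_res() wrapper, of handling
 * the three Global Config Lock states enumerated below (the resource ID
 * and access type shown here are illustrative):
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
 *				 ICE_RES_WRITE, timeout);
 *	if (status == -EALREADY)
 *		(skip the download; another driver already did it)
 *	else if (status)
 *		(lock not acquired; fail to load)
 *	else
 *		(download the package, then ice_release_res())
 *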
* When attempting to acquire the Global Config Lock, the driver can * learn of three states: - * 1) ICE_SUCCESS - acquired lock, and can perform download package - * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load - * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has - * successfully downloaded the package; the driver does - * not have to download the package and can continue - * loading + * 1) 0 - acquired lock, and can perform download package + * 2) -EIO - did not get lock, driver should fail to load + * 3) -EALREADY - did not get lock, but another driver has + * successfully downloaded the package; the driver does + * not have to download the package and can continue + * loading * * Note that if the caller is in an acquire lock, perform action, release lock * phase of operation, it is possible that the FW may detect a timeout and issue @@ -1668,14 +1666,14 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * will likely get an error propagated back to it indicating the Download * Package, Update Package or the Release Resource AQ commands timed out. */ -static enum ice_status +static int ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd_resp; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd_resp = &desc.params.res_owner; @@ -1707,15 +1705,15 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, } else if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_IN_PROG) { *timeout = le32_to_cpu(cmd_resp->timeout); - return ICE_ERR_AQ_ERROR; + return -EIO; } else if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_DONE) { - return ICE_ERR_AQ_NO_WORK; + return -EALREADY; } /* invalid FW response, force a timeout immediately */ *timeout = 0; - return ICE_ERR_AQ_ERROR; + return -EIO; } /* If the resource is held by some other driver, the command completes @@ -1737,7 +1735,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, * * release common resource using the admin queue commands (0x0009) */ -static enum ice_status +static int ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, struct ice_sq_cd *cd) { @@ -1763,23 +1761,23 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, * * This function will attempt to acquire the ownership of a resource. */ -enum ice_status +int ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout) { #define ICE_RES_POLLING_DELAY_MS 10 u32 delay = ICE_RES_POLLING_DELAY_MS; u32 time_left = timeout; - enum ice_status status; + int status; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has + /* A return code of -EALREADY means that another driver has * previously acquired the resource and performed any necessary updates; * in this case the caller does not obtain the resource and has no * further work to do. */ - if (status == ICE_ERR_AQ_NO_WORK) + if (status == -EALREADY) goto ice_acquire_res_exit; if (status) @@ -1792,7 +1790,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, timeout = (timeout > delay) ? 
timeout - delay : 0; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - if (status == ICE_ERR_AQ_NO_WORK) + if (status == -EALREADY) /* lock free, but no work to do */ break; @@ -1800,15 +1798,15 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, /* lock acquired */ break; } - if (status && status != ICE_ERR_AQ_NO_WORK) + if (status && status != -EALREADY) ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); ice_acquire_res_exit: - if (status == ICE_ERR_AQ_NO_WORK) { + if (status == -EALREADY) { if (access == ICE_RES_WRITE) ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); else - ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); + ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); } return status; } @@ -1822,16 +1820,15 @@ ice_acquire_res_exit: */ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) { - enum ice_status status; u32 total_delay = 0; + int status; status = ice_aq_release_res(hw, res, 0, NULL); /* there are some rare cases when trying to release the resource * results in an admin queue timeout, so handle them correctly */ - while ((status == ICE_ERR_AQ_TIMEOUT) && - (total_delay < hw->adminq.sq_cmd_timeout)) { + while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) { mdelay(1); status = ice_aq_release_res(hw, res, 0, NULL); total_delay++; @@ -1849,7 +1846,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) * * Helper function to allocate/free resources using the admin queue commands */ -enum ice_status +int ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd) @@ -1860,10 +1857,10 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, cmd = &desc.params.sw_res_ctrl; if (!buf) - return ICE_ERR_PARAM; + return -EINVAL; if (buf_size < flex_array_size(buf, elem, num_entries)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); @@ -1882,17 +1879,17 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, * @btm: allocate from bottom * @res: pointer to array that will receive the resources */ -enum ice_status +int ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(buf, elem, num); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Prepare buffer to allocate resource. */ buf->num_elems = cpu_to_le16(num); @@ -1920,16 +1917,16 @@ ice_alloc_res_exit: * @num: number of resources * @res: pointer to array that contains the resources to free */ -enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) +int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(buf, elem, num); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Prepare buffer to free resource. 
*/ buf->num_elems = cpu_to_le16(num); @@ -2071,6 +2068,18 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", prefix, caps->max_mtu); break; + case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: + caps->pcie_reset_avoidance = (number > 0); + ice_debug(hw, ICE_DBG_INIT, + "%s: pcie_reset_avoidance = %d\n", prefix, + caps->pcie_reset_avoidance); + break; + case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: + caps->reset_restrict_support = (number == 1); + ice_debug(hw, ICE_DBG_INIT, + "%s: reset_restrict_support = %d\n", prefix, + caps->reset_restrict_support); + break; default: /* Not one of the recognized common capabilities */ found = false; @@ -2486,19 +2495,19 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that * firmware could return) to avoid this. */ -enum ice_status +int ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_list_caps *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.get_cap; if (opc != ice_aqc_opc_list_func_caps && opc != ice_aqc_opc_list_dev_caps) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); @@ -2517,16 +2526,16 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, * Read the device capabilities and extract them into the dev_caps structure * for later use. */ -enum ice_status +int ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) { - enum ice_status status; u32 cap_count = 0; void *cbuf; + int status; cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!cbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum @@ -2551,16 +2560,16 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) * Read the function capabilities and extract them into the func_caps structure * for later use. */ -static enum ice_status +static int ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) { - enum ice_status status; u32 cap_count = 0; void *cbuf; + int status; cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!cbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum @@ -2650,9 +2659,9 @@ void ice_set_safe_mode_caps(struct ice_hw *hw) * ice_get_caps - get info about the HW * @hw: pointer to the hardware structure */ -enum ice_status ice_get_caps(struct ice_hw *hw) +int ice_get_caps(struct ice_hw *hw) { - enum ice_status status; + int status; status = ice_discover_dev_caps(hw, &hw->dev_caps); if (status) @@ -2670,7 +2679,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw) * * This function is used to write MAC address to the NVM (0x0108). */ -enum ice_status +int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd) { @@ -2692,7 +2701,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, * * Tell the firmware that the driver is taking over from PXE (0x0110). 
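 *
 * Like the other direct (no-buffer) AQ wrappers in this file, the body
 * boils down to filling a default descriptor and sending it with no data
 * buffer; a sketch, assuming the opcode enumerator is named
 * ice_aqc_opc_clear_pxe_mode:
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
 *	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);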
*/ -static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) +static int ice_aq_clear_pxe_mode(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -2903,15 +2912,15 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, * mode as the PF may not have the privilege to set some of the PHY Config * parameters. This status will be indicated by the command response (0x0601). */ -enum ice_status +int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) { struct ice_aq_desc desc; - enum ice_status status; + int status; if (!cfg) - return ICE_ERR_PARAM; + return -EINVAL; /* Ensure that only valid bits of cfg->caps can be turned on. */ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { @@ -2952,13 +2961,13 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, * ice_update_link_info - update status of the HW network link * @pi: port info structure of the interested logical port */ -enum ice_status ice_update_link_info(struct ice_port_info *pi) +int ice_update_link_info(struct ice_port_info *pi) { struct ice_link_status *li; - enum ice_status status; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; li = &pi->phy.link_info; @@ -2974,7 +2983,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); @@ -3070,7 +3079,7 @@ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) * @cfg: PHY configuration data to set FC mode * @req_mode: FC mode to configure */ -enum ice_status +int ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode) { @@ -3078,7 +3087,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, u8 pause_mask = 0x0; if (!pi || !cfg) - return ICE_ERR_BAD_PTR; + return -EINVAL; switch (req_mode) { case ICE_FC_FULL: @@ -3117,23 +3126,23 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, * * Set the requested flow control mode. */ -enum ice_status +int ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) { struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || !aq_failures) - return ICE_ERR_BAD_PTR; + return -EINVAL; *aq_failures = 0; hw = pi->hw; pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Get the current PHY config */ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, @@ -3258,22 +3267,22 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, * @cfg: PHY configuration data to set FEC mode * @fec: FEC mode to configure */ -enum ice_status +int ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) { struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || !cfg) - return ICE_ERR_BAD_PTR; + return -EINVAL; hw = pi->hw; pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_get_phy_caps(pi, false, (ice_fw_supports_report_dflt_cfg(hw) ? 
@@ -3313,7 +3322,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, cfg->link_fec_opt |= pcaps->link_fec_options; break; default: - status = ICE_ERR_PARAM; + status = -EINVAL; break; } @@ -3344,13 +3353,13 @@ out: * The variable link_up is invalid if status is non zero. As a * result of this call, link status reporting becomes enabled */ -enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) +int ice_get_link_status(struct ice_port_info *pi, bool *link_up) { struct ice_phy_info *phy_info; - enum ice_status status = 0; + int status = 0; if (!pi || !link_up) - return ICE_ERR_PARAM; + return -EINVAL; phy_info = &pi->phy; @@ -3375,7 +3384,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) * * Sets up the link and restarts the Auto-Negotiation over the link. */ -enum ice_status +int ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd) { @@ -3405,7 +3414,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, * * Set event mask (0x0613) */ -enum ice_status +int ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd) { @@ -3430,7 +3439,7 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, * * Enable/disable loopback on a given port */ -enum ice_status +int ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_lb *cmd; @@ -3453,7 +3462,7 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) * * Set LED value for the given port (0x06e9) */ -enum ice_status +int ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd) { @@ -3488,17 +3497,17 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, * * Read/Write SFF EEPROM (0x06EE) */ -enum ice_status +int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd) { struct ice_aqc_sff_eeprom *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!data || (mem_addr & 0xff00)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); cmd = &desc.params.read_write_sff_param; @@ -3527,23 +3536,23 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, * * Internal function to get (0x0B05) or set (0x0B03) RSS look up table */ -static enum ice_status +static int __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) { u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; struct ice_aqc_get_set_rss_lut *cmd_resp; struct ice_aq_desc desc; - enum ice_status status; + int status; u8 *lut; if (!params) - return ICE_ERR_PARAM; + return -EINVAL; vsi_handle = params->vsi_handle; lut = params->lut; if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) - return ICE_ERR_PARAM; + return -EINVAL; lut_size = params->lut_size; lut_type = params->lut_type; @@ -3572,7 +3581,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); break; default: - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_get_set_rss_lut_exit; } @@ -3607,7 +3616,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params } fallthrough; default: - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_get_set_rss_lut_exit; } @@ -3626,7 +3635,7 @@ ice_aq_get_set_rss_lut_exit: * * 
get the RSS lookup table, PF or VSI type */ -enum ice_status +int ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) { return __ice_aq_get_set_rss_lut(hw, get_params, false); @@ -3639,7 +3648,7 @@ ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_ * * set the RSS lookup table, PF or VSI type */ -enum ice_status +int ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) { return __ice_aq_get_set_rss_lut(hw, set_params, true); @@ -3654,10 +3663,9 @@ ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_ * * get (0x0B04) or set (0x0B02) the RSS key per VSI */ -static enum -ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, - struct ice_aqc_get_set_rss_keys *key, - bool set) +static int +__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, + struct ice_aqc_get_set_rss_keys *key, bool set) { struct ice_aqc_get_set_rss_key *cmd_resp; u16 key_size = sizeof(*key); @@ -3688,12 +3696,12 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, * * get the RSS key per VSI */ -enum ice_status +int ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *key) { if (!ice_is_vsi_valid(hw, vsi_handle) || !key) - return ICE_ERR_PARAM; + return -EINVAL; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), key, false); @@ -3707,12 +3715,12 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, * * set the RSS key per VSI */ -enum ice_status +int ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys) { if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) - return ICE_ERR_PARAM; + return -EINVAL; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), keys, true); @@ -3739,7 +3747,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue * flow. 
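 *
 * The caller's buf_size must equal the sum of struct_size() over all the
 * queue groups in qg_list; e.g. for a single group carrying one Tx queue
 * (sketch):
 *
 *	buf_size = struct_size(qg_list, txqs, 1);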
*/ -static enum ice_status +static int ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, struct ice_sq_cd *cd) @@ -3754,10 +3762,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); if (!qg_list) - return ICE_ERR_PARAM; + return -EINVAL; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) - return ICE_ERR_PARAM; + return -EINVAL; for (i = 0, list = qg_list; i < num_qgrps; i++) { sum_size += struct_size(list, txqs, list->num_txqs); @@ -3766,7 +3774,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, } if (buf_size != sum_size) - return ICE_ERR_PARAM; + return -EINVAL; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); @@ -3787,7 +3795,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, * * Disable LAN Tx queue (0x0C31) */ -static enum ice_status +static int ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, enum ice_disq_rst_src rst_src, u16 vmvf_num, @@ -3796,18 +3804,18 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *item; struct ice_aqc_dis_txqs *cmd; struct ice_aq_desc desc; - enum ice_status status; u16 i, sz = 0; + int status; cmd = &desc.params.dis_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); /* qg_list can be NULL only in VM/VF reset flow */ if (!qg_list && !rst_src) - return ICE_ERR_PARAM; + return -EINVAL; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) - return ICE_ERR_PARAM; + return -EINVAL; cmd->num_entries = num_qgrps; @@ -3856,7 +3864,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, } if (buf_size != sz) - return ICE_ERR_PARAM; + return -EINVAL; do_aq: status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); @@ -3914,8 +3922,7 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, cmd->num_qset_grps = num_qset_grps; - return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list, - buf_size, cd)); + return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); } /* End of FW Admin Queue command wrappers */ @@ -4111,7 +4118,7 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * @dest_ctx: pointer to memory for the packed structure * @ce_info: a description of the structure to be transformed */ -enum ice_status +int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { @@ -4141,7 +4148,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); break; default: - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } } @@ -4185,7 +4192,7 @@ ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) * * This function adds one LAN queue */ -enum ice_status +int ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -4193,19 +4200,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_sched_node *parent; struct ice_q_ctx *q_ctx; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; if (num_qgrps > 1 || buf->num_txqs > 1) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; hw = pi->hw; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); @@ -4213,7 +4220,7 @@ ice_ena_vsi_txq(struct 
ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, if (!q_ctx) { ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", q_handle); - status = ICE_ERR_PARAM; + status = -EINVAL; goto ena_txq_exit; } @@ -4221,7 +4228,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_LAN); if (!parent) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto ena_txq_exit; } @@ -4290,20 +4297,20 @@ ena_txq_exit: * * This function removes queues and their corresponding nodes in SW DB */ -enum ice_status +int ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handles, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { - enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_aqc_dis_txq_item *qg_list; struct ice_q_ctx *q_ctx; + int status = -ENOENT; struct ice_hw *hw; u16 i, buf_size; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; hw = pi->hw; @@ -4315,13 +4322,13 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, if (rst_src) return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, vmvf_num, NULL); - return ICE_ERR_CFG; + return -EIO; } buf_size = struct_size(qg_list, q_id, 1); qg_list = kzalloc(buf_size, GFP_KERNEL); if (!qg_list) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mutex_lock(&pi->sched_lock); @@ -4368,18 +4375,18 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, * * This function adds/updates the VSI queues per TC. */ -static enum ice_status +static int ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *maxqs, u8 owner) { - enum ice_status status = 0; + int status = 0; u8 i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); @@ -4407,7 +4414,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, * * This function adds/updates the VSI LAN queues per TC. 
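 *
 * Presumably a thin wrapper around ice_cfg_vsi_qs(), mirroring the RDMA
 * variant visible below:
 *
 *	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
 *			      ICE_SCHED_NODE_OWNER_LAN);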
*/ -enum ice_status +int ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs) { @@ -4428,9 +4435,8 @@ int ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_rdmaqs) { - return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, - max_rdmaqs, - ICE_SCHED_NODE_OWNER_RDMA)); + return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, + ICE_SCHED_NODE_OWNER_RDMA); } /** @@ -4451,7 +4457,6 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_aqc_add_rdma_qset_data *buf; struct ice_sched_node *parent; - enum ice_status status; struct ice_hw *hw; u16 i, buf_size; int ret; @@ -4502,12 +4507,10 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; for (i = 0; i < num_qsets; i++) { node.node_teid = buf->rdma_qsets[i].qset_teid; - status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, - &node); - if (status) { - ret = ice_status_to_errno(status); + ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, + &node); + if (ret) break; - } qset_teid[i] = le32_to_cpu(node.node_teid); } rdma_error_exit: @@ -4528,8 +4531,8 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id) { struct ice_aqc_dis_txq_item *qg_list; - enum ice_status status = 0; struct ice_hw *hw; + int status = 0; u16 qg_size; int i; @@ -4568,7 +4571,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, mutex_unlock(&pi->sched_lock); kfree(qg_list); - return ice_status_to_errno(status); + return status; } /** @@ -4577,7 +4580,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, * * Initializes required config data for VSI, FD, ACL, and RSS before replay. */ -static enum ice_status ice_replay_pre_init(struct ice_hw *hw) +static int ice_replay_pre_init(struct ice_hw *hw) { struct ice_switch_info *sw = hw->switch_info; u8 i; @@ -4604,12 +4607,12 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw) * Restore all VSI configuration after reset. It is required to call this * function with main VSI first. 
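 *
 * A hypothetical caller ordering (other_vsi_handle is illustrative):
 *
 *	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	if (!status)
 *		status = ice_replay_vsi(hw, other_vsi_handle);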
*/ -enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) { - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* Replay pre-initialization if there is any */ if (vsi_handle == ICE_MAIN_VSI_HANDLE) { @@ -4725,12 +4728,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, * * This function queries HW element information */ -enum ice_status +int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf) { u16 buf_size, num_elem_ret = 0; - enum ice_status status; + int status; buf_size = sizeof(*buf); memset(buf, 0, buf_size); @@ -4775,7 +4778,7 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, cmd->param_indx = idx; cmd->param_val = cpu_to_le32(value); - return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** @@ -4796,7 +4799,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, { struct ice_aqc_driver_shared_params *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (idx >= ICE_AQC_DRIVER_PARAM_MAX) return -EIO; @@ -4810,7 +4813,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) - return ice_status_to_errno(status); + return status; *value = le32_to_cpu(cmd->param_val); @@ -4840,7 +4843,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, cmd->gpio_num = pin_idx; cmd->gpio_val = value ? 1 : 0; - return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** @@ -4860,7 +4863,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, { struct ice_aqc_gpio *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); cmd = &desc.params.read_write_gpio; @@ -4869,7 +4872,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) - return ice_status_to_errno(status); + return status; *value = !!cmd->gpio_val; return 0; @@ -4903,13 +4906,13 @@ bool ice_fw_supports_link_override(struct ice_hw *hw) * * Gets the link default override for a port */ -enum ice_status +int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi) { u16 i, tlv, tlv_len, tlv_start, buf, offset; struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); @@ -4994,7 +4997,7 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) * * Set the LLDP MIB. 
(0x0A08) */ -enum ice_status +int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd) { @@ -5004,7 +5007,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, cmd = &desc.params.lldp_set_mib; if (buf_size == 0 || !buf) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); @@ -5044,7 +5047,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) * @vsi_num: absolute HW index for VSI * @add: boolean for if adding or removing a filter */ -enum ice_status +int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) { struct ice_aqc_lldp_filter_ctrl *cmd; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 65c1b3244264..1c57097ddf0b 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -14,108 +14,108 @@ #define ICE_SQ_SEND_DELAY_TIME_MS 10 #define ICE_SQ_SEND_MAX_EXECUTE 3 -enum ice_status ice_init_hw(struct ice_hw *hw); +int ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); -enum ice_status ice_check_reset(struct ice_hw *hw); -enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); -enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); -enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); +int ice_check_reset(struct ice_hw *hw); +int ice_reset(struct ice_hw *hw, enum ice_reset_req req); +int ice_create_all_ctrlq(struct ice_hw *hw); +int ice_init_all_ctrlq(struct ice_hw *hw); void ice_shutdown_all_ctrlq(struct ice_hw *hw); void ice_destroy_all_ctrlq(struct ice_hw *hw); -enum ice_status +int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); -enum ice_status +int ice_get_link_status(struct ice_port_info *pi, bool *link_up); -enum ice_status ice_update_link_info(struct ice_port_info *pi); -enum ice_status +int ice_update_link_info(struct ice_port_info *pi); +int ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); -enum ice_status +int ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res); -enum ice_status +int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res); -enum ice_status +int ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd); bool ice_is_sbq_supported(struct ice_hw *hw); struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw); -enum ice_status +int ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); void ice_clear_pxe_mode(struct ice_hw *hw); -enum ice_status ice_get_caps(struct ice_hw *hw); +int ice_get_caps(struct ice_hw *hw); void ice_set_safe_mode_caps(struct ice_hw *hw); -enum ice_status +int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index); -enum ice_status +int ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params); -enum ice_status +int ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params); -enum ice_status +int ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); -enum ice_status +int ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct 
ice_aqc_get_set_rss_keys *keys); bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); -enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); +int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; -enum ice_status +int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); extern struct mutex ice_global_cfg_lock_sw; -enum ice_status +int ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); +int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd); -enum ice_status +int ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps); void ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, u16 link_speeds_bitmap); -enum ice_status +int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); bool ice_is_e810(struct ice_hw *hw); -enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); -enum ice_status +int ice_clear_pf_cfg(struct ice_hw *hw); +int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); bool ice_fw_supports_link_override(struct ice_hw *hw); -enum ice_status +int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); -enum ice_status +int ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); -enum ice_status +int ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode fc); bool @@ -125,27 +125,27 @@ void ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); -enum ice_status +int ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec); -enum ice_status +int ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd); @@ -159,19 +159,19 @@ 
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, int ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id); -enum ice_status +int ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handle, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd); -enum ice_status +int ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs); -enum ice_status +int ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); +int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); struct ice_q_ctx * @@ -184,7 +184,7 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); bool ice_is_e810t(struct ice_hw *hw); -enum ice_status +int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); int @@ -199,11 +199,11 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, int ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool *value, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); -enum ice_status +int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 03bdb125be36..6bcfee295991 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -87,7 +87,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); @@ -96,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) &cq->sq.desc_buf.pa, GFP_KERNEL | __GFP_ZERO); if (!cq->sq.desc_buf.va) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->sq.desc_buf.size = size; cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, @@ -107,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->sq.desc_buf.va = NULL; cq->sq.desc_buf.pa = 0; cq->sq.desc_buf.size = 0; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } return 0; @@ -118,7 +118,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); @@ -127,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) &cq->rq.desc_buf.pa, GFP_KERNEL | __GFP_ZERO); if (!cq->rq.desc_buf.va) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->rq.desc_buf.size = size; return 0; } @@ -154,7 +154,7 @@ static void 
ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; @@ -165,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries, sizeof(cq->rq.desc_buf), GFP_KERNEL); if (!cq->rq.dma_head) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; /* allocate the mapped buffers */ @@ -218,7 +218,7 @@ unwind_alloc_rq_bufs: devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); cq->rq.dma_head = NULL; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -226,7 +226,7 @@ unwind_alloc_rq_bufs: * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; @@ -235,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, sizeof(cq->sq.desc_buf), GFP_KERNEL); if (!cq->sq.dma_head) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; /* allocate the mapped buffers */ @@ -266,10 +266,10 @@ unwind_alloc_sq_bufs: devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); cq->sq.dma_head = NULL; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } -static enum ice_status +static int ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) { /* Clear Head and Tail */ @@ -283,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) /* Check one register to verify that config was applied */ if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa)) - return ICE_ERR_AQ_ERROR; + return -EIO; return 0; } @@ -295,8 +295,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) * * Configure base address and length registers for the transmit queue */ -static enum ice_status -ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); } @@ -308,10 +307,9 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) * * Configure base address and length registers for the receive (event queue) */ -static enum ice_status -ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status status; + int status; status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); if (status) @@ -361,19 +359,19 @@ do { \ * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe */ -static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code; + int ret_code; if (cq->sq.count > 0) { /* queue already initialized */ - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_sq_entries || !cq->sq_buf_size) { - ret_code = ICE_ERR_CFG; + ret_code = -EIO; goto init_ctrlq_exit; } @@ -421,19 +419,19 @@ init_ctrlq_exit: * Do *NOT* hold the lock when calling this as the memory allocation routines * 
called are not going to be atomic context safe */ -static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code; + int ret_code; if (cq->rq.count > 0) { /* queue already initialized */ - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->rq_buf_size) { - ret_code = ICE_ERR_CFG; + ret_code = -EIO; goto init_ctrlq_exit; } @@ -474,15 +472,14 @@ init_ctrlq_exit: * * The main shutdown routine for the Control Transmit Queue */ -static enum ice_status -ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code = 0; + int ret_code = 0; mutex_lock(&cq->sq_lock); if (!cq->sq.count) { - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto shutdown_sq_out; } @@ -541,15 +538,14 @@ static bool ice_aq_ver_check(struct ice_hw *hw) * * The main shutdown routine for the Control Receive Queue */ -static enum ice_status -ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code = 0; + int ret_code = 0; mutex_lock(&cq->rq_lock); if (!cq->rq.count) { - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto shutdown_rq_out; } @@ -576,17 +572,17 @@ shutdown_rq_out: * ice_init_check_adminq - Check version for Admin Queue to know if its alive * @hw: pointer to the hardware structure */ -static enum ice_status ice_init_check_adminq(struct ice_hw *hw) +static int ice_init_check_adminq(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->adminq; - enum ice_status status; + int status; status = ice_aq_get_fw_ver(hw, NULL); if (status) goto init_ctrlq_free_rq; if (!ice_aq_ver_check(hw)) { - status = ICE_ERR_FW_API_VER; + status = -EIO; goto init_ctrlq_free_rq; } @@ -612,10 +608,10 @@ init_ctrlq_free_rq: * * NOTE: this function does not initialize the controlq locks */ -static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { struct ice_ctl_q_info *cq; - enum ice_status ret_code; + int ret_code; switch (q_type) { case ICE_CTL_Q_ADMIN: @@ -631,14 +627,14 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) cq = &hw->mailboxq; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } cq->qtype = q_type; /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->num_sq_entries || !cq->rq_buf_size || !cq->sq_buf_size) { - return ICE_ERR_CFG; + return -EIO; } /* setup SQ command write back timeout */ @@ -751,10 +747,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) * * NOTE: this function does not initialize the controlq locks. 
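 *
 * Roughly, the FW admin queue bring-up below retries when the version
 * check reports a critical FW error; the retry limit and backoff are
 * elided here:
 *
 *	do {
 *		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
 *		if (status)
 *			return status;
 *		status = ice_init_check_adminq(hw);
 *		if (status != -EIO)
 *			break;
 *		...
 *	} while (retry++ < ...);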
*/ -enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) +int ice_init_all_ctrlq(struct ice_hw *hw) { - enum ice_status status; u32 retry = 0; + int status; /* Init FW admin queue */ do { @@ -763,7 +759,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) return status; status = ice_init_check_adminq(hw); - if (status != ICE_ERR_AQ_FW_CRITICAL) + if (status != -EIO) break; ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); @@ -814,7 +810,7 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) * driver needs to re-initialize control queues at run time it should call * ice_init_all_ctrlq instead. */ -enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) +int ice_create_all_ctrlq(struct ice_hw *hw) { ice_init_ctrlq_locks(&hw->adminq); if (ice_is_sbq_supported(hw)) @@ -962,7 +958,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) * This is the main send command routine for the ATQ. It runs the queue, * cleans the queue, etc. */ -enum ice_status +int ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -970,27 +966,27 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_dma_mem *dma_buf = NULL; struct ice_aq_desc *desc_on_ring; bool cmd_completed = false; - enum ice_status status = 0; struct ice_sq_cd *details; u32 total_delay = 0; + int status = 0; u16 retval = 0; u32 val = 0; /* if reset is in progress return a soft error */ if (hw->reset_ongoing) - return ICE_ERR_RESET_ONGOING; + return -EBUSY; mutex_lock(&cq->sq_lock); cq->sq_last_status = ICE_AQ_RC_OK; if (!cq->sq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n"); - status = ICE_ERR_AQ_EMPTY; + status = -EIO; goto sq_send_command_error; } if ((buf && !buf_size) || (!buf && buf_size)) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto sq_send_command_error; } @@ -998,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (buf_size > cq->sq_buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n", buf_size); - status = ICE_ERR_INVAL_SIZE; + status = -EINVAL; goto sq_send_command_error; } @@ -1011,7 +1007,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (val >= cq->num_sq_entries) { ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n", val); - status = ICE_ERR_AQ_EMPTY; + status = -EIO; goto sq_send_command_error; } @@ -1028,7 +1024,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, */ if (ice_clean_sq(hw, cq) == 0) { ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n"); - status = ICE_ERR_AQ_FULL; + status = -ENOSPC; goto sq_send_command_error; } @@ -1082,7 +1078,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (copy_size > buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n", copy_size, buf_size); - status = ICE_ERR_AQ_ERROR; + status = -EIO; } else { memcpy(buf, dma_buf->va, copy_size); } @@ -1098,7 +1094,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, } cmd_completed = true; if (!status && retval != ICE_AQ_RC_OK) - status = ICE_ERR_AQ_ERROR; + status = -EIO; cq->sq_last_status = (enum ice_aq_err)retval; } @@ -1116,10 +1112,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { ice_debug(hw, ICE_DBG_AQ_MSG, 
"Critical FW error.\n"); - status = ICE_ERR_AQ_FW_CRITICAL; + status = -EIO; } else { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n"); - status = ICE_ERR_AQ_TIMEOUT; + status = -EIO; } } @@ -1154,15 +1150,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) * the contents through e. It can also return how many events are * left to process through 'pending'. */ -enum ice_status +int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending) { u16 ntc = cq->rq.next_to_clean; enum ice_aq_err rq_last_status; - enum ice_status ret_code = 0; struct ice_aq_desc *desc; struct ice_dma_mem *bi; + int ret_code = 0; u16 desc_idx; u16 datalen; u16 flags; @@ -1176,7 +1172,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (!cq->rq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n"); - ret_code = ICE_ERR_AQ_EMPTY; + ret_code = -EIO; goto clean_rq_elem_err; } @@ -1185,7 +1181,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - ret_code = ICE_ERR_AQ_NO_WORK; + ret_code = -EALREADY; goto clean_rq_elem_out; } @@ -1196,7 +1192,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { - ret_code = ICE_ERR_AQ_ERROR; + ret_code = -EIO; ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", le16_to_cpu(desc->opcode), rq_last_status); } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 241427cd9bc0..0b146a0d4205 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -2,7 +2,6 @@ /* Copyright (c) 2019, Intel Corporation. */ #include "ice_common.h" -#include "ice_lib.h" #include "ice_sched.h" #include "ice_dcb.h" @@ -19,19 +18,19 @@ * * Requests the complete LLDP MIB (entire packet). (0x0A00) */ -static enum ice_status +static int ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, u16 buf_size, u16 *local_len, u16 *remote_len, struct ice_sq_cd *cd) { struct ice_aqc_lldp_get_mib *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.lldp_get_mib; if (buf_size == 0 || !buf) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); @@ -61,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes (0x0A01) */ -static enum ice_status +static int ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, struct ice_sq_cd *cd) { @@ -89,7 +88,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, * * Stop or Shutdown the embedded LLDP Agent (0x0A05) */ -enum ice_status +int ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd) { @@ -117,8 +116,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, * * Start the embedded LLDP Agent on all ports. 
(0x0A06) */ -enum ice_status -ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) +int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) { struct ice_aqc_lldp_start *cmd; struct ice_aq_desc desc; @@ -598,18 +596,17 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) * * Parse DCB configuration from the LLDPDU */ -static enum ice_status -ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) +static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) { struct ice_lldp_org_tlv *tlv; - enum ice_status ret = 0; u16 offset = 0; + int ret = 0; u16 typelen; u16 type; u16 len; if (!lldpmib || !dcbcfg) - return ICE_ERR_PARAM; + return -EINVAL; /* set to the start of LLDPDU */ lldpmib += ETH_HLEN; @@ -649,17 +646,17 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) * * Query DCB configuration from the firmware */ -enum ice_status +int ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg) { - enum ice_status ret; u8 *lldpmib; + int ret; /* Allocate the LLDPDU */ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); if (!lldpmib) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib, ICE_LLDPDU_SIZE, NULL, NULL, NULL); @@ -684,17 +681,17 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, * @cd: pointer to command details structure or NULL * * Start/Stop the embedded dcbx Agent. In case that this wrapper function - * returns ICE_SUCCESS, caller will need to check if FW returns back the same + * returns 0, caller will need to check if FW returns back the same * value as stated in dcbx_agent_status, and react accordingly. (0x0A09) */ -enum ice_status +int ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd) { struct ice_aqc_lldp_stop_start_specific_agent *cmd; - enum ice_status status; struct ice_aq_desc desc; u16 opcode; + int status; cmd = &desc.params.lldp_agent_ctrl; @@ -724,7 +721,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, * * Get CEE DCBX mode operational configuration from firmware (0x0A07) */ -static enum ice_status +static int ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, struct ice_aqc_get_cee_dcb_cfg_resp *buff, struct ice_sq_cd *cd) @@ -749,7 +746,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) { struct ice_aqc_set_query_pfc_mode *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC) return -EINVAL; @@ -762,7 +759,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) - return ice_status_to_errno(status); + return status; /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is * disabled, FW will write back 0 to cmd->pfc_mode. 
After the AQ has @@ -903,14 +900,13 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, * * Get IEEE or CEE mode DCB configuration from the Firmware */ -static enum ice_status -ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) +static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) { struct ice_dcbx_cfg *dcbx_cfg = NULL; - enum ice_status ret; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; if (dcbx_mode == ICE_DCBX_MODE_IEEE) dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; @@ -943,14 +939,14 @@ out: * * Get DCB configuration from the Firmware */ -enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) +int ice_get_dcb_cfg(struct ice_port_info *pi) { struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg; struct ice_dcbx_cfg *dcbx_cfg; - enum ice_status ret; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL); if (!ret) { @@ -974,13 +970,13 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) * * Update DCB configuration from the Firmware */ -enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) +int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) { struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; - enum ice_status ret = 0; + int ret = 0; if (!hw->func_caps.common_cap.dcb) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; qos_cfg->is_sw_lldp = true; @@ -996,7 +992,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) return ret; qos_cfg->is_sw_lldp = false; } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) { - return ICE_ERR_NOT_READY; + return -EBUSY; } /* Configure the LLDP MIB change event */ @@ -1016,19 +1012,19 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) * * Configure (disable/enable) MIB */ -enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) +int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) { struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; - enum ice_status ret; + int ret; if (!hw->func_caps.common_cap.dcb) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; /* Get DCBX status */ qos_cfg->dcbx_status = ice_get_dcbx_status(hw); if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) - return ICE_ERR_NOT_READY; + return -EBUSY; ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); if (!ret) @@ -1469,16 +1465,16 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg) * * Set DCB configuration to the Firmware */ -enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) +int ice_set_dcb_cfg(struct ice_port_info *pi) { u8 mib_type, *lldpmib = NULL; struct ice_dcbx_cfg *dcbcfg; - enum ice_status ret; struct ice_hw *hw; u16 miblen; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; @@ -1487,7 +1483,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) /* Allocate the LLDPDU */ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); if (!lldpmib) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) @@ -1511,17 +1507,17 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) * * query current port ETS configuration */ -static enum ice_status +static int ice_aq_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_query_port_ets *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!pi) - 
return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.port_ets; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets); cmd->port_teid = pi->root->info.node_teid; @@ -1537,18 +1533,18 @@ ice_aq_query_port_ets(struct ice_port_info *pi, * * update the SW DB with the new TC changes */ -static enum ice_status +static int ice_update_port_tc_tree_cfg(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf) { struct ice_sched_node *node, *tc_node; struct ice_aqc_txsched_elem_data elem; - enum ice_status status = 0; u32 teid1, teid2; + int status = 0; u8 i, j; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; /* suspend the missing TC nodes */ for (i = 0; i < pi->root->num_children; i++) { teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid); @@ -1605,12 +1601,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi, * query current port ETS configuration and update the * SW DB with the TC changes */ -enum ice_status +int ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_aq_query_port_ets(pi, buf, buf_size, cd); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index 9b6f87a889a6..d73348f279f7 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -138,28 +138,27 @@ struct ice_cee_app_prio { } __packed; int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); -enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); -enum ice_status +int ice_get_dcb_cfg(struct ice_port_info *pi); +int ice_set_dcb_cfg(struct ice_port_info *pi); +int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); +int ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cmd_details); #ifdef CONFIG_DCB -enum ice_status +int ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd); -enum ice_status -ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); +int ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd); -enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); +int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); #else /* CONFIG_DCB */ -static inline enum ice_status +static inline int ice_aq_stop_lldp(struct ice_hw __always_unused *hw, bool __always_unused shutdown_lldp_agent, bool __always_unused persist, @@ -168,7 +167,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw, return 0; } -static inline enum ice_status +static inline int ice_aq_start_lldp(struct ice_hw __always_unused *hw, bool __always_unused persist, struct ice_sq_cd __always_unused *cd) @@ -176,7 +175,7 @@ ice_aq_start_lldp(struct ice_hw __always_unused *hw, return 0; } -static inline enum ice_status +static inline int ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw, bool __always_unused start_dcbx_agent, bool *dcbx_agent_status, @@ -187,7 +186,7 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused 
*hw, return 0; } -static inline enum ice_status +static inline int ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, bool __always_unused ena_mib) { diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index a72e18320a22..b94d8daeaa58 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -528,7 +528,7 @@ void ice_dcb_rebuild(struct ice_pf *pf) struct ice_aqc_port_ets_elem buf = { 0 }; struct device *dev = ice_pf_to_dev(pf); struct ice_dcbx_cfg *err_cfg; - enum ice_status ret; + int ret; ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 7fdeb411b6df..3eb01731e496 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -97,6 +97,9 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc; + if (!bwcfg) + new_cfg->etscfg.tcbwtable[0] = 100; + if (!bwrec) new_cfg->etsrec.tcbwtable[0] = 100; @@ -167,15 +170,18 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) if (mode == pf->dcbx_cap) return ICE_DCB_NO_HW_CHG; - pf->dcbx_cap = mode; qos_cfg = &pf->hw.port_info->qos_cfg; - if (mode & DCB_CAP_DCBX_VER_CEE) { - if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) - return ICE_DCB_NO_HW_CHG; + + /* DSCP configuration is not DCBx negotiated */ + if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) + return ICE_DCB_NO_HW_CHG; + + pf->dcbx_cap = mode; + + if (mode & DCB_CAP_DCBX_VER_CEE) qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; - } else { + else qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; - } dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode); return ICE_DCB_HW_CHG_RST; diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index b9bd9f9472f6..5aa18219920b 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -39,13 +39,13 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); if (status) /* We failed to locate the PBA, so just skip this entry */ - dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", - ice_stat_str(status)); + dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n", + status); } static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) @@ -251,7 +251,6 @@ static int ice_devlink_info_get(struct devlink *devlink, struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct ice_info_ctx *ctx; - enum ice_status status; size_t i; int err; @@ -266,20 +265,19 @@ static int ice_devlink_info_get(struct devlink *devlink, return -ENOMEM; /* discover capabilities first */ - status = ice_discover_dev_caps(hw, &ctx->dev_caps); - if (status) { - dev_dbg(dev, "Failed to discover device capabilities, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_discover_dev_caps(hw, &ctx->dev_caps); + if (err) { + dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n", + 
err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities"); - err = -EIO; goto out_free_ctx; } if (ctx->dev_caps.common_cap.nvm_update_pending_orom) { - status = ice_get_inactive_orom_ver(hw, &ctx->pending_orom); - if (status) { - dev_dbg(dev, "Unable to read inactive Option ROM version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom); + if (err) { + dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_orom = false; @@ -287,10 +285,10 @@ static int ice_devlink_info_get(struct devlink *devlink, } if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) { - status = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm); - if (status) { - dev_dbg(dev, "Unable to read inactive NVM version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm); + if (err) { + dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_nvm = false; @@ -298,10 +296,10 @@ static int ice_devlink_info_get(struct devlink *devlink, } if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) { - status = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist); - if (status) { - dev_dbg(dev, "Unable to read inactive Netlist version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist); + if (err) { + dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_netlist = false; @@ -373,63 +371,225 @@ out_free_ctx: } /** - * ice_devlink_flash_update - Update firmware stored in flash on the device - * @devlink: pointer to devlink associated with device to update - * @params: flash update parameters + * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware + * @devlink: pointer to the devlink instance to reload + * @netns_change: if true, the network namespace is changing + * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE + * @limit: limits on what reload should do, such as not resetting * @extack: netlink extended ACK structure * - * Perform a device flash update. The bulk of the update logic is contained - * within the ice_flash_pldm_image function. + * Allow user to activate new Embedded Management Processor firmware by + * issuing device specific EMP reset. Called in response to + * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE. * - * Returns: zero on success, or an error code on failure. + * Note that teardown and rebuild of the driver state happens automatically as + * part of an interrupt and watchdog task. This is because all physical + * functions on the device must be able to reset when an EMP reset occurs from + * any source. 
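Nearly every hunk in this file and its neighbors follows the same mechanical pattern: helpers that used to return the driver-private enum ice_status now return standard errno values, so call sites stop translating (often lossily, to a blanket -EIO). A minimal before/after sketch of the caller side, using ice_discover_dev_caps() from the hunk above:

/* Before: private status codes, flattened at each call site */
enum ice_status status = ice_discover_dev_caps(hw, &ctx->dev_caps);
if (status)
	return -EIO;		/* original cause discarded */

/* After: standard errnos propagate unmodified */
int err = ice_discover_dev_caps(hw, &ctx->dev_caps);
if (err)
	return err;		/* -EIO, -EINVAL, -ENOMEM, ... preserved */
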
*/ static int -ice_devlink_flash_update(struct devlink *devlink, - struct devlink_flash_update_params *params, - struct netlink_ext_ack *extack) +ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - u8 preservation; + u8 pending; int err; - if (!params->overwrite_mask) { - /* preserve all settings and identifiers */ - preservation = ICE_AQC_NVM_PRESERVE_ALL; - } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) { - /* overwrite settings, but preserve the vital device identifiers */ - preservation = ICE_AQC_NVM_PRESERVE_SELECTED; - } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS | - DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) { - /* overwrite both settings and identifiers, preserve nothing */ - preservation = ICE_AQC_NVM_NO_PRESERVATION; - } else { - NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported"); - return -EOPNOTSUPP; + err = ice_get_pending_updates(pf, &pending, extack); + if (err) + return err; + + /* pending is a bitmask of which flash banks have a pending update, + * including the main NVM bank, the Option ROM bank, and the netlist + * bank. If any of these bits are set, then there is a pending update + * waiting to be activated. + */ + if (!pending) { + NL_SET_ERR_MSG_MOD(extack, "No pending firmware update"); + return -ECANCELED; } - if (!hw->dev_caps.common_cap.nvm_unified_update) { - NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); - return -EOPNOTSUPP; + if (pf->fw_emp_reset_disabled) { + NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed"); + return -ECANCELED; } - err = ice_check_for_pending_update(pf, NULL, extack); - if (err) + dev_dbg(dev, "Issuing device EMP reset to activate firmware\n"); + + err = ice_aq_nvm_update_empr(hw); + if (err) { + dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware"); return err; + } + + return 0; +} + +/** + * ice_devlink_reload_empr_finish - Wait for EMP reset to finish + * @devlink: pointer to the devlink instance reloading + * @action: the action requested + * @limit: limits imposed by userspace, such as not resetting + * @actions_performed: on return, indicate what actions actually performed + * @extack: netlink extended ACK structure + * + * Wait for driver to finish rebuilding after EMP reset is completed. This + * includes time to wait for both the actual device reset as well as the time + * for the driver's rebuild to complete. 
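Together with the DEVLINK_F_RELOAD feature bit set later in this file, the two hooks documented above are all a driver needs to expose firmware activation through devlink reload; userspace then drives it with something like devlink dev reload DEV action fw_activate. A minimal sketch of the wiring for a hypothetical driver (the foo_* names are illustrative, not from this patch):

static const struct devlink_ops foo_devlink_ops = {
	.reload_actions	= BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down	= foo_reload_empr_start,	/* trigger the reset */
	.reload_up	= foo_reload_empr_finish,	/* wait, report actions */
};

static void foo_devlink_register(struct foo_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink);
}

Note that .reload_up must fill in *actions_performed even on success, as the ice implementation below does; the core reports that bitmap back to userspace.
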
+ */ +static int +ice_devlink_reload_empr_finish(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + int err; + + *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); - devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0); + err = ice_wait_for_reset(pf, 60 * HZ); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute"); + return err; + } - return ice_flash_pldm_image(pf, params->fw, preservation, extack); + return 0; } static const struct devlink_ops ice_devlink_ops = { .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), + /* The ice driver currently does not support driver reinit */ + .reload_down = ice_devlink_reload_empr_start, + .reload_up = ice_devlink_reload_empr_finish, .eswitch_mode_get = ice_eswitch_mode_get, .eswitch_mode_set = ice_eswitch_mode_set, .info_get = ice_devlink_info_get, .flash_update = ice_devlink_flash_update, }; +static int +ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + + ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false; + + return 0; +} + +static int +ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + bool roce_ena = ctx->val.vbool; + int ret; + + if (!roce_ena) { + ice_unplug_aux_dev(pf); + pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; + return 0; + } + + pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; + ret = ice_plug_aux_dev(pf); + if (ret) + pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; + + return ret; +} + +static int +ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + + if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + return -EOPNOTSUPP; + + if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) { + NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int +ice_devlink_enable_iw_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + + ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP; + + return 0; +} + +static int +ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + bool iw_ena = ctx->val.vbool; + int ret; + + if (!iw_ena) { + ice_unplug_aux_dev(pf); + pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; + return 0; + } + + pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP; + ret = ice_plug_aux_dev(pf); + if (ret) + pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; + + return ret; +} + +static int +ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + + if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + return -EOPNOTSUPP; + + if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) { + NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. 
This device cannot enable iWARP and RoCEv2 simultaneously"); + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct devlink_param ice_devlink_params[] = { + DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ice_devlink_enable_roce_get, + ice_devlink_enable_roce_set, + ice_devlink_enable_roce_validate), + DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ice_devlink_enable_iw_get, + ice_devlink_enable_iw_set, + ice_devlink_enable_iw_validate), + +}; + static void ice_devlink_free(void *devlink_ptr) { devlink_free((struct devlink *)devlink_ptr); @@ -470,6 +630,7 @@ void ice_devlink_register(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); + devlink_set_features(devlink, DEVLINK_F_RELOAD); devlink_register(devlink); } @@ -484,6 +645,36 @@ void ice_devlink_unregister(struct ice_pf *pf) devlink_unregister(priv_to_devlink(pf)); } +int ice_devlink_register_params(struct ice_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + union devlink_param_value value; + int err; + + err = devlink_params_register(devlink, ice_devlink_params, + ARRAY_SIZE(ice_devlink_params)); + if (err) + return err; + + value.vbool = false; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP, + value); + + value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? true : false; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, + value); + + return 0; +} + +void ice_devlink_unregister_params(struct ice_pf *pf) +{ + devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params, + ARRAY_SIZE(ice_devlink_params)); +} + /** * ice_devlink_create_pf_port - Create a devlink port for this PF * @pf: the PF to create a devlink port for @@ -597,16 +788,20 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf) } /** - * ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents + * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents * @devlink: the devlink instance * @ops: the devlink region being snapshotted * @extack: extended ACK response structure * @data: on exit points to snapshot data buffer * * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for - * the shadow-ram devlink region. It captures a snapshot of the shadow ram - * contents. This snapshot can later be viewed via the devlink-region - * interface. + * the nvm-flash devlink region. It captures a snapshot of the full NVM flash + * contents, including both banks of flash. This snapshot can later be viewed + * via the devlink-region interface. + * + * It captures the flash using the FLASH_ONLY bit set when reading via + * firmware, so it does not read the current Shadow RAM contents. For that, + * use the shadow-ram region. * * @returns zero on success, and updates the data pointer. Returns a non-zero * error code on failure. 
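The snapshot callbacks in this file all follow one ownership contract: the callback allocates the buffer, devlink takes ownership of it on success, and the region's .destructor (vfree here) releases it when the snapshot is deleted. A hedged sketch of that contract, with hypothetical foo_* helpers and size:

static int foo_snapshot(struct devlink *devlink,
			const struct devlink_region_ops *ops,
			struct netlink_ext_ack *extack, u8 **data)
{
	u8 *buf = vzalloc(FOO_REGION_SIZE);	/* hypothetical size */

	if (!buf)
		return -ENOMEM;
	if (foo_read_hw(devlink_priv(devlink), buf)) {	/* hypothetical read */
		vfree(buf);
		return -EIO;		/* on failure the callback frees */
	}
	*data = buf;			/* on success devlink frees it later */
	return 0;
}
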
@@ -618,9 +813,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; void *nvm_data; u32 nvm_size; + int status; nvm_size = hw->flash.flash_size; nvm_data = vzalloc(nvm_size); @@ -633,7 +828,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, status, hw->adminq.sq_last_status); NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); vfree(nvm_data); - return -EIO; + return status; } status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false); @@ -643,7 +838,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); ice_release_nvm(hw); vfree(nvm_data); - return -EIO; + return status; } ice_release_nvm(hw); @@ -654,6 +849,66 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, } /** + * ice_devlink_sram_snapshot - Capture a snapshot of the Shadow RAM contents + * @devlink: the devlink instance + * @ops: the devlink region being snapshotted + * @extack: extended ACK response structure + * @data: on exit points to snapshot data buffer + * + * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for + * the shadow-ram devlink region. It captures a snapshot of the shadow ram + * contents. This snapshot can later be viewed via the devlink-region + * interface. + * + * @returns zero on success, and updates the data pointer. Returns a non-zero + * error code on failure. + */ +static int +ice_devlink_sram_snapshot(struct devlink *devlink, + const struct devlink_region_ops __always_unused *ops, + struct netlink_ext_ack *extack, u8 **data) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + u8 *sram_data; + u32 sram_size; + int err; + + sram_size = hw->flash.sr_words * 2u; + sram_data = vzalloc(sram_size); + if (!sram_data) + return -ENOMEM; + + err = ice_acquire_nvm(hw, ICE_RES_READ); + if (err) { + dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", + err, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); + vfree(sram_data); + return err; + } + + /* Read from the Shadow RAM, rather than directly from NVM */ + err = ice_read_flat_nvm(hw, 0, &sram_size, sram_data, true); + if (err) { + dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", + sram_size, err, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, + "Failed to read Shadow RAM contents"); + ice_release_nvm(hw); + vfree(sram_data); + return err; + } + + ice_release_nvm(hw); + + *data = sram_data; + + return 0; +} + +/** * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities * @devlink: the devlink instance * @ops: the devlink region being snapshotted @@ -675,8 +930,8 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink, struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; void *devcaps; + int status; devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN); if (!devcaps) @@ -689,7 +944,7 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink, status, hw->adminq.sq_last_status); NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities"); vfree(devcaps); - return -EIO; + return status; } *data = (u8 *)devcaps; @@ -703,6 +958,12 @@ static const struct devlink_region_ops ice_nvm_region_ops = { .snapshot = ice_devlink_nvm_snapshot, }; 
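ice_devlink_init_regions() further down ties these ops structures to live regions; condensed, the lifecycle amounts to the sketch below. Region creation is deliberately best effort: a failure is logged and the pointer NULLed rather than failing probe.

pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
					1, pf->hw.flash.sr_words * 2u);
if (IS_ERR(pf->sram_region))
	pf->sram_region = NULL;	/* run without the region */

/* teardown */
if (pf->sram_region)
	devlink_region_destroy(pf->sram_region);

A snapshot is then requested from userspace (roughly devlink region new DEV/shadow-ram snapshot id 1, depending on the devlink utility version), which invokes the ->snapshot callback. Note the size: sr_words counts 16-bit Shadow RAM words, hence the multiply by 2 for bytes.
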
+static const struct devlink_region_ops ice_sram_region_ops = { + .name = "shadow-ram", + .destructor = vfree, + .snapshot = ice_devlink_sram_snapshot, +}; + static const struct devlink_region_ops ice_devcaps_region_ops = { .name = "device-caps", .destructor = vfree, @@ -720,7 +981,7 @@ void ice_devlink_init_regions(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); struct device *dev = ice_pf_to_dev(pf); - u64 nvm_size; + u64 nvm_size, sram_size; nvm_size = pf->hw.flash.flash_size; pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1, @@ -731,6 +992,15 @@ void ice_devlink_init_regions(struct ice_pf *pf) pf->nvm_region = NULL; } + sram_size = pf->hw.flash.sr_words * 2u; + pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops, + 1, sram_size); + if (IS_ERR(pf->sram_region)) { + dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n", + PTR_ERR(pf->sram_region)); + pf->sram_region = NULL; + } + pf->devcaps_region = devlink_region_create(devlink, &ice_devcaps_region_ops, 10, ICE_AQ_MAX_BUF_LEN); @@ -751,6 +1021,10 @@ void ice_devlink_destroy_regions(struct ice_pf *pf) { if (pf->nvm_region) devlink_region_destroy(pf->nvm_region); + + if (pf->sram_region) + devlink_region_destroy(pf->sram_region); + if (pf->devcaps_region) devlink_region_destroy(pf->devcaps_region); } diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index b7f9551e4fc4..fe006d9946f8 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -8,6 +8,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev); void ice_devlink_register(struct ice_pf *pf); void ice_devlink_unregister(struct ice_pf *pf); +int ice_devlink_register_params(struct ice_pf *pf); +void ice_devlink_unregister_params(struct ice_pf *pf); int ice_devlink_create_pf_port(struct ice_pf *pf); void ice_devlink_destroy_pf_port(struct ice_pf *pf); int ice_devlink_create_vf_port(struct ice_vf *vf); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 572519e402f4..e2e3ef7fba7f 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -270,9 +270,8 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct device *dev; - int ret = 0; + int ret; u8 *buf; dev = ice_pf_to_dev(pf); @@ -285,22 +284,18 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, if (!buf) return -ENOMEM; - status = ice_acquire_nvm(hw, ICE_RES_READ); - if (status) { - dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_acquire_nvm(hw, ICE_RES_READ); + if (ret) { + dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } - status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf, - false); - if (status) { - dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf, + false); + if (ret) { + dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto release; } @@ -342,14 +337,14 @@ static bool 
ice_active_vfs(struct ice_pf *pf) static u64 ice_link_test(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); - enum ice_status status; bool link_up = false; + int status; netdev_info(netdev, "link test\n"); status = ice_get_link_status(np->vsi->port_info, &link_up); if (status) { - netdev_err(netdev, "link query error, status = %s\n", - ice_stat_str(status)); + netdev_err(netdev, "link query error, status = %d\n", + status); return 1; } @@ -1052,8 +1047,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) struct ice_link_status *link_info; struct ice_vsi *vsi = np->vsi; struct ice_port_info *pi; - enum ice_status status; - int err = 0; + int err; pi = vsi->port_info; @@ -1079,12 +1073,10 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - caps, NULL); - if (status) { - err = -EAGAIN; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + caps, NULL); + if (err) goto done; - } /* Set supported/configured FEC modes based on PHY capability */ if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC) @@ -1203,7 +1195,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) { if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { - enum ice_status status; + int status; /* Disable FW LLDP engine */ status = ice_cfg_lldp_mib_change(&pf->hw, false); @@ -1232,8 +1224,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED; pf->dcbx_cap |= DCB_CAP_DCBX_HOST; } else { - enum ice_status status; bool dcbx_agent_status; + int status; if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) { clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); @@ -1288,8 +1280,10 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) } if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) { /* down and up VSI so that changes of Rx cfg are reflected. 
*/ - ice_down(vsi); - ice_up(vsi); + if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { + ice_down(vsi); + ice_up(vsi); + } } /* don't allow modification of this flag when a single VF is in * promiscuous mode because it's not supported @@ -1938,8 +1932,7 @@ ice_get_link_ksettings(struct net_device *netdev, struct ice_aqc_get_phy_caps_data *caps; struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; - enum ice_status status; - int err = 0; + int err; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); @@ -1990,12 +1983,10 @@ ice_get_link_ksettings(struct net_device *netdev, if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); + if (err) goto done; - } /* Set the advertised flow control based on the PHY capability */ if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && @@ -2027,12 +2018,10 @@ ice_get_link_ksettings(struct net_device *netdev, caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); + if (err) goto done; - } /* Set supported FEC modes based on PHY capability */ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); @@ -2210,11 +2199,10 @@ ice_set_link_ksettings(struct net_device *netdev, struct ice_pf *pf = np->vsi->back; struct ice_port_info *pi; u8 autoneg_changed = 0; - enum ice_status status; u64 phy_type_high = 0; u64 phy_type_low = 0; - int err = 0; bool linkup; + int err; pi = np->vsi->port_info; @@ -2234,15 +2222,13 @@ ice_set_link_ksettings(struct net_device *netdev, /* Get the PHY capabilities based on media */ if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - phy_caps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + phy_caps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - phy_caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + phy_caps, NULL); + if (err) goto done; - } /* save autoneg out of ksettings */ autoneg = copy_ks.base.autoneg; @@ -2308,11 +2294,9 @@ ice_set_link_ksettings(struct net_device *netdev, /* Call to get the current link speed */ pi->phy.get_link_info = true; - status = ice_get_link_status(pi, &linkup); - if (status) { - err = -EIO; + err = ice_get_link_status(pi, &linkup); + if (err) goto done; - } curr_link_speed = pi->phy.link_info.link_speed; adv_link_speed = ice_ksettings_find_adv_link_speed(ks); @@ -2381,10 +2365,9 @@ ice_set_link_ksettings(struct net_device *netdev, } /* make the aq call */ - status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); - if (status) { + err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); + if (err) { netdev_info(netdev, "Set phy config failed,\n"); - err = -EIO; goto done; } @@ -2522,9 +2505,9 @@ static int ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; u64 hashed_flds; + int status; u32 hdrs; dev = ice_pf_to_dev(pf); @@ -2550,9 +2533,9 @@ ice_set_rss_hash_opt(struct 
ice_vsi *vsi, struct ethtool_rxnfc *nfc) status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs); if (status) { - dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); - return -EINVAL; + dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n", + vsi->vsi_num, status); + return status; } return 0; @@ -2686,7 +2669,9 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, } static void -ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -2704,7 +2689,9 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) } static int -ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_tx_ring *xdp_rings = NULL; @@ -2949,7 +2936,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) struct ice_port_info *pi = np->vsi->port_info; struct ice_aqc_get_phy_caps_data *pcaps; struct ice_dcbx_cfg *dcbx_cfg; - enum ice_status status; + int status; /* Initialize pause params */ pause->rx_pause = 0; @@ -2999,11 +2986,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) struct ice_vsi *vsi = np->vsi; struct ice_hw *hw = &pf->hw; struct ice_port_info *pi; - enum ice_status status; u8 aq_failures; bool link_up; - int err = 0; u32 is_an; + int err; pi = vsi->port_info; hw_link_info = &pi->phy.link_info; @@ -3029,11 +3015,11 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, - NULL); - if (status) { + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (err) { kfree(pcaps); - return -EIO; + return err; } is_an = ice_is_phy_caps_an_enabled(pcaps) ? 
AUTONEG_ENABLE : @@ -3069,22 +3055,19 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -EINVAL; /* Set the FC mode and only restart AN if link is up */ - status = ice_set_fc(pi, &aq_failures, link_up); + err = ice_set_fc(pi, &aq_failures, link_up); if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) { - netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) { - netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } @@ -3924,16 +3907,16 @@ ice_get_module_info(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 sff8472_comp = 0; u8 sff8472_swap = 0; u8 sff8636_rev = 0; u8 value = 0; + int status; status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00, 0, &value, 1, 0, NULL); if (status) - return -EIO; + return status; switch (value) { case ICE_MODULE_TYPE_SFP: @@ -3941,12 +3924,12 @@ ice_get_module_info(struct net_device *netdev, ICE_MODULE_SFF_8472_COMP, 0x00, 0, &sff8472_comp, 1, 0, NULL); if (status) - return -EIO; + return status; status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, ICE_MODULE_SFF_8472_SWAP, 0x00, 0, &sff8472_swap, 1, 0, NULL); if (status) - return -EIO; + return status; if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) { modinfo->type = ETH_MODULE_SFF_8079; @@ -3966,7 +3949,7 @@ ice_get_module_info(struct net_device *netdev, ICE_MODULE_REVISION_ADDR, 0x00, 0, &sff8636_rev, 1, 0, NULL); if (status) - return -EIO; + return status; /* Check revision compliance */ if (sff8636_rev > 0x02) { /* Module is SFF-8636 compliant */ @@ -4001,11 +3984,11 @@ ice_get_module_eeprom(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; bool is_sfp = false; unsigned int i, j; u16 offset = 0; u8 page = 0; + int status; if (!ee || !ee->len || !data) return -EINVAL; @@ -4013,7 +3996,7 @@ ice_get_module_eeprom(struct net_device *netdev, status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0, NULL); if (status) - return -EIO; + return status; if (value[0] == ICE_MODULE_TYPE_SFP) is_sfp = true; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 38960bcc384c..bbc64d6ce4cd 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -530,7 +530,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, struct ice_flow_prof *prof = NULL; struct ice_fd_hw_prof *hw_prof; struct ice_hw *hw = &pf->hw; - 
enum ice_status status; u64 entry1_h = 0; u64 entry2_h = 0; u64 prof_id; @@ -581,24 +580,20 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, * actions (NULL) and zero actions 0. */ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - TNL_SEG_CNT(tun), &prof); - if (status) - return ice_status_to_errno(status); - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, - main_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry1_h); - if (status) { - err = ice_status_to_errno(status); + err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, + TNL_SEG_CNT(tun), &prof); + if (err) + return err; + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + main_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry1_h); + if (err) goto err_prof; - } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, - ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry2_h); - if (status) { - err = ice_status_to_errno(status); + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry2_h); + if (err) goto err_entry; - } hw_prof->fdir_seg[tun] = seg; hw_prof->entry_h[0][tun] = entry1_h; @@ -1190,7 +1185,6 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, struct ice_hw *hw = &pf->hw; struct ice_fltr_desc desc; struct ice_vsi *ctrl_vsi; - enum ice_status status; u8 *pkt, *frag_pkt; bool has_frag; int err; @@ -1209,11 +1203,9 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, } ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); - if (status) { - err = ice_status_to_errno(status); + err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); + if (err) goto err_free_all; - } err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); if (err) goto err_free_all; @@ -1223,12 +1215,10 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, if (has_frag) { /* does not return error */ ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true, - is_tun); - if (status) { - err = ice_status_to_errno(status); + err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true, + is_tun); + if (err) goto err_frag; - } err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt); if (err) goto err_frag; @@ -1268,7 +1258,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool is_tun = tun == ICE_FD_HW_SEG_TUN; int err; - if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num)) + if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL)) continue; err = ice_fdir_write_fltr(pf, input, add, is_tun); if (err) @@ -1652,7 +1642,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) } /* return error if not an update and no available filters */ - fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1; + fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1; if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) && ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) { dev_err(dev, "Failed to add filter. 
The maximum number of flow director filters has been reached.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index cbd8424631e3..ae089d32ee9d 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -712,7 +712,7 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, * @hw: pointer to the hardware structure * @cntr_id: returns counter index */ -enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) +int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); @@ -723,7 +723,7 @@ enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) * @hw: pointer to the hardware structure * @cntr_id: counter index to be freed */ -enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) +int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) { return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); @@ -735,8 +735,7 @@ enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) * @cntr_id: returns counter index * @num_fltr: number of filter entries to be allocated */ -enum ice_status -ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) +int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES, ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, @@ -749,8 +748,7 @@ ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) * @cntr_id: returns counter index * @num_fltr: number of filter entries to be allocated */ -enum ice_status -ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) +int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES, ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, @@ -872,7 +870,7 @@ static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr) * @frag: generate a fragment packet * @tun: true implies generate a tunnel packet */ -enum ice_status +int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun) { @@ -919,15 +917,15 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, if (ice_fdir_pkt[idx].flow == flow) break; if (idx == ICE_FDIR_NUM_PKT) - return ICE_ERR_PARAM; + return -EINVAL; if (!tun) { memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len); loc = pkt; } else { - if (!ice_get_open_tunnel_port(hw, &tnl_port)) - return ICE_ERR_DOES_NOT_EXIST; + if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL)) + return -ENOENT; if (!ice_fdir_pkt[idx].tun_pkt) - return ICE_ERR_PARAM; + return -EINVAL; memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, ice_fdir_pkt[idx].tun_pkt_len); ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET, @@ -1111,7 +1109,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } if (input->flex_fltr) diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index da4163856f4c..b6c7c6903f35 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -201,16 +201,14 @@ struct ice_fdir_base_pkt { const u8 *tun_pkt; }; -enum ice_status 
ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); -enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); -enum ice_status -ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); -enum ice_status -ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); +int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); +int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); +int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); +int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); void ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, struct ice_fltr_desc *fdesc, bool add); -enum ice_status +int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun); int ice_get_fdir_cnt_all(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 23cfcceb1536..d29197ab3d02 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -314,6 +314,78 @@ ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, } /** + * ice_hw_ptype_ena - check if the PTYPE is enabled or not + * @hw: pointer to the HW structure + * @ptype: the hardware PTYPE + */ +bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype) +{ + return ptype < ICE_FLOW_PTYPE_MAX && + test_bit(ptype, hw->hw_ptype); +} + +/** + * ice_marker_ptype_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the Marker PType TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual Marker PType TCAM entries. + */ +static void * +ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_marker_ptype_tcam_section *marker_ptype; + + if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) + return NULL; + + if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + marker_ptype = section; + if (index >= le16_to_cpu(marker_ptype->count)) + return NULL; + + return marker_ptype->tcam + index; +} + +/** + * ice_fill_hw_ptype - fill the enabled PTYPE bit information + * @hw: pointer to the HW structure + */ +static void ice_fill_hw_ptype(struct ice_hw *hw) +{ + struct ice_marker_ptype_tcam_entry *tcam; + struct ice_seg *seg = hw->seg; + struct ice_pkg_enum state; + + bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); + if (!seg) + return; + + memset(&state, 0, sizeof(state)); + + do { + tcam = ice_pkg_enum_entry(seg, &state, + ICE_SID_RXPARSER_MARKER_PTYPE, NULL, + ice_marker_ptype_tcam_handler); + if (tcam && + le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && + le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) + set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); + + seg = NULL; + } while (tcam); +} + +/** * ice_boost_tcam_handler * @sect_type: section type * @section: pointer to section @@ -358,7 +430,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) * if it is found. The ice_seg parameter must not be NULL since the first call * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. 
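ice_fill_hw_ptype() above, and ice_find_boost_entry() here, share the package-enumeration idiom the comment calls out: only the first ice_pkg_enum_entry() call receives the real segment pointer, and every later call passes NULL so the walk resumes from the saved state. Distilled (sect_type, handler, and consume are placeholders):

struct ice_pkg_enum state;
void *entry;

memset(&state, 0, sizeof(state));
do {
	entry = ice_pkg_enum_entry(seg, &state, sect_type, NULL, handler);
	if (entry)
		consume(entry);		/* hypothetical per-entry work */
	seg = NULL;			/* continue from 'state' next time */
} while (entry);
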
*/ -static enum ice_status +static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, struct ice_boost_tcam_entry **entry) { @@ -368,7 +440,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, memset(&state, 0, sizeof(state)); if (!ice_seg) - return ICE_ERR_PARAM; + return -EINVAL; do { tcam = ice_pkg_enum_entry(ice_seg, &state, @@ -383,7 +455,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, } while (tcam); *entry = NULL; - return ICE_ERR_CFG; + return -EIO; } /** @@ -549,7 +621,7 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) * ------------------------------ * Result: key: b01 10 11 11 00 00 */ -static enum ice_status +static int ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, u8 *key_inv) { @@ -558,7 +630,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, /* 'dont_care' and 'nvr_mtch' masks cannot overlap */ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch)) - return ICE_ERR_CFG; + return -EIO; *key = 0; *key_inv = 0; @@ -651,7 +723,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) * dc == NULL --> dc mask is all 0's (no don't care bits) * nm == NULL --> nm mask is all 0's (no never match bits) */ -static enum ice_status +static int ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, u16 len) { @@ -660,11 +732,11 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, /* size must be a multiple of 2 bytes. */ if (size % 2) - return ICE_ERR_CFG; + return -EIO; half_size = size / 2; if (off + len > half_size) - return ICE_ERR_CFG; + return -EIO; /* Make sure at most one bit is set in the never match mask. Having more * than one never match mask bit set will cause HW to consume excessive @@ -672,13 +744,13 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, */ #define ICE_NVR_MTCH_BITS_MAX 1 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) - return ICE_ERR_CFG; + return -EIO; for (i = 0; i < len; i++) if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, dc ? dc[i] : 0, nm ? nm[i] : 0, key + off + i, key + half_size + off + i)) - return ICE_ERR_CFG; + return -EIO; return 0; } @@ -692,25 +764,25 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, * or writing of the package. When attempting to obtain write access, the * caller must check for the following two return values: * - * ICE_SUCCESS - Means the caller has acquired the global config lock - * and can perform writing of the package. - * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the - * package or has found that no update was necessary; in - * this case, the caller can just skip performing any - * update of the package. - */ -static enum ice_status + * 0 - Means the caller has acquired the global config lock + * and can perform writing of the package. + * -EALREADY - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. 
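The caller-side handling this comment describes is exactly what ice_dwnld_cfg_bufs() does a few hunks below; in sketch form:

status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
if (status == -EALREADY)
	return ICE_DDP_PKG_ALREADY_LOADED;	/* another PF loaded it; not an error */
if (status)
	return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);

/* ... ice_aq_download_pkg() each buffer ... */

ice_release_global_cfg_lock(hw);
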
+ */ +static int ice_acquire_global_cfg_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) { - enum ice_status status; + int status; status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, ICE_GLOBAL_CFG_LOCK_TIMEOUT); if (!status) mutex_lock(&ice_global_cfg_lock_sw); - else if (status == ICE_ERR_AQ_NO_WORK) + else if (status == -EALREADY) ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); return status; @@ -735,7 +807,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw) * * This function will request ownership of the change lock. */ -enum ice_status +int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) { return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, @@ -765,14 +837,14 @@ void ice_release_change_lock(struct ice_hw *hw) * * Download Package (0x0C40) */ -static enum ice_status +static int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, bool last_buf, u32 *error_offset, u32 *error_info, struct ice_sq_cd *cd) { struct ice_aqc_download_pkg *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (error_offset) *error_offset = 0; @@ -787,7 +859,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { + if (status == -EIO) { /* Read error from buffer only when the FW returned an error */ struct ice_aqc_download_pkg_resp *resp; @@ -813,14 +885,14 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, * * Update Package (0x0C42) */ -static enum ice_status +static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, bool last_buf, u32 *error_offset, u32 *error_info, struct ice_sq_cd *cd) { struct ice_aqc_download_pkg *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (error_offset) *error_offset = 0; @@ -835,7 +907,7 @@ ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { + if (status == -EIO) { /* Read error from buffer only when the FW returned an error */ struct ice_aqc_download_pkg_resp *resp; @@ -892,11 +964,10 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, * * Obtains change lock and updates package. */ -static enum ice_status -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { - enum ice_status status; u32 offset, info, i; + int status; status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) @@ -921,6 +992,22 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) return status; } +static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) +{ + switch (aq_err) { + case ICE_AQ_RC_ENOSEC: + case ICE_AQ_RC_EBADSIG: + return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; + case ICE_AQ_RC_ESVN: + return ICE_DDP_PKG_FILE_REVISION_TOO_LOW; + case ICE_AQ_RC_EBADMAN: + case ICE_AQ_RC_EBADBUF: + return ICE_DDP_PKG_LOAD_ERROR; + default: + return ICE_DDP_PKG_ERR; + } +} + /** * ice_dwnld_cfg_bufs * @hw: pointer to the hardware structure @@ -931,15 +1018,17 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) * to the firmware. 
Metadata buffers are skipped, and the first metadata buffer * found indicates that the rest of the buffers are all metadata buffers. */ -static enum ice_status +static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { - enum ice_status status; + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_buf_hdr *bh; + enum ice_aq_err err; u32 offset, info, i; + int status; if (!bufs || !count) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; /* If the first buffer's first section has its metadata bit set * then there are no buffers to be downloaded, and the operation is @@ -947,20 +1036,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) */ bh = (struct ice_buf_hdr *)bufs; if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) - return 0; - - /* reset pkg_dwnld_status in case this function is called in the - * reset/rebuild flow - */ - hw->pkg_dwnld_status = ICE_AQ_RC_OK; + return ICE_DDP_PKG_SUCCESS; status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); if (status) { - if (status == ICE_ERR_AQ_NO_WORK) - hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; - else - hw->pkg_dwnld_status = hw->adminq.sq_last_status; - return status; + if (status == -EALREADY) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); } for (i = 0; i < count; i++) { @@ -986,11 +1068,11 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) &offset, &info, NULL); /* Save AQ status from download package */ - hw->pkg_dwnld_status = hw->adminq.sq_last_status; if (status) { ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", status, offset, info); - + err = hw->adminq.sq_last_status; + state = ice_map_aq_err_to_ddp_state(err); break; } @@ -1000,7 +1082,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) ice_release_global_cfg_lock(hw); - return status; + return state; } /** @@ -1012,7 +1094,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) * * Get Package Info List (0x0C43) */ -static enum ice_status +static int ice_aq_get_pkg_info_list(struct ice_hw *hw, struct ice_aqc_get_pkg_info_resp *pkg_info, u16 buf_size, struct ice_sq_cd *cd) @@ -1031,7 +1113,7 @@ ice_aq_get_pkg_info_list(struct ice_hw *hw, * * Handles the download of a complete package. */ -static enum ice_status +static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_buf_table *ice_buf_tbl; @@ -1062,13 +1144,13 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) * * Saves off the package details into the HW structure. 
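Two error domains now coexist in the download path: the low-level AQ wrappers return kernel errnos, while ice_dwnld_cfg_bufs() and the callers above it report enum ice_ddp_state, translating firmware admin-queue errors through the new ice_map_aq_err_to_ddp_state(). For reference, the errno substitutions applied throughout this patch (taken from the hunks themselves) are:

        /* ICE_ERR_PARAM                          -> -EINVAL
         * ICE_ERR_INVAL_SIZE                     -> -EINVAL
         * ICE_ERR_CFG, ICE_ERR_AQ_ERROR,
         * ICE_ERR_HW_TABLE, ICE_ERR_OUT_OF_RANGE -> -EIO
         * ICE_ERR_NO_MEMORY                      -> -ENOMEM
         * ICE_ERR_MAX_LIMIT                      -> -ENOSPC
         * ICE_ERR_DOES_NOT_EXIST                 -> -ENOENT
         * ICE_ERR_ALREADY_EXISTS                 -> -EEXIST
         * ICE_ERR_AQ_NO_WORK                     -> -EALREADY
         * ICE_ERR_NOT_IMPL                       -> -EOPNOTSUPP
         */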
*/ -static enum ice_status +static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) { struct ice_generic_seg_hdr *seg_hdr; if (!pkg_hdr) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); if (seg_hdr) { @@ -1082,7 +1164,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) ICE_SID_METADATA); if (!meta) { ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; } hw->pkg_ver = meta->ver; @@ -1104,10 +1186,10 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) seg_hdr->seg_id); } else { ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; } - return 0; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1116,21 +1198,22 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) * * Store details of the package currently loaded in HW into the HW structure. */ -static enum ice_status ice_get_pkg_info(struct ice_hw *hw) +static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) { + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_aqc_get_pkg_info_resp *pkg_info; - enum ice_status status; u16 size; u32 i; size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); pkg_info = kzalloc(size, GFP_KERNEL); if (!pkg_info) - return ICE_ERR_NO_MEMORY; + return ICE_DDP_PKG_ERR; - status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); - if (status) + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { + state = ICE_DDP_PKG_ERR; goto init_pkg_free_alloc; + } for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { #define ICE_PKG_FLAG_COUNT 4 @@ -1165,7 +1248,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw) init_pkg_free_alloc: kfree(pkg_info); - return status; + return state; } /** @@ -1176,28 +1259,28 @@ init_pkg_free_alloc: * Verifies various attributes of the package file, including length, format * version, and the requirement of at least one segment. 
*/ -static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) { u32 seg_count; u32 i; if (len < struct_size(pkg, seg_offset, 1)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; /* pkg must have at least one segment */ seg_count = le32_to_cpu(pkg->seg_count); if (seg_count < 1) - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; /* make sure segment array fits in package length */ if (len < struct_size(pkg, seg_offset, seg_count)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; /* all segments must fit within length */ for (i = 0; i < seg_count; i++) { @@ -1206,16 +1289,16 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) /* segment header must fit */ if (len < off + sizeof(*seg)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); /* segment body must fit */ if (len < off + le32_to_cpu(seg->seg_size)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; } - return 0; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1259,13 +1342,18 @@ static void ice_init_pkg_regs(struct ice_hw *hw) * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR * definitions. */ -static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) { - if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || - pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) - return ICE_ERR_NOT_SUPPORTED; + if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; + else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; - return 0; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1276,20 +1364,20 @@ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) * * This function checks the package version compatibility with driver and NVM */ -static enum ice_status +static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, struct ice_seg **seg) { struct ice_aqc_get_pkg_info_resp *pkg; - enum ice_status status; + enum ice_ddp_state state; u16 size; u32 i; /* Check package version compatibility */ - status = ice_chk_pkg_version(&hw->pkg_ver); - if (status) { + state = ice_chk_pkg_version(&hw->pkg_ver); + if (state) { ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); - return status; + return state; } /* find ICE segment in given package */ @@ -1297,18 +1385,19 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, ospkg); if (!*seg) { ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; } /* Check if FW is compatible with the OS package */ size = struct_size(pkg, pkg_info, ICE_PKG_CNT); pkg = kzalloc(size, GFP_KERNEL); if (!pkg) - return ICE_ERR_NO_MEMORY; + return ICE_DDP_PKG_ERR; - status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); - if (status) + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { 
+ state = ICE_DDP_PKG_LOAD_ERROR; goto fw_ddp_compat_free_alloc; + } for (i = 0; i < le32_to_cpu(pkg->count); i++) { /* loop till we find the NVM package */ @@ -1318,7 +1407,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, pkg->pkg_info[i].ver.major || (*seg)->hdr.seg_format_ver.minor > pkg->pkg_info[i].ver.minor) { - status = ICE_ERR_FW_DDP_MISMATCH; + state = ICE_DDP_PKG_FW_MISMATCH; ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); } /* done processing NVM package so break */ @@ -1326,7 +1415,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, } fw_ddp_compat_free_alloc: kfree(pkg); - return status; + return state; } /** @@ -1367,7 +1456,7 @@ ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) * and store the index number in struct ice_switch_info *switch_info * in HW for following use. */ -static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) +static int ice_get_prof_index_max(struct ice_hw *hw) { u16 prof_index = 0, j, max_prof_index = 0; struct ice_pkg_enum state; @@ -1379,7 +1468,7 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) memset(&state, 0, sizeof(state)); if (!hw->seg) - return ICE_ERR_PARAM; + return -EINVAL; ice_seg = hw->seg; @@ -1410,6 +1499,34 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) } /** + * ice_get_ddp_pkg_state - get DDP pkg state after download + * @hw: pointer to the HW struct + * @already_loaded: indicates if pkg was already loaded onto the device + */ +static enum ice_ddp_state +ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded) +{ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { + if (already_loaded) + return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; + else + return ICE_DDP_PKG_SUCCESS; + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; + } else { + return ICE_DDP_PKG_ERR; + } +} + +/** * ice_init_pkg - initialize/download package * @hw: pointer to the hardware structure * @buf: pointer to the package buffer @@ -1434,53 +1551,54 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this * case. 
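The reworked ice_chk_pkg_version() above no longer demands exact equality: it distinguishes a package that is too new from one that is too old, comparing only the major and minor fields. A worked example, assuming the supported-version macros are ICE_PKG_SUPP_VER_MAJ == 1 and ICE_PKG_SUPP_VER_MNR == 3 (an assumption for illustration; the actual values live in ice_flex_pipe.h):

        /* pkg 1.3.x.y -> ICE_DDP_PKG_SUCCESS (update/draft are not compared)
         * pkg 1.4.0.0 -> ICE_DDP_PKG_FILE_VERSION_TOO_HIGH
         * pkg 2.0.0.0 -> ICE_DDP_PKG_FILE_VERSION_TOO_HIGH
         * pkg 1.2.0.0 -> ICE_DDP_PKG_FILE_VERSION_TOO_LOW
         * pkg 0.9.0.0 -> ICE_DDP_PKG_FILE_VERSION_TOO_LOW
         */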
*/ -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) { + bool already_loaded = false; + enum ice_ddp_state state; struct ice_pkg_hdr *pkg; - enum ice_status status; struct ice_seg *seg; if (!buf || !len) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; pkg = (struct ice_pkg_hdr *)buf; - status = ice_verify_pkg(pkg, len); - if (status) { + state = ice_verify_pkg(pkg, len); + if (state) { ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", - status); - return status; + state); + return state; } /* initialize package info */ - status = ice_init_pkg_info(hw, pkg); - if (status) - return status; + state = ice_init_pkg_info(hw, pkg); + if (state) + return state; /* before downloading the package, check package version for * compatibility with driver */ - status = ice_chk_pkg_compat(hw, pkg, &seg); - if (status) - return status; + state = ice_chk_pkg_compat(hw, pkg, &seg); + if (state) + return state; /* initialize package hints and then download package */ ice_init_pkg_hints(hw, seg); - status = ice_download_pkg(hw, seg); - if (status == ICE_ERR_AQ_NO_WORK) { + state = ice_download_pkg(hw, seg); + if (state == ICE_DDP_PKG_ALREADY_LOADED) { ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); - status = 0; + already_loaded = true; } /* Get information on the package currently loaded in HW, then make sure * the driver is compatible with this version. */ - if (!status) { - status = ice_get_pkg_info(hw); - if (!status) - status = ice_chk_pkg_version(&hw->active_pkg_ver); + if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { + state = ice_get_pkg_info(hw); + if (!state) + state = ice_get_ddp_pkg_state(hw, already_loaded); } - if (!status) { + if (ice_is_init_pkg_successful(state)) { hw->seg = seg; /* on successful package download update other required * registers to support the package and fill HW tables @@ -1488,13 +1606,14 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) */ ice_init_pkg_regs(hw); ice_fill_blk_tbls(hw); + ice_fill_hw_ptype(hw); ice_get_prof_index_max(hw); } else { ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", - status); + state); } - return status; + return state; } /** @@ -1520,18 +1639,19 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) * package buffer, as the new copy will be managed by this function and * related routines. 
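With ice_init_pkg() now returning enum ice_ddp_state, callers can no longer test for zero alone: several non-zero states (same version already loaded, compatible version already loaded) still count as success, which is exactly what ice_is_init_pkg_successful() encodes. A hedged sketch of a consumer, loosely modeled on the driver's firmware-load path (the real call site is ice_load_pkg() in ice_main.c and differs in detail; the function name here is hypothetical):

        #include <linux/firmware.h>

        static void example_init_ddp(struct ice_hw *hw, const struct firmware *fw)
        {
                enum ice_ddp_state state;

                state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
                if (ice_is_init_pkg_successful(state))
                        return;         /* package (or a compatible one) is active */

                /* the state says *why* init failed, so logging can be specific */
                switch (state) {
                case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
                        /* ask for an older DDP file; run in safe mode */
                        break;
                default:
                        break;
                }
        }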
*/ -enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +enum ice_ddp_state +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) { - enum ice_status status; + enum ice_ddp_state state; u8 *buf_copy; if (!buf || !len) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); - status = ice_init_pkg(hw, buf_copy, len); - if (status) { + state = ice_init_pkg(hw, buf_copy, len); + if (!ice_is_init_pkg_successful(state)) { /* Free the copy, since we failed to initialize the package */ devm_kfree(ice_hw_to_dev(hw), buf_copy); } else { @@ -1540,7 +1660,23 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) hw->pkg_size = len; } - return status; + return state; +} + +/** + * ice_is_init_pkg_successful - check if DDP init was successful + * @state: state of the DDP pkg after download + */ +bool ice_is_init_pkg_successful(enum ice_ddp_state state) +{ + switch (state) { + case ICE_DDP_PKG_SUCCESS: + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + return true; + default: + return false; + } } /** @@ -1644,7 +1780,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, * NOTE: The caller of the function is responsible for freeing the memory * allocated for every list entry. */ -enum ice_status +int ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, unsigned long *bm, struct list_head *fv_list) { @@ -1658,7 +1794,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, memset(&state, 0, sizeof(state)); if (!ids_cnt || !hw->seg) - return ICE_ERR_PARAM; + return -EINVAL; ice_seg = hw->seg; do { @@ -1702,7 +1838,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, } } while (fv); if (list_empty(fv_list)) - return ICE_ERR_CFG; + return -EIO; return 0; err: @@ -1711,7 +1847,7 @@ err: devm_kfree(ice_hw_to_dev(hw), fvl); } - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -1779,7 +1915,7 @@ static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) * result in some wasted space in the buffer. * Note: all package contents must be in Little Endian form. 
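ice_pkg_buf_reserve_section() is one step in a fixed build sequence that ice_create_tunnel()/ice_destroy_tunnel() below follow: allocate a build, reserve section slots before any section is added (hence the -EIO once section_count is non-zero), fill the sections, issue the update, and free the build. Condensed, with all of these being file-local helpers, the pattern looks roughly like:

        struct ice_boost_tcam_section *sect_rx;
        struct ice_buf_build *bld;
        int status;

        bld = ice_pkg_buf_alloc(hw);
        if (!bld)
                return -ENOMEM;

        status = ice_pkg_buf_reserve_section(bld, 2);   /* Rx + Tx sections */
        if (status)
                goto out;

        sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
                                            struct_size(sect_rx, tcam, 1));
        /* ... fill sect_rx, allocate and fill the Tx section ... */

        status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
out:
        ice_pkg_buf_free(hw, bld);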
*/ -static enum ice_status +static int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) { struct ice_buf_hdr *buf; @@ -1787,17 +1923,17 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) u16 data_end; if (!bld) - return ICE_ERR_PARAM; + return -EINVAL; buf = (struct ice_buf_hdr *)&bld->buf; /* already an active section, can't increase table size */ section_count = le16_to_cpu(buf->section_count); if (section_count > 0) - return ICE_ERR_CFG; + return -EIO; if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) - return ICE_ERR_CFG; + return -EIO; bld->reserved_section_table_entries += count; data_end = le16_to_cpu(buf->data_end) + @@ -1899,9 +2035,11 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) * ice_get_open_tunnel_port - retrieve an open tunnel port * @hw: pointer to the HW structure * @port: returns open port + * @type: type of tunnel, can be TNL_LAST if it doesn't matter */ bool -ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port) +ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port, + enum ice_tunnel_type type) { bool res = false; u16 i; @@ -1909,7 +2047,8 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port) mutex_lock(&hw->tnl_lock); for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) - if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) { + if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port && + (type == TNL_LAST || type == hw->tnl.tbl[i].type)) { *port = hw->tnl.tbl[i].port; res = true; break; @@ -1956,19 +2095,19 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type, * creating a package buffer with the tunnel info and issuing an update package * command. */ -static enum ice_status +static int ice_create_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type, u16 port) { struct ice_boost_tcam_section *sect_rx, *sect_tx; - enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; + int status = -ENOSPC; mutex_lock(&hw->tnl_lock); bld = ice_pkg_buf_alloc(hw); if (!bld) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_create_tunnel_end; } @@ -2027,26 +2166,26 @@ ice_create_tunnel_end: * targeting the specific updates requested and then performing an update * package. */ -static enum ice_status +static int ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type, u16 port) { struct ice_boost_tcam_section *sect_rx, *sect_tx; - enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; + int status = -ENOSPC; mutex_lock(&hw->tnl_lock); if (WARN_ON(!hw->tnl.tbl[index].valid || hw->tnl.tbl[index].type != type || hw->tnl.tbl[index].port != port)) { - status = ICE_ERR_OUT_OF_RANGE; + status = -EIO; goto ice_destroy_tunnel_end; } bld = ice_pkg_buf_alloc(hw); if (!bld) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_destroy_tunnel_end; } @@ -2094,7 +2233,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; enum ice_tunnel_type tnl_type; - enum ice_status status; + int status; u16 index; tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? 
TNL_VXLAN : TNL_GENEVE; @@ -2102,8 +2241,8 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port)); if (status) { - netdev_err(netdev, "Error adding UDP tunnel - %s\n", - ice_stat_str(status)); + netdev_err(netdev, "Error adding UDP tunnel - %d\n", + status); return -EIO; } @@ -2118,15 +2257,15 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; enum ice_tunnel_type tnl_type; - enum ice_status status; + int status; tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE; status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type, ntohs(ti->port)); if (status) { - netdev_err(netdev, "Error removing UDP tunnel - %s\n", - ice_stat_str(status)); + netdev_err(netdev, "Error removing UDP tunnel - %d\n", + status); return -EIO; } @@ -2142,17 +2281,17 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, * @prot: variable to receive the protocol ID * @off: variable to receive the protocol offset */ -enum ice_status +int ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off) { struct ice_fv_word *fv_ext; if (prof >= hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; if (fv_idx >= hw->blk[blk].es.fvw) - return ICE_ERR_PARAM; + return -EINVAL; fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); @@ -2175,11 +2314,11 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, * PTG ID that contains it through the PTG parameter, with the value of * ICE_DEFAULT_PTG (0) meaning it is part the default PTG. */ -static enum ice_status +static int ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) { if (ptype >= ICE_XLT1_CNT || !ptg) - return ICE_ERR_PARAM; + return -EINVAL; *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; return 0; @@ -2209,21 +2348,21 @@ static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) * This function will remove the ptype from the specific PTG, and move it to * the default PTG (ICE_DEFAULT_PTG). */ -static enum ice_status +static int ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { struct ice_ptg_ptype **ch; struct ice_ptg_ptype *p; if (ptype > ICE_XLT1_CNT - 1) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Should not happen if .in_use is set, bad config */ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) - return ICE_ERR_CFG; + return -EIO; /* find the ptype within this PTG, and bypass the link over it */ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; @@ -2256,17 +2395,17 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the * default PTG. 
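With errnos replacing enum ice_status, the driver-local ice_stat_str() pretty-printer goes away and the tunnel paths above log the raw integer. If symbolic names are wanted back, printk's %pe specifier can render an errno without a driver string table; a one-line alternative (not what this patch does):

        netdev_err(netdev, "Error adding UDP tunnel - %pe\n", ERR_PTR(status));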
*/ -static enum ice_status +static int ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { - enum ice_status status; u8 original_ptg; + int status; if (ptype > ICE_XLT1_CNT - 1) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); if (status) @@ -2401,11 +2540,11 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2) * This function will lookup the VSI entry in the XLT2 list and return * the VSI group its associated with. */ -static enum ice_status +static int ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) { if (!vsig || vsi >= ICE_MAX_VSI) - return ICE_ERR_PARAM; + return -EINVAL; /* As long as there's a default or valid VSIG associated with the input * VSI, the functions returns a success. Any handling of VSIG will be @@ -2470,7 +2609,7 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk) * for, the list must match exactly, including the order in which the * characteristics are listed. */ -static enum ice_status +static int ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, struct list_head *chs, u16 *vsig) { @@ -2484,7 +2623,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -2496,8 +2635,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, * The function will remove all VSIs associated with the input VSIG and move * them to the DEFAULT_VSIG and mark the VSIG available. */ -static enum ice_status -ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) +static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) { struct ice_vsig_prof *dtmp, *del; struct ice_vsig_vsi *vsi_cur; @@ -2505,10 +2643,10 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) idx = vsig & ICE_VSIG_IDX_M; if (idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; @@ -2557,7 +2695,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) * The function will remove the input VSI from its VSI group and move it * to the DEFAULT_VSIG. 
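ice_vsig_find_vsi() above succeeds for any valid VSI: the lookup always yields either a real VSI group or the default one, and callers distinguish the two after masking with ICE_VSIG_IDX_M. An illustrative use of the file-local helper:

        u16 vsig;

        if (ice_vsig_find_vsi(hw, ICE_BLK_RSS, vsi, &vsig))
                return -EINVAL;         /* bad VSI index or NULL out-pointer */

        if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
                ;       /* VSI is not yet a member of a real VSI group */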
*/ -static enum ice_status +static int ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; @@ -2566,10 +2704,10 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* entry already in default VSIG, don't have to remove */ if (idx == ICE_DEFAULT_VSIG) @@ -2577,7 +2715,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; if (!(*vsi_head)) - return ICE_ERR_CFG; + return -EIO; vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; vsi_cur = (*vsi_head); @@ -2594,7 +2732,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) /* verify if VSI was removed from group list */ if (!vsi_cur) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; vsi_cur->vsig = ICE_DEFAULT_VSIG; vsi_cur->changed = 1; @@ -2615,24 +2753,24 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) * move the entry to the DEFAULT_VSIG, update the original VSIG and * then move entry to the new VSIG. */ -static enum ice_status +static int ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi *tmp; - enum ice_status status; u16 orig_vsig, idx; + int status; idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; /* if VSIG not in use and VSIG is not default type this VSIG * doesn't exist. */ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && vsig != ICE_DEFAULT_VSIG) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (status) @@ -2738,7 +2876,7 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) * @masks: masks for FV * @prof_id: receives the profile ID */ -static enum ice_status +static int ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, struct ice_fv_word *fv, u16 *masks, u8 *prof_id) { @@ -2749,7 +2887,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, * field vector and mask. This will cause rule interference. */ if (blk == ICE_BLK_FD) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; for (i = 0; i < (u8)es->count; i++) { u16 off = i * es->fvw; @@ -2765,7 +2903,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -2818,14 +2956,14 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) * This function allocates a new entry in a Profile ID TCAM for a specific * block. */ -static enum ice_status +static int ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, u16 *tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); } @@ -2838,13 +2976,13 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, * * This function frees an entry in a Profile ID TCAM for a specific block. 
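The allocator/free pair here shares one failure mode worth noting: both return -EINVAL when the block has no TCAM resource type, before any hardware request is made. An illustrative pairing of the file-local helpers:

        u16 tcam_idx;
        int err;

        err = ice_alloc_tcam_ent(hw, ICE_BLK_FD, true /* bottom-up */, &tcam_idx);
        if (err)
                return err;     /* -EINVAL for an unknown block, or an AQ error */

        /* ... program the profile TCAM entry at tcam_idx ... */

        err = ice_free_tcam_ent(hw, ICE_BLK_FD, tcam_idx);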
*/ -static enum ice_status +static int ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_free_hw_res(hw, res_type, 1, &tcam_idx); } @@ -2858,15 +2996,14 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) * This function allocates a new profile ID, which also corresponds to a Field * Vector (Extraction Sequence) entry. */ -static enum ice_status -ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) +static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) { - enum ice_status status; u16 res_type; u16 get_prof; + int status; if (!ice_prof_id_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); if (!status) @@ -2883,14 +3020,13 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) * * This function frees a profile ID, which also corresponds to a Field Vector. */ -static enum ice_status -ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) +static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { u16 tmp_prof_id = (u16)prof_id; u16 res_type; if (!ice_prof_id_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); } @@ -2901,11 +3037,10 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to increment the reference count */ -static enum ice_status -ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) +static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; hw->blk[blk].es.ref_count[prof_id]++; @@ -3022,17 +3157,17 @@ static void ice_init_all_prof_masks(struct ice_hw *hw) * @mask: the 16-bit mask * @mask_idx: variable to receive the mask index */ -static enum ice_status +static int ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask, u16 *mask_idx) { bool found_unused = false, found_copy = false; - enum ice_status status = ICE_ERR_MAX_LIMIT; u16 unused_idx = 0, copy_idx = 0; + int status = -ENOSPC; u16 i; if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->blk[blk].masks.lock); @@ -3090,15 +3225,15 @@ err_ice_alloc_prof_mask: * @blk: hardware block * @mask_idx: index of mask */ -static enum ice_status +static int ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx) { if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; if (!(mask_idx >= hw->blk[blk].masks.first && mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; mutex_lock(&hw->blk[blk].masks.lock); @@ -3132,14 +3267,14 @@ exit_ice_free_prof_mask: * @blk: hardware block * @prof_id: profile ID */ -static enum ice_status +static int ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id) { u32 mask_bm; u16 i; if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; mask_bm = hw->blk[blk].es.mask_ena[prof_id]; for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++) @@ -3194,7 +3329,7 @@ static void ice_shutdown_all_prof_masks(struct ice_hw *hw) * @prof_id: profile ID * @masks: masks */ -static 
enum ice_status +static int ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, u16 *masks) { @@ -3224,7 +3359,7 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, if (ena_mask & BIT(i)) ice_free_prof_mask(hw, blk, i); - return ICE_ERR_OUT_OF_RANGE; + return -EIO; } /* enable the masks for this profile */ @@ -3266,11 +3401,11 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to decrement the reference count */ -static enum ice_status +static int ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { @@ -3708,7 +3843,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) * ice_init_hw_tbls - init hardware table memory * @hw: pointer to the hardware structure */ -enum ice_status ice_init_hw_tbls(struct ice_hw *hw) +int ice_init_hw_tbls(struct ice_hw *hw) { u8 i; @@ -3827,7 +3962,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) err: ice_free_hw_tbls(hw); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -3843,7 +3978,7 @@ err: * @nm_msk: never match mask * @key: output of profile ID key */ -static enum ice_status +static int ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], @@ -3899,7 +4034,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, * @dc_msk: don't care mask * @nm_msk: never match mask */ -static enum ice_status +static int ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], @@ -3907,7 +4042,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) { struct ice_prof_tcam_entry; - enum ice_status status; + int status; status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); @@ -3926,7 +4061,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, * @vsig: VSIG to query * @refs: pointer to variable to receive the reference count */ -static enum ice_status +static int ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) { u16 idx = vsig & ICE_VSIG_IDX_M; @@ -3935,7 +4070,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) *refs = 0; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; while (ptr) { @@ -3976,7 +4111,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -3996,7 +4131,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, sizeof(p->es[0])); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->prof_id); @@ -4014,7 +4149,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware 
*/ -static enum ice_status +static int ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4030,7 +4165,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, struct_size(p, entry, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->entry[0].addr = cpu_to_le16(tmp->tcam_idx); @@ -4050,7 +4185,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4066,7 +4201,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, struct_size(p, value, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->ptype); @@ -4082,7 +4217,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4101,7 +4236,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, struct_size(p, value, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->vsi); @@ -4121,18 +4256,18 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, * @blk: hardware block * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, struct list_head *chgs) { struct ice_buf_build *b; struct ice_chs_chg *tmp; - enum ice_status status; u16 pkg_sects; u16 xlt1 = 0; u16 xlt2 = 0; u16 tcam = 0; u16 es = 0; + int status; u16 sects; /* count number of sections we need */ @@ -4164,7 +4299,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, /* Build update package buffer */ b = ice_pkg_buf_alloc(hw); if (!b) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_pkg_buf_reserve_section(b, sects); if (status) @@ -4201,13 +4336,13 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, */ pkg_sects = ice_pkg_buf_get_active_sections(b); if (!pkg_sects || pkg_sects != sects) { - status = ICE_ERR_INVAL_SIZE; + status = -EINVAL; goto error_tmp; } /* update package */ status = ice_update_pkg(hw, ice_pkg_buf(b), 1); - if (status == ICE_ERR_AQ_ERROR) + if (status == -EIO) ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); error_tmp: @@ -4273,7 +4408,7 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = { * @prof_id: profile ID * @es: extraction sequence (length of array is determined by the block) */ -static enum ice_status +static int ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) { DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); @@ -4328,7 +4463,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) /* check for room */ if (first_free + 1 < (s8)ice_fd_pairs[index].count) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* place in extraction sequence */ for (k = 0; k < ice_fd_pairs[index].count; k++) { @@ -4338,7 +4473,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) ice_fd_pairs[index].off + (k * 2); if (k > first_free) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* keep track of non-relevant fields */ mask_sel |= 
BIT(first_free - k); @@ -4449,7 +4584,7 @@ ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type, * @attr: array of attributes that will be considered * @attr_cnt: number of elements in the attribute array */ -static enum ice_status +static int ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, const struct ice_ptype_attributes *attr, u16 attr_cnt) { @@ -4465,11 +4600,11 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, &prof->attr[prof->ptg_cnt]); if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; } if (!found) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; return 0; } @@ -4490,7 +4625,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, * it will not be written until the first call to ice_add_flow that specifies * the ID value used here. */ -enum ice_status +int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, struct ice_fv_word *es, u16 *masks) @@ -4498,9 +4633,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); struct ice_prof_map *prof; - enum ice_status status; u8 byte = 0; u8 prof_id; + int status; bitmap_zero(ptgs_used, ICE_XLT1_CNT); @@ -4538,7 +4673,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], /* add profile info */ prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL); if (!prof) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof; } @@ -4581,7 +4716,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], */ status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt); - if (status == ICE_ERR_MAX_LIMIT) + if (status == -ENOSPC) break; if (status) { /* This is simple a PTYPE/PTG with no @@ -4658,14 +4793,13 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) * @blk: hardware block * @idx: the index to release */ -static enum ice_status -ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) +static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) { /* Masks to invoke a never match entry */ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; - enum ice_status status; + int status; /* write the TCAM entry */ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, @@ -4685,11 +4819,11 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) * @blk: hardware block * @prof: pointer to profile structure to remove */ -static enum ice_status +static int ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, struct ice_vsig_prof *prof) { - enum ice_status status; + int status; u16 i; for (i = 0; i < prof->tcam_count; i++) @@ -4698,7 +4832,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, status = ice_rel_tcam_idx(hw, blk, prof->tcam[i].tcam_idx); if (status) - return ICE_ERR_HW_TABLE; + return -EIO; } return 0; @@ -4711,14 +4845,14 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, * @vsig: the VSIG to remove * @chg: the change list */ -static enum ice_status +static int ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *vsi_cur; struct ice_vsig_prof *d, *t; - enum 
ice_status status; + int status; /* remove TCAM entries */ list_for_each_entry_safe(d, t, @@ -4745,7 +4879,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; p->type = ICE_VSIG_REM; p->orig_vsig = vsig; @@ -4768,13 +4902,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, * @hdl: profile handle indicating which profile to remove * @chg: list to receive a record of changes */ -static enum ice_status +static int ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, struct list_head *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *p, *t; - enum ice_status status; + int status; list_for_each_entry_safe(p, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, @@ -4792,7 +4926,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, return status; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -4801,12 +4935,11 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, * @blk: hardware block * @id: profile tracking ID */ -static enum ice_status -ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) +static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_chs_chg *del, *tmp; - enum ice_status status; struct list_head chg; + int status; u16 i; INIT_LIST_HEAD(&chg); @@ -4842,16 +4975,16 @@ err_ice_rem_flow_all: * previously created through ice_add_prof. If any existing entries * are associated with this profile, they will be removed as well. */ -enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) +int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *pmap; - enum ice_status status; + int status; mutex_lock(&hw->blk[blk].es.prof_map_lock); pmap = ice_search_prof_id(hw, blk, id); if (!pmap) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_rem_prof; } @@ -4878,20 +5011,20 @@ err_ice_rem_prof: * @hdl: profile handle * @chg: change list */ -static enum ice_status +static int ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, struct list_head *chg) { - enum ice_status status = 0; struct ice_prof_map *map; struct ice_chs_chg *p; + int status = 0; u16 i; mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_get_prof; } @@ -4901,7 +5034,7 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_get_prof; } @@ -4933,7 +5066,7 @@ err_ice_get_prof: * * This routine makes a copy of the list of profiles in the specified VSIG. 
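Most of the removal helpers above do not touch hardware directly; they append struct ice_chs_chg records to a caller-owned change list, which is committed in a single package update and then freed. The pattern, as ice_rem_flow_all() in this file uses it:

        struct ice_chs_chg *del, *tmp;
        struct list_head chg;
        int status;

        INIT_LIST_HEAD(&chg);
        status = ice_rem_prof_id_vsig(hw, blk, vsig, hdl, &chg);
        if (!status)
                status = ice_upd_prof_hw(hw, blk, &chg);        /* one HW commit */

        list_for_each_entry_safe(del, tmp, &chg, list_entry) {
                list_del(&del->list_entry);
                devm_kfree(ice_hw_to_dev(hw), del);
        }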
*/ -static enum ice_status +static int ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *lst) { @@ -4961,7 +5094,7 @@ err_ice_get_profs_vsig: devm_kfree(ice_hw_to_dev(hw), ent1); } - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -4971,25 +5104,25 @@ err_ice_get_profs_vsig: * @lst: the list to be added to * @hdl: profile handle of entry to add */ -static enum ice_status +static int ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, struct list_head *lst, u64 hdl) { - enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *p; + int status = 0; u16 i; mutex_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_add_prof_to_lst; } p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof_to_lst; } @@ -5018,17 +5151,17 @@ err_ice_add_prof_to_lst: * @vsig: the VSIG to move the VSI to * @chg: the change list */ -static enum ice_status +static int ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; u16 orig_vsig; + int status; p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (!status) @@ -5078,13 +5211,13 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg) * * This function appends an enable or disable TCAM entry in the change log */ -static enum ice_status +static int ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, u16 vsig, struct ice_tcam_inf *tcam, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; + int status; u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; @@ -5117,7 +5250,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, tcam->ptg, vsig, 0, tcam->attr.flags, @@ -5151,13 +5284,13 @@ err_ice_prof_tcam_ena_dis: * @vsig: the VSIG for which to adjust profile priorities * @chg: the change list */ -static enum ice_status +static int ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *chg) { DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); struct ice_vsig_prof *t; - enum ice_status status; + int status; u16 idx; bitmap_zero(ptgs_used, ICE_XLT1_CNT); @@ -5222,7 +5355,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, * @rev: true to add entries to the end of the list * @chg: the change list */ -static enum ice_status +static int ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, bool rev, struct list_head *chg) { @@ -5230,26 +5363,26 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; - enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *t; struct ice_chs_chg *p; u16 vsig_idx, i; + int status = 0; /* 
Error, if this VSIG already has this profile */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; /* new VSIG profile structure */ t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL); if (!t) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_add_prof_id_vsig; } @@ -5264,7 +5397,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof_id_vsig; } @@ -5334,21 +5467,21 @@ err_ice_add_prof_id_vsig: * @hdl: the profile handle of the profile that will be added to the VSIG * @chg: the change list */ -static enum ice_status +static int ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; u16 new_vsig; + int status; p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; new_vsig = ice_vsig_alloc(hw, blk); if (!new_vsig) { - status = ICE_ERR_HW_TABLE; + status = -EIO; goto err_ice_create_prof_id_vsig; } @@ -5384,18 +5517,18 @@ err_ice_create_prof_id_vsig: * @new_vsig: return of new VSIG * @chg: the change list */ -static enum ice_status +static int ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, struct list_head *lst, u16 *new_vsig, struct list_head *chg) { struct ice_vsig_prof *t; - enum ice_status status; + int status; u16 vsig; vsig = ice_vsig_alloc(hw, blk); if (!vsig) - return ICE_ERR_HW_TABLE; + return -EIO; status = ice_move_vsi(hw, blk, vsi, vsig, chg); if (status) @@ -5425,8 +5558,8 @@ static bool ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) { struct ice_vsig_prof *t; - enum ice_status status; struct list_head lst; + int status; INIT_LIST_HEAD(&lst); @@ -5456,14 +5589,14 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) * profile indicated by the ID parameter for the VSIs specified in the VSI * array. Once successfully called, the flow will be enabled. 
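ice_add_prof_id_flow() and its counterpart ice_rem_prof_id_flow() are the exported entry points built on the VSIG machinery above; after this patch their callers see ordinary errnos. A sketch of the pairing, where a caller might choose to tolerate -EEXIST:

        int err;

        err = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, prof_hdl);
        if (err && err != -EEXIST)      /* -EEXIST: VSIG already has the profile */
                return err;

        /* ... traffic flows with the profile enabled ... */

        err = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, prof_hdl);
        /* -ENOENT here means the profile was never attached to this VSI */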
*/ -enum ice_status +int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) { struct ice_vsig_prof *tmp1, *del1; struct ice_chs_chg *tmp, *del; struct list_head union_lst; - enum ice_status status; struct list_head chg; + int status; u16 vsig; INIT_LIST_HEAD(&union_lst); @@ -5489,7 +5622,7 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) * scenario */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { - status = ICE_ERR_ALREADY_EXISTS; + status = -EEXIST; goto err_ice_add_prof_id_flow; } @@ -5597,7 +5730,7 @@ err_ice_add_prof_id_flow: * @lst: list to remove the profile from * @hdl: the profile handle indicating the profile to remove */ -static enum ice_status +static int ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) { struct ice_vsig_prof *ent, *tmp; @@ -5609,7 +5742,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -5623,13 +5756,13 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) * profile indicated by the ID parameter for the VSIs specified in the VSI * array. Once successfully called, the flow will be disabled. */ -enum ice_status +int ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) { struct ice_vsig_prof *tmp1, *del1; struct ice_chs_chg *tmp, *del; struct list_head chg, copy; - enum ice_status status; + int status; u16 vsig; INIT_LIST_HEAD(©); @@ -5724,7 +5857,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) } } } else { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } /* update hardware tables */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 344c2637facd..6cbc29bcb02f 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -18,10 +18,67 @@ #define ICE_PKG_CNT 4 -enum ice_status +enum ice_ddp_state { + /* Indicates that this call to ice_init_pkg + * successfully loaded the requested DDP package + */ + ICE_DDP_PKG_SUCCESS = 0, + + /* Generic error for already loaded errors, it is mapped later to + * the more specific one (one of the next 3) + */ + ICE_DDP_PKG_ALREADY_LOADED = -1, + + /* Indicates that a DDP package of the same version has already been + * loaded onto the device by a previous call or by another PF + */ + ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, + + /* The device has a DDP package that is not supported by the driver */ + ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, + + /* The device has a compatible package + * (but different from the request) already loaded + */ + ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, + + /* The firmware loaded on the device is not compatible with + * the DDP package loaded + */ + ICE_DDP_PKG_FW_MISMATCH = -5, + + /* The DDP package file is invalid */ + ICE_DDP_PKG_INVALID_FILE = -6, + + /* The version of the DDP package provided is higher than + * the driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, + + /* The version of the DDP package provided is lower than the + * driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, + + /* The signature of the DDP package file provided is invalid */ + ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9, + + /* The DDP package file security revision is too low and not + * supported by firmware + */ + ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10, + + /* An error occurred in firmware while loading 
the DDP package */ + ICE_DDP_PKG_LOAD_ERROR = -11, + + /* Other errors */ + ICE_DDP_PKG_ERR = -12 +}; + +int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_change_lock(struct ice_hw *hw); -enum ice_status +int ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off); void @@ -29,32 +86,37 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, unsigned long *bm); void ice_init_prof_result_bm(struct ice_hw *hw); -enum ice_status +int ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, unsigned long *bm, struct list_head *fv_list); bool -ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port); +ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port, + enum ice_tunnel_type type); int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti); int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti); -enum ice_status +/* Rx parser PTYPE functions */ +bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype); + +/* XLT2/VSI group functions */ +int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, struct ice_fv_word *es, u16 *masks); -enum ice_status +int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); -enum ice_status +int ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); -enum ice_status +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); -enum ice_status ice_init_hw_tbls(struct ice_hw *hw); +bool ice_is_init_pkg_successful(enum ice_ddp_state state); +int ice_init_hw_tbls(struct ice_hw *hw); void ice_free_seg(struct ice_hw *hw); void ice_fill_blk_tbls(struct ice_hw *hw); void ice_clear_hw_tbls(struct ice_hw *hw); void ice_free_hw_tbls(struct ice_hw *hw); -enum ice_status -ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); +int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); #endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 0f572a36d021..fc087e0b5292 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -160,6 +160,7 @@ struct ice_meta_sect { #define ICE_SID_CDID_KEY_BUILDER_RSS 47 #define ICE_SID_CDID_REDIR_RSS 48 +#define ICE_SID_RXPARSER_MARKER_PTYPE 55 #define ICE_SID_RXPARSER_BOOST_TCAM 56 #define ICE_SID_TXPARSER_BOOST_TCAM 66 @@ -201,6 +202,24 @@ enum ice_sect { ICE_SECT_COUNT }; +/* Packet Type (PTYPE) values */ +#define ICE_PTYPE_MAC_PAY 1 +#define ICE_PTYPE_IPV4_PAY 23 +#define ICE_PTYPE_IPV4_UDP_PAY 24 +#define ICE_PTYPE_IPV4_TCP_PAY 26 +#define ICE_PTYPE_IPV4_SCTP_PAY 27 +#define ICE_PTYPE_IPV6_PAY 89 +#define ICE_PTYPE_IPV6_UDP_PAY 90 +#define ICE_PTYPE_IPV6_TCP_PAY 92 +#define ICE_PTYPE_IPV6_SCTP_PAY 93 +#define ICE_MAC_IPV4_ESP 160 +#define ICE_MAC_IPV6_ESP 161 +#define ICE_MAC_IPV4_AH 162 +#define ICE_MAC_IPV6_AH 163 +#define ICE_MAC_IPV4_NAT_T_ESP 164 +#define ICE_MAC_IPV6_NAT_T_ESP 165 +#define ICE_MAC_IPV4_GTPU 329 +#define ICE_MAC_IPV6_GTPU 330 #define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331 #define ICE_MAC_IPV4_GTPU_IPV4_PAY 332 #define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 
333 @@ -221,6 +240,10 @@ enum ice_sect { #define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348 #define ICE_MAC_IPV6_GTPU_IPV6_TCP 349 #define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350 +#define ICE_MAC_IPV4_PFCP_SESSION 352 +#define ICE_MAC_IPV6_PFCP_SESSION 354 +#define ICE_MAC_IPV4_L2TPV3 360 +#define ICE_MAC_IPV6_L2TPV3 361 /* Attributes that can modify PTYPE definitions. * @@ -329,6 +352,25 @@ struct ice_boost_tcam_section { sizeof(struct ice_boost_tcam_entry), \ sizeof(struct ice_boost_tcam_entry)) +/* package Marker Ptype TCAM entry */ +struct ice_marker_ptype_tcam_entry { +#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024 + __le16 addr; + __le16 ptype; + u8 keys[20]; +}; + +struct ice_marker_ptype_tcam_section { + __le16 count; + __le16 reserved; + struct ice_marker_ptype_tcam_entry tcam[]; +}; + +#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \ + ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \ + sizeof(struct ice_marker_ptype_tcam_entry), \ + sizeof(struct ice_marker_ptype_tcam_entry)) + struct ice_xlt1_section { __le16 count; __le16 offset; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index f160672448a0..2c5332953679 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -609,8 +609,6 @@ struct ice_flow_prof_params { ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \ ICE_FLOW_SEG_HDR_NAT_T_ESP) -#define ICE_FLOW_SEG_HDRS_L2_MASK \ - (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) #define ICE_FLOW_SEG_HDRS_L3_MASK \ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP) #define ICE_FLOW_SEG_HDRS_L4_MASK \ @@ -625,8 +623,7 @@ struct ice_flow_prof_params { * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided */ -static enum ice_status -ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) +static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) { u8 i; @@ -634,12 +631,12 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) /* Multiple L3 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) - return ICE_ERR_PARAM; + return -EINVAL; /* Multiple L4 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) - return ICE_ERR_PARAM; + return -EINVAL; } return 0; @@ -700,8 +697,7 @@ static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) * This function identifies the packet types associated with the protocol * headers being present in packet segments of the specified flow profile. */ -static enum ice_status -ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) +static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) { struct ice_flow_prof *prof; u8 i; @@ -898,7 +894,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) * field. It then allocates one or more extraction sequence entries for the * given field, and fill the entries with protocol ID and offset information. 
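For reference, the ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF macro above computes how many marker-PTYPE TCAM entries fit in one DDP package buffer: struct_size(sec, tcam, 1) minus one entry size yields the section header, and the remaining space is divided by the entry size. A minimal host-side sketch of the same arithmetic, assuming a 4096-byte package buffer and ignoring the outer package-buffer header that ICE_MAX_ENTRIES_IN_BUF also accounts for (the struct layout is copied from the section above; __le16 is modeled as uint16_t):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct marker_ptype_tcam_entry {
	uint16_t addr;			/* __le16 in the driver */
	uint16_t ptype;			/* __le16 in the driver */
	uint8_t keys[20];
};

struct marker_ptype_tcam_section {
	uint16_t count;
	uint16_t reserved;
	struct marker_ptype_tcam_entry tcam[];	/* flexible array member */
};

int main(void)
{
	/* struct_size(sec, tcam, 1) - sizeof(entry) == header size */
	size_t hdr = offsetof(struct marker_ptype_tcam_section, tcam);
	size_t ent = sizeof(struct marker_ptype_tcam_entry);
	size_t buf = 4096;		/* assumed buffer size */

	printf("max marker-PTYPE entries per buffer: %zu\n",
	       (buf - hdr) / ent);
	return 0;
}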
*/ -static enum ice_status +static int ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, u8 seg, enum ice_flow_field fld, u64 match) { @@ -1035,7 +1031,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, prot_id = ICE_PROT_GRE_OF; break; default: - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; } /* Each extraction sequence entry is a word in size, and extracts a @@ -1073,7 +1069,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, * does not exceed the block's capability */ if (params->es_cnt >= fv_words) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) @@ -1099,7 +1095,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, * @params: information about the flow to be processed * @seg: index of packet segment whose raw fields are to be extracted */ -static enum ice_status +static int ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, u8 seg) { @@ -1112,12 +1108,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, if (params->prof->segs[seg].raws_cnt > ARRAY_SIZE(params->prof->segs[seg].raws)) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* Offsets within the segment headers are not supported */ hdrs_sz = ice_flow_calc_seg_sz(params, seg); if (!hdrs_sz) - return ICE_ERR_PARAM; + return -EINVAL; fv_words = hw->blk[params->blk].es.fvw; @@ -1150,7 +1146,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, */ if (params->es_cnt >= hw->blk[params->blk].es.count || params->es_cnt >= ICE_MAX_FV_WORDS) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) @@ -1176,12 +1172,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, * This function iterates through all matched fields in the given segments, and * creates an extraction sequence for the fields. 
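The ice_ddp_state values introduced in ice_flex_pipe.h above intentionally live in their own namespace rather than in the kernel errno space, because several "already loaded" outcomes are not failures, and ice_is_init_pkg_successful() exists to classify them. A standalone sketch of how a caller might gate on the result; the success set shown here (exact-version and compatible already-loaded packages counting as success) is inferred from the enum comments, not copied from the driver's implementation:

#include <stdbool.h>
#include <stdio.h>

enum ice_ddp_state {
	ICE_DDP_PKG_SUCCESS = 0,
	ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
	ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
	ICE_DDP_PKG_ERR = -12,
	/* remaining states elided; see the full enum above */
};

/* Assumed predicate: any load that leaves a usable package succeeds. */
static bool init_pkg_successful(enum ice_ddp_state state)
{
	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	enum ice_ddp_state st = ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;

	printf("package usable: %s\n", init_pkg_successful(st) ? "yes" : "no");
	return 0;
}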
*/ -static enum ice_status +static int ice_flow_create_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof_params *params) { struct ice_flow_prof *prof = params->prof; - enum ice_status status = 0; + int status = 0; u8 i; for (i = 0; i < prof->segs_cnt; i++) { @@ -1210,10 +1206,10 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw, * @hw: pointer to the HW struct * @params: information about the flow to be processed */ -static enum ice_status +static int ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) { - enum ice_status status; + int status; status = ice_flow_proc_seg_hdrs(params); if (status) @@ -1229,7 +1225,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) status = 0; break; default: - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; } return status; @@ -1329,12 +1325,12 @@ ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry) * @blk: classification stage * @entry: flow entry to be removed */ -static enum ice_status +static int ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, struct ice_flow_entry *entry) { if (!entry) - return ICE_ERR_BAD_PTR; + return -EINVAL; list_del(&entry->l_entry); @@ -1355,27 +1351,27 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, * * Assumption: the caller has acquired the lock to the profile list */ -static enum ice_status +static int ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof) { struct ice_flow_prof_params *params; - enum ice_status status; + int status; u8 i; if (!prof) - return ICE_ERR_BAD_PTR; + return -EINVAL; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof), GFP_KERNEL); if (!params->prof) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto free_params; } @@ -1432,11 +1428,11 @@ free_params: * * Assumption: the caller has acquired the lock to the profile list */ -static enum ice_status +static int ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof) { - enum ice_status status; + int status; /* Remove all remaining flow entries before removing the flow profile */ if (!list_empty(&prof->entries)) { @@ -1474,11 +1470,11 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ -static enum ice_status +static int ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { - enum ice_status status = 0; + int status = 0; if (!test_bit(vsi_handle, prof->vsis)) { status = ice_add_prof_id_flow(hw, blk, @@ -1505,11 +1501,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ -static enum ice_status +static int ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { - enum ice_status status = 0; + int status = 0; if (test_bit(vsi_handle, prof->vsis)) { status = ice_rem_prof_id_flow(hw, blk, @@ -1536,21 +1532,21 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, * @segs_cnt: number of packet segments provided * @prof: stores the returned flow profile added */ -enum ice_status +int 
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof) { - enum ice_status status; + int status; if (segs_cnt > ICE_FLOW_SEG_MAX) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; if (!segs_cnt) - return ICE_ERR_PARAM; + return -EINVAL; if (!segs) - return ICE_ERR_BAD_PTR; + return -EINVAL; status = ice_flow_val_hdrs(segs, segs_cnt); if (status) @@ -1574,17 +1570,16 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, * @blk: the block for which the flow profile is to be removed * @prof_id: unique ID of the flow profile to be removed */ -enum ice_status -ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) +int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) { struct ice_flow_prof *prof; - enum ice_status status; + int status; mutex_lock(&hw->fl_profs_locks[blk]); prof = ice_flow_find_prof_id(hw, blk, prof_id); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto out; } @@ -1608,34 +1603,34 @@ out: * @data: pointer to a data buffer containing flow entry's match values/masks * @entry_h: pointer to buffer that receives the new flow entry's handle */ -enum ice_status +int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio, void *data, u64 *entry_h) { struct ice_flow_entry *e = NULL; struct ice_flow_prof *prof; - enum ice_status status; + int status; /* No flow entry data is expected for RSS */ if (!entry_h || (!data && blk != ICE_BLK_RSS)) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->fl_profs_locks[blk]); prof = ice_flow_find_prof_id(hw, blk, prof_id); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } else { /* Allocate memory for the entry being added and associate * the VSI to the found flow profile */ e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL); if (!e) - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; else status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); } @@ -1654,7 +1649,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, case ICE_BLK_RSS: break; default: - status = ICE_ERR_NOT_IMPL; + status = -EOPNOTSUPP; goto out; } @@ -1680,15 +1675,14 @@ out: * @blk: classification stage * @entry_h: handle to the flow entry to be removed */ -enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, - u64 entry_h) +int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h) { struct ice_flow_entry *entry; struct ice_flow_prof *prof; - enum ice_status status = 0; + int status = 0; if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL) - return ICE_ERR_PARAM; + return -EINVAL; entry = ICE_FLOW_ENTRY_PTR(entry_h); @@ -1836,7 +1830,7 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, * header value to set flow field segment for further use in flow * profile entry or removal. 
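Every hunk above follows the same ICE_ERR_* to errno substitution, applied inline at each return site. A hypothetical helper that condenses the mapping as it appears throughout this file (the enum names are stand-ins for the removed ice_status values; the driver itself performs the substitution inline rather than routing through such a function):

#include <errno.h>
#include <stdio.h>

enum old_ice_status {		/* stand-in for the removed enum */
	ICE_ERR_PARAM,
	ICE_ERR_BAD_PTR,
	ICE_ERR_NO_MEMORY,
	ICE_ERR_DOES_NOT_EXIST,
	ICE_ERR_ALREADY_EXISTS,
	ICE_ERR_MAX_LIMIT,
	ICE_ERR_NOT_IMPL,
	ICE_ERR_CFG,
};

static int status_to_errno(enum old_ice_status status)
{
	switch (status) {
	case ICE_ERR_PARAM:
	case ICE_ERR_BAD_PTR:
		return -EINVAL;	/* bad argument or NULL pointer */
	case ICE_ERR_NO_MEMORY:
		return -ENOMEM;
	case ICE_ERR_DOES_NOT_EXIST:
		return -ENOENT;
	case ICE_ERR_ALREADY_EXISTS:
		return -EEXIST;
	case ICE_ERR_MAX_LIMIT:
		return -ENOSPC;	/* table or sequence capacity reached */
	case ICE_ERR_NOT_IMPL:
		return -EOPNOTSUPP;
	case ICE_ERR_CFG:
	default:
		return -EIO;
	}
}

int main(void)
{
	printf("ICE_ERR_DOES_NOT_EXIST -> %d\n",
	       status_to_errno(ICE_ERR_DOES_NOT_EXIST));
	return 0;
}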
*/ -static enum ice_status +static int ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, u32 flow_hdr) { @@ -1853,15 +1847,15 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER) - return ICE_ERR_PARAM; + return -EINVAL; val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); if (val && !is_power_of_2(val)) - return ICE_ERR_CFG; + return -EIO; val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); if (val && !is_power_of_2(val)) - return ICE_ERR_CFG; + return -EIO; return 0; } @@ -1899,14 +1893,14 @@ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle) * the VSI from that profile. If the flow profile has no VSIs it will * be removed. */ -enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) +int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *p, *t; - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (list_empty(&hw->fl_profs[blk])) return 0; @@ -1966,7 +1960,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { struct ice_rss_cfg *r, *rss_cfg; @@ -1981,7 +1975,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg), GFP_KERNEL); if (!rss_cfg) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match; rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs; @@ -2022,21 +2016,21 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs, u8 segs_cnt) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *prof = NULL; struct ice_flow_seg_info *segs; - enum ice_status status; + int status; if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX) - return ICE_ERR_PARAM; + return -EINVAL; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, @@ -2128,15 +2122,15 @@ exit: * the input fields to hash on, the flow type and use the VSI number to add * a flow entry to the profile. 
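ice_flow_set_rss_seg_info() above (like ice_flow_val_hdrs() earlier) enforces that a segment selects at most one L3 and at most one L4 header: the masked subset must be zero or a power of two. A minimal sketch of that check with illustrative bit values (the real masks are built from the ICE_FLOW_SEG_HDR_* flags):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors "val && !is_power_of_2(val)" being treated as an error:
 * zero header bits set is fine, exactly one is fine, more is not.
 */
static bool at_most_one_bit(uint64_t v)
{
	return (v & (v - 1)) == 0;
}

int main(void)
{
	uint64_t l3_mask = 0x3;		/* e.g. IPV4 | IPV6, illustrative */
	uint64_t hdrs = 0x3;		/* both L3 headers: invalid */

	printf("L3 selection valid: %s\n",
	       at_most_one_bit(hdrs & l3_mask) ? "yes" : "no");
	return 0;
}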
*/ -enum ice_status +int ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs) { - enum ice_status status; + int status; if (hashed_flds == ICE_HASH_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, @@ -2159,18 +2153,18 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs, u8 segs_cnt) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_seg_info *segs; struct ice_flow_prof *prof; - enum ice_status status; + int status; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, @@ -2182,7 +2176,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto out; } @@ -2216,15 +2210,15 @@ out: * removed. Calls are made to underlying flow s which will APIs * turn build or update buffers for RSS XLT1 section. */ -enum ice_status __maybe_unused +int __maybe_unused ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs) { - enum ice_status status; + int status; if (hashed_flds == ICE_HASH_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, @@ -2279,20 +2273,19 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * message, convert it to ICE-compatible values, and configure RSS flow * profiles. 
*/ -enum ice_status -ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) +int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) { - enum ice_status status = 0; + int status = 0; u64 hash_flds; if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* Make sure no unsupported bits are specified */ if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS | ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)) - return ICE_ERR_CFG; + return -EIO; hash_flds = avf_hash; @@ -2352,7 +2345,7 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) } if (rss_hash == ICE_HASH_INVALID) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; status = ice_add_rss_cfg(hw, vsi_handle, rss_hash, ICE_FLOW_SEG_HDR_NONE); @@ -2368,13 +2361,13 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle */ -enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { - enum ice_status status = 0; struct ice_rss_cfg *r; + int status = 0; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); list_for_each_entry(r, &hw->rss_list_head, l_entry) { diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 2a2d8c1536cb..d8782b28323e 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -383,18 +383,16 @@ struct ice_rss_cfg { u32 packet_hdr; }; -enum ice_status +int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof); -enum ice_status -ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); -enum ice_status +int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); +int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi, enum ice_flow_priority prio, void *data, u64 *entry_h); -enum ice_status -ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h); +int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h); void ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, u16 val_loc, u16 mask_loc, u16 last_loc, bool range); @@ -402,14 +400,13 @@ void ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, u16 val_loc, u16 mask_loc); void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); -enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status -ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); -enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status +int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); +int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); +int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); +int ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs); -enum ice_status +int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs); u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs); diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c index c2e78eaf4ccb..94903b450345 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.c +++ 
b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -47,12 +47,69 @@ ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info, } /** + * ice_fltr_set_vlan_vsi_promisc + * @hw: pointer to the hardware structure + * @vsi: the VSI being configured + * @promisc_mask: mask of promiscuous config bits + * + * Set VSI with all associated VLANs to given promiscuous mode(s) + */ +int +ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask) +{ + return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false); +} + +/** + * ice_fltr_clear_vlan_vsi_promisc + * @hw: pointer to the hardware structure + * @vsi: the VSI being configured + * @promisc_mask: mask of promiscuous config bits + * + * Clear VSI with all associated VLANs to given promiscuous mode(s) + */ +int +ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask) +{ + return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true); +} + +/** + * ice_fltr_clear_vsi_promisc - clear specified promiscuous mode(s) + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to clear mode + * @promisc_mask: mask of promiscuous config bits to clear + * @vid: VLAN ID to clear VLAN promiscuous + */ +int +ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid) +{ + return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid); +} + +/** + * ice_fltr_set_vsi_promisc - set given VSI to given promiscuous mode(s) + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to configure + * @promisc_mask: mask of promiscuous config bits + * @vid: VLAN ID to set VLAN promiscuous + */ +int +ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid) +{ + return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid); +} + +/** * ice_fltr_add_mac_list - add list of MAC filters * @vsi: pointer to VSI struct * @list: list of filters */ -enum ice_status -ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) +int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_mac(&vsi->back->hw, list); } @@ -62,8 +119,7 @@ ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -enum ice_status -ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) +int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_mac(&vsi->back->hw, list); } @@ -73,8 +129,7 @@ ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_vlan(&vsi->back->hw, list); } @@ -84,7 +139,7 @@ ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status +static int ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_vlan(&vsi->back->hw, list); @@ -95,8 +150,7 @@ ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_eth_mac(&vsi->back->hw, list); } 
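The four promiscuous-mode helpers added above are deliberately thin: the VSI-wide VLAN variants both delegate to ice_set_vlan_vsi_promisc() with vsi->idx, differing only in the final set/clear flag. A userspace model of that pattern (types and names here are simplified stand-ins, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vsi { uint16_t idx; };

/* Stand-in for ice_set_vlan_vsi_promisc(); rm selects clear vs. set. */
static int set_vlan_vsi_promisc(uint16_t vsi_handle, uint8_t mask, bool rm)
{
	printf("%s promisc mask 0x%02x on VSI %u\n",
	       rm ? "clearing" : "setting", mask, vsi_handle);
	return 0;
}

static int fltr_set_vlan_vsi_promisc(struct vsi *v, uint8_t mask)
{
	return set_vlan_vsi_promisc(v->idx, mask, false);
}

static int fltr_clear_vlan_vsi_promisc(struct vsi *v, uint8_t mask)
{
	return set_vlan_vsi_promisc(v->idx, mask, true);
}

int main(void)
{
	struct vsi v = { .idx = 3 };
	uint8_t mask = 0x1;	/* illustrative promiscuous-config bit */

	fltr_set_vlan_vsi_promisc(&v, mask);
	return fltr_clear_vlan_vsi_promisc(&v, mask);
}

Keeping the wrappers in ice_fltr.c means callers depend only on ice_fltr.h rather than on the switch-layer entry points directly.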
@@ -106,8 +160,7 @@ ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_eth_mac(&vsi->back->hw, list); } @@ -207,18 +260,17 @@ ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list, * @action: action to be performed on filter match * @mac_action: pointer to add or remove MAC function */ -static enum ice_status +static int ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action, - enum ice_status (*mac_action)(struct ice_vsi *, - struct list_head *)) + int (*mac_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) { ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } result = mac_action(vsi, &tmp_list); @@ -233,21 +285,21 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac, * @action: action to be performed on filter match * @mac_action: pointer to add or remove MAC function */ -static enum ice_status +static int ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action, - enum ice_status(*mac_action) + int(*mac_action) (struct ice_vsi *, struct list_head *)) { u8 broadcast[ETH_ALEN]; - enum ice_status result; LIST_HEAD(tmp_list); + int result; eth_broadcast_addr(broadcast); if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) || ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) { ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } result = mac_action(vsi, &tmp_list); @@ -262,17 +314,16 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, * @action: action to be performed on filter match * @vlan_action: pointer to add or remove VLAN function */ -static enum ice_status +static int ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id, enum ice_sw_fwd_act_type action, - enum ice_status (*vlan_action)(struct ice_vsi *, - struct list_head *)) + int (*vlan_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action)) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; result = vlan_action(vsi, &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); @@ -287,17 +338,16 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id, * @action: action to be performed on filter match * @eth_action: pointer to add or remove ethertype function */ -static enum ice_status +static int ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action, - enum ice_status (*eth_action)(struct ice_vsi *, - struct list_head *)) + int (*eth_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action)) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; result = eth_action(vsi, &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); @@ -310,8 +360,8 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, * @mac: MAC to add * @action: action to be performed on filter match */ -enum ice_status 
ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, - enum ice_sw_fwd_act_type action) +int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list); } @@ -322,7 +372,7 @@ enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, * @mac: MAC to add * @action: action to be performed on filter match */ -enum ice_status +int ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action) { @@ -336,8 +386,8 @@ ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, * @mac: filter MAC to remove * @action: action to remove */ -enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, - enum ice_sw_fwd_act_type action) +int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list); } @@ -348,8 +398,8 @@ enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, * @vlan_id: VLAN ID to add * @action: action to be performed on filter match */ -enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id, - enum ice_sw_fwd_act_type action) +int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_vlan(vsi, vlan_id, action, ice_fltr_add_vlan_list); @@ -361,8 +411,8 @@ enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id, * @vlan_id: filter VLAN to remove * @action: action to remove */ -enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id, - enum ice_sw_fwd_act_type action) +int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_vlan(vsi, vlan_id, action, ice_fltr_remove_vlan_list); @@ -375,8 +425,8 @@ enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id, * @flag: direction of packet to be filtered, Tx or Rx * @action: action to be performed on filter match */ -enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, - enum ice_sw_fwd_act_type action) +int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_add_eth_list); @@ -389,8 +439,8 @@ enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, * @flag: direction of filter * @action: action to remove */ -enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, - u16 flag, enum ice_sw_fwd_act_type action) +int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_remove_eth_list); @@ -406,17 +456,17 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, * @src: source VSI * @new_flags: combinations of lb_en and lan_en */ -static enum ice_status +static int ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id, u32 act, u16 type, u16 src, u32 new_flags) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status err; u32 flags_mask; + int err; s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE; act &= ~flags_mask; @@ -465,7 +515,7 @@ static u32 ice_fltr_build_action(u16 vsi_id) * Flags should be a combination of 
ICE_SINGLE_ACT_LB_ENABLE and * ICE_SINGLE_ACT_LAN_ENABLE. */ -enum ice_status +int ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, u32 new_flags) { diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h index 8eec4febead1..0e65fd021ad2 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.h +++ b/drivers/net/ethernet/intel/ice/ice_fltr.h @@ -5,38 +5,52 @@ #define _ICE_FLTR_H_ void ice_fltr_free_list(struct device *dev, struct list_head *h); -enum ice_status +int +ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask); +int +ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask); +int +ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +int +ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +int ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status -ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list); -enum ice_status +int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list); +int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status -ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list); +int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list); -enum ice_status +int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); void ice_fltr_remove_all(struct ice_vsi *vsi); -enum ice_status + +int +ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id, + u32 new_flags); +int ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, u32 new_flags); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index f8601d5b0b19..665a344fb9c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -16,6 +16,18 @@ struct ice_fwu_priv { /* Track which NVM banks to activate at the end of the update */ u8 activate_flags; + + /* Track the firmware response of the required reset to complete the + * flash update. 
+ * + * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required + * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required + * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required + */ + u8 reset_level; + + /* Track if EMP reset is available */ + u8 emp_reset_available; }; /** @@ -40,8 +52,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length) struct device *dev = context->dev; struct ice_pf *pf = priv->pf; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 *package_data; + int status; dev_dbg(dev, "Sending PLDM record package data to firmware\n"); @@ -54,9 +66,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length) kfree(package_data); if (status) { - dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware"); return -EIO; } @@ -203,8 +214,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon struct device *dev = context->dev; struct ice_pf *pf = priv->pf; struct ice_hw *hw = &pf->hw; - enum ice_status status; size_t length; + int status; switch (component->identifier) { case NVM_COMP_ID_OROM: @@ -240,9 +251,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon kfree(comp_tbl); if (status) { - dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware"); return -EIO; } @@ -259,6 +269,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon * @block_size: size of the block to write, up to 4k * @block: pointer to block of data to write * @last_cmd: whether this is the last command + * @reset_level: storage for reset level required * @extack: netlink extended ACK structure * * Write a block of data to a flash module, and await for the completion @@ -266,18 +277,24 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon * * Note this function assumes the caller has acquired the NVM resource. * + * On successful return, reset level indicates the device reset required to + * complete the update. + * + * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required + * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required + * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required + * * Returns: zero on success, or a negative error code on failure. 
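The three reset_level values documented above map directly onto how disruptive post-update activation is, and the finalize path further below downgrades EMPR to PERST when firmware reports EMP reset as unavailable. A host-side sketch of turning the cached level into a user-facing hint (the 0/1/2 values follow the comment above; the real constants are the ICE_AQC_NVM_*_FLAG definitions in the adminq headers):

#include <stdint.h>
#include <stdio.h>

enum reset_level {	/* illustrative stand-ins for ICE_AQC_NVM_*_FLAG */
	NVM_POR_FLAG = 0,	/* full power-on reset */
	NVM_PERST_FLAG = 1,	/* cold PCIe reset */
	NVM_EMPR_FLAG = 2,	/* EMP reset */
};

static const char *reset_hint(uint8_t level)
{
	switch (level) {
	case NVM_EMPR_FLAG:
		return "an EMP reset (devlink reload) is sufficient";
	case NVM_PERST_FLAG:
		return "reboot the system";
	case NVM_POR_FLAG:
	default:
		return "power cycle the system";
	}
}

int main(void)
{
	printf("to activate the new firmware: %s\n",
	       reset_hint(NVM_EMPR_FLAG));
	return 0;
}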
*/ static int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, u16 block_size, u8 *block, bool last_cmd, - struct netlink_ext_ack *extack) + u8 *reset_level, struct netlink_ext_ack *extack) { u16 completion_module, completion_retval; struct device *dev = ice_pf_to_dev(pf); struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; - enum ice_status status; u32 completion_offset; int err; @@ -286,11 +303,11 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n", block_size, module, offset); - status = ice_aq_update_nvm(hw, module, offset, block_size, block, - last_cmd, 0, NULL); - if (status) { - dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n", - module, block_size, offset, ice_stat_str(status), + err = ice_aq_update_nvm(hw, module, offset, block_size, block, + last_cmd, 0, NULL); + if (err) { + dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n", + module, block_size, offset, err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module"); return -EIO; @@ -338,6 +355,24 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, return -EIO; } + /* For the last command to write the NVM bank, newer versions of + * firmware indicate the required level of reset to complete + * activation of firmware. If the firmware supports this, cache the + * response for indicating to the user later. Otherwise, assume that + * a full power cycle is required. + */ + if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) { + if (hw->dev_caps.common_cap.pcie_reset_avoidance) { + *reset_level = (event.desc.params.nvm.cmd_flags & + ICE_AQC_NVM_RESET_LVL_M); + dev_dbg(dev, "Firmware reported required reset level as %u\n", + *reset_level); + } else { + *reset_level = ICE_AQC_NVM_POR_FLAG; + dev_dbg(dev, "Firmware doesn't support indicating required reset level. 
Assuming a power cycle is required\n"); + } + } + return 0; } @@ -348,6 +383,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, * @component: the name of the component being updated * @image: buffer of image data to write to the NVM * @length: length of the buffer + * @reset_level: storage for reset level required * @extack: netlink extended ACK structure * * Loop over the data for a given NVM module and program it in 4 Kb @@ -360,7 +396,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, */ static int ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component, - const u8 *image, u32 length, + const u8 *image, u32 length, u8 *reset_level, struct netlink_ext_ack *extack) { struct device *dev = ice_pf_to_dev(pf); @@ -394,7 +430,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component, memcpy(block, image + offset, block_size); err = ice_write_one_nvm_block(pf, module, offset, block_size, - block, last_cmd, extack); + block, last_cmd, reset_level, + extack); if (err) break; @@ -445,7 +482,6 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; struct devlink *devlink; - enum ice_status status; int err; dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module); @@ -456,10 +492,10 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT); - status = ice_aq_erase_nvm(hw, module, NULL); - if (status) { - dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n", - component, module, ice_stat_str(status), + err = ice_aq_erase_nvm(hw, module, NULL); + if (err) { + dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n", + component, module, err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module"); err = -EIO; @@ -511,6 +547,7 @@ out_notify_devlink: * ice_switch_flash_banks - Tell firmware to switch NVM banks * @pf: Pointer to the PF data structure * @activate_flags: flags used for the activation command + * @emp_reset_available: on return, indicates if EMP reset is available * @extack: netlink extended ACK structure * * Notify firmware to activate the newly written flash banks, and wait for the @@ -518,27 +555,43 @@ out_notify_devlink: * * Returns: zero on success or an error code on failure. 
*/ -static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, - struct netlink_ext_ack *extack) +static int +ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, + u8 *emp_reset_available, struct netlink_ext_ack *extack) { struct device *dev = ice_pf_to_dev(pf); struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; - enum ice_status status; u16 completion_retval; + u8 response_flags; int err; memset(&event, 0, sizeof(event)); - status = ice_nvm_write_activate(hw, activate_flags); - if (status) { - dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_nvm_write_activate(hw, activate_flags, &response_flags); + if (err) { + dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks"); return -EIO; } + /* Newer versions of firmware have support to indicate whether an EMP + * reset to reload firmware is available. For older firmware, EMP + * reset is always available. + */ + if (emp_reset_available) { + if (hw->dev_caps.common_cap.reset_restrict_support) { + *emp_reset_available = response_flags & ICE_AQC_NVM_EMPR_ENA; + dev_dbg(dev, "Firmware indicated that EMP reset is %s\n", + *emp_reset_available ? + "available" : "not available"); + } else { + *emp_reset_available = ICE_AQC_NVM_EMPR_ENA; + dev_dbg(dev, "Firmware does not support restricting EMP reset availability\n"); + } + } + err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ, &event); if (err) { @@ -579,6 +632,7 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) struct netlink_ext_ack *extack = priv->extack; struct ice_pf *pf = priv->pf; const char *name; + u8 *reset_level; u16 module; u8 flag; int err; @@ -587,16 +641,19 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) case NVM_COMP_ID_OROM: module = ICE_SR_1ST_OROM_BANK_PTR; flag = ICE_AQC_NVM_ACTIV_SEL_OROM; + reset_level = NULL; name = "fw.undi"; break; case NVM_COMP_ID_NVM: module = ICE_SR_1ST_NVM_BANK_PTR; flag = ICE_AQC_NVM_ACTIV_SEL_NVM; + reset_level = &priv->reset_level; name = "fw.mgmt"; break; case NVM_COMP_ID_NETLIST: module = ICE_SR_NETLIST_BANK_PTR; flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST; + reset_level = NULL; name = "fw.netlist"; break; default: @@ -616,7 +673,8 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) return err; return ice_write_nvm_module(pf, module, name, component->component_data, - component->component_size, extack); + component->component_size, reset_level, + extack); } /** @@ -634,110 +692,75 @@ static int ice_finalize_update(struct pldmfw *context) struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); struct netlink_ext_ack *extack = priv->extack; struct ice_pf *pf = priv->pf; + struct devlink *devlink; + int err; /* Finally, notify firmware to activate the written NVM banks */ - return ice_switch_flash_banks(pf, priv->activate_flags, extack); -} + err = ice_switch_flash_banks(pf, priv->activate_flags, + &priv->emp_reset_available, extack); + if (err) + return err; -static const struct pldmfw_ops ice_fwu_ops = { - .match_record = &pldmfw_op_pci_match_record, - .send_package_data = &ice_send_package_data, - .send_component_table = &ice_send_component_table, - .flash_component = &ice_flash_component, - .finalize_update = &ice_finalize_update, -}; + devlink = 
priv_to_devlink(pf); -/** - * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device - * @pf: private device driver structure - * @fw: firmware object pointing to the relevant firmware file - * @preservation: preservation level to request from firmware - * @extack: netlink extended ACK structure - * - * Parse the data for a given firmware file, verifying that it is a valid PLDM - * formatted image that matches this device. - * - * Extract the device record Package Data and Component Tables and send them - * to the firmware. Extract and write the flash data for each of the three - * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify - * firmware once the data is written to the inactive banks. - * - * Returns: zero on success or a negative error code on failure. - */ -int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, - u8 preservation, struct netlink_ext_ack *extack) -{ - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - struct ice_fwu_priv priv; - enum ice_status status; - int err; + /* If the required reset is EMPR, but EMPR is disabled, report that + * a reboot is required instead. + */ + if (priv->reset_level == ICE_AQC_NVM_EMPR_FLAG && + !priv->emp_reset_available) { + dev_dbg(ice_pf_to_dev(pf), "Firmware indicated EMP reset as sufficient, but EMP reset is disabled\n"); + priv->reset_level = ICE_AQC_NVM_PERST_FLAG; + } - switch (preservation) { - case ICE_AQC_NVM_PRESERVE_ALL: - case ICE_AQC_NVM_PRESERVE_SELECTED: - case ICE_AQC_NVM_NO_PRESERVATION: - case ICE_AQC_NVM_FACTORY_DEFAULT: + switch (priv->reset_level) { + case ICE_AQC_NVM_EMPR_FLAG: + devlink_flash_update_status_notify(devlink, + "Activate new firmware by devlink reload", + NULL, 0, 0); break; + case ICE_AQC_NVM_PERST_FLAG: + devlink_flash_update_status_notify(devlink, + "Activate new firmware by rebooting the system", + NULL, 0, 0); + break; + case ICE_AQC_NVM_POR_FLAG: default: - WARN(1, "Unexpected preservation level request %u", preservation); - return -EINVAL; - } - - memset(&priv, 0, sizeof(priv)); - - priv.context.ops = &ice_fwu_ops; - priv.context.dev = dev; - priv.extack = extack; - priv.pf = pf; - priv.activate_flags = preservation; - - status = ice_acquire_nvm(hw, ICE_RES_WRITE); - if (status) { - dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); - return -EIO; - } - - err = pldmfw_flash_image(&priv.context, fw); - if (err == -ENOENT) { - dev_err(dev, "Firmware image has no record matching this device\n"); - NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device"); - } else if (err) { - /* Do not set a generic extended ACK message here. A more - * specific message may already have been set by one of our - * ops. 
- */ - dev_err(dev, "Failed to flash PLDM image, err %d", err); + devlink_flash_update_status_notify(devlink, + "Activate new firmware by power cycling the system", + NULL, 0, 0); + break; } - ice_release_nvm(hw); + pf->fw_emp_reset_disabled = !priv->emp_reset_available; - return err; + return 0; } +static const struct pldmfw_ops ice_fwu_ops = { + .match_record = &pldmfw_op_pci_match_record, + .send_package_data = &ice_send_package_data, + .send_component_table = &ice_send_component_table, + .flash_component = &ice_flash_component, + .finalize_update = &ice_finalize_update, +}; + /** - * ice_check_for_pending_update - Check for a pending flash update + * ice_get_pending_updates - Check if the component has a pending update * @pf: the PF driver structure - * @component: if not NULL, the name of the component being updated - * @extack: Netlink extended ACK structure + * @pending: on return, bitmap of updates pending + * @extack: Netlink extended ACK * - * Check whether the device already has a pending flash update. If such an - * update is found, cancel it so that the requested update may proceed. + * Check if the device has any pending updates on any flash components. * - * Returns: zero on success, or a negative error code on failure. + * Returns: zero on success, or a negative error code on failure. Updates + * pending with the bitmap of pending updates. */ -int ice_check_for_pending_update(struct ice_pf *pf, const char *component, - struct netlink_ext_ack *extack) +int ice_get_pending_updates(struct ice_pf *pf, u8 *pending, + struct netlink_ext_ack *extack) { - struct devlink *devlink = priv_to_devlink(pf); struct device *dev = ice_pf_to_dev(pf); struct ice_hw_dev_caps *dev_caps; struct ice_hw *hw = &pf->hw; - enum ice_status status; - u8 pending = 0; int err; dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL); @@ -749,30 +772,60 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component, * may have changed, e.g. if an update was previously completed and * the system has not yet rebooted. */ - status = ice_discover_dev_caps(hw, dev_caps); - if (status) { + err = ice_discover_dev_caps(hw, dev_caps); + if (err) { NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities"); kfree(dev_caps); - return -EIO; + return err; } + *pending = 0; + if (dev_caps->common_cap.nvm_update_pending_nvm) { dev_info(dev, "The fw.mgmt flash component has a pending update\n"); - pending |= ICE_AQC_NVM_ACTIV_SEL_NVM; + *pending |= ICE_AQC_NVM_ACTIV_SEL_NVM; } if (dev_caps->common_cap.nvm_update_pending_orom) { dev_info(dev, "The fw.undi flash component has a pending update\n"); - pending |= ICE_AQC_NVM_ACTIV_SEL_OROM; + *pending |= ICE_AQC_NVM_ACTIV_SEL_OROM; } if (dev_caps->common_cap.nvm_update_pending_netlist) { dev_info(dev, "The fw.netlist flash component has a pending update\n"); - pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST; + *pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST; } kfree(dev_caps); + return 0; +} + +/** + * ice_cancel_pending_update - Cancel any pending update for a component + * @pf: the PF driver structure + * @component: if not NULL, the name of the component being updated + * @extack: Netlink extended ACK structure + * + * Cancel any pending update for the specified component. If component is + * NULL, all device updates will be canceled. + * + * Returns: zero on success, or a negative error code on failure. 
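ice_get_pending_updates() above folds the three per-component capability bits into one bitmap built from the ICE_AQC_NVM_ACTIV_SEL_* flags. A sketch of decoding such a bitmap; the bit positions used here are illustrative stand-ins, since the real flag values live in the adminq command definitions:

#include <stdint.h>
#include <stdio.h>

#define PENDING_NVM	(1u << 0)	/* stand-in for ACTIV_SEL_NVM */
#define PENDING_OROM	(1u << 1)	/* stand-in for ACTIV_SEL_OROM */
#define PENDING_NETLIST	(1u << 2)	/* stand-in for ACTIV_SEL_NETLIST */

static void report_pending(uint8_t pending)
{
	if (pending & PENDING_NVM)
		puts("fw.mgmt has a pending update");
	if (pending & PENDING_OROM)
		puts("fw.undi has a pending update");
	if (pending & PENDING_NETLIST)
		puts("fw.netlist has a pending update");
}

int main(void)
{
	report_pending(PENDING_NVM | PENDING_NETLIST);
	return 0;
}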
+ */ +static int +ice_cancel_pending_update(struct ice_pf *pf, const char *component, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + u8 pending; + int err; + + err = ice_get_pending_updates(pf, &pending, extack); + if (err) + return err; + /* If the flash_update request is for a specific component, ignore all * of the other components. */ @@ -798,17 +851,107 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component, "Canceling previous pending update", component, 0, 0); - status = ice_acquire_nvm(hw, ICE_RES_WRITE); - if (status) { - dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) { + dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); - return -EIO; + return err; } pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV; - err = ice_switch_flash_banks(pf, pending, extack); + err = ice_switch_flash_banks(pf, pending, NULL, extack); + + ice_release_nvm(hw); + + /* Since we've canceled the pending update, we no longer know if EMP + * reset is restricted. + */ + pf->fw_emp_reset_disabled = false; + + return err; +} + +/** + * ice_devlink_flash_update - Write a firmware image to the device + * @devlink: pointer to devlink associated with the device to update + * @params: devlink flash update parameters + * @extack: netlink extended ACK structure + * + * Parse the data for a given firmware file, verifying that it is a valid PLDM + * formatted image that matches this device. + * + * Extract the device record Package Data and Component Tables and send them + * to the firmware. Extract and write the flash data for each of the three + * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify + * firmware once the data is written to the inactive banks. + * + * Returns: zero on success or a negative error code on failure. 
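The devlink handler below derives the firmware preservation level from the netlink overwrite mask: an empty mask preserves everything, settings-only overwrite preserves the vital device identifiers, settings plus identifiers preserves nothing, and any other combination is rejected with -EOPNOTSUPP. The same decision table in isolation (the two mask bits are stand-ins for the DEVLINK_FLASH_OVERWRITE_* flags):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define OVERWRITE_SETTINGS	0x1	/* stand-in bit values */
#define OVERWRITE_IDENTIFIERS	0x2

enum preservation {
	PRESERVE_ALL,
	PRESERVE_SELECTED,
	NO_PRESERVATION,
};

static int pick_preservation(uint32_t mask, enum preservation *out)
{
	if (!mask)
		*out = PRESERVE_ALL;
	else if (mask == OVERWRITE_SETTINGS)
		*out = PRESERVE_SELECTED;
	else if (mask == (OVERWRITE_SETTINGS | OVERWRITE_IDENTIFIERS))
		*out = NO_PRESERVATION;
	else
		return -EOPNOTSUPP;	/* unsupported combination */
	return 0;
}

int main(void)
{
	enum preservation p;

	if (!pick_preservation(OVERWRITE_SETTINGS, &p))
		printf("preservation level: %d\n", p);
	return 0;
}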
+ */ +int ice_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + struct ice_fwu_priv priv; + u8 preservation; + int err; + + if (!params->overwrite_mask) { + /* preserve all settings and identifiers */ + preservation = ICE_AQC_NVM_PRESERVE_ALL; + } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) { + /* overwrite settings, but preserve the vital device identifiers */ + preservation = ICE_AQC_NVM_PRESERVE_SELECTED; + } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS | + DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) { + /* overwrite both settings and identifiers, preserve nothing */ + preservation = ICE_AQC_NVM_NO_PRESERVATION; + } else { + NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported"); + return -EOPNOTSUPP; + } + + if (!hw->dev_caps.common_cap.nvm_unified_update) { + NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); + return -EOPNOTSUPP; + } + + memset(&priv, 0, sizeof(priv)); + + priv.context.ops = &ice_fwu_ops; + priv.context.dev = dev; + priv.extack = extack; + priv.pf = pf; + priv.activate_flags = preservation; + + devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0); + + err = ice_cancel_pending_update(pf, NULL, extack); + if (err) + return err; + + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) { + dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); + return err; + } + + err = pldmfw_flash_image(&priv.context, params->fw); + if (err == -ENOENT) { + dev_err(dev, "Firmware image has no record matching this device\n"); + NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device"); + } else if (err) { + /* Do not set a generic extended ACK message here. A more + * specific message may already have been set by one of our + * ops. + */ + dev_err(dev, "Failed to flash PLDM image, err %d", err); + } ice_release_nvm(hw); diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h index c6390f6851ff..750574885716 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.h +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h @@ -4,9 +4,10 @@ #ifndef _ICE_FW_UPDATE_H_ #define _ICE_FW_UPDATE_H_ -int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, - u8 preservation, struct netlink_ext_ack *extack); -int ice_check_for_pending_update(struct ice_pf *pf, const char *component, - struct netlink_ext_ack *extack); +int ice_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack); +int ice_get_pending_updates(struct ice_pf *pf, u8 *pending, + struct netlink_ext_ack *extack); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index adcc9a251595..fc3580167e7b 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -288,7 +288,7 @@ int ice_plug_aux_dev(struct ice_pf *pf) adev->id = pf->aux_idx; adev->dev.release = ice_adev_release; adev->dev.parent = &pf->pdev->dev; - adev->name = IIDC_RDMA_ROCE_NAME; + adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? 
"roce" : "iwarp"; ret = auxiliary_device_init(adev); if (ret) { @@ -335,6 +335,6 @@ int ice_init_rdma(struct ice_pf *pf) dev_err(dev, "failed to reserve vectors for RDMA\n"); return ret; } - + pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; return ice_plug_aux_dev(pf); } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 40562600a8cf..1999b12708de 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -89,8 +89,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->rx_rings) goto err_rings; - /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */ - vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq), + /* txq_map needs to have enough space to track both Tx (stack) rings + * and XDP rings; at this point vsi->num_xdp_txq might not be set, + * so use num_possible_cpus() as we want to always provide XDP ring + * per CPU, regardless of queue count settings from user that might + * have come from ethtool's set_channels() callback; + */ + vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), sizeof(*vsi->txq_map), GFP_KERNEL); if (!vsi->txq_map) @@ -286,7 +291,7 @@ void ice_vsi_delete(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_vsi_ctx *ctxt; - enum ice_status status; + int status; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -300,8 +305,8 @@ void ice_vsi_delete(struct ice_vsi *vsi) status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); if (status) - dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", + vsi->vsi_num, status); kfree(ctxt); } @@ -704,15 +709,15 @@ bool ice_is_aux_ena(struct ice_pf *pf) static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; + int status; if (ice_is_safe_mode(pf)) return; status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); if (status) - dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n", + vsi->vsi_num, status); } /** @@ -1540,8 +1545,8 @@ ice_vsi_cfg_rss_exit: static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { @@ -1552,8 +1557,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); if (status) - dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", + vsi->vsi_num, status); } /** @@ -1572,8 +1577,8 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { @@ -1585,57 +1590,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + 
dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for IPv6 with input set IPv6 src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for sctp4 with input set IP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for sctp6 with input set IPv6 src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n", + vsi_num, status); } /** @@ -1744,22 +1749,21 @@ ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action) int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; - int err = 0; + int err; dev = ice_pf_to_dev(pf); - status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI); - if (!status) { + err = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI); + if (!err) { vsi->num_vlan--; - } else if (status == ICE_ERR_DOES_NOT_EXIST) { - dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n", - vid, vsi->vsi_num, ice_stat_str(status)); + } else if (err == -ENOENT) { + dev_dbg(dev, "Failed to remove VLAN %d 
on VSI %i, it does not exist, error: %d\n", + vid, vsi->vsi_num, err); + err = 0; } else { - dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n", - vid, vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n", + vid, vsi->vsi_num, err); } return err; @@ -2100,8 +2104,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -2119,12 +2122,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -2143,8 +2144,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; /* do not allow modifying VLAN stripping when a port VLAN is configured * on this VSI @@ -2172,12 +2172,10 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n", - ena, ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n", + ena, ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -2319,10 +2317,9 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL); if (status) { - netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n", + netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n", ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, - ice_stat_str(status), - ice_aq_str(pf->hw.adminq.sq_last_status)); + status, ice_aq_str(pf->hw.adminq.sq_last_status)); goto err_out; } @@ -2400,11 +2397,11 @@ clear_reg_idx: */ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) { - enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, - enum ice_sw_fwd_act_type act); + int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, + enum ice_sw_fwd_act_type act); struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth; @@ -2423,9 +2420,9 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) } if (status) - dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n", + dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", create ? "adding" : "removing", tx ? 
"TX" : "RX", - vsi->vsi_num, ice_stat_str(status)); + vsi->vsi_num, status); } /** @@ -2445,7 +2442,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) struct ice_port_info *port_info; struct ice_pf *pf = vsi->back; u32 agg_node_id_start = 0; - enum ice_status status; + int status; /* create (as needed) scheduler aggregator node and move VSI into * corresponding aggregator node @@ -2571,7 +2568,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; struct ice_vsi *vsi; int ret, i; @@ -2720,11 +2716,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, } dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); - if (status) { - dev_err(dev, "VSI %d failed lan queue config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (ret) { + dev_err(dev, "VSI %d failed lan queue config, error %d\n", + vsi->vsi_num, ret); goto unroll_clear_rings; } @@ -3031,8 +3027,8 @@ void ice_napi_del(struct ice_vsi *vsi) */ int ice_vsi_release(struct ice_vsi *vsi) { - enum ice_status err; struct ice_pf *pf; + int err; if (!vsi->back) return -ENODEV; @@ -3268,7 +3264,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) int prev_num_q_vectors = 0; struct ice_vf *vf = NULL; enum ice_vsi_type vtype; - enum ice_status status; struct ice_pf *pf; int ret, i; @@ -3416,14 +3411,14 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) /* If MQPRIO is set, means channel code path, hence for main * VSI's, use TC as 1 */ - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); else - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, - vsi->tc_cfg.ena_tc, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); - if (status) { - dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); + if (ret) { + dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n", + vsi->vsi_num, ret); if (init_vsi) { ret = -EIO; goto err_vectors; @@ -3658,7 +3653,6 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; struct ice_vsi_ctx *ctx; - enum ice_status status; struct device *dev; int i, ret = 0; u8 num_tc = 0; @@ -3700,25 +3694,22 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) /* must to indicate which section of VSI context are being modified */ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); - status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); - if (status) { + ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); + if (ret) { dev_info(dev, "Failed VSI Update\n"); - ret = -EIO; goto out; } if (vsi->type == ICE_VSI_PF && test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, - max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); else - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, - vsi->tc_cfg.ena_tc, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); - if (status) { - dev_err(dev, "VSI %d failed TC config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); - ret = -EIO; + if (ret) { + dev_err(dev, "VSI %d failed TC 
config, error %d\n", + vsi->vsi_num, ret); goto out; } ice_vsi_update_q_map(vsi, ctx); @@ -3771,39 +3762,6 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes) } /** - * ice_status_to_errno - convert from enum ice_status to Linux errno - * @err: ice_status value to convert - */ -int ice_status_to_errno(enum ice_status err) -{ - switch (err) { - case ICE_SUCCESS: - return 0; - case ICE_ERR_DOES_NOT_EXIST: - return -ENOENT; - case ICE_ERR_OUT_OF_RANGE: - case ICE_ERR_AQ_ERROR: - case ICE_ERR_AQ_TIMEOUT: - case ICE_ERR_AQ_EMPTY: - case ICE_ERR_AQ_FW_CRITICAL: - return -EIO; - case ICE_ERR_PARAM: - case ICE_ERR_INVAL_SIZE: - return -EINVAL; - case ICE_ERR_NO_MEMORY: - return -ENOMEM; - case ICE_ERR_MAX_LIMIT: - return -EAGAIN; - case ICE_ERR_RESET_ONGOING: - return -EBUSY; - case ICE_ERR_AQ_FULL: - return -ENOSPC; - default: - return -EINVAL; - } -} - -/** * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used * @sw: switch to check if its default forwarding VSI is free * @@ -3844,8 +3802,8 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) */ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) { - enum ice_status status; struct device *dev; + int status; if (!sw || !vsi) return -EINVAL; @@ -3868,9 +3826,9 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX); if (status) { - dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n", - vsi->vsi_num, ice_stat_str(status)); - return -EIO; + dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n", + vsi->vsi_num, status); + return status; } sw->dflt_vsi = vsi; @@ -3890,8 +3848,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) int ice_clear_dflt_vsi(struct ice_sw *sw) { struct ice_vsi *dflt_vsi; - enum ice_status status; struct device *dev; + int status; if (!sw) return -EINVAL; @@ -3907,8 +3865,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw) status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false, ICE_FLTR_RX); if (status) { - dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n", - dflt_vsi->vsi_num, ice_stat_str(status)); + dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n", + dflt_vsi->vsi_num, status); return -EIO; } @@ -3982,8 +3940,8 @@ int ice_get_link_speed_kbps(struct ice_vsi *vsi) int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; int speed; dev = ice_pf_to_dev(pf); @@ -4009,7 +3967,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", @@ -4021,7 +3979,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) if (status) { dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n", ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", @@ -4043,8 +4001,8 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; int speed; dev = ice_pf_to_dev(pf); @@ -4070,7 +4028,7 @@ int 
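With ice_status_to_errno() deleted above, no translation layer remains: helpers hand back Linux errnos and callers forward them unchanged rather than flattening everything to -EIO. A sketch of the pattern; example_cfg_bw() is a hypothetical stand-in for the scheduler call:

/* stand-in for the firmware/scheduler call; returns 0 or a -errno */
static int example_cfg_bw(struct ice_vsi *vsi, u64 rate)
{
	return (vsi && rate) ? 0 : -EINVAL;
}

static int example_set_limit(struct device *dev, struct ice_vsi *vsi, u64 rate)
{
	int status = example_cfg_bw(vsi, rate);

	if (status)
		dev_err(dev, "failed to set Tx rate (%llu Kbps), error %d\n",
			rate, status);
	return status;	/* propagated as-is; previously flattened to -EIO */
}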
ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n", max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n", @@ -4082,7 +4040,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) if (status) { dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n", ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n", @@ -4102,7 +4060,7 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) struct device *dev = ice_pf_to_dev(vsi->back); struct ice_port_info *pi = vsi->port_info; struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; if (vsi->type != ICE_VSI_PF) return -EINVAL; @@ -4114,16 +4072,16 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) * a success code. Return an error if FW returns an error code other * than ICE_AQ_RC_EMODE */ - if (status == ICE_ERR_AQ_ERROR) { + if (status == -EIO) { if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) - dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n", - (ena ? "ON" : "OFF"), ice_stat_str(status), + dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", + (ena ? "ON" : "OFF"), status, ice_aq_str(hw->adminq.sq_last_status)); } else if (status) { - dev_err(dev, "can't set link to %s, err %s aq_err %s\n", - (ena ? "ON" : "OFF"), ice_stat_str(status), + dev_err(dev, "can't set link to %s, err %d aq_err %s\n", + (ena ? "ON" : "OFF"), status, ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; + return status; } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 6c803407b67d..b2ed189527d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -103,15 +103,11 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes); void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes); void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); - -int ice_status_to_errno(enum ice_status err); - void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); void ice_write_itr(struct ice_ring_container *rc, u16 itr); void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); -enum ice_status -ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); +int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); bool ice_is_safe_mode(struct ice_pf *pf); bool ice_is_aux_ena(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f099797f35e3..865f2231bb24 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -155,7 +155,6 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) */ static int ice_init_mac_fltr(struct ice_pf *pf) { - enum ice_status status; struct ice_vsi *vsi; u8 *perm_addr; @@ -164,11 +163,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf) return -EINVAL; perm_addr = vsi->port_info->mac.perm_addr; - status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); - if (status) - return -EIO; - - return 0; + return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); } /** @@ -237,36 +232,43 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) } /** - * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF 
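This hunk replaces ice_cfg_promisc() with dedicated set/clear helpers (definitions continue just below). A sketch of what the split buys at a call site, assuming the helpers are visible (in the patch they are static to ice_main.c):

static int example_toggle_mcast_promisc(struct ice_vsi *vsi, bool enable)
{
	/* intent is explicit in the callee name instead of a bool flag */
	return enable ? ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS) :
			ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
}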
+ * ice_set_promisc - Enable promiscuous mode for a given PF * @vsi: the VSI being configured * @promisc_m: mask of promiscuous config bits - * @set_promisc: enable or disable promisc flag request * */ -static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) +static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) { - struct ice_hw *hw = &vsi->back->hw; - enum ice_status status = 0; + int status; if (vsi->type != ICE_VSI_PF) return 0; - if (vsi->num_vlan > 1) { - status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, - set_promisc); - } else { - if (set_promisc) - status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, - 0); - else - status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, - 0); - } + if (vsi->num_vlan > 1) + status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); + else + status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); + return status; +} - if (status) - return -EIO; +/** + * ice_clear_promisc - Disable promiscuous mode for a given PF + * @vsi: the VSI being configured + * @promisc_m: mask of promiscuous config bits + * + */ +static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) +{ + int status; - return 0; + if (vsi->type != ICE_VSI_PF) + return 0; + + if (vsi->num_vlan > 1) + status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); + else + status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); + return status; } /** @@ -282,10 +284,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) bool promisc_forced_on = false; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status = 0; u32 changed_flags = 0; u8 promisc_m; - int err = 0; + int err; if (!vsi->netdev) return -EINVAL; @@ -315,25 +316,23 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } /* Remove MAC addresses in the unsync list */ - status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); + err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); ice_fltr_free_list(dev, &vsi->tmp_unsync_list); - if (status) { + if (err) { netdev_err(netdev, "Failed to delete MAC filters\n"); /* if we failed because of alloc failures, just bail */ - if (status == ICE_ERR_NO_MEMORY) { - err = -ENOMEM; + if (err == -ENOMEM) goto out; - } } /* Add MAC addresses in the sync list */ - status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); + err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); ice_fltr_free_list(dev, &vsi->tmp_sync_list); /* If filter is added successfully or already exists, do not go into * 'if' condition and report it as error. Instead continue processing * rest of the function. */ - if (status && status != ICE_ERR_ALREADY_EXISTS) { + if (err && err != -EEXIST) { netdev_err(netdev, "Failed to add MAC filters\n"); /* If there is no more space for new umac filters, VSI * should go into promiscuous mode. 
There should be some @@ -346,10 +345,10 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", vsi->vsi_num); } else { - err = -EIO; goto out; } } + err = 0; /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { if (vsi->current_netdev_flags & IFF_ALLMULTI) { @@ -358,7 +357,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) else promisc_m = ICE_MCAST_PROMISC_BITS; - err = ice_cfg_promisc(vsi, promisc_m, true); + err = ice_set_promisc(vsi, promisc_m); if (err) { netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", vsi->vsi_num); @@ -372,7 +371,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) else promisc_m = ICE_MCAST_PROMISC_BITS; - err = ice_cfg_promisc(vsi, promisc_m, false); + err = ice_clear_promisc(vsi, promisc_m); if (err) { netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", vsi->vsi_num); @@ -396,6 +395,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) ~IFF_PROMISC; goto out_promisc; } + err = 0; ice_cfg_vlan_pruning(vsi, false); } } else { @@ -695,12 +695,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) { struct ice_aqc_get_phy_caps_data *caps; const char *an_advertised; - enum ice_status status; const char *fec_req; const char *speed; const char *fec; const char *fc; const char *an; + int status; if (!vsi) return; @@ -1020,10 +1020,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, { struct device *dev = ice_pf_to_dev(pf); struct ice_phy_info *phy_info; - enum ice_status status; struct ice_vsi *vsi; u16 old_link_speed; bool old_link; + int status; phy_info = &pi->phy; phy_info->link_info_old = phy_info->link_info; @@ -1036,8 +1036,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, */ status = ice_update_link_info(pi); if (status) - dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n", - pi->lport, ice_stat_str(status), + dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n", + pi->lport, status, ice_aq_str(pi->hw->adminq.sq_last_status)); ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); @@ -1407,15 +1407,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) return 0; do { - enum ice_status ret; u16 opcode; + int ret; ret = ice_clean_rq_elem(hw, cq, &event, &pending); - if (ret == ICE_ERR_AQ_NO_WORK) + if (ret == -EALREADY) break; if (ret) { - dev_err(dev, "%s Receive Queue event error %s\n", qtype, - ice_stat_str(ret)); + dev_err(dev, "%s Receive Queue event error %d\n", qtype, + ret); break; } @@ -1866,19 +1866,17 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi) { struct ice_aqc_get_phy_caps_data *pcaps; struct ice_pf *pf = pi->hw->back; - enum ice_status status; - int err = 0; + int err; pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, - NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, + pcaps, NULL); - if (status) { + if (err) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); - err = -EIO; goto out; } @@ -1977,8 +1975,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) struct ice_aqc_get_phy_caps_data *pcaps; struct ice_phy_info *phy = &pi->phy; struct ice_pf *pf = pi->hw->back; - enum ice_status status; - int err = 0; + int err; if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) return 
-EIO; @@ -1988,14 +1985,13 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) return -ENOMEM; if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - pcaps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + pcaps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - pcaps, NULL); - if (status) { + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); + if (err) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); - err = -EIO; goto err_out; } @@ -2049,8 +2045,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) struct ice_aqc_set_phy_cfg_data *cfg; struct ice_phy_info *phy = &pi->phy; struct ice_pf *pf = vsi->back; - enum ice_status status; - int err = 0; + int err; /* Ensure we have media as we cannot configure a medialess port */ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) @@ -2070,12 +2065,11 @@ static int ice_configure_phy(struct ice_vsi *vsi) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, - NULL); - if (status) { - dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (err) { + dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n", + vsi->vsi_num, err); goto done; } @@ -2089,15 +2083,14 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Use PHY topology as baseline for configuration */ memset(pcaps, 0, sizeof(*pcaps)); if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - pcaps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + pcaps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - pcaps, NULL); - if (status) { - dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); + if (err) { + dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n", + vsi->vsi_num, err); goto done; } @@ -2150,12 +2143,10 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Enable link and link update */ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; - status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); - if (status) { - dev_err(dev, "Failed to set phy config, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; - } + err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); + if (err) + dev_err(dev, "Failed to set phy config, VSI %d error %d\n", + vsi->vsi_num, err); kfree(cfg); done: @@ -2549,9 +2540,9 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) .vsi_map_offset = vsi->alloc_txq, .mapping_mode = ICE_VSI_MAP_CONTIG }; - enum ice_status status; struct device *dev; int i, v_idx; + int status; dev = ice_pf_to_dev(pf); vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, @@ -2605,11 +2596,22 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { - dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", - ice_stat_str(status)); + dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", + status); goto clear_xdp_rings; } - ice_vsi_assign_bpf_prog(vsi, 
prog); + + /* assign the prog only when it's not already present on VSI; + * this flow is a subject of both ethtool -L and ndo_bpf flows; + * VSI rebuild that happens under ethtool -L can expose us to + * the bpf_prog refcount issues as we would be swapping same + * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put + * on it as it would be treated as an 'old_prog'; for ndo_bpf + * this is not harmful as dev_xdp_install bumps the refcount + * before calling the op exposed by the driver; + */ + if (!ice_is_xdp_ena_vsi(vsi)) + ice_vsi_assign_bpf_prog(vsi, prog); return 0; clear_xdp_rings: @@ -2785,6 +2787,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); } else { + /* safe to call even when prog == vsi->xdp_prog as + * dev_xdp_install in net/core/dev.c incremented prog's + * refcount so corresponding bpf_prog_put won't cause + * underflow + */ ice_vsi_assign_bpf_prog(vsi, prog); } @@ -3504,7 +3511,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi; - int status = 0; + int status; if (ice_is_reset_in_progress(pf->state)) return -EBUSY; @@ -3517,10 +3524,8 @@ static int ice_setup_pf_sw(struct ice_pf *pf) INIT_LIST_HEAD(&vsi->ch_list); status = ice_cfg_netdev(vsi); - if (status) { - status = -ENODEV; + if (status) goto unroll_vsi_setup; - } /* netdev has to be configured before setting frame size */ ice_vsi_cfg_frame_size(vsi); @@ -3544,14 +3549,13 @@ static int ice_setup_pf_sw(struct ice_pf *pf) if (status) { dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n", vsi->vsi_num, status); - status = -EINVAL; goto unroll_napi_add; } status = ice_init_mac_fltr(pf); if (status) goto free_cpu_rx_map; - return status; + return 0; free_cpu_rx_map: ice_free_cpu_rx_rmap(vsi); @@ -4007,8 +4011,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) { struct ice_vsi *vsi = ice_get_main_vsi(pf); struct ice_vsi_ctx *ctxt; - enum ice_status status; struct ice_hw *hw; + int status; if (!vsi) return; @@ -4038,9 +4042,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); } else { vsi->info.sec_flags = ctxt->info.sec_flags; vsi->info.sw_flags2 = ctxt->info.sw_flags2; @@ -4053,109 +4056,80 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) /** * ice_log_pkg_init - log result of DDP package load * @hw: pointer to hardware info - * @status: status of package load + * @state: state of package load */ -static void -ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) +static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) { - struct ice_pf *pf = (struct ice_pf *)hw->back; - struct device *dev = ice_pf_to_dev(pf); + struct ice_pf *pf = hw->back; + struct device *dev; - switch (*status) { - case ICE_SUCCESS: - /* The package download AdminQ command returned success because - * this download succeeded or ICE_ERR_AQ_NO_WORK since there is - * already a package loaded on the device. 
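The XDP comment earlier in this hunk describes a refcount hazard: during an ethtool -L rebuild the VSI already owns vsi->xdp_prog, so re-assigning the same pointer and releasing the "old" one would drop a reference the driver never took. A sketch of that guard (illustrative wrapper around the static helpers):

static void example_assign_prog_once(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	/* rebuild path: prog is already installed, leave refcounts alone */
	if (ice_is_xdp_ena_vsi(vsi))
		return;

	/* ndo_bpf path: the core took a reference before calling the op */
	ice_vsi_assign_bpf_prog(vsi, prog);
}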
- */ - if (hw->pkg_ver.major == hw->active_pkg_ver.major && - hw->pkg_ver.minor == hw->active_pkg_ver.minor && - hw->pkg_ver.update == hw->active_pkg_ver.update && - hw->pkg_ver.draft == hw->active_pkg_ver.draft && - !memcmp(hw->pkg_name, hw->active_pkg_name, - sizeof(hw->pkg_name))) { - if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) - dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft); - else - dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft); - } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || - hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { - dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); - *status = ICE_ERR_NOT_SUPPORTED; - } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { - dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft, - hw->pkg_name, - hw->pkg_ver.major, - hw->pkg_ver.minor, - hw->pkg_ver.update, - hw->pkg_ver.draft); - } else { - dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); - *status = ICE_ERR_NOT_SUPPORTED; - } + dev = ice_pf_to_dev(pf); + + switch (state) { + case ICE_DDP_PKG_SUCCESS: + dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + break; + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + break; + case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: + dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); break; - case ICE_ERR_FW_DDP_MISMATCH: + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. 
The package file found by the driver: '%s' version %d.%d.%d.%d.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft, + hw->pkg_name, + hw->pkg_ver.major, + hw->pkg_ver.minor, + hw->pkg_ver.update, + hw->pkg_ver.draft); + break; + case ICE_DDP_PKG_FW_MISMATCH: dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); break; - case ICE_ERR_BUF_TOO_SHORT: - case ICE_ERR_CFG: + case ICE_DDP_PKG_INVALID_FILE: dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); break; - case ICE_ERR_NOT_SUPPORTED: - /* Package File version not supported */ - if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || - (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) - dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); - else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || - (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) - dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", - ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: + dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); break; - case ICE_ERR_AQ_ERROR: - switch (hw->pkg_dwnld_status) { - case ICE_AQ_RC_ENOSEC: - case ICE_AQ_RC_EBADSIG: - dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); - return; - case ICE_AQ_RC_ESVN: - dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); - return; - case ICE_AQ_RC_EBADMAN: - case ICE_AQ_RC_EBADBUF: - dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); + case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: + dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: + dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: + dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_LOAD_ERROR: + dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); /* poll for reset to complete */ if (ice_check_reset(hw)) dev_err(dev, "Error resetting device. Please reload the driver\n"); - return; - default: - break; - } - fallthrough; - default: - dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", - *status); break; + case ICE_DDP_PKG_ERR: + default: + dev_err(dev, "An unknown error occurred when loading the DDP package. 
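A hedged guess at what the ice_is_init_pkg_successful() predicate used below treats as success, consistent with the states above that log at informational rather than error level:

static bool example_pkg_load_ok(enum ice_ddp_state state)
{
	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		return true;
	default:
		return false;	/* every other state lands in Safe Mode */
	}
}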
Entering Safe Mode.\n"); } } @@ -4170,24 +4144,24 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) static void ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) { - enum ice_status status = ICE_ERR_PARAM; + enum ice_ddp_state state = ICE_DDP_PKG_ERR; struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; /* Load DDP Package */ if (firmware && !hw->pkg_copy) { - status = ice_copy_and_init_pkg(hw, firmware->data, - firmware->size); - ice_log_pkg_init(hw, &status); + state = ice_copy_and_init_pkg(hw, firmware->data, + firmware->size); + ice_log_pkg_init(hw, state); } else if (!firmware && hw->pkg_copy) { /* Reload package during rebuild after CORER/GLOBR reset */ - status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); - ice_log_pkg_init(hw, &status); + state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); + ice_log_pkg_init(hw, state); } else { dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); } - if (status) { + if (!ice_is_init_pkg_successful(state)) { /* Safe Mode */ clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); return; @@ -4218,9 +4192,9 @@ static void ice_verify_cacheline_size(struct ice_pf *pf) * ice_send_version - update firmware with driver version * @pf: PF struct * - * Returns ICE_SUCCESS on success, else error code + * Returns 0 on success, else error code */ -static enum ice_status ice_send_version(struct ice_pf *pf) +static int ice_send_version(struct ice_pf *pf) { struct ice_driver_ver dv; @@ -4510,7 +4484,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) * true */ if (ice_is_safe_mode(pf)) { - dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); /* we already got function/device capabilities but these don't * reflect what the driver needs to do in safe mode. 
Instead of * adding conditional logic everywhere to ignore these @@ -4705,6 +4678,10 @@ probe_done: if (err) goto err_netdev_reg; + err = ice_devlink_register_params(pf); + if (err) + goto err_netdev_reg; + /* ready to go, so clear down state bit */ clear_bit(ICE_DOWN, pf->state); if (ice_is_aux_ena(pf)) { @@ -4712,7 +4689,7 @@ probe_done: if (pf->aux_idx < 0) { dev_err(dev, "Failed to allocate device ID for AUX driver\n"); err = -ENOMEM; - goto err_netdev_reg; + goto err_devlink_reg_param; } err = ice_init_rdma(pf); @@ -4731,6 +4708,8 @@ probe_done: err_init_aux_unroll: pf->adev = NULL; ida_free(&ice_aux_ida, pf->aux_idx); +err_devlink_reg_param: + ice_devlink_unregister_params(pf); err_netdev_reg: err_send_version_unroll: ice_vsi_release_all(pf); @@ -4787,9 +4766,9 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 mac_addr[ETH_ALEN]; struct ice_vsi *vsi; + int status; u8 flags; if (!pf->wol_ena) @@ -4811,9 +4790,8 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf) status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); if (status) - dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); } /** @@ -4845,6 +4823,7 @@ static void ice_remove(struct pci_dev *pdev) ice_unplug_aux_dev(pf); if (pf->aux_idx >= 0) ida_free(&ice_aux_ida, pf->aux_idx); + ice_devlink_unregister_params(pf); set_bit(ICE_DOWN, pf->state); mutex_destroy(&(&pf->hw)->fdir_fltr_lock); @@ -5351,11 +5330,10 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct sockaddr *addr = pi; - enum ice_status status; u8 old_mac[ETH_ALEN]; u8 flags = 0; - int err = 0; u8 *mac; + int err; mac = (u8 *)addr->sa_data; @@ -5387,22 +5365,22 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) netif_addr_unlock_bh(netdev); /* Clean up old MAC filter. Not an error if old filter doesn't exist */ - status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); - if (status && status != ICE_ERR_DOES_NOT_EXIST) { + err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); + if (err && err != -ENOENT) { err = -EADDRNOTAVAIL; goto err_update_filters; } /* Add filter for new MAC. If filter exists, return success */ - status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); - if (status == ICE_ERR_ALREADY_EXISTS) + err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); + if (err == -EEXIST) /* Although this MAC filter is already present in hardware it's * possible in some cases (e.g. bonding) that dev_addr was * modified outside of the driver and needs to be restored back * to this value. */ netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); - else if (status) + else if (err) /* error if the new filter addition failed */ err = -EADDRNOTAVAIL; @@ -5421,10 +5399,10 @@ err_update_filters: /* write new MAC address to the firmware */ flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; - status = ice_aq_manage_mac_write(hw, mac, flags, NULL); - if (status) { - netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n", - mac, ice_stat_str(status)); + err = ice_aq_manage_mac_write(hw, mac, flags, NULL); + if (err) { + netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed error %d\n", + mac, err); } return 0; } @@ -5466,8 +5444,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - enum ice_status status; u16 q_handle; + int status; u8 tc; /* Validate maxrate requested is within permitted range */ @@ -5487,13 +5465,11 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) else status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, q_handle, ICE_MAX_BW, maxrate * 1000); - if (status) { - netdev_err(netdev, "Unable to set Tx max rate, error %s\n", - ice_stat_str(status)); - return -EIO; - } + if (status) + netdev_err(netdev, "Unable to set Tx max rate, error %d\n", + status); - return 0; + return status; } /** @@ -5865,6 +5841,9 @@ static int ice_up_complete(struct ice_vsi *vsi) netif_carrier_on(vsi->netdev); } + /* clear this now, and the first stats read will be used as baseline */ + vsi->stat_offsets_loaded = false; + ice_service_task_schedule(pf); return 0; @@ -5911,14 +5890,15 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats st /** * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters * @vsi: the VSI to be updated + * @vsi_stats: the stats struct to be updated * @rings: rings to work on * @count: number of rings */ static void -ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings, - u16 count) +ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, + struct rtnl_link_stats64 *vsi_stats, + struct ice_tx_ring **rings, u16 count) { - struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; u16 i; for (i = 0; i < count; i++) { @@ -5942,15 +5922,13 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings, */ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) { - struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; + struct rtnl_link_stats64 *vsi_stats; u64 pkts, bytes; int i; - /* reset netdev stats */ - vsi_stats->tx_packets = 0; - vsi_stats->tx_bytes = 0; - vsi_stats->rx_packets = 0; - vsi_stats->rx_bytes = 0; + vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); + if (!vsi_stats) + return; /* reset non-netdev (extended) stats */ vsi->tx_restart = 0; @@ -5962,7 +5940,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) rcu_read_lock(); /* update Tx rings counters */ - ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); + ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, + vsi->num_txq); /* update Rx rings counters */ ice_for_each_rxq(vsi, i) { @@ -5977,10 +5956,17 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) /* update XDP Tx rings counters */ if (ice_is_xdp_ena_vsi(vsi)) - ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, + ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, vsi->num_xdp_txq); rcu_read_unlock(); + + vsi->net_stats.tx_packets = vsi_stats->tx_packets; + vsi->net_stats.tx_bytes = vsi_stats->tx_bytes; + vsi->net_stats.rx_packets = vsi_stats->rx_packets; + vsi->net_stats.rx_bytes = vsi_stats->rx_bytes; + + kfree(vsi_stats); } /** @@ -6243,14 +6229,15 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) /** * ice_down - Shutdown the connection * @vsi: The VSI being stopped + * + * Caller of this function is expected to set the vsi->state ICE_DOWN bit */ int ice_down(struct ice_vsi *vsi) { int i, tx_err, rx_err, link_err = 0; - /* Caller of this function is expected to set the - * vsi->state ICE_DOWN bit - */ + 
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); + if (vsi->netdev && vsi->type == ICE_VSI_PF) { netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); @@ -6517,7 +6504,6 @@ static void ice_vsi_release_all(struct ice_pf *pf) static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) { struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; int i, err; ice_for_each_vsi(pf, i) { @@ -6535,12 +6521,11 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) } /* replay filters for the VSI */ - status = ice_replay_vsi(&pf->hw, vsi->idx); - if (status) { - dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", - ice_stat_str(status), vsi->idx, - ice_vsi_type_str(type)); - return -EIO; + err = ice_replay_vsi(&pf->hw, vsi->idx); + if (err) { + dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", + err, vsi->idx, ice_vsi_type_str(type)); + return err; } /* Re-map HW VSI number, using VSI handle that has been @@ -6603,7 +6588,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status ret; int err; if (test_bit(ICE_DOWN, pf->state)) @@ -6611,10 +6595,17 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); - ret = ice_init_all_ctrlq(hw); - if (ret) { - dev_err(dev, "control queues init failed %s\n", - ice_stat_str(ret)); + if (reset_type == ICE_RESET_EMPR) { + /* If an EMP reset has occurred, any previously pending flash + * update will have completed. We no longer know whether or + * not the NVM update EMP reset is restricted. + */ + pf->fw_emp_reset_disabled = false; + } + + err = ice_init_all_ctrlq(hw); + if (err) { + dev_err(dev, "control queues init failed %d\n", err); goto err_init_ctrlq; } @@ -6628,10 +6619,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_load_pkg(NULL, pf); } - ret = ice_clear_pf_cfg(hw); - if (ret) { - dev_err(dev, "clear PF configuration failed %s\n", - ice_stat_str(ret)); + err = ice_clear_pf_cfg(hw); + if (err) { + dev_err(dev, "clear PF configuration failed %d\n", err); goto err_init_ctrlq; } @@ -6643,21 +6633,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_clear_pxe_mode(hw); - ret = ice_init_nvm(hw); - if (ret) { - dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret)); + err = ice_init_nvm(hw); + if (err) { + dev_err(dev, "ice_init_nvm failed %d\n", err); goto err_init_ctrlq; } - ret = ice_get_caps(hw); - if (ret) { - dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); + err = ice_get_caps(hw); + if (err) { + dev_err(dev, "ice_get_caps failed %d\n", err); goto err_init_ctrlq; } - ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); - if (ret) { - dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); + err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); + if (err) { + dev_err(dev, "set_mac_cfg failed %d\n", err); goto err_init_ctrlq; } @@ -6740,10 +6730,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_update_pf_netdev_link(pf); /* tell the firmware we are up */ - ret = ice_send_version(pf); - if (ret) { - dev_err(dev, "Rebuild failed due to error sending driver version: %s\n", - ice_stat_str(ret)); + err = ice_send_version(pf); + if (err) { + dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", + err); goto 
err_vsi_rebuild; } @@ -6924,78 +6914,6 @@ const char *ice_aq_str(enum ice_aq_err aq_err) } /** - * ice_stat_str - convert status err code to a string - * @stat_err: the status error code to convert - */ -const char *ice_stat_str(enum ice_status stat_err) -{ - switch (stat_err) { - case ICE_SUCCESS: - return "OK"; - case ICE_ERR_PARAM: - return "ICE_ERR_PARAM"; - case ICE_ERR_NOT_IMPL: - return "ICE_ERR_NOT_IMPL"; - case ICE_ERR_NOT_READY: - return "ICE_ERR_NOT_READY"; - case ICE_ERR_NOT_SUPPORTED: - return "ICE_ERR_NOT_SUPPORTED"; - case ICE_ERR_BAD_PTR: - return "ICE_ERR_BAD_PTR"; - case ICE_ERR_INVAL_SIZE: - return "ICE_ERR_INVAL_SIZE"; - case ICE_ERR_DEVICE_NOT_SUPPORTED: - return "ICE_ERR_DEVICE_NOT_SUPPORTED"; - case ICE_ERR_RESET_FAILED: - return "ICE_ERR_RESET_FAILED"; - case ICE_ERR_FW_API_VER: - return "ICE_ERR_FW_API_VER"; - case ICE_ERR_NO_MEMORY: - return "ICE_ERR_NO_MEMORY"; - case ICE_ERR_CFG: - return "ICE_ERR_CFG"; - case ICE_ERR_OUT_OF_RANGE: - return "ICE_ERR_OUT_OF_RANGE"; - case ICE_ERR_ALREADY_EXISTS: - return "ICE_ERR_ALREADY_EXISTS"; - case ICE_ERR_NVM: - return "ICE_ERR_NVM"; - case ICE_ERR_NVM_CHECKSUM: - return "ICE_ERR_NVM_CHECKSUM"; - case ICE_ERR_BUF_TOO_SHORT: - return "ICE_ERR_BUF_TOO_SHORT"; - case ICE_ERR_NVM_BLANK_MODE: - return "ICE_ERR_NVM_BLANK_MODE"; - case ICE_ERR_IN_USE: - return "ICE_ERR_IN_USE"; - case ICE_ERR_MAX_LIMIT: - return "ICE_ERR_MAX_LIMIT"; - case ICE_ERR_RESET_ONGOING: - return "ICE_ERR_RESET_ONGOING"; - case ICE_ERR_HW_TABLE: - return "ICE_ERR_HW_TABLE"; - case ICE_ERR_DOES_NOT_EXIST: - return "ICE_ERR_DOES_NOT_EXIST"; - case ICE_ERR_FW_DDP_MISMATCH: - return "ICE_ERR_FW_DDP_MISMATCH"; - case ICE_ERR_AQ_ERROR: - return "ICE_ERR_AQ_ERROR"; - case ICE_ERR_AQ_TIMEOUT: - return "ICE_ERR_AQ_TIMEOUT"; - case ICE_ERR_AQ_FULL: - return "ICE_ERR_AQ_FULL"; - case ICE_ERR_AQ_NO_WORK: - return "ICE_ERR_AQ_NO_WORK"; - case ICE_ERR_AQ_EMPTY: - return "ICE_ERR_AQ_EMPTY"; - case ICE_ERR_AQ_FW_CRITICAL: - return "ICE_ERR_AQ_FW_CRITICAL"; - } - - return "ICE_ERR_UNKNOWN"; -} - -/** * ice_set_rss_lut - Set RSS LUT * @vsi: Pointer to VSI structure * @lut: Lookup table * @lut_size: Lookup table size @@ -7007,7 +6925,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) { struct ice_aq_get_set_rss_lut_params params = {}; struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!lut) return -EINVAL; @@ -7018,14 +6936,11 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) params.lut = lut; status = ice_aq_set_rss_lut(hw, &params); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7038,20 +6953,17 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) { struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!seed) return -EINVAL; status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return
status; } /** @@ -7066,7 +6978,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) { struct ice_aq_get_set_rss_lut_params params = {}; struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!lut) return -EINVAL; @@ -7077,14 +6989,11 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) params.lut = lut; status = ice_aq_get_rss_lut(hw, &params); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7097,20 +7006,17 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) { struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!seed) return -EINVAL; status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7151,8 +7057,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) struct ice_aqc_vsi_props *vsi_props; struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; vsi_props = &vsi->info; @@ -7170,12 +7075,10 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", - bmode, ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", + bmode, ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } /* Update sw flags for book keeping */ @@ -7207,7 +7110,6 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, struct ice_pf *pf = np->vsi->back; struct nlattr *attr, *br_spec; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct ice_sw *pf_sw; int rem, v, err = 0; @@ -7241,14 +7143,14 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, /* Update the unicast switch filter rules for the corresponding * switch of the netdev */ - status = ice_update_sw_rule_bridge_mode(hw); - if (status) { - netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", - mode, ice_stat_str(status), + err = ice_update_sw_rule_bridge_mode(hw); + if (err) { + netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n", + mode, err, ice_aq_str(hw->adminq.sq_last_status)); /* revert hw->evb_veb */ hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); - return -EIO; + return err; } pf_sw->bridge_mode = mode; @@ -8423,7 +8325,6 @@ int ice_open_internal(struct net_device *netdev) struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_port_info *pi; - enum ice_status
status; int err; if (test_bit(ICE_NEEDS_RESTART, pf->state)) { @@ -8434,11 +8335,10 @@ int ice_open_internal(struct net_device *netdev) netif_carrier_off(netdev); pi = vsi->port_info; - status = ice_update_link_info(pi); - if (status) { - netdev_err(netdev, "Failed to get link info, error %s\n", - ice_stat_str(status)); - return -EIO; + err = ice_update_link_info(pi); + if (err) { + netdev_err(netdev, "Failed to get link info, error %d\n", err); + return err; } ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index fee37a5844cf..cd739a2c64e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -16,7 +16,7 @@ * * Read the NVM using the admin queue commands (0x0701) */ -static enum ice_status +static int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, bool read_shadow_ram, struct ice_sq_cd *cd) @@ -27,7 +27,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, cmd = &desc.params.nvm; if (offset > ICE_AQC_NVM_MAX_OFFSET) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); @@ -60,21 +60,21 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, * Returns a status code on failure. Note that the data pointer may be * partially updated if some reads succeed before a failure. */ -enum ice_status +int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram) { - enum ice_status status; u32 inlen = *length; u32 bytes_read = 0; bool last_cmd; + int status; *length = 0; /* Verify the length of the read if this is for the Shadow RAM */ if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) { ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n"); - return ICE_ERR_PARAM; + return -EINVAL; } do { @@ -119,7 +119,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, * * Update the NVM using the admin queue commands (0x0703) */ -enum ice_status +int ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, u8 command_flags, struct ice_sq_cd *cd) @@ -131,7 +131,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); @@ -158,8 +158,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, * * Erase the NVM sector using the admin queue commands (0x0702) */ -enum ice_status -ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) +int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; @@ -184,12 +183,11 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) * * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm. */ -static enum ice_status -ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) +static int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) { u32 bytes = sizeof(u16); - enum ice_status status; __le16 data_local; + int status; /* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and * Shadow RAM sector restrictions necessary when reading from the NVM. 
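Note: the hunk above cuts off at the declaration of ice_read_sr_word_aq(). For reference, a sketch of how the rest of the converted body plausibly reads (reconstructed from the surrounding context, with a hypothetical name; not text from this patch):

static int example_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
        u32 bytes = sizeof(u16);
        __le16 data_local;
        int status;

        /* Shadow RAM offsets are in words; ice_read_flat_nvm() takes bytes */
        status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
                                   (__force u8 *)&data_local, true);
        if (status)
                return status;

        *data = le16_to_cpu(data_local);
        return 0;
}

The point of the conversion is visible even here: the errno from ice_read_flat_nvm() propagates out unchanged instead of being flattened to -EIO.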
@@ -210,8 +208,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) * * This function will request NVM ownership. */ -enum ice_status -ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) +int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) { if (hw->flash.blank_nvm_mode) return 0; @@ -318,18 +315,18 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban * hw->flash.banks data being setup by ice_determine_active_flash_banks() * during initialization. */ -static enum ice_status +static int ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, u32 offset, u8 *data, u32 length) { - enum ice_status status; + int status; u32 start; start = ice_get_flash_bank_offset(hw, bank, module); if (!start) { ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n", module); - return ICE_ERR_PARAM; + return -EINVAL; } status = ice_acquire_nvm(hw, ICE_RES_READ); @@ -353,11 +350,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, * Read the specified word from the active NVM module. This includes the CSS * header at the start of the NVM module. */ -static enum ice_status +static int ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - enum ice_status status; __le16 data_local; + int status; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); @@ -377,7 +374,7 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1 * Read the specified word from the copy of the Shadow RAM found in the * specified NVM module. */ -static enum ice_status +static int ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); @@ -392,11 +389,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u * * Read a word from the specified netlist bank. */ -static enum ice_status +static int ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - enum ice_status status; __le16 data_local; + int status; status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); @@ -414,9 +411,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset * * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. */ -enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) +int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) { - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (!status) { @@ -438,13 +435,13 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) * Area (PFA) and returns the TLV pointer and length. The caller can * use these to read the variable length TLV value. */ -enum ice_status +int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { - enum ice_status status; u16 pfa_len, pfa_ptr; u16 next_tlv; + int status; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); if (status) { @@ -482,7 +479,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, *module_tlv_len = tlv_len; return 0; } - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } /* Check next TLV, i.e. 
current TLV pointer + length + 2 words * (for current TLV's type and length) @@ -490,7 +487,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, next_tlv = next_tlv + tlv_len + 2; } /* Module does not exist */ - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -501,12 +498,11 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, * * Reads the part number string from the NVM. */ -enum ice_status -ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) +int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) { u16 pba_tlv, pba_tlv_len; - enum ice_status status; u16 pba_word, pba_size; + int status; u16 i; status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, @@ -525,7 +521,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) if (pba_tlv_len < pba_size) { ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } /* Subtract one to get PBA word count (PBA Size word is included in @@ -534,7 +530,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); - return ICE_ERR_PARAM; + return -EINVAL; } for (i = 0; i < pba_size; i++) { @@ -561,11 +557,11 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) * Read the NVM EETRACK ID and map version of the main NVM image bank, filling * in the NVM info structure. */ -static enum ice_status +static int ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm) { u16 eetrack_lo, eetrack_hi, ver; - enum ice_status status; + int status; status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver); if (status) { @@ -601,7 +597,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv * inactive NVM bank. Used to access version data for a pending update that * has not yet been activated. */ -enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) +int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) { return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm); } @@ -615,49 +611,73 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info * Searches through the Option ROM flash contents to locate the CIVD data for * the image. */ -static enum ice_status +static int ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_civd_info *civd) { - struct ice_orom_civd_info tmp; - enum ice_status status; + u8 *orom_data; + int status; u32 offset; /* The CIVD section is located in the Option ROM aligned to 512 bytes. * The first 4 bytes must contain the ASCII characters "$CIV". * A simple modulo 256 sum of all of the bytes of the structure must * equal 0. + * + * The exact location is unknown and varies between images but is + * usually somewhere in the middle of the bank. We need to scan the + * Option ROM bank to locate it. + * + * It's significantly faster to read the entire Option ROM up front + * using the maximum page size, than to read each possible location + * with a separate firmware command. 
*/ + orom_data = vzalloc(hw->flash.banks.orom_size); + if (!orom_data) + return -ENOMEM; + + status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0, + orom_data, hw->flash.banks.orom_size); + if (status) { + vfree(orom_data); + ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n"); + return status; + } + + /* Scan the memory buffer to locate the CIVD data section */ for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) { + struct ice_orom_civd_info *tmp; u8 sum = 0, i; - status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, - offset, (u8 *)&tmp, sizeof(tmp)); - if (status) { - ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM CIVD data\n"); - return status; - } + tmp = (struct ice_orom_civd_info *)&orom_data[offset]; /* Skip forward until we find a matching signature */ - if (memcmp("$CIV", tmp.signature, sizeof(tmp.signature)) != 0) + if (memcmp("$CIV", tmp->signature, sizeof(tmp->signature)) != 0) continue; + ice_debug(hw, ICE_DBG_NVM, "Found CIVD section at offset %u\n", + offset); + /* Verify that the simple checksum is zero */ - for (i = 0; i < sizeof(tmp); i++) + for (i = 0; i < sizeof(*tmp); i++) /* cppcheck-suppress objectIndex */ - sum += ((u8 *)&tmp)[i]; + sum += ((u8 *)tmp)[i]; if (sum) { ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n", sum); - return ICE_ERR_NVM; + goto err_invalid_checksum; } - *civd = tmp; + *civd = *tmp; + vfree(orom_data); return 0; } - return ICE_ERR_NVM; + ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n"); + +err_invalid_checksum: + vfree(orom_data); + return -EIO; } /** @@ -669,12 +689,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, * Read Option ROM version and security revision from the Option ROM flash * section. */ -static enum ice_status +static int ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom) { struct ice_orom_civd_info civd; - enum ice_status status; u32 combo_ver; + int status; status = ice_get_orom_civd_data(hw, bank, &civd); if (status) { @@ -700,7 +720,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o * section of flash. Used to access version data for a pending update that has * not yet been activated. */ -enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) +int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) { return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom); } @@ -715,13 +735,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf * Topology section to find the Netlist ID block and extract the relevant * information into the netlist version structure. 
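Note: consolidating the fragmented +/- lines above, the new CIVD lookup reduces to one bulk read followed by an in-memory scan. A minimal sketch of the scan half (hypothetical helper name; logging and the vzalloc()/vfree() bookkeeping elided):

static const struct ice_orom_civd_info *
example_find_civd(const u8 *orom_data, u32 bank_size)
{
        u32 offset;

        /* CIVD candidates sit on 512-byte boundaries within the bank */
        for (offset = 0; (offset + 512) <= bank_size; offset += 512) {
                const struct ice_orom_civd_info *tmp;
                u8 sum = 0, i;

                tmp = (const struct ice_orom_civd_info *)&orom_data[offset];
                if (memcmp("$CIV", tmp->signature, sizeof(tmp->signature)))
                        continue;

                /* a modulo-256 sum over the whole structure must be zero */
                for (i = 0; i < sizeof(*tmp); i++)
                        sum += ((const u8 *)tmp)[i];
                if (!sum)
                        return tmp;
        }

        return NULL;
}

Reading the whole bank once with vzalloc() trades memory for the many per-offset firmware reads the old loop issued, which is the speedup the new comment describes.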
*/ -static enum ice_status +static int ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_netlist_info *netlist) { u16 module_id, length, node_count, i; - enum ice_status status; u16 *id_blk; + int status; status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id); if (status) @@ -730,7 +750,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) { ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n", ICE_NETLIST_LINK_TOPO_MOD_ID, module_id); - return ICE_ERR_NVM; + return -EIO; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length); @@ -741,7 +761,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, if (length < ICE_NETLIST_ID_BLK_SIZE) { ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n", ICE_NETLIST_ID_BLK_SIZE, length); - return ICE_ERR_NVM; + return -EIO; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count); @@ -751,7 +771,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL); if (!id_blk) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Read out the entire Netlist ID Block at once. */ status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, @@ -791,7 +811,7 @@ exit_error: * extract version data of a pending flash update in order to display the * version data. */ -enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) +int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) { return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist); } @@ -804,10 +824,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli * the actual size is smaller. Use bisection to determine the accessible size * of flash memory. */ -static enum ice_status ice_discover_flash_size(struct ice_hw *hw) +static int ice_discover_flash_size(struct ice_hw *hw) { u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) @@ -819,7 +839,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw) u8 data; status = ice_read_flat_nvm(hw, offset, &len, &data, false); - if (status == ICE_ERR_AQ_ERROR && + if (status == -EIO && hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", __func__, offset); @@ -859,10 +879,9 @@ err_read_flat_nvm: * sector size by using the highest bit. The reported pointer value will be in * bytes, intended for flat NVM reads. */ -static enum ice_status -ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) +static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) { - enum ice_status status; + int status; u16 value; status = ice_read_sr_word(hw, offset, &value); @@ -891,10 +910,9 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) * Each area size word is specified in 4KB sector units. This function reports * the size in bytes, intended for flat NVM reads. 
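Note: ice_read_sr_pointer() and ice_read_sr_area_size() wrap the decode step their comments describe. A sketch of the pointer case, assuming ICE_SR_NVM_PTR_4KB_UNITS as the name of the high-bit mask (the mask is not shown in these hunks):

static int example_decode_sr_pointer(struct ice_hw *hw, u16 offset,
                                     u32 *pointer)
{
        u16 value;
        int status;

        status = ice_read_sr_word(hw, offset, &value);
        if (status)
                return status;

        /* high bit set: value counts 4KB sectors, else 16-bit words */
        if (value & ICE_SR_NVM_PTR_4KB_UNITS)
                *pointer = (value & ~ICE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
        else
                *pointer = value * 2;

        return 0;
}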
*/ -static enum ice_status -ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) +static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) { - enum ice_status status; + int status; u16 value; status = ice_read_sr_word(hw, offset, &value); @@ -917,12 +935,11 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) * structure for later use in order to calculate the correct offset to read * from the active module. */ -static enum ice_status -ice_determine_active_flash_banks(struct ice_hw *hw) +static int ice_determine_active_flash_banks(struct ice_hw *hw) { struct ice_bank_info *banks = &hw->flash.banks; - enum ice_status status; u16 ctrl_word; + int status; status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word); if (status) { @@ -933,7 +950,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw) /* Check that the control word indicates validity */ if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) { ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n"); - return ICE_ERR_CFG; + return -EIO; } if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK)) @@ -997,12 +1014,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw) * This function reads and populates NVM settings such as Shadow RAM size, * max_timeout, and blank_nvm_mode */ -enum ice_status ice_init_nvm(struct ice_hw *hw) +int ice_init_nvm(struct ice_hw *hw) { struct ice_flash_info *flash = &hw->flash; - enum ice_status status; u32 fla, gens_stat; u8 sr_size; + int status; /* The SR size is stored regardless of the NVM programming mode * as the blank mode may be used in the factory line. @@ -1021,7 +1038,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) /* Blank programming mode */ flash->blank_nvm_mode = true; ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); - return ICE_ERR_NVM_BLANK_MODE; + return -EIO; } status = ice_discover_flash_size(hw); @@ -1060,11 +1077,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) * * Verify NVM PFA checksum validity (0x0706) */ -enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) +int ice_nvm_validate_checksum(struct ice_hw *hw) { struct ice_aqc_nvm_checksum *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) @@ -1080,7 +1097,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) if (!status) if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) - status = ICE_ERR_NVM_CHECKSUM; + status = -EIO; return status; } @@ -1088,22 +1105,35 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) /** * ice_nvm_write_activate * @hw: pointer to the HW struct - * @cmd_flags: NVM activate admin command bits (banks to be validated) + * @cmd_flags: flags for write activate command + * @response_flags: response indicators from firmware * * Update the control word with the required banks' validity bits * and dumps the Shadow RAM to flash (0x0707) + * + * cmd_flags controls which banks to activate, and the preservation level to + * use when activating the NVM bank. + * + * On successful return of the firmware command, the response_flags variable + * is updated with the flags reported by firmware indicating certain status, + * such as whether EMP reset is enabled. 
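Note: a hypothetical caller of the extended ice_nvm_write_activate(), showing how the new out-parameter might be consumed. ICE_AQC_NVM_EMPR_ENA is an assumed flag name, not taken from this hunk:

static int example_write_activate(struct ice_hw *hw, u8 cmd_flags,
                                  bool *empr_needed)
{
        u8 response = 0;
        int err;

        err = ice_nvm_write_activate(hw, cmd_flags, &response);
        if (err)
                return err;

        /* firmware echoes status bits back in the descriptor flags */
        *empr_needed = !!(response & ICE_AQC_NVM_EMPR_ENA);
        return 0;
}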
*/ -enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) +int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags) { struct ice_aqc_nvm *cmd; struct ice_aq_desc desc; + int err; cmd = &desc.params.nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate); cmd->cmd_flags = cmd_flags; - return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!err && response_flags) + *response_flags = cmd->cmd_flags; + + return err; } /** @@ -1113,7 +1143,7 @@ enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) * Update empr (0x0709). This command allows SW to * request an EMPR to activate new FW. */ -enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) +int ice_aq_nvm_update_empr(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -1136,7 +1166,7 @@ enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) * as part of the NVM update as the first cmd in the flow. */ -enum ice_status +int ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, u16 length, struct ice_sq_cd *cd) { @@ -1144,7 +1174,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, struct ice_aq_desc desc; if (length != 0 && !data) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.pkg_data; @@ -1173,17 +1203,17 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, * the TransferFlag is set to End or StartAndEnd. */ -enum ice_status +int ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, u8 transfer_flag, u8 *comp_response, u8 *comp_response_code, struct ice_sq_cd *cd) { struct ice_aqc_nvm_pass_comp_tbl *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!data || !comp_response || !comp_response_code) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.pass_comp_tbl; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index c6f05f43d593..856d1ad4398b 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -12,38 +12,34 @@ struct ice_orom_civd_info { __le16 combo_name[32]; /* Unicode string representing the Combo Image version */ } __packed; -enum ice_status -ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); +int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_nvm(struct ice_hw *hw); -enum ice_status +int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram); -enum ice_status +int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type); -enum ice_status -ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom); -enum ice_status -ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm); -enum ice_status +int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom); +int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm); +int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist); -enum ice_status -ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); -enum ice_status ice_init_nvm(struct ice_hw *hw); -enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); -enum ice_status +int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); +int ice_init_nvm(struct ice_hw *hw); +int ice_read_sr_word(struct ice_hw *hw, u16 
offset, u16 *data); +int ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, u8 command_flags, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd); -enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); -enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags); -enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw); -enum ice_status +int ice_nvm_validate_checksum(struct ice_hw *hw); +int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags); +int ice_aq_nvm_update_empr(struct ice_hw *hw); +int ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, u16 length, struct ice_sq_cd *cd); -enum ice_status +int ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, u8 transfer_flag, u8 *comp_response, u8 *comp_response_code, struct ice_sq_cd *cd); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index bf7247c6f58e..0014a1002ed3 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -705,7 +705,7 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) scaled_ppm = -scaled_ppm; } - while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) { + while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) { /* handle overflow by scaling down the scaled_ppm and * the divisor, losing some precision */ @@ -1205,10 +1205,6 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) static int ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) { - /* Reserved for future extensions. */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: ice_set_tx_tstamp(pf, false); @@ -1540,19 +1536,16 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work) if (err) continue; - /* Check if the timestamp is valid */ - if (!(raw_tstamp & ICE_PTP_TS_VALID)) + /* Check if the timestamp is invalid or stale */ + if (!(raw_tstamp & ICE_PTP_TS_VALID) || + raw_tstamp == tx->tstamps[idx].cached_tstamp) continue; - /* clear the timestamp register, so that it won't show valid - * again when re-used. - */ - ice_clear_phy_tstamp(hw, tx->quad, phy_idx); - /* The timestamp is valid, so we'll go ahead and clear this * index and then send the timestamp up to the stack. */ spin_lock(&tx->lock); + tx->tstamps[idx].cached_tstamp = raw_tstamp; clear_bit(idx, tx->in_use); skb = tx->tstamps[idx].skb; tx->tstamps[idx].skb = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index f71ad317d6c8..53c15fc9d996 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -55,15 +55,21 @@ struct ice_perout_channel { * struct ice_tx_tstamp - Tracking for a single Tx timestamp * @skb: pointer to the SKB for this timestamp request * @start: jiffies when the timestamp was first requested + * @cached_tstamp: last read timestamp * * This structure tracks a single timestamp request. The SKB pointer is * provided when initiating a request. The start time is used to ensure that * we discard old requests that were not fulfilled within a 2 second time * window. + * Timestamp values in the PHY are read only and do not get cleared except at + * hardware reset or when a new timestamp value is captured. 
The cached_tstamp + * field is used to detect the case where a new timestamp has not yet been + * captured, ensuring that we avoid sending stale timestamp data to the stack. */ struct ice_tx_tstamp { struct sk_buff *skb; unsigned long start; + u64 cached_tstamp; }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index ce3c7bded4cb..7947223536e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -11,7 +11,7 @@ * This function inserts the root node of the scheduling tree topology * to the SW DB. */ -static enum ice_status +static int ice_sched_add_root_node(struct ice_port_info *pi, struct ice_aqc_txsched_elem_data *info) { @@ -19,20 +19,20 @@ ice_sched_add_root_node(struct ice_port_info *pi, struct ice_hw *hw; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL); if (!root) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* coverity[suspicious_sizeof] */ root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], sizeof(*root), GFP_KERNEL); if (!root->children) { devm_kfree(ice_hw_to_dev(hw), root); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } memcpy(&root->info, info, sizeof(*info)); @@ -96,14 +96,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) * * This function sends a scheduling elements cmd (cmd_opc) */ -static enum ice_status +static int ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, u16 elems_req, void *buf, u16 buf_size, u16 *elems_resp, struct ice_sq_cd *cd) { struct ice_aqc_sched_elem_cmd *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.sched_elem_cmd; ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); @@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, * * Query scheduling elements (0x0404) */ -enum ice_status +int ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) @@ -145,18 +145,18 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, * * This function inserts a scheduler node to the SW DB. 
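Note on the one-character-family change in the ice_ptp_adjfine() hunk further up: div_u64() truncates its divisor to 32 bits, so a large incval silently corrupted the overflow bound; div64_u64() divides by a full u64. A minimal sketch of the guard, as a hypothetical helper (the real code operates on locals):

static void example_bound_scaled_ppm(u64 *scaled_ppm, u64 *divisor,
                                     u64 incval)
{
        /* shrink numerator and divisor together, trading precision for
         * headroom, until scaled_ppm * incval can no longer overflow
         */
        while (*scaled_ppm > div64_u64(U64_MAX, incval)) {
                *scaled_ppm >>= 1;
                *divisor >>= 1;
        }
}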
*/ -enum ice_status +int ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data elem; struct ice_sched_node *parent; struct ice_sched_node *node; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; @@ -166,7 +166,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, if (!parent) { ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n", le32_to_cpu(info->parent_teid)); - return ICE_ERR_PARAM; + return -EINVAL; } /* query the current node information from FW before adding it @@ -178,7 +178,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); if (!node) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; if (hw->max_children[layer]) { /* coverity[suspicious_sizeof] */ node->children = devm_kcalloc(ice_hw_to_dev(hw), @@ -186,7 +186,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, sizeof(*node), GFP_KERNEL); if (!node->children) { devm_kfree(ice_hw_to_dev(hw), node); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } } @@ -209,7 +209,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, * * Delete scheduling elements (0x040F) */ -static enum ice_status +static int ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_delete_elem *buf, u16 buf_size, u16 *grps_del, struct ice_sq_cd *cd) @@ -228,19 +228,19 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, * * This function remove nodes from HW */ -static enum ice_status +static int ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, u16 num_nodes, u32 *node_teids) { struct ice_aqc_delete_elem *buf; u16 i, num_groups_removed = 0; - enum ice_status status; u16 buf_size; + int status; buf_size = struct_size(buf, teid, num_nodes); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); @@ -369,14 +369,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) * * Get default scheduler topology (0x400) */ -static enum ice_status +static int ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, struct ice_aqc_get_topo_elem *buf, u16 buf_size, u8 *num_branches, struct ice_sq_cd *cd) { struct ice_aqc_get_topo *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.get_topo; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo); @@ -399,7 +399,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, * * Add scheduling elements (0x0401) */ -static enum ice_status +static int ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_add_elem *buf, u16 buf_size, u16 *grps_added, struct ice_sq_cd *cd) @@ -420,7 +420,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, * * Configure scheduling elements (0x0403) */ -static enum ice_status +static int ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_cfgd, struct ice_sq_cd *cd) @@ -441,7 +441,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, * * Move scheduling elements (0x0408) */ -static enum ice_status +static int ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_move_elem *buf, u16 buf_size, u16 *grps_movd, struct ice_sq_cd *cd) @@ -462,7 +462,7 @@ 
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, * * Suspend scheduling elements (0x0409) */ -static enum ice_status +static int ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { @@ -482,7 +482,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, * * resume scheduling elements (0x040A) */ -static enum ice_status +static int ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { @@ -500,7 +500,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, * * Query scheduler resource allocation (0x0412) */ -static enum ice_status +static int ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, struct ice_aqc_query_txsched_res_resp *buf, struct ice_sq_cd *cd) @@ -520,18 +520,18 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, * * This function suspends or resumes HW nodes */ -static enum ice_status +static int ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, bool suspend) { u16 i, buf_size, num_elem_ret = 0; - enum ice_status status; __le32 *buf; + int status; buf_size = sizeof(*buf) * num_nodes; buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_nodes; i++) buf[i] = cpu_to_le32(node_teids[i]); @@ -558,7 +558,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, * @tc: TC number * @new_numqs: number of queues */ -static enum ice_status +static int ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; @@ -566,7 +566,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; /* allocate LAN queue contexts */ if (!vsi_ctx->lan_q_ctx[tc]) { vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), @@ -574,7 +574,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) sizeof(*q_ctx), GFP_KERNEL); if (!vsi_ctx->lan_q_ctx[tc]) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; vsi_ctx->num_lan_q_entries[tc] = new_numqs; return 0; } @@ -585,7 +585,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, sizeof(*q_ctx), GFP_KERNEL); if (!q_ctx) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], prev_num * sizeof(*q_ctx)); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); @@ -602,7 +602,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) * @tc: TC number * @new_numqs: number of queues */ -static enum ice_status +static int ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; @@ -610,7 +610,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; /* allocate RDMA queue contexts */ if (!vsi_ctx->rdma_q_ctx[tc]) { vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), @@ -618,7 +618,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) sizeof(*q_ctx), GFP_KERNEL); if (!vsi_ctx->rdma_q_ctx[tc]) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; vsi_ctx->num_rdma_q_entries[tc] = new_numqs; return 0; } @@ -629,7 
+629,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, sizeof(*q_ctx), GFP_KERNEL); if (!q_ctx) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc], prev_num * sizeof(*q_ctx)); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]); @@ -651,14 +651,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) * * RL profile function to add, query, or remove profile(s) */ -static enum ice_status +static int ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) { struct ice_aqc_rl_profile *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.rl_profile; @@ -682,7 +682,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, * * Add RL profile (0x0410) */ -static enum ice_status +static int ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_added, struct ice_sq_cd *cd) @@ -702,7 +702,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, * * Remove RL profile (0x0415) */ -static enum ice_status +static int ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_removed, struct ice_sq_cd *cd) @@ -721,24 +721,24 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, * its associated parameters from HW DB,and locally. The caller needs to * hold scheduler lock. */ -static enum ice_status +static int ice_sched_del_rl_profile(struct ice_hw *hw, struct ice_aqc_rl_profile_info *rl_info) { struct ice_aqc_rl_profile_elem *buf; u16 num_profiles_removed; - enum ice_status status; u16 num_profiles = 1; + int status; if (rl_info->prof_id_ref != 0) - return ICE_ERR_IN_USE; + return -EBUSY; /* Safe to remove profile ID */ buf = &rl_info->profile; status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), &num_profiles_removed, NULL); if (status || num_profiles_removed != num_profiles) - return ICE_ERR_CFG; + return -EIO; /* Delete stale entry now */ list_del(&rl_info->list_entry); @@ -763,7 +763,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi) list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, &pi->rl_prof_list[ln], list_entry) { struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; rl_prof_elem->prof_id_ref = 0; status = ice_sched_del_rl_profile(hw, rl_prof_elem); @@ -875,7 +875,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw) * * This function add nodes to HW as well as to SW DB for a given layer */ -static enum ice_status +static int ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, u16 num_nodes, u16 *num_nodes_added, u32 *first_node_teid) @@ -883,15 +883,15 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *prev, *new_node; struct ice_aqc_add_elem *buf; u16 i, num_groups_added = 0; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; size_t buf_size; + int status = 0; u32 teid; buf_size = struct_size(buf, generic, num_nodes); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); @@ -918,7 +918,7 @@ 
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", hw->adminq.sq_last_status); devm_kfree(ice_hw_to_dev(hw), buf); - return ICE_ERR_CFG; + return -EIO; } *num_nodes_added = num_nodes; @@ -974,7 +974,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, * * Add nodes into specific HW layer. */ -static enum ice_status +static int ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -989,7 +989,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, return 0; if (!parent || layer < pi->hw->sw_entry_point_layer) - return ICE_ERR_PARAM; + return -EINVAL; /* max children per node per layer */ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; @@ -998,8 +998,8 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, if ((parent->num_children + num_nodes) > max_child_nodes) { /* Fail if the parent is a TC node */ if (parent == tc_node) - return ICE_ERR_CFG; - return ICE_ERR_MAX_LIMIT; + return -EIO; + return -ENOSPC; } return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, @@ -1018,7 +1018,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, * * This function add nodes to a given layer. */ -static enum ice_status +static int ice_sched_add_nodes_to_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -1027,7 +1027,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, { u32 *first_teid_ptr = first_node_teid; u16 new_num_nodes = num_nodes; - enum ice_status status = 0; + int status = 0; *num_nodes_added = 0; while (*num_nodes_added < num_nodes) { @@ -1045,14 +1045,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, if (*num_nodes_added > num_nodes) { ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, *num_nodes_added); - status = ICE_ERR_CFG; + status = -EIO; break; } /* break if all the nodes are added successfully */ if (!status && (*num_nodes_added == num_nodes)) break; /* break if the error is not max limit */ - if (status && status != ICE_ERR_MAX_LIMIT) + if (status && status != -ENOSPC) break; /* Exceeded the max children */ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; @@ -1152,7 +1152,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) } if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = le32_to_cpu(node->info.node_teid); - enum ice_status status; + int status; /* remove the default leaf node */ status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); @@ -1198,23 +1198,23 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) * resources, default topology created by firmware and storing the information * in SW DB. 
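Note: the -EIO/-ENOSPC split chosen above is what lets ice_sched_add_nodes_to_layer() retry: a full TC parent is fatal, while a full intermediate parent just means "pick a sibling". Condensed into a hypothetical helper:

static int example_check_parent_space(struct ice_sched_node *parent,
                                      struct ice_sched_node *tc_node,
                                      u16 num_nodes, u16 max_child_nodes)
{
        if (parent->num_children + num_nodes <= max_child_nodes)
                return 0;

        /* only -ENOSPC is treated as retryable by the caller */
        return parent == tc_node ? -EIO : -ENOSPC;
}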
*/ -enum ice_status ice_sched_init_port(struct ice_port_info *pi) +int ice_sched_init_port(struct ice_port_info *pi) { struct ice_aqc_get_topo_elem *buf; - enum ice_status status; struct ice_hw *hw; u8 num_branches; u16 num_elems; + int status; u8 i, j; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; /* Query the Default Topology from FW */ buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Query default scheduling tree topology */ status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, @@ -1226,7 +1226,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", num_branches); - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_init_port; } @@ -1237,7 +1237,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", num_elems); - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_init_port; } @@ -1300,11 +1300,11 @@ err_init_port: * * query FW for allocated scheduler resources and store in HW struct */ -enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) +int ice_sched_query_res_alloc(struct ice_hw *hw) { struct ice_aqc_query_txsched_res_resp *buf; - enum ice_status status = 0; __le16 max_sibl; + int status = 0; u16 i; if (hw->layer_info) @@ -1312,7 +1312,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); if (status) @@ -1341,7 +1341,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) sizeof(*hw->layer_info)), GFP_KERNEL); if (!hw->layer_info) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto sched_query_out; } @@ -1614,31 +1614,31 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) * This function adds the VSI child nodes to tree. It gets called for * LAN and RDMA separately. 
*/ -static enum ice_status +static int ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes, u8 owner) { struct ice_sched_node *parent, *node; struct ice_hw *hw = pi->hw; - enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, qgl, vsil; + int status; qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { if (!parent) - return ICE_ERR_CFG; + return -EIO; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_added); if (status || num_nodes[i] != num_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -1717,18 +1717,18 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi, * This function adds the VSI supported nodes into Tx tree including the * VSI, its parent and intermediate nodes in below layers */ -static enum ice_status +static int ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *parent = tc_node; - enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, vsil; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { @@ -1737,7 +1737,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, &first_node_teid, &num_added); if (status || num_nodes[i] != num_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -1749,7 +1749,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, parent = parent->children[0]; if (!parent) - return ICE_ERR_CFG; + return -EIO; if (i == vsil) parent->vsi_handle = vsi_handle; @@ -1766,7 +1766,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, * * This function adds a new VSI into scheduler tree */ -static enum ice_status +static int ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) { u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; @@ -1774,7 +1774,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_PARAM; + return -EINVAL; /* calculate number of supported nodes needed for this VSI */ ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes); @@ -1794,7 +1794,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) * * This function updates the VSI child nodes based on the number of queues */ -static enum ice_status +static int ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 new_numqs, u8 owner) { @@ -1802,21 +1802,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *vsi_node; struct ice_sched_node *tc_node; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; u16 prev_numqs; + int status = 0; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_CFG; + return -EIO; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; if (owner == ICE_SCHED_NODE_OWNER_LAN) prev_numqs = 
vsi_ctx->sched.max_lanq[tc]; @@ -1869,22 +1869,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, * enabled and VSI is in suspended state then resume the VSI back. If TC is * disabled then suspend the VSI if it is not already. */ -enum ice_status +int ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable) { struct ice_sched_node *vsi_node, *tc_node; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); /* suspend the VSI if TC is not enabled */ @@ -1908,7 +1908,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_CFG; + return -EIO; vsi_ctx->sched.vsi_node[tc] = vsi_node; vsi_node->in_use = true; @@ -1993,11 +1993,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) * This function removes the VSI and its LAN or RDMA children nodes from the * scheduler tree. */ -static enum ice_status +static int ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) { - enum ice_status status = ICE_ERR_PARAM; struct ice_vsi_ctx *vsi_ctx; + int status = -EINVAL; u8 i; ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); @@ -2022,7 +2022,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) if (ice_sched_is_leaf_node_present(vsi_node)) { ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); - status = ICE_ERR_IN_USE; + status = -EBUSY; goto exit_sched_rm_vsi_cfg; } while (j < vsi_node->num_children) { @@ -2065,7 +2065,7 @@ exit_sched_rm_vsi_cfg: * This function clears the VSI and its LAN children nodes from scheduler tree * for all TCs. */ -enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); } @@ -2078,7 +2078,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) * This function clears the VSI and its RDMA children nodes from scheduler tree * for all TCs. */ -enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA); } @@ -2188,36 +2188,36 @@ ice_sched_update_parent(struct ice_sched_node *new_parent, * * This function move the child nodes to a given parent. 
*/ -static enum ice_status +static int ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, u16 num_items, u32 *list) { struct ice_aqc_move_elem *buf; struct ice_sched_node *node; - enum ice_status status = 0; u16 i, grps_movd = 0; struct ice_hw *hw; + int status = 0; u16 buf_len; hw = pi->hw; if (!parent || !num_items) - return ICE_ERR_PARAM; + return -EINVAL; /* Does parent have enough space */ if (parent->num_children + num_items > hw->max_children[parent->tx_sched_layer]) - return ICE_ERR_AQ_FULL; + return -ENOSPC; buf_len = struct_size(buf, teid, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_items; i++) { node = ice_sched_find_node_by_teid(pi->root, list[i]); if (!node) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto move_err_exit; } @@ -2228,7 +2228,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, status = ice_aq_move_sched_elems(hw, 1, buf, buf_len, &grps_movd, NULL); if (status && grps_movd != 1) { - status = ICE_ERR_CFG; + status = -EIO; goto move_err_exit; } @@ -2251,28 +2251,28 @@ move_err_exit: * This function moves a VSI to an aggregator node or its subtree. * Intermediate nodes may be created if required. */ -static enum ice_status +static int ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, u8 tc) { struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; u32 first_node_teid, vsi_teid; - enum ice_status status; u16 num_nodes_added; u8 aggl, vsil, i; + int status; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Is this VSI already part of given aggregator? */ if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node)) @@ -2302,7 +2302,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, &first_node_teid, &num_nodes_added); if (status || num_nodes[i] != num_nodes_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -2314,7 +2314,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, parent = parent->children[0]; if (!parent) - return ICE_ERR_CFG; + return -EIO; } move_nodes: @@ -2333,14 +2333,14 @@ move_nodes: * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The * caller holds the scheduler lock. 
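Note: taken together, the substitutions in these scheduler hunks follow one table. A condensed crib, written as a hypothetical translation helper purely for summary (the patch itself removes enum ice_status rather than translating it at run time):

static int example_status_to_errno(enum ice_status status)
{
        switch (status) {
        case ICE_SUCCESS:
                return 0;
        case ICE_ERR_PARAM:
                return -EINVAL;
        case ICE_ERR_NO_MEMORY:
                return -ENOMEM;
        case ICE_ERR_IN_USE:
                return -EBUSY;
        case ICE_ERR_MAX_LIMIT:
        case ICE_ERR_AQ_FULL:
                return -ENOSPC;
        case ICE_ERR_DOES_NOT_EXIST:
                return -ENOENT;
        default:
                /* ICE_ERR_CFG, ICE_ERR_NVM, AQ failures, ... */
                return -EIO;
        }
}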
*/ -static enum ice_status +static int ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *tmp; - enum ice_status status = 0; + int status = 0; list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list, list_entry) { @@ -2397,7 +2397,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node) * This function removes the aggregator node and intermediate nodes if any * from the given TC */ -static enum ice_status +static int ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *tc_node, *agg_node; @@ -2405,15 +2405,15 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Can't remove the aggregator node if it has children */ if (ice_sched_is_agg_inuse(pi, agg_node)) - return ICE_ERR_IN_USE; + return -EBUSY; /* need to remove the whole subtree if aggregator node is the * only child. @@ -2422,7 +2422,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) struct ice_sched_node *parent = agg_node->parent; if (!parent) - return ICE_ERR_CFG; + return -EIO; if (parent->num_children > 1) break; @@ -2445,11 +2445,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) * the aggregator configuration completely for requested TC. The caller needs * to hold the scheduler lock. */ -static enum ice_status +static int ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { - enum ice_status status = 0; + int status = 0; /* If nothing to remove - return success */ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) @@ -2478,7 +2478,7 @@ exit_rm_agg_cfg_tc: * Save aggregator TC bitmap. This function needs to be called with scheduler * lock held. */ -static enum ice_status +static int ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, unsigned long *tc_bitmap) { @@ -2486,7 +2486,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); return 0; @@ -2501,20 +2501,20 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, * This function creates an aggregator node and intermediate nodes if required * for the given TC */ -static enum ice_status +static int ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *parent, *agg_node, *tc_node; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; u32 first_node_teid; u16 num_nodes_added; + int status = 0; u8 i, aggl; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); /* Does Agg node already exist ? 
*/ @@ -2549,14 +2549,14 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) parent = tc_node; for (i = hw->sw_entry_point_layer; i <= aggl; i++) { if (!parent) - return ICE_ERR_CFG; + return -EIO; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_nodes_added); if (status || num_nodes[i] != num_nodes_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -2591,13 +2591,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) * resources and remove aggregator ID. * This function needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, unsigned long *tc_bitmap) { struct ice_sched_agg_info *agg_info; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; agg_info = ice_get_agg_info(hw, agg_id); @@ -2606,7 +2606,7 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info), GFP_KERNEL); if (!agg_info) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; agg_info->agg_id = agg_id; agg_info->agg_type = agg_type; @@ -2653,19 +2653,17 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, * * This function configures aggregator node(s). */ -enum ice_status +int ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, u8 tc_bitmap) { unsigned long bitmap = tc_bitmap; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); - status = ice_sched_cfg_agg(pi, agg_id, agg_type, - (unsigned long *)&bitmap); + status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap); if (!status) - status = ice_save_agg_tc_bitmap(pi, agg_id, - (unsigned long *)&bitmap); + status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap); mutex_unlock(&pi->sched_lock); return status; } @@ -2724,7 +2722,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) * Save VSI to aggregator TC bitmap. This function needs to call with scheduler * lock held. */ -static enum ice_status +static int ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, unsigned long *tc_bitmap) { @@ -2733,11 +2731,11 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; /* check if entry already exist */ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); if (!agg_vsi_info) - return ICE_ERR_PARAM; + return -EINVAL; bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); return 0; @@ -2754,21 +2752,21 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, * already associated to the aggregator node then no operation is performed on * the tree. This function needs to be called with scheduler lock held. 
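Note: besides the return-type change, the ice_cfg_agg() hunk above drops two (unsigned long *)&bitmap casts — the u8 TC bitmap is widened into a properly typed unsigned long local once, so its address can be passed to bitmap helpers directly. A standalone sketch of the same idea (names and the TC walk are illustrative):

#include <limits.h>
#include <stdio.h>

/* Walk the TC bits through a pointer that already has the type the
 * bitmap helpers expect -- no cast at the call site. */
static void walk_tcs_sketch(const unsigned long *bitmap)
{
	for (unsigned int tc = 0; tc < 8; tc++)
		if (*bitmap & (1UL << tc))
			printf("TC %u enabled\n", tc);
}

int main(void)
{
	unsigned char tc_bitmap = 0x05;   /* TCs 0 and 2 */
	unsigned long bitmap = tc_bitmap; /* widen once, as ice_cfg_agg() does */

	walk_tcs_sketch(&bitmap);         /* no (unsigned long *) cast needed */
	return 0;
}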
*/ -static enum ice_status +static int ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, unsigned long *tc_bitmap) { struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL; struct ice_sched_agg_info *agg_info, *old_agg_info; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; agg_info = ice_get_agg_info(hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; /* If the VSI is already part of another aggregator then update * its VSI info list */ @@ -2790,7 +2788,7 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_vsi_info), GFP_KERNEL); if (!agg_vsi_info) - return ICE_ERR_PARAM; + return -EINVAL; /* add VSI ID into the aggregator list */ agg_vsi_info->vsi_handle = vsi_handle; @@ -2851,14 +2849,14 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) * returns success or error on config sched element failure. The caller * needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data buf; - enum ice_status status; u16 elem_cfgd = 0; u16 num_elems = 1; + int status; buf = *info; /* Parent TEID is reserved field in this aq call */ @@ -2874,7 +2872,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, &elem_cfgd, NULL); if (status || elem_cfgd != num_elems) { ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); - return ICE_ERR_CFG; + return -EIO; } /* Config success case */ @@ -2893,7 +2891,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, * * This function configures node element's BW allocation. */ -static enum ice_status +static int ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 bw_alloc) { @@ -2909,7 +2907,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); } else { - return ICE_ERR_PARAM; + return -EINVAL; } /* Configure element */ @@ -2925,12 +2923,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, * * Move or associate VSI to a new or default aggregator node. */ -enum ice_status +int ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, u8 tc_bitmap) { unsigned long bitmap = tc_bitmap; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, @@ -3098,12 +3096,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) * * This function converts the BW to profile structure format. 
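Note: the ice_sched_update_elem() hunk above keeps an extra failure rule worth isolating — even when the admin-queue call itself succeeds, a mismatch between requested and configured element counts is an error, now reported as -EIO instead of ICE_ERR_CFG. A minimal sketch of that check (signature simplified):

#include <errno.h>
#include <stdio.h>

/* Treat a successful AQ call with the wrong completion count the same
 * as an AQ failure, as the hunk above does. */
static int update_elem_sketch(int aq_status, unsigned int num_elems,
			      unsigned int elem_cfgd)
{
	if (aq_status || elem_cfgd != num_elems)
		return -EIO; /* was ICE_ERR_CFG */
	return 0;
}

int main(void)
{
	printf("%d\n", update_elem_sketch(0, 1, 0)); /* -> -EIO */
	printf("%d\n", update_elem_sketch(0, 1, 1)); /* -> 0 */
	return 0;
}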
*/ -static enum ice_status +static int ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, struct ice_aqc_rl_profile_elem *profile) { - enum ice_status status = ICE_ERR_PARAM; s64 bytes_per_sec, ts_rate, mv_tmp; + int status = -EINVAL; bool found = false; s32 encode = 0; s64 mv = 0; @@ -3150,7 +3148,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, profile->rl_encode = cpu_to_le16(encode); status = 0; } else { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } return status; @@ -3176,9 +3174,9 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, struct ice_aqc_rl_profile_info *rl_prof_elem; u16 profiles_added = 0, num_profiles = 1; struct ice_aqc_rl_profile_elem *buf; - enum ice_status status; struct ice_hw *hw; u8 profile_type; + int status; if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) return NULL; @@ -3249,7 +3247,7 @@ exit_add_rl_prof: * * This function configures node element's BW limit. */ -static enum ice_status +static int ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 rl_prof_id) { @@ -3268,7 +3266,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, * hence only one of them may be set for any given element */ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) - return ICE_ERR_CFG; + return -EIO; data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); break; @@ -3291,7 +3289,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && (le16_to_cpu(data->eir_bw.bw_profile_idx) != ICE_SCHED_DFLT_RL_PROF_ID)) - return ICE_ERR_CFG; + return -EIO; /* EIR BW is set to default, disable it */ data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; /* Okay to enable shared BW now */ @@ -3300,7 +3298,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, break; default: /* Unknown rate limit type */ - return ICE_ERR_PARAM; + return -EINVAL; } /* Configure element */ @@ -3420,15 +3418,15 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold * scheduler lock. */ -static enum ice_status +static int ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, u16 profile_id) { struct ice_aqc_rl_profile_info *rl_prof_elem; - enum ice_status status = 0; + int status = 0; if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) - return ICE_ERR_PARAM; + return -EINVAL; /* Check the existing list for RL profile */ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_entry) @@ -3441,11 +3439,11 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, /* Remove old profile ID from database */ status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); - if (status && status != ICE_ERR_IN_USE) + if (status && status != -EBUSY) ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); break; } - if (status == ICE_ERR_IN_USE) + if (status == -EBUSY) status = 0; return status; } @@ -3461,16 +3459,16 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. 
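Note: ice_sched_bw_to_rl_profile() shows the recurring "pessimistic status" pattern of this conversion — status is initialized to -EINVAL (was ICE_ERR_PARAM), cleared to 0 only when a rate encoding is found, and set to -ENOENT (was ICE_ERR_DOES_NOT_EXIST) otherwise. A standalone sketch with the encode search faked:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int bw_to_rl_profile_sketch(unsigned int bw)
{
	int status = -EINVAL; /* pessimistic init, was ICE_ERR_PARAM */
	bool found;

	if (!bw)
		return status; /* parameter check fails fast */
	found = (bw % 64 == 0); /* stand-in for the real encode search */
	if (found)
		status = 0;
	else
		status = -ENOENT; /* was ICE_ERR_DOES_NOT_EXIST */
	return status;
}

int main(void)
{
	printf("%d %d\n", bw_to_rl_profile_sketch(65),
	       bw_to_rl_profile_sketch(128)); /* -ENOENT, then 0 */
	return 0;
}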
*/ -static enum ice_status +static int ice_sched_set_node_bw_dflt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u8 layer_num) { - enum ice_status status; struct ice_hw *hw; u8 profile_type; u16 rl_prof_id; u16 old_id; + int status; hw = pi->hw; switch (rl_type) { @@ -3488,7 +3486,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } /* Save existing RL prof ID for later clean up */ old_id = ice_sched_get_node_rl_prof_id(node, rl_type); @@ -3518,7 +3516,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, * them may be set for any given element. This function needs to be called * with the scheduler lock held. */ -static enum ice_status +static int ice_sched_set_eir_srl_excl(struct ice_port_info *pi, struct ice_sched_node *node, u8 layer_num, enum ice_rl_type rl_type, u32 bw) @@ -3562,14 +3560,14 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi, * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile * ID from local database. The caller needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw, u8 layer_num) { struct ice_aqc_rl_profile_info *rl_prof_info; - enum ice_status status = ICE_ERR_PARAM; struct ice_hw *hw = pi->hw; u16 old_id, rl_prof_id; + int status = -EINVAL; rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num); if (!rl_prof_info) @@ -3608,31 +3606,31 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, * It updates node's BW limit parameters like BW RL profile ID of type CIR, * EIR, or SRL. The caller needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw) { struct ice_sched_node *cfg_node = node; - enum ice_status status; + int status; struct ice_hw *hw; u8 layer_num; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; /* Remove unused RL profile IDs from HW and SW DB */ ice_sched_rm_unused_rl_prof(pi); layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, node->tx_sched_layer); if (layer_num >= hw->num_tx_sched_layers) - return ICE_ERR_PARAM; + return -EINVAL; if (rl_type == ICE_SHARED_BW) { /* SRL node may be different */ cfg_node = ice_sched_get_srl_node(node, layer_num); if (!cfg_node) - return ICE_ERR_CFG; + return -EIO; } /* EIR BW and Shared BW profiles are mutually exclusive and * hence only one of them may be set for any given element @@ -3657,7 +3655,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. */ -static enum ice_status +static int ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type) @@ -3675,7 +3673,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, * behalf of the requested node (first argument). This function needs to be * called with scheduler lock held. */ -static enum ice_status +static int ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) { /* SRL profiles are not available on all layers. 
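Note: the BW-limit hunks above all enforce the same invariant — EIR and shared (SRL) bandwidth profiles are mutually exclusive on an element, and a violation now reads -EIO (was ICE_ERR_CFG). A sketch of that rule with illustrative flag bits, not the driver's actual AQ encoding:

#include <errno.h>
#include <stdio.h>

#define VALID_EIR    0x2 /* illustrative bits, not the real */
#define VALID_SHARED 0x8 /* ICE_AQC_ELEM_VALID_* values */

/* Refuse to enable EIR while the shared profile is already valid. */
static int enable_eir_sketch(unsigned int *valid_sections)
{
	if (*valid_sections & VALID_SHARED)
		return -EIO; /* was ICE_ERR_CFG */
	*valid_sections |= VALID_EIR;
	return 0;
}

int main(void)
{
	unsigned int sections = VALID_SHARED;

	printf("%d\n", enable_eir_sketch(&sections)); /* -> -EIO */
	return 0;
}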
Check if the @@ -3690,7 +3688,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) (node->parent && node->parent->num_children == 1))) return 0; - return ICE_ERR_CFG; + return -EIO; } /** @@ -3701,7 +3699,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) * * Save BW information of queue type node for post replay use. */ -static enum ice_status +static int ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) { switch (rl_type) { @@ -3715,7 +3713,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } return 0; } @@ -3731,16 +3729,16 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) * * This function sets BW limit of queue scheduling node. */ -static enum ice_status +static int ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; struct ice_q_ctx *q_ctx; + int status = -EINVAL; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); if (!q_ctx) @@ -3762,7 +3760,7 @@ ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, node->tx_sched_layer); if (sel_layer >= pi->hw->num_tx_sched_layers) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto exit_q_bw_lmt; } status = ice_sched_validate_srl_node(node, sel_layer); @@ -3794,7 +3792,7 @@ exit_q_bw_lmt: * * This function configures BW limit of queue scheduling node. */ -enum ice_status +int ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { @@ -3812,7 +3810,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * * This function configures BW default limit of queue scheduling node. */ -enum ice_status +int ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type) { @@ -3880,13 +3878,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, * This function sets BW limit of VSI or Aggregator scheduling node * based on TC information from passed in argument BW. */ -static enum ice_status +int ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, enum ice_agg_type agg_type, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; + int status = -EINVAL; if (!pi) return status; @@ -3921,7 +3919,7 @@ exit_set_node_bw_lmt_per_tc: * This function configures BW limit of VSI scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw) { @@ -3948,7 +3946,7 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * This function configures default BW limit of VSI scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type) { @@ -3976,13 +3974,13 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * burst size value is used for future rate limit calls. 
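Note: two things happen in the hunks above — ice_sched_set_node_bw_lmt_per_tc() loses its static (a matching declaration is added to ice_sched.h later in this diff), and ice_sched_set_q_bw_lmt() keeps its single-unlock shape with status defaulting to -EINVAL. A userspace analogue of the latter, with pthreads standing in for the scheduler mutex and the queue-context lookup faked:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every exit path funnels through the one unlock label, mirroring the
 * exit_q_bw_lmt label in the hunk above. */
static int set_q_bw_lmt_sketch(int q_ctx_present)
{
	int status = -EINVAL; /* was ICE_ERR_PARAM */

	pthread_mutex_lock(&sched_lock);
	if (!q_ctx_present)
		goto exit_q_bw_lmt;
	status = 0; /* real code validates layers and sets the limit here */
exit_q_bw_lmt:
	pthread_mutex_unlock(&sched_lock);
	return status;
}

int main(void)
{
	printf("%d %d\n", set_q_bw_lmt_sketch(0), set_q_bw_lmt_sketch(1));
	return 0;
}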
It doesn't change the * existing or previously created RL profiles. */ -enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) +int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) { u16 burst_size_to_prog; if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || bytes > ICE_MAX_BURST_SIZE_ALLOWED) - return ICE_ERR_PARAM; + return -EINVAL; if (ice_round_to_num(bytes, 64) <= ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { /* 64 byte granularity case */ @@ -4017,13 +4015,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) * This function configures node element's priority value. It * needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, u8 priority) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; - enum ice_status status; + int status; buf = node->info; data = &buf.data; @@ -4044,12 +4042,12 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, * This function restores node's BW from bw_t_info. The caller needs * to hold the scheduler lock. */ -static enum ice_status +static int ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, struct ice_bw_type_info *bw_t_info) { struct ice_port_info *pi = hw->port_info; - enum ice_status status = ICE_ERR_PARAM; + int status = -EINVAL; u16 bw_alloc; if (!node) @@ -4137,7 +4135,7 @@ void ice_sched_replay_agg(struct ice_hw *hw) if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) { DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); - enum ice_status status; + int status; bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); ice_sched_get_ena_tc_bitmap(pi, @@ -4191,18 +4189,17 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) * their node bandwidth information. This function needs to be called with * scheduler lock held. */ -static enum ice_status -ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_port_info *pi = hw->port_info; struct ice_sched_agg_info *agg_info; - enum ice_status status; + int status; bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; agg_info = ice_get_vsi_agg_info(hw, vsi_handle); if (!agg_info) return 0; /* Not present in list - default Agg case */ @@ -4233,10 +4230,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) * This function replays association of VSI to aggregator type nodes, and * node bandwidth information. */ -enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { struct ice_port_info *pi = hw->port_info; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_sched_replay_vsi_agg(hw, vsi_handle); @@ -4252,14 +4249,13 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) * This function replays queue type node bandwidth. This function needs to be * called with scheduler lock held. 
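Note: ice_cfg_rl_burst_size() rejects out-of-range burst sizes with -EINVAL (was ICE_ERR_PARAM) before choosing a programming granularity. A sketch of the entry check only, with placeholder bounds rather than the driver's ICE_{MIN,MAX}_BURST_SIZE_ALLOWED values; the 64-byte versus coarser-granularity selection is elided:

#include <errno.h>
#include <stdio.h>

#define MIN_BURST 64            /* placeholder bounds; the driver uses */
#define MAX_BURST (1024 * 1024) /* ICE_{MIN,MAX}_BURST_SIZE_ALLOWED */

static int cfg_rl_burst_sketch(unsigned int bytes)
{
	if (bytes < MIN_BURST || bytes > MAX_BURST)
		return -EINVAL; /* was ICE_ERR_PARAM */
	/* real code then rounds to 64-byte granules and picks the
	 * granularity to program */
	return 0;
}

int main(void)
{
	printf("%d %d\n", cfg_rl_burst_sketch(8), cfg_rl_burst_sketch(256));
	return 0;
}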
*/ -enum ice_status -ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) +int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) { struct ice_sched_node *q_node; /* Following also checks the presence of node in tree */ q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); if (!q_node) - return ICE_ERR_PARAM; + return -EINVAL; return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); } diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 6bddcbecaf5e..4f91577fed56 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -65,12 +65,12 @@ struct ice_sched_agg_info { }; /* FW AQ command calls */ -enum ice_status +int ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd); -enum ice_status ice_sched_init_port(struct ice_port_info *pi); -enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); +int ice_sched_init_port(struct ice_port_info *pi); +int ice_sched_query_res_alloc(struct ice_hw *hw); void ice_sched_get_psm_clk_freq(struct ice_hw *hw); void ice_sched_clear_port(struct ice_port_info *pi); @@ -79,7 +79,7 @@ void ice_sched_clear_agg(struct ice_hw *hw); struct ice_sched_node * ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid); -enum ice_status +int ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info); void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node); @@ -87,35 +87,38 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc); struct ice_sched_node * ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner); -enum ice_status +int ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable); -enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); -enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle); +int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); +int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle); /* Tx scheduler rate limiter functions */ -enum ice_status +int ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, u8 tc_bitmap); -enum ice_status +int ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, u8 tc_bitmap); -enum ice_status +int ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type); -enum ice_status +int ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type); -enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); +int +ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, + enum ice_agg_type agg_type, u8 tc, + enum ice_rl_type rl_type, u32 bw); +int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw); void ice_sched_replay_agg(struct ice_hw *hw); -enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status 
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); +int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); +int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index aa11d07793d4..52c6bac41bf7 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -18,7 +18,7 @@ * queue and asynchronously sending message via * ice_sq_send_cmd() function */ -enum ice_status +int ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct ice_sq_cd *cd) { @@ -228,7 +228,7 @@ ice_mbx_traverse(struct ice_hw *hw, * sent per VF and marks the VF as malicious if it exceeds * the permissible number of messages to send. */ -static enum ice_status +static int ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id, enum ice_mbx_snapshot_state *new_state, bool *is_malvf) @@ -236,7 +236,7 @@ ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id, struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; if (vf_id >= snap->mbx_vf.vfcntr_len) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* increment the message count in the VF array */ snap->mbx_vf.vf_cntr[vf_id]++; @@ -297,7 +297,7 @@ static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap) * Detect: If pending message count exceeds watermark traverse * the static snapshot and look for a malicious VF. */ -enum ice_status +int ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, u16 vf_id, bool *is_malvf) @@ -306,10 +306,10 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_snap_buffer_data *snap_buf; struct ice_ctl_q_info *cq = &hw->mailboxq; enum ice_mbx_snapshot_state new_state; - enum ice_status status = 0; + int status = 0; if (!is_malvf || !mbx_data) - return ICE_ERR_BAD_PTR; + return -EINVAL; /* When entering the mailbox state machine assume that the VF * is not malicious until detected. @@ -320,7 +320,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, * interrupt is not less than the defined AVF message threshold. */ if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; /* The watermark value should not be lesser than the threshold limit * set for the number of asynchronous messages a VF can send to mailbox @@ -329,7 +329,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, */ if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD || mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx) - return ICE_ERR_PARAM; + return -EINVAL; new_state = ICE_MAL_VF_DETECT_STATE_INVALID; snap_buf = &snap->mbx_buf; @@ -383,7 +383,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, default: new_state = ICE_MAL_VF_DETECT_STATE_INVALID; - status = ICE_ERR_CFG; + status = -EIO; } snap_buf->state = new_state; @@ -405,20 +405,20 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, * the input vf_id against the bitmap to verify if the VF has been * detected in any previous mailbox iterations. 
*/ -enum ice_status +int ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id, bool *report_malvf) { if (!all_malvfs || !report_malvf) - return ICE_ERR_PARAM; + return -EINVAL; *report_malvf = false; if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; if (vf_id >= bitmap_len) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* If the vf_id is found in the bitmap set bit and boolean to true */ if (!test_and_set_bit(vf_id, all_malvfs)) @@ -441,19 +441,19 @@ ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, * that the new VF loaded is not considered malicious before going * through the overflow detection algorithm. */ -enum ice_status +int ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id) { if (!snap || !all_malvfs) - return ICE_ERR_PARAM; + return -EINVAL; if (bitmap_len < snap->mbx_vf.vfcntr_len) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; /* Ensure VF ID value is not larger than bitmap or VF counter length */ if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */ clear_bit(vf_id, all_malvfs); @@ -482,7 +482,7 @@ ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, * called to ensure that the vf_count can be compared against the number * of VFs supported as defined in the functional capabilities of the device. */ -enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) +int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) { struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; @@ -491,13 +491,13 @@ enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) * the functional capabilities of the PF. */ if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count, sizeof(*snap->mbx_vf.vf_cntr), GFP_KERNEL); if (!snap->mbx_vf.vf_cntr) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Setting the VF counter length to the number of allocated * VFs for given PF's functional capabilities. 
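Note: ice_mbx_report_malvf() reports a VF only on the first sighting, via test_and_set_bit() on the malicious-VF bitmap, and now fails out-of-range IDs with -EIO (was ICE_ERR_OUT_OF_RANGE). A non-atomic userspace sketch of that bookkeeping, with the kernel bitmap helpers replaced by plain word arithmetic:

#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_VFS 256
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long malvfs[MAX_VFS / BITS_PER_LONG];

static int report_malvf_sketch(unsigned int vf_id, bool *report)
{
	unsigned long mask, *word;

	if (vf_id >= MAX_VFS)
		return -EIO; /* was ICE_ERR_OUT_OF_RANGE */
	mask = 1UL << (vf_id % BITS_PER_LONG);
	word = &malvfs[vf_id / BITS_PER_LONG];
	*report = !(*word & mask); /* first sighting only */
	*word |= mask;
	return 0;
}

int main(void)
{
	bool report;

	report_malvf_sketch(5, &report);
	printf("first: %d\n", report);  /* 1 */
	report_malvf_sketch(5, &report);
	printf("again: %d\n", report);  /* 0 */
	return 0;
}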
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 161dc55d9e9c..68686a3fd7e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -14,24 +14,24 @@ #define ICE_ASYNC_VF_MSG_THRESHOLD 63 #ifdef CONFIG_PCI_IOV -enum ice_status +int ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct ice_sq_cd *cd); u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); -enum ice_status +int ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, u16 vf_id, bool *is_mal_vf); -enum ice_status +int ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id); -enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count); +int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count); void ice_mbx_deinit_snapshot(struct ice_hw *hw); -enum ice_status +int ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id, bool *report_malvf); #else /* CONFIG_PCI_IOV */ -static inline enum ice_status +static inline int ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw, u16 __always_unused vfid, u32 __always_unused v_opcode, u32 __always_unused v_retval, u8 __always_unused *msg, diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h deleted file mode 100644 index dbf66057371d..000000000000 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018, Intel Corporation. */ - -#ifndef _ICE_STATUS_H_ -#define _ICE_STATUS_H_ - -/* Error Codes */ -enum ice_status { - ICE_SUCCESS = 0, - - /* Generic codes : Range -1..-49 */ - ICE_ERR_PARAM = -1, - ICE_ERR_NOT_IMPL = -2, - ICE_ERR_NOT_READY = -3, - ICE_ERR_NOT_SUPPORTED = -4, - ICE_ERR_BAD_PTR = -5, - ICE_ERR_INVAL_SIZE = -6, - ICE_ERR_DEVICE_NOT_SUPPORTED = -8, - ICE_ERR_RESET_FAILED = -9, - ICE_ERR_FW_API_VER = -10, - ICE_ERR_NO_MEMORY = -11, - ICE_ERR_CFG = -12, - ICE_ERR_OUT_OF_RANGE = -13, - ICE_ERR_ALREADY_EXISTS = -14, - ICE_ERR_DOES_NOT_EXIST = -15, - ICE_ERR_IN_USE = -16, - ICE_ERR_MAX_LIMIT = -17, - ICE_ERR_RESET_ONGOING = -18, - ICE_ERR_HW_TABLE = -19, - ICE_ERR_FW_DDP_MISMATCH = -20, - - ICE_ERR_NVM = -50, - ICE_ERR_NVM_CHECKSUM = -51, - ICE_ERR_BUF_TOO_SHORT = -52, - ICE_ERR_NVM_BLANK_MODE = -53, - ICE_ERR_AQ_ERROR = -100, - ICE_ERR_AQ_TIMEOUT = -101, - ICE_ERR_AQ_FULL = -102, - ICE_ERR_AQ_NO_WORK = -103, - ICE_ERR_AQ_EMPTY = -104, - ICE_ERR_AQ_FW_CRITICAL = -105, -}; - -#endif /* _ICE_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 793f4a9fc2cd..e477c2b1d5bb 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -528,7 +528,7 @@ static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES], * Allocate memory for the entire recipe table and initialize the structures/ * entries corresponding to basic recipes. 
*/ -enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) +int ice_init_def_sw_recp(struct ice_hw *hw) { struct ice_sw_recipe *recps; u8 i; @@ -536,7 +536,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, sizeof(*recps), GFP_KERNEL); if (!recps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { recps[i].root_rid = i; @@ -576,14 +576,14 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) * in response buffer. The caller of this function to use *num_elems while * parsing the response buffer. */ -static enum ice_status +static int ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, u16 buf_size, u16 *req_desc, u16 *num_elems, struct ice_sq_cd *cd) { struct ice_aqc_get_sw_cfg *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg); cmd = &desc.params.get_sw_conf; @@ -606,14 +606,14 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, * * Add a VSI context to the hardware (0x0210) */ -static enum ice_status +static int ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *res; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; res = &desc.params.add_update_free_vsi_res; @@ -650,14 +650,14 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Free VSI context info from hardware (0x0213) */ -static enum ice_status +static int ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *resp; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; resp = &desc.params.add_update_free_vsi_res; @@ -685,14 +685,14 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Update VSI context in the hardware (0x0211) */ -static enum ice_status +static int ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *resp; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; resp = &desc.params.add_update_free_vsi_res; @@ -832,15 +832,15 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw) * If this function gets called after reset for existing VSIs then update * with the new HW VSI number in the corresponding VSI handle list entry. 
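Note: the ice_aq_{add,free,update}_vsi() wrappers above share one shape — fill a descriptor, send the command, and return the int status, now a negative errno, straight to the caller. That is also what lets ice_cfg_rdma_fltr(), a little further down, drop its ice_status_to_errno() call around ice_update_vsi(). A sketch of the wrapper shape with the send step faked:

#include <errno.h>
#include <stdio.h>

/* Stand-in for ice_aq_send_cmd(): already returns a negative errno. */
static int aq_send_cmd_sketch(int fail)
{
	return fail ? -EIO : 0;
}

static int aq_update_vsi_sketch(int fail)
{
	/* descriptor setup elided; the wrapper adds no translation */
	return aq_send_cmd_sketch(fail);
}

static int cfg_rdma_fltr_sketch(int fail)
{
	/* was: return ice_status_to_errno(ice_update_vsi(...)); */
	return aq_update_vsi_sketch(fail);
}

int main(void)
{
	printf("%d %d\n", cfg_rdma_fltr_sketch(0), cfg_rdma_fltr_sketch(1));
	return 0;
}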
*/ -enum ice_status +int ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_vsi_ctx *tmp_vsi_ctx; - enum ice_status status; + int status; if (vsi_handle >= ICE_MAX_VSI) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_aq_add_vsi(hw, vsi_ctx, cd); if (status) return status; @@ -851,7 +851,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, sizeof(*tmp_vsi_ctx), GFP_KERNEL); if (!tmp_vsi_ctx) { ice_aq_free_vsi(hw, vsi_ctx, false, cd); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } *tmp_vsi_ctx = *vsi_ctx; ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); @@ -873,14 +873,14 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, * * Free VSI context info from hardware as well as from VSI handle list */ -enum ice_status +int ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd) { - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); if (!status) @@ -897,12 +897,12 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, * * Update VSI context in the hardware */ -enum ice_status +int ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); return ice_aq_update_vsi(hw, vsi_ctx, cd); } @@ -927,7 +927,7 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) else ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; - return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL)); + return ice_update_vsi(hw, vsi_handle, ctx, NULL); } /** @@ -939,20 +939,20 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) * * allocates or free a VSI list resource */ -static enum ice_status +static int ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type, enum ice_adminq_opc opc) { struct ice_aqc_alloc_free_res_elem *sw_buf; struct ice_aqc_res_elem *vsi_ele; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(sw_buf, elem, 1); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); if (lkup_type == ICE_SW_LKUP_MAC || @@ -966,7 +966,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); } else { - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_alloc_free_vsi_list_exit; } @@ -998,17 +998,17 @@ ice_aq_alloc_free_vsi_list_exit: * * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware */ -enum ice_status +int ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aq_desc desc; - enum ice_status status; + int status; if (opc != ice_aqc_opc_add_sw_rules && opc != ice_aqc_opc_update_sw_rules && opc != ice_aqc_opc_remove_sw_rules) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); @@ -1018,7 +1018,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, 
cd); if (opc != ice_aqc_opc_add_sw_rules && hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; return status; } @@ -1032,7 +1032,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, * * Add(0x0290) */ -static enum ice_status +static int ice_aq_add_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 num_recipes, struct ice_sq_cd *cd) @@ -1069,18 +1069,18 @@ ice_aq_add_recipe(struct ice_hw *hw, * The caller must supply enough space in s_recipe_list to hold all possible * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES. */ -static enum ice_status +static int ice_aq_get_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd) { struct ice_aqc_add_get_recipe *cmd; struct ice_aq_desc desc; - enum ice_status status; u16 buf_size; + int status; if (*num_recipes != ICE_MAX_NUM_RECIPES) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.add_get_recipe; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe); @@ -1104,7 +1104,7 @@ ice_aq_get_recipe(struct ice_hw *hw, * @cd: pointer to command details structure or NULL * Recipe to profile association (0x0291) */ -static enum ice_status +static int ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd) { @@ -1130,13 +1130,13 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * @cd: pointer to command details structure or NULL * Associate profile ID with given recipe (0x0293) */ -static enum ice_status +static int ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd) { struct ice_aqc_recipe_to_profile *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.recipe_to_profile; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile); @@ -1154,16 +1154,16 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * @hw: pointer to the hardware structure * @rid: recipe ID returned as response to AQ call */ -static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid) +static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) { struct ice_aqc_alloc_free_res_elem *sw_buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(sw_buf, elem, 1); sw_buf = kzalloc(buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << @@ -1230,7 +1230,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, * bookkeeping so that we have a current list of all the recipes that are * programmed in the firmware. 
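Note: ice_aq_sw_rules(), whose hunk straddles the lines above, makes two decisions worth isolating — unexpected opcodes fail -EINVAL up front, and a firmware ENOENT on an update or remove is surfaced as -ENOENT (was ICE_ERR_DOES_NOT_EXIST). A standalone sketch with illustrative opcode names:

#include <errno.h>
#include <stdio.h>

enum opc { OPC_ADD, OPC_UPDATE, OPC_REMOVE, OPC_OTHER };

static int aq_sw_rules_sketch(enum opc opc, int fw_rc_enoent)
{
	if (opc != OPC_ADD && opc != OPC_UPDATE && opc != OPC_REMOVE)
		return -EINVAL; /* was ICE_ERR_PARAM */
	if (opc != OPC_ADD && fw_rc_enoent)
		return -ENOENT; /* was ICE_ERR_DOES_NOT_EXIST */
	return 0;
}

int main(void)
{
	printf("%d\n", aq_sw_rules_sketch(OPC_OTHER, 0));  /* -EINVAL */
	printf("%d\n", aq_sw_rules_sketch(OPC_REMOVE, 1)); /* -ENOENT */
	return 0;
}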
*/ -static enum ice_status +static int ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, bool *refresh_required) { @@ -1238,16 +1238,16 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, struct ice_aqc_recipe_data_elem *tmp; u16 num_recps = ICE_MAX_NUM_RECIPES; struct ice_prot_lkup_ext *lkup_exts; - enum ice_status status; u8 fv_word_idx = 0; u16 sub_recps; + int status; bitmap_zero(result_bm, ICE_MAX_FV_WORDS); /* we need a buffer big enough to accommodate all the recipes */ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; tmp[0].recipe_indx = rid; status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL); @@ -1284,7 +1284,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), GFP_KERNEL); if (!rg_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } @@ -1364,7 +1364,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), GFP_KERNEL); if (!recps[rid].root_buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } @@ -1407,19 +1407,19 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, /* ice_get_initial_sw_cfg - Get initial port and default VSI data * @hw: pointer to the hardware structure */ -enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) +int ice_get_initial_sw_cfg(struct ice_hw *hw) { struct ice_aqc_get_sw_cfg_resp_elem *rbuf; - enum ice_status status; u16 req_desc = 0; u16 num_elems; + int status; u16 i; rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL); if (!rbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Multiple calls to ice_aq_get_sw_cfg may be required * to get all the switch configuration information. The need @@ -1670,7 +1670,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, * Create a large action to hold software marker and update the switch rule * entry pointed by m_ent with newly created large action */ -static enum ice_status +static int ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, u16 sw_marker, u16 l_id) { @@ -1681,14 +1681,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, * 3. 
GENERIC VALUE action to hold the software marker ID */ const u16 num_lg_acts = 3; - enum ice_status status; u16 lg_act_size; u16 rules_size; + int status; u32 act; u16 id; if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; /* Create two back-to-back switch rules and submit them to the HW using * one memory buffer: @@ -1699,7 +1699,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL); if (!lg_act) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); @@ -1808,19 +1808,19 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, * Call AQ command to add a new switch rule or update existing switch rule * using the given VSI list ID */ -static enum ice_status +static int ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, enum ice_sw_lkup_type lkup_type) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; u16 s_rule_size; u16 rule_type; + int status; int i; if (!num_vsi) - return ICE_ERR_PARAM; + return -EINVAL; if (lkup_type == ICE_SW_LKUP_MAC || lkup_type == ICE_SW_LKUP_MAC_VLAN || @@ -1834,15 +1834,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; else - return ICE_ERR_PARAM; + return -EINVAL; s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_vsi; i++) { if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto exit; } /* AQ call requires hw_vsi_id(s) */ @@ -1869,11 +1869,11 @@ exit: * @vsi_list_id: stores the ID of the VSI list to be created * @lkup_type: switch rule filter's lookup type */ -static enum ice_status +static int ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) { - enum ice_status status; + int status; status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, ice_aqc_opc_alloc_res); @@ -1895,7 +1895,7 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, * to the corresponding filter management list to track this switch rule * and VSI mapping */ -static enum ice_status +static int ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { @@ -1903,16 +1903,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_aqc_sw_rules_elem *s_rule; enum ice_sw_lkup_type l_type; struct ice_sw_recipe *recp; - enum ice_status status; + int status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), GFP_KERNEL); if (!fm_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_create_pkt_fwd_rule_exit; } @@ -1959,16 +1959,16 @@ ice_create_pkt_fwd_rule_exit: * Call AQ command to update a previously created switch rule with a * VSI list ID */ -static enum ice_status +static int ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status 
status; + int status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); @@ -1988,13 +1988,13 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) * * Updates unicast switch filter rules based on VEB/VEPA mode */ -enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) +int ice_update_sw_rule_bridge_mode(struct ice_hw *hw) { struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; - enum ice_status status = 0; struct list_head *rule_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ + int status = 0; rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; @@ -2044,24 +2044,24 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) * Add the new VSI to the previously created VSI list set * using the update switch rule command */ -static enum ice_status +static int ice_add_update_vsi_list(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_entry, struct ice_fltr_info *cur_fltr, struct ice_fltr_info *new_fltr) { - enum ice_status status = 0; u16 vsi_list_id = 0; + int status = 0; if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if ((new_fltr->fltr_act == ICE_FWD_TO_Q || new_fltr->fltr_act == ICE_FWD_TO_QGRP) && (cur_fltr->fltr_act == ICE_FWD_TO_VSI || cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { /* Only one entry existed in the mapping and it was not already @@ -2073,7 +2073,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; vsi_handle_arr[0] = cur_fltr->vsi_handle; vsi_handle_arr[1] = new_fltr->vsi_handle; @@ -2101,7 +2101,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, vsi_list_id); if (!m_entry->vsi_list_info) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* If this entry was large action then the large action needs * to be updated to point to FWD to VSI list @@ -2116,7 +2116,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, enum ice_adminq_opc opcode; if (!m_entry->vsi_list_info) - return ICE_ERR_CFG; + return -EIO; /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) @@ -2209,7 +2209,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, * * Adds or updates the rule lists for a given recipe */ -static enum ice_status +static int ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, struct ice_fltr_list_entry *f_entry) { @@ -2217,10 +2217,10 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *new_fltr, *cur_fltr; struct ice_fltr_mgmt_list_entry *m_entry; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -2255,18 +2255,18 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, * The VSI list should be emptied before this function is called to remove the * VSI list. 
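Note: ice_add_update_vsi_list() above maps its outcomes onto three distinct errnos — -EOPNOTSUPP (was ICE_ERR_NOT_IMPL) for forward-to-queue combinations it does not handle, -EEXIST (was ICE_ERR_ALREADY_EXISTS) when the VSI being added is already present, and 0 with the list created or extended. A minimal sketch:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int add_update_vsi_list_sketch(bool fwd_to_queue, bool same_vsi)
{
	if (fwd_to_queue)
		return -EOPNOTSUPP; /* was ICE_ERR_NOT_IMPL */
	if (same_vsi)
		return -EEXIST;     /* was ICE_ERR_ALREADY_EXISTS */
	return 0; /* real code builds or extends a VSI list here */
}

int main(void)
{
	printf("%d %d\n", add_update_vsi_list_sketch(true, false),
	       add_update_vsi_list_sketch(false, true));
	return 0;
}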
*/ -static enum ice_status +static int ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, enum ice_sw_lkup_type lkup_type) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; u16 s_rule_size; + int status; s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); @@ -2288,21 +2288,21 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, * @fm_list: filter management entry for which the VSI list management needs to * be done */ -static enum ice_status +static int ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, struct ice_fltr_mgmt_list_entry *fm_list) { enum ice_sw_lkup_type lkup_type; - enum ice_status status = 0; u16 vsi_list_id; + int status = 0; if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || fm_list->vsi_count == 0) - return ICE_ERR_PARAM; + return -EINVAL; /* A rule with the VSI being removed does not exist */ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; lkup_type = fm_list->fltr_info.lkup_type; vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; @@ -2324,7 +2324,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, ICE_MAX_VSI); if (!ice_is_vsi_valid(hw, rem_vsi_handle)) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Make sure VSI list is empty before removing it below */ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, @@ -2375,19 +2375,19 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, * @recp_id: recipe ID for which the rule needs to removed * @f_entry: rule entry containing filter information */ -static enum ice_status +static int ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, struct ice_fltr_list_entry *f_entry) { struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *list_elem; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; bool remove_rule = false; u16 vsi_handle; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -2395,14 +2395,14 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, mutex_lock(rule_lock); list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info); if (!list_elem) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { remove_rule = true; } else if (!list_elem->vsi_list_info) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } else if (list_elem->vsi_list_info->ref_cnt > 1) { /* a ref_cnt > 1 indicates that the vsi_list is being @@ -2435,7 +2435,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); if (!s_rule) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto exit; } @@ -2590,7 +2590,7 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle) * check for duplicates in this case, removing duplicates from a given * list should be taken care of in the caller of this function. 
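Note: ice_remove_rule_internal() above reports -ENOENT (was ICE_ERR_DOES_NOT_EXIST) for two distinct lookup failures — the rule is not in the filter list at all, or it forwards to a VSI list that has no list info behind it. A sketch of just that decision:

#include <errno.h>
#include <stdio.h>

static int remove_rule_sketch(int have_elem, int fwd_to_vsi_list,
			      int have_vsi_list_info)
{
	if (!have_elem)
		return -ENOENT; /* rule never installed */
	if (fwd_to_vsi_list && !have_vsi_list_info)
		return -ENOENT; /* dangling VSI-list entry */
	return 0; /* real code then removes or updates the rule */
}

int main(void)
{
	printf("%d %d\n", remove_rule_sketch(0, 0, 0),
	       remove_rule_sketch(1, 1, 0)); /* -ENOENT twice */
	return 0;
}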
*/ -enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) +int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_aqc_sw_rules_elem *s_rule, *r_iter; struct ice_fltr_list_entry *m_list_itr; @@ -2598,12 +2598,12 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) u16 total_elem_left, s_rule_size; struct ice_switch_info *sw; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; u16 num_unicast = 0; + int status = 0; u8 elem_sent; if (!m_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; s_rule = NULL; sw = hw->switch_info; @@ -2616,23 +2616,23 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) m_list_itr->fltr_info.flag = ICE_FLTR_TX; vsi_handle = m_list_itr->fltr_info.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; /* update the src in case it is VSI num */ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) - return ICE_ERR_PARAM; + return -EINVAL; m_list_itr->fltr_info.src = hw_vsi_id; if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC || is_zero_ether_addr(add)) - return ICE_ERR_PARAM; + return -EINVAL; if (is_unicast_ether_addr(add) && !hw->ucast_shared) { /* Don't overwrite the unicast address */ mutex_lock(rule_lock); if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, &m_list_itr->fltr_info)) { mutex_unlock(rule_lock); - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; } mutex_unlock(rule_lock); num_unicast++; @@ -2660,7 +2660,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, GFP_KERNEL); if (!s_rule) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_add_mac_exit; } @@ -2710,7 +2710,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), GFP_KERNEL); if (!fm_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_add_mac_exit; } fm_entry->fltr_info = *f_info; @@ -2737,7 +2737,7 @@ ice_add_mac_exit: * @hw: pointer to the hardware structure * @f_entry: filter entry containing one VLAN information */ -static enum ice_status +static int ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { struct ice_switch_info *sw = hw->switch_info; @@ -2746,10 +2746,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) enum ice_sw_lkup_type lkup_type; u16 vsi_list_id = 0, vsi_handle; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -2757,10 +2757,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) /* VLAN ID should only be 12 bits */ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) - return ICE_ERR_PARAM; + return -EINVAL; if (new_fltr->src_id != ICE_SRC_ID_VSI) - return ICE_ERR_PARAM; + return -EINVAL; new_fltr->src = new_fltr->fwd_id.hw_vsi_id; lkup_type = new_fltr->lkup_type; @@ -2799,7 +2799,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr); if (!v_list_itr) { - status = 
ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } /* reuse VSI list for new rule and increment ref_cnt */ @@ -2835,7 +2835,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) if (v_list_itr->vsi_count > 1 && v_list_itr->vsi_list_info->ref_cnt > 1) { ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); - status = ICE_ERR_CFG; + status = -EIO; goto exit; } @@ -2845,7 +2845,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) /* A rule already exists with the new VSI being added */ if (cur_handle == vsi_handle) { - status = ICE_ERR_ALREADY_EXISTS; + status = -EEXIST; goto exit; } @@ -2890,16 +2890,16 @@ exit: * @hw: pointer to the hardware structure * @v_list: list of VLAN entries and forwarding information */ -enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) +int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) { struct ice_fltr_list_entry *v_list_itr; if (!v_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry(v_list_itr, v_list, list_entry) { if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) - return ICE_ERR_PARAM; + return -EINVAL; v_list_itr->fltr_info.flag = ICE_FLTR_TX; v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr); if (v_list_itr->status) @@ -2917,13 +2917,12 @@ enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) * the filter list with the necessary fields (including flags to * indicate Tx or Rx rules). */ -enum ice_status -ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) +int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) { struct ice_fltr_list_entry *em_list_itr; if (!em_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry(em_list_itr, em_list, list_entry) { enum ice_sw_lkup_type l_type = @@ -2931,7 +2930,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && l_type != ICE_SW_LKUP_ETHERTYPE) - return ICE_ERR_PARAM; + return -EINVAL; em_list_itr->status = ice_add_rule_internal(hw, l_type, em_list_itr); @@ -2946,13 +2945,12 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) * @hw: pointer to the hardware structure * @em_list: list of ethertype or ethertype MAC entries */ -enum ice_status -ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) +int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) { struct ice_fltr_list_entry *em_list_itr, *tmp; if (!em_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) { enum ice_sw_lkup_type l_type = @@ -2960,7 +2958,7 @@ ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && l_type != ICE_SW_LKUP_ETHERTYPE) - return ICE_ERR_PARAM; + return -EINVAL; em_list_itr->status = ice_remove_rule_internal(hw, l_type, em_list_itr); @@ -3020,18 +3018,17 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head) * add filter rule to set/unset given VSI as default VSI for the switch * (represented by swid) */ -enum ice_status -ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) +int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) { struct ice_aqc_sw_rules_elem *s_rule; struct ice_fltr_info f_info; enum ice_adminq_opc opcode; - enum ice_status status; u16 s_rule_size; u16 hw_vsi_id; + int status; if 
(!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : @@ -3039,7 +3036,7 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memset(&f_info, 0, sizeof(f_info)); @@ -3137,18 +3134,18 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id, * This function removes either a MAC filter rule or a specific VSI from a * VSI list for a multicast MAC address. * - * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by - * ice_add_mac. Caller should be aware that this call will only work if all - * the entries passed into m_list were added previously. It will not attempt to - * do a partial remove of entries that were found. + * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should + * be aware that this call will only work if all the entries passed into m_list + * were added previously. It will not attempt to do a partial remove of entries + * that were found. */ -enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) +int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_fltr_list_entry *list_itr, *tmp; struct mutex *rule_lock; /* Lock to protect filter rule list */ if (!m_list) - return ICE_ERR_PARAM; + return -EINVAL; rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { @@ -3157,11 +3154,11 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) u16 vsi_handle; if (l_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; vsi_handle = list_itr->fltr_info.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; list_itr->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); @@ -3174,7 +3171,7 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, &list_itr->fltr_info)) { mutex_unlock(rule_lock); - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } mutex_unlock(rule_lock); } @@ -3192,19 +3189,18 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) * @hw: pointer to the hardware structure * @v_list: list of VLAN entries and forwarding information */ -enum ice_status -ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) +int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) { struct ice_fltr_list_entry *v_list_itr, *tmp; if (!v_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; if (l_type != ICE_SW_LKUP_VLAN) - return ICE_ERR_PARAM; + return -EINVAL; v_list_itr->status = ice_remove_rule_internal(hw, ICE_SW_LKUP_VLAN, v_list_itr); @@ -3242,7 +3238,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) * fltr_info.fwd_id fields. These are set such that later logic can * extract which VSI to remove the fltr from, and pass on that information. 
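
A quick reader's map for the conversion running through every hunk in this file: the driver-private ice_status values are replaced one-for-one by standard errno codes. The helper below does not exist in the driver (the point of this series is to delete the translation layer); it only tabulates the substitutions visible in this patch:

static int ice_status_to_errno_sketch(enum ice_status err)
{
	switch (err) {
	case ICE_ERR_PARAM:		/* bad argument */
	case ICE_ERR_BAD_PTR:		return -EINVAL;
	case ICE_ERR_NO_MEMORY:		return -ENOMEM;
	case ICE_ERR_ALREADY_EXISTS:	return -EEXIST;
	case ICE_ERR_DOES_NOT_EXIST:	return -ENOENT;
	case ICE_ERR_MAX_LIMIT:		return -ENOSPC;	/* resource exhausted */
	case ICE_ERR_NOT_IMPL:		return -EOPNOTSUPP;
	case ICE_ERR_CFG:		/* generic hardware/config failure */
	case ICE_ERR_OUT_OF_RANGE:
	default:			return -EIO;
	}
}
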
*/ -static enum ice_status +static int ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct list_head *vsi_list_head, struct ice_fltr_info *fi) @@ -3254,7 +3250,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, */ tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; tmp->fltr_info = *fi; @@ -3285,17 +3281,17 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, * Note that this means all entries in vsi_list_head must be explicitly * deallocated by the caller when done with list. */ -static enum ice_status +static int ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct list_head *lkup_list_head, struct list_head *vsi_list_head) { struct ice_fltr_mgmt_list_entry *fm_entry; - enum ice_status status = 0; + int status = 0; /* check to make sure VSI ID is valid and within boundary */ if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry(fm_entry, lkup_list_head, list_entry) { if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) @@ -3349,9 +3345,8 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) * @recp_id: recipe ID for which the rule needs to removed * @v_list: list of promisc entries */ -static enum ice_status -ice_remove_promisc(struct ice_hw *hw, u8 recp_id, - struct list_head *v_list) +static int +ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list) { struct ice_fltr_list_entry *v_list_itr, *tmp; @@ -3371,7 +3366,7 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id, * @promisc_mask: mask of promiscuous config bits to clear * @vid: VLAN ID to clear VLAN promiscuous */ -enum ice_status +int ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { @@ -3381,11 +3376,11 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, struct ice_fltr_mgmt_list_entry *itr; struct list_head *rule_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; u8 recipe_id; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) recipe_id = ICE_SW_LKUP_PROMISC_VLAN; @@ -3444,20 +3439,20 @@ free_fltr_list: * @promisc_mask: mask of promiscuous config bits * @vid: VLAN ID to set VLAN promiscuous */ -enum ice_status +int ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR }; struct ice_fltr_list_entry f_list_entry; struct ice_fltr_info new_fltr; - enum ice_status status = 0; bool is_tx_fltr; + int status = 0; u16 hw_vsi_id; int pkt_type; u8 recipe_id; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); memset(&new_fltr, 0, sizeof(new_fltr)); @@ -3558,7 +3553,7 @@ set_promisc_exit: * * Configure VSI with all associated VLANs to given promiscuous mode(s) */ -enum ice_status +int ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc) { @@ -3567,8 +3562,8 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, struct list_head vsi_list_head; struct list_head *vlan_head; struct mutex *vlan_lock; /* Lock to protect filter rule list */ - enum ice_status status; u16 vlan_id; + int status; INIT_LIST_HEAD(&vsi_list_head); vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; @@ -3616,7 +3611,7 
@@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, struct list_head *rule_head; struct ice_fltr_list_entry *tmp; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status; + int status; INIT_LIST_HEAD(&remove_list_head); rule_lock = &sw->recp_list[lkup].filt_rule_lock; @@ -3681,19 +3676,19 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) * @num_items: number of entries requested for FD resource type * @counter_id: counter index returned by AQ call */ -enum ice_status +int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; /* Allocate resource */ buf_len = struct_size(buf, elem, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & @@ -3719,19 +3714,19 @@ exit: * @num_items: number of entries to be freed for FD resource type * @counter_id: counter ID resource which needs to be freed */ -enum ice_status +int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; /* Free resource */ buf_len = struct_size(buf, elem, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & @@ -3796,10 +3791,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { * ice_find_recp - find a recipe * @hw: pointer to the hardware structure * @lkup_exts: extension sequence to match + * @tun_type: type of recipe tunnel * * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. */ -static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) +static u16 +ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, + enum ice_sw_tunnel_type tun_type) { bool refresh_required = true; struct ice_sw_recipe *recp; @@ -3860,8 +3858,9 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) } /* If for "i"th recipe the found was never set to false * then it means we found our match + * Also tun type of recipe needs to be checked */ - if (found) + if (found && recp[i].tun_type == tun_type) return i; /* Return the recipe ID */ } } @@ -3937,7 +3936,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, * and start grouping them in 4-word groups. Each group makes up one * recipe. */ -static enum ice_status +static int ice_create_first_fit_recp_def(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, struct list_head *rg_list, @@ -3961,7 +3960,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw, sizeof(*entry), GFP_KERNEL); if (!entry) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; list_add(&entry->l_entry, rg_list); grp = &entry->r_group; (*recp_cnt)++; @@ -3987,7 +3986,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw, * Helper function to fill in the field vector indices for protocol-offset * pairs. These indexes are then ultimately programmed into a recipe. 
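
Buried in the errno churn above is a functional change worth calling out: ice_find_recp() now takes the tunnel type and declines to reuse a recipe whose tun_type differs, even when the extraction words match. A minimal sketch of the new predicate, using the names from the hunk:

/* found: all lookup words of recp matched, as computed inside
 * ice_find_recp(); tun_type: the tunnel type being requested.
 */
static bool
ice_recp_is_match(const struct ice_sw_recipe *recp, bool found,
		  enum ice_sw_tunnel_type tun_type)
{
	return found && recp->tun_type == tun_type;
}
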
*/ -static enum ice_status +static int ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, struct list_head *rg_list) { @@ -4029,7 +4028,7 @@ ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, * invalid pair */ if (!found) - return ICE_ERR_PARAM; + return -EINVAL; } } @@ -4111,7 +4110,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, * @rm: recipe management list entry * @profiles: bitmap of profiles that will be associated. */ -static enum ice_status +static int ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, unsigned long *profiles) { @@ -4119,11 +4118,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, struct ice_aqc_recipe_data_elem *tmp; struct ice_aqc_recipe_data_elem *buf; struct ice_recp_grp_entry *entry; - enum ice_status status; u16 free_res_idx; u16 recipe_count; u8 chain_idx; u8 recps = 0; + int status; /* When more than one recipe are required, another recipe is needed to * chain them together. Matching a tunnel metadata ID takes up one of @@ -4139,22 +4138,22 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, if (rm->n_grp_count > 1) { if (rm->n_grp_count > free_res_idx) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; rm->n_grp_count++; } if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), GFP_KERNEL); if (!buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_mem; } @@ -4214,7 +4213,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, */ if (chain_idx >= ICE_MAX_FV_WORDS) { ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); - status = ICE_ERR_MAX_LIMIT; + status = -ENOSPC; goto err_unroll; } @@ -4245,7 +4244,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, memcpy(buf[0].recipe_bitmap, rm->r_bitmap, sizeof(buf[0].recipe_bitmap)); } else { - status = ICE_ERR_BAD_PTR; + status = -EINVAL; goto err_unroll; } /* Applicable only for ROOT_RECIPE, set the fwd_priority for @@ -4281,7 +4280,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, sizeof(*last_chain_entry), GFP_KERNEL); if (!last_chain_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } last_chain_entry->rid = rid; @@ -4316,7 +4315,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, sizeof(buf[recps].recipe_bitmap)); } else { - status = ICE_ERR_BAD_PTR; + status = -EINVAL; goto err_unroll; } buf[recps].content.act_ctrl_fwd_priority = rm->priority; @@ -4350,7 +4349,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, } if (!idx_found) { - status = ICE_ERR_OUT_OF_RANGE; + status = -EIO; goto err_unroll; } @@ -4403,12 +4402,12 @@ err_mem: * @rm: recipe management list entry * @lkup_exts: lookup elements */ -static enum ice_status +static int ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, struct ice_prot_lkup_ext *lkup_exts) { - enum ice_status status; u8 recp_count = 0; + int status; rm->n_grp_count = 0; @@ -4438,21 +4437,21 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, * @bm: bitmap of field vectors to consider * @fv_list: pointer to a list that holds the returned field vectors */ -static enum ice_status +static int ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, 
u16 lkups_cnt, unsigned long *bm, struct list_head *fv_list) { - enum ice_status status; u8 *prot_ids; + int status; u16 i; prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL); if (!prot_ids) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < lkups_cnt; i++) if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) { - status = ICE_ERR_CFG; + status = -EIO; goto free_mem; } @@ -4489,7 +4488,7 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) * @rinfo: other information regarding the rule e.g. priority and action info * @lkup_exts: lookup word structure */ -static enum ice_status +static int ice_add_special_words(struct ice_adv_rule_info *rinfo, struct ice_prot_lkup_ext *lkup_exts) { @@ -4506,7 +4505,7 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo, lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF; lkup_exts->field_mask[word] = mask; } else { - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; } } @@ -4557,7 +4556,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, * @rinfo: other information regarding the rule e.g. priority and action info * @rid: return the recipe ID of the recipe created */ -static enum ice_status +static int ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) { @@ -4568,16 +4567,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_sw_fv_list_entry *fvit; struct ice_recp_grp_entry *r_tmp; struct ice_sw_fv_list_entry *tmp; - enum ice_status status = 0; struct ice_sw_recipe *rm; + int status = 0; u8 i; if (!lkups_cnt) - return ICE_ERR_PARAM; + return -EINVAL; lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL); if (!lkup_exts) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Determine the number of words to be matched and if it exceeds a * recipe's restrictions @@ -4586,20 +4585,20 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 count; if (lkups[i].type >= ICE_PROTOCOL_LAST) { - status = ICE_ERR_CFG; + status = -EIO; goto err_free_lkup_exts; } count = ice_fill_valid_words(&lkups[i], lkup_exts); if (!count) { - status = ICE_ERR_CFG; + status = -EIO; goto err_free_lkup_exts; } } rm = kzalloc(sizeof(*rm), GFP_KERNEL); if (!rm) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_free_lkup_exts; } @@ -4651,11 +4650,12 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } /* Look for a recipe which matches our requested fv / mask list */ - *rid = ice_find_recp(hw, lkup_exts); + *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); if (*rid < ICE_MAX_NUM_RECIPES) /* Success if found a recipe that match the existing criteria */ goto err_unroll; + rm->tun_type = rinfo->tun_type; /* Recipe we need does not exist, add a recipe */ status = ice_add_sw_recipe(hw, rm, profiles); if (status) @@ -4844,7 +4844,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * @pkt_len: packet length of dummy packet * @offsets: offset info for the dummy packet */ -static enum ice_status +static int ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_aqc_sw_rules_elem *s_rule, const u8 *dummy_pkt, u16 pkt_len, @@ -4878,7 +4878,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, } /* this should never happen in a correct calling sequence */ if (!found) - return ICE_ERR_PARAM; + return -EINVAL; switch (lkups[i].type) { case ICE_MAC_OFOS: @@ -4915,12 +4915,12 @@ 
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, len = sizeof(struct ice_udp_tnl_hdr); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } /* the length should be a word multiple */ if (len % ICE_BYTES_PER_WORD) - return ICE_ERR_CFG; + return -EIO; /* We have the offset to the header start, the length, the * caller's header values and mask. Use this information to @@ -4950,7 +4950,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * @pkt: dummy packet to fill in * @offsets: offset info for the dummy packet */ -static enum ice_status +static int ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, u8 *pkt, const struct ice_dummy_pkt_offsets *offsets) { @@ -4958,11 +4958,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, switch (tun_type) { case ICE_SW_TUN_VXLAN: + if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN)) + return -EIO; + break; case ICE_SW_TUN_GENEVE: - if (!ice_get_open_tunnel_port(hw, &open_port)) - return ICE_ERR_CFG; + if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE)) + return -EIO; break; - default: /* Nothing needs to be done for this tunnel type */ return 0; @@ -4982,7 +4984,7 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, } } - return ICE_ERR_CFG; + return -EIO; } /** @@ -5047,25 +5049,25 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * Add the new VSI to the previously created VSI list set * using the update switch rule command */ -static enum ice_status +static int ice_adv_add_update_vsi_list(struct ice_hw *hw, struct ice_adv_fltr_mgmt_list_entry *m_entry, struct ice_adv_rule_info *cur_fltr, struct ice_adv_rule_info *new_fltr) { - enum ice_status status; u16 vsi_list_id = 0; + int status; if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP || cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) && (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI || cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { /* Only one entry existed in the mapping and it was not already @@ -5078,7 +5080,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (cur_fltr->sw_act.fwd_id.hw_vsi_id == new_fltr->sw_act.fwd_id.hw_vsi_id) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; @@ -5111,7 +5113,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, u16 vsi_handle = new_fltr->sw_act.vsi_handle; if (!m_entry->vsi_list_info) - return ICE_ERR_CFG; + return -EIO; /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) @@ -5153,7 +5155,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, * rinfo describes other information related to this rule such as forwarding * IDs, priority of this rule, etc. 
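
The ice_fill_adv_packet_tun() hunk just above also splits the tunnel-port lookup per type: ice_get_open_tunnel_port() now takes TNL_VXLAN or TNL_GENEVE, so each tunnel flavor resolves its own open UDP port and fails with -EIO when none is configured. The resulting control flow, restated as a fragment (hw, tun_type and open_port as in the function above):

switch (tun_type) {
case ICE_SW_TUN_VXLAN:
	if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
		return -EIO;	/* no VXLAN port currently open */
	break;
case ICE_SW_TUN_GENEVE:
	if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
		return -EIO;	/* no GENEVE port currently open */
	break;
default:
	/* nothing needs to be done for other tunnel types */
	return 0;
}
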
*/ -enum ice_status +int ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, struct ice_rule_query_data *added_entry) @@ -5164,10 +5166,10 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_aqc_sw_rules_elem *s_rule = NULL; struct list_head *rule_head; struct ice_switch_info *sw; - enum ice_status status; const u8 *pkt = NULL; u16 word_cnt; u32 act = 0; + int status; u8 q_rgn; /* Initialize profile to result index bitmap */ @@ -5177,7 +5179,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } if (!lkups_cnt) - return ICE_ERR_PARAM; + return -EINVAL; /* get # of words we need to match */ word_cnt = 0; @@ -5191,13 +5193,13 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) - return ICE_ERR_PARAM; + return -EINVAL; /* make sure that we can locate a dummy packet */ ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len, &pkt_offsets); if (!pkt) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_ice_add_adv_rule; } @@ -5205,11 +5207,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) - return ICE_ERR_CFG; + return -EIO; vsi_handle = rinfo->sw_act.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) rinfo->sw_act.fwd_id.hw_vsi_id = @@ -5243,7 +5245,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; if (!rinfo->flags_info.act_valid) { act |= ICE_SINGLE_ACT_LAN_ENABLE; act |= ICE_SINGLE_ACT_LB_ENABLE; @@ -5277,7 +5279,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, ICE_SINGLE_ACT_VALID_BIT; break; default: - status = ICE_ERR_CFG; + status = -EIO; goto err_ice_add_adv_rule; } @@ -5322,14 +5324,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, sizeof(struct ice_adv_fltr_mgmt_list_entry), GFP_KERNEL); if (!adv_fltr) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_adv_rule; } adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups, lkups_cnt * sizeof(*lkups), GFP_KERNEL); if (!adv_fltr->lkups) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_adv_rule; } @@ -5372,12 +5374,12 @@ err_ice_add_adv_rule: * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. * It is required to pass valid VSI handle. 
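
With ice_add_adv_rule() now returning int, callers can branch on well-known errno values without a translation helper. A hypothetical caller fragment (lkups, rinfo and added_entry assumed populated as in the signature above):

int err;

err = ice_add_adv_rule(hw, lkups, lkups_cnt, &rinfo, &added_entry);
if (err == -EEXIST) {
	/* duplicate rule: often benign, handle as the caller prefers */
} else if (err) {
	return err;	/* already a standard errno, propagate as-is */
}
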
*/ -static enum ice_status +static int ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, struct list_head *list_head) { struct ice_fltr_mgmt_list_entry *itr; - enum ice_status status = 0; + int status = 0; u16 hw_vsi_id; if (list_empty(list_head)) @@ -5426,22 +5428,22 @@ end: * @fm_list: filter management entry for which the VSI list management needs to * be done */ -static enum ice_status +static int ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, struct ice_adv_fltr_mgmt_list_entry *fm_list) { struct ice_vsi_list_map_info *vsi_list_info; enum ice_sw_lkup_type lkup_type; - enum ice_status status; u16 vsi_list_id; + int status; if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST || fm_list->vsi_count == 0) - return ICE_ERR_PARAM; + return -EINVAL; /* A rule with the VSI being removed does not exist */ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; lkup_type = ICE_SW_LKUP_LAST; vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id; @@ -5461,7 +5463,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, ICE_MAX_VSI); if (!ice_is_vsi_valid(hw, rem_vsi_handle)) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Make sure VSI list is empty before removing it below */ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, @@ -5525,27 +5527,27 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, * header. rinfo describes other information related to this rule such as * forwarding IDs, priority of this rule, etc. */ -static enum ice_status +static int ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo) { struct ice_adv_fltr_mgmt_list_entry *list_elem; struct ice_prot_lkup_ext lkup_exts; - enum ice_status status = 0; bool remove_rule = false; struct mutex *rule_lock; /* Lock to protect filter rule list */ u16 i, rid, vsi_handle; + int status = 0; memset(&lkup_exts, 0, sizeof(lkup_exts)); for (i = 0; i < lkups_cnt; i++) { u16 count; if (lkups[i].type >= ICE_PROTOCOL_LAST) - return ICE_ERR_CFG; + return -EIO; count = ice_fill_valid_words(&lkups[i], &lkup_exts); if (!count) - return ICE_ERR_CFG; + return -EIO; } /* Create any special protocol/offset pairs, such as looking at tunnel @@ -5555,10 +5557,10 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) return status; - rid = ice_find_recp(hw, &lkup_exts); + rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type); /* If did not find a recipe that match the existing criteria */ if (rid == ICE_MAX_NUM_RECIPES) - return ICE_ERR_PARAM; + return -EINVAL; rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock; list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); @@ -5590,7 +5592,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; s_rule->pdata.lkup_tx_rx.act = 0; s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(list_elem->rule_info.fltr_rule_id); @@ -5598,7 +5600,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, rule_buf_sz, 1, ice_aqc_opc_remove_sw_rules, NULL); - if (!status || status == ICE_ERR_DOES_NOT_EXIST) { + if (!status || status == -ENOENT) { struct ice_switch_info *sw = hw->switch_info; 
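/* Note the tolerant condition a few lines up: "!status || status ==
 * -ENOENT". If firmware reports that the rule slated for removal no
 * longer exists, the driver still tears down its local bookkeeping,
 * because an absent rule is exactly the requested end state.
 */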
mutex_lock(rule_lock); @@ -5623,7 +5625,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * the remove_entry parameter. This function will remove rule for a given * vsi_handle with a given rule_id which is passed as parameter in remove_entry */ -enum ice_status +int ice_rem_adv_rule_by_id(struct ice_hw *hw, struct ice_rule_query_data *remove_entry) { @@ -5634,7 +5636,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, sw = hw->switch_info; if (!sw->recp_list[remove_entry->rid].recp_created) - return ICE_ERR_PARAM; + return -EINVAL; list_head = &sw->recp_list[remove_entry->rid].filt_rules; list_for_each_entry(list_itr, list_head, list_entry) { if (list_itr->rule_info.fltr_rule_id == @@ -5646,7 +5648,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, } } /* either list is empty or unable to find rule */ - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -5656,10 +5658,10 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, * * Replays filters for requested VSI via vsi_handle. */ -enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) { struct ice_switch_info *sw = hw->switch_info; - enum ice_status status = 0; + int status = 0; u8 i; for (i = 0; i < ICE_SW_LKUP_LAST; i++) { diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index d8a38906f16f..d8334beaaa8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -256,7 +256,7 @@ struct ice_vsi_list_map_info { struct ice_fltr_list_entry { struct list_head list_entry; - enum ice_status status; + int status; struct ice_fltr_info fltr_info; }; @@ -302,75 +302,69 @@ enum ice_promisc_flags { }; /* VSI related commands */ -enum ice_status +int ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); -enum ice_status +int ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd); -enum ice_status +int ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); void ice_clear_all_vsi_ctx(struct ice_hw *hw); /* Switch config */ -enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); +int ice_get_initial_sw_cfg(struct ice_hw *hw); -enum ice_status +int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id); -enum ice_status +int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id); /* Switch/bridge related commands */ -enum ice_status +int ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, struct ice_rule_query_data *added_entry); -enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); -enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); -enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); -enum ice_status -ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list); -enum ice_status -ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); -int -ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable); +int ice_update_sw_rule_bridge_mode(struct ice_hw *hw); +int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); +int ice_remove_vlan(struct ice_hw 
*hw, struct list_head *v_list); +int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); +int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle); bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle); +int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list); +int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); +int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); -enum ice_status -ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); -enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); /* Promisc/defport setup for VSIs */ -enum ice_status -ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction); -enum ice_status +int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction); +int ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); -enum ice_status +int ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); -enum ice_status +int ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc); -enum ice_status -ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); -enum ice_status +int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); +int ice_rem_adv_rule_by_id(struct ice_hw *hw, struct ice_rule_query_data *remove_entry); -enum ice_status ice_init_def_sw_recp(struct ice_hw *hw); +int ice_init_def_sw_recp(struct ice_hw *hw); u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); -enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); +int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); -enum ice_status +int ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index e5d23feb6701..e8aab664270a 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -74,21 +74,13 @@ static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner) return inner ? 
ICE_IPV6_IL : ICE_IPV6_OFOS; } -static enum ice_protocol_type -ice_proto_type_from_l4_port(bool inner, u16 ip_proto) +static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto) { - if (inner) { - switch (ip_proto) { - case IPPROTO_UDP: - return ICE_UDP_ILOS; - } - } else { - switch (ip_proto) { - case IPPROTO_TCP: - return ICE_TCP_IL; - case IPPROTO_UDP: - return ICE_UDP_OF; - } + switch (ip_proto) { + case IPPROTO_TCP: + return ICE_TCP_IL; + case IPPROTO_UDP: + return ICE_UDP_ILOS; } return 0; @@ -191,8 +183,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, i++; } - if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) { - list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto); + if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) && + hdr->l3_key.ip_proto == IPPROTO_UDP) { + list[i].type = ICE_UDP_OF; list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port; list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port; i++; @@ -317,7 +310,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, ICE_TC_FLWR_FIELD_SRC_L4_PORT)) { struct ice_tc_l4_hdr *l4_key, *l4_mask; - list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto); + list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto); l4_key = &headers->l4_key; l4_mask = &headers->l4_mask; @@ -405,9 +398,8 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) struct ice_hw *hw = &vsi->back->hw; struct ice_adv_lkup_elem *list; u32 flags = fltr->flags; - enum ice_status status; int lkups_cnt; - int ret = 0; + int ret; int i; if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) { @@ -456,14 +448,13 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) /* specify the cookie as filter_rule_id */ rule_info.fltr_rule_id = fltr->cookie; - status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); - if (status == ICE_ERR_ALREADY_EXISTS) { + ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); + if (ret == -EEXIST) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist"); ret = -EINVAL; goto exit; - } else if (status) { + } else if (ret) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); - ret = -EIO; goto exit; } @@ -802,7 +793,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, headers->l3_mask.ttl = match.mask->ttl; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) && + fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) { struct flow_match_ports match; flow_rule_match_enc_ports(rule, &match); @@ -1168,7 +1160,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_rem.vsi_handle = fltr->dest_id; err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem); if (err) { - if (err == ICE_ERR_DOES_NOT_EXIST) { + if (err == -ENOENT) { NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist"); return -ENOENT; } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index bc3ba19dc88f..3987a0dd0e11 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -3,8 +3,9 @@ /* The driver transmit and receive code */ -#include <linux/prefetch.h> #include <linux/mm.h> +#include <linux/netdevice.h> +#include <linux/prefetch.h> #include <linux/bpf_trace.h> #include <net/dsfield.h> #include <net/xdp.h> @@ -219,6 +220,10 @@ static bool 
ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; + /* get the bql data ready */ + if (!ice_ring_is_xdp(tx_ring)) + netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring)); + tx_buf = &tx_ring->tx_buf[i]; tx_desc = ICE_TX_DESC(tx_ring, i); i -= tx_ring->count; @@ -232,6 +237,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) if (!eop_desc) break; + /* follow the guidelines of other drivers */ + prefetchw(&tx_buf->skb->users); + smp_rmb(); /* prevent any other reads prior to eop_desc */ ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); @@ -304,8 +312,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); - netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, - total_bytes); + if (ice_ring_is_xdp(tx_ring)) + return !!budget; + + netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes); #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && @@ -314,11 +324,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) * sees the new next_to_clean. */ smp_mb(); - if (__netif_subqueue_stopped(tx_ring->netdev, - tx_ring->q_index) && + if (netif_tx_queue_stopped(txring_txq(tx_ring)) && !test_bit(ICE_VSI_DOWN, vsi->state)) { - netif_wake_subqueue(tx_ring->netdev, - tx_ring->q_index); + netif_tx_wake_queue(txring_txq(tx_ring)); ++tx_ring->tx_stats.restart_q; } } @@ -1517,7 +1525,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) */ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) { - netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); + netif_tx_stop_queue(txring_txq(tx_ring)); /* Memory barrier before checking head and tail */ smp_mb(); @@ -1525,8 +1533,8 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) if (likely(ICE_DESC_UNUSED(tx_ring) < size)) return -EBUSY; - /* A reprieve! - use start_subqueue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_tx_start_queue(txring_txq(tx_ring)); ++tx_ring->tx_stats.restart_q; return 0; } @@ -1568,6 +1576,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, struct sk_buff *skb; skb_frag_t *frag; dma_addr_t dma; + bool kick; td_tag = off->td_l2tag1; td_cmd = off->td_cmd; @@ -1649,9 +1658,6 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, tx_buf = &tx_ring->tx_buf[i]; } - /* record bytecount for BQL */ - netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); - /* record SW timestamp if HW timestamp is not available */ skb_tx_timestamp(first->skb); @@ -1680,7 +1686,10 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, ice_maybe_stop_tx(tx_ring, DESC_NEEDED); /* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, + netdev_xmit_more()); + if (kick) + /* notify HW of packet */ writel(i, tx_ring->tail); return; @@ -2265,6 +2274,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) return NETDEV_TX_BUSY; } + /* prefetch for bql data which is infrequently used */ + netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring)); + offload.tx_ring = tx_ring; /* record the location of the first descriptor for this packet */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 9e0c2923c62e..58b1907e3ff1 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -7,7 +7,6 @@ #define ICE_BYTES_PER_WORD 2 #define ICE_BYTES_PER_DWORD 4 -#include "ice_status.h" #include "ice_hw_autogen.h" #include "ice_osdep.h" #include "ice_controlq.h" @@ -279,6 +278,10 @@ struct ice_hw_common_caps { #define ICE_NVM_PENDING_NETLIST BIT(2) bool nvm_unified_update; #define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) + /* PCIe reset avoidance */ + bool pcie_reset_avoidance; + /* Post update reset restriction */ + bool reset_restrict_support; }; /* IEEE 1588 TIME_SYNC specific info */ @@ -873,8 +876,6 @@ struct ice_hw { u8 active_pkg_name[ICE_PKG_NAME_SIZE]; u8 active_pkg_in_nvm; - enum ice_aq_err pkg_dwnld_status; - /* Driver's package ver - (from the Ice Metadata section) */ struct ice_pkg_ver pkg_ver; u8 pkg_name[ICE_PKG_NAME_SIZE]; @@ -919,6 +920,7 @@ struct ice_hw { struct mutex rss_locks; /* protect RSS configuration */ struct list_head rss_list_head; struct ice_mbx_snapshot mbx_snapshot; + DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX); u16 io_expander_handle; }; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index eee180d8c024..d64df81d4893 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -47,197 +47,6 @@ struct virtchnl_fdir_fltr_conf { u32 flow_id; }; -static enum virtchnl_proto_hdr_type vc_pattern_ether[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_TCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - 
VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_SCTP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_TCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_SCTP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_GTPU_IP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_GTPU_IP, - VIRTCHNL_PROTO_HDR_GTPU_EH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_L2TPV3, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_L2TPV3, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_AH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_AH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_PFCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_PFCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -struct virtchnl_fdir_pattern_match_item { - enum virtchnl_proto_hdr_type *list; - u64 input_set; - u64 *meta; -}; - -static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = { - {vc_pattern_ipv4, 0, NULL}, - {vc_pattern_ipv4_tcp, 0, NULL}, - {vc_pattern_ipv4_udp, 0, NULL}, - {vc_pattern_ipv4_sctp, 0, NULL}, - {vc_pattern_ipv6, 0, 
NULL}, - {vc_pattern_ipv6_tcp, 0, NULL}, - {vc_pattern_ipv6_udp, 0, NULL}, - {vc_pattern_ipv6_sctp, 0, NULL}, -}; - -static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = { - {vc_pattern_ipv4, 0, NULL}, - {vc_pattern_ipv4_tcp, 0, NULL}, - {vc_pattern_ipv4_udp, 0, NULL}, - {vc_pattern_ipv4_sctp, 0, NULL}, - {vc_pattern_ipv6, 0, NULL}, - {vc_pattern_ipv6_tcp, 0, NULL}, - {vc_pattern_ipv6_udp, 0, NULL}, - {vc_pattern_ipv6_sctp, 0, NULL}, - {vc_pattern_ether, 0, NULL}, - {vc_pattern_ipv4_gtpu, 0, NULL}, - {vc_pattern_ipv4_gtpu_eh, 0, NULL}, - {vc_pattern_ipv4_l2tpv3, 0, NULL}, - {vc_pattern_ipv6_l2tpv3, 0, NULL}, - {vc_pattern_ipv4_esp, 0, NULL}, - {vc_pattern_ipv6_esp, 0, NULL}, - {vc_pattern_ipv4_ah, 0, NULL}, - {vc_pattern_ipv6_ah, 0, NULL}, - {vc_pattern_ipv4_nat_t_esp, 0, NULL}, - {vc_pattern_ipv6_nat_t_esp, 0, NULL}, - {vc_pattern_ipv4_pfcp, 0, NULL}, - {vc_pattern_ipv6_pfcp, 0, NULL}, -}; - struct virtchnl_fdir_inset_map { enum virtchnl_proto_hdr_field field; enum ice_flow_field fld; @@ -751,7 +560,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, struct ice_flow_seg_info *old_seg; struct ice_flow_prof *prof = NULL; struct ice_fd_hw_prof *vf_prof; - enum ice_status status; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -794,29 +602,26 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, tun ? ICE_FLTR_PTYPE_MAX : 0); - status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - tun + 1, &prof); - ret = ice_status_to_errno(status); + ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, + tun + 1, &prof); if (ret) { dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n", flow, vf->vf_id); goto err_exit; } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, - vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry1_h); - ret = ice_status_to_errno(status); + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry1_h); if (ret) { dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n", flow, vf->vf_id); goto err_prof; } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, - ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry2_h); - ret = ice_status_to_errno(status); + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry2_h); if (ret) { dev_dbg(dev, "Could not add flow 0x%x Ctrl VSI entry for VF %d\n", @@ -911,83 +716,6 @@ err_exit: } /** - * ice_vc_fdir_match_pattern - * @fltr: virtual channel add cmd buffer - * @type: virtual channel protocol filter header type - * - * Matching the header type by comparing fltr and type's value. - * - * Return: true on success, and false on error. 
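
For orientation amid these deletions: both per-package whitelists (vc_fdir_pattern_os and vc_fdir_pattern_comms) and their matcher go away, replaced by a single package-aware check wired into ice_vc_validate_fdir_fltr() further down. A sketch of the replacement call, using the names from that hunk:

struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;

if (!ice_vc_validate_pattern(vf, proto))
	return -EINVAL;	/* header sequence unsupported by the active package */
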
- */ -static bool -ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr, - enum virtchnl_proto_hdr_type *type) -{ - struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; - int i = 0; - - while ((i < proto->count) && - (*type == proto->proto_hdr[i].type) && - (*type != VIRTCHNL_PROTO_HDR_NONE)) { - type++; - i++; - } - - return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE)); -} - -/** - * ice_vc_fdir_get_pattern - get while list pattern - * @vf: pointer to the VF info - * @len: filter list length - * - * Return: pointer to allowed filter list - */ -static const struct virtchnl_fdir_pattern_match_item * -ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len) -{ - const struct virtchnl_fdir_pattern_match_item *item; - struct ice_pf *pf = vf->pf; - struct ice_hw *hw; - - hw = &pf->hw; - if (!strncmp(hw->active_pkg_name, "ICE COMMS Package", - sizeof(hw->active_pkg_name))) { - item = vc_fdir_pattern_comms; - *len = ARRAY_SIZE(vc_fdir_pattern_comms); - } else { - item = vc_fdir_pattern_os; - *len = ARRAY_SIZE(vc_fdir_pattern_os); - } - - return item; -} - -/** - * ice_vc_fdir_search_pattern - * @vf: pointer to the VF info - * @fltr: virtual channel add cmd buffer - * - * Search for matched pattern from supported pattern list - * - * Return: 0 on success, and other on error. - */ -static int -ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr) -{ - const struct virtchnl_fdir_pattern_match_item *pattern; - int len, i; - - pattern = ice_vc_fdir_get_pattern(vf, &len); - - for (i = 0; i < len; i++) - if (ice_vc_fdir_match_pattern(fltr, pattern[i].list)) - return 0; - - return -EINVAL; -} - -/** * ice_vc_fdir_parse_pattern * @vf: pointer to the VF info * @fltr: virtual channel add cmd buffer @@ -1299,11 +1027,11 @@ static int ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, struct virtchnl_fdir_fltr_conf *conf) { + struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; int ret; - ret = ice_vc_fdir_search_pattern(vf, fltr); - if (ret) - return ret; + if (!ice_vc_validate_pattern(vf, proto)) + return -EINVAL; ret = ice_vc_fdir_parse_pattern(vf, fltr, conf); if (ret) @@ -1467,7 +1195,6 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, struct ice_fdir_fltr *input = &conf->input; struct ice_vsi *vsi, *ctrl_vsi; struct ice_fltr_desc desc; - enum ice_status status; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -1497,8 +1224,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, return -ENOMEM; ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); - ret = ice_status_to_errno(status); + ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); if (ret) { dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", vf->vf_id, input->flow_type); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 217ff5e9a6f1..61b2db3342ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -9,6 +9,7 @@ #include "ice_flow.h" #include "ice_eswitch.h" #include "ice_virtchnl_allowlist.h" +#include "ice_flex_pipe.h" #define FIELD_SELECTOR(proto_hdr_field) \ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK) @@ -18,18 +19,7 @@ struct ice_vc_hdr_match_type { u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */ }; -static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = { - {VIRTCHNL_PROTO_HDR_NONE, 
ICE_FLOW_SEG_HDR_NONE}, - {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 | - ICE_FLOW_SEG_HDR_IPV_OTHER}, - {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 | - ICE_FLOW_SEG_HDR_IPV_OTHER}, - {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP}, - {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP}, - {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP}, -}; - -static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = { +static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = { {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE}, {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH}, {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN}, @@ -67,83 +57,7 @@ struct ice_vc_hash_field_match_type { }; static const struct -ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = { - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), - ICE_FLOW_HASH_IPV4}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), - ICE_FLOW_HASH_IPV6}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_TCP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, - {VIRTCHNL_PROTO_HDR_TCP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, - {VIRTCHNL_PROTO_HDR_TCP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), - ICE_FLOW_HASH_TCP_PORT}, - {VIRTCHNL_PROTO_HDR_UDP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT), - 
BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, - {VIRTCHNL_PROTO_HDR_UDP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, - {VIRTCHNL_PROTO_HDR_UDP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), - ICE_FLOW_HASH_UDP_PORT}, - {VIRTCHNL_PROTO_HDR_SCTP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, - {VIRTCHNL_PROTO_HDR_SCTP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, - {VIRTCHNL_PROTO_HDR_SCTP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), - ICE_FLOW_HASH_SCTP_PORT}, -}; - -static const struct -ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = { +ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC), BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)}, {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST), @@ -289,37 +203,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) } /** - * ice_err_to_virt_err - translate errors for VF return code - * @ice_err: error return code - */ -static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err) -{ - switch (ice_err) { - case ICE_SUCCESS: - return VIRTCHNL_STATUS_SUCCESS; - case ICE_ERR_BAD_PTR: - case ICE_ERR_INVAL_SIZE: - case ICE_ERR_DEVICE_NOT_SUPPORTED: - case ICE_ERR_PARAM: - case ICE_ERR_CFG: - return VIRTCHNL_STATUS_ERR_PARAM; - case ICE_ERR_NO_MEMORY: - return VIRTCHNL_STATUS_ERR_NO_MEMORY; - case ICE_ERR_NOT_READY: - case ICE_ERR_RESET_FAILED: - case ICE_ERR_FW_API_VER: - case ICE_ERR_AQ_ERROR: - case ICE_ERR_AQ_TIMEOUT: - case ICE_ERR_AQ_FULL: - case ICE_ERR_AQ_NO_WORK: - case ICE_ERR_AQ_EMPTY: - return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; - default: - return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - } -} - -/** * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF * @pf: pointer to the PF structure * @v_opcode: operation code @@ -770,8 +653,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable) struct ice_hw *hw = &vsi->back->hw; struct ice_aqc_vsi_props *info; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -794,12 +676,10 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable) info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | ICE_AQ_VSI_PROP_SW_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -968,8 +848,8 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) { struct device *dev = ice_pf_to_dev(vf->pf); struct ice_vsi *vsi = ice_get_vf_vsi(vf); - enum ice_status status; u8 broadcast[ETH_ALEN]; + int status; if (ice_is_eswitch_mode_switchdev(vf->pf)) return 0; @@ -977,9 +857,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) eth_broadcast_addr(broadcast); status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); if (status) { - dev_err(dev, "failed to add broadcast MAC filter for VF 
%u, error %s\n", - vf->vf_id, ice_stat_str(status)); - return ice_status_to_errno(status); + dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", + vf->vf_id, status); + return status; } vf->num_mac++; @@ -988,10 +868,10 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); if (status) { - dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n", + dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", &vf->hw_lan_addr.addr[0], vf->vf_id, - ice_stat_str(status)); - return ice_status_to_errno(status); + status); + return status; } vf->num_mac++; @@ -1341,45 +1221,50 @@ static void ice_clear_vf_reset_trigger(struct ice_vf *vf) ice_flush(hw); } -/** - * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s) - * @vf: pointer to the VF info - * @vsi: the VSI being configured - * @promisc_m: mask of promiscuous config bits - * @rm_promisc: promisc flag request from the VF to remove or add filter - * - * This function configures VF VSI promiscuous mode, based on the VF requests, - * for Unicast, Multicast and VLAN - */ -static enum ice_status -ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, - bool rm_promisc) +static int +ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) { - struct ice_pf *pf = vf->pf; - enum ice_status status = 0; - struct ice_hw *hw; + struct ice_hw *hw = &vsi->back->hw; + int status; - hw = &pf->hw; - if (vsi->num_vlan) { - status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, - rm_promisc); - } else if (vf->port_vlan_info) { - if (rm_promisc) - status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, - vf->port_vlan_info); - else - status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, - vf->port_vlan_info); - } else { - if (rm_promisc) - status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, - 0); - else - status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, - 0); + if (vf->port_vlan_info) + status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, + vf->port_vlan_info & VLAN_VID_MASK); + else if (vsi->num_vlan > 1) + status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m); + else + status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0); + + if (status && status != -EEXIST) { + dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; } - return status; + return 0; +} + +static int +ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) +{ + struct ice_hw *hw = &vsi->back->hw; + int status; + + if (vf->port_vlan_info) + status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, + vf->port_vlan_info & VLAN_VID_MASK); + else if (vsi->num_vlan > 1) + status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m); + else + status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0); + + if (status && status != -ENOENT) { + dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; + } + + return 0; } static void ice_vf_clear_counters(struct ice_vf *vf) @@ -1415,8 +1300,8 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; if (!vsi->agg_node) return; @@ -1617,6 +1502,7 @@ bool 
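Editor's note: ice_vf_set_vsi_promisc() and its new ice_vf_clear_vsi_promisc() counterpart above replace one bool-driven helper with a symmetric pair, and each treats the "already in that state" error (-EEXIST on set, -ENOENT on clear) as success. A small illustrative model of that idempotent pattern; the names are placeholders, not driver code:

#include <errno.h>

/* Apply a promiscuous-mode change, ignoring the one errno that means
 * "nothing to do": -EEXIST when enabling, -ENOENT when disabling.
 */
static int apply_promisc(int (*hw_op)(unsigned char), unsigned char mask,
			 int benign_err)
{
	int err = hw_op(mask);

	if (err && err != benign_err)
		return err;	/* real failure, worth logging */
	return 0;
}

/* enable:  apply_promisc(hw_set_promisc, mask, -EEXIST);
 * disable: apply_promisc(hw_clear_promisc, mask, -ENOENT);
 */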
ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_vc_set_default_allowlist(vf); ice_vf_fdir_exit(vf); + ice_vf_fdir_init(vf); /* clean VF control VSI when resetting VFs since it should be * setup only when VF creates its first FDIR rule. */ @@ -1742,11 +1628,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) else promisc_m = ICE_UCAST_PROMISC_BITS; - if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true)) + if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m)) dev_err(dev, "disabling promiscuous mode failed\n"); } ice_vf_fdir_exit(vf); + ice_vf_fdir_init(vf); /* clean VF control VSI when resetting VF since it should be setup * only when VF creates its first FDIR rule. */ @@ -1844,7 +1731,6 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf) { struct ice_pf *pf = vf->pf; u8 broadcast[ETH_ALEN]; - enum ice_status status; struct ice_vsi *vsi; struct device *dev; int err; @@ -1864,11 +1750,10 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf) } eth_broadcast_addr(broadcast); - status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); - if (status) { - dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n", - vf->vf_id, ice_stat_str(status)); - err = ice_status_to_errno(status); + err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); + if (err) { + dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n", + vf->vf_id, err); goto release_vsi; } @@ -2021,6 +1906,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) if (ret) goto err_unroll_sriov; + /* rearm global interrupts */ + if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) + ice_irq_dynamic_ena(hw, NULL, NULL); + return 0; err_unroll_sriov: @@ -2110,7 +1999,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct ice_pf *pf = pci_get_drvdata(pdev); struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; int err; err = ice_check_sriov_allowed(pf); @@ -2130,9 +2018,9 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) return -EBUSY; } - status = ice_mbx_init_snapshot(&pf->hw, num_vfs); - if (status) - return ice_status_to_errno(status); + err = ice_mbx_init_snapshot(&pf->hw, num_vfs); + if (err) + return err; err = ice_pci_sriov_ena(pf, num_vfs); if (err) { @@ -2266,9 +2154,9 @@ int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { - enum ice_status aq_ret; struct device *dev; struct ice_pf *pf; + int aq_ret; if (!vf) return -EINVAL; @@ -2300,8 +2188,8 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { - dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n", - vf->vf_id, ice_stat_str(aq_ret), + dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n", + vf->vf_id, aq_ret, ice_aq_str(pf->hw.mailboxq.sq_last_status)); return -EIO; } @@ -2550,6 +2438,100 @@ static bool ice_vc_isvalid_ring_len(u16 ring_len) } /** + * ice_vc_validate_pattern + * @vf: pointer to the VF info + * @proto: virtchnl protocol headers + * + * validate the pattern is supported or not. + * + * Return: true on success, false on error. 
+ */ +bool +ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto) +{ + bool is_ipv4 = false; + bool is_ipv6 = false; + bool is_udp = false; + u16 ptype = -1; + int i = 0; + + while (i < proto->count && + proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) { + switch (proto->proto_hdr[i].type) { + case VIRTCHNL_PROTO_HDR_ETH: + ptype = ICE_PTYPE_MAC_PAY; + break; + case VIRTCHNL_PROTO_HDR_IPV4: + ptype = ICE_PTYPE_IPV4_PAY; + is_ipv4 = true; + break; + case VIRTCHNL_PROTO_HDR_IPV6: + ptype = ICE_PTYPE_IPV6_PAY; + is_ipv6 = true; + break; + case VIRTCHNL_PROTO_HDR_UDP: + if (is_ipv4) + ptype = ICE_PTYPE_IPV4_UDP_PAY; + else if (is_ipv6) + ptype = ICE_PTYPE_IPV6_UDP_PAY; + is_udp = true; + break; + case VIRTCHNL_PROTO_HDR_TCP: + if (is_ipv4) + ptype = ICE_PTYPE_IPV4_TCP_PAY; + else if (is_ipv6) + ptype = ICE_PTYPE_IPV6_TCP_PAY; + break; + case VIRTCHNL_PROTO_HDR_SCTP: + if (is_ipv4) + ptype = ICE_PTYPE_IPV4_SCTP_PAY; + else if (is_ipv6) + ptype = ICE_PTYPE_IPV6_SCTP_PAY; + break; + case VIRTCHNL_PROTO_HDR_GTPU_IP: + case VIRTCHNL_PROTO_HDR_GTPU_EH: + if (is_ipv4) + ptype = ICE_MAC_IPV4_GTPU; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_GTPU; + goto out; + case VIRTCHNL_PROTO_HDR_L2TPV3: + if (is_ipv4) + ptype = ICE_MAC_IPV4_L2TPV3; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_L2TPV3; + goto out; + case VIRTCHNL_PROTO_HDR_ESP: + if (is_ipv4) + ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP : + ICE_MAC_IPV4_ESP; + else if (is_ipv6) + ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP : + ICE_MAC_IPV6_ESP; + goto out; + case VIRTCHNL_PROTO_HDR_AH: + if (is_ipv4) + ptype = ICE_MAC_IPV4_AH; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_AH; + goto out; + case VIRTCHNL_PROTO_HDR_PFCP: + if (is_ipv4) + ptype = ICE_MAC_IPV4_PFCP_SESSION; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_PFCP_SESSION; + goto out; + default: + break; + } + i++; + } + +out: + return ice_hw_ptype_ena(&vf->pf->hw, ptype); +} + +/** * ice_vc_parse_rss_cfg - parses hash fields and headers from * a specific virtchnl RSS cfg * @hw: pointer to the hardware @@ -2572,18 +2554,10 @@ ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg, const struct ice_vc_hdr_match_type *hdr_list; int i, hf_list_len, hdr_list_len; - if (!strncmp(hw->active_pkg_name, "ICE COMMS Package", - sizeof(hw->active_pkg_name))) { - hf_list = ice_vc_hash_field_list_comms; - hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms); - hdr_list = ice_vc_hdr_list_comms; - hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms); - } else { - hf_list = ice_vc_hash_field_list_os; - hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os); - hdr_list = ice_vc_hdr_list_os; - hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os); - } + hf_list = ice_vc_hash_field_list; + hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list); + hdr_list = ice_vc_hdr_list; + hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list); for (i = 0; i < rss_cfg->proto_hdrs.count; i++) { struct virtchnl_proto_hdr *proto_hdr = @@ -2685,10 +2659,15 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) goto error_param; } + if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { struct ice_vsi_ctx *ctx; - enum ice_status status; u8 lut_type, hash_type; + int status; lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; hash_type = add ? 
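Editor's note: ice_vc_validate_pattern() above derives one packet type from an ordered virtchnl header list and then asks the hardware whether that ptype is enabled. A stripped-down, illustrative model of the same walk; only IPv4/IPv6/UDP are shown and the enum values are placeholders:

#include <stdbool.h>

enum hdr { HDR_NONE, HDR_IPV4, HDR_IPV6, HDR_UDP };
enum ptype { PT_NONE, PT_V4_PAY, PT_V6_PAY, PT_V4_UDP_PAY, PT_V6_UDP_PAY };

static enum ptype resolve_ptype(const enum hdr *h, int count)
{
	bool v4 = false, v6 = false;
	enum ptype pt = PT_NONE;

	/* later headers refine the type chosen by earlier ones */
	for (int i = 0; i < count && h[i] != HDR_NONE; i++) {
		switch (h[i]) {
		case HDR_IPV4:
			pt = PT_V4_PAY;
			v4 = true;
			break;
		case HDR_IPV6:
			pt = PT_V6_PAY;
			v6 = true;
			break;
		case HDR_UDP:
			pt = v4 ? PT_V4_UDP_PAY : v6 ? PT_V6_UDP_PAY : pt;
			break;
		default:
			break;
		}
	}
	return pt;	/* caller checks HW support, as ice_hw_ptype_ena() does */
}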
ICE_AQ_VSI_Q_OPT_RSS_XOR : @@ -2717,9 +2696,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) status = ice_update_vsi(hw, vsi->idx, ctx, NULL); if (status) { - dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); v_ret = VIRTCHNL_STATUS_ERR_PARAM; } else { vsi->info.q_opt_rss = ctx->info.q_opt_rss; @@ -2744,19 +2722,18 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) vsi->vsi_num, v_ret); } } else { - enum ice_status status; + int status; status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, addl_hdrs); - /* We just ignore ICE_ERR_DOES_NOT_EXIST, because - * if two configurations share the same profile remove - * one of them actually removes both, since the - * profile is deleted. + /* We just ignore -ENOENT, because if two configurations + * share the same profile remove one of them actually + * removes both, since the profile is deleted. */ - if (status && status != ICE_ERR_DOES_NOT_EXIST) { + if (status && status != -ENOENT) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n", - vf->vf_id, ice_stat_str(status)); + dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n", + vf->vf_id, status); } } } @@ -2914,7 +2891,6 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) struct ice_pf *pf = np->vsi->back; struct ice_vsi_ctx *ctx; struct ice_vsi *vf_vsi; - enum ice_status status; struct device *dev; struct ice_vf *vf; int ret; @@ -2964,12 +2940,10 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)); } - status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); - if (status) { - dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n", - ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, - ice_stat_str(status)); - ret = -EIO; + ret = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); + if (ret) { + dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n", + ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, ret); goto out; } @@ -3015,10 +2989,10 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf) static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - enum ice_status mcast_status = 0, ucast_status = 0; bool rm_promisc, alluni = false, allmulti = false; struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)msg; + int mcast_err = 0, ucast_err = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; struct device *dev; @@ -3100,24 +3074,21 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) ucast_m = ICE_UCAST_PROMISC_BITS; } - ucast_status = ice_vf_set_vsi_promisc(vf, vsi, ucast_m, - !alluni); - if (ucast_status) { - dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n", - alluni ? "en" : "dis", vf->vf_id); - v_ret = ice_err_to_virt_err(ucast_status); - } + if (alluni) + ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m); + else + ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m); - mcast_status = ice_vf_set_vsi_promisc(vf, vsi, mcast_m, - !allmulti); - if (mcast_status) { - dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n", - allmulti ? 
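Editor's note: the hunks in this file follow the series-wide conversion from the driver-private enum ice_status to plain negative errnos, which removes the ice_stat_str()/ice_status_to_errno() translation layer and lets callers log with "%d" and return values unchanged. An illustrative before/after model; the status names are invented for the sketch:

#include <errno.h>

enum status_model { ST_OK = 0, ST_NO_MEMORY, ST_AQ_ERROR };	/* old style */

/* before: every caller had to translate the private status space */
static int old_style(enum status_model st)
{
	switch (st) {
	case ST_OK:		return 0;
	case ST_NO_MEMORY:	return -ENOMEM;
	default:		return -EIO;
	}
}

/* after: helpers return -ENOMEM/-EIO themselves, so callers simply
 * propagate:  err = helper(); if (err) return err;
 */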
"en" : "dis", vf->vf_id); - v_ret = ice_err_to_virt_err(mcast_status); - } + if (allmulti) + mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m); + else + mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); + + if (ucast_err || mcast_err) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } - if (!mcast_status) { + if (!mcast_err) { if (allmulti && !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", @@ -3127,7 +3098,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) vf->vf_id); } - if (!ucast_status) { + if (!ucast_err) { if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id); @@ -3807,8 +3778,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, { struct device *dev = ice_pf_to_dev(vf->pf); u8 *mac_addr = vc_ether_addr->addr; - enum ice_status status; - int ret = 0; + int ret; /* device MAC already added */ if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) @@ -3819,18 +3789,17 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, return -EPERM; } - status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); - if (status == ICE_ERR_ALREADY_EXISTS) { + ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); + if (ret == -EEXIST) { dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr, vf->vf_id); /* don't return since we might need to update * the primary MAC in ice_vfhw_mac_add() below */ - ret = -EEXIST; - } else if (status) { - dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n", - mac_addr, vf->vf_id, ice_stat_str(status)); - return -EIO; + } else if (ret) { + dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n", + mac_addr, vf->vf_id, ret); + return ret; } else { vf->num_mac++; } @@ -3907,20 +3876,20 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, { struct device *dev = ice_pf_to_dev(vf->pf); u8 *mac_addr = vc_ether_addr->addr; - enum ice_status status; + int status; if (!ice_can_vf_change_mac(vf) && ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) return 0; status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); - if (status == ICE_ERR_DOES_NOT_EXIST) { + if (status == -ENOENT) { dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr, vf->vf_id); return -ENOENT; } else if (status) { - dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n", - mac_addr, vf->vf_id, ice_stat_str(status)); + dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n", + mac_addr, vf->vf_id, status); return -EIO; } @@ -5283,9 +5252,9 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, s16 vf_id = le16_to_cpu(event->desc.retval); struct device *dev = ice_pf_to_dev(pf); struct ice_mbx_data mbxdata; - enum ice_status status; bool malvf = false; struct ice_vf *vf; + int status; if (ice_validate_vf_id(pf, vf_id)) return false; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 7e28ecbbe7af..752487a1bdd6 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -203,6 +203,8 @@ void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_print_vfs_mdd_events(struct ice_pf *pf); void ice_print_vf_rx_mdd_event(struct ice_vf *vf); +bool +ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto); struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf 
*vf); int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index ff55cb415b11..bb9a80847298 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -383,6 +383,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) while (i--) { dma = xsk_buff_xdp_get_dma(*xdp); rx_desc->read.pkt_addr = cpu_to_le64(dma); + rx_desc->wb.status_error0 = 0; rx_desc++; xdp++; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index fb1029352c3e..51a2dcaf553d 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -864,7 +864,9 @@ static void igb_get_drvinfo(struct net_device *netdev, } static void igb_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -875,7 +877,9 @@ static void igb_get_ringparam(struct net_device *netdev, } static int igb_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct igb_adapter *adapter = netdev_priv(netdev); struct igb_ring *temp_ring; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 18a019a47182..219aac3e62f5 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7648,6 +7648,20 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, struct vf_mac_filter *entry = NULL; int ret = 0; + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d requested MAC filter but is administratively denied\n", + vf); + return -EINVAL; + } + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC filter\n", + vf); + return -EINVAL; + } + switch (info) { case E1000_VF_MAC_FILTER_CLR: /* remove all unicast MAC filters related to the current VF */ @@ -7661,20 +7675,6 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, } break; case E1000_VF_MAC_FILTER_ADD: - if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && - !vf_data->trusted) { - dev_warn(&pdev->dev, - "VF %d requested MAC filter but is administratively denied\n", - vf); - return -EINVAL; - } - if (!is_valid_ether_addr(addr)) { - dev_warn(&pdev->dev, - "VF %d attempted to set invalid MAC filter\n", - vf); - return -EINVAL; - } - /* try to find empty slot in the list */ list_for_each(pos, &adapter->vf_macs.l) { entry = list_entry(pos, struct vf_mac_filter, l); @@ -8026,7 +8026,7 @@ static int igb_poll(struct napi_struct *napi, int budget) if (likely(napi_complete_done(napi, work_done))) igb_ring_irq_enable(q_vector); - return min(work_done, budget - 1); + return work_done; } /** diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 0011b15e678c..0ac4cc5eaa2d 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1015,10 +1015,6 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, bool is_l2 = false; u32 regval; - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - 
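Editor's note: the igb_ptp hunk above, like the igc and ixgbe hunks further down, drops the identical "reserved for future extensions" guard; the assumption behind the series is that the networking core now rejects unknown hwtstamp flags before any driver callback runs. The removed pattern, reconstructed for reference with a shortened struct name:

#include <errno.h>

struct hwtstamp_cfg { int flags; int tx_type; int rx_filter; };

static int set_ts_config_old(struct hwtstamp_cfg *config)
{
	/* this exact check was duplicated across igb, igc and ixgbe */
	if (config->flags)
		return -EINVAL;
	/* ... per-driver tx_type / rx_filter handling follows ... */
	return 0;
}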
switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 06e5bd646a0e..9d4322b74163 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -175,7 +175,9 @@ static void igbvf_get_drvinfo(struct net_device *netdev, } static void igbvf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *tx_ring = adapter->tx_ring; @@ -188,7 +190,9 @@ static void igbvf_get_ringparam(struct net_device *netdev, } static int igbvf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *temp_ring; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 74ccd622251a..4d988da68394 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2859,6 +2859,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_hw_init: + netif_napi_del(&adapter->rx_ring->napi); kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index e0a76ac1bbbc..8cc077b712ad 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -567,8 +567,11 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev, return ret_val; } -static void igc_ethtool_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) +static void +igc_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -578,8 +581,11 @@ static void igc_ethtool_get_ringparam(struct net_device *netdev, ring->tx_pending = adapter->tx_ring_count; } -static int igc_ethtool_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) +static int +igc_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_ring *temp_ring; diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index b2ef9fde97b3..b6807e16eea9 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -636,7 +636,7 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) ltrv = rd32(IGC_LTRMAXV); if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | - (scale_min << IGC_LTRMAXV_SCALE_SHIFT); + (scale_max << IGC_LTRMAXV_SCALE_SHIFT); wr32(IGC_LTRMAXV, ltrv); } } diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 8e448288ee26..142c57b7a451 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1718,24 +1718,26 @@ static void igc_add_rx_frag(struct igc_ring 
*rx_ring, static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, struct igc_rx_buffer *rx_buffer, - union igc_adv_rx_desc *rx_desc, - unsigned int size) + struct xdp_buff *xdp) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int size = xdp->data_end - xdp->data; unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + unsigned int metasize = xdp->data - xdp->data_meta; struct sk_buff *skb; /* prefetch first cache line of first page */ - net_prefetch(va); + net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ - skb = build_skb(va - IGC_SKB_PAD, truesize); + skb = build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, IGC_SKB_PAD); + skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, size); + if (metasize) + skb_metadata_set(skb, metasize); igc_rx_buffer_flip(rx_buffer, truesize); return skb; @@ -1746,6 +1748,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct xdp_buff *xdp, ktime_t timestamp) { + unsigned int metasize = xdp->data - xdp->data_meta; unsigned int size = xdp->data_end - xdp->data; unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); void *va = xdp->data; @@ -1753,10 +1756,11 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct sk_buff *skb; /* prefetch first cache line of first page */ - net_prefetch(va); + net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IGC_RX_HDR_LEN + metasize); if (unlikely(!skb)) return NULL; @@ -1769,7 +1773,13 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } /* update all of the pointers */ size -= headlen; @@ -2354,7 +2364,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) if (!skb) { xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), - igc_rx_offset(rx_ring) + pkt_offset, size, false); + igc_rx_offset(rx_ring) + pkt_offset, + size, true); skb = igc_xdp_run_prog(adapter, &xdp); } @@ -2378,7 +2389,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) } else if (skb) igc_add_rx_frag(rx_ring, rx_buffer, skb, size); else if (ring_uses_build_skb(rx_ring)) - skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); + skb = igc_build_skb(rx_ring, rx_buffer, &xdp); else skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, timestamp); @@ -2448,8 +2459,10 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); - if (metasize) + if (metasize) { skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } return skb; } diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 30568e3544cd..71813fa8f928 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ 
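Editor's note: the igc hunks above start copying from xdp->data_meta instead of xdp->data, then record the metadata length with skb_metadata_set() and pull the head past it, so metadata written by an XDP program survives into the skb while skb->data still points at the packet. The pointer arithmetic, modelled in plain C with illustrative types only:

#include <stddef.h>
#include <string.h>

struct xdp_view { unsigned char *data_meta, *data, *data_end; };

/* Copy metadata + payload in one memcpy; report the metadata length so
 * the caller can record it and advance the head past it.
 */
static size_t copy_with_meta(const struct xdp_view *x, unsigned char *dst,
			     size_t *metasize)
{
	size_t meta = (size_t)(x->data - x->data_meta);
	size_t total = (size_t)(x->data_end - x->data_meta);

	memcpy(dst, x->data_meta, total);
	*metasize = meta;	/* -> skb_metadata_set(skb, meta) */
	return total;		/* caller then pulls meta bytes off the head */
}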
b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -560,10 +560,6 @@ static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, struct hwtstamp_config *config) { - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: igc_ptp_disable_tx_timestamp(adapter); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index 582099a5ad41..46efcfab7234 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -464,7 +464,9 @@ ixgb_get_drvinfo(struct net_device *netdev, static void ixgb_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_desc_ring *txdr = &adapter->tx_ring; @@ -478,7 +480,9 @@ ixgb_get_ringparam(struct net_device *netdev, static int ixgb_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_desc_ring *txdr = &adapter->tx_ring; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 8362822316a9..f70967c32116 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1118,7 +1118,9 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, } static void ixgbe_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; @@ -1131,7 +1133,9 @@ static void ixgbe_get_ringparam(struct net_device *netdev, } static int ixgbe_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *temp_ring; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0f9f022260d7..45e2ec4d264d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5531,6 +5531,10 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) if (!speed && hw->mac.ops.get_link_capabilities) { ret = hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); + /* remove NBASE-T speeds from default autonegotiation + * to accommodate broken network switches in the field + * which cannot cope with advertised NBASE-T speeds + */ speed &= ~(IXGBE_LINK_SPEED_5GB_FULL | IXGBE_LINK_SPEED_2_5GB_FULL); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 23ddfd79fc8b..336426a67ac1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -992,10 +992,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, bool is_l2 = false; u32 regval; - /* reserved for future extensions */ - if 
(config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 9724ffb16518..e4b50c7781ff 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -3405,6 +3405,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); + /* set MDIO speed before talking to the PHY in case it's the 1st time */ + ixgbe_set_mdio_speed(hw); + /* PHY ops must be identified and initialized prior to reset */ status = hw->phy.ops.init(hw); if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 8380f905e708..3b41f83c8dff 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -225,7 +225,9 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev, } static void ixgbevf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); @@ -236,7 +238,9 @@ static void ixgbevf_get_ringparam(struct net_device *netdev, } static int ixgbevf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
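Editor's note: the ixgbevf hunk above is the last of the mechanical ->get_ringparam()/->set_ringparam() conversions in this series; every driver gains the same two arguments and, as here, simply ignores them. A hedged sketch of the common shape under the new prototype; the adapter type, its fields, and the limits are placeholders, not any one driver's code:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define EXAMPLE_MAX_RXD 4096	/* placeholder hardware limits */
#define EXAMPLE_MAX_TXD 4096

struct example_adapter { u32 rx_ring_count, tx_ring_count; };

static void example_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct example_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = EXAMPLE_MAX_RXD;
	ring->tx_max_pending = EXAMPLE_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
	/* kernel_ring and extack are accepted but unused by these drivers */
}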