Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 72
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 76
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.c | 663
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.h | 82
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 117
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 146
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 180
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 105
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 134
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 1697
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c | 840
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.h | 166
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 919
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 41
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 355
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 47
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.c | 397
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.h | 39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 128
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 605
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 733
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 91
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 96
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 415
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 80
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 1221
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 380
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 13
46 files changed, 8532 insertions, 1544 deletions
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 29c6c6743450..980bbcc64b4b 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -17,10 +17,14 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx_lib.o \
ice_txrx.o \
+ ice_fltr.o \
+ ice_fdir.o \
+ ice_ethtool_fdir.o \
ice_flex_pipe.o \
ice_flow.o \
ice_devlink.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
+ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 5c11448bfbb3..5792ee616b5c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,9 +34,14 @@
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
+#include <linux/cpu_rmap.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
+#include <net/geneve.h>
+#include <net/gre.h>
+#include <net/udp_tunnel.h>
+#include <net/vxlan.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -46,7 +51,9 @@
#include "ice_sched.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_fdir.h"
#include "ice_xsk.h"
+#include "ice_arfs.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
@@ -62,6 +69,7 @@ extern const char ice_drv_ver[];
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
#define ICE_MIN_MSIX 2
+#define ICE_FDIR_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -90,6 +98,7 @@ extern const char ice_drv_ver[];
#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
+#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
@@ -210,6 +219,7 @@ enum ice_state {
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
+ __ICE_FD_FLUSH_REQ,
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
__ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
@@ -244,8 +254,8 @@ struct ice_vsi {
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
- int num_q_vectors;
- int base_vector; /* IRQ base for OS reserved vectors */
+ u16 num_q_vectors;
+ u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -253,6 +263,8 @@ struct ice_vsi {
s16 vf_id; /* VF ID for SR-IOV VSIs */
u16 ethtype; /* Ethernet protocol for pause frame */
+ u16 num_gfltr;
+ u16 num_bfltr;
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
@@ -261,6 +273,14 @@ struct ice_vsi {
u8 *rss_lut_user; /* User configured lookup table entries */
u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */
+ /* aRFS members only allocated for the PF VSI */
+#define ICE_MAX_ARFS_LIST 1024
+#define ICE_ARFS_LST_MASK (ICE_MAX_ARFS_LIST - 1)
+ struct hlist_head *arfs_fltr_list;
+ struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
+ spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
+ atomic_t *arfs_last_fltr_id;
+
u16 max_frame;
u16 rx_buf_len;
@@ -335,12 +355,14 @@ enum ice_pf_flags {
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
+ ICE_FLAG_FD_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
+ ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -362,11 +384,13 @@ struct ice_pf {
*/
u16 sriov_base_vector;
+ u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
+
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
/* Virtchnl/SR-IOV config info */
struct ice_vf *vf;
- int num_alloc_vfs; /* actual number of VFs allocated */
+ u16 num_alloc_vfs; /* actual number of VFs allocated */
u16 num_vfs_supported; /* num VFs supported for this PF */
u16 num_qps_per_vf;
u16 num_msix_per_vf;
@@ -385,11 +409,11 @@ struct ice_pf {
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
u32 hw_csum_rx_error;
- u32 oicr_idx; /* Other interrupt cause MSIX vector index */
- u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
+ u16 oicr_idx; /* Other interrupt cause MSIX vector index */
+ u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
- u32 num_lan_msix; /* Total MSIX vectors for base driver */
+ u16 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -500,8 +524,27 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
return NULL;
}
+/**
+ * ice_get_ctrl_vsi - Get the control VSI
+ * @pf: PF instance
+ */
+static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
+{
+ /* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
+ if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
+ return NULL;
+
+ return pf->vsi[pf->ctrl_vsi_idx];
+}
+
+#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
+#define ICE_FD_STAT_PF_IDX(base_idx) \
+ ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
+#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
+
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
+int ice_vsi_open_ctrl(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
@@ -523,7 +566,24 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+const char *ice_stat_str(enum ice_status stat_err);
+const char *ice_aq_str(enum ice_aq_err aq_err);
+int
+ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
+ bool is_tun);
+void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
+int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
+int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
+int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
+int
+ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+void ice_fdir_release_flows(struct ice_hw *hw);
+void ice_fdir_replay_flows(struct ice_hw *hw);
+void ice_fdir_replay_fltrs(struct ice_pf *pf);
+int ice_fdir_create_dflt_rules(struct ice_pf *pf);
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
+void ice_service_task_schedule(struct ice_pf *pf);
#endif /* _ICE_H_ */
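The ICE_FD_STAT_* macros added above carve the Flow Director statistics counters into per-PF blocks of 256, with the sideband (ethtool) filter statistics using the first counter of a PF's block. A minimal sketch of that indexing, assuming base_idx is the counter base the driver obtains from ice_alloc_fd_res_cntr() and stores in hw->fd_ctr_base:

    /* illustrative only: maps a PF's counter base to its sideband
     * Flow Director statistics counter index
     */
    static u32 example_fd_sb_stat_idx(u32 base_idx)
    {
            /* ICE_FD_SB_STAT_IDX(base_idx) expands to base_idx * 256 */
            return ICE_FD_SB_STAT_IDX(base_idx);
    }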
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 2381b4014ed6..92f82f2a8af4 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -107,6 +107,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_RXQS 0x0041
#define ICE_AQC_CAPS_TXQS 0x0042
#define ICE_AQC_CAPS_MSIX 0x0043
+#define ICE_AQC_CAPS_FD 0x0045
#define ICE_AQC_CAPS_MAX_MTU 0x0047
u8 major_ver;
@@ -155,13 +156,11 @@ struct ice_aqc_manage_mac_write {
#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0)
#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1)
#define ICE_AQC_MAN_MAC_WR_S 6
-#define ICE_AQC_MAN_MAC_WR_M (3 << ICE_AQC_MAN_MAC_WR_S)
+#define ICE_AQC_MAN_MAC_WR_M ICE_M(3, ICE_AQC_MAN_MAC_WR_S)
#define ICE_AQC_MAN_MAC_UPDATE_LAA 0
-#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL (BIT(0) << ICE_AQC_MAN_MAC_WR_S)
- /* High 16 bits of MAC address in big endian order */
- __be16 sah;
- /* Low 32 bits of MAC address in big endian order */
- __be32 sal;
+#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL BIT(ICE_AQC_MAN_MAC_WR_S)
+ /* byte stream in network order */
+ u8 mac_addr[ETH_ALEN];
__le32 addr_high;
__le32 addr_low;
};
@@ -232,6 +231,11 @@ struct ice_aqc_get_sw_cfg_resp {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
+#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
+#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID 0x58
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM 0x59
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
@@ -240,6 +244,9 @@ struct ice_aqc_get_sw_cfg_resp {
#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
+#define ICE_AQC_RES_TYPE_S 0
+#define ICE_AQC_RES_TYPE_M (0x07F << ICE_AQC_RES_TYPE_S)
+
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
*/
@@ -541,7 +548,7 @@ struct ice_sw_rule_lkup_rx_tx {
#define ICE_SINGLE_ACT_OTHER_ACTS 0x3
#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17
#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \
- (0x3 << \ ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
+ (0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
/* Bit 17:18 - Defines other actions */
/* Other action = 0 - Mirror VSI */
@@ -967,7 +974,7 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@@ -1059,6 +1066,25 @@ struct ice_aqc_set_phy_cfg_data {
u8 rsvd1;
};
+/* Set MAC Config command data structure (direct 0x0603) */
+struct ice_aqc_set_mac_cfg {
+ __le16 max_frame_size;
+ u8 params;
+#define ICE_AQ_SET_MAC_PACE_S 3
+#define ICE_AQ_SET_MAC_PACE_M (0xF << ICE_AQ_SET_MAC_PACE_S)
+#define ICE_AQ_SET_MAC_PACE_TYPE_M BIT(7)
+#define ICE_AQ_SET_MAC_PACE_TYPE_RATE 0
+#define ICE_AQ_SET_MAC_PACE_TYPE_FIXED ICE_AQ_SET_MAC_PACE_TYPE_M
+ u8 tx_tmr_priority;
+ __le16 tx_tmr_value;
+ __le16 fc_refresh_threshold;
+ u8 drop_opts;
+#define ICE_AQ_SET_MAC_AUTO_DROP_MASK BIT(0)
+#define ICE_AQ_SET_MAC_AUTO_DROP_NONE 0
+#define ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS BIT(0)
+ u8 reserved[7];
+};
+
/* Restart AN command data structure (direct 0x0605)
* Also used for response, with only the lport_num field present.
*/
@@ -1264,6 +1290,33 @@ struct ice_aqc_nvm_checksum {
u8 rsvd2[12];
};
+/* The result of a netlist NVM read comes in TLV format. The actual data
+ * (netlist header) starts from word offset 1 (byte 2). The FW strips
+ * out the type field from the TLV header, so all the netlist fields
+ * must adjust their offset value by 1 word (2 bytes) to map to their
+ * correct location.
+ */
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M ICE_M(0x3FF, 0)
+#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
+
+/* netlist ID block field offsets (word offsets) */
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9
+#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
+#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
+
/**
* Send to PF command (indirect 0x0801) ID is only used by PF
*
@@ -1648,10 +1701,12 @@ struct ice_pkg_ver {
};
#define ICE_PKG_NAME_SIZE 32
+#define ICE_SEG_NAME_SIZE 28
struct ice_aqc_get_pkg_info {
struct ice_pkg_ver ver;
- char name[ICE_PKG_NAME_SIZE];
+ char name[ICE_SEG_NAME_SIZE];
+ __le32 track_id;
u8 is_in_nvm;
u8 is_active;
u8 is_active_at_boot;
@@ -1738,6 +1793,7 @@ struct ice_aq_desc {
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
+ struct ice_aqc_set_mac_cfg set_mac_cfg;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
@@ -1770,6 +1826,7 @@ enum ice_aq_err {
ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
@@ -1834,6 +1891,7 @@ enum ice_adminq_opc {
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
ice_aqc_opc_set_phy_cfg = 0x0601,
+ ice_aqc_opc_set_mac_cfg = 0x0603,
ice_aqc_opc_restart_an = 0x0605,
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
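Per the netlist comment block above, FW strips the TLV type word from the read buffer, so each offset define is already shifted down by one word. A minimal sketch of applying those offsets, assuming words points at the __le16 word buffer returned by the NVM read (the helper is illustrative, not a driver API):

    /* node count sits at word 2 of the stripped TLV; only the low
     * 10 bits are valid (ICE_AQC_NVM_NETLIST_NODE_COUNT_M)
     */
    static u16 example_netlist_node_count(const __le16 *words)
    {
            return le16_to_cpu(words[ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET]) &
                   ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
    }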
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
new file mode 100644
index 000000000000..6560acd76c94
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#include "ice.h"
+
+/**
+ * ice_is_arfs_active - helper to check if aRFS is active
+ * @vsi: VSI to check
+ */
+static bool ice_is_arfs_active(struct ice_vsi *vsi)
+{
+ return !!vsi->arfs_fltr_list;
+}
+
+/**
+ * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
+ * @hw: pointer to the HW structure
+ * @flow_type: flow type as Flow Director understands it
+ *
+ * Flow Director will query this function to see if aRFS is currently using
+ * the specified flow_type for perfect (4-tuple) filters.
+ */
+bool
+ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+ struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
+ struct ice_pf *pf = hw->back;
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return false;
+
+ arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;
+
+ /* active counters can be updated by multiple CPUs */
+ smp_mb__before_atomic();
+ switch (flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
+ * @vsi: VSI that aRFS is active on
+ * @entry: aRFS entry used to change counters
+ * @add: true to increment counter, false to decrement
+ */
+static void
+ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
+ struct ice_arfs_entry *entry, bool add)
+{
+ struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;
+
+ switch (entry->fltr_info.flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_udpv4_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_udpv4_cnt);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_udpv6_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_udpv6_cnt);
+ break;
+ default:
+ dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
+ entry->fltr_info.flow_type);
+ }
+}
+
+/**
+ * ice_arfs_del_flow_rules - delete the rules passed in from HW
+ * @vsi: VSI for the flow rules that need to be deleted
+ * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
+ *
+ * Loop through the delete list passed in and remove the rules from HW. After
+ * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
+ * longer being referenced by the aRFS hash table.
+ */
+static void
+ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
+{
+ struct ice_arfs_entry *e;
+ struct hlist_node *n;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
+ int result;
+
+ result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
+ false);
+ if (!result)
+ ice_arfs_update_active_fltr_cntrs(vsi, e, false);
+ else
+ dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
+ result, e->fltr_state, e->fltr_info.fltr_id,
+ e->flow_id, e->fltr_info.q_index);
+
+ /* The aRFS hash table is no longer referencing this entry */
+ hlist_del(&e->list_entry);
+ devm_kfree(dev, e);
+ }
+}
+
+/**
+ * ice_arfs_add_flow_rules - add the rules passed in to HW
+ * @vsi: VSI for the flow rules that need to be added
+ * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
+ *
+ * Loop through the add list passed in and add the rules to HW. After each
+ * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
+ * the ice_arfs_entry(s) because they are still being referenced in the aRFS
+ * hash table.
+ */
+static void
+ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
+{
+ struct ice_arfs_entry_ptr *ep;
+ struct hlist_node *n;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
+ int result;
+
+ result = ice_fdir_write_fltr(vsi->back,
+ &ep->arfs_entry->fltr_info, true,
+ false);
+ if (!result)
+ ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
+ true);
+ else
+ dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
+ result, ep->arfs_entry->fltr_state,
+ ep->arfs_entry->fltr_info.fltr_id,
+ ep->arfs_entry->flow_id,
+ ep->arfs_entry->fltr_info.q_index);
+
+ hlist_del(&ep->list_entry);
+ devm_kfree(dev, ep);
+ }
+}
+
+/**
+ * ice_arfs_is_flow_expired - check if the aRFS entry has expired
+ * @vsi: VSI containing the aRFS entry
+ * @arfs_entry: aRFS entry that's being checked for expiration
+ *
+ * Return true if the flow has expired, else false. This function should be used
+ * to determine whether or not an aRFS entry should be removed from the hardware
+ * and software structures.
+ */
+static bool
+ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
+{
+#define ICE_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000)
+ if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
+ arfs_entry->flow_id,
+ arfs_entry->fltr_info.fltr_id))
+ return true;
+
+ /* expiration timer only used for UDP filters */
+ if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
+ arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ return false;
+
+ return time_in_range64(arfs_entry->time_activated +
+ ICE_ARFS_TIME_DELTA_EXPIRATION,
+ arfs_entry->time_activated, get_jiffies_64());
+}
+
+/**
+ * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
+ * @vsi: the VSI to be forwarded to
+ * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
+ * @add_list: list to populate with filters to be added to Flow Director
+ * @del_list: list to populate with filters to be deleted from Flow Director
+ *
+ * Iterate over the hlist at the index given in the aRFS hash table and
+ * determine if there are any aRFS entries that need to be either added or
+ * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the
+ * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and
+ * the flow has expired delete the filter from HW. The caller of this function
+ * is expected to add/delete rules on the add_list/del_list respectively.
+ */
+static void
+ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
+ struct hlist_head *add_list,
+ struct hlist_head *del_list)
+{
+ struct ice_arfs_entry *e;
+ struct hlist_node *n;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ /* go through the aRFS hlist at this idx and check for needed updates */
+ hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
+ /* check if filter needs to be added to HW */
+ if (e->fltr_state == ICE_ARFS_INACTIVE) {
+ enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
+ struct ice_arfs_entry_ptr *ep =
+ devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);
+
+ if (!ep)
+ continue;
+ INIT_HLIST_NODE(&ep->list_entry);
+ /* reference aRFS entry to add HW filter */
+ ep->arfs_entry = e;
+ hlist_add_head(&ep->list_entry, add_list);
+ e->fltr_state = ICE_ARFS_ACTIVE;
+ /* expiration timer only used for UDP flows */
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ e->time_activated = get_jiffies_64();
+ } else if (e->fltr_state == ICE_ARFS_ACTIVE) {
+ /* check if filter needs to be removed from HW */
+ if (ice_arfs_is_flow_expired(vsi, e)) {
+ /* remove aRFS entry from hash table for delete
+ * and to prevent referencing it the next time
+ * through this hlist index
+ */
+ hlist_del(&e->list_entry);
+ e->fltr_state = ICE_ARFS_TODEL;
+ /* save reference to aRFS entry for delete */
+ hlist_add_head(&e->list_entry, del_list);
+ }
+ }
+}
+
+/**
+ * ice_sync_arfs_fltrs - update all aRFS filters
+ * @pf: board private structure
+ */
+void ice_sync_arfs_fltrs(struct ice_pf *pf)
+{
+ HLIST_HEAD(tmp_del_list);
+ HLIST_HEAD(tmp_add_list);
+ struct ice_vsi *pf_vsi;
+ unsigned int i;
+
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi)
+ return;
+
+ if (!ice_is_arfs_active(pf_vsi))
+ return;
+
+ spin_lock_bh(&pf_vsi->arfs_lock);
+ /* Once we process aRFS for the PF VSI get out */
+ for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
+ ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
+ &tmp_del_list);
+ spin_unlock_bh(&pf_vsi->arfs_lock);
+
+ /* use list of ice_arfs_entry(s) for delete */
+ ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);
+
+ /* use list of ice_arfs_entry_ptr(s) for add */
+ ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
+}
+
+/**
+ * ice_arfs_build_entry - builds an aRFS entry based on input
+ * @vsi: destination VSI for this flow
+ * @fk: flow dissector keys for creating the tuple
+ * @rxq_idx: Rx queue to steer this flow to
+ * @flow_id: passed down from the stack and saved for flow expiration
+ *
+ * returns an aRFS entry on success and NULL on failure
+ */
+static struct ice_arfs_entry *
+ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
+ u16 rxq_idx, u32 flow_id)
+{
+ struct ice_arfs_entry *arfs_entry;
+ struct ice_fdir_fltr *fltr_info;
+ u8 ip_proto;
+
+ arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
+ sizeof(*arfs_entry),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!arfs_entry)
+ return NULL;
+
+ fltr_info = &arfs_entry->fltr_info;
+ fltr_info->q_index = rxq_idx;
+ fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ fltr_info->dest_vsi = vsi->idx;
+ ip_proto = fk->basic.ip_proto;
+
+ if (fk->basic.n_proto == htons(ETH_P_IP)) {
+ fltr_info->ip.v4.proto = ip_proto;
+ fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP :
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
+ fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
+ fltr_info->ip.v4.src_port = fk->ports.src;
+ fltr_info->ip.v4.dst_port = fk->ports.dst;
+ } else { /* ETH_P_IPV6 */
+ fltr_info->ip.v6.proto = ip_proto;
+ fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP :
+ ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+ memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+ fltr_info->ip.v6.src_port = fk->ports.src;
+ fltr_info->ip.v6.dst_port = fk->ports.dst;
+ }
+
+ arfs_entry->flow_id = flow_id;
+ fltr_info->fltr_id =
+ atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;
+
+ return arfs_entry;
+}
+
+/**
+ * ice_arfs_is_perfect_flow_set - Check to see if perfect flow is set
+ * @hw: pointer to HW structure
+ * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
+ * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
+ *
+ * We only support perfect (4-tuple) filters for aRFS. This function allows aRFS
+ * to check if perfect (4-tuple) flow rules are currently in place by Flow
+ * Director.
+ */
+static bool
+ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
+{
+ unsigned long *perfect_fltr = hw->fdir_perfect_fltr;
+
+ /* advanced Flow Director disabled, perfect filters always supported */
+ if (!perfect_fltr)
+ return true;
+
+ if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
+ else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
+ else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
+ else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);
+
+ return false;
+}
+
+/**
+ * ice_rx_flow_steer - steer the Rx flow to where application is being run
+ * @netdev: ptr to the netdev being adjusted
+ * @skb: buffer with required header information
+ * @rxq_idx: queue to which the flow needs to move
+ * @flow_id: flow identifier provided by the netdev
+ *
+ * Based on the skb, rxq_idx, and flow_id passed in add/update an entry in the
+ * aRFS hash table. Iterate over one of the hlists in the aRFS hash table and
+ * if the flow_id already exists in the hash table but the rxq_idx has changed
+ * mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW, else
+ * if the entry is marked as ICE_ARFS_TODEL delete it from the aRFS hash table.
+ * If neither of the previous conditions are true then add a new entry in the
+ * aRFS hash table, which gets set to ICE_ARFS_INACTIVE by default so it can be
+ * added to HW.
+ */
+int
+ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
+ u16 rxq_idx, u32 flow_id)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_arfs_entry *arfs_entry;
+ struct ice_vsi *vsi = np->vsi;
+ struct flow_keys fk;
+ struct ice_pf *pf;
+ __be16 n_proto;
+ u8 ip_proto;
+ u16 idx;
+ int ret;
+
+ /* failed to allocate memory for aRFS so don't crash */
+ if (unlikely(!vsi->arfs_fltr_list))
+ return -ENODEV;
+
+ pf = vsi->back;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
+
+ n_proto = fk.basic.n_proto;
+ /* Support only IPV4 and IPV6 */
+ if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
+ n_proto == htons(ETH_P_IPV6))
+ ip_proto = fk.basic.ip_proto;
+ else
+ return -EPROTONOSUPPORT;
+
+ /* Support only TCP and UDP */
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
+ return -EPROTONOSUPPORT;
+
+ /* only support 4-tuple filters for aRFS */
+ if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
+ return -EOPNOTSUPP;
+
+ /* choose the aRFS list bucket based on skb hash */
+ idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
+ /* search for entry in the bucket */
+ spin_lock_bh(&vsi->arfs_lock);
+ hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
+ list_entry) {
+ struct ice_fdir_fltr *fltr_info;
+
+ /* keep searching for the already existing arfs_entry flow */
+ if (arfs_entry->flow_id != flow_id)
+ continue;
+
+ fltr_info = &arfs_entry->fltr_info;
+ ret = fltr_info->fltr_id;
+
+ if (fltr_info->q_index == rxq_idx ||
+ arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
+ goto out;
+
+ /* update the queue to forward to on an already existing flow */
+ fltr_info->q_index = rxq_idx;
+ arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
+ ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
+ goto out_schedule_service_task;
+ }
+
+ arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
+ if (!arfs_entry) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = arfs_entry->fltr_info.fltr_id;
+ INIT_HLIST_NODE(&arfs_entry->list_entry);
+ hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
+out_schedule_service_task:
+ ice_service_task_schedule(pf);
+out:
+ spin_unlock_bh(&vsi->arfs_lock);
+ return ret;
+}
+
+/**
+ * ice_init_arfs_cntrs - initialize aRFS counter values
+ * @vsi: VSI that aRFS counters need to be initialized on
+ */
+static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
+{
+ if (!vsi || vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
+ GFP_KERNEL);
+ if (!vsi->arfs_fltr_cntrs)
+ return -ENOMEM;
+
+ vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
+ GFP_KERNEL);
+ if (!vsi->arfs_last_fltr_id) {
+ kfree(vsi->arfs_fltr_cntrs);
+ vsi->arfs_fltr_cntrs = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_init_arfs - initialize aRFS resources
+ * @vsi: the VSI to be forwarded to
+ */
+void ice_init_arfs(struct ice_vsi *vsi)
+{
+ struct hlist_head *arfs_fltr_list;
+ unsigned int i;
+
+ if (!vsi || vsi->type != ICE_VSI_PF)
+ return;
+
+ arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST,
+ GFP_KERNEL);
+ if (!arfs_fltr_list)
+ return;
+
+ if (ice_init_arfs_cntrs(vsi))
+ goto free_arfs_fltr_list;
+
+ for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
+ INIT_HLIST_HEAD(&arfs_fltr_list[i]);
+
+ spin_lock_init(&vsi->arfs_lock);
+
+ vsi->arfs_fltr_list = arfs_fltr_list;
+
+ return;
+
+free_arfs_fltr_list:
+ kfree(arfs_fltr_list);
+}
+
+/**
+ * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
+ * @vsi: the VSI to be forwarded to
+ */
+void ice_clear_arfs(struct ice_vsi *vsi)
+{
+ struct device *dev;
+ unsigned int i;
+
+ if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
+ !vsi->arfs_fltr_list)
+ return;
+
+ dev = ice_pf_to_dev(vsi->back);
+ for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
+ struct ice_arfs_entry *r;
+ struct hlist_node *n;
+
+ spin_lock_bh(&vsi->arfs_lock);
+ hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
+ list_entry) {
+ hlist_del(&r->list_entry);
+ devm_kfree(dev, r);
+ }
+ spin_unlock_bh(&vsi->arfs_lock);
+ }
+
+ kfree(vsi->arfs_fltr_list);
+ vsi->arfs_fltr_list = NULL;
+ kfree(vsi->arfs_last_fltr_id);
+ vsi->arfs_last_fltr_id = NULL;
+ kfree(vsi->arfs_fltr_cntrs);
+ vsi->arfs_fltr_cntrs = NULL;
+}
+
+/**
+ * ice_free_cpu_rx_rmap - free the CPU reverse map set up for the VSI
+ * @vsi: the VSI to be forwarded to
+ */
+void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
+{
+ struct net_device *netdev;
+
+ if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev || !netdev->rx_cpu_rmap ||
+ netdev->reg_state != NETREG_REGISTERED)
+ return;
+
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+}
+
+/**
+ * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
+ * @vsi: the VSI to be forwarded to
+ */
+int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
+{
+ struct net_device *netdev;
+ struct ice_pf *pf;
+ int base_idx, i;
+
+ if (!vsi || vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ pf = vsi->back;
+ netdev = vsi->netdev;
+ if (!pf || !netdev || !vsi->num_q_vectors ||
+ vsi->netdev->reg_state != NETREG_REGISTERED)
+ return -EINVAL;
+
+ netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
+ vsi->type, netdev->name, vsi->num_q_vectors);
+
+ netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
+ if (unlikely(!netdev->rx_cpu_rmap))
+ return -EINVAL;
+
+ base_idx = vsi->base_vector;
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
+ pf->msix_entries[base_idx + i].vector)) {
+ ice_free_cpu_rx_rmap(vsi);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_remove_arfs - remove/clear all aRFS resources
+ * @pf: device private structure
+ */
+void ice_remove_arfs(struct ice_pf *pf)
+{
+ struct ice_vsi *pf_vsi;
+
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi)
+ return;
+
+ ice_free_cpu_rx_rmap(pf_vsi);
+ ice_clear_arfs(pf_vsi);
+}
+
+/**
+ * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
+ * @pf: device private structure
+ */
+void ice_rebuild_arfs(struct ice_pf *pf)
+{
+ struct ice_vsi *pf_vsi;
+
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi)
+ return;
+
+ ice_remove_arfs(pf);
+ if (ice_set_cpu_rx_rmap(pf_vsi)) {
+ dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
+ return;
+ }
+ ice_init_arfs(pf_vsi);
+}
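ice_rx_flow_steer() above is the driver's implementation of the stack's RFS acceleration callback; the stack calls it per flow with the desired Rx queue and a flow ID that ice_arfs_is_flow_expired() later hands back to rps_may_expire_flow(). A sketch of how the callback is wired up (in the real driver this is part of ice_netdev_ops in ice_main.c; the ops listed here are a minimal illustrative subset):

    static const struct net_device_ops example_ice_netdev_ops = {
            .ndo_open = ice_open,
            .ndo_stop = ice_stop,
    #ifdef CONFIG_RFS_ACCEL
            .ndo_rx_flow_steer = ice_rx_flow_steer,
    #endif
    };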
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
new file mode 100644
index 000000000000..f39cd16403ed
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#ifndef _ICE_ARFS_H_
+#define _ICE_ARFS_H_
+enum ice_arfs_fltr_state {
+ ICE_ARFS_INACTIVE,
+ ICE_ARFS_ACTIVE,
+ ICE_ARFS_TODEL,
+};
+
+struct ice_arfs_entry {
+ struct ice_fdir_fltr fltr_info;
+ struct hlist_node list_entry;
+ u64 time_activated; /* only valid for UDP flows */
+ u32 flow_id;
+ /* fltr_state = 0 - ICE_ARFS_INACTIVE:
+ * filter needs to be updated or programmed in HW.
+ * fltr_state = 1 - ICE_ARFS_ACTIVE:
+ * filter is active and programmed in HW.
+ * fltr_state = 2 - ICE_ARFS_TODEL:
+ * filter has been deleted from HW and needs to be removed from
+ * the aRFS hash table.
+ */
+ u8 fltr_state;
+};
+
+struct ice_arfs_entry_ptr {
+ struct ice_arfs_entry *arfs_entry;
+ struct hlist_node list_entry;
+};
+
+struct ice_arfs_active_fltr_cntrs {
+ atomic_t active_tcpv4_cnt;
+ atomic_t active_tcpv6_cnt;
+ atomic_t active_udpv4_cnt;
+ atomic_t active_udpv6_cnt;
+};
+
+#ifdef CONFIG_RFS_ACCEL
+int
+ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
+ u16 rxq_idx, u32 flow_id);
+void ice_clear_arfs(struct ice_vsi *vsi);
+void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
+void ice_init_arfs(struct ice_vsi *vsi);
+void ice_sync_arfs_fltrs(struct ice_pf *pf);
+int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
+void ice_remove_arfs(struct ice_pf *pf);
+void ice_rebuild_arfs(struct ice_pf *pf);
+bool
+ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
+ enum ice_fltr_ptype flow_type);
+#else
+#define ice_sync_arfs_fltrs(pf) do {} while (0)
+#define ice_init_arfs(vsi) do {} while (0)
+#define ice_clear_arfs(vsi) do {} while (0)
+#define ice_remove_arfs(pf) do {} while (0)
+#define ice_free_cpu_rx_rmap(vsi) do {} while (0)
+#define ice_rebuild_arfs(pf) do {} while (0)
+
+static inline int ice_set_cpu_rx_rmap(struct ice_vsi __always_unused *vsi)
+{
+ return 0;
+}
+
+static inline int
+ice_rx_flow_steer(struct net_device __always_unused *netdev,
+ const struct sk_buff __always_unused *skb,
+ u16 __always_unused rxq_idx, u32 __always_unused flow_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool
+ice_is_arfs_using_perfect_flow(struct ice_hw __always_unused *hw,
+ enum ice_fltr_ptype __always_unused flow_type)
+{
+ return false;
+}
+#endif /* CONFIG_RFS_ACCEL */
+#endif /* _ICE_ARFS_H_ */
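The #else branch above stubs out every aRFS hook, so common code paths can call them unconditionally instead of sprinkling CONFIG_RFS_ACCEL guards. A minimal sketch of a caller that compiles the same either way (the function is hypothetical; error handling elided):

    static int example_open_pf_vsi(struct ice_vsi *vsi)
    {
            ice_init_arfs(vsi);     /* no-op when CONFIG_RFS_ACCEL is off */
            return ice_set_cpu_rx_rmap(vsi);        /* stub returns 0 */
    }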
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index a19cd6f5436b..d620d26d42ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */
+#include <net/xdp_sock_drv.h>
#include "ice_base.h"
+#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
@@ -12,7 +14,7 @@
*/
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
- int offset, i;
+ unsigned int offset, i;
mutex_lock(qs_cfg->qs_mutex);
offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
@@ -24,7 +26,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
for (i = 0; i < qs_cfg->q_count; i++)
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
mutex_unlock(qs_cfg->qs_mutex);
return 0;
@@ -38,7 +40,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
*/
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
- int i, index = 0;
+ unsigned int i, index = 0;
mutex_lock(qs_cfg->qs_mutex);
for (i = 0; i < qs_cfg->q_count; i++) {
@@ -47,7 +49,7 @@ static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
if (index >= qs_cfg->pf_map_size)
goto err_scatter;
set_bit(index, qs_cfg->pf_map);
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
}
mutex_unlock(qs_cfg->qs_mutex);
@@ -96,7 +98,7 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
* We allocate one q_vector and set default value for ITR setting associated
* with this q_vector. If allocation fails we return -ENOMEM.
*/
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
@@ -246,6 +248,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*/
switch (vsi->type) {
case ICE_VSI_LB:
+ case ICE_VSI_CTRL:
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
@@ -279,12 +282,13 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*/
int ice_setup_rx_ctx(struct ice_ring *ring)
{
+ struct device *dev = ice_pf_to_dev(ring->vsi->back);
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
+ u16 num_bufs = ICE_DESC_UNUSED(ring);
struct ice_vsi *vsi = ring->vsi;
u32 rxdid = ICE_RXDID_FLEX_NIC;
struct ice_rlan_ctx rlan_ctx;
struct ice_hw *hw;
- u32 regval;
u16 pf_q;
int err;
@@ -308,24 +312,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
if (ring->xsk_umem) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ ring->rx_buf_len =
+ xsk_umem_get_rx_frame_size(ring->xsk_umem);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
*/
chain_len = 1;
- ring->zca.free = ice_zca_free;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_ZERO_COPY,
- &ring->zca);
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
if (err)
return err;
+ xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
- dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- ring->zca.free = NULL;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
@@ -376,38 +379,27 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
- rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+ rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
chain_len * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
- /* Enable Flexible Descriptors in the queue context which
- * allows this driver to select a specific receive descriptor format
- */
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- if (vsi->type != ICE_VSI_VF) {
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile ID;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
-
- } else {
- regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
- QRXFLXP_CNTXT_RXDID_PRIO_M |
- QRXFLXP_CNTXT_TS_M);
- }
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ /* Enable Flexible Descriptors in the queue context which
+ * allows this driver to select a specific receive descriptor format
+ * increasing context priority to pick up profile ID; default is 0x01;
+ * setting to 0x03 to ensure profile is programming if prev context is
+ * of same priority
+ */
+ if (vsi->type != ICE_VSI_VF)
+ ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
+ else
+ ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err);
return -EIO;
}
@@ -425,13 +417,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
- err = ring->xsk_umem ?
- ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
- ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
- if (err)
- dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
- ring->xsk_umem ? "UMEM enabled " : "",
- ring->q_index, pf_q);
+ if (ring->xsk_umem) {
+ if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
+ dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+ num_bufs, ring->q_index);
+ dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
+
+ return 0;
+ }
+
+ err = ice_alloc_rx_bufs_zc(ring, num_bufs);
+ if (err)
+ dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+ ring->q_index, pf_q);
+ return 0;
+ }
+
+ ice_alloc_rx_bufs(ring, num_bufs);
return 0;
}
@@ -453,7 +455,7 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
if (ret) {
/* contig failed, so try with scatter approach */
qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
- qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+ qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
qs_cfg->scatter_count);
ret = __ice_vsi_get_qs_sc(qs_cfg);
}
@@ -526,7 +528,8 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
- int v_idx, err;
+ u16 v_idx;
+ int err;
if (vsi->q_vectors[0]) {
dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
@@ -562,7 +565,7 @@ err_out:
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
int q_vectors = vsi->num_q_vectors;
- int tx_rings_rem, rx_rings_rem;
+ u16 tx_rings_rem, rx_rings_rem;
int v_id;
/* initially assigning remaining rings count to VSIs num queue value */
@@ -571,10 +574,12 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
for (v_id = 0; v_id < q_vectors; v_id++) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
- int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+ u8 tx_rings_per_v, rx_rings_per_v;
+ u16 q_id, q_base;
/* Tx rings mapping to vector */
- tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+ tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
+ q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
q_vector->tx.ring = NULL;
q_vector->tx.itr_idx = ICE_TX_ITR;
@@ -590,7 +595,8 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_rem -= tx_rings_per_v;
/* Rx rings mapping to vector */
- rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+ rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
+ q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
q_vector->rx.ring = NULL;
q_vector->rx.itr_idx = ICE_RX_ITR;
@@ -633,6 +639,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
u8 buf_len = sizeof(*qg_buf);
+ struct ice_hw *hw = &pf->hw;
enum ice_status status;
u16 pf_q;
u8 tc;
@@ -641,13 +648,13 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
ice_tlan_ctx_info);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
*/
- ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
if (IS_ENABLED(CONFIG_DCB))
tc = ring->dcb_tc;
@@ -662,8 +669,8 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
1, qg_buf, buf_len, NULL);
if (status) {
- dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
- status);
+ dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
+ ice_stat_str(status));
return -ENODEV;
}
@@ -832,8 +839,8 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
- status);
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
+ ice_stat_str(status));
return -ENODEV;
}
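ice_setup_rx_ctx() above now derives num_bufs from ICE_DESC_UNUSED(ring), which always leaves one descriptor slot unfilled so that a producer index equal to the consumer index unambiguously means an empty ring. A standalone restatement of that arithmetic (the macro itself lives in ice_txrx.h and reads the ring's next_to_clean/next_to_use/count fields):

    /* free slots between producer (ntu) and consumer (ntc), minus the one
     * reserved slot that keeps a full ring distinguishable from an empty one
     */
    static u16 example_desc_unused(u16 ntc, u16 ntu, u16 count)
    {
            return (ntc > ntu ? 0 : count) + ntc - ntu - 1;
    }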
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2c0d8fd3d5cd..bce0e1281168 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -316,12 +316,78 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
}
/**
+ * ice_fill_tx_timer_and_fc_thresh
+ * @hw: pointer to the HW struct
+ * @cmd: pointer to MAC cfg structure
+ *
+ * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
+ * descriptor
+ */
+static void
+ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
+ struct ice_aqc_set_mac_cfg *cmd)
+{
+ u16 fc_thres_val, tx_timer_val;
+ u32 val;
+
+ /* We read back the transmit timer and FC threshold value of
+ * LFC. Thus, we will use index =
+ * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
+ *
+ * Also, because we are operating on transmit timer and FC
+ * threshold of LFC, we don't turn on any bit in tx_tmr_priority
+ */
+#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
+
+ /* Retrieve the transmit timer */
+ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
+ tx_timer_val = val &
+ PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
+ cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
+
+ /* Retrieve the FC threshold */
+ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
+ fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
+
+ cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
+}
+
+/**
+ * ice_aq_set_mac_cfg
+ * @hw: pointer to the HW struct
+ * @max_frame_size: Maximum Frame Size to be supported
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set MAC configuration (0x0603)
+ */
+enum ice_status
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_mac_cfg *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_mac_cfg;
+
+ if (max_frame_size == 0)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
+
+ cmd->max_frame_size = cpu_to_le16(max_frame_size);
+
+ ice_fill_tx_timer_and_fc_thresh(hw, cmd);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
+ enum ice_status status;
hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->switch_info), GFP_KERNEL);
@@ -332,7 +398,12 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- return ice_init_def_sw_recp(hw);
+ status = ice_init_def_sw_recp(hw);
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
+ return status;
+ }
+ return 0;
}
/**
@@ -653,6 +724,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
+ /* Set bit to enable Flow Director filters */
+ wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
+ INIT_LIST_HEAD(&hw->fdir_list_head);
+
ice_clear_pxe_mode(hw);
status = ice_init_nvm(hw);
@@ -743,9 +818,18 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
+ /* enable jumbo frame support at MAC level */
+ status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
+ /* Obtain counter base index which would be used by flow director */
+ status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
+ mutex_init(&hw->tnl_lock);
return 0;
err_unroll_fltr_mgmt_struct:
@@ -769,12 +853,14 @@ err_unroll_cqinit:
*/
void ice_deinit_hw(struct ice_hw *hw)
{
+ ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
ice_cleanup_fltr_mgmt_struct(hw);
ice_sched_cleanup_all(hw);
ice_sched_clear_agg(hw);
ice_free_seg(hw);
ice_free_hw_tbls(hw);
+ mutex_destroy(&hw->tnl_lock);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
@@ -878,7 +964,12 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
- for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+ /* Wait for the PFR to complete. The wait time is the global config lock
+ * timeout plus the PFR timeout which will account for a possible reset
+ * that is occurring during a download package operation.
+ */
+ for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
+ ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, PFGEN_CTRL);
if (!(reg & PFGEN_CTRL_PFSWR_M))
break;
@@ -1012,7 +1103,7 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
rlan_ctx->prefena = 1;
- ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
+ ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
@@ -1678,6 +1769,33 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
+ case ICE_AQC_CAPS_FD:
+ if (dev_p) {
+ dev_p->num_flow_director_fltr = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_flow_director_fltr = %d\n",
+ prefix,
+ dev_p->num_flow_director_fltr);
+ }
+ if (func_p) {
+ u32 reg_val, val;
+
+ reg_val = rd32(hw, GLQF_FD_SIZE);
+ val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
+ GLQF_FD_SIZE_FD_GSIZE_S;
+ func_p->fd_fltr_guar =
+ ice_get_num_per_func(hw, val);
+ val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
+ GLQF_FD_SIZE_FD_BSIZE_S;
+ func_p->fd_fltr_best_effort = val;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: fd_fltr_guar = %d\n",
+ prefix, func_p->fd_fltr_guar);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: fd_fltr_best_effort = %d\n",
+ prefix, func_p->fd_fltr_best_effort);
+ }
+ break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
@@ -1887,10 +2005,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
cmd->flags = flags;
-
- /* Prep values for flags, sah, sal */
- cmd->sah = htons(*((const u16 *)mac_addr));
- cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
+ ether_addr_copy(cmd->mac_addr, mac_addr);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -2117,6 +2232,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
+ enum ice_status status;
if (!cfg)
return ICE_ERR_PARAM;
@@ -2145,7 +2261,11 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
- return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+ status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ status = 0;
+
+ return status;
}
/**
@@ -3089,12 +3209,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/**
* ice_set_ctx - set context bits in packed structure
+ * @hw: pointer to the hardware structure
* @src_ctx: pointer to a generic non-packed context structure
* @dest_ctx: pointer to memory for the packed structure
* @ce_info: a description of the structure to be transformed
*/
enum ice_status
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
int f;
@@ -3103,6 +3225,12 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* using the correct size so that we are correct regardless
* of the endianness of the machine.
*/
+ if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
+ ice_debug(hw, ICE_DBG_QCTX,
+ "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
+ f, ce_info[f].width, ce_info[f].size_of);
+ continue;
+ }
switch (ce_info[f].size_of) {
case sizeof(u8):
ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 8104f3d64d96..9b9e50d2398b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -70,7 +70,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
@@ -108,6 +109,8 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
+enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index dd946866d7b8..1e18021aa073 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -12,6 +12,7 @@ do { \
(qinfo)->sq.bal = prefix##_ATQBAL; \
(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
+ (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \
(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
(qinfo)->rq.head = prefix##_ARQH; \
(qinfo)->rq.tail = prefix##_ARQT; \
@@ -20,6 +21,7 @@ do { \
(qinfo)->rq.bal = prefix##_ARQBAL; \
(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
+ (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \
(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
} while (0)
@@ -199,7 +201,9 @@ unwind_alloc_rq_bufs:
cq->rq.r.rq_bi[i].pa = 0;
cq->rq.r.rq_bi[i].size = 0;
}
+ cq->rq.r.rq_bi = NULL;
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
+ cq->rq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -245,7 +249,9 @@ unwind_alloc_sq_bufs:
cq->sq.r.sq_bi[i].pa = 0;
cq->sq.r.sq_bi[i].size = 0;
}
+ cq->sq.r.sq_bi = NULL;
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+ cq->sq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -304,6 +310,28 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return 0;
}
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ int i; \
+ /* free descriptors */ \
+ if ((qi)->ring.r.ring##_bi) \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) { \
+ dmam_free_coherent(ice_hw_to_dev(hw), \
+ (qi)->ring.r.ring##_bi[i].size, \
+ (qi)->ring.r.ring##_bi[i].va, \
+ (qi)->ring.r.ring##_bi[i].pa); \
+ (qi)->ring.r.ring##_bi[i].va = NULL;\
+ (qi)->ring.r.ring##_bi[i].pa = 0;\
+ (qi)->ring.r.ring##_bi[i].size = 0;\
+ } \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
+ /* free DMA head */ \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
+} while (0)
+
/**
* ice_init_sq - main initialization routine for Control ATQ
* @hw: pointer to the hardware structure
@@ -357,6 +385,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
ice_free_cq_ring(hw, &cq->sq);
init_ctrlq_exit:
@@ -416,33 +445,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
ice_free_cq_ring(hw, &cq->rq);
init_ctrlq_exit:
return ret_code;
}
-#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
-do { \
- int i; \
- /* free descriptors */ \
- for (i = 0; i < (qi)->num_##ring##_entries; i++) \
- if ((qi)->ring.r.ring##_bi[i].pa) { \
- dmam_free_coherent(ice_hw_to_dev(hw), \
- (qi)->ring.r.ring##_bi[i].size,\
- (qi)->ring.r.ring##_bi[i].va,\
- (qi)->ring.r.ring##_bi[i].pa);\
- (qi)->ring.r.ring##_bi[i].va = NULL; \
- (qi)->ring.r.ring##_bi[i].pa = 0; \
- (qi)->ring.r.ring##_bi[i].size = 0; \
- } \
- /* free the buffer info list */ \
- if ((qi)->ring.cmd_buf) \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
- /* free DMA head */ \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
-} while (0)
-
/**
* ice_shutdown_sq - shutdown the Control ATQ
* @hw: pointer to the hardware structure
@@ -635,6 +644,50 @@ init_ctrlq_free_sq:
}
/**
+ * ice_shutdown_ctrlq - shutdown routine for any control queue
+ * @hw: pointer to the hardware structure
+ * @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
+ */
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+{
+ struct ice_ctl_q_info *cq;
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ cq = &hw->adminq;
+ if (ice_check_sq_alive(hw, cq))
+ ice_aq_q_shutdown(hw, true);
+ break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ break;
+ default:
+ return;
+ }
+
+ ice_shutdown_sq(hw, cq);
+ ice_shutdown_rq(hw, cq);
+}
+
+/**
+ * ice_shutdown_all_ctrlq - shutdown routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shutdown and later restart control queues, such
+ * as in response to a reset event.
+ */
+void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+{
+ /* Shutdown FW admin queue */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PF-VF Mailbox */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+}
+
+/**
* ice_init_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
@@ -649,17 +702,27 @@ init_ctrlq_free_sq:
*/
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
- enum ice_status ret_code;
+ enum ice_status status;
+ u32 retry = 0;
/* Init FW admin queue */
- ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
- if (ret_code)
- return ret_code;
+ do {
+ status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ if (status)
+ return status;
- ret_code = ice_init_check_adminq(hw);
- if (ret_code)
- return ret_code;
+ status = ice_init_check_adminq(hw);
+ if (status != ICE_ERR_AQ_FW_CRITICAL)
+ break;
+
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Retry Admin Queue init due to FW critical error\n");
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
+ } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
+ if (status)
+ return status;
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
@@ -701,57 +764,12 @@ enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
}
/**
- * ice_shutdown_ctrlq - shutdown routine for any control queue
- * @hw: pointer to the hardware structure
- * @q_type: specific Control queue type
- *
- * NOTE: this function does not destroy the control queue locks.
- */
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
-{
- struct ice_ctl_q_info *cq;
-
- switch (q_type) {
- case ICE_CTL_Q_ADMIN:
- cq = &hw->adminq;
- if (ice_check_sq_alive(hw, cq))
- ice_aq_q_shutdown(hw, true);
- break;
- case ICE_CTL_Q_MAILBOX:
- cq = &hw->mailboxq;
- break;
- default:
- return;
- }
-
- ice_shutdown_sq(hw, cq);
- ice_shutdown_rq(hw, cq);
-}
-
-/**
- * ice_shutdown_all_ctrlq - shutdown routine for all control queues
- * @hw: pointer to the hardware structure
- *
- * NOTE: this function does not destroy the control queue locks. The driver
- * may call this at runtime to shutdown and later restart control queues, such
- * as in response to a reset event.
- */
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
-{
- /* Shutdown FW admin queue */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
- /* Shutdown PF-VF Mailbox */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
-}
-
-/**
* ice_destroy_ctrlq_locks - Destroy locks for a control queue
* @cq: pointer to the control queue
*
* Destroys the send and receive queue locks for a given control queue.
*/
-static void
-ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
mutex_destroy(&cq->sq_lock);
mutex_destroy(&cq->rq_lock);
@@ -1042,9 +1060,15 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* update the error if time out occurred */
if (!cmd_completed) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send Queue Writeback timeout.\n");
- status = ICE_ERR_AQ_TIMEOUT;
+ if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
+ rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
+ status = ICE_ERR_AQ_FW_CRITICAL;
+ } else {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send Queue Writeback timeout.\n");
+ status = ICE_ERR_AQ_TIMEOUT;
+ }
}
sq_send_command_error:
@@ -1128,7 +1152,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
memcpy(&e->desc, desc, sizeof(e->desc));
datalen = le16_to_cpu(desc->datalen);
- e->msg_len = min(datalen, e->buf_len);
+ e->msg_len = min_t(u16, datalen, e->buf_len);
if (e->msg_buf && e->msg_len)
memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index bf0ebe6149e8..faaa08e8171b 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -34,6 +34,8 @@ enum ice_ctl_q {
/* Control Queue timeout settings - max delay 250ms */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
+#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
+#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
@@ -59,6 +61,7 @@ struct ice_ctl_q_ring {
u32 bal;
u32 len_mask;
u32 len_ena_mask;
+ u32 len_crit_mask;
u32 head_mask;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 7bea09363b42..979af197f8a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -63,6 +63,64 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_is_pfc_causing_hung_q
+ * @pf: pointer to PF structure
+ * @txqueue: Tx queue which is supposedly hung queue
+ *
+ * find if PFC is causing the hung queue, if yes return true else false
+ */
+bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue)
+{
+ u8 num_tcs = 0, i, tc, up_mapped_tc, up_in_tc = 0;
+ u64 ref_prio_xoff[ICE_MAX_UP];
+ struct ice_vsi *vsi;
+ u32 up2tc;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return false;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ num_tcs++;
+
+ /* first find out the TC to which the hung queue belongs to */
+ for (tc = 0; tc < num_tcs - 1; tc++)
+ if (ice_find_q_in_range(vsi->tc_cfg.tc_info[tc].qoffset,
+ vsi->tc_cfg.tc_info[tc + 1].qoffset,
+ txqueue))
+ break;
+
+ /* Build a bit map of all UPs associated to the suspect hung queue TC,
+ * so that we check for its counter increment.
+ */
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+ for (i = 0; i < ICE_MAX_UP; i++) {
+ up_mapped_tc = (up2tc >> (i * 3)) & 0x7;
+ if (up_mapped_tc == tc)
+ up_in_tc |= BIT(i);
+ }
+
+ /* Now that we figured out that hung queue is PFC enabled, still the
+ * Tx timeout can be legitimate. So to make sure Tx timeout is
+ * absolutely caused by PFC storm, check if the counters are
+ * incrementing.
+ */
+ for (i = 0; i < ICE_MAX_UP; i++)
+ if (up_in_tc & BIT(i))
+ ref_prio_xoff[i] = pf->stats.priority_xoff_rx[i];
+
+ ice_update_dcb_stats(pf);
+
+ for (i = 0; i < ICE_MAX_UP; i++)
+ if (up_in_tc & BIT(i))
+ if (pf->stats.priority_xoff_rx[i] > ref_prio_xoff[i])
+ return true;
+
+ return false;
+}
+
+/**
* ice_dcb_get_mode - gets the DCB mode
* @port_info: pointer to port info structure
* @host: if set it's HOST if not it's MANAGED
@@ -526,16 +584,21 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
*/
static bool ice_dcb_tc_contig(u8 *prio_table)
{
- u8 max_tc = 0;
+ bool found_empty = false;
+ u8 used_tc = 0;
int i;
- for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
- u8 cur_tc = prio_table[i];
+ /* Create a bitmap of used TCs */
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
+ used_tc |= BIT(prio_table[i]);
- if (cur_tc > max_tc)
- return false;
- else if (cur_tc == max_tc)
- max_tc++;
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
+ if (used_tc & BIT(i)) {
+ if (found_empty)
+ return false;
+ } else {
+ found_empty = true;
+ }
}
return true;
@@ -728,39 +791,31 @@ void ice_update_dcb_stats(struct ice_pf *pf)
* ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB
* @tx_ring: ring to send buffer on
* @first: pointer to struct ice_tx_buf
+ *
+ * This should not be called if the outer VLAN is software offloaded as the VLAN
+ * tag will already be configured with the correct ID and priority bits
*/
-int
+void
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
- return 0;
+ return;
/* Insert 802.1p priority into VLAN header */
- if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) ||
+ if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
/* Mask the lower 3 bits to set the 802.1p priority */
first->tx_flags |= (skb->priority & 0x7) <<
ICE_TX_FLAGS_VLAN_PR_S;
- if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) {
- struct vlan_ethhdr *vhdr;
- int rc;
-
- rc = skb_cow_head(skb, 0);
- if (rc < 0)
- return rc;
- vhdr = (struct vlan_ethhdr *)skb->data;
- vhdr->h_vlan_TCI = htons(first->tx_flags >>
- ICE_TX_FLAGS_VLAN_S);
- } else {
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
- }
+ /* if this is not already set it means a VLAN 0 + priority needs
+ * to be offloaded
+ */
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
-
- return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 37680e815b02..323238669572 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -17,6 +17,8 @@
void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
+void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi);
+bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue);
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
@@ -25,13 +27,27 @@ void ice_pf_dcb_recfg(struct ice_pf *pf);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
-int
+void
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
struct ice_tx_buf *first);
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
+
+/**
+ * ice_find_q_in_range
+ * @low: start of queue range for a TC i.e. offset of TC
+ * @high: start of queue for next TC
+ * @tx_q: hung_queue/tx_queue
+ *
+ * finds if queue 'tx_q' falls between the two offsets of any given TC
+ */
+static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
+{
+ return (tx_q >= low) && (tx_q < high);
+}
+
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
@@ -79,6 +95,13 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
return 0;
}
+static inline bool
+ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
+ unsigned int __always_unused txqueue)
+{
+ return false;
+}
+
#define ice_update_dcb_stats(pf) do {} while (0)
#define ice_pf_dcb_recfg(pf) do {} while (0)
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index c4c12414083a..87f91b750d59 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -7,8 +7,6 @@
#include "ice_dcb_nl.h"
#include <net/dcbnl.h>
-#define ICE_APP_PROT_ID_ROCE 0x8915
-
/**
* ice_dcbnl_devreset - perform enough of a ifdown/ifup to sync DCBNL info
* @netdev: device associated with interface that needs reset
@@ -671,7 +669,7 @@ static bool
ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
struct ice_dcb_app_priority_table *app)
{
- int i;
+ unsigned int i;
for (i = 0; i < cfg->numapps; i++) {
if (app->selector == cfg->app[i].selector &&
@@ -746,7 +744,8 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcbx_cfg *old_cfg, *new_cfg;
- int i, j, ret = 0;
+ unsigned int i, j;
+ int ret = 0;
if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
return -EINVAL;
@@ -869,7 +868,7 @@ void ice_dcbnl_set_all(struct ice_vsi *vsi)
struct ice_port_info *pi;
struct dcb_app sapp;
struct ice_pf *pf;
- int i;
+ unsigned int i;
if (!netdev)
return;
@@ -941,7 +940,7 @@ ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg)
{
struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
- int i;
+ unsigned int i;
if (!main_vsi)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index c6833944b90a..a73d06e06b5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -105,6 +105,27 @@ static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
return 0;
}
+static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+
+ /* The netlist version fields are BCD formatted */
+ snprintf(buf, len, "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
+ netlist->cust_ver);
+
+ return 0;
+}
+
+static int ice_info_netlist_build(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+
+ snprintf(buf, len, "0x%08x", netlist->hash);
+
+ return 0;
+}
+
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }
@@ -128,6 +149,8 @@ static const struct ice_devlink_version {
running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
+ running("fw.netlist", ice_info_netlist_ver),
+ running("fw.netlist.build", ice_info_netlist_build),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 593fb37bd59e..68c38004a088 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -5,6 +5,7 @@
#include "ice.h"
#include "ice_flow.h"
+#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -129,6 +130,8 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
+ ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
+ ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
};
static const u32 ice_regs_dump_list[] = {
@@ -139,9 +142,6 @@ static const u32 ice_regs_dump_list[] = {
QINT_RQCTL(0),
PFINT_OICR_ENA,
QRX_ITR(0),
- PF0INT_ITR_0(0),
- PF0INT_ITR_1(0),
- PF0INT_ITR_2(0),
};
struct ice_priv_flag {
@@ -157,6 +157,8 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+ ICE_PRIV_FLAG("vf-true-promisc-support",
+ ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -203,7 +205,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
struct ice_pf *pf = np->vsi->back;
struct ice_hw *hw = &pf->hw;
u32 *regs_buf = (u32 *)p;
- int i;
+ unsigned int i;
regs->version = 1;
@@ -273,8 +275,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) {
- dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -282,8 +285,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
false);
if (status) {
- dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto release;
}
@@ -304,7 +308,7 @@ out:
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
- int i;
+ unsigned int i;
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
@@ -332,7 +336,8 @@ static u64 ice_link_test(struct net_device *netdev)
netdev_info(netdev, "link test\n");
status = ice_get_link_status(np->vsi->port_info, &link_up);
if (status) {
- netdev_err(netdev, "link query error, status = %d\n", status);
+ netdev_err(netdev, "link query error, status = %s\n",
+ ice_stat_str(status));
return 1;
}
@@ -373,7 +378,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
0x00000000, 0xFFFFFFFF
};
u32 val, orig_val;
- int i;
+ unsigned int i;
orig_val = rd32(hw, reg);
for (i = 0; i < ARRAY_SIZE(patterns); ++i) {
@@ -426,7 +431,7 @@ static u64 ice_reg_test(struct net_device *netdev)
GLINT_ITR(2, 1) - GLINT_ITR(2, 0)},
{GLINT_CTL, 0xffff0001, 1, 0}
};
- int i;
+ unsigned int i;
netdev_dbg(netdev, "Register test\n");
for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) {
@@ -671,7 +676,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_ring *tx_ring, *rx_ring;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
- LIST_HEAD(tmp_list);
struct device *dev;
u8 *tx_frame;
int i;
@@ -707,16 +711,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
/* Test VSI needs to receive broadcast packets */
eth_broadcast_addr(broadcast);
- if (ice_add_mac_to_list(test_vsi, &tmp_list, broadcast)) {
+ if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) {
ret = 5;
goto lbtest_mac_dis;
}
- if (ice_add_mac(&pf->hw, &tmp_list)) {
- ret = 6;
- goto free_mac_list;
- }
-
if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
ret = 7;
goto remove_mac_filters;
@@ -739,10 +738,8 @@ static u64 ice_loopback_test(struct net_device *netdev)
lbtest_free_frame:
devm_kfree(dev, tx_frame);
remove_mac_filters:
- if (ice_remove_mac(&pf->hw, &tmp_list))
+ if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
-free_mac_list:
- ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
/* Disable MAC loopback after the test is completed. */
if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
@@ -1158,8 +1155,9 @@ static int ice_nway_reset(struct net_device *netdev)
status = ice_aq_set_link_restart_an(pi, false, NULL);
if (status) {
- netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
- status, pi->hw->adminq.sq_last_status);
+ netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(pi->hw->adminq.sq_last_status));
return -EIO;
}
@@ -1308,6 +1306,16 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
ice_down(vsi);
ice_up(vsi);
}
+ /* don't allow modification of this flag when a single VF is in
+ * promiscuous mode because it's not supported
+ */
+ if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
+ ice_is_any_vf_in_promisc(pf)) {
+ dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
+ /* toggle bit back to previous state */
+ change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
+ ret = -EAGAIN;
+ }
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
@@ -2450,8 +2458,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
if (status) {
- dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
- vsi->vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n",
+ vsi->vsi_num, ice_stat_str(status));
return -EINVAL;
}
@@ -2526,6 +2534,10 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
struct ice_vsi *vsi = np->vsi;
switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return ice_add_fdir_ethtool(vsi, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return ice_del_fdir_ethtool(vsi, cmd);
case ETHTOOL_SRXFH:
return ice_set_rss_hash_opt(vsi, cmd);
default:
@@ -2549,12 +2561,27 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret = -EOPNOTSUPP;
+ struct ice_hw *hw;
+
+ hw = &vsi->back->hw;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = vsi->rss_size;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = hw->fdir_active_fltr;
+ /* report total rule count */
+ cmd->data = ice_get_fdir_cnt_all(hw);
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = ice_get_ethtool_fdir_entry(hw, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
+ break;
case ETHTOOL_GRXFH:
ice_get_rss_hash_opt(vsi, cmd);
ret = 0;
@@ -2593,7 +2620,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
- u32 new_rx_cnt, new_tx_cnt;
+ u16 new_rx_cnt, new_tx_cnt;
if (ring->tx_pending > ICE_MAX_NUM_DESC ||
ring->tx_pending < ICE_MIN_NUM_DESC ||
@@ -2645,8 +2672,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
if (ice_is_xdp_ena_vsi(vsi))
for (i = 0; i < vsi->num_xdp_txq; i++)
vsi->xdp_rings[i]->count = new_tx_cnt;
- vsi->num_tx_desc = new_tx_cnt;
- vsi->num_rx_desc = new_rx_cnt;
+ vsi->num_tx_desc = (u16)new_tx_cnt;
+ vsi->num_rx_desc = (u16)new_rx_cnt;
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -2952,31 +2979,22 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
status = ice_set_fc(pi, &aq_failures, link_up);
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
- if (!test_bit(__ICE_DOWN, pf->state)) {
- /* Give it a little more time to try to come back. If still
- * down, restart autoneg link or reinitialize the interface.
- */
- msleep(75);
- if (!test_bit(__ICE_DOWN, pf->state))
- return ice_nway_reset(netdev);
-
- ice_down(vsi);
- ice_up(vsi);
- }
-
return err;
}
@@ -3171,10 +3189,6 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- /* check to see if VSI is active */
- if (test_bit(__ICE_DOWN, vsi->state))
- return;
-
/* report maximum channels */
ch->max_rx = ice_get_max_rxq(pf);
ch->max_tx = ice_get_max_txq(pf);
@@ -3184,6 +3198,10 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
ch->combined_count = ice_get_combined_cnt(vsi);
ch->rx_count = vsi->num_rxq - ch->combined_count;
ch->tx_count = vsi->num_txq - ch->combined_count;
+
+ /* report other queues */
+ ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
+ ch->max_other = ch->other_count;
}
/**
@@ -3227,8 +3245,9 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut,
vsi->rss_table_size);
if (status) {
- dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EIO;
}
@@ -3255,9 +3274,14 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EOPNOTSUPP;
}
/* do not support changing other_count */
- if (ch->other_count)
+ if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
return -EINVAL;
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
+ netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
+ return -EOPNOTSUPP;
+ }
+
curr_combined = ice_get_combined_cnt(vsi);
/* these checks are for cases where user didn't specify a particular
@@ -3731,10 +3755,10 @@ ice_get_module_eeprom(struct net_device *netdev,
struct ice_hw *hw = &pf->hw;
enum ice_status status;
bool is_sfp = false;
+ unsigned int i;
u16 offset = 0;
u8 value = 0;
u8 page = 0;
- int i;
status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0,
&value, 1, 0, NULL);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
new file mode 100644
index 000000000000..d7430ce6af26
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+/* flow director ethtool support for ice */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_flow.h"
+
+static struct in6_addr full_ipv6_addr_mask = {
+ .in6_u = {
+ .u6_addr8 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
+ }
+};
+
+static struct in6_addr zero_ipv6_addr_mask = {
+ .in6_u = {
+ .u6_addr8 = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }
+ }
+};
+
+/* calls to ice_flow_add_prof require the number of segments in the array
+ * for segs_cnt. In this code that is one more than the index.
+ */
+#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
+
+/**
+ * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
+ * flow type values
+ * @flow: filter type to be converted
+ *
+ * Returns the corresponding ethtool flow type.
+ */
+static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
+{
+ switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ return TCP_V4_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ return UDP_V4_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ return SCTP_V4_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ return IPV4_USER_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ return TCP_V6_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ return UDP_V6_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ return SCTP_V6_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ return IPV6_USER_FLOW;
+ default:
+ /* 0 is undefined ethtool flow */
+ return 0;
+ }
+}
+
+/**
+ * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
+ * @eth: Ethtool flow type to be converted
+ *
+ * Returns flow enum
+ */
+static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
+{
+ switch (eth) {
+ case TCP_V4_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ case UDP_V4_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ case SCTP_V4_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+ case IPV4_USER_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ case TCP_V6_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+ case UDP_V6_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+ case SCTP_V6_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+ case IPV6_USER_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+ default:
+ return ICE_FLTR_PTYPE_NONF_NONE;
+ }
+}
+
+/**
+ * ice_is_mask_valid - check mask field set
+ * @mask: full mask to check
+ * @field: field for which mask should be valid
+ *
+ * If the mask is fully set return true. If it is not valid for field return
+ * false.
+ */
+static bool ice_is_mask_valid(u64 mask, u64 field)
+{
+ return (mask & field) == field;
+}
+
+/**
+ * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
+ * @hw: hardware structure that contains filter list
+ * @cmd: ethtool command data structure to receive the filter data
+ *
+ * Returns 0 on success and -EINVAL on failure
+ */
+int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct ice_fdir_fltr *rule;
+ int ret = 0;
+ u16 idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+
+ rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);
+
+ if (!rule || fsp->location != rule->fltr_id) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);
+
+ memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+ memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+ switch (fsp->flow_type) {
+ case IPV4_USER_FLOW:
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
+ fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
+ fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
+ fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
+ fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
+ fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
+ fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
+ fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
+ fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
+ fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
+ fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
+ fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
+ break;
+ case IPV6_USER_FLOW:
+ fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
+ fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
+ fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
+ fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
+ fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
+ rule->mask.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
+ rule->mask.v6.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
+ fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
+ fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
+ fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
+ break;
+ default:
+ break;
+ }
+
+ if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
+
+ idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
+ if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
+ dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
+ rule->flow_type);
+ ret = -EINVAL;
+ }
+
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+ return ret;
+}
+
+/**
+ * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
+ * @hw: hardware structure containing the filter list
+ * @cmd: ethtool command data structure
+ * @rule_locs: ethtool array passed in from OS to receive filter IDs
+ *
+ * Returns 0 as expected for success by ethtool
+ */
+int
+ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct ice_fdir_fltr *f_rule;
+ unsigned int cnt = 0;
+ int val = 0;
+
+ /* report total rule count */
+ cmd->data = ice_get_fdir_cnt_all(hw);
+
+ mutex_lock(&hw->fdir_fltr_lock);
+
+ list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
+ if (cnt == cmd->rule_cnt) {
+ val = -EMSGSIZE;
+ goto release_lock;
+ }
+ rule_locs[cnt] = f_rule->fltr_id;
+ cnt++;
+ }
+
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+ if (!val)
+ cmd->rule_cnt = cnt;
+ return val;
+}
+
+/**
+ * ice_fdir_get_hw_prof - return the ice_fd_hw_proc associated with a flow
+ * @hw: hardware structure containing the filter list
+ * @blk: hardware block
+ * @flow: FDir flow type to release
+ */
+static struct ice_fd_hw_prof *
+ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
+{
+ if (blk == ICE_BLK_FD && hw->fdir_prof)
+ return hw->fdir_prof[flow];
+
+ return NULL;
+}
+
+/**
+ * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
+ * @hw: hardware structure containing the filter list
+ * @blk: hardware block
+ * @flow: FDir flow type to release
+ */
+static void
+ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
+{
+ struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
+ int tun;
+
+ if (!prof)
+ return;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ u64 prof_id;
+ int j;
+
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ for (j = 0; j < prof->cnt; j++) {
+ u16 vsi_num;
+
+ if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
+ continue;
+ vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
+ ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
+ prof->entry_h[j][tun] = 0;
+ }
+ ice_flow_rem_prof(hw, blk, prof_id);
+ }
+}
+
+/**
+ * ice_fdir_rem_flow - release the ice_flow structures for a filter type
+ * @hw: hardware structure containing the filter list
+ * @blk: hardware block
+ * @flow_type: FDir flow type to release
+ */
+static void
+ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
+ enum ice_fltr_ptype flow_type)
+{
+ int flow = (int)flow_type & ~FLOW_EXT;
+ struct ice_fd_hw_prof *prof;
+ int tun, i;
+
+ prof = ice_fdir_get_hw_prof(hw, blk, flow);
+ if (!prof)
+ return;
+
+ ice_fdir_erase_flow_from_hw(hw, blk, flow);
+ for (i = 0; i < prof->cnt; i++)
+ prof->vsi_h[i] = 0;
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ if (!prof->fdir_seg[tun])
+ continue;
+ devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
+ prof->fdir_seg[tun] = NULL;
+ }
+ prof->cnt = 0;
+}
+
+/**
+ * ice_fdir_release_flows - release all flows in use for later replay
+ * @hw: pointer to HW instance
+ */
+void ice_fdir_release_flows(struct ice_hw *hw)
+{
+ int flow;
+
+ /* release Flow Director HW table entries */
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
+ ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
+}
+
+/**
+ * ice_fdir_replay_flows - replay HW Flow Director filter info
+ * @hw: pointer to HW instance
+ */
+void ice_fdir_replay_flows(struct ice_hw *hw)
+{
+ int flow;
+
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ int tun;
+
+ if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
+ continue;
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ struct ice_flow_prof *hw_prof;
+ struct ice_fd_hw_prof *prof;
+ u64 prof_id;
+ int j;
+
+ prof = hw->fdir_prof[flow];
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
+ prof->fdir_seg[tun], TNL_SEG_CNT(tun),
+ &hw_prof);
+ for (j = 0; j < prof->cnt; j++) {
+ enum ice_flow_priority prio;
+ u64 entry_h = 0;
+ int err;
+
+ prio = ICE_FLOW_PRIO_NORMAL;
+ err = ice_flow_add_entry(hw, ICE_BLK_FD,
+ prof_id,
+ prof->vsi_h[0],
+ prof->vsi_h[j],
+ prio, prof->fdir_seg,
+ &entry_h);
+ if (err) {
+ dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
+ flow);
+ continue;
+ }
+ prof->entry_h[j][tun] = entry_h;
+ }
+ }
+ }
+}
+
+/**
+ * ice_parse_rx_flow_user_data - deconstruct user-defined data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @data: pointer to userdef data structure for storage
+ *
+ * Returns 0 on success, negative error value on failure
+ */
+static int
+ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct ice_rx_flow_userdef *data)
+{
+ u64 value, mask;
+
+ memset(data, 0, sizeof(*data));
+ if (!(fsp->flow_type & FLOW_EXT))
+ return 0;
+
+ value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
+ mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
+ if (!mask)
+ return 0;
+
+#define ICE_USERDEF_FLEX_WORD_M GENMASK_ULL(15, 0)
+#define ICE_USERDEF_FLEX_OFFS_S 16
+#define ICE_USERDEF_FLEX_OFFS_M GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
+#define ICE_USERDEF_FLEX_FLTR_M GENMASK_ULL(31, 0)
+
+ /* 0x1fe is the maximum value for offsets stored in the internal
+ * filtering tables.
+ */
+#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe
+
+ if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
+ value > ICE_USERDEF_FLEX_FLTR_M)
+ return -EINVAL;
+
+ data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
+ data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
+ ICE_USERDEF_FLEX_OFFS_S;
+ if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
+ return -EINVAL;
+
+ data->flex_fltr = true;
+
+ return 0;
+}
+
+/**
+ * ice_fdir_num_avail_fltr - return the number of unused flow director filters
+ * @hw: pointer to hardware structure
+ * @vsi: software VSI structure
+ *
+ * There are 2 filter pools: guaranteed and best effort(shared). Each VSI can
+ * use filters from either pool. The guaranteed pool is divided between VSIs.
+ * The best effort filter pool is common to all VSIs and is a device shared
+ * resource pool. The number of filters available to this VSI is the sum of
+ * the VSIs guaranteed filter pool and the global available best effort
+ * filter pool.
+ *
+ * Returns the number of available flow director filters to this VSI
+ */
+static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+{
+ u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+ u16 num_guar;
+ u16 num_be;
+
+ /* total guaranteed filters assigned to this VSI */
+ num_guar = vsi->num_gfltr;
+
+ /* minus the guaranteed filters programed by this VSI */
+ num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
+ VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;
+
+ /* total global best effort filters */
+ num_be = hw->func_caps.fd_fltr_best_effort;
+
+ /* minus the global best effort filters programmed */
+ num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
+ GLQF_FD_CNT_FD_BCNT_S;
+
+ return num_guar + num_be;
+}
+
+/**
+ * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
+ * @hw: HW structure containing the FDir flow profile structure(s)
+ * @flow: flow type to allocate the flow profile for
+ *
+ * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
+ * on success and negative on error.
+ */
+static int
+ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
+{
+ if (!hw)
+ return -EINVAL;
+
+ if (!hw->fdir_prof) {
+ hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
+ ICE_FLTR_PTYPE_MAX,
+ sizeof(*hw->fdir_prof),
+ GFP_KERNEL);
+ if (!hw->fdir_prof)
+ return -ENOMEM;
+ }
+
+ if (!hw->fdir_prof[flow]) {
+ hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(**hw->fdir_prof),
+ GFP_KERNEL);
+ if (!hw->fdir_prof[flow])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
+ * @pf: pointer to the PF structure
+ * @seg: protocol header description pointer
+ * @flow: filter enum
+ * @tun: FDir segment to program
+ */
+static int
+ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
+ enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *main_vsi, *ctrl_vsi;
+ struct ice_flow_seg_info *old_seg;
+ struct ice_flow_prof *prof = NULL;
+ struct ice_fd_hw_prof *hw_prof;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u64 entry1_h = 0;
+ u64 entry2_h = 0;
+ u64 prof_id;
+ int err;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (!main_vsi)
+ return -EINVAL;
+
+ ctrl_vsi = ice_get_ctrl_vsi(pf);
+ if (!ctrl_vsi)
+ return -EINVAL;
+
+ err = ice_fdir_alloc_flow_prof(hw, flow);
+ if (err)
+ return err;
+
+ hw_prof = hw->fdir_prof[flow];
+ old_seg = hw_prof->fdir_seg[tun];
+ if (old_seg) {
+ /* This flow_type already has a changed input set.
+ * If it matches the requested input set then we are
+ * done. Or, if it's different then it's an error.
+ */
+ if (!memcmp(old_seg, seg, sizeof(*seg)))
+ return -EEXIST;
+
+ /* if there are FDir filters using this flow,
+ * then return error.
+ */
+ if (hw->fdir_fltr_cnt[flow]) {
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+ return -EINVAL;
+ }
+
+ if (ice_is_arfs_using_perfect_flow(hw, flow)) {
+ dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
+ flow);
+ return -EINVAL;
+ }
+
+ /* remove HW filter definition */
+ ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
+ }
+
+ /* Adding a profile, but there is only one header supported.
+ * That is the final parameters are 1 header (segment), no
+ * actions (NULL) and zero actions 0.
+ */
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ TNL_SEG_CNT(tun), &prof);
+ if (status)
+ return ice_status_to_errno(status);
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_prof;
+ }
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_entry;
+ }
+
+ hw_prof->fdir_seg[tun] = seg;
+ hw_prof->entry_h[0][tun] = entry1_h;
+ hw_prof->entry_h[1][tun] = entry2_h;
+ hw_prof->vsi_h[0] = main_vsi->idx;
+ hw_prof->vsi_h[1] = ctrl_vsi->idx;
+ if (!hw_prof->cnt)
+ hw_prof->cnt = 2;
+
+ return 0;
+
+err_entry:
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+ ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
+err_prof:
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+
+ return err;
+}
+
+/**
+ * ice_set_init_fdir_seg
+ * @seg: flow segment for programming
+ * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
+ * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
+ *
+ * Set the configuration for perfect filters to the provided flow segment for
+ * programming the HW filter. This is to be called only when initializing
+ * filters as this function it assumes no filters exist.
+ */
+static int
+ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
+ enum ice_flow_seg_hdr l3_proto,
+ enum ice_flow_seg_hdr l4_proto)
+{
+ enum ice_flow_field src_addr, dst_addr, src_port, dst_port;
+
+ if (!seg)
+ return -EINVAL;
+
+ if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
+ src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
+ dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
+ } else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
+ src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
+ dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
+ } else {
+ return -EINVAL;
+ }
+
+ if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
+ src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
+ src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
+ } else {
+ return -EINVAL;
+ }
+
+ ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);
+
+ /* IP source address */
+ ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* IP destination address */
+ ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 source port */
+ ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 destination port */
+ ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ return 0;
+}
+
+/**
+ * ice_create_init_fdir_rule
+ * @pf: PF structure
+ * @flow: filter enum
+ *
+ * Return error value or 0 on success.
+ */
+static int
+ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
+{
+ struct ice_flow_seg_info *seg, *tun_seg;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int ret;
+
+ /* if there is already a filter rule for kind return -EINVAL */
+ if (hw->fdir_prof && hw->fdir_prof[flow] &&
+ hw->fdir_prof[flow]->fdir_seg[0])
+ return -EINVAL;
+
+ seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ return -ENOMEM;
+
+ tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ GFP_KERNEL);
+ if (!tun_seg) {
+ devm_kfree(dev, seg);
+ return -ENOMEM;
+ }
+
+ if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_SEG_HDR_TCP);
+ else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_SEG_HDR_UDP);
+ else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_SEG_HDR_TCP);
+ else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_SEG_HDR_UDP);
+ else
+ ret = -EINVAL;
+ if (ret)
+ goto err_exit;
+
+ /* add filter for outer headers */
+ ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
+ if (ret)
+ /* could not write filter, free memory */
+ goto err_exit;
+
+ /* make tunneled filter HW entries if possible */
+ memcpy(&tun_seg[1], seg, sizeof(*seg));
+ ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
+ if (ret)
+ /* could not write tunnel filter, but outer header filter
+ * exists
+ */
+ devm_kfree(dev, tun_seg);
+
+ set_bit(flow, hw->fdir_perfect_fltr);
+ return ret;
+err_exit:
+ devm_kfree(dev, tun_seg);
+ devm_kfree(dev, seg);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_set_fdir_ip4_seg
+ * @seg: flow segment for programming
+ * @tcp_ip4_spec: mask data from ethtool
+ * @l4_proto: Layer 4 protocol to program
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the mask data into the flow segment to be used to program HW
+ * table based on provided L4 protocol for IPv4
+ */
+static int
+ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_tcpip4_spec *tcp_ip4_spec,
+ enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
+{
+ enum ice_flow_field src_port, dst_port;
+
+ /* make sure we don't have any empty rule */
+ if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
+ !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
+ return -EINVAL;
+
+ /* filtering on TOS not supported */
+ if (tcp_ip4_spec->tos)
+ return -EOPNOTSUPP;
+
+ if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
+ src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
+ src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
+ src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);
+
+ /* IP source address */
+ if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!tcp_ip4_spec->ip4src)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* IP destination address */
+ if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!tcp_ip4_spec->ip4dst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 source port */
+ if (tcp_ip4_spec->psrc == htons(0xFFFF))
+ ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip4_spec->psrc)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 destination port */
+ if (tcp_ip4_spec->pdst == htons(0xFFFF))
+ ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip4_spec->pdst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * ice_set_fdir_ip4_usr_seg
+ * @seg: flow segment for programming
+ * @usr_ip4_spec: ethtool userdef packet offset
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the offset data into the flow segment to be used to program HW
+ * table for IPv4
+ */
+static int
+ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_usrip4_spec *usr_ip4_spec,
+ bool *perfect_fltr)
+{
+ /* first 4 bytes of Layer 4 header */
+ if (usr_ip4_spec->l4_4_bytes)
+ return -EINVAL;
+ if (usr_ip4_spec->tos)
+ return -EINVAL;
+ if (usr_ip4_spec->ip_ver)
+ return -EINVAL;
+ /* Filtering on Layer 4 protocol not supported */
+ if (usr_ip4_spec->proto)
+ return -EOPNOTSUPP;
+ /* empty rules are not valid */
+ if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
+ return -EINVAL;
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
+
+ /* IP source address */
+ if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!usr_ip4_spec->ip4src)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* IP destination address */
+ if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!usr_ip4_spec->ip4dst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * ice_set_fdir_ip6_seg
+ * @seg: flow segment for programming
+ * @tcp_ip6_spec: mask data from ethtool
+ * @l4_proto: Layer 4 protocol to program
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the mask data into the flow segment to be used to program HW
+ * table based on provided L4 protocol for IPv6
+ */
+static int
+ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_tcpip6_spec *tcp_ip6_spec,
+ enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
+{
+ enum ice_flow_field src_port, dst_port;
+
+ /* make sure we don't have any empty rule */
+ if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)) &&
+ !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)) &&
+ !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
+ return -EINVAL;
+
+ /* filtering on TC not supported */
+ if (tcp_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
+ src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
+ src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
+ src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
+ } else {
+ return -EINVAL;
+ }
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);
+
+ if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 source port */
+ if (tcp_ip6_spec->psrc == htons(0xFFFF))
+ ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip6_spec->psrc)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 destination port */
+ if (tcp_ip6_spec->pdst == htons(0xFFFF))
+ ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip6_spec->pdst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * ice_set_fdir_ip6_usr_seg
+ * @seg: flow segment for programming
+ * @usr_ip6_spec: IPv6 userdef mask data from ethtool
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the offset data into the flow segment to be used to program HW
+ * table for IPv6
+ */
+static int
+ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_usrip6_spec *usr_ip6_spec,
+ bool *perfect_fltr)
+{
+ /* filtering on Layer 4 bytes not supported */
+ if (usr_ip6_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+ /* filtering on TC not supported */
+ if (usr_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+ /* filtering on Layer 4 protocol not supported */
+ if (usr_ip6_spec->l4_proto)
+ return -EOPNOTSUPP;
+ /* empty rules are not valid */
+ if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)) &&
+ !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ return -EINVAL;
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
+
+ if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
+ * @pf: PF structure
+ * @fsp: pointer to ethtool Rx flow specification
+ * @user: user defined data from flow specification
+ *
+ * Returns 0 on success.
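+ *
+ * On success the segment buffers are retained by the HW filter profiles
+ * created here; this function frees them itself only when an identical
+ * rule already exists or when programming fails.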
+ */
+static int
+ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
+ struct ice_rx_flow_userdef *user)
+{
+ struct ice_flow_seg_info *seg, *tun_seg;
+ struct device *dev = ice_pf_to_dev(pf);
+ enum ice_fltr_ptype fltr_idx;
+ struct ice_hw *hw = &pf->hw;
+ bool perfect_filter;
+ int ret;
+
+ seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ return -ENOMEM;
+
+ tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ GFP_KERNEL);
+ if (!tun_seg) {
+ devm_kfree(dev, seg);
+ return -ENOMEM;
+ }
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
+ ICE_FLOW_SEG_HDR_TCP,
+ &perfect_filter);
+ break;
+ case UDP_V4_FLOW:
+ ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
+ ICE_FLOW_SEG_HDR_UDP,
+ &perfect_filter);
+ break;
+ case SCTP_V4_FLOW:
+ ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
+ ICE_FLOW_SEG_HDR_SCTP,
+ &perfect_filter);
+ break;
+ case IPV4_USER_FLOW:
+ ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
+ &perfect_filter);
+ break;
+ case TCP_V6_FLOW:
+ ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
+ ICE_FLOW_SEG_HDR_TCP,
+ &perfect_filter);
+ break;
+ case UDP_V6_FLOW:
+ ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
+ ICE_FLOW_SEG_HDR_UDP,
+ &perfect_filter);
+ break;
+ case SCTP_V6_FLOW:
+ ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
+ ICE_FLOW_SEG_HDR_SCTP,
+ &perfect_filter);
+ break;
+ case IPV6_USER_FLOW:
+ ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
+ &perfect_filter);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (ret)
+ goto err_exit;
+
+ /* tunnel segments are shifted up one. */
+ memcpy(&tun_seg[1], seg, sizeof(*seg));
+
+ if (user && user->flex_fltr) {
+ perfect_filter = false;
+ ice_flow_add_fld_raw(seg, user->flex_offset,
+ ICE_FLTR_PRGM_FLEX_WORD_SIZE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL);
+ ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
+ ICE_FLTR_PRGM_FLEX_WORD_SIZE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL);
+ }
+
+ /* add filter for outer headers */
+ fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
+ ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
+ ICE_FD_HW_SEG_NON_TUN);
+ if (ret == -EEXIST)
+ /* Rule already exists, free memory and continue */
+ devm_kfree(dev, seg);
+ else if (ret)
+ /* could not write filter, free memory */
+ goto err_exit;
+
+ /* make tunneled filter HW entries if possible */
+ ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
+ ICE_FD_HW_SEG_TUN);
+ if (ret == -EEXIST) {
+ /* Rule already exists, free memory and count as success */
+ devm_kfree(dev, tun_seg);
+ ret = 0;
+ } else if (ret) {
+ /* could not write tunnel filter, but outer filter exists */
+ devm_kfree(dev, tun_seg);
+ }
+
+ if (perfect_filter)
+ set_bit(fltr_idx, hw->fdir_perfect_fltr);
+ else
+ clear_bit(fltr_idx, hw->fdir_perfect_fltr);
+
+ return ret;
+
+err_exit:
+ devm_kfree(dev, tun_seg);
+ devm_kfree(dev, seg);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_fdir_write_fltr - send a flow director filter to the hardware
+ * @pf: PF data structure
+ * @input: filter structure
+ * @add: true adds the filter and false removes it
+ * @is_tun: true programs the filter for the inner (tunneled) headers,
+ *	    false for the outer headers
+ *
+ * returns 0 on success and negative value on error
+ */
+int
+ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
+ bool is_tun)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_fltr_desc desc;
+ struct ice_vsi *ctrl_vsi;
+ enum ice_status status;
+ u8 *pkt, *frag_pkt;
+ bool has_frag;
+ int err;
+
+ ctrl_vsi = ice_get_ctrl_vsi(pf);
+ if (!ctrl_vsi)
+ return -EINVAL;
+
+ pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+ frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
+ if (!frag_pkt) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ ice_fdir_get_prgm_desc(hw, input, &desc, add);
+ status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_free_all;
+ }
+ err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
+ if (err)
+ goto err_free_all;
+
+ /* repeat for fragment packet */
+ has_frag = ice_fdir_has_frag(input->flow_type);
+ if (has_frag) {
+ /* does not return error */
+ ice_fdir_get_prgm_desc(hw, input, &desc, add);
+ status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
+ is_tun);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_frag;
+ }
+ err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
+ if (err)
+ goto err_frag;
+ } else {
+ devm_kfree(dev, frag_pkt);
+ }
+
+ return 0;
+
+err_free_all:
+ devm_kfree(dev, frag_pkt);
+err_free:
+ devm_kfree(dev, pkt);
+ return err;
+
+err_frag:
+ devm_kfree(dev, frag_pkt);
+ return err;
+}
+
+/**
+ * ice_fdir_write_all_fltr - write a Flow Director filter for each header type
+ * @pf: PF data structure
+ * @input: filter structure
+ * @add: true adds the filter and false removes it
+ *
+ * returns 0 on success and negative value on error
+ */
+static int
+ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ bool add)
+{
+ u16 port_num;
+ int tun;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ bool is_tun = tun == ICE_FD_HW_SEG_TUN;
+ int err;
+
+ if (is_tun && !ice_get_open_tunnel_port(&pf->hw, TNL_ALL,
+ &port_num))
+ continue;
+ err = ice_fdir_write_fltr(pf, input, add, is_tun);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/**
+ * ice_fdir_replay_fltrs - replay filters from the HW filter list
+ * @pf: board private structure
+ */
+void ice_fdir_replay_fltrs(struct ice_pf *pf)
+{
+ struct ice_fdir_fltr *f_rule;
+ struct ice_hw *hw = &pf->hw;
+
+ list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
+ int err = ice_fdir_write_all_fltr(pf, f_rule, true);
+
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
+ err, f_rule->fltr_id);
+ }
+}
+
+/**
+ * ice_fdir_create_dflt_rules - create default perfect filters
+ * @pf: PF data structure
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int ice_fdir_create_dflt_rules(struct ice_pf *pf)
+{
+ int err;
+
+ /* Create perfect TCP and UDP rules in hardware. */
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
+ if (err)
+ return err;
+
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
+ if (err)
+ return err;
+
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
+ if (err)
+ return err;
+
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
+
+ return err;
+}
+
+/**
+ * ice_vsi_manage_fdir - turn on/off flow director
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_fdir_fltr *f_rule, *tmp;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_fltr_ptype flow;
+
+ if (ena) {
+ set_bit(ICE_FLAG_FD_ENA, pf->flags);
+ ice_fdir_create_dflt_rules(pf);
+ return;
+ }
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
+ goto release_lock;
+ list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
+ /* ignore return value */
+ ice_fdir_write_all_fltr(pf, f_rule, false);
+ ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
+ list_del(&f_rule->fltr_node);
+ devm_kfree(ice_hw_to_dev(hw), f_rule);
+ }
+
+ if (hw->fdir_prof)
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
+ flow++)
+ if (hw->fdir_prof[flow])
+ ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
+
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+}
+
+/**
+ * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
+ * @pf: PF structure
+ * @flow_type: FDir flow type to release
+ */
+static void
+ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
+{
+ struct ice_hw *hw = &pf->hw;
+ bool need_perfect = false;
+
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ need_perfect = true;
+
+ if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
+ return;
+
+ ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
+ if (need_perfect)
+ ice_create_init_fdir_rule(pf, flow_type);
+}
+
+/**
+ * ice_fdir_update_list_entry - add or delete a filter from the filter list
+ * @pf: PF structure
+ * @input: filter structure
+ * @fltr_idx: ethtool index of filter to modify
+ *
+ * returns 0 on success and negative on errors
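+ * A NULL @input deletes the existing filter at @fltr_idx; a non-NULL
+ * @input replaces any existing entry at that index and is added to the
+ * filter list.
+ *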
+ */
+static int
+ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ int fltr_idx)
+{
+ struct ice_fdir_fltr *old_fltr;
+ struct ice_hw *hw = &pf->hw;
+ int err = -ENOENT;
+
+ /* Do not update filters during reset */
+ if (ice_is_reset_in_progress(pf->state))
+ return -EBUSY;
+
+ old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
+ if (old_fltr) {
+ err = ice_fdir_write_all_fltr(pf, old_fltr, false);
+ if (err)
+ return err;
+ ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
+ if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
+ /* we just deleted the last filter of flow_type so we
+ * should also delete the HW filter info.
+ */
+ ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
+ list_del(&old_fltr->fltr_node);
+ devm_kfree(ice_hw_to_dev(hw), old_fltr);
+ }
+ if (!input)
+ return err;
+ ice_fdir_list_add_fltr(hw, input);
+ ice_fdir_update_cntrs(hw, input->flow_type, true);
+ return 0;
+}
+
+/**
+ * ice_del_fdir_ethtool - delete Flow Director filter
+ * @vsi: pointer to target VSI
+ * @cmd: command to add or delete Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int val;
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ /* Do not delete filters during reset */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(__ICE_FD_FLUSH_REQ, pf->state))
+ return -EBUSY;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
+ mutex_unlock(&hw->fdir_fltr_lock);
+
+ return val;
+}
+
+/**
+ * ice_set_fdir_input_set - Set the input set for Flow Director
+ * @vsi: pointer to target VSI
+ * @fsp: pointer to ethtool Rx flow specification
+ * @input: filter structure
+ */
+static int
+ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
+ struct ice_fdir_fltr *input)
+{
+ u16 dest_vsi, q_index = 0;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int flow_type;
+ u8 dest_ctl;
+
+ if (!vsi || !fsp || !input)
+ return -EINVAL;
+
+ pf = vsi->back;
+ hw = &pf->hw;
+
+ dest_vsi = vsi->idx;
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+ if (vf) {
+ dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
+ return -EINVAL;
+ }
+
+ if (ring >= vsi->num_rxq)
+ return -EINVAL;
+
+ dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ q_index = ring;
+ }
+
+ input->fltr_id = fsp->location;
+ input->q_index = q_index;
+ flow_type = fsp->flow_type & ~FLOW_EXT;
+
+ input->dest_vsi = dest_vsi;
+ input->dest_ctl = dest_ctl;
+ input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
+ input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
+ input->flow_type = ice_ethtool_flow_to_fltr(flow_type);
+
+ if (fsp->flow_type & FLOW_EXT) {
+ memcpy(input->ext_data.usr_def, fsp->h_ext.data,
+ sizeof(input->ext_data.usr_def));
+ input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
+ input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
+ memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
+ sizeof(input->ext_mask.usr_def));
+ input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
+ input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
+ }
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
+ input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
+ break;
+ case IPV4_USER_FLOW:
+ input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
+ input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+ input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
+ input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
+ input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
+ input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
+ input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
+ input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
+ input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+ input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
+ input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
+ input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+ input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
+ memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
+ input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
+ input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+ memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+ input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
+ input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+ break;
+ default:
+ /* not doing un-parsed flow types */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_add_fdir_ethtool - Add/Remove Flow Director filter
+ * @vsi: pointer to target VSI
+ * @cmd: command to add or delete Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
+{
+ struct ice_rx_flow_userdef userdata;
+ struct ethtool_rx_flow_spec *fsp;
+ struct ice_fdir_fltr *input;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int fltrs_needed;
+ u16 tunnel_port;
+ int ret;
+
+ if (!vsi)
+ return -EINVAL;
+
+ pf = vsi->back;
+ hw = &pf->hw;
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ /* Do not program filters during reset */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
+ return -EBUSY;
+ }
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (ice_parse_rx_flow_user_data(fsp, &userdata))
+ return -EINVAL;
+
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
+ if (ret)
+ return ret;
+
+ if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
+ dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ return -ENOSPC;
+ }
+
+ /* return error if not an update and no available filters */
+ fltrs_needed = ice_get_open_tunnel_port(hw, TNL_ALL, &tunnel_port) ?
+ 2 : 1;
+ if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
+ ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
+ dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ return -ENOSPC;
+ }
+
+ input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ ret = ice_set_fdir_input_set(vsi, fsp, input);
+ if (ret)
+ goto free_input;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ if (ice_fdir_is_dup_fltr(hw, input)) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ if (userdata.flex_fltr) {
+ input->flex_fltr = true;
+ input->flex_word = cpu_to_be16(userdata.flex_word);
+ input->flex_offset = userdata.flex_offset;
+ }
+
+ /* input struct is added to the HW filter list */
+ ice_fdir_update_list_entry(pf, input, fsp->location);
+
+ ret = ice_fdir_write_all_fltr(pf, input, true);
+ if (ret)
+ goto remove_sw_rule;
+
+ goto release_lock;
+
+remove_sw_rule:
+ ice_fdir_update_cntrs(hw, input->flow_type, false);
+ list_del(&input->fltr_node);
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+free_input:
+ if (ret)
+ devm_kfree(dev, input);
+
+ return ret;
+}
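+
+/* Illustrative ethtool invocations that exercise the two entry points above
+ * (interface name, addresses and rule location are examples only):
+ *
+ *     ethtool -N eth0 flow-type tcp4 src-ip 192.168.10.2 dst-port 80 \
+ *             action 4 loc 10
+ *     ethtool -N eth0 delete 10
+ *
+ * The first command reaches ice_add_fdir_ethtool() via the
+ * ETHTOOL_SRXCLSRLINS request; the second reaches ice_del_fdir_ethtool()
+ * via ETHTOOL_SRXCLSRLDEL.
+ */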
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
new file mode 100644
index 000000000000..6834df14332f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#include "ice_common.h"
+
+/* These are training packet headers used to program flow director filters. */
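+/* For example, ice_fdir_tcpv4_pkt below is a minimal 54-byte frame: a
+ * 14-byte Ethernet header (EtherType 0x0800), a 20-byte IPv4 header
+ * (total length 0x0028, protocol 0x06 for TCP) and a 20-byte TCP header.
+ * Addresses and ports are left zero so the filter's values can be
+ * inserted before the packet is handed to hardware.
+ */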
+static const u8 ice_fdir_tcpv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const u8 ice_fdir_udpv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctpv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x20, 0x00, 0x00, 0x40, 0x00, 0x40, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00
+};
+
+static const u8 ice_fdir_tcpv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_udpv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctpv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x0C, 0x84, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3B, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_tcp4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x28, 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_udp4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x4e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctp4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x52, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x20, 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ip4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x46, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_tcp6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x6e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x14, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_udp6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x62, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctp6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x66, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x84, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ip6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* Flow Director no-op training packet table */
+static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+ sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt,
+ sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ sizeof(ice_fdir_udpv4_pkt), ice_fdir_udpv4_pkt,
+ sizeof(ice_fdir_udp4_tun_pkt), ice_fdir_udp4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+ sizeof(ice_fdir_sctpv4_pkt), ice_fdir_sctpv4_pkt,
+ sizeof(ice_fdir_sctp4_tun_pkt), ice_fdir_sctp4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ sizeof(ice_fdir_ipv4_pkt), ice_fdir_ipv4_pkt,
+ sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP,
+ sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt,
+ sizeof(ice_fdir_tcp6_tun_pkt), ice_fdir_tcp6_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ sizeof(ice_fdir_udpv6_pkt), ice_fdir_udpv6_pkt,
+ sizeof(ice_fdir_udp6_tun_pkt), ice_fdir_udp6_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_SCTP,
+ sizeof(ice_fdir_sctpv6_pkt), ice_fdir_sctpv6_pkt,
+ sizeof(ice_fdir_sctp6_tun_pkt), ice_fdir_sctp6_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_OTHER,
+ sizeof(ice_fdir_ipv6_pkt), ice_fdir_ipv6_pkt,
+ sizeof(ice_fdir_ip6_tun_pkt), ice_fdir_ip6_tun_pkt,
+ },
+};
+
+#define ICE_FDIR_NUM_PKT ARRAY_SIZE(ice_fdir_pkt)
+
+/**
+ * ice_set_dflt_val_fd_desc
+ * @fd_fltr_ctx: pointer to fd filter descriptor context
+ */
+static void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx)
+{
+ fd_fltr_ctx->comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;
+ fd_fltr_ctx->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
+ fd_fltr_ctx->fd_space = ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST;
+ fd_fltr_ctx->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ fd_fltr_ctx->evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE;
+ fd_fltr_ctx->toq = ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX;
+ fd_fltr_ctx->toq_prio = ICE_FXD_FLTR_QW0_TO_Q_PRIO1;
+ fd_fltr_ctx->dpu_recipe = ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT;
+ fd_fltr_ctx->drop = ICE_FXD_FLTR_QW0_DROP_NO;
+ fd_fltr_ctx->flex_prio = ICE_FXD_FLTR_QW0_FLEX_PRI_NONE;
+ fd_fltr_ctx->flex_mdid = ICE_FXD_FLTR_QW0_FLEX_MDID0;
+ fd_fltr_ctx->flex_val = ICE_FXD_FLTR_QW0_FLEX_VAL0;
+ fd_fltr_ctx->dtype = ICE_TX_DESC_DTYPE_FLTR_PROG;
+ fd_fltr_ctx->desc_prof_prio = ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO;
+ fd_fltr_ctx->desc_prof = ICE_FXD_FLTR_QW1_PROF_ZERO;
+ fd_fltr_ctx->swap = ICE_FXD_FLTR_QW1_SWAP_SET;
+ fd_fltr_ctx->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
+ fd_fltr_ctx->fdid_mdid = ICE_FXD_FLTR_QW1_FDID_MDID_FD;
+ fd_fltr_ctx->fdid = ICE_FXD_FLTR_QW1_FDID_ZERO;
+}
+
+/**
+ * ice_set_fd_desc_val
+ * @ctx: pointer to fd filter descriptor context
+ * @fdir_desc: populated with fd filter descriptor values
+ */
+static void
+ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx,
+ struct ice_fltr_desc *fdir_desc)
+{
+ u64 qword;
+
+ /* prep QW0 of FD filter programming desc */
+ qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) &
+ ICE_FXD_FLTR_QW0_QINDEX_M;
+ qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) &
+ ICE_FXD_FLTR_QW0_COMP_Q_M;
+ qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) &
+ ICE_FXD_FLTR_QW0_COMP_REPORT_M;
+ qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) &
+ ICE_FXD_FLTR_QW0_FD_SPACE_M;
+ qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) &
+ ICE_FXD_FLTR_QW0_STAT_CNT_M;
+ qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) &
+ ICE_FXD_FLTR_QW0_STAT_ENA_M;
+ qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) &
+ ICE_FXD_FLTR_QW0_EVICT_ENA_M;
+ qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) &
+ ICE_FXD_FLTR_QW0_TO_Q_M;
+ qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) &
+ ICE_FXD_FLTR_QW0_TO_Q_PRI_M;
+ qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) &
+ ICE_FXD_FLTR_QW0_DPU_RECIPE_M;
+ qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) &
+ ICE_FXD_FLTR_QW0_DROP_M;
+ qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) &
+ ICE_FXD_FLTR_QW0_FLEX_PRI_M;
+ qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) &
+ ICE_FXD_FLTR_QW0_FLEX_MDID_M;
+ qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) &
+ ICE_FXD_FLTR_QW0_FLEX_VAL_M;
+ fdir_desc->qidx_compq_space_stat = cpu_to_le64(qword);
+
+ /* prep QW1 of FD filter programming desc */
+ qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) &
+ ICE_FXD_FLTR_QW1_DTYPE_M;
+ qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) &
+ ICE_FXD_FLTR_QW1_PCMD_M;
+ qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) &
+ ICE_FXD_FLTR_QW1_PROF_PRI_M;
+ qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) &
+ ICE_FXD_FLTR_QW1_PROF_M;
+ qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) &
+ ICE_FXD_FLTR_QW1_FD_VSI_M;
+ qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) &
+ ICE_FXD_FLTR_QW1_SWAP_M;
+ qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) &
+ ICE_FXD_FLTR_QW1_FDID_PRI_M;
+ qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) &
+ ICE_FXD_FLTR_QW1_FDID_MDID_M;
+ qword |= ((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) &
+ ICE_FXD_FLTR_QW1_FDID_M;
+ fdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword);
+}
+
+/**
+ * ice_fdir_get_prgm_desc - set a fdir descriptor from a fdir filter struct
+ * @hw: pointer to the hardware structure
+ * @input: filter
+ * @fdesc: filter descriptor
+ * @add: if add is true, this is an add operation, false implies delete
+ */
+void
+ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ struct ice_fltr_desc *fdesc, bool add)
+{
+ struct ice_fd_fltr_desc_ctx fdir_fltr_ctx = { 0 };
+
+ /* set default context info */
+ ice_set_dflt_val_fd_desc(&fdir_fltr_ctx);
+
+ /* change sideband filtering values */
+ fdir_fltr_ctx.fdid = input->fltr_id;
+ if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
+ fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES;
+ fdir_fltr_ctx.qindex = 0;
+ } else {
+ fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO;
+ fdir_fltr_ctx.qindex = input->q_index;
+ }
+ fdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ fdir_fltr_ctx.cnt_index = input->cnt_index;
+ fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi);
+ fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE;
+ fdir_fltr_ctx.toq_prio = 3;
+ fdir_fltr_ctx.pcmd = add ? ICE_FXD_FLTR_QW1_PCMD_ADD :
+ ICE_FXD_FLTR_QW1_PCMD_REMOVE;
+ fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET;
+ fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;
+ fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
+ fdir_fltr_ctx.fdid_prio = 3;
+ fdir_fltr_ctx.desc_prof = 1;
+ fdir_fltr_ctx.desc_prof_prio = 3;
+ ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc);
+}
+
+/**
+ * ice_alloc_fd_res_cntr - obtain counter resource for FD type
+ * @hw: pointer to the hardware structure
+ * @cntr_id: returns counter index
+ */
+enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
+{
+ return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
+}
+
+/**
+ * ice_free_fd_res_cntr - Free counter resource for FD type
+ * @hw: pointer to the hardware structure
+ * @cntr_id: counter index to be freed
+ */
+enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
+{
+ return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
+}
+
+/**
+ * ice_alloc_fd_guar_item - allocate resource for FD guaranteed entries
+ * @hw: pointer to the hardware structure
+ * @cntr_id: returns counter index
+ * @num_fltr: number of filter entries to be allocated
+ */
+enum ice_status
+ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+{
+ return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
+ cntr_id);
+}
+
+/**
+ * ice_alloc_fd_shrd_item - allocate resource for flow director shared entries
+ * @hw: pointer to the hardware structure
+ * @cntr_id: returns counter index
+ * @num_fltr: number of filter entries to be allocated
+ */
+enum ice_status
+ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+{
+ return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
+ cntr_id);
+}
+
+/**
+ * ice_get_fdir_cnt_all - get the number of Flow Director filters
+ * @hw: hardware data structure
+ *
+ * Returns the number of filters available on device
+ */
+int ice_get_fdir_cnt_all(struct ice_hw *hw)
+{
+ return hw->func_caps.fd_fltr_guar + hw->func_caps.fd_fltr_best_effort;
+}
+
+/**
+ * ice_pkt_insert_ipv6_addr - insert a be32 IPv6 address into a memory buffer
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @addr: IPv6 address to convert and insert into pkt at offset
+ */
+static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr)
+{
+ int idx;
+
+ for (idx = 0; idx < ICE_IPV6_ADDR_LEN_AS_U32; idx++)
+ memcpy(pkt + offset + idx * sizeof(*addr), &addr[idx],
+ sizeof(*addr));
+}
+
+/**
+ * ice_pkt_insert_u16 - insert a be16 value into a memory buffer
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 16 bit value to convert and insert into pkt at offset
+ */
+static void ice_pkt_insert_u16(u8 *pkt, int offset, __be16 data)
+{
+ memcpy(pkt + offset, &data, sizeof(data));
+}
+
+/**
+ * ice_pkt_insert_u32 - insert a be32 value into a memory buffer
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 32 bit value to convert and insert into pkt at offset
+ */
+static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data)
+{
+ memcpy(pkt + offset, &data, sizeof(data));
+}
+
+/**
+ * ice_fdir_get_gen_prgm_pkt - generate a training packet
+ * @hw: pointer to the hardware structure
+ * @input: flow director filter data structure
+ * @pkt: pointer to return filter packet
+ * @frag: generate a fragment packet
+ * @tun: true implies generate a tunnel packet
+ */
+enum ice_status
+ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ u8 *pkt, bool frag, bool tun)
+{
+ enum ice_fltr_ptype flow;
+ u16 tnl_port;
+ u8 *loc;
+ u16 idx;
+
+ if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+ switch (input->ip.v4.proto) {
+ case IPPROTO_TCP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ break;
+ case IPPROTO_UDP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ break;
+ case IPPROTO_SCTP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+ break;
+ case IPPROTO_IP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ } else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
+ switch (input->ip.v6.proto) {
+ case IPPROTO_TCP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+ break;
+ case IPPROTO_UDP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+ break;
+ case IPPROTO_SCTP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+ break;
+ case IPPROTO_IP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ } else {
+ flow = input->flow_type;
+ }
+
+ for (idx = 0; idx < ICE_FDIR_NUM_PKT; idx++)
+ if (ice_fdir_pkt[idx].flow == flow)
+ break;
+ if (idx == ICE_FDIR_NUM_PKT)
+ return ICE_ERR_PARAM;
+ if (!tun) {
+ memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
+ loc = pkt;
+ } else {
+ if (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port))
+ return ICE_ERR_DOES_NOT_EXIST;
+ if (!ice_fdir_pkt[idx].tun_pkt)
+ return ICE_ERR_PARAM;
+ memcpy(pkt, ice_fdir_pkt[idx].tun_pkt,
+ ice_fdir_pkt[idx].tun_pkt_len);
+ ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET,
+ htons(tnl_port));
+ loc = &pkt[ICE_FDIR_TUN_PKT_OFF];
+ }
+
+ /* Reverse the src and dst, since the HW expects them to be from Tx
+ * perspective. The input from user is from Rx filter perspective.
+ */
+ switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_TCP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ if (frag)
+ loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_UDP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_TCP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_UDP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ if (input->flex_fltr)
+ ice_pkt_insert_u16(loc, input->flex_offset, input->flex_word);
+
+ return 0;
+}
+
+/**
+ * ice_fdir_has_frag - does flow type have 2 ptypes
+ * @flow: flow ptype
+ *
+ * Returns true if there is a fragment packet for this ptype
+ */
+bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
+{
+ return flow == ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+}
+
+/**
+ * ice_fdir_find_fltr_by_idx - find filter with idx
+ * @hw: pointer to hardware structure
+ * @fltr_idx: index to find.
+ *
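+ * The list is kept sorted by filter ID (see ice_fdir_list_add_fltr()), so
+ * the walk can stop early once it passes @fltr_idx.
+ *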
+ * Returns a pointer to the filter if found, otherwise NULL
+ */
+struct ice_fdir_fltr *
+ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx)
+{
+ struct ice_fdir_fltr *rule;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+ /* rule ID found in the list */
+ if (fltr_idx == rule->fltr_id)
+ return rule;
+ if (fltr_idx < rule->fltr_id)
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * ice_fdir_list_add_fltr - add a new node to the flow director filter list
+ * @hw: hardware structure
+ * @fltr: filter node to add to structure
+ */
+void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *fltr)
+{
+ struct ice_fdir_fltr *rule, *parent = NULL;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+ /* rule ID found or pass its spot in the list */
+ if (rule->fltr_id >= fltr->fltr_id)
+ break;
+ parent = rule;
+ }
+
+ if (parent)
+ list_add(&fltr->fltr_node, &parent->fltr_node);
+ else
+ list_add(&fltr->fltr_node, &hw->fdir_list_head);
+}
+
+/**
+ * ice_fdir_update_cntrs - increment / decrement filter counter
+ * @hw: pointer to hardware structure
+ * @flow: filter flow type
+ * @add: true implies filters added
+ */
+void
+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add)
+{
+ int incr;
+
+ incr = add ? 1 : -1;
+ hw->fdir_active_fltr += incr;
+
+ if (flow == ICE_FLTR_PTYPE_NONF_NONE || flow >= ICE_FLTR_PTYPE_MAX)
+ ice_debug(hw, ICE_DBG_SW, "Unknown filter type %d\n", flow);
+ else
+ hw->fdir_fltr_cnt[flow] += incr;
+}
+
+/**
+ * ice_cmp_ipv6_addr - compare 2 IP v6 addresses
+ * @a: IP v6 address
+ * @b: IP v6 address
+ *
+ * Returns 0 on equal, returns non-0 if different
+ */
+static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
+{
+ return memcmp(a, b, 4 * sizeof(__be32));
+}
+
+/**
+ * ice_fdir_comp_rules - compare 2 filters
+ * @a: a Flow Director filter data structure
+ * @b: a Flow Director filter data structure
+ * @v6: bool true if v6 filter
+ *
+ * Returns true if the filters match
+ */
+static bool
+ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6)
+{
+ enum ice_fltr_ptype flow_type = a->flow_type;
+
+ /* The calling function already checks that the two filters have the
+ * same flow_type.
+ */
+ if (!v6) {
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.dst_port == b->ip.v4.dst_port &&
+ a->ip.v4.src_port == b->ip.v4.src_port)
+ return true;
+ } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.l4_header == b->ip.v4.l4_header &&
+ a->ip.v4.proto == b->ip.v4.proto &&
+ a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
+ a->ip.v4.tos == b->ip.v4.tos)
+ return true;
+ }
+ } else {
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) {
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port &&
+ !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
+ b->ip.v6.dst_ip) &&
+ !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
+ b->ip.v6.src_ip))
+ return true;
+ } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * ice_fdir_is_dup_fltr - test if filter is already in list for PF
+ * @hw: hardware data structure
+ * @input: Flow Director filter data structure
+ *
+ * Returns true if the filter is found in the list
+ */
+bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)
+{
+ struct ice_fdir_fltr *rule;
+ bool ret = false;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+ enum ice_fltr_ptype flow_type;
+
+ if (rule->flow_type != input->flow_type)
+ continue;
+
+ flow_type = input->flow_type;
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+ ret = ice_fdir_comp_rules(rule, input, false);
+ else
+ ret = ice_fdir_comp_rules(rule, input, true);
+ if (ret) {
+ if (rule->fltr_id == input->fltr_id &&
+ rule->q_index != input->q_index)
+ ret = false;
+ else
+ break;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
new file mode 100644
index 000000000000..1c587766daab
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#ifndef _ICE_FDIR_H_
+#define _ICE_FDIR_H_
+
+#define ICE_FDIR_TUN_PKT_OFF 50
+#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF)
+
+/* macros for offsets into packets for flow director programming */
+#define ICE_IPV4_SRC_ADDR_OFFSET 26
+#define ICE_IPV4_DST_ADDR_OFFSET 30
+#define ICE_IPV4_TCP_SRC_PORT_OFFSET 34
+#define ICE_IPV4_TCP_DST_PORT_OFFSET 36
+#define ICE_IPV4_UDP_SRC_PORT_OFFSET 34
+#define ICE_IPV4_UDP_DST_PORT_OFFSET 36
+#define ICE_IPV4_SCTP_SRC_PORT_OFFSET 34
+#define ICE_IPV4_SCTP_DST_PORT_OFFSET 36
+#define ICE_IPV4_PROTO_OFFSET 23
+#define ICE_IPV6_SRC_ADDR_OFFSET 22
+#define ICE_IPV6_DST_ADDR_OFFSET 38
+#define ICE_IPV6_TCP_SRC_PORT_OFFSET 54
+#define ICE_IPV6_TCP_DST_PORT_OFFSET 56
+#define ICE_IPV6_UDP_SRC_PORT_OFFSET 54
+#define ICE_IPV6_UDP_DST_PORT_OFFSET 56
+#define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54
+#define ICE_IPV6_SCTP_DST_PORT_OFFSET 56
+/* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF
+ * requests that the packet not be fragmented. MF indicates that a packet has
+ * been fragmented.
+ */
+#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20
+
+enum ice_fltr_prgm_desc_dest {
+ ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
+};
+
+enum ice_fltr_prgm_desc_fd_status {
+ ICE_FLTR_PRGM_DESC_FD_STATUS_NONE,
+ ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID,
+};
+
+/* Flow Director (FD) Filter Programming descriptor */
+struct ice_fd_fltr_desc_ctx {
+ u32 fdid;
+ u16 qindex;
+ u16 cnt_index;
+ u16 fd_vsi;
+ u16 flex_val;
+ u8 comp_q;
+ u8 comp_report;
+ u8 fd_space;
+ u8 cnt_ena;
+ u8 evict_ena;
+ u8 toq;
+ u8 toq_prio;
+ u8 dpu_recipe;
+ u8 drop;
+ u8 flex_prio;
+ u8 flex_mdid;
+ u8 dtype;
+ u8 pcmd;
+ u8 desc_prof_prio;
+ u8 desc_prof;
+ u8 swap;
+ u8 fdid_prio;
+ u8 fdid_mdid;
+};
+
+#define ICE_FLTR_PRGM_FLEX_WORD_SIZE sizeof(__be16)
+
+struct ice_rx_flow_userdef {
+ u16 flex_word;
+ u16 flex_offset;
+ u16 flex_fltr;
+};
+
+struct ice_fdir_v4 {
+ __be32 dst_ip;
+ __be32 src_ip;
+ __be16 dst_port;
+ __be16 src_port;
+ __be32 l4_header;
+ __be32 sec_parm_idx; /* security parameter index */
+ u8 tos;
+ u8 ip_ver;
+ u8 proto;
+};
+
+#define ICE_IPV6_ADDR_LEN_AS_U32 4
+
+struct ice_fdir_v6 {
+ __be32 dst_ip[ICE_IPV6_ADDR_LEN_AS_U32];
+ __be32 src_ip[ICE_IPV6_ADDR_LEN_AS_U32];
+ __be16 dst_port;
+ __be16 src_port;
+ __be32 l4_header; /* next header */
+ __be32 sec_parm_idx; /* security parameter index */
+ u8 tc;
+ u8 proto;
+};
+
+struct ice_fdir_extra {
+ u8 dst_mac[ETH_ALEN]; /* dest MAC address */
+ u32 usr_def[2]; /* user data */
+ __be16 vlan_type; /* VLAN ethertype */
+ __be16 vlan_tag; /* VLAN tag info */
+};
+
+struct ice_fdir_fltr {
+ struct list_head fltr_node;
+ enum ice_fltr_ptype flow_type;
+
+ union {
+ struct ice_fdir_v4 v4;
+ struct ice_fdir_v6 v6;
+ } ip, mask;
+
+ struct ice_fdir_extra ext_data;
+ struct ice_fdir_extra ext_mask;
+
+ /* flex byte filter data */
+ __be16 flex_word;
+ u16 flex_offset;
+ u16 flex_fltr;
+
+ /* filter control */
+ u16 q_index;
+ u16 dest_vsi;
+ u8 dest_ctl;
+ u8 fltr_status;
+ u16 cnt_index;
+ u32 fltr_id;
+};
+
+/* Dummy packet filter definition structure */
+struct ice_fdir_base_pkt {
+ enum ice_fltr_ptype flow;
+ u16 pkt_len;
+ const u8 *pkt;
+ u16 tun_pkt_len;
+ const u8 *tun_pkt;
+};
+
+enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+enum ice_status
+ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+enum ice_status
+ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+void
+ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ struct ice_fltr_desc *fdesc, bool add);
+enum ice_status
+ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ u8 *pkt, bool frag, bool tun);
+int ice_get_fdir_cnt_all(struct ice_hw *hw);
+bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
+bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
+struct ice_fdir_fltr *
+ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx);
+void
+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add);
+void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
+#endif /* _ICE_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 42bac3ec5526..4420fc02f7e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -5,6 +5,15 @@
#include "ice_flex_pipe.h"
#include "ice_flow.h"
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+static const struct ice_tunnel_type_scan tnls[] = {
+ { TNL_VXLAN, "TNL_VXLAN_PF" },
+ { TNL_GENEVE, "TNL_GENEVE_PF" },
+ { TNL_LAST, "" }
+};
+
static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
/* SWITCH */
{
@@ -239,6 +248,268 @@ ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
return state->sect;
}
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries in particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section
+ * handler function must return the appropriate offset + index to give the
+ * absolute offset for each entry. For example, if the base for a section's
+ * header indicates a base offset of 10, and the index for the entry is 2,
+ * then the section handler function should set the offset to 10 + 2 = 12.
+ */
+static void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
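+
+/* Typical caller pattern (illustrative; ice_find_boost_entry below follows
+ * it): start with a non-NULL ice_seg, then pass NULL to continue until the
+ * enumerator itself returns NULL:
+ *
+ *	entry = ice_pkg_enum_entry(ice_seg, &state, sect_type, NULL, handler);
+ *	while (entry) {
+ *		... use entry ...
+ *		entry = ice_pkg_enum_entry(NULL, &state, sect_type, NULL,
+ *					   handler);
+ *	}
+ */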
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
+ */
+static void *
+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = section;
+ if (index >= le16_to_cpu(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static enum ice_status
+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ tcam = ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && le16_to_cpu(tcam->addr) == addr) {
+ *entry = tcam;
+ return 0;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *
+ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = section;
+ if (index >= le16_to_cpu(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *
+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
+ u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = le16_to_cpu(label->value);
+ return label->name;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ memset(&hw->tnl, 0, sizeof(hw->tnl));
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for a matching label prefix before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
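+ /* e.g. with prefix "TNL_VXLAN_PF" (len 12), label
+ * "TNL_VXLAN_PF3" yields label_name[12] == '3', which
+ * matches a pf_id of 3
+ */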
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].in_use = false;
+ hw->tnl.tbl[hw->tnl.count].marked = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry)
+ hw->tnl.tbl[i].valid = true;
+ }
+}
+
/* Key creation */
#define ICE_DC_KEY 0x1 /* don't care */
@@ -593,8 +864,9 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
u32 i;
ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
- pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
- pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
/* Search all package segments for the requested segment type */
for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
@@ -764,13 +1036,15 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
- ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
- ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
le32_to_cpu(ice_seg->hdr.seg_type),
- le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
+ le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
ice_buf_tbl = ice_find_buf_table(ice_seg);
@@ -815,14 +1089,16 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
if (seg_hdr) {
- hw->ice_pkg_ver = seg_hdr->seg_ver;
- memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
+ hw->ice_pkg_ver = seg_hdr->seg_format_ver;
+ memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
sizeof(hw->ice_pkg_name));
- ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
- seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
- seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
- seg_hdr->seg_name);
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft,
+ seg_hdr->seg_id);
} else {
ice_debug(hw, ICE_DBG_INIT,
"Did not find ice segment in driver package\n");
@@ -863,9 +1139,11 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
if (pkg_info->pkg_info[i].is_active) {
flags[place++] = 'A';
hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ le32_to_cpu(pkg_info->pkg_info[i].track_id);
memcpy(hw->active_pkg_name,
pkg_info->pkg_info[i].name,
- sizeof(hw->active_pkg_name));
+ sizeof(pkg_info->pkg_info[i].name));
hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
}
if (pkg_info->pkg_info[i].is_active_at_boot)
@@ -905,10 +1183,10 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
if (len < sizeof(*pkg))
return ICE_ERR_BUF_TOO_SHORT;
- if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
- pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
- pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
- pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
return ICE_ERR_CFG;
/* pkg must have at least one segment */
@@ -990,6 +1268,68 @@ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
}
/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version compatibility with driver and NVM
+ */
+static enum ice_status
+ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg;
+ enum ice_status status;
+ u16 size;
+ u32 i;
+
+ /* Check package version compatibility */
+ status = ice_chk_pkg_version(&hw->pkg_ver);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+ return status;
+ }
+
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ ospkg);
+ if (!*seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Check if FW is compatible with the OS package */
+ size = struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+ pkg = kzalloc(size, GFP_KERNEL);
+ if (!pkg)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
+ if (status)
+ goto fw_ddp_compat_free_alloc;
+
+ for (i = 0; i < le32_to_cpu(pkg->count); i++) {
+ /* loop till we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ status = ICE_ERR_FW_DDP_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT,
+ "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ kfree(pkg);
+ return status;
+}
+
+/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
@@ -1039,18 +1379,12 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
/* before downloading the package, check package version for
* compatibility with driver
*/
- status = ice_chk_pkg_version(&hw->pkg_ver);
+ status = ice_chk_pkg_compat(hw, pkg, &seg);
if (status)
return status;
- /* find segment in given package */
- seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
- if (!seg) {
- ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_ERR_CFG;
- }
-
- /* download package */
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
status = ice_download_pkg(hw, seg);
if (status == ICE_ERR_AQ_NO_WORK) {
ice_debug(hw, ICE_DBG_INIT,
@@ -1292,6 +1626,284 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
return &bld->buf;
}
+/**
+ * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_tunnel_port_in_use
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+{
+ bool res;
+
+ mutex_lock(&hw->tnl_lock);
+ res = ice_tunnel_port_in_use_hlpr(hw, port, index);
+ mutex_unlock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_find_free_tunnel_entry
+ * @hw: pointer to the HW structure
+ * @type: tunnel type
+ * @index: optionally returns index
+ *
+ * Returns whether there is a free tunnel entry, and optionally its index
+ */
+static bool
+ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
+ hw->tnl.tbl[i].type == type) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
+ * @hw: pointer to the HW structure
+ * @type: tunnel type (TNL_ALL will return any open port)
+ * @port: returns open port
+ */
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *port)
+{
+ bool res = false;
+ u16 i;
+
+ mutex_lock(&hw->tnl_lock);
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
+ *port = hw->tnl.tbl[i].port;
+ res = true;
+ break;
+ }
+
+ mutex_unlock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_create_tunnel
+ * @hw: pointer to the HW structure
+ * @type: type of tunnel
+ * @port: port of tunnel to create
+ *
+ * Create a tunnel by updating the parse graph in the parser. We do that by
+ * creating a package buffer with the tunnel info and issuing an update package
+ * command.
+ */
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 index;
+
+ mutex_lock(&hw->tnl_lock);
+
+ if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
+ hw->tnl.tbl[index].ref++;
+ status = 0;
+ goto ice_create_tunnel_end;
+ }
+
+ if (!ice_find_free_tunnel_entry(hw, type, &index)) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto ice_create_tunnel_end;
+ }
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_create_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_create_tunnel_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(*sect_rx));
+ if (!sect_rx)
+ goto ice_create_tunnel_err;
+ sect_rx->count = cpu_to_le16(1);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ sizeof(*sect_tx));
+ if (!sect_tx)
+ goto ice_create_tunnel_err;
+ sect_tx->count = cpu_to_le16(1);
+
+ /* copy original boost entry to update package buffer */
+ memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam));
+
+ /* over-write the never-match dest port key bits with the encoded port
+ * bits
+ */
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ (u8 *)&port, NULL, NULL, NULL,
+ (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
+ sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
+
+ /* exact copy of entry to Tx section entry */
+ memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status) {
+ hw->tnl.tbl[index].port = port;
+ hw->tnl.tbl[index].in_use = true;
+ hw->tnl.tbl[index].ref = 1;
+ }
+
+ice_create_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_create_tunnel_end:
+ mutex_unlock(&hw->tnl_lock);
+
+ return status;
+}
+
+/**
+ * ice_destroy_tunnel
+ * @hw: pointer to the HW structure
+ * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @all: flag indicating whether to destroy all tunnels
+ *
+ * Destroys a tunnel or all tunnels by creating an update package buffer
+ * targeting the specific updates requested and then performing an update
+ * package.
+ */
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 count = 0;
+ u16 index;
+ u16 size;
+ u16 i;
+
+ mutex_lock(&hw->tnl_lock);
+
+ if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
+ if (hw->tnl.tbl[index].ref > 1) {
+ hw->tnl.tbl[index].ref--;
+ status = 0;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* determine count */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port))
+ count++;
+
+ if (!count) {
+ status = ICE_ERR_PARAM;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* size of section - there is at least one entry */
+ size = struct_size(sect_rx, tcam, count - 1);
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_destroy_tunnel_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_rx)
+ goto ice_destroy_tunnel_err;
+ sect_rx->count = cpu_to_le16(count);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_tx)
+ goto ice_destroy_tunnel_err;
+ sect_tx->count = cpu_to_le16(count);
+
+ /* copy original boost entry to update package buffer, one copy to Rx
+ * section, another copy to the Tx section
+ */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port)) {
+ memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_rx->tcam));
+ memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_tx->tcam));
+ hw->tnl.tbl[i].marked = true;
+ }
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status)
+ for (i = 0; i < hw->tnl.count &&
+ i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].marked) {
+ hw->tnl.tbl[i].ref = 0;
+ hw->tnl.tbl[i].port = 0;
+ hw->tnl.tbl[i].in_use = false;
+ hw->tnl.tbl[i].marked = false;
+ }
+
+ice_destroy_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_destroy_tunnel_end:
+ mutex_unlock(&hw->tnl_lock);
+
+ return status;
+}
+
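+/* Usage sketch (illustrative): open a GENEVE tunnel on UDP port 6081 and
+ * later release it; repeated creates for the same port only bump a refcount.
+ *
+ *	if (!ice_create_tunnel(hw, TNL_GENEVE, 6081) &&
+ *	    ice_tunnel_port_in_use(hw, 6081, NULL))
+ *		ice_destroy_tunnel(hw, 6081, false);
+ */
+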
/* PTG Management */
/**
@@ -1807,9 +2419,16 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
- u16 off, i;
+ u16 off;
+ u8 i;
- for (i = 0; i < es->count; i++) {
+ /* For FD, we don't want to re-use an existing profile with the same
+ * field vector and mask, as this would cause rule interference.
+ */
+ if (blk == ICE_BLK_FD)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ for (i = 0; i < (u8)es->count; i++) {
off = i * es->fvw;
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
@@ -1830,6 +2449,9 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
switch (blk) {
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
+ break;
case ICE_BLK_RSS:
*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
break;
@@ -1847,6 +2469,9 @@ static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
switch (blk) {
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
+ break;
case ICE_BLK_RSS:
*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
break;
@@ -2290,6 +2915,12 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
mutex_lock(&hw->fl_profs_locks[blk_idx]);
list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
+ struct ice_flow_entry *e, *t;
+
+ list_for_each_entry_safe(e, t, &p->entries, l_entry)
+ ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
+ ICE_FLOW_ENTRY_HNDL(e));
+
list_del(&p->l_entry);
devm_kfree(ice_hw_to_dev(hw), p);
}
@@ -2919,6 +3550,212 @@ error_tmp:
}
/**
+ * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @mask_sel: mask select
+ *
+ * This function enables any of the masks selected by the mask select parameter
+ * for the profile specified.
+ */
+static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
+{
+ wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
+
+ ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
+ GLQF_FDMASK_SEL(prof_id), mask_sel);
+}
+
+struct ice_fd_src_dst_pair {
+ u8 prot_id;
+ u8 count;
+ u16 off;
+};
+
+static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
+ /* These are defined in pairs */
+ { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
+ { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
+
+ { ICE_PROT_IPV4_IL, 2, 12 },
+ { ICE_PROT_IPV4_IL, 2, 16 },
+
+ { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
+ { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
+
+ { ICE_PROT_IPV6_IL, 8, 8 },
+ { ICE_PROT_IPV6_IL, 8, 24 },
+
+ { ICE_PROT_TCP_IL, 1, 0 },
+ { ICE_PROT_TCP_IL, 1, 2 },
+
+ { ICE_PROT_UDP_OF, 1, 0 },
+ { ICE_PROT_UDP_OF, 1, 2 },
+
+ { ICE_PROT_UDP_IL_OR_S, 1, 0 },
+ { ICE_PROT_UDP_IL_OR_S, 1, 2 },
+
+ { ICE_PROT_SCTP_IL, 1, 0 },
+ { ICE_PROT_SCTP_IL, 1, 2 }
+};
+
+#define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
+
+/**
+ * ice_update_fd_swap - set registers appropriately for an FD FV extraction
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @es: extraction sequence (length of array is determined by the block)
+ */
+static enum ice_status
+ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
+{
+ DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
+ u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
+#define ICE_FD_FV_NOT_FOUND (-2)
+ s8 first_free = ICE_FD_FV_NOT_FOUND;
+ u8 used[ICE_MAX_FV_WORDS] = { 0 };
+ s8 orig_free, si;
+ u32 mask_sel = 0;
+ u8 i, j, k;
+
+ bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
+
+ /* This code assumes that the Flow Director field vectors are assigned
+ * from the end of the FV indexes working towards the zero index, that
+ * only complete fields will be included and will be consecutive, and
+ * that there are no gaps between valid indexes.
+ */
+
+ /* Determine swap fields present */
+ for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
+ /* Find the first free entry, assuming right to left population.
+ * This is where we can start adding additional pairs if needed.
+ */
+ if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
+ ICE_PROT_INVALID)
+ first_free = i - 1;
+
+ for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
+ if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
+ es[i].off == ice_fd_pairs[j].off) {
+ set_bit(j, pair_list);
+ pair_start[j] = i;
+ }
+ }
+
+ orig_free = first_free;
+
+ /* determine missing swap fields that need to be added */
+ for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
+ u8 bit1 = test_bit(i + 1, pair_list);
+ u8 bit0 = test_bit(i, pair_list);
+
+ if (bit0 ^ bit1) {
+ u8 index;
+
+ /* add the appropriate 'paired' entry */
+ if (!bit0)
+ index = i;
+ else
+ index = i + 1;
+
+ /* check for room */
+ if (first_free + 1 < (s8)ice_fd_pairs[index].count)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* place in extraction sequence */
+ for (k = 0; k < ice_fd_pairs[index].count; k++) {
+ es[first_free - k].prot_id =
+ ice_fd_pairs[index].prot_id;
+ es[first_free - k].off =
+ ice_fd_pairs[index].off + (k * 2);
+
+ if (k > first_free)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* keep track of non-relevant fields */
+ mask_sel |= BIT(first_free - k);
+ }
+
+ pair_start[index] = first_free;
+ first_free -= ice_fd_pairs[index].count;
+ }
+ }
+
+ /* fill in the swap array */
+ si = hw->blk[ICE_BLK_FD].es.fvw - 1;
+ while (si >= 0) {
+ u8 indexes_used = 1;
+
+ /* assume flat at this index */
+#define ICE_SWAP_VALID 0x80
+ used[si] = si | ICE_SWAP_VALID;
+
+ if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
+ si -= indexes_used;
+ continue;
+ }
+
+ /* check for a swap location */
+ for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
+ if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
+ es[si].off == ice_fd_pairs[j].off) {
+ u8 idx;
+
+ /* determine the appropriate matching field */
+ idx = j + ((j % 2) ? -1 : 1);
+
+ indexes_used = ice_fd_pairs[idx].count;
+ for (k = 0; k < indexes_used; k++) {
+ used[si - k] = (pair_start[idx] - k) |
+ ICE_SWAP_VALID;
+ }
+
+ break;
+ }
+
+ si -= indexes_used;
+ }
+
+ /* for each set of 4 swap and 4 inset indexes, write the appropriate
+ * register
+ */
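+ /* Each 32-bit register packs four one-byte entries, lowest index in
+ * the lowest byte: for instance, if only indexes 0 and 1 contribute,
+ * used[0] = 0x80 and used[1] = 0x81 pack to raw_swap = 0x00008180.
+ */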
+ for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
+ u32 raw_swap = 0;
+ u32 raw_in = 0;
+
+ for (k = 0; k < 4; k++) {
+ u8 idx;
+
+ idx = (j * 4) + k;
+ if (used[idx] && !(mask_sel & BIT(idx))) {
+ raw_swap |= used[idx] << (k * BITS_PER_BYTE);
+#define ICE_INSET_DFLT 0x9f
+ raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
+ }
+ }
+
+ /* write the appropriate swap register set */
+ wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+ ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+ /* write the appropriate inset register set */
+ wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
+
+ ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
+ }
+
+ /* initially clear the mask select for this profile */
+ ice_update_fd_mask(hw, prof_id, 0);
+
+ return 0;
+}
+
+/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -2939,7 +3776,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
enum ice_status status;
- u32 byte = 0;
+ u8 byte = 0;
u8 prof_id;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -2953,6 +3790,18 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_alloc_prof_id(hw, blk, &prof_id);
if (status)
goto err_ice_add_prof;
+ if (blk == ICE_BLK_FD) {
+ /* For Flow Director block, the extraction sequence may
+ * need to be altered in the case where there are paired
+ * fields that have no match. This is necessary because
+ * for Flow Director, src and dest fields need to be paired
+ * for filter programming and these values are swapped
+ * during Tx.
+ */
+ status = ice_update_fd_swap(hw, prof_id, es);
+ if (status)
+ goto err_ice_add_prof;
+ }
/* and write new es */
ice_write_es(hw, blk, prof_id, es);
@@ -2962,8 +3811,10 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* add profile info */
prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
- if (!prof)
+ if (!prof) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_add_prof;
+ }
prof->profile_cookie = id;
prof->prof_id = prof_id;
@@ -2972,7 +3823,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* build list of ptgs */
while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u32 bit;
+ u8 bit;
if (!ptypes[byte]) {
bytes--;
@@ -3006,7 +3857,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
break;
/* nothing left in byte, then exit */
- m = ~((1 << (bit + 1)) - 1);
+ m = ~(u8)((1 << (bit + 1)) - 1);
if (!(ptypes[byte] & m))
break;
}
@@ -3703,8 +4554,10 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
t->tcam[i].prof_id,
t->tcam[i].ptg, vsig, 0, 0,
vl_msk, dc_msk, nm_msk);
- if (status)
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), p);
goto err_ice_add_prof_id_vsig;
+ }
/* log change */
list_add(&p->list_entry, chg);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index c7b5e1a6ea2b..568ea519af51 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,6 +18,14 @@
#define ICE_PKG_CNT 4
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *port);
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
struct ice_fv_word *es);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 0fb3fe3ff3ea..a6f391eac8ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -20,7 +20,7 @@ struct ice_fv {
/* Package and segment headers and tables */
struct ice_pkg_hdr {
- struct ice_pkg_ver format_ver;
+ struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[1];
};
@@ -30,9 +30,9 @@ struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE 0x00000010
__le32 seg_type;
- struct ice_pkg_ver seg_ver;
+ struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
- char seg_name[ICE_PKG_NAME_SIZE];
+ char seg_id[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
@@ -75,7 +75,7 @@ struct ice_buf_table {
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
- __le32 track_id;
+ __le32 rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
@@ -149,6 +149,7 @@ struct ice_buf_hdr {
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
@@ -291,6 +292,38 @@ struct ice_pkg_enum {
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
+/* Tunnel enabling */
+
+enum ice_tunnel_type {
+ TNL_VXLAN = 0,
+ TNL_GENEVE,
+ TNL_LAST = 0xFF,
+ TNL_ALL = 0xFF,
+};
+
+struct ice_tunnel_type_scan {
+ enum ice_tunnel_type type;
+ const char *label_prefix;
+};
+
+struct ice_tunnel_entry {
+ enum ice_tunnel_type type;
+ u16 boost_addr;
+ u16 port;
+ u16 ref;
+ struct ice_boost_tcam_entry *boost_entry;
+ u8 valid;
+ u8 in_use;
+ u8 marked;
+};
+
+#define ICE_TUNNEL_MAX_ENTRIES 16
+
+struct ice_tunnel_table {
+ struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
+ u16 count;
+};
+
struct ice_pkg_es {
__le16 count;
__le16 offset;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index 3de862a3c789..d74e5290677f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -42,7 +42,10 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
-
+ /* GRE */
+ /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
+ sizeof_field(struct gre_full_hdr, key)),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
@@ -134,6 +137,18 @@ static const u32 ice_ptypes_sctp_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outermost/First GRE header */
+static const u32 ice_ptypes_gre_of[] = {
+ 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
+ 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
enum ice_block blk;
@@ -178,6 +193,40 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
return 0;
}
+/* Sizes of fixed known protocol headers without header options */
+#define ICE_FLOW_PROT_HDR_SZ_MAC 14
+#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
+#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
+#define ICE_FLOW_PROT_HDR_SZ_TCP 20
+#define ICE_FLOW_PROT_HDR_SZ_UDP 8
+#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
+
+/**
+ * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose header size is to be determined
+ */
+static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
+{
+ u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
+
+ /* L3 headers */
+ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
+
+ /* L4 headers */
+ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
+ sz += ICE_FLOW_PROT_HDR_SZ_TCP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
+ sz += ICE_FLOW_PROT_HDR_SZ_UDP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
+ sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
+
+ return sz;
+}
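+
+/* e.g. a segment with MAC, IPv4 and TCP headers is 14 + 20 + 20 = 54 bytes */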
+
/**
* ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
* @params: information about the flow to be processed
@@ -225,6 +274,12 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
src = (const unsigned long *)ice_ptypes_sctp_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
+ if (!i) {
+ src = (const unsigned long *)ice_ptypes_gre_of;
+ bitmap_and(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ }
}
}
@@ -275,6 +330,9 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
prot_id = ICE_PROT_SCTP_IL;
break;
+ case ICE_FLOW_FIELD_IDX_GRE_KEYID:
+ prot_id = ICE_PROT_GRE_OF;
+ break;
default:
return ICE_ERR_NOT_IMPL;
}
@@ -324,6 +382,81 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
}
/**
+ * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose raw fields are to be extracted
+ */
+static enum ice_status
+ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
+ u8 seg)
+{
+ u16 fv_words;
+ u16 hdrs_sz;
+ u8 i;
+
+ if (!params->prof->segs[seg].raws_cnt)
+ return 0;
+
+ if (params->prof->segs[seg].raws_cnt >
+ ARRAY_SIZE(params->prof->segs[seg].raws))
+ return ICE_ERR_MAX_LIMIT;
+
+ /* Offsets within the segment headers are not supported */
+ hdrs_sz = ice_flow_calc_seg_sz(params, seg);
+ if (!hdrs_sz)
+ return ICE_ERR_PARAM;
+
+ fv_words = hw->blk[params->blk].es.fvw;
+
+ for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
+ struct ice_flow_seg_fld_raw *raw;
+ u16 off, cnt, j;
+
+ raw = &params->prof->segs[seg].raws[i];
+
+ /* Storing extraction information */
+ raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
+ raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
+ ICE_FLOW_FV_EXTRACT_SZ;
+ raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
+ BITS_PER_BYTE;
+ raw->info.xtrct.idx = params->es_cnt;
+
+ /* Determine the number of field vector entries this raw field
+ * consumes.
+ */
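+ /* e.g. a 4-byte pattern at an 8-bit displacement needs
+ * DIV_ROUND_UP(8 + 4 * 8, 16) = 3 two-byte FV words
+ */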
+ cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
+ (raw->info.src.last * BITS_PER_BYTE),
+ (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
+ off = raw->info.xtrct.off;
+ for (j = 0; j < cnt; j++) {
+ u16 idx;
+
+ /* Make sure the number of extraction sequence entries
+ * required does not exceed the block's capability
+ */
+ if (params->es_cnt >= hw->blk[params->blk].es.count ||
+ params->es_cnt >= ICE_MAX_FV_WORDS)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = raw->info.xtrct.prot_id;
+ params->es[idx].off = off;
+ params->es_cnt++;
+ off += ICE_FLOW_FV_EXTRACT_SZ;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
@@ -349,6 +482,11 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
if (status)
return status;
}
+
+ /* Process raw matching bytes */
+ status = ice_flow_xtract_raws(hw, params, i);
+ if (status)
+ return status;
}
return status;
@@ -373,10 +511,8 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
return status;
switch (params->blk) {
+ case ICE_BLK_FD:
case ICE_BLK_RSS:
- /* Only header information is provided for RSS configuration.
- * No further processing is needed.
- */
status = 0;
break;
default:
@@ -458,6 +594,43 @@ ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
}
/**
+ * ice_dealloc_flow_entry - Deallocate flow entry memory
+ * @hw: pointer to the HW struct
+ * @entry: flow entry to be removed
+ */
+static void
+ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
+{
+ if (!entry)
+ return;
+
+ if (entry->entry)
+ devm_kfree(ice_hw_to_dev(hw), entry->entry);
+
+ devm_kfree(ice_hw_to_dev(hw), entry);
+}
+
+/**
+ * ice_flow_rem_entry_sync - Remove a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry: flow entry to be removed
+ */
+static enum ice_status
+ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
+ struct ice_flow_entry *entry)
+{
+ if (!entry)
+ return ICE_ERR_BAD_PTR;
+
+ list_del(&entry->l_entry);
+
+ ice_dealloc_flow_entry(hw, entry);
+
+ return 0;
+}
+
+/**
* ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
* @hw: pointer to the HW struct
* @blk: classification stage
@@ -544,6 +717,21 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
{
enum ice_status status;
+ /* Remove all remaining flow entries before removing the flow profile */
+ if (!list_empty(&prof->entries)) {
+ struct ice_flow_entry *e, *t;
+
+ mutex_lock(&prof->entries_lock);
+
+ list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
+ status = ice_flow_rem_entry_sync(hw, blk, e);
+ if (status)
+ break;
+ }
+
+ mutex_unlock(&prof->entries_lock);
+ }
+
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
if (!status) {
@@ -629,7 +817,7 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @segs_cnt: number of packet segments provided
* @prof: stores the returned flow profile added
*/
-static enum ice_status
+enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
@@ -667,7 +855,7 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
* @blk: the block for which the flow profile is to be removed
* @prof_id: unique ID of the flow profile to be removed
*/
-static enum ice_status
+enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
struct ice_flow_prof *prof;
@@ -691,6 +879,113 @@ out:
}
/**
+ * ice_flow_add_entry - Add a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: ID of the profile to add a new flow entry to
+ * @entry_id: unique ID to identify this flow entry
+ * @vsi_handle: software VSI handle for the flow entry
+ * @prio: priority of the flow entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @entry_h: pointer to buffer that receives the new flow entry's handle
+ */
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
+ void *data, u64 *entry_h)
+{
+ struct ice_flow_entry *e = NULL;
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ /* No flow entry data is expected for RSS */
+ if (!entry_h || (!data && blk != ICE_BLK_RSS))
+ return ICE_ERR_BAD_PTR;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+
+ prof = ice_flow_find_prof_id(hw, blk, prof_id);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ } else {
+ /* Allocate memory for the entry being added and associate
+ * the VSI to the found flow profile
+ */
+ e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
+ if (!e)
+ status = ICE_ERR_NO_MEMORY;
+ else
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ }
+
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+ if (status)
+ goto out;
+
+ e->id = entry_id;
+ e->vsi_handle = vsi_handle;
+ e->prof = prof;
+ e->priority = prio;
+
+ switch (blk) {
+ case ICE_BLK_FD:
+ case ICE_BLK_RSS:
+ break;
+ default:
+ status = ICE_ERR_NOT_IMPL;
+ goto out;
+ }
+
+ mutex_lock(&prof->entries_lock);
+ list_add(&e->l_entry, &prof->entries);
+ mutex_unlock(&prof->entries_lock);
+
+ *entry_h = ICE_FLOW_ENTRY_HNDL(e);
+
+out:
+ if (status && e) {
+ if (e->entry)
+ devm_kfree(ice_hw_to_dev(hw), e->entry);
+ devm_kfree(ice_hw_to_dev(hw), e);
+ }
+
+ return status;
+}
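+
+/* Usage sketch (illustrative; prof_id, entry_id, vsi_handle and data are
+ * caller-supplied):
+ *
+ *	u64 entry_h;
+ *
+ *	if (!ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id, vsi_handle,
+ *				ICE_FLOW_PRIO_NORMAL, data, &entry_h))
+ *		ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h);
+ */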
+
+/**
+ * ice_flow_rem_entry - Remove a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry_h: handle to the flow entry to be removed
+ */
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
+ u64 entry_h)
+{
+ struct ice_flow_entry *entry;
+ struct ice_flow_prof *prof;
+ enum ice_status status = 0;
+
+ if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
+ return ICE_ERR_PARAM;
+
+ entry = ICE_FLOW_ENTRY_PTR(entry_h);
+
+ /* Retain the pointer to the flow profile as the entry will be freed */
+ prof = entry->prof;
+
+ if (prof) {
+ mutex_lock(&prof->entries_lock);
+ status = ice_flow_rem_entry_sync(hw, blk, entry);
+ mutex_unlock(&prof->entries_lock);
+ }
+
+ return status;
+}
+
+/**
* ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
* @seg: packet segment the field being set belongs to
* @fld: field to be set
@@ -752,7 +1047,7 @@ ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
* create the content of a match entry. This function should only be used for
* fixed-size data structures.
*/
-static void
+void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
{
@@ -762,6 +1057,42 @@ ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
}
+/**
+ * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
+ * @seg: packet segment the field being set belongs to
+ * @off: offset of the raw field from the beginning of the segment in bytes
+ * @len: length of the raw pattern to be matched
+ * @val_loc: location of the value to match from entry's input buffer
+ * @mask_loc: location of mask value from entry's input buffer
+ *
+ * This function specifies the offset of the raw field to be matched from the
+ * beginning of the specified packet segment, and the locations, in the form of
+ * byte offsets from the start of the input buffer for a flow entry, from where
+ * the value to match and the mask value are to be extracted. These locations are
+ * then stored in the flow profile. When adding flow entries to the associated
+ * flow profile, these locations can be used to quickly extract the values to
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+ u16 val_loc, u16 mask_loc)
+{
+ if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
+ seg->raws[seg->raws_cnt].off = off;
+ seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
+ seg->raws[seg->raws_cnt].info.src.val = val_loc;
+ seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
+ /* The "last" field is used to store the length of the field */
+ seg->raws[seg->raws_cnt].info.src.last = len;
+ }
+
+ /* Overflows of "raws" will be handled as an error condition later in
+ * the flow when this information is processed.
+ */
+ seg->raws_cnt++;
+}
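+
+/* e.g. match two raw bytes at offset 64 of the segment, with the value and
+ * mask taken from byte offsets 0 and 2 of the entry's input buffer
+ * (illustrative):
+ *
+ *	ice_flow_add_fld_raw(seg, 64, 2, 0, 2);
+ */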
+
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
@@ -945,6 +1276,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
#define ICE_RSS_OUTER_HEADERS 1
+#define ICE_RSS_INNER_HEADERS 2
/* Flow profile ID format:
* [0:31] - Packet match fields
@@ -1085,6 +1417,9 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
ICE_RSS_OUTER_HEADERS);
+ if (!status)
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
+ addl_hdrs, ICE_RSS_INNER_HEADERS);
mutex_unlock(&hw->rss_locks);
return status;
@@ -1238,6 +1573,12 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
ICE_RSS_OUTER_HEADERS);
if (status)
break;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ r->hashed_flds,
+ r->packet_hdr,
+ ICE_RSS_INNER_HEADERS);
+ if (status)
+ break;
}
}
mutex_unlock(&hw->rss_locks);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 5558627bd5eb..3913da2116d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -43,6 +43,7 @@ enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_TCP = 0x00000040,
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
+ ICE_FLOW_SEG_HDR_GRE = 0x00000200,
};
enum ice_flow_field {
@@ -58,6 +59,8 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+ /* GRE */
+ ICE_FLOW_FIELD_IDX_GRE_KEYID,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
@@ -125,6 +128,7 @@ enum ice_flow_priority {
};
#define ICE_FLOW_SEG_MAX 2
+#define ICE_FLOW_SEG_RAW_FLD_MAX 2
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
@@ -161,14 +165,38 @@ struct ice_flow_fld_info {
struct ice_flow_seg_xtrct xtrct;
};
+struct ice_flow_seg_fld_raw {
+ struct ice_flow_fld_info info;
+ u16 off; /* Offset from the start of the segment */
+};
+
struct ice_flow_seg_info {
u32 hdrs; /* Bitmask indicating protocol headers present */
u64 match; /* Bitmask indicating header fields to be matched */
u64 range; /* Bitmask indicating header fields matched as ranges */
struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
+
+ u8 raws_cnt; /* Number of raw fields to be matched */
+ struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};
+/* This structure describes a flow entry, and is tracked only in this file */
+struct ice_flow_entry {
+ struct list_head l_entry;
+
+ u64 id;
+ struct ice_flow_prof *prof;
+ /* Flow entry's content */
+ void *entry;
+ enum ice_flow_priority priority;
+ u16 vsi_handle;
+ u16 entry_sz;
+};
+
+#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
+#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
+
struct ice_flow_prof {
struct list_head l_entry;
@@ -194,7 +222,24 @@ struct ice_rss_cfg {
u32 packet_hdr;
};
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);
+enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+ u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_prof **prof);
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u64 entry_id, u16 vsi, enum ice_flow_priority prio,
+ void *data, u64 *entry_h);
+enum ice_status
+ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
+void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+ u16 val_loc, u16 mask_loc);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
new file mode 100644
index 000000000000..2418d4fff037
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_fltr.h"
+
+/**
+ * ice_fltr_free_list - free filter lists helper
+ * @dev: pointer to the device struct
+ * @h: pointer to the list head to be freed
+ *
+ * Helper function to free filter lists previously created using
+ * ice_fltr_add_mac_to_list
+ */
+void ice_fltr_free_list(struct device *dev, struct list_head *h)
+{
+ struct ice_fltr_list_entry *e, *tmp;
+
+ list_for_each_entry_safe(e, tmp, h, list_entry) {
+ list_del(&e->list_entry);
+ devm_kfree(dev, e);
+ }
+}
+
+/**
+ * ice_fltr_add_entry_to_list - allocate and add filter entry to list
+ * @dev: pointer to device needed by alloc function
+ * @info: filter info struct that gets added to the passed in list
+ * @list: pointer to the list to which the filter entry will be added
+ */
+static int
+ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info,
+ struct list_head *list)
+{
+ struct ice_fltr_list_entry *entry;
+
+ entry = devm_kzalloc(dev, sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->fltr_info = *info;
+
+ INIT_LIST_HEAD(&entry->list_entry);
+ list_add(&entry->list_entry, list);
+
+ return 0;
+}
+
+/**
+ * ice_fltr_add_mac_list - add list of MAC filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+enum ice_status
+ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_add_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_mac_list - remove list of MAC filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+enum ice_status
+ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_remove_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_add_vlan_list - add list of VLAN filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_add_vlan(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_vlan_list - remove list of VLAN filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_remove_vlan(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_add_eth_list - add list of ethertype filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_add_eth_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_eth_list - remove list of ethertype filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_remove_eth_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_all - remove all filters associated with VSI
+ * @vsi: pointer to VSI struct
+ */
+void ice_fltr_remove_all(struct ice_vsi *vsi)
+{
+ ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+}
+
+/**
+ * ice_fltr_add_mac_to_list - add MAC filter info to existing list
+ * @vsi: pointer to VSI struct
+ * @list: list to add filter info to
+ * @mac: MAC address to add
+ * @action: filter action
+ */
+int
+ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
+ const u8 *mac, enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_info info = { 0 };
+
+ info.flag = ICE_FLTR_TX;
+ info.src_id = ICE_SRC_ID_VSI;
+ info.lkup_type = ICE_SW_LKUP_MAC;
+ info.fltr_act = action;
+ info.vsi_handle = vsi->idx;
+
+ ether_addr_copy(info.l_data.mac.mac_addr, mac);
+
+ return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
+ list);
+}
+
+/**
+ * ice_fltr_add_vlan_to_list - add VLAN filter info to existing list
+ * @vsi: pointer to VSI struct
+ * @list: list to add filter info to
+ * @vlan_id: VLAN ID to add
+ * @action: filter action
+ */
+static int
+ice_fltr_add_vlan_to_list(struct ice_vsi *vsi, struct list_head *list,
+ u16 vlan_id, enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_info info = { 0 };
+
+ info.flag = ICE_FLTR_TX;
+ info.src_id = ICE_SRC_ID_VSI;
+ info.lkup_type = ICE_SW_LKUP_VLAN;
+ info.fltr_act = action;
+ info.vsi_handle = vsi->idx;
+ info.l_data.vlan.vlan_id = vlan_id;
+
+ return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
+ list);
+}
+
+/**
+ * ice_fltr_add_eth_to_list - add ethertype filter info to existing list
+ * @vsi: pointer to VSI struct
+ * @list: list to add filter info to
+ * @ethertype: ethertype of packet that matches filter
+ * @flag: filter direction, Tx or Rx
+ * @action: filter action
+ */
+static int
+ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list,
+ u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_info info = { 0 };
+
+ info.flag = flag;
+ info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
+ info.fltr_act = action;
+ info.vsi_handle = vsi->idx;
+ info.l_data.ethertype_mac.ethertype = ethertype;
+
+ if (flag == ICE_FLTR_TX)
+ info.src_id = ICE_SRC_ID_VSI;
+ else
+ info.src_id = ICE_SRC_ID_LPORT;
+
+ return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
+ list);
+}
+
+/**
+ * ice_fltr_prepare_mac - add or remove MAC rule
+ * @vsi: pointer to VSI struct
+ * @mac: MAC address to add
+ * @action: action to be performed on filter match
+ * @mac_action: pointer to add or remove MAC function
+ */
+static enum ice_status
+ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status (*mac_action)(struct ice_vsi *,
+ struct list_head *))
+{
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ result = mac_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_prepare_mac_and_broadcast - add or remove MAC and broadcast filter
+ * @vsi: pointer to VSI struct
+ * @mac: MAC address to add
+ * @action: action to be performed on filter match
+ * @mac_action: pointer to add or remove MAC function
+ */
+static enum ice_status
+ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status(*mac_action)
+ (struct ice_vsi *, struct list_head *))
+{
+ u8 broadcast[ETH_ALEN];
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ eth_broadcast_addr(broadcast);
+ if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
+ ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ result = mac_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_prepare_vlan - add or remove VLAN filter
+ * @vsi: pointer to VSI struct
+ * @vlan_id: VLAN ID to add
+ * @action: action to be performed on filter match
+ * @vlan_action: pointer to add or remove VLAN function
+ */
+static enum ice_status
+ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status (*vlan_action)(struct ice_vsi *,
+ struct list_head *))
+{
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
+ return ICE_ERR_NO_MEMORY;
+
+ result = vlan_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_prepare_eth - add or remove ethertype filter
+ * @vsi: pointer to VSI struct
+ * @ethertype: ethertype of packet to be filtered
+ * @flag: direction of packet, Tx or Rx
+ * @action: action to be performed on filter match
+ * @eth_action: pointer to add or remove ethertype function
+ */
+static enum ice_status
+ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status (*eth_action)(struct ice_vsi *,
+ struct list_head *))
+{
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
+ return ICE_ERR_NO_MEMORY;
+
+ result = eth_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_add_mac - add single MAC filter
+ * @vsi: pointer to VSI struct
+ * @mac: MAC to add
+ * @action: action to be performed on filter match
+ */
+enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list);
+}
+
+/**
+ * ice_fltr_add_mac_and_broadcast - add single MAC and broadcast
+ * @vsi: pointer to VSI struct
+ * @mac: MAC to add
+ * @action: action to be performed on filter match
+ */
+enum ice_status
+ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_mac_and_broadcast(vsi, mac, action,
+ ice_fltr_add_mac_list);
+}
+
+/**
+ * ice_fltr_remove_mac - remove MAC filter
+ * @vsi: pointer to VSI struct
+ * @mac: filter MAC to remove
+ * @action: action to remove
+ */
+enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list);
+}
+
+/**
+ * ice_fltr_add_vlan - add single VLAN filter
+ * @vsi: pointer to VSI struct
+ * @vlan_id: VLAN ID to add
+ * @action: action to be performed on filter match
+ */
+enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_vlan(vsi, vlan_id, action,
+ ice_fltr_add_vlan_list);
+}
+
+/**
+ * ice_fltr_remove_vlan - remove VLAN filter
+ * @vsi: pointer to VSI struct
+ * @vlan_id: filter VLAN to remove
+ * @action: action to remove
+ */
+enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_vlan(vsi, vlan_id, action,
+ ice_fltr_remove_vlan_list);
+}
+
+/**
+ * ice_fltr_add_eth - add specyfic ethertype filter
+ * @vsi: pointer to VSI struct
+ * @ethertype: ethertype of filter
+ * @flag: direction of packet to be filtered, Tx or Rx
+ * @action: action to be performed on filter match
+ */
+enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
+ ice_fltr_add_eth_list);
+}
+
+/**
+ * ice_fltr_remove_eth - remove ethertype filter
+ * @vsi: pointer to VSI struct
+ * @ethertype: ethertype of filter
+ * @flag: direction of filter
+ * @action: action to remove
+ */
+enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
+ u16 flag, enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
+ ice_fltr_remove_eth_list);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
new file mode 100644
index 000000000000..361cb4da9b43
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#ifndef _ICE_FLTR_H_
+#define _ICE_FLTR_H_
+
+void ice_fltr_free_list(struct device *dev, struct list_head *h);
+enum ice_status
+ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
+ const u8 *mac, enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
+enum ice_status
+ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
+
+enum ice_status
+ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
+ enum ice_sw_fwd_act_type action);
+
+enum ice_status
+ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action);
+void ice_fltr_remove_all(struct ice_vsi *vsi);
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 1d37a9f02c1c..1086c9f778b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,9 +6,6 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
-#define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096))
-#define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096))
-#define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096))
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD_HEAD_S 0
@@ -42,6 +39,7 @@
#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0)
#define PF_MBX_ARQLEN 0x0022E480
#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQCRIT_M BIT(30)
#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31)
#define PF_MBX_ARQT 0x0022E580
#define PF_MBX_ATQBAH 0x0022E180
@@ -50,6 +48,7 @@
#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN 0x0022E200
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQCRIT_M BIT(30)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
#define PRTDCB_GENC 0x00083000
@@ -58,6 +57,7 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
+#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
@@ -218,6 +218,11 @@
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32))
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M ICE_M(0xFFFF, 0)
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32))
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M ICE_M(0xFFFF, 0)
#define GL_MDCK_TX_TDPU 0x00049348
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDET_RX 0x00294C00
@@ -289,6 +294,20 @@
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
+#define GLQF_FD_CNT 0x00460018
+#define GLQF_FD_CNT_FD_BCNT_S 16
+#define GLQF_FD_CNT_FD_BCNT_M ICE_M(0x7FFF, 16)
+#define GLQF_FD_SIZE 0x00460010
+#define GLQF_FD_SIZE_FD_GSIZE_S 0
+#define GLQF_FD_SIZE_FD_GSIZE_M ICE_M(0x7FFF, 0)
+#define GLQF_FD_SIZE_FD_BSIZE_S 16
+#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16)
+#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4))
+#define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512))
+#define PFQF_FD_ENA 0x0043A000
+#define PFQF_FD_ENA_FD_ENA_M BIT(0)
+#define PFQF_FD_SIZE 0x00460100
#define GLDCB_RTCTQ_RXQNUM_S 0
#define GLDCB_RTCTQ_RXQNUM_M ICE_M(0x7FF, 0)
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
@@ -332,6 +351,7 @@
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
+#define GLSTAT_FD_CNT0L(_i) (0x003A0000 + ((_i) * 8))
#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
@@ -342,6 +362,9 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
+#define VSIQF_FD_CNT_FD_GCNT_S 0
+#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 878e125d8b42..14dfbbc1b2cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -40,6 +40,104 @@ union ice_32byte_rx_desc {
} wb; /* writeback */
};
+struct ice_fltr_desc {
+ __le64 qidx_compq_space_stat;
+ __le64 dtype_cmd_vsi_fdid;
+};
+
+#define ICE_FXD_FLTR_QW0_QINDEX_S 0
+#define ICE_FXD_FLTR_QW0_QINDEX_M (0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S)
+#define ICE_FXD_FLTR_QW0_COMP_Q_S 11
+#define ICE_FXD_FLTR_QW0_COMP_Q_M BIT_ULL(ICE_FXD_FLTR_QW0_COMP_Q_S)
+#define ICE_FXD_FLTR_QW0_COMP_Q_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_S 12
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_M \
+ (0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S)
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_FD_SPACE_S 14
+#define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S)
+#define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST 0x2ULL
+
+#define ICE_FXD_FLTR_QW0_STAT_CNT_S 16
+#define ICE_FXD_FLTR_QW0_STAT_CNT_M \
+ (0x1FFFULL << ICE_FXD_FLTR_QW0_STAT_CNT_S)
+#define ICE_FXD_FLTR_QW0_STAT_ENA_S 29
+#define ICE_FXD_FLTR_QW0_STAT_ENA_M (0x3ULL << ICE_FXD_FLTR_QW0_STAT_ENA_S)
+#define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_S 31
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_M BIT_ULL(ICE_FXD_FLTR_QW0_EVICT_ENA_S)
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE 0x0ULL
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_TO_Q_S 32
+#define ICE_FXD_FLTR_QW0_TO_Q_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_S)
+#define ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_TO_Q_PRI_S 35
+#define ICE_FXD_FLTR_QW0_TO_Q_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_PRI_S)
+#define ICE_FXD_FLTR_QW0_TO_Q_PRIO1 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_S 38
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_M \
+ (0x3ULL << ICE_FXD_FLTR_QW0_DPU_RECIPE_S)
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_DROP_S 40
+#define ICE_FXD_FLTR_QW0_DROP_M BIT_ULL(ICE_FXD_FLTR_QW0_DROP_S)
+#define ICE_FXD_FLTR_QW0_DROP_NO 0x0ULL
+#define ICE_FXD_FLTR_QW0_DROP_YES 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_S 41
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_FLEX_PRI_S)
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_NONE 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_MDID_S 44
+#define ICE_FXD_FLTR_QW0_FLEX_MDID_M (0xFULL << ICE_FXD_FLTR_QW0_FLEX_MDID_S)
+#define ICE_FXD_FLTR_QW0_FLEX_MDID0 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_VAL_S 48
+#define ICE_FXD_FLTR_QW0_FLEX_VAL_M \
+ (0xFFFFULL << ICE_FXD_FLTR_QW0_FLEX_VAL_S)
+#define ICE_FXD_FLTR_QW0_FLEX_VAL0 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_DTYPE_S 0
+#define ICE_FXD_FLTR_QW1_DTYPE_M (0xFULL << ICE_FXD_FLTR_QW1_DTYPE_S)
+#define ICE_FXD_FLTR_QW1_PCMD_S 4
+#define ICE_FXD_FLTR_QW1_PCMD_M BIT_ULL(ICE_FXD_FLTR_QW1_PCMD_S)
+#define ICE_FXD_FLTR_QW1_PCMD_ADD 0x0ULL
+#define ICE_FXD_FLTR_QW1_PCMD_REMOVE 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_PROF_PRI_S 5
+#define ICE_FXD_FLTR_QW1_PROF_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_PROF_PRI_S)
+#define ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_PROF_S 8
+#define ICE_FXD_FLTR_QW1_PROF_M (0x3FULL << ICE_FXD_FLTR_QW1_PROF_S)
+#define ICE_FXD_FLTR_QW1_PROF_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_FD_VSI_S 14
+#define ICE_FXD_FLTR_QW1_FD_VSI_M (0x3FFULL << ICE_FXD_FLTR_QW1_FD_VSI_S)
+#define ICE_FXD_FLTR_QW1_SWAP_S 24
+#define ICE_FXD_FLTR_QW1_SWAP_M BIT_ULL(ICE_FXD_FLTR_QW1_SWAP_S)
+#define ICE_FXD_FLTR_QW1_SWAP_NOT_SET 0x0ULL
+#define ICE_FXD_FLTR_QW1_SWAP_SET 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25
+#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)
+#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28
+#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S)
+#define ICE_FXD_FLTR_QW1_FDID_MDID_FD 0x05ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_S 32
+#define ICE_FXD_FLTR_QW1_FDID_M \
+ (0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S)
+#define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL
+
struct ice_rx_ptype_decoded {
u32 ptype:10;
u32 known:1;
@@ -262,6 +360,12 @@ enum ice_rx_flex_desc_status_error_0_bits {
ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
+enum ice_rx_flex_desc_status_error_1_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+ ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
+
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
@@ -340,6 +444,7 @@ struct ice_tx_desc {
enum ice_tx_desc_dtype_value {
ICE_TX_DESC_DTYPE_DATA = 0x0,
ICE_TX_DESC_DTYPE_CTX = 0x1,
+ ICE_TX_DESC_DTYPE_FLTR_PROG = 0x8,
/* DESC_DONE - HW has completed write-back of descriptor */
ICE_TX_DESC_DTYPE_DESC_DONE = 0xF,
};
@@ -351,12 +456,14 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_EOP = 0x0001,
ICE_TX_DESC_CMD_RS = 0x0002,
ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ ICE_TX_DESC_CMD_DUMMY = 0x0010,
ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020,
ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040,
ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060,
ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100,
ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200,
ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300,
+ ICE_TX_DESC_CMD_RE = 0x0400,
};
#define ICE_TXD_QW1_OFFSET_S 16
@@ -413,6 +520,25 @@ enum ice_tx_ctx_desc_cmd_bits {
ICE_TX_CTX_DESC_RESERVED = 0x40
};
+enum ice_tx_ctx_desc_eipt_offload {
+ ICE_TX_CTX_EIPT_NONE = 0x0,
+ ICE_TX_CTX_EIPT_IPV6 = 0x1,
+ ICE_TX_CTX_EIPT_IPV4_NO_CSUM = 0x2,
+ ICE_TX_CTX_EIPT_IPV4 = 0x3
+};
+
+#define ICE_TXD_CTX_QW0_EIPLEN_S 2
+
+#define ICE_TXD_CTX_QW0_L4TUNT_S 9
+
+#define ICE_TXD_CTX_UDP_TUNNELING BIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S)
+#define ICE_TXD_CTX_GRE_TUNNELING (0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S)
+
+#define ICE_TXD_CTX_QW0_NATLEN_S 12
+
+#define ICE_TXD_CTX_QW0_L4T_CS_S 23
+#define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S)
+
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
@@ -455,7 +581,7 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal do not write */
+ u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
/* macro to make the table lines short */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 2f256bf45efc..28b46cc9f5cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
+#include "ice_fltr.h"
#include "ice_dcb_lib.h"
/**
@@ -18,6 +19,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_PF";
case ICE_VSI_VF:
return "ICE_VSI_VF";
+ case ICE_VSI_CTRL:
+ return "ICE_VSI_CTRL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
default:
@@ -37,7 +40,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
*/
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
- int i, ret = 0;
+ int ret = 0;
+ u16 i;
for (i = 0; i < vsi->num_rxq; i++)
ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
@@ -121,6 +125,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_CTRL:
case ICE_VSI_LB:
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
@@ -185,6 +190,11 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
*/
vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
break;
+ case ICE_VSI_CTRL:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
+ vsi->num_q_vectors = 1;
+ break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
@@ -248,8 +258,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
- dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
- vsi->vsi_num, status);
+ dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
+ vsi->vsi_num, ice_stat_str(status));
kfree(ctxt);
}
@@ -320,7 +330,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
/* updates the PF for this cleared VSI */
pf->vsi[vsi->idx] = NULL;
- if (vsi->idx < pf->next_vsi)
+ if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi);
@@ -331,6 +341,25 @@ int ice_vsi_clear(struct ice_vsi *vsi)
}
/**
+ * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
+{
+ struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+
+ if (!q_vector->tx.ring)
+ return IRQ_HANDLED;
+
+#define FDIR_RX_DESC_CLEAN_BUDGET 64
+ ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_tx_irq(q_vector->tx.ring);
+
+ return IRQ_HANDLED;
+}
+
+/**
* ice_msix_clean_rings - MSIX mode Interrupt Handler
* @irq: interrupt number
* @data: pointer to a q_vector
@@ -381,8 +410,6 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
vsi->back = pf;
set_bit(__ICE_DOWN, vsi->state);
- vsi->idx = pf->next_vsi;
-
if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
else
@@ -396,6 +423,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
+ case ICE_VSI_CTRL:
+ if (ice_vsi_alloc_arrays(vsi))
+ goto err_rings;
+
+ /* Setup ctrl VSI MSIX irq handler */
+ vsi->irq_handler = ice_msix_clean_ctrl_vsi;
+ break;
case ICE_VSI_VF:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -409,12 +443,20 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
goto unlock_pf;
}
- /* fill VSI slot in the PF struct */
- pf->vsi[pf->next_vsi] = vsi;
+ if (vsi->type == ICE_VSI_CTRL) {
+ /* Use the last VSI slot as the index for the control VSI */
+ vsi->idx = pf->num_alloc_vsi - 1;
+ pf->ctrl_vsi_idx = vsi->idx;
+ pf->vsi[vsi->idx] = vsi;
+ } else {
+ /* fill slot and make note of the index */
+ vsi->idx = pf->next_vsi;
+ pf->vsi[pf->next_vsi] = vsi;
- /* prepare pf->next_vsi for next use */
- pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
- pf->next_vsi);
+ /* prepare pf->next_vsi for next use */
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
+ }
goto unlock_pf;
err_rings:
@@ -426,6 +468,48 @@ unlock_pf:
}
/**
+ * ice_alloc_fd_res - Allocate FD resource for a VSI
+ * @vsi: pointer to the ice_vsi
+ *
+ * This allocates the FD resources
+ *
+ * Returns 0 on success, -EPERM on no-op or -EIO on failure
+ */
+static int ice_alloc_fd_res(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u32 g_val, b_val;
+
+ /* Flow Director filters are only allocated/assigned to the PF VSI which
+ * passes the traffic. The CTRL VSI is only used to add/delete filters
+ * so we don't allocate resources to it
+ */
+
+ /* FD filters from guaranteed pool per VSI */
+ g_val = pf->hw.func_caps.fd_fltr_guar;
+ if (!g_val)
+ return -EPERM;
+
+ /* FD filters from best effort pool */
+ b_val = pf->hw.func_caps.fd_fltr_best_effort;
+ if (!b_val)
+ return -EPERM;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EPERM;
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EPERM;
+
+ vsi->num_gfltr = g_val / pf->num_alloc_vsi;
+
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+
+ return 0;
+}
+
+/**
* ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to
*
@@ -521,8 +605,8 @@ static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
if (status)
- dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
- vsi->vsi_num, status);
+ dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
+ vsi->vsi_num, ice_stat_str(status));
}
/**
@@ -565,8 +649,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
- vsi->rss_table_size = cap->rss_table_size;
- vsi->rss_size = min_t(int, num_online_cpus(),
+ vsi->rss_table_size = (u16)cap->rss_table_size;
+ vsi->rss_size = min_t(u16, num_online_cpus(),
BIT(cap->rss_table_entry_width));
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
@@ -581,8 +665,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
case ICE_VSI_LB:
break;
default:
- dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n",
- vsi->type);
+ dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
+ ice_vsi_type_str(vsi->type));
break;
}
}
@@ -684,15 +768,15 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_RSS_QS_PER_VF;
- qcount_rx = min_t(int, rx_numq_tc, max_rss);
+ qcount_rx = min_t(u16, rx_numq_tc, max_rss);
if (!vsi->req_rxq)
- qcount_rx = min_t(int, qcount_rx,
+ qcount_rx = min_t(u16, qcount_rx,
vsi->rss_size);
}
}
/* find the (rounded up) power-of-2 of qcount */
- pow = order_base_2(qcount_rx);
+ pow = (u16)order_base_2(qcount_rx);
ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
@@ -752,6 +836,51 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
}
/**
+ * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ * @vsi: the VSI being configured
+ */
+static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+{
+ u8 dflt_q_group, dflt_q_prio;
+ u16 dflt_q, report_q, val;
+
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL)
+ return;
+
+ val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
+ ctxt->info.valid_sections |= cpu_to_le16(val);
+ dflt_q = 0;
+ dflt_q_group = 0;
+ report_q = 0;
+ dflt_q_prio = 0;
+
+ /* enable flow director filtering/programming */
+ val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
+ ctxt->info.fd_options = cpu_to_le16(val);
+ /* max of allocated flow director filters */
+ ctxt->info.max_fd_fltr_dedicated =
+ cpu_to_le16(vsi->num_gfltr);
+ /* max of shared flow director filters any VSI may program */
+ ctxt->info.max_fd_fltr_shared =
+ cpu_to_le16(vsi->num_bfltr);
+ /* default queue index within the VSI of the default FD */
+ val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
+ ICE_AQ_VSI_FD_DEF_Q_M);
+ /* target queue or queue group to the FD filter */
+ val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
+ ICE_AQ_VSI_FD_DEF_GRP_M);
+ ctxt->info.fd_def_q = cpu_to_le16(val);
+ /* queue index on which FD filter completion is reported */
+ val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
+ ICE_AQ_VSI_FD_REPORT_Q_M);
+ /* priority of the default qindex action */
+ val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
+ ICE_AQ_VSI_FD_DEF_PRIORITY_M);
+ ctxt->info.fd_report_opt = cpu_to_le16(val);
+}
+
+/**
* ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
* @ctxt: the VSI context being set
* @vsi: the VSI being configured
@@ -776,13 +905,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
- case ICE_VSI_LB:
+ default:
dev_dbg(dev, "Unsupported VSI type %s\n",
ice_vsi_type_str(vsi->type));
return;
- default:
- dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
- return;
}
ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
@@ -812,8 +938,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
if (!ctxt)
return -ENOMEM;
- ctxt->info = vsi->info;
switch (vsi->type) {
+ case ICE_VSI_CTRL:
case ICE_VSI_LB:
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
@@ -829,12 +955,15 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
}
ice_set_dflt_vsi_ctx(ctxt);
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ ice_set_fd_vsi_ctx(ctxt, vsi);
/* if the switch is in VEB mode, allow VSI loopback */
if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
/* Set LUT type and HASH type if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
+ vsi->type != ICE_VSI_CTRL) {
ice_set_rss_vsi_ctx(ctxt, vsi);
/* if updating VSI context, make sure to set valid_section:
* to indicate which section of VSI context being updated
@@ -941,7 +1070,7 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
*/
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
- int start = 0, end = 0;
+ u16 start = 0, end = 0;
if (needed > res->end)
return -ENOMEM;
@@ -1024,6 +1153,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
struct device *dev;
u16 num_q_vectors;
+ int base;
dev = ice_pf_to_dev(pf);
/* SRIOV doesn't grab irq_tracker entries for each VSI */
@@ -1038,14 +1168,15 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
- vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
- vsi->idx);
- if (vsi->base_vector < 0) {
+ base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);
+
+ if (base < 0) {
dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
ice_get_free_res_count(pf->irq_tracker),
ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
return -ENOENT;
}
+ vsi->base_vector = (u16)base;
pf->num_avail_sw_msix -= num_q_vectors;
return 0;
@@ -1085,7 +1216,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct device *dev;
- int i;
+ u16 i;
dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */
@@ -1178,7 +1309,7 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
u8 *lut;
dev = ice_pf_to_dev(pf);
- vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -1193,7 +1324,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
vsi->rss_table_size);
if (status) {
- dev_err(dev, "set_rss_lut failed, error %d\n", status);
+ dev_err(dev, "set_rss_lut failed, error %s\n",
+ ice_stat_str(status));
err = -EIO;
goto ice_vsi_cfg_rss_exit;
}
@@ -1215,7 +1347,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
if (status) {
- dev_err(dev, "set_rss_key failed, error %d\n", status);
+ dev_err(dev, "set_rss_key failed, error %s\n",
+ ice_stat_str(status));
err = -EIO;
}
@@ -1248,8 +1381,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
if (status)
- dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
- vsi->vsi_num, status);
+ dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
+ vsi->vsi_num, ice_stat_str(status));
}
/**
@@ -1281,91 +1414,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for IPv6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for sctp4 with input set IP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for sctp6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-}
-
-/**
- * ice_add_mac_to_list - Add a MAC address filter entry to the list
- * @vsi: the VSI to be forwarded to
- * @add_list: pointer to the list which contains MAC filter entries
- * @macaddr: the MAC address to be added.
- *
- * Adds MAC address filter entry to the temp list
- *
- * Returns 0 on success or ENOMEM on failure.
- */
-int
-ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr)
-{
- struct ice_fltr_list_entry *tmp;
- struct ice_pf *pf = vsi->back;
-
- tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC);
- if (!tmp)
- return -ENOMEM;
-
- tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
- tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.vsi_handle = vsi->idx;
- ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
-
- INIT_LIST_HEAD(&tmp->list_entry);
- list_add(&tmp->list_entry, add_list);
-
- return 0;
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
}
/**
@@ -1415,54 +1514,21 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
}
/**
- * ice_free_fltr_list - free filter lists helper
- * @dev: pointer to the device struct
- * @h: pointer to the list head to be freed
- *
- * Helper function to free filter lists previously created using
- * ice_add_mac_to_list
- */
-void ice_free_fltr_list(struct device *dev, struct list_head *h)
-{
- struct ice_fltr_list_entry *e, *tmp;
-
- list_for_each_entry_safe(e, tmp, h, list_entry) {
- list_del(&e->list_entry);
- devm_kfree(dev, e);
- }
-}
-
-/**
* ice_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
* @vid: VLAN ID to be added
+ * @action: filter action to be performed on match
*/
-int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
+int
+ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
{
- struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
struct device *dev;
int err = 0;
dev = ice_pf_to_dev(pf);
- tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
- tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
- tmp->fltr_info.vsi_handle = vsi->idx;
- tmp->fltr_info.l_data.vlan.vlan_id = vid;
-
- INIT_LIST_HEAD(&tmp->list_entry);
- list_add(&tmp->list_entry, &tmp_add_list);
-
- status = ice_add_vlan(&pf->hw, &tmp_add_list);
- if (!status) {
+ if (!ice_fltr_add_vlan(vsi, vid, action)) {
vsi->num_vlan++;
} else {
err = -ENODEV;
@@ -1470,7 +1536,6 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
vsi->vsi_num);
}
- ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
@@ -1483,41 +1548,25 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
*/
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
- struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
enum ice_status status;
struct device *dev;
int err = 0;
dev = ice_pf_to_dev(pf);
- list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return -ENOMEM;
-
- list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- list->fltr_info.vsi_handle = vsi->idx;
- list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- list->fltr_info.l_data.vlan.vlan_id = vid;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src_id = ICE_SRC_ID_VSI;
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- status = ice_remove_vlan(&pf->hw, &tmp_add_list);
+ status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!status) {
vsi->num_vlan--;
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
- vid, vsi->vsi_num, status);
+ dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
+ vid, vsi->vsi_num, ice_stat_str(status));
} else {
- dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
- vid, vsi->vsi_num, status);
+ dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
+ vid, vsi->vsi_num, ice_stat_str(status));
err = -EIO;
}
- ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
@@ -1547,6 +1596,32 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
}
/**
+ * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
+ * @hw: HW pointer
+ * @pf_q: index of the Rx queue in the PF's queue space
+ * @rxdid: flexible descriptor RXDID
+ * @prio: priority for the RXDID for this queue
+ */
+void
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
+{
+ int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+
+ /* clear any previous values */
+ regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
+ QRXFLXP_CNTXT_RXDID_PRIO_M |
+ QRXFLXP_CNTXT_TS_M);
+
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+}
+
+/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
*
@@ -1671,7 +1746,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- u32 txq = 0, rxq = 0;
+ u16 txq = 0, rxq = 0;
int i, q;
for (i = 0; i < vsi->num_q_vectors; i++) {
@@ -1737,8 +1812,9 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -1761,6 +1837,12 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
enum ice_status status;
int ret = 0;
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.pvid)
+ return 0;
+
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -1783,8 +1865,9 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
- ena, status, hw->adminq.sq_last_status);
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
+ ena, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -1922,9 +2005,10 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
- ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
- pf->hw.adminq.sq_last_status);
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n",
+ ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
+ ice_stat_str(status),
+ ice_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
@@ -1991,47 +2075,6 @@ clear_reg_idx:
}
/**
- * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
- * @vsi: the VSI being configured
- * @add_rule: boolean value to add or remove ethertype filter rule
- */
-static void
-ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
-{
- struct ice_fltr_list_entry *list;
- struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
- struct device *dev;
-
- dev = ice_pf_to_dev(pf);
- list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return;
-
- list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
- list->fltr_info.fltr_act = ICE_DROP_PACKET;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src_id = ICE_SRC_ID_VSI;
- list->fltr_info.vsi_handle = vsi->idx;
- list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;
-
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- if (add_rule)
- status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
- else
- status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
-
- if (status)
- dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
- vsi->vsi_num, status);
-
- ice_free_fltr_list(dev, &tmp_add_list);
-}
-
-/**
* ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
@@ -2039,45 +2082,25 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
*/
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
- struct ice_fltr_list_entry *list;
+ enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
+ enum ice_sw_fwd_act_type act);
struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
enum ice_status status;
struct device *dev;
dev = ice_pf_to_dev(pf);
- list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return;
+ eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
- list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
- list->fltr_info.vsi_handle = vsi->idx;
- list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
-
- if (tx) {
- list->fltr_info.fltr_act = ICE_DROP_PACKET;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src_id = ICE_SRC_ID_VSI;
- } else {
- list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- list->fltr_info.flag = ICE_FLTR_RX;
- list->fltr_info.src_id = ICE_SRC_ID_LPORT;
- }
-
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- if (create)
- status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
+ if (tx)
+ status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
else
- status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
+ status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI);
if (status)
- dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
+ dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, status);
-
- ice_free_fltr_list(dev, &tmp_add_list);
+ vsi->vsi_num, ice_stat_str(status));
}
/**
@@ -2122,10 +2145,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;
+ ice_alloc_fd_res(vsi);
+
if (ice_vsi_get_qs(vsi)) {
dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
vsi->idx);
- goto unroll_get_qs;
+ goto unroll_vsi_alloc;
}
/* set RSS capabilities */
@@ -2140,6 +2165,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_get_qs;
switch (vsi->type) {
+ case ICE_VSI_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2164,20 +2190,23 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* so this handles those cases (i.e. adding the PF to a bridge
* without the 8021q module loaded).
*/
- ret = ice_vsi_add_vlan(vsi, 0);
+ ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
if (ret)
goto unroll_clear_rings;
ice_vsi_map_rings_to_vectors(vsi);
- /* Do not exit if configuring RSS had an issue, at least
- * receive traffic on first queue. Hence no need to capture
- * return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- ice_vsi_cfg_rss_lut_key(vsi);
- ice_vsi_set_rss_flow_fld(vsi);
- }
+ /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
+ if (vsi->type != ICE_VSI_CTRL)
+ /* Do not exit if configuring RSS had an issue, at
+ * least receive traffic on first queue. Hence no
+ * need to capture return value
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ ice_init_arfs(vsi);
break;
case ICE_VSI_VF:
/* VF driver will take care of creating netdev for this type and
@@ -2223,8 +2252,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, status);
+ dev_err(dev, "VSI %d failed lan queue config, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
goto unroll_vector_base;
}
@@ -2239,9 +2268,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
*/
if (!ice_is_safe_mode(pf))
if (vsi->type == ICE_VSI_PF) {
- ice_vsi_add_rem_eth_mac(vsi, true);
-
- /* Tx LLDP packets */
+ ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
ice_cfg_sw_lldp(vsi, true, true);
}
@@ -2259,6 +2287,7 @@ unroll_vsi_init:
ice_vsi_delete(vsi);
unroll_get_qs:
ice_vsi_put_qs(vsi);
+unroll_vsi_alloc:
ice_vsi_clear(vsi);
return NULL;
@@ -2411,6 +2440,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
if (!locked)
rtnl_unlock();
}
+ } else if (vsi->type == ICE_VSI_CTRL) {
+ err = ice_vsi_open_ctrl(vsi);
}
return err;
@@ -2440,6 +2471,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
} else {
ice_vsi_close(vsi);
}
+ } else if (vsi->type == ICE_VSI_CTRL) {
+ ice_vsi_close(vsi);
}
}
@@ -2558,7 +2591,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
if (!ice_is_safe_mode(pf)) {
if (vsi->type == ICE_VSI_PF) {
- ice_vsi_add_rem_eth_mac(vsi, false);
+ ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
ice_cfg_sw_lldp(vsi, true, false);
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
@@ -2568,7 +2602,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
@@ -2673,15 +2707,13 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
&coalesce[i]);
- for (; i < vsi->num_q_vectors; i++) {
- struct ice_coalesce_stored coalesce_dflt = {
- .itr_tx = ICE_DFLT_TX_ITR,
- .itr_rx = ICE_DFLT_RX_ITR,
- .intrl = 0
- };
+ /* number of q_vectors increased, so assume coalesce settings were
+ * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
+ * the previous settings from q_vector 0 for all of the new q_vectors
+ */
+ for (; i < vsi->num_q_vectors; i++)
ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
- &coalesce_dflt);
- }
+ &coalesce[0]);
}
/**
@@ -2746,6 +2778,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vsi;
ice_vsi_get_qs(vsi);
+
+ ice_alloc_fd_res(vsi);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
@@ -2754,6 +2788,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vsi;
switch (vsi->type) {
+ case ICE_VSI_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2773,17 +2808,19 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_map_rings_to_vectors(vsi);
if (ice_is_xdp_ena_vsi(vsi)) {
- vsi->num_xdp_txq = vsi->alloc_txq;
+ vsi->num_xdp_txq = vsi->alloc_rxq;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
if (ret)
goto err_vectors;
}
- /* Do not exit if configuring RSS had an issue, at least
- * receive traffic on first queue. Hence no need to capture
- * return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_vsi_cfg_rss_lut_key(vsi);
+ /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
+ if (vsi->type != ICE_VSI_CTRL)
+ /* Do not exit if configuring RSS had an issue, at
+ * least receive traffic on first queue. Hence no
+ * need to capture return value
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_vsi_cfg_rss_lut_key(vsi);
break;
case ICE_VSI_VF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -2814,8 +2851,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, status);
+ dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
if (init_vsi) {
ret = -EIO;
goto err_vectors;
@@ -2924,8 +2961,8 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs);
if (status) {
- dev_err(dev, "VSI %d failed TC config, error %d\n",
- vsi->vsi_num, status);
+ dev_err(dev, "VSI %d failed TC config, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
ret = -EIO;
goto out;
}
@@ -2985,33 +3022,27 @@ void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
}
/**
- * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
- * @vsi: the VSI being configured MAC filter
- * @macaddr: the MAC address to be added.
- * @set: Add or delete a MAC filter
- *
- * Adds or removes MAC address filter entry for VF VSI
+ * ice_status_to_errno - convert from enum ice_status to Linux errno
+ * @err: ice_status value to convert
*/
-enum ice_status
-ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
+int ice_status_to_errno(enum ice_status err)
{
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
-
- /* Update MAC filter list to be added or removed for a VSI */
- if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
- status = ICE_ERR_NO_MEMORY;
- goto cfg_mac_fltr_exit;
+ switch (err) {
+ case ICE_SUCCESS:
+ return 0;
+ case ICE_ERR_DOES_NOT_EXIST:
+ return -ENOENT;
+ case ICE_ERR_OUT_OF_RANGE:
+ return -ENOTTY;
+ case ICE_ERR_PARAM:
+ return -EINVAL;
+ case ICE_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case ICE_ERR_MAX_LIMIT:
+ return -EAGAIN;
+ default:
+ return -EINVAL;
}
-
- if (set)
- status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
- else
- status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
-
-cfg_mac_fltr_exit:
- ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
- return status;
}
/**
@@ -3079,8 +3110,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
- vsi->vsi_num, status);
+ dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
return -EIO;
}
@@ -3118,8 +3149,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
- dflt_vsi->vsi_num, status);
+ dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n",
+ dflt_vsi->vsi_num, ice_stat_str(status));
return -EIO;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 04ca00799364..d80e6afa4511 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -8,12 +8,6 @@
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
-int
-ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr);
-
-void ice_free_fltr_list(struct device *dev, struct list_head *h);
-
void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
@@ -22,7 +16,8 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
+int
+ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
@@ -79,6 +74,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
bool ice_is_reset_in_progress(unsigned long *state);
+void
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio);
+
void ice_vsi_put_qs(struct ice_vsi *vsi);
void ice_vsi_dis_irq(struct ice_vsi *vsi);
@@ -97,6 +95,8 @@ void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
+int ice_status_to_errno(enum ice_status err);
+
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5b190c257124..082825e3cb39 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -8,6 +8,7 @@
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
+#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
@@ -133,38 +134,24 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
static int ice_init_mac_fltr(struct ice_pf *pf)
{
enum ice_status status;
- u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
+ u8 *perm_addr;
vsi = ice_get_main_vsi(pf);
if (!vsi)
return -EINVAL;
- /* To add a MAC filter, first add the MAC to a list and then
- * pass the list to ice_add_mac.
- */
-
- /* Add a unicast MAC filter so the VSI can get its packets */
- status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true);
- if (status)
- goto unregister;
-
- /* VSI needs to receive broadcast traffic, so add the broadcast
- * MAC address to the list as well.
- */
- eth_broadcast_addr(broadcast);
- status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true);
- if (status)
- goto unregister;
+ perm_addr = vsi->port_info->mac.perm_addr;
+ status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
+ if (!status)
+ return 0;
- return 0;
-unregister:
/* We aren't useful with no MAC filters, so unregister if we
* had an error
*/
- if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
- dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. Unregistering device\n",
- status);
+ if (vsi->netdev->reg_state == NETREG_REGISTERED) {
+ dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
+ ice_stat_str(status));
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
@@ -188,7 +175,8 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
+ if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
+ ICE_FWD_TO_VSI))
return -EINVAL;
return 0;
@@ -209,7 +197,8 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
+ if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
+ ICE_FWD_TO_VSI))
return -EINVAL;
return 0;
@@ -307,8 +296,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Remove MAC addresses in the unsync list */
- status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
- ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
+ status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
+ ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
if (status) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
@@ -319,8 +308,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Add MAC addresses in the sync list */
- status = ice_add_mac(hw, &vsi->tmp_sync_list);
- ice_free_fltr_list(dev, &vsi->tmp_sync_list);
+ status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
+ ice_fltr_free_list(dev, &vsi->tmp_sync_list);
/* If filter is added successfully or already exists, do not go into
* 'if' condition and report it as error. Instead continue processing
* rest of the function.
@@ -357,7 +346,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
vsi->current_netdev_flags &= ~IFF_ALLMULTI;
goto out_promisc;
}
- } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ } else {
+ /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
if (vsi->vlan_ena)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
@@ -462,7 +452,7 @@ static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- int i;
+ unsigned int i;
/* already prepared for reset */
if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
@@ -1017,8 +1007,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ret == ICE_ERR_AQ_NO_WORK)
break;
if (ret) {
- dev_err(dev, "%s Receive Queue event error %d\n", qtype,
- ret);
+ dev_err(dev, "%s Receive Queue event error %s\n", qtype,
+ ice_stat_str(ret));
break;
}
@@ -1123,7 +1113,7 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
*
* If not already scheduled, this puts the task into the work queue.
*/
-static void ice_service_task_schedule(struct ice_pf *pf)
+void ice_service_task_schedule(struct ice_pf *pf)
{
if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
!test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
@@ -1198,8 +1188,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
+ unsigned int i;
u32 reg;
- int i;
if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
/* Since the VF MDD event logging is rate limited, check if
@@ -1332,8 +1322,13 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* PF can be configured to reset the VF through ethtool
* private flag mdd-auto-reset-vf.
*/
- if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
+ /* VF MDD event counters will be cleared by
+ * reset, so print the event prior to reset.
+ */
+ ice_print_vf_rx_mdd_event(vf);
ice_reset_vf(&pf->vf[i], false);
+ }
}
}
@@ -1493,7 +1488,7 @@ static void ice_service_task(struct work_struct *work)
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
-
+ ice_sync_arfs_fltrs(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
@@ -1652,9 +1647,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
}
/* register for affinity change notifications */
- q_vector->affinity_notify.notify = ice_irq_affinity_notify;
- q_vector->affinity_notify.release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
+ struct irq_affinity_notify *affinity_notify;
+
+ affinity_notify = &q_vector->affinity_notify;
+ affinity_notify->notify = ice_irq_affinity_notify;
+ affinity_notify->release = ice_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, affinity_notify);
+ }
/* assign the mask for this irq */
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
@@ -1666,8 +1666,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector) {
vector--;
- irq_num = pf->msix_entries[base + vector].vector,
- irq_set_affinity_notifier(irq_num, NULL);
+ irq_num = pf->msix_entries[base + vector].vector;
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+ irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
@@ -1809,8 +1810,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n",
- status);
+ dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
+ ice_stat_str(status));
goto clear_xdp_rings;
}
ice_vsi_assign_bpf_prog(vsi, prog);
@@ -1898,6 +1899,9 @@ free_qmap:
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->num_txq;
+ /* change number of XDP Tx queues to 0 */
+ vsi->num_xdp_txq = 0;
+
return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
}
@@ -1931,7 +1935,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
if (!ice_is_xdp_ena_vsi(vsi) && prog) {
- vsi->num_xdp_txq = vsi->alloc_txq;
+ vsi->num_xdp_txq = vsi->alloc_rxq;
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
@@ -2137,10 +2141,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
ret = IRQ_HANDLED;
- if (!test_bit(__ICE_DOWN, pf->state)) {
- ice_service_task_schedule(pf);
- ice_irq_dynamic_ena(hw, NULL, NULL);
- }
+ ice_service_task_schedule(pf);
+ ice_irq_dynamic_ena(hw, NULL, NULL);
return ret;
}
@@ -2247,7 +2249,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
return oicr_idx;
pf->num_avail_sw_msix -= 1;
- pf->oicr_idx = oicr_idx;
+ pf->oicr_idx = (u16)oicr_idx;
err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
@@ -2331,6 +2333,7 @@ static void ice_set_netdev_features(struct net_device *netdev)
dflt_features = NETIF_F_SG |
NETIF_F_HIGHDMA |
+ NETIF_F_NTUPLE |
NETIF_F_RXHASH;
csumo_features = NETIF_F_RXCSUM |
@@ -2342,13 +2345,27 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- tso_features = NETIF_F_TSO |
+ tso_features = NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_L4;
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
/* set features that user can change */
netdev->hw_features = dflt_features | csumo_features |
vlano_features | tso_features;
+ /* add support for HW_CSUM on packets with MPLS header */
+ netdev->mpls_features = NETIF_F_HW_CSUM;
+
/* enable features */
netdev->features |= netdev->hw_features;
/* encap and VLAN devices inherit default, csumo and tso features */
@@ -2411,7 +2428,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
err = register_netdev(vsi->netdev);
if (err)
- goto err_destroy_devlink_port;
+ goto err_free_netdev;
devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
@@ -2422,9 +2439,11 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
return 0;
+err_free_netdev:
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
err_destroy_devlink_port:
ice_devlink_destroy_port(pf);
-
return err;
}
@@ -2457,6 +2476,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
+ * ice_ctrl_vsi_setup - Set up a control VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ *
+ * Returns pointer to the newly allocated VSI software struct on success,
+ * otherwise NULL.
+ */
+static struct ice_vsi *
+ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
+}
+
+/**
* ice_lb_vsi_setup - Set up a loopback VSI
* @pf: board private structure
* @pi: pointer to the port_info instance
@@ -2509,7 +2542,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
- ret = ice_vsi_add_vlan(vsi, vid);
+ ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!ret) {
vsi->vlan_ena = true;
set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
@@ -2594,12 +2627,22 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
*/
ice_napi_add(vsi);
+ status = ice_set_cpu_rx_rmap(vsi);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
+ vsi->vsi_num, status);
+ status = -EINVAL;
+ goto unroll_napi_add;
+ }
status = ice_init_mac_fltr(pf);
if (status)
- goto unroll_napi_add;
+ goto free_cpu_rx_map;
return status;
+free_cpu_rx_map:
+ ice_free_cpu_rx_rmap(vsi);
+
unroll_napi_add:
if (vsi) {
ice_napi_del(vsi);
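ice_set_cpu_rx_rmap() is new in this series (the aRFS side lives in ice_arfs.c). A minimal sketch of what a CPU Rx rmap setup conventionally looks like under CONFIG_RFS_ACCEL, using the kernel cpu_rmap API; this is an illustration under stated assumptions, not necessarily the driver's exact implementation:

	static int example_set_cpu_rx_rmap(struct ice_vsi *vsi)
	{
		struct net_device *netdev = vsi->netdev;
		struct ice_pf *pf = vsi->back;
		int i;

		/* one reverse-map slot per queue vector */
		netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
		if (!netdev->rx_cpu_rmap)
			return -ENOMEM;

		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq = pf->msix_entries[vsi->base_vector + i].vector;

			if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, irq)) {
				free_irq_cpu_rmap(netdev->rx_cpu_rmap);
				netdev->rx_cpu_rmap = NULL;
				return -EINVAL;
			}
		}

		return 0;
	}

Note that irq_cpu_rmap_add() registers its own IRQ affinity notifier, which is why the ice_vsi_req_irq_msix() hunk above skips the driver's private notifier when CONFIG_RFS_ACCEL is enabled: an IRQ can only have one affinity notifier.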
@@ -2630,7 +2673,8 @@ unroll_vsi_setup:
static u16
ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
{
- u16 count = 0, bit;
+ unsigned long bit;
+ u16 count = 0;
mutex_lock(lock);
for_each_clear_bit(bit, pf_qmap, size)
@@ -2703,6 +2747,23 @@ static void ice_set_pf_caps(struct ice_pf *pf)
if (func_caps->common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
+ clear_bit(ICE_FLAG_FD_ENA, pf->flags);
+ if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
+ u16 unused;
+
+ /* ctrl_vsi_idx will be set to a valid value when flow director
+ * is set up by ice_init_fdir
+ */
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ set_bit(ICE_FLAG_FD_ENA, pf->flags);
+ /* force guaranteed filter pool for PF */
+ ice_alloc_fd_guar_item(&pf->hw, &unused,
+ func_caps->fd_fltr_guar);
+ /* force shared filter pool for PF */
+ ice_alloc_fd_shrd_item(&pf->hw, &unused,
+ func_caps->fd_fltr_best_effort);
+ }
+
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
@@ -2769,6 +2830,15 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_budget += needed;
v_left -= needed;
+ /* reserve one vector for flow director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ needed = ICE_FDIR_MSIX;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ v_budget += needed;
+ v_left -= needed;
+ }
+
pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
@@ -2793,8 +2863,10 @@ static int ice_ena_msix_range(struct ice_pf *pf)
if (v_actual < v_budget) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
-/* 2 vectors for LAN (traffic + OICR) */
+/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
#define ICE_MIN_LAN_VECS 2
+#define ICE_MIN_RDMA_VECS 2
+#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
if (v_actual < ICE_MIN_LAN_VECS) {
/* error if we can't get minimum vectors */
@@ -2869,8 +2941,8 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/* populate SW interrupts pool with number of OS granted IRQs. */
- pf->num_avail_sw_msix = vectors;
- pf->irq_tracker->num_entries = vectors;
+ pf->num_avail_sw_msix = (u16)vectors;
+ pf->irq_tracker->num_entries = (u16)vectors;
pf->irq_tracker->end = pf->irq_tracker->num_entries;
return 0;
@@ -2902,9 +2974,9 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
}
if (new_tx)
- vsi->req_txq = new_tx;
+ vsi->req_txq = (u16)new_tx;
if (new_rx)
- vsi->req_rxq = new_rx;
+ vsi->req_rxq = (u16)new_rx;
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
@@ -2985,6 +3057,9 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
*status = ICE_ERR_NOT_SUPPORTED;
}
break;
+ case ICE_ERR_FW_DDP_MISMATCH:
+ dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
+ break;
case ICE_ERR_BUF_TOO_SHORT:
case ICE_ERR_CFG:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
@@ -3013,6 +3088,9 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
case ICE_AQ_RC_EBADMAN:
case ICE_AQ_RC_EBADBUF:
dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ /* poll for reset to complete */
+ if (ice_check_reset(hw))
+ dev_err(dev, "Error resetting device. Please reload the driver\n");
return;
default:
break;
@@ -3100,6 +3178,53 @@ static enum ice_status ice_send_version(struct ice_pf *pf)
}
/**
+ * ice_init_fdir - Initialize flow director VSI and configuration
+ * @pf: pointer to the PF instance
+ *
+ * returns 0 on success, negative on error
+ */
+static int ice_init_fdir(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *ctrl_vsi;
+ int err;
+
+ /* Sideband Flow Director needs a control VSI.
+ * Allocate it and store it in the PF.
+ */
+ ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
+ if (!ctrl_vsi) {
+ dev_dbg(dev, "could not create control VSI\n");
+ return -ENOMEM;
+ }
+
+ err = ice_vsi_open_ctrl(ctrl_vsi);
+ if (err) {
+ dev_dbg(dev, "could not open control VSI\n");
+ goto err_vsi_open;
+ }
+
+ mutex_init(&pf->hw.fdir_fltr_lock);
+
+ err = ice_fdir_create_dflt_rules(pf);
+ if (err)
+ goto err_fdir_rule;
+
+ return 0;
+
+err_fdir_rule:
+ ice_fdir_release_flows(&pf->hw);
+ ice_vsi_close(ctrl_vsi);
+err_vsi_open:
+ ice_vsi_release(ctrl_vsi);
+ if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
+ pf->vsi[pf->ctrl_vsi_idx] = NULL;
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ }
+ return err;
+}
+
+/**
* ice_get_opt_fw_name - return optional firmware file name or NULL
* @pf: pointer to the PF instance
*/
@@ -3123,7 +3248,7 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
if (!opt_fw_filename)
return NULL;
- snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg",
+ snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
ICE_DDP_PKG_PATH, dsn);
return opt_fw_filename;
@@ -3295,12 +3420,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_interrupt_unroll;
+ goto err_init_vsi_unroll;
}
- /* Driver is mostly up */
- clear_bit(__ICE_DOWN, pf->state);
-
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
* the misc functionality and queue processing is combined in
@@ -3356,12 +3478,16 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_verify_cacheline_size(pf);
- /* If no DDP driven features have to be setup, return here */
+ /* If no DDP driven features have to be set up, we are done with probe */
if (ice_is_safe_mode(pf))
- return 0;
+ goto probe_done;
/* initialize DDP driven features */
+ /* Note: Flow director init failure is non-fatal to load */
+ if (ice_init_fdir(pf))
+ dev_err(dev, "could not initialize flow director\n");
+
/* Note: DCB init failure is non-fatal to load */
if (ice_init_pf_dcb(pf, false)) {
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
@@ -3373,6 +3499,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* print PCI link speed and width */
pcie_print_link_status(pf->pdev);
+probe_done:
+ /* ready to go, so clear down state bit */
+ clear_bit(__ICE_DOWN, pf->state);
return 0;
err_alloc_sw_unroll:
@@ -3384,6 +3513,7 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
+err_init_vsi_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
@@ -3421,6 +3551,9 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
+ mutex_destroy(&pf->hw.fdir_fltr_lock);
+ if (!ice_is_safe_mode(pf))
+ ice_remove_arfs(pf);
ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
@@ -3705,25 +3838,24 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
- /* When we change the MAC address we also have to change the MAC address
- * based filter rules that were created previously for the old MAC
- * address. So first, we remove the old filter rule using ice_remove_mac
- * and then create a new filter rule using ice_add_mac via
- * ice_vsi_cfg_mac_fltr function call for both add and/or remove
- * filters.
- */
- status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false);
- if (status) {
+ /* Clean up old MAC filter. Not an error if old filter doesn't exist */
+ status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
+ if (status && status != ICE_ERR_DOES_NOT_EXIST) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
}
- status = ice_vsi_cfg_mac_fltr(vsi, mac, true);
- if (status) {
- err = -EADDRNOTAVAIL;
- goto err_update_filters;
+ /* Add filter for new MAC. If filter exists, just return success */
+ status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
+ return 0;
}
+ /* error if the new filter addition failed */
+ if (status)
+ err = -EADDRNOTAVAIL;
+
err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
@@ -3740,8 +3872,8 @@ err_update_filters:
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
- mac, status);
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
+ mac, ice_stat_str(status));
}
return 0;
}
@@ -3805,8 +3937,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
if (status) {
- netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
- status);
+ netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
+ ice_stat_str(status));
return -EIO;
}
@@ -3938,6 +4070,16 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
ret = ice_cfg_vlan_pruning(vsi, false, false);
+ if ((features & NETIF_F_NTUPLE) &&
+ !(netdev->features & NETIF_F_NTUPLE)) {
+ ice_vsi_manage_fdir(vsi, true);
+ ice_init_arfs(vsi);
+ } else if (!(features & NETIF_F_NTUPLE) &&
+ (netdev->features & NETIF_F_NTUPLE)) {
+ ice_vsi_manage_fdir(vsi, false);
+ ice_clear_arfs(vsi);
+ }
+
return ret;
}
@@ -4084,6 +4226,33 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
}
/**
+ * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
+ * @vsi: the VSI to be updated
+ * @rings: rings to work on
+ * @count: number of rings
+ */
+static void
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
+ u16 count)
+{
+ struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+ u16 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_ring *ring;
+ u64 pkts, bytes;
+
+ ring = READ_ONCE(rings[i]);
+ ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ vsi_stats->tx_packets += pkts;
+ vsi_stats->tx_bytes += bytes;
+ vsi->tx_restart += ring->tx_stats.restart_q;
+ vsi->tx_busy += ring->tx_stats.tx_busy;
+ vsi->tx_linearize += ring->tx_stats.tx_linearize;
+ }
+}
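ice_fetch_u64_stats_per_ring() (context above) presumably wraps the standard u64_stats seqcount retry loop; a hedged sketch of that pattern:

	static void
	example_fetch_u64_stats(struct ice_ring *ring, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			*pkts = ring->stats.pkts;
			*bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}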
+
+/**
* ice_update_vsi_ring_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
@@ -4110,15 +4279,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_lock();
/* update Tx rings counters */
- ice_for_each_txq(vsi, i) {
- ring = READ_ONCE(vsi->tx_rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
- vsi_stats->tx_packets += pkts;
- vsi_stats->tx_bytes += bytes;
- vsi->tx_restart += ring->tx_stats.restart_q;
- vsi->tx_busy += ring->tx_stats.tx_busy;
- vsi->tx_linearize += ring->tx_stats.tx_linearize;
- }
+ ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
@@ -4130,6 +4291,11 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
}
+ /* update XDP Tx rings counters */
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+ vsi->num_xdp_txq);
+
rcu_read_unlock();
}
@@ -4162,7 +4328,13 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF) {
cur_ns->rx_crc_errors = pf->stats.crc_errors;
cur_ns->rx_errors = pf->stats.crc_errors +
- pf->stats.illegal_bytes;
+ pf->stats.illegal_bytes +
+ pf->stats.rx_len_errors +
+ pf->stats.rx_undersize +
+ pf->hw_csum_rx_error +
+ pf->stats.rx_jabber +
+ pf->stats.rx_fragments +
+ pf->stats.rx_oversize;
cur_ns->rx_length_errors = pf->stats.rx_len_errors;
/* record drops from the port level */
cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
@@ -4177,6 +4349,7 @@ void ice_update_pf_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
+ u16 fd_ctr_base;
u8 port;
port = hw->port_info->lport;
@@ -4265,6 +4438,12 @@ void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
&prev_ps->tx_size_big, &cur_ps->tx_size_big);
+ fd_ctr_base = hw->fd_ctr_base;
+
+ ice_stat_update40(hw,
+ GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
+ pf->stat_prev_loaded, &prev_ps->fd_sb_match,
+ &cur_ps->fd_sb_match);
ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
&prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
@@ -4308,6 +4487,8 @@ void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
&prev_ps->rx_jabber, &cur_ps->rx_jabber);
+ cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
+
pf->stat_prev_loaded = true;
}
@@ -4493,6 +4674,62 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_open_ctrl - open control VSI for use
+ * @vsi: the VSI to open
+ *
+ * Initialization of the Control VSI
+ *
+ * Returns 0 on success, negative value on error
+ */
+int ice_vsi_open_ctrl(struct ice_vsi *vsi)
+{
+ char int_name[ICE_INT_NAME_STR_LEN];
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ /* allocate descriptors */
+ err = ice_vsi_setup_tx_rings(vsi);
+ if (err)
+ goto err_setup_tx;
+
+ err = ice_vsi_setup_rx_rings(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ err = ice_vsi_cfg(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
+ dev_driver_string(dev), dev_name(dev));
+ err = ice_vsi_req_irq_msix(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ ice_vsi_cfg_msix(vsi);
+
+ err = ice_vsi_start_all_rx_rings(vsi);
+ if (err)
+ goto err_up_complete;
+
+ clear_bit(__ICE_DOWN, vsi->state);
+ ice_vsi_ena_irq(vsi);
+
+ return 0;
+
+err_up_complete:
+ ice_down(vsi);
+err_setup_rx:
+ ice_vsi_free_rx_rings(vsi);
+err_setup_tx:
+ ice_vsi_free_tx_rings(vsi);
+
+ return err;
+}
+
+/**
* ice_vsi_open - Called when a network interface is made active
* @vsi: the VSI to open
*
@@ -4604,8 +4841,9 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
- dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n",
- status, vsi->idx, ice_vsi_type_str(type));
+ dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
+ ice_stat_str(status), vsi->idx,
+ ice_vsi_type_str(type));
return -EIO;
}
@@ -4659,6 +4897,11 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
* ice_rebuild - rebuild after reset
* @pf: PF to rebuild
* @reset_type: type of reset
+ *
+ * Do not rebuild VF VSIs in this flow because that is already handled via
+ * ice_reset_all_vfs(). This is because the requirements for resetting a VF
+ * after a PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we
+ * don't want to reset/rebuild all the VF VSIs twice.
*/
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
@@ -4674,7 +4917,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_init_all_ctrlq(hw);
if (ret) {
- dev_err(dev, "control queues init failed %d\n", ret);
+ dev_err(dev, "control queues init failed %s\n",
+ ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4690,7 +4934,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_clear_pf_cfg(hw);
if (ret) {
- dev_err(dev, "clear PF configuration failed %d\n", ret);
+ dev_err(dev, "clear PF configuration failed %s\n",
+ ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4704,7 +4949,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_get_caps(hw);
if (ret) {
- dev_err(dev, "ice_get_caps failed %d\n", ret);
+ dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
+ goto err_init_ctrlq;
+ }
+
+ ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (ret) {
+ dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4723,6 +4974,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_sched_init_port;
}
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
+ if (!rd32(hw, PFQF_FD_SIZE)) {
+ u16 unused, guar, b_effort;
+
+ guar = hw->func_caps.fd_fltr_guar;
+ b_effort = hw->func_caps.fd_fltr_best_effort;
+
+ /* force guaranteed filter pool for PF */
+ ice_alloc_fd_guar_item(hw, &unused, guar);
+ /* force shared filter pool for PF */
+ ice_alloc_fd_shrd_item(hw, &unused, b_effort);
+ }
+ }
+
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
@@ -4733,12 +4999,22 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
- err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF);
+ /* If Flow Director is active */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
if (err) {
- dev_err(dev, "VF VSI rebuild failed: %d\n", err);
+ dev_err(dev, "control VSI rebuild failed: %d\n", err);
goto err_vsi_rebuild;
}
+
+ /* replay HW Flow Director recipes */
+ if (hw->fdir_prof)
+ ice_fdir_replay_flows(hw);
+
+ /* replay Flow Director filters */
+ ice_fdir_replay_fltrs(pf);
+
+ ice_rebuild_arfs(pf);
}
ice_update_pf_netdev_link(pf);
@@ -4746,8 +5022,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* tell the firmware we are up */
ret = ice_send_version(pf);
if (ret) {
- dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
- ret);
+ dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
+ ice_stat_str(ret));
goto err_vsi_rebuild;
}
@@ -4795,7 +5071,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_pf *pf = vsi->back;
u8 count = 0;
- if (new_mtu == netdev->mtu) {
+ if (new_mtu == (int)netdev->mtu) {
netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
return 0;
}
@@ -4810,11 +5086,11 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
}
- if (new_mtu < netdev->min_mtu) {
+ if (new_mtu < (int)netdev->min_mtu) {
netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
netdev->min_mtu);
return -EINVAL;
- } else if (new_mtu > netdev->max_mtu) {
+ } else if (new_mtu > (int)netdev->max_mtu) {
netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
netdev->max_mtu);
return -EINVAL;
@@ -4835,7 +5111,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- netdev->mtu = new_mtu;
+ netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
@@ -4859,6 +5135,118 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * ice_aq_str - convert AQ err code to a string
+ * @aq_err: the AQ error code to convert
+ */
+const char *ice_aq_str(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_OK:
+ return "OK";
+ case ICE_AQ_RC_EPERM:
+ return "ICE_AQ_RC_EPERM";
+ case ICE_AQ_RC_ENOENT:
+ return "ICE_AQ_RC_ENOENT";
+ case ICE_AQ_RC_ENOMEM:
+ return "ICE_AQ_RC_ENOMEM";
+ case ICE_AQ_RC_EBUSY:
+ return "ICE_AQ_RC_EBUSY";
+ case ICE_AQ_RC_EEXIST:
+ return "ICE_AQ_RC_EEXIST";
+ case ICE_AQ_RC_EINVAL:
+ return "ICE_AQ_RC_EINVAL";
+ case ICE_AQ_RC_ENOSPC:
+ return "ICE_AQ_RC_ENOSPC";
+ case ICE_AQ_RC_ENOSYS:
+ return "ICE_AQ_RC_ENOSYS";
+ case ICE_AQ_RC_EMODE:
+ return "ICE_AQ_RC_EMODE";
+ case ICE_AQ_RC_ENOSEC:
+ return "ICE_AQ_RC_ENOSEC";
+ case ICE_AQ_RC_EBADSIG:
+ return "ICE_AQ_RC_EBADSIG";
+ case ICE_AQ_RC_ESVN:
+ return "ICE_AQ_RC_ESVN";
+ case ICE_AQ_RC_EBADMAN:
+ return "ICE_AQ_RC_EBADMAN";
+ case ICE_AQ_RC_EBADBUF:
+ return "ICE_AQ_RC_EBADBUF";
+ }
+
+ return "ICE_AQ_RC_UNKNOWN";
+}
+
+/**
+ * ice_stat_str - convert status err code to a string
+ * @stat_err: the status error code to convert
+ */
+const char *ice_stat_str(enum ice_status stat_err)
+{
+ switch (stat_err) {
+ case ICE_SUCCESS:
+ return "OK";
+ case ICE_ERR_PARAM:
+ return "ICE_ERR_PARAM";
+ case ICE_ERR_NOT_IMPL:
+ return "ICE_ERR_NOT_IMPL";
+ case ICE_ERR_NOT_READY:
+ return "ICE_ERR_NOT_READY";
+ case ICE_ERR_NOT_SUPPORTED:
+ return "ICE_ERR_NOT_SUPPORTED";
+ case ICE_ERR_BAD_PTR:
+ return "ICE_ERR_BAD_PTR";
+ case ICE_ERR_INVAL_SIZE:
+ return "ICE_ERR_INVAL_SIZE";
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ return "ICE_ERR_DEVICE_NOT_SUPPORTED";
+ case ICE_ERR_RESET_FAILED:
+ return "ICE_ERR_RESET_FAILED";
+ case ICE_ERR_FW_API_VER:
+ return "ICE_ERR_FW_API_VER";
+ case ICE_ERR_NO_MEMORY:
+ return "ICE_ERR_NO_MEMORY";
+ case ICE_ERR_CFG:
+ return "ICE_ERR_CFG";
+ case ICE_ERR_OUT_OF_RANGE:
+ return "ICE_ERR_OUT_OF_RANGE";
+ case ICE_ERR_ALREADY_EXISTS:
+ return "ICE_ERR_ALREADY_EXISTS";
+ case ICE_ERR_NVM_CHECKSUM:
+ return "ICE_ERR_NVM_CHECKSUM";
+ case ICE_ERR_BUF_TOO_SHORT:
+ return "ICE_ERR_BUF_TOO_SHORT";
+ case ICE_ERR_NVM_BLANK_MODE:
+ return "ICE_ERR_NVM_BLANK_MODE";
+ case ICE_ERR_IN_USE:
+ return "ICE_ERR_IN_USE";
+ case ICE_ERR_MAX_LIMIT:
+ return "ICE_ERR_MAX_LIMIT";
+ case ICE_ERR_RESET_ONGOING:
+ return "ICE_ERR_RESET_ONGOING";
+ case ICE_ERR_HW_TABLE:
+ return "ICE_ERR_HW_TABLE";
+ case ICE_ERR_DOES_NOT_EXIST:
+ return "ICE_ERR_DOES_NOT_EXIST";
+ case ICE_ERR_FW_DDP_MISMATCH:
+ return "ICE_ERR_FW_DDP_MISMATCH";
+ case ICE_ERR_AQ_ERROR:
+ return "ICE_ERR_AQ_ERROR";
+ case ICE_ERR_AQ_TIMEOUT:
+ return "ICE_ERR_AQ_TIMEOUT";
+ case ICE_ERR_AQ_FULL:
+ return "ICE_ERR_AQ_FULL";
+ case ICE_ERR_AQ_NO_WORK:
+ return "ICE_ERR_AQ_NO_WORK";
+ case ICE_ERR_AQ_EMPTY:
+ return "ICE_ERR_AQ_EMPTY";
+ case ICE_ERR_AQ_FW_CRITICAL:
+ return "ICE_ERR_AQ_FW_CRITICAL";
+ }
+
+ return "ICE_ERR_UNKNOWN";
+}
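Together, these two helpers replace raw numeric codes in log messages throughout this patch. The calling pattern, as the RSS hunks below adopt it:

	status = ice_aq_set_rss_key(hw, vsi->idx, buf);
	if (status)
		dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
			ice_stat_str(status),
			ice_aq_str(hw->adminq.sq_last_status));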
+
+/**
* ice_set_rss - Set RSS keys and lut
* @vsi: Pointer to VSI structure
* @seed: RSS hash seed
@@ -4882,8 +5270,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4892,8 +5281,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4924,8 +5314,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4934,8 +5325,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -5002,8 +5394,9 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
- bmode, status, hw->adminq.sq_last_status);
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
+ bmode, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -5072,8 +5465,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
*/
status = ice_update_sw_rule_bridge_mode(hw);
if (status) {
- netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
- mode, status, hw->adminq.sq_last_status);
+ netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
+ mode, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
return -EIO;
@@ -5100,6 +5494,16 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
pf->tx_timeout_count++;
+ /* Check if PFC is enabled for the TC to which the queue belongs.
+ * If yes, then the Tx timeout is not caused by a hung queue; no
+ * need to reset and rebuild.
+ */
+ if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
+ dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
+ txqueue);
+ return;
+ }
+
/* now that we have an index, find the tx_ring struct */
for (i = 0; i < vsi->num_txq; i++)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
@@ -5158,6 +5562,70 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
+ * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ */
+static void
+ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ u16 port = ntohs(ti->port);
+ enum ice_status status;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ tnl_type = TNL_VXLAN;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ tnl_type = TNL_GENEVE;
+ break;
+ default:
+ netdev_err(netdev, "Unknown tunnel type\n");
+ return;
+ }
+
+ status = ice_create_tunnel(&pf->hw, tnl_type, port);
+ if (status == ICE_ERR_OUT_OF_RANGE)
+ netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
+ port);
+ else if (status)
+ netdev_err(netdev, "Error adding UDP tunnel - %s\n",
+ ice_stat_str(status));
+}
+
+/**
+ * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ */
+static void
+ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 port = ntohs(ti->port);
+ enum ice_status status;
+ bool retval;
+
+ retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
+ if (!retval) {
+ netdev_info(netdev, "port %d not found in UDP tunnels list\n",
+ port);
+ return;
+ }
+
+ status = ice_destroy_tunnel(&pf->hw, port, false);
+ if (status)
+ netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
+ port);
+}
+
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -5173,14 +5641,20 @@ int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
int err;
- if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
+ if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
return -EIO;
}
+ if (test_bit(__ICE_DOWN, pf->state)) {
+ netdev_err(netdev, "device is not ready yet\n");
+ return -EBUSY;
+ }
+
netif_carrier_off(netdev);
pi = vsi->port_info;
@@ -5213,6 +5687,10 @@ int ice_open(struct net_device *netdev)
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
+
+ /* Update existing tunnels information */
+ udp_tunnel_get_rx_info(netdev);
+
return err;
}
@@ -5263,21 +5741,21 @@ ice_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
len = skb_network_header(skb) - skb->data;
- if (len & ~(ICE_TXD_MACLEN_MAX))
+ if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_transport_header(skb) - skb_network_header(skb);
- if (len & ~(ICE_TXD_IPLEN_MAX))
+ if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len & ~(ICE_TXD_L4LEN_MAX))
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_inner_transport_header(skb) -
skb_inner_network_header(skb);
- if (len & ~(ICE_TXD_IPLEN_MAX))
+ if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
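The rewritten checks split the constraint in two: the header length must fit the descriptor field, and it must be even, because the hardware length fields count 2-byte words (compare the (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S computation in the ice_tx_csum() hunk below). A hedged illustration with a hypothetical length:

	unsigned int len = 15;	/* hypothetical odd header length */

	/* rejected: len & 0x1 is nonzero, since an odd byte count cannot
	 * be expressed in a field that counts 2-byte words
	 */
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		features &= ~NETIF_F_GSO_MASK;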
@@ -5322,8 +5800,13 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bridge_setlink = ice_bridge_setlink,
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = ice_rx_flow_steer,
+#endif
.ndo_tx_timeout = ice_tx_timeout,
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_udp_tunnel_add = ice_udp_tunnel_add,
+ .ndo_udp_tunnel_del = ice_udp_tunnel_del,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 8beb675d676b..b049c1c30c88 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -172,7 +172,8 @@ void ice_release_nvm(struct ice_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+static enum ice_status
+ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
@@ -196,7 +197,7 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-enum ice_status
+static enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
@@ -367,6 +368,87 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
}
/**
+ * ice_get_netlist_ver_info - get the netlist version information
+ * @hw: pointer to the HW struct
+ *
+ * Get the netlist version information
+ */
+static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
+{
+ struct ice_netlist_ver_info *ver = &hw->netlist_ver;
+ enum ice_status ret;
+ u32 id_blk_start;
+ __le16 raw_data;
+ u16 data, i;
+ u16 *buff;
+
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret)
+ return ret;
+ buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
+ GFP_KERNEL);
+ if (!buff) {
+ ret = ICE_ERR_NO_MEMORY;
+ goto exit_no_mem;
+ }
+
+ /* read module length */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+
+ data = le16_to_cpu(raw_data);
+ /* exit if length is 0 */
+ if (!data)
+ goto exit_error;
+
+ /* read node count */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+ data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
+
+ /* netlist ID block starts from offset 4 + node count * 2 */
+ id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
+
+ /* read the entire netlist ID block */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ id_blk_start * 2,
+ ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
+ false, NULL);
+ if (ret)
+ goto exit_error;
+
+ for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
+ buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);
+
+ ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
+ ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
+ ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
+ ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
+ /* Read the leftmost 4 bytes of the SHA hash */
+ ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
+
+exit_error:
+ kfree(buff);
+exit_no_mem:
+ ice_release_nvm(hw);
+ return ret;
+}
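The offset arithmetic here is in 16-bit words, converted to bytes at the AQ call boundary. A worked example, assuming ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET is the 4 suggested by the comment above:

	/* hypothetical: the node count word reads 10 */
	id_blk_start = 4 + 10 * 2;	/* word offset 24 */
	/* ice_aq_read_nvm() takes a byte offset, hence id_blk_start * 2 == 48 */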
+
+/**
* ice_discover_flash_size - Discover the available flash size.
* @hw: pointer to the HW struct
*
@@ -515,6 +597,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return status;
}
+ /* read the netlist version information */
+ status = ice_get_netlist_ver_info(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 999f273ba6ad..165eda07b93d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -11,10 +11,6 @@ enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
enum ice_status
-ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
- u16 module_type);
-enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 71647566964e..7f4c1ec1eff2 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -12,12 +12,15 @@
*/
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
+ ICE_PROT_MAC_OF_OR_S = 1,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
ICE_PROT_TCP_IL = 49,
+ ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
+ ICE_PROT_GRE_OF = 64,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index eae707ddf8e8..0475134295e4 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1714,8 +1714,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
* This function removes single aggregator VSI info entry from
* aggregator list.
*/
-static void
-ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp;
@@ -1917,7 +1916,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
*/
static enum ice_status
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
- enum ice_rl_type rl_type, u8 bw_alloc)
+ enum ice_rl_type rl_type, u16 bw_alloc)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
@@ -1947,8 +1946,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
*
* Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
@@ -1967,8 +1965,7 @@ ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
*
* Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
@@ -1993,8 +1990,7 @@ ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
*
* Save or clear shared bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index a9a8bc3aca42..4028c6365172 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -27,6 +27,8 @@ enum ice_status {
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_HW_TABLE = -19,
+ ICE_ERR_FW_DDP_MISMATCH = -20,
+
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
@@ -35,6 +37,7 @@ enum ice_status {
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
+ ICE_ERR_AQ_FW_CRITICAL = -105,
};
#endif /* _ICE_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 51825a203e35..ff7d16ac693e 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -593,8 +593,8 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
ICE_AQC_GET_SW_CONF_RESP_IS_VF)
is_vf = true;
- res_type = le16_to_cpu(ele->vsi_port_num) >>
- ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
+ res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
+ ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
/* FW VSI is not needed. Just continue. */
@@ -1612,18 +1612,17 @@ exit:
* check for duplicates in this case, removing duplicates from a given
* list should be taken care of in the caller of this function.
*/
-enum ice_status
-ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
+enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
struct list_head *rule_head;
- u16 elem_sent, total_elem_left;
+ u16 total_elem_left, s_rule_size;
struct ice_switch_info *sw;
struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
u16 num_unicast = 0;
- u16 s_rule_size;
+ u8 elem_sent;
if (!m_list || !hw)
return ICE_ERR_PARAM;
@@ -1707,8 +1706,8 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
total_elem_left -= elem_sent) {
struct ice_aqc_sw_rules_elem *entry = r_iter;
- elem_sent = min(total_elem_left,
- (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
+ elem_sent = min_t(u8, total_elem_left,
+ (ICE_AQ_MAX_BUF_LEN / s_rule_size));
status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
elem_sent, ice_aqc_opc_add_sw_rules,
NULL);
@@ -1914,8 +1913,7 @@ exit:
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status
-ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
+enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr;
@@ -2145,8 +2143,7 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
* the entries passed into m_list were added previously. It will not attempt to
* do a partial remove of entries that were found.
*/
-enum ice_status
-ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
+enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
struct mutex *rule_lock; /* Lock to protect filter rule list */
@@ -2678,6 +2675,81 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
}
/**
+ * ice_alloc_res_cntr - allocating resource counter
+ * @hw: pointer to the hardware structure
+ * @type: type of resource
+ * @alloc_shared: if set it is shared else dedicated
+ * @num_items: number of entries requested for FD resource type
+ * @counter_id: counter index returned by AQ call
+ */
+enum ice_status
+ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 *counter_id)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ /* Allocate resource */
+ buf_len = sizeof(*buf);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->num_elems = cpu_to_le16(num_items);
+ buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) | alloc_shared);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (status)
+ goto exit;
+
+ *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
+
+exit:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * ice_free_res_cntr - free resource counter
+ * @hw: pointer to the hardware structure
+ * @type: type of resource
+ * @alloc_shared: if set it is shared else dedicated
+ * @num_items: number of entries to be freed for FD resource type
+ * @counter_id: counter ID resource which needs to be freed
+ */
+enum ice_status
+ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 counter_id)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ /* Free resource */
+ buf_len = sizeof(*buf);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->num_elems = cpu_to_le16(num_items);
+ buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) | alloc_shared);
+ buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW,
+ "counter resource could not be freed\n");
+
+ kfree(buf);
+ return status;
+}
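The ice_alloc_fd_guar_item()/ice_alloc_fd_shrd_item() calls in the ice_main.c hunks can be thin wrappers over these counter routines. A hedged sketch; the resource-type and flag constant names are assumptions, not taken from this diff:

	enum ice_status
	example_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
	{
		/* dedicated (guaranteed) flow director filter entries */
		return ice_alloc_res_cntr(hw,
					  ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES,
					  ICE_AQC_RES_TYPE_FLAG_DEDICATED,
					  num_fltr, cntr_id);
	}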
+
+/**
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index fa14b9545dab..8b4f9d35c860 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -208,6 +208,13 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw);
/* Switch config */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
+enum ice_status
+ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 *counter_id);
+enum ice_status
+ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 counter_id);
+
/* Switch/bridge related commands */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index f67e8362958c..abdb137c8bb7 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -15,6 +15,90 @@
#define ICE_RX_HDR_SIZE 256
+#define FDIR_DESC_RXDID 0x40
+#define ICE_FDIR_CLEAN_DELAY 10
+
+/**
+ * ice_prgm_fdir_fltr - Program a Flow Director filter
+ * @vsi: VSI to send dummy packet
+ * @fdir_desc: flow director descriptor
+ * @raw_packet: allocated buffer for flow director
+ */
+int
+ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
+ u8 *raw_packet)
+{
+ struct ice_tx_buf *tx_buf, *first;
+ struct ice_fltr_desc *f_desc;
+ struct ice_tx_desc *tx_desc;
+ struct ice_ring *tx_ring;
+ struct device *dev;
+ dma_addr_t dma;
+ u32 td_cmd;
+ u16 i;
+
+ /* VSI and Tx ring */
+ if (!vsi)
+ return -ENOENT;
+ tx_ring = vsi->tx_rings[0];
+ if (!tx_ring || !tx_ring->desc)
+ return -ENOENT;
+ dev = tx_ring->dev;
+
+ /* we are using two descriptors to add/del a filter and we can wait */
+ for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
+ if (!i)
+ return -EAGAIN;
+ msleep_interruptible(1);
+ }
+
+ dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma))
+ return -EINVAL;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ first = &tx_ring->tx_buf[i];
+ f_desc = ICE_TX_FDIRDESC(tx_ring, i);
+ memcpy(f_desc, fdir_desc, sizeof(*f_desc));
+
+ i++;
+ i = (i < tx_ring->count) ? i : 0;
+ tx_desc = ICE_TX_DESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_buf[i];
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
+ ICE_TX_DESC_CMD_RE;
+
+ tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
+ tx_buf->raw_buf = raw_packet;
+
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
+
+ /* Force memory write to complete before letting h/w know
+ * there are new descriptors to fetch.
+ */
+ wmb();
+
+ /* mark the data descriptor to be watched */
+ first->next_to_watch = tx_desc;
+
+ writel(tx_ring->next_to_use, tx_ring->tail);
+
+ return 0;
+}
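A caller is expected to build the filter descriptor and a raw dummy packet up front and hand both to this routine; the Tx cleanup path (next hunk) later frees the ICE_TX_FLAGS_DUMMY_PKT buffer via devm_kfree(). A hedged usage sketch (ice_fdir_get_prgm_desc() and ice_fdir_get_gen_prgm_pkt() exist in ice_fdir.c per the diffstat, but their exact signatures are assumptions here):

	struct ice_fltr_desc desc = { 0 };
	u8 *pkt;
	int err;

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, true);		/* add */
	err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, false);
	if (!err)
		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);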
+
/**
* ice_unmap_and_free_tx_buf - Release a Tx buffer
* @ring: the ring that owns the buffer
@@ -24,7 +108,9 @@ static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
- if (ice_ring_is_xdp(ring))
+ if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ devm_kfree(ring->dev, tx_buf->raw_buf);
+ else if (ice_ring_is_xdp(ring))
page_frag_free(tx_buf->raw_buf);
else
dev_kfree_skb_any(tx_buf->skb);
@@ -423,6 +509,22 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
return 0;
}
+static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ice_rx_offset(rx_ring) ?
+ SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
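A short worked example of the helper's two regimes:

	/* PAGE_SIZE == 4096: truesize is always half an Rx page (e.g. 2048),
	 * independent of the frame size, matching the half-page split used
	 * elsewhere in this file.
	 * PAGE_SIZE >= 8192 with a nonzero Rx offset: for a 1500-byte frame,
	 * truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + 1500) +
	 *            SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
	 */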
+
/**
* ice_run_xdp - Executes an XDP program on initialized xdp_buff
* @rx_ring: Rx ring
@@ -583,7 +685,8 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
struct ice_rx_buf *bi;
/* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
+ if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
+ !cleaned_count)
return false;
/* get the Rx descriptor and buffer based on next_to_use */
@@ -803,7 +906,7 @@ static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
+ u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
@@ -918,7 +1021,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ u16 ntc = rx_ring->next_to_clean + 1;
/* fetch, update, and store next to clean */
ntc = (ntc < rx_ring->count) ? ntc : 0;
@@ -981,7 +1084,7 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*
* Returns amount of work completed
*/
-static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
@@ -991,6 +1094,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
bool failure;
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+#endif
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
@@ -1020,6 +1127,12 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
*/
dma_rmb();
+ if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
+ ice_put_rx_buf(rx_ring, NULL);
+ cleaned_count++;
+ continue;
+ }
+
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
@@ -1038,6 +1151,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
xdp.data_meta = xdp.data;
xdp.data_end = xdp.data + size;
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
+#endif
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
@@ -1051,16 +1168,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
if (!xdp_res)
goto construct_skb;
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- unsigned int truesize;
-
-#if (PAGE_SIZE < 8192)
- truesize = ice_rx_pg_size(rx_ring) / 2;
-#else
- truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
- size);
-#endif
xdp_xmit |= xdp_res;
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
} else {
rx_buf->pagecnt_bias++;
}
@@ -1528,7 +1637,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* don't allow the budget to go below 1 because that would exit
* polling early.
*/
- budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
+ budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
else
/* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget;
@@ -1664,7 +1773,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
*/
while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, max_data, td_tag);
+ ice_build_ctob(td_cmd, td_offset, max_data,
+ td_tag);
tx_desc++;
i++;
@@ -1684,8 +1794,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
if (likely(!data_len))
break;
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
- size, td_tag);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
+ size, td_tag);
tx_desc++;
i++;
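build_ctob() gains an ice_ prefix throughout these hunks; its body is not shown in the diff. A hedged sketch of what such a command/type/offset/size packer conventionally looks like, with field shift names assumed from ice_lan_tx_rx.h:

	static inline __le64
	example_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size,
			   u64 td_tag)
	{
		return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
				   (td_cmd << ICE_TXD_QW1_CMD_S) |
				   (td_offset << ICE_TXD_QW1_OFFSET_S) |
				   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
				   (td_tag << ICE_TXD_QW1_L2TAG1_S));
	}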
@@ -1716,8 +1826,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
/* write last descriptor with RS and EOP bits */
td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
- td_tag);
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(td_cmd, td_offset, size, td_tag);
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
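ice_build_ctob() (defined in ice_txrx_lib.h further down) packs command, offset, buffer size and L2 tag into the descriptor's quad word 1. A host-endian sketch; the shift values are assumed from the usual QW1 layout (DTYPE 0-3, CMD 4-15, OFFSET 16-33, SIZE 34-47, L2TAG1 48-63), and the real helper additionally applies cpu_to_le64():

#include <stdint.h>
#include <stdio.h>

#define QW1_DTYPE_DATA	0ULL	/* assumed field positions, see above */
#define QW1_CMD_S	4
#define QW1_OFFSET_S	16
#define QW1_TX_BUF_SZ_S	34
#define QW1_L2TAG1_S	48

static uint64_t build_ctob(uint64_t cmd, uint64_t offset, unsigned int size,
			   uint64_t tag)
{
	return QW1_DTYPE_DATA |
	       (cmd << QW1_CMD_S) |
	       (offset << QW1_OFFSET_S) |
	       ((uint64_t)size << QW1_TX_BUF_SZ_S) |
	       (tag << QW1_L2TAG1_S);
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)build_ctob(0x3, 0x1d, 1514, 0));
	return 0;
}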
@@ -1791,12 +1901,94 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
- if (skb->encapsulation)
- return -1;
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP))
+ first->tx_flags |= ICE_TX_FLAGS_IPV4;
+ else if (protocol == htons(ETH_P_IPV6))
+ first->tx_flags |= ICE_TX_FLAGS_IPV6;
+
+ if (skb->encapsulation) {
+ bool gso_ena = false;
+ u32 tunnel = 0;
+
+ /* define outer network header type */
+ if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
+ tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
+ ICE_TX_CTX_EIPT_IPV4 :
+ ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
+ l4_proto = ip.v4->protocol;
+ } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+ tunnel |= ICE_TX_CTX_EIPT_IPV6;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ }
+
+ /* define outer transport */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ break;
+ case IPPROTO_GRE:
+ tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
+ default:
+ if (first->tx_flags & ICE_TX_FLAGS_TSO)
+ return -1;
+
+ skb_checksum_help(skb);
+ return 0;
+ }
+
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ ICE_TXD_CTX_QW0_EIPLEN_S;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
+ /* compute tunnel header size */
+ tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+ ICE_TXD_CTX_QW0_NATLEN_S;
+
+ gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
+ /* indicate if we need to offload outer UDP header */
+ if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
+
+ /* record tunnel offload values */
+ off->cd_tunnel_params |= tunnel;
+
+		/* set DTYP=1 to indicate that it's a Tx context descriptor
+ * in IPsec tunnel mode with Tx offloads in Quad word 1
+ */
+ off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
+
+ /* switch L4 header pointer from outer to inner */
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = 0;
+
+ /* reset type as we transition from outer to inner headers */
+ first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
+ if (ip.v4->version == 4)
+ first->tx_flags |= ICE_TX_FLAGS_IPV4;
+ if (ip.v6->version == 6)
+ first->tx_flags |= ICE_TX_FLAGS_IPV6;
+ }
/* Enable IP checksum offloads */
- protocol = vlan_get_protocol(skb);
- if (protocol == htons(ETH_P_IP)) {
+ if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
l4_proto = ip.v4->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
@@ -1806,7 +1998,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
else
cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
- } else if (protocol == htons(ETH_P_IPV6)) {
+ } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
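The hardware wants the outer L3 length in 4-byte units (EIPLEN) and the outer-L4-through-inner-MAC span in 2-byte units (NATLEN), which is why the pointer deltas in the tunnel block above are divided by 4 and 2. A sketch assuming a plain IPv4 + VXLAN frame:

#include <stdio.h>

/* Sketch of the tunnel length fields; the 20/8/8/14 byte sizes assume
 * an IPv4 (no options) + VXLAN frame purely for illustration.
 */
int main(void)
{
	unsigned int outer_ip = 20;		/* outer IPv4 header */
	unsigned int udp = 8, vxlan = 8, imac = 14;

	unsigned int eiplen = outer_ip / 4;		/* 5 quarter-words */
	unsigned int natlen = (udp + vxlan + imac) / 2;	/* 15 half-words */

	printf("EIPLEN=%u quarter-words, NATLEN=%u half-words\n",
	       eiplen, natlen);
	return 0;
}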
@@ -1861,49 +2053,25 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
*
* Checks the skb and set up correspondingly several generic transmit flags
* related to VLAN tagging for the HW, such as VLAN, DCB, etc.
- *
- * Returns error code indicate the frame should be dropped upon error and the
- * otherwise returns 0 to indicate the flags has been set properly.
*/
-static int
+static void
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
- __be16 protocol = skb->protocol;
-
- if (protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
- /* when HW VLAN acceleration is turned off by the user the
- * stack sets the protocol to 8021q so that the driver
- * can take any steps required to support the SW only
- * VLAN handling. In our case the driver doesn't need
- * to take any further steps so just set the protocol
- * to the encapsulated ethertype.
- */
- skb->protocol = vlan_get_protocol(skb);
- return 0;
- }
- /* if we have a HW VLAN tag being added, default to the HW one */
+ /* nothing left to do, software offloaded VLAN */
+ if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
+ return;
+
+ /* currently, we always assume 802.1Q for VLAN insertion as VLAN
+ * insertion for 802.1AD is not supported
+ */
if (skb_vlan_tag_present(skb)) {
first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
- } else if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_hdr *vhdr, _vhdr;
-
- /* for SW VLAN, check the next protocol and store the tag */
- vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
- sizeof(_vhdr),
- &_vhdr);
- if (!vhdr)
- return -EINVAL;
-
- first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
- ICE_TX_FLAGS_VLAN_S;
- first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
}
- return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
+ ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
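The VLAN TCI is carried in the upper 16 bits of tx_flags (see the ICE_TX_FLAGS_VLAN_M mask in ice_txrx.h below). A sketch of the packing, with the shift of 16 assumed from that mask and BIT(1) standing in for ICE_TX_FLAGS_HW_VLAN:

#include <stdint.h>
#include <stdio.h>

#define VLAN_S	16		/* assumed from the 0xffff0000 mask */
#define VLAN_M	0xffff0000u
#define HW_VLAN	(1u << 1)	/* stand-in for ICE_TX_FLAGS_HW_VLAN */

int main(void)
{
	uint32_t tx_flags = 0;
	uint16_t tci = (3 << 13) | 100;	/* PCP 3, VLAN ID 100 */

	tx_flags |= (uint32_t)tci << VLAN_S;
	tx_flags |= HW_VLAN;

	printf("flags=0x%08x tci=0x%04x\n", tx_flags,
	       (tx_flags & VLAN_M) >> VLAN_S);
	return 0;
}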
/**
@@ -1928,7 +2096,8 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
- u32 paylen, l4_start;
+ u32 paylen;
+ u8 l4_start;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1953,8 +2122,42 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
ip.v6->payload_len = 0;
}
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
+ /* determine offset of outer transport header */
+ l4_start = (u8)(l4.hdr - skb->data);
+
+ /* remove payload length from outer checksum */
+ paylen = skb->len - l4_start;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
+
+ /* reset pointers to inner headers */
+
+ /* cppcheck-suppress unreadVariable */
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
+ }
+
/* determine offset of transport header */
- l4_start = l4.hdr - skb->data;
+ l4_start = (u8)(l4.hdr - skb->data);
/* remove payload length from checksum */
paylen = skb->len - l4_start;
@@ -1963,12 +2166,12 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
csum_replace_by_diff(&l4.udp->check,
(__force __wsum)htonl(paylen));
/* compute length of UDP segmentation header */
- off->header_len = sizeof(l4.udp) + l4_start;
+ off->header_len = (u8)sizeof(l4.udp) + l4_start;
} else {
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(paylen));
/* compute length of TCP segmentation header */
- off->header_len = (l4.tcp->doff * 4) + l4_start;
+ off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
}
/* update gso_segs and bytecount */
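Subtracting the payload length from the pseudo-header checksum leaves hardware free to re-add each segment's own length during TSO. A host-endian sketch of the ones'-complement update that csum_replace_by_diff() performs (RFC 1624-style; endianness glossed over, input value made up):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold32(uint32_t s)
{
	while (s >> 16)
		s = (s & 0xffff) + (s >> 16);
	return (uint16_t)s;
}

static uint16_t csum_sub(uint16_t sum, uint32_t value)
{
	/* checksum fields hold the complement; work on the raw sum */
	uint32_t raw = (uint16_t)~sum;

	/* subtract by adding the ones' complement of the value */
	raw += csum_fold32(~value);
	return (uint16_t)~csum_fold32(raw);
}

int main(void)
{
	uint16_t check = 0xb1e6;	/* made-up pseudo-header csum */
	unsigned int paylen = 2848;	/* skb->len - l4_start */

	printf("before=0x%04x after=0x%04x\n", check,
	       csum_sub(check, paylen));
	return 0;
}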
@@ -2176,8 +2379,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
first->tx_flags = 0;
/* prepare the VLAN tagging flags for Tx */
- if (ice_tx_prepare_vlan_flags(tx_ring, first))
- goto out_drop;
+ ice_tx_prepare_vlan_flags(tx_ring, first);
/* set up TSO offload */
tso = ice_tso(first, &offload);
@@ -2199,7 +2401,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
- int i = tx_ring->next_to_use;
+ u16 i = tx_ring->next_to_use;
/* grab the next descriptor */
cdesc = ICE_TX_CTX_DESC(tx_ring, i);
@@ -2244,3 +2446,86 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return ice_xmit_frame_ring(skb, tx_ring);
}
+
+/**
+ * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
+ * @tx_ring: tx_ring to clean
+ */
+void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
+{
+ struct ice_vsi *vsi = tx_ring->vsi;
+ s16 i = tx_ring->next_to_clean;
+ int budget = ICE_DFLT_IRQ_WORK;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+
+ tx_buf = &tx_ring->tx_buf[i];
+ tx_desc = ICE_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no pending work */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if the descriptor isn't done, no work to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+ tx_desc->buf_addr = 0;
+ tx_desc->cmd_type_offset_bsz = 0;
+
+ /* move past filter desc */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap the data header */
+ if (dma_unmap_len(tx_buf, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ devm_kfree(tx_ring->dev, tx_buf->raw_buf);
+
+		/* clear the remaining buffer and descriptor state */
+ tx_buf->raw_buf = NULL;
+ tx_buf->tx_flags = 0;
+ tx_buf->next_to_watch = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+ tx_desc->buf_addr = 0;
+ tx_desc->cmd_type_offset_bsz = 0;
+
+ /* move past eop_desc for start of next FD desc */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ }
+
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ /* re-enable interrupt if needed */
+ ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
+}
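The cleanup loop above biases the s16 index by -count so that the wrap test is a cheap '!i' instead of a compare against the ring size. A sketch of the same walk with an assumed ring size:

#include <stdio.h>

/* Sketch of the negative-bias ring walk: the index runs in [-count, 0),
 * wraps when it hits zero, and is unbiased before being stored back.
 * RING_COUNT is an assumed stand-in.
 */
#define RING_COUNT 64

int main(void)
{
	short i = 62;			/* next_to_clean */
	int steps = 4;

	i -= RING_COUNT;		/* bias into [-count, 0) */
	while (steps--) {
		i++;
		if (!i)			/* hit the end: wrap to start */
			i -= RING_COUNT;
		printf("slot %d\n", i + RING_COUNT);
	}
	i += RING_COUNT;		/* unbias before storing back */
	printf("next_to_clean=%d\n", i);
	return 0;
}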
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 7ee00a128663..e70c4619edc3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -38,7 +38,8 @@
*/
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
-((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
+ ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
+ SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
/**
* ice_compute_pad - compute the padding
@@ -107,12 +108,19 @@ static inline int ice_skb_pad(void)
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+ (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
+/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
+ * freed instead of returned like skb packets.
+ */
+#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
+#define ICE_TX_FLAGS_IPV4 BIT(5)
+#define ICE_TX_FLAGS_IPV6 BIT(6)
+#define ICE_TX_FLAGS_TUNNEL BIT(7)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
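The (u16) cast keeps the macro's result type consistent with the ring index fields. A sketch of what the expression computes: free slots with wraparound, always reserving one descriptor so next_to_use can never catch next_to_clean:

#include <stdio.h>

static unsigned short desc_unused(unsigned short count,
				  unsigned short ntc, unsigned short ntu)
{
	/* add the ring size back in when clean is at or behind use,
	 * and reserve one slot so the indices never collide
	 */
	return (unsigned short)(((ntc > ntu) ? 0 : count) + ntc - ntu - 1);
}

int main(void)
{
	printf("%u\n", desc_unused(512, 10, 500));	/* 21 */
	printf("%u\n", desc_unused(512, 300, 100));	/* 199 */
	return 0;
}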
@@ -155,17 +163,16 @@ struct ice_tx_offload_params {
};
struct ice_rx_buf {
- struct sk_buff *skb;
- dma_addr_t dma;
union {
struct {
+ struct sk_buff *skb;
+ dma_addr_t dma;
struct page *page;
unsigned int page_offset;
u16 pagecnt_bias;
};
struct {
- void *addr;
- u64 handle;
+ struct xdp_buff *xdp;
};
};
};
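The reshuffled rx_buf overlaps the page-based fields with the zero-copy xdp pointer, since a given ring only ever uses one of the two paths. A simplified sketch of the union layout (types reduced for illustration):

#include <stdio.h>

struct rx_buf {
	union {
		struct {		/* copy-based (page split) path */
			void *skb;
			unsigned long long dma;
			void *page;
			unsigned int page_offset;
			unsigned short pagecnt_bias;
		};
		struct {		/* AF_XDP zero-copy path */
			void *xdp;
		};
	};
};

int main(void)
{
	/* footprint is that of the larger member only */
	printf("sizeof(struct rx_buf) = %zu\n", sizeof(struct rx_buf));
	return 0;
}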
@@ -289,7 +296,6 @@ struct ice_ring {
struct rcu_head rcu; /* to avoid race on free */
struct bpf_prog *xdp_prog;
struct xdp_umem *xsk_umem;
- struct zero_copy_allocator zca;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
@@ -373,5 +379,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
-
+int
+ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
+ u8 *raw_packet);
+int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);
+void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 6da048a6ca7c..02b12736ea80 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -8,7 +8,7 @@
* @rx_ring: ring to bump
* @val: new head index
*/
-void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
{
u16 prev_ntu = rx_ring->next_to_use & ~0x7;
@@ -84,11 +84,11 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
struct ice_rx_ptype_decoded decoded;
- u32 rx_error, rx_status;
+ u16 rx_status0, rx_status1;
bool ipv4, ipv6;
- rx_status = le16_to_cpu(rx_desc->wb.status_error0);
- rx_error = rx_status;
+ rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
decoded = ice_decode_rx_desc_ptype(ptype);
@@ -101,7 +101,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
return;
/* check if HW has decoded the packet and checksum */
- if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+ if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
if (!(decoded.known && decoded.outer_ip))
@@ -112,19 +112,31 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
- if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
goto checksum_fail;
- else if (ipv6 && (rx_status &
- (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+
+ if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
goto checksum_fail;
/* check for L4 errors and handle packets that were not able to be
* checksummed due to arrival speed
*/
- if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ goto checksum_fail;
+
+ /* check for outer UDP checksum error in tunneled packets */
+ if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
+ (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
goto checksum_fail;
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
+ */
+ if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case ICE_RX_PTYPE_INNER_PROT_TCP:
@@ -215,8 +227,8 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, i);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
- size, 0);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ size, 0);
/* Make certain all of the status bits have been updated
* before next_to_watch is written.
@@ -242,7 +254,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
*/
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
- struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return ICE_XDP_CONSUMED;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index ba9164dad9ae..58ff58f0f972 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -22,7 +22,7 @@ ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
}
static inline __le64
-build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
+ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
(td_cmd << ICE_TXD_QW1_CMD_S) |
@@ -49,7 +49,7 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
-void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val);
+void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4ce5f92fca4a..c1ad8622e65c 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -118,7 +118,8 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
- ICE_VSI_VF,
+ ICE_VSI_VF = 1,
+ ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_LB = 6,
};
@@ -161,6 +162,38 @@ struct ice_phy_info {
u8 get_link_info;
};
+/* protocol enumeration for filters */
+enum ice_fltr_ptype {
+ /* NONE - used for undef/error */
+ ICE_FLTR_PTYPE_NONF_NONE = 0,
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+ ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ ICE_FLTR_PTYPE_FRAG_IPV4,
+ ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV6_SCTP,
+ ICE_FLTR_PTYPE_NONF_IPV6_OTHER,
+ ICE_FLTR_PTYPE_MAX,
+};
+
+enum ice_fd_hw_seg {
+ ICE_FD_HW_SEG_NON_TUN = 0,
+ ICE_FD_HW_SEG_TUN,
+ ICE_FD_HW_SEG_MAX,
+};
+
+/* 2 VSI = 1 ICE_VSI_PF + 1 ICE_VSI_CTRL */
+#define ICE_MAX_FDIR_VSI_PER_FILTER 2
+
+struct ice_fd_hw_prof {
+ struct ice_flow_seg_info *fdir_seg[ICE_FD_HW_SEG_MAX];
+ int cnt;
+ u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX];
+ u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER];
+};
+
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
u32 valid_functions;
@@ -197,6 +230,8 @@ struct ice_hw_func_caps {
u32 num_allocd_vfs; /* Number of allocated VFs */
u32 vf_base_id; /* Logical ID of the first VF */
u32 guar_num_vsi;
+ u32 fd_fltr_guar; /* Number of filters guaranteed */
+ u32 fd_fltr_best_effort; /* Number of best effort filters */
};
/* Device wide capabilities */
@@ -204,6 +239,7 @@ struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
u32 num_funcs;
};
@@ -259,6 +295,16 @@ struct ice_nvm_info {
#define ICE_NVM_VER_LEN 32
+/* netlist version information */
+struct ice_netlist_ver_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
/* Max number of port to queue branches w.r.t topology */
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
@@ -479,6 +525,8 @@ struct ice_hw {
u64 debug_mask; /* bitmap for debug mask */
enum ice_mac_type mac_type;
+ u16 fd_ctr_base; /* FD counter base index */
+
/* pci info */
u16 device_id;
u16 vendor_id;
@@ -491,8 +539,8 @@ struct ice_hw {
u16 max_burst_size; /* driver sets this value */
/* Tx Scheduler values */
- u16 num_tx_sched_layers;
- u16 num_tx_sched_phys_layers;
+ u8 num_tx_sched_layers;
+ u8 num_tx_sched_phys_layers;
u8 flattened_layers;
u8 max_cgds;
u8 sw_entry_point_layer;
@@ -506,6 +554,7 @@ struct ice_hw {
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
struct ice_hw_func_caps func_caps; /* function capabilities */
+ struct ice_netlist_ver_info netlist_ver; /* netlist version info */
struct ice_switch_info *switch_info; /* switch filter lists */
@@ -548,6 +597,7 @@ struct ice_hw {
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
+ u32 active_track_id;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
@@ -568,10 +618,29 @@ struct ice_hw {
u8 *pkg_copy;
u32 pkg_size;
+ /* tunneling info */
+ struct mutex tnl_lock;
+ struct ice_tunnel_table tnl;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
struct list_head fl_profs[ICE_BLK_COUNT];
+
+ /* Flow Director filter info */
+ int fdir_active_fltr;
+
+ struct mutex fdir_fltr_lock; /* protect Flow Director */
+ struct list_head fdir_list_head;
+
+ /* Book-keeping of side-band filter count per flow-type.
+ * This is used to detect and handle input set changes for
+ * respective flow-type.
+ */
+ u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX];
+
+ struct ice_fd_hw_prof **fdir_prof;
+ DECLARE_BITMAP(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX);
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
};
@@ -592,6 +661,8 @@ struct ice_eth_stats {
u64 tx_errors; /* tepc */
};
+#define ICE_MAX_UP 8
+
/* Statistics collected by the MAC */
struct ice_hw_port_stats {
/* eth stats collected by the port */
@@ -631,6 +702,9 @@ struct ice_hw_port_stats {
u64 tx_size_1023; /* ptc1023 */
u64 tx_size_1522; /* ptc1522 */
u64 tx_size_big; /* ptc9522 */
+ /* flow director stats */
+ u32 fd_sb_status;
+ u64 fd_sb_match;
};
/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 15191a325918..16a2f2526ccc 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -4,16 +4,18 @@
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
+#include "ice_fltr.h"
/**
* ice_validate_vf_id - helper to check if VF ID is valid
* @pf: pointer to the PF structure
* @vf_id: the ID of the VF to check
*/
-static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
+static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
+	/* vf_id is only valid in the range 0-255 and should always be unsigned */
if (vf_id >= pf->num_alloc_vfs) {
- dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
+ dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
return -EINVAL;
}
return 0;
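Switching vf_id to u16 closes the signed-comparison hole: a negative int would slip past the single '>= num_alloc_vfs' range check. A sketch of the difference:

#include <stdio.h>

static int validate_signed(int vf_id, int num_alloc_vfs)
{
	return (vf_id >= num_alloc_vfs) ? -22 : 0;	/* -EINVAL */
}

static int validate_unsigned(unsigned short vf_id,
			     unsigned short num_alloc_vfs)
{
	return (vf_id >= num_alloc_vfs) ? -22 : 0;
}

int main(void)
{
	/* -1 passes the signed check but becomes 65535 as u16 */
	printf("signed   -1: %d (accepted!)\n", validate_signed(-1, 8));
	printf("unsigned -1: %d (rejected)\n",
	       validate_unsigned((unsigned short)-1, 8));
	return 0;
}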
@@ -27,7 +29,7 @@ static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
+ dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
vf->vf_id);
return -EBUSY;
}
@@ -35,6 +37,37 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
}
/**
+ * ice_err_to_virt_err - translate errors for VF return code
+ * @ice_err: error return code
+ */
+static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
+{
+ switch (ice_err) {
+ case ICE_SUCCESS:
+ return VIRTCHNL_STATUS_SUCCESS;
+ case ICE_ERR_BAD_PTR:
+ case ICE_ERR_INVAL_SIZE:
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ case ICE_ERR_PARAM:
+ case ICE_ERR_CFG:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ case ICE_ERR_NO_MEMORY:
+ return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ case ICE_ERR_NOT_READY:
+ case ICE_ERR_RESET_FAILED:
+ case ICE_ERR_FW_API_VER:
+ case ICE_ERR_AQ_ERROR:
+ case ICE_ERR_AQ_TIMEOUT:
+ case ICE_ERR_AQ_FULL:
+ case ICE_ERR_AQ_NO_WORK:
+ case ICE_ERR_AQ_EMPTY:
+ return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ default:
+ return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ }
+}
+
+/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -47,7 +80,7 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
- int i;
+ unsigned int i;
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
@@ -149,6 +182,26 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
}
/**
+ * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * @vf: VF to remove access to VSI for
+ */
+static void ice_vf_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->lan_vsi_idx = ICE_NO_VSI;
+ vf->lan_vsi_num = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
+ * @vf: invalidate this VF's VSI after freeing it
+ */
+static void ice_vf_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
+ ice_vf_invalidate_vsi(vf);
+}
+
+/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
*/
@@ -163,10 +216,8 @@ static void ice_free_vf_res(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
/* free VSI and disconnect it from the parent uplink */
- if (vf->lan_vsi_idx) {
- ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
- vf->lan_vsi_idx = 0;
- vf->lan_vsi_num = 0;
+ if (vf->lan_vsi_idx != ICE_NO_VSI) {
+ ice_vf_vsi_release(vf);
vf->num_mac = 0;
}
@@ -292,7 +343,7 @@ void ice_free_vfs(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- int tmp, i;
+ unsigned int tmp, i;
if (!pf->vf)
return;
@@ -337,7 +388,7 @@ void ice_free_vfs(struct ice_pf *pf)
* before this function ever gets called.
*/
if (!pci_vfs_assigned(pf->pdev)) {
- int vf_id;
+ unsigned int vf_id;
/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
* work correctly when SR-IOV gets re-enabled.
@@ -368,9 +419,9 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
struct ice_pf *pf = vf->pf;
u32 reg, reg_idx, bit_idx;
+ unsigned int vf_abs_id, i;
struct device *dev;
struct ice_hw *hw;
- int vf_abs_id, i;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
@@ -380,10 +431,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
- * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
- * It's normally disabled in ice_free_vf_res(), but it's safer
- * to do it earlier to give some time to finish to any VF config
- * functions that may still be running at this point.
+ * when it's safe again to access VF's VSI.
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
@@ -418,7 +466,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
if ((reg & VF_TRANS_PENDING_M) == 0)
break;
- dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
+ dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@@ -460,8 +508,9 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -475,18 +524,39 @@ out:
}
/**
+ * ice_vf_get_port_info - Get the VF's port info structure
+ * @vf: VF used to get the port info structure for
+ */
+static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
+{
+ return vf->pf->hw.port_info;
+}
+
+/**
* ice_vf_vsi_setup - Set up a VF VSI
- * @pf: board private structure
- * @pi: pointer to the port_info instance
- * @vf_id: defines VF ID to which this VSI connects.
+ * @vf: VF to setup VSI for
*
* Returns pointer to the successfully allocated VSI struct on success,
* otherwise returns NULL on failure.
*/
-static struct ice_vsi *
-ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
+static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
+
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
+ ice_vf_invalidate_vsi(vf);
+ return NULL;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return vsi;
}
/**
@@ -507,170 +577,158 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
}
/**
- * ice_alloc_vsi_res - Setup VF VSI and its resources
- * @vf: pointer to the VF structure
+ * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
+ * @vf: VF to add VLAN filters for
*
- * Returns 0 on success, negative value on failure
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds either a VLAN 0 or port VLAN based filter after reset.
*/
-static int ice_alloc_vsi_res(struct ice_vf *vf)
+static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
- LIST_HEAD(tmp_add_list);
- u8 broadcast[ETH_ALEN];
- struct ice_vsi *vsi;
- struct device *dev;
- int status = 0;
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ u16 vlan_id = 0;
+ int err;
- dev = ice_pf_to_dev(pf);
- /* first vector index is the VFs OICR index */
- vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+ if (vf->port_vlan_info) {
+ err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
+ if (err) {
+ dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
- vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
- if (!vsi) {
- dev_err(dev, "Failed to create VF VSI\n");
- return -ENOMEM;
+ vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
}
- vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
-
- /* Check if port VLAN exist before, and restore it accordingly */
- if (vf->port_vlan_info) {
- ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
- if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK))
- dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
- vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
- } else {
- /* set VLAN 0 filter by default when no port VLAN is
- * enabled. If a port VLAN is enabled we don't want
- * untagged broadcast/multicast traffic seen on the VF
- * interface.
- */
- if (ice_vsi_add_vlan(vsi, 0))
- dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
- vf->vf_id);
+ /* vlan_id will either be 0 or the port VLAN number */
+ err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
+ vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
+ err);
+ return err;
}
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
+ * @vf: VF to add MAC filters for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
+ */
+static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum ice_status status;
+ u8 broadcast[ETH_ALEN];
+
eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
+ vf->vf_id, ice_stat_str(status));
+ return ice_status_to_errno(status);
+ }
- status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
- if (status)
- goto ice_alloc_vsi_res_exit;
+ vf->num_mac++;
if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
- status = ice_add_mac_to_list(vsi, &tmp_add_list,
- vf->dflt_lan_addr.addr);
- if (status)
- goto ice_alloc_vsi_res_exit;
+ status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
+ &vf->dflt_lan_addr.addr[0], vf->vf_id,
+ ice_stat_str(status));
+ return ice_status_to_errno(status);
+ }
+ vf->num_mac++;
}
- status = ice_add_mac(&pf->hw, &tmp_add_list);
- if (status)
- dev_err(dev, "could not add mac filters error %d\n", status);
- else
- vf->num_mac = 1;
-
- /* Clear this bit after VF initialization since we shouldn't reclaim
- * and reassign interrupts for synchronous or asynchronous VFR events.
- * We don't want to reconfigure interrupts since AVF driver doesn't
- * expect vector assignment to be changed unless there is a request for
- * more vectors.
- */
-ice_alloc_vsi_res_exit:
- ice_free_fltr_list(dev, &tmp_add_list);
- return status;
+ return 0;
}
/**
- * ice_alloc_vf_res - Allocate VF resources
- * @vf: pointer to the VF structure
+ * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
+ * @vf: VF to configure trust setting for
*/
-static int ice_alloc_vf_res(struct ice_vf *vf)
+static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
- int tx_rx_queue_left;
- int status;
-
- /* Update number of VF queues, in case VF had requested for queue
- * changes
- */
- tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
- ice_get_avail_rxq_count(pf));
- tx_rx_queue_left += pf->num_qps_per_vf;
- if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
- vf->num_req_qs != vf->num_vf_qs)
- vf->num_vf_qs = vf->num_req_qs;
-
- /* setup VF VSI and necessary resources */
- status = ice_alloc_vsi_res(vf);
- if (status)
- goto ice_alloc_vf_res_exit;
-
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-
- /* VF is now completely initialized */
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
-
- return status;
-
-ice_alloc_vf_res_exit:
- ice_free_vf_res(vf);
- return status;
}
/**
- * ice_ena_vf_mappings
- * @vf: pointer to the VF structure
+ * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
+ * @vf: VF to enable MSIX mappings for
*
- * Enable VF vectors and queues allocation by writing the details into
- * respective registers.
+ * Some of the registers must be indexed/configured using device-global values,
+ * while others take 0-based values that are relative to the PF.
*/
-static void ice_ena_vf_mappings(struct ice_vf *vf)
+static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
- int abs_vf_id, abs_first, abs_last;
+ int device_based_first_msix, device_based_last_msix;
+ int pf_based_first_msix, pf_based_last_msix, v;
struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int first, last, v;
+ int device_based_vf_id;
struct ice_hw *hw;
u32 reg;
- dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vsi = pf->vsi[vf->lan_vsi_idx];
- first = vf->first_vector_idx;
- last = (first + pf->num_msix_per_vf) - 1;
- abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
- abs_last = (abs_first + pf->num_msix_per_vf) - 1;
- abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- /* VF Vector allocation */
- reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
- ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
- VPINT_ALLOC_VALID_M);
+ pf_based_first_msix = vf->first_vector_idx;
+ pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
+
+ device_based_first_msix = pf_based_first_msix +
+ pf->hw.func_caps.common_cap.msix_vector_first_id;
+ device_based_last_msix =
+ (device_based_first_msix + pf->num_msix_per_vf) - 1;
+ device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
+ VPINT_ALLOC_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
+ VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
- reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
+ reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
& VPINT_ALLOC_PCI_FIRST_M) |
- ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
- VPINT_ALLOC_PCI_VALID_M);
+ ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
+ VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+
/* map the interrupts to its functions */
- for (v = first; v <= last; v++) {
- reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
+ reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
GLINT_VECT2FUNC_VF_NUM_M) |
((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
GLINT_VECT2FUNC_PF_NUM_M));
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
- /* Map mailbox interrupt. We put an explicit 0 here to remind us that
- * VF admin queue interrupts will go to VF MSI-X vector 0.
- */
- wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
+ /* Map mailbox interrupt to VF MSI-X vector 0 */
+ wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
+}
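The device-based MSIX range is simply the PF-based range shifted by the function's first-vector ID from the capabilities, and the device VF ID adds vf_base_id. A sketch with assumed example values:

#include <stdio.h>

int main(void)
{
	int first_vector_idx = 65;	/* PF-based, assumed example */
	int num_msix_per_vf = 17;
	int msix_vector_first_id = 2048;/* device-global base from caps */
	int vf_base_id = 128, vf_id = 3;

	int pf_first = first_vector_idx;
	int pf_last = pf_first + num_msix_per_vf - 1;
	int dev_first = pf_first + msix_vector_first_id;
	int dev_last = dev_first + num_msix_per_vf - 1;
	int dev_vf_id = vf_id + vf_base_id;

	printf("PF-based %d..%d, device-based %d..%d, device VF %d\n",
	       pf_first, pf_last, dev_first, dev_last, dev_vf_id);
	return 0;
}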
+
+/**
+ * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
+ * @vf: VF to enable the mappings for
+ * @max_txq: max Tx queues allowed on the VF's VSI
+ * @max_rxq: max Rx queues allowed on the VF's VSI
+ */
+static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
/* set regardless of mapping mode */
wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
@@ -682,7 +740,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
*/
reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
VPLAN_TX_QBASE_VFFIRSTQ_M) |
- (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
VPLAN_TX_QBASE_VFNUMQ_M));
wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
} else {
@@ -700,7 +758,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
*/
reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
VPLAN_RX_QBASE_VFFIRSTQ_M) |
- (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
VPLAN_RX_QBASE_VFNUMQ_M));
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
} else {
@@ -709,6 +767,18 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
}
/**
+ * ice_ena_vf_mappings - enable VF MSIX and queue mapping
+ * @vf: pointer to the VF structure
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+ ice_ena_vf_msix_mappings(vf);
+ ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
+}
+
+/**
* ice_determine_res
* @pf: pointer to the PF structure
* @avail_res: available resources in the PF structure
@@ -906,51 +976,18 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
}
/**
- * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
- * @vf: pointer to the VF structure
- *
- * Cleanup a VF after the hardware reset is finished. Expects the caller to
- * have verified whether the reset is finished properly, and ensure the
- * minimum amount of wait time has passed. Reallocate VF resources back to make
- * VF state active
+ * ice_clear_vf_reset_trigger - enable VF to access hardware
+ * @vf: VF to enable hardware access for
*/
-static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
+static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
+ struct ice_hw *hw = &vf->pf->hw;
u32 reg;
- hw = &pf->hw;
-
- /* PF software completes the flow by notifying VF that reset flow is
- * completed. This is done by enabling hardware by clearing the reset
- * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
- * register to VFR completed (done at the end of this function)
- * By doing this we allow HW to access VF memory at any point. If we
- * did it any sooner, HW could access memory while it was being freed
- * in ice_free_vf_res(), causing an IOMMU fault.
- *
- * On the other hand, this needs to be done ASAP, because the VF driver
- * is waiting for this to happen and may report a timeout. It's
- * harmless, but it gets logged into Guest OS kernel log, so best avoid
- * it.
- */
reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
reg &= ~VPGEN_VFRTRIG_VFSWR_M;
wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
-
- /* reallocate VF resources to finish resetting the VSI state */
- if (!ice_alloc_vf_res(vf)) {
- ice_ena_vf_mappings(vf);
- set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
- clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- }
-
- /* Tell the VF driver the reset is done. This needs to be done only
- * after VF has been fully initialized, because the VF driver may
- * request resources immediately after setting this flag.
- */
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+ ice_flush(hw);
}
/**
@@ -994,44 +1031,124 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
return status;
}
+static void ice_vf_clear_counters(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+ vf->num_mac = 0;
+ vsi->num_vlan = 0;
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+}
+
/**
- * ice_config_res_vfs - Finalize allocation of VFs resources in one go
- * @pf: pointer to the PF structure
+ * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
+ * @vf: VF to perform pre VSI rebuild tasks
*
- * This function is being called as last part of resetting all VFs, or when
- * configuring VFs for the first time, where there is no resource to be freed
- * Returns true if resources were properly allocated for all VFs, and false
- * otherwise.
+ * These tasks are cheap enough that they don't need to be amortized; they are
+ * most likely called in a loop over all VFs in the reset_all_vfs() case.
*/
-static bool ice_config_res_vfs(struct ice_pf *pf)
+static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int v;
+ ice_vf_clear_counters(vf);
+ ice_clear_vf_reset_trigger(vf);
+}
- if (ice_set_per_vf_res(pf)) {
- dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
- return false;
- }
+/**
+ * ice_vf_rebuild_host_cfg - rebuild host configuration that persists across reset
+ * @vf: VF to rebuild host configuration on
+ */
+static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
- /* rearm global interrupts */
- if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
- ice_irq_dynamic_ena(hw, NULL, NULL);
+ ice_vf_set_host_trust_cfg(vf);
- /* Finish resetting each VF and allocate resources */
- ice_for_each_vf(pf, v) {
- struct ice_vf *vf = &pf->vf[v];
+ if (ice_vf_rebuild_host_mac_cfg(vf))
+ dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
+ vf->vf_id);
- vf->num_vf_qs = pf->num_qps_per_vf;
- dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
- vf->num_vf_qs);
- ice_cleanup_and_realloc_vf(vf);
+ if (ice_vf_rebuild_host_vlan_cfg(vf))
+ dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
+ vf->vf_id);
+}
+
+/**
+ * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
+ * @vf: VF to release and setup the VSI for
+ *
+ * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
+ * configuration change, etc.).
+ */
+static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
+{
+ ice_vf_vsi_release(vf);
+ if (!ice_vf_vsi_setup(vf))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_vsi - rebuild the VF's VSI
+ * @vf: VF to rebuild the VSI for
+ *
+ * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
+ * host, PFR, CORER, etc.).
+ */
+static int ice_vf_rebuild_vsi(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (ice_vsi_rebuild(vsi, true)) {
+ dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
+ vf->vf_id);
+ return -EIO;
}
+ /* vsi->idx will remain the same in this case so don't update
+ * vf->lan_vsi_idx
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+ vf->lan_vsi_num = vsi->vsi_num;
- ice_flush(hw);
- clear_bit(__ICE_VF_DIS, pf->state);
+ return 0;
+}
- return true;
+/**
+ * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
+ * @vf: VF to set in initialized state
+ *
+ * After this function the VF will be ready to receive/handle the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message
+ */
+static void ice_vf_set_initialized(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+}
+
+/**
+ * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
+ * @vf: VF to perform tasks on
+ */
+static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+
+ ice_vf_rebuild_host_cfg(vf);
+
+ ice_vf_set_initialized(vf);
+ ice_ena_vf_mappings(vf);
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
/**
@@ -1065,17 +1182,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v)
ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
- ice_for_each_vf(pf, v) {
- struct ice_vsi *vsi;
-
- vf = &pf->vf[v];
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
- ice_dis_vf_qs(vf);
- ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
- NULL, ICE_VF_RESET, vf->vf_id, NULL);
- }
-
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
* sequence to make sure that it has completed. We'll keep track of
@@ -1112,21 +1218,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
- ice_free_vf_res(vf);
-
- /* Free VF queues as well, and reallocate later.
- * If a given VF has different number of queues
- * configured, the request for update will come
- * via mailbox communication.
- */
- vf->num_vf_qs = 0;
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi(vf);
+ ice_vf_post_vsi_rebuild(vf);
}
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
- if (!ice_config_res_vfs(pf))
- return false;
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
return true;
}
@@ -1238,12 +1336,9 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev_err(dev, "disabling promiscuous mode failed\n");
}
- /* free VF resources to begin resetting the VSI state */
- ice_free_vf_res(vf);
-
- ice_cleanup_and_realloc_vf(vf);
-
- ice_flush(hw);
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi_with_release(vf);
+ ice_vf_post_vsi_rebuild(vf);
return true;
}
@@ -1311,16 +1406,144 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
}
/**
- * ice_alloc_vfs - Allocate and set up VFs resources
+ * ice_init_vf_vsi_res - initialize/setup VF VSI resources
+ * @vf: VF to initialize/setup the VSI for
+ *
+ * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
+ * the VF VSI's broadcast filter. It is only used during initial VF creation.
+ */
+static int ice_init_vf_vsi_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ u8 broadcast[ETH_ALEN];
+ enum ice_status status;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err;
+
+ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+
+ dev = ice_pf_to_dev(pf);
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
+ return -ENOMEM;
+
+ err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
+ vf->vf_id, ice_stat_str(status));
+ err = ice_status_to_errno(status);
+ goto release_vsi;
+ }
+
+ vf->num_mac = 1;
+
+ return 0;
+
+release_vsi:
+ ice_vf_vsi_release(vf);
+ return err;
+}
+
+/**
+ * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
+ * @pf: PF the VFs are associated with
+ */
+static int ice_start_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int retval, i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_clear_vf_reset_trigger(vf);
+
+ retval = ice_init_vf_vsi_res(vf);
+ if (retval) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
+ vf->vf_id, retval);
+ goto teardown;
+ }
+
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_ena_vf_mappings(vf);
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+ }
+
+ ice_flush(hw);
+ return 0;
+
+teardown:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_dis_vf_mappings(vf);
+ ice_vf_vsi_release(vf);
+ }
+
+ return retval;
+}
+
+/**
+ * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
+ * @pf: PF holding reference to all VFs for default configuration
+ */
+static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ vf->pf = pf;
+ vf->vf_id = i;
+ vf->vf_sw_id = pf->first_sw;
+ /* assign default capabilities */
+ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
+ vf->spoofchk = true;
+ vf->num_vf_qs = pf->num_qps_per_vf;
+ }
+}
+
+/**
+ * ice_alloc_vfs - allocate num_vfs in the PF structure
+ * @pf: PF to store the allocated VFs in
+ * @num_vfs: number of VFs to allocate
+ */
+static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
+{
+ struct ice_vf *vfs;
+
+ vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
+ GFP_KERNEL);
+ if (!vfs)
+ return -ENOMEM;
+
+ pf->vf = vfs;
+ pf->num_alloc_vfs = num_vfs;
+
+ return 0;
+}
+
+/**
+ * ice_ena_vfs - enable VFs so they are ready to be used
* @pf: pointer to the PF structure
- * @num_alloc_vfs: number of VFs to allocate
+ * @num_vfs: number of VFs to enable
*/
-static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- struct ice_vf *vfs;
- int i, ret;
+ int ret;
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
@@ -1328,43 +1551,37 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
set_bit(__ICE_OICR_INTR_DIS, pf->state);
ice_flush(hw);
- ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ ret = pci_enable_sriov(pf->pdev, num_vfs);
if (ret) {
pf->num_alloc_vfs = 0;
goto err_unroll_intr;
}
- /* allocate memory */
- vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
- if (!vfs) {
- ret = -ENOMEM;
- goto err_pci_disable_sriov;
- }
- pf->vf = vfs;
- pf->num_alloc_vfs = num_alloc_vfs;
- /* apply default profile */
- ice_for_each_vf(pf, i) {
- vfs[i].pf = pf;
- vfs[i].vf_sw_id = pf->first_sw;
- vfs[i].vf_id = i;
+ ret = ice_alloc_vfs(pf, num_vfs);
+ if (ret)
+ goto err_pci_disable_sriov;
- /* assign default capabilities */
- set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
- vfs[i].spoofchk = true;
+ if (ice_set_per_vf_res(pf)) {
+		dev_err(dev, "Not enough resources for %d VFs, try with fewer VFs\n",
+ num_vfs);
+ ret = -ENOSPC;
+ goto err_unroll_sriov;
}
- /* VF resources get allocated with initialization */
- if (!ice_config_res_vfs(pf)) {
- ret = -EIO;
+ ice_set_dflt_settings_vfs(pf);
+
+ if (ice_start_vfs(pf)) {
+ dev_err(dev, "Failed to start VF(s)\n");
+ ret = -EAGAIN;
goto err_unroll_sriov;
}
- return ret;
+ clear_bit(__ICE_VF_DIS, pf->state);
+ return 0;
err_unroll_sriov:
+ devm_kfree(dev, pf->vf);
pf->vf = NULL;
- devm_kfree(dev, vfs);
- vfs = NULL;
pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
pci_disable_sriov(pf->pdev);
@@ -1404,6 +1621,8 @@ static bool ice_pf_state_is_nominal(struct ice_pf *pf)
* ice_pci_sriov_ena - Enable or change number of VFs
* @pf: pointer to the PF structure
* @num_vfs: number of VFs to allocate
+ *
+ * Returns 0 on success and negative on failure
*/
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
@@ -1411,20 +1630,10 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
struct device *dev = ice_pf_to_dev(pf);
int err;
- if (!ice_pf_state_is_nominal(pf)) {
- dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
- return -EBUSY;
- }
-
- if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
- dev_err(dev, "This device is not capable of SR-IOV\n");
- return -EOPNOTSUPP;
- }
-
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
ice_free_vfs(pf);
else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
- return num_vfs;
+ return 0;
if (num_vfs > pf->num_vfs_supported) {
dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
@@ -1432,45 +1641,77 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
return -EOPNOTSUPP;
}
- dev_info(dev, "Allocating %d VFs\n", num_vfs);
- err = ice_alloc_vfs(pf, num_vfs);
+ dev_info(dev, "Enabling %d VFs\n", num_vfs);
+ err = ice_ena_vfs(pf, num_vfs);
if (err) {
dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
return err;
}
set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
- return num_vfs;
+ return 0;
+}
+
+/**
+ * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
+ * @pf: PF to enable SR-IOV on
+ */
+static int ice_check_sriov_allowed(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_is_safe_mode(pf)) {
+ dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
/**
* ice_sriov_configure - Enable or change number of VFs via sysfs
* @pdev: pointer to a pci_dev structure
- * @num_vfs: number of VFs to allocate
+ * @num_vfs: number of VFs to allocate or 0 to free VFs
*
- * This function is called when the user updates the number of VFs in sysfs.
+ * This function is called when the user updates the number of VFs in sysfs. On
+ * success return whatever num_vfs was set to by the caller. Return negative on
+ * failure.
*/
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
struct device *dev = ice_pf_to_dev(pf);
+ int err;
- if (ice_is_safe_mode(pf)) {
- dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
- return -EOPNOTSUPP;
- }
+ err = ice_check_sriov_allowed(pf);
+ if (err)
+ return err;
- if (num_vfs)
- return ice_pci_sriov_ena(pf, num_vfs);
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ ice_free_vfs(pf);
+ return 0;
+ }
- if (!pci_vfs_assigned(pdev)) {
- ice_free_vfs(pf);
- } else {
dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
return -EBUSY;
}
- return 0;
+ err = ice_pci_sriov_ena(pf, num_vfs);
+ if (err)
+ return err;
+
+ return num_vfs;
}
/**
@@ -1483,7 +1724,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
void ice_process_vflr_event(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- int vf_id;
+ unsigned int vf_id;
u32 reg;
if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
@@ -1524,7 +1765,7 @@ static void ice_vc_reset_vf(struct ice_vf *vf)
*/
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
- int vf_id;
+ unsigned int vf_id;
ice_for_each_vf(pf, vf_id) {
struct ice_vf *vf = &pf->vf[vf_id];
@@ -1628,8 +1869,9 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
- vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
+ dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
+ vf->vf_id, ice_stat_str(aq_ret),
+ ice_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
@@ -1772,7 +2014,7 @@ err:
*/
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
- if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
ice_reset_vf(vf, false);
}
@@ -2044,8 +2286,9 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
- ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
+ ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
+ ice_stat_str(status));
ret = -EIO;
goto out;
}
@@ -2060,6 +2303,174 @@ out:
}
/**
+ * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
+ * @pf: PF structure for accessing VF(s)
+ *
+ * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
+ * else return true
+ */
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+{
+ int vf_idx;
+
+ ice_for_each_vf(pf, vf_idx) {
+ struct ice_vf *vf = &pf->vf[vf_idx];
+
+ /* found a VF that has promiscuous mode configured */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_vc_cfg_promiscuous_mode_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure VF VSIs promiscuous mode
+ */
+static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_promisc_info *info =
+ (struct virtchnl_promisc_info *)msg;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ bool rm_promisc;
+ int ret = 0;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+ /* Leave v_ret alone, lie to the VF on purpose. */
+ goto error_param;
+ }
+
+ rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
+ !(info->flags & FLAG_VF_MULTICAST_PROMISC);
+
+ if (vsi->num_vlan || vf->port_vlan_info) {
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+ struct net_device *pf_netdev;
+
+ if (!pf_vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ pf_netdev = pf_vsi->netdev;
+
+ ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
+ if (ret) {
+ dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
+ rm_promisc ? "ON" : "OFF", vf->vf_id,
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+
+ ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
+ if (ret) {
+ dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+ bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
+
+ if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
+ /* only attempt to set the default forwarding VSI if
+ * it's not currently set
+ */
+ ret = ice_set_dflt_vsi(pf->first_sw, vsi);
+ else if (!set_dflt_vsi &&
+ ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
+ /* only attempt to free the default forwarding VSI if we
+ * are the owner
+ */
+ ret = ice_clear_dflt_vsi(pf->first_sw);
+
+ if (ret) {
+ dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
+ set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ goto error_param;
+ }
+ } else {
+ enum ice_status status;
+ u8 promisc_m;
+
+ if (info->flags & FLAG_VF_UNICAST_PROMISC) {
+ if (vf->port_vlan_info || vsi->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+ } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
+ if (vf->port_vlan_info || vsi->num_vlan)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+ } else {
+ if (vf->port_vlan_info || vsi->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+ }
+
+ /* Configure multicast/unicast with or without VLAN promiscuous
+ * mode
+ */
+ status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
+ if (status) {
+ dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
+ rm_promisc ? "dis" : "en", vf->vf_id,
+ ice_stat_str(status));
+ v_ret = ice_err_to_virt_err(status);
+ goto error_param;
+ } else {
+ dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
+ rm_promisc ? "dis" : "en", vf->vf_id);
+ }
+ }
+
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
+ set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ else
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
+ set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ else
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ v_ret, NULL, 0);
+}
+
+/**
* ice_vc_get_stats_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2118,6 +2529,52 @@ static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
}
/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->txq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_TQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Tx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->rxq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_RQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
* ice_vc_ena_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2177,6 +2634,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
set_bit(vf_q_id, vf->rxq_ena);
}
@@ -2192,6 +2650,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
if (test_bit(vf_q_id, vf->txq_ena))
continue;
+ ice_vf_ena_txq_interrupt(vsi, vf_q_id);
set_bit(vf_q_id, vf->txq_ena);
}
@@ -2604,20 +3063,22 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
return -EPERM;
}
- status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
+ status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id);
return -EEXIST;
} else if (status) {
- dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
- mac_addr, vf->vf_id, status);
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
+ mac_addr, vf->vf_id, ice_stat_str(status));
return -EIO;
}
- /* only set dflt_lan_addr once */
- if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
- is_unicast_ether_addr(mac_addr))
+ /* Set the default LAN address to the latest unicast MAC address added
+ * by the VF. The default LAN address is reported by the PF via
+ * ndo_get_vf_config.
+ */
+ if (is_unicast_ether_addr(mac_addr))
ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
vf->num_mac++;
@@ -2641,14 +3102,14 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
return 0;
- status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
+ status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
vf->vf_id);
return -ENOENT;
} else if (status) {
- dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
- mac_addr, vf->vf_id, status);
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
+ mac_addr, vf->vf_id, ice_stat_str(status));
return -EIO;
}
@@ -2834,7 +3295,6 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi;
struct device *dev;
struct ice_vf *vf;
u16 vlanprio;
@@ -2856,8 +3316,6 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
}
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
-
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
return ret;
@@ -2870,44 +3328,15 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
return 0;
}
- if (vlan_id || qos) {
- /* remove VLAN 0 filter set by default when transitioning from
- * no port VLAN to a port VLAN. No change to old port VLAN on
- * failure.
- */
- ret = ice_vsi_kill_vlan(vsi, 0);
- if (ret)
- return ret;
- ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
- if (ret)
- return ret;
- } else {
- /* add VLAN 0 filter back when transitioning from port VLAN to
- * no port VLAN. No change to old port VLAN on failure.
- */
- ret = ice_vsi_add_vlan(vsi, 0);
- if (ret)
- return ret;
- ret = ice_vsi_manage_pvid(vsi, 0, false);
- if (ret)
- return ret;
- }
+ vf->port_vlan_info = vlanprio;
- if (vlan_id) {
+ if (vf->port_vlan_info)
dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
+ else
+ dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
- /* add VLAN filter for the port VLAN */
- ret = ice_vsi_add_vlan(vsi, vlan_id);
- if (ret)
- return ret;
- }
- /* remove old port VLAN filter with valid VLAN ID or QoS fields */
- if (vf->port_vlan_info)
- ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
-
- /* keep port VLAN information persistent on resets */
- vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
+ ice_vc_reset_vf(vf);
return 0;
}
@@ -2992,8 +3421,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
+ test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
vlan_promisc = true;
if (add_v) {
@@ -3018,7 +3448,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!vid)
continue;
- status = ice_vsi_add_vlan(vsi, vid);
+ status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -3317,6 +3747,9 @@ error_handler:
case VIRTCHNL_OP_GET_STATS:
err = ice_vc_get_stats_msg(vf, msg);
break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
+ break;
case VIRTCHNL_OP_ADD_VLAN:
err = ice_vc_add_vlan_msg(vf, msg);
break;
@@ -3390,6 +3823,39 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
}
/**
+ * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
+ * @pf: PF used to reference the switch's rules
+ * @umac: unicast MAC to compare against existing switch rules
+ *
+ * Return true on the first/any match, else return false
+ */
+static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
+{
+ struct ice_sw_recipe *mac_recipe_list =
+ &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* protect MAC filter list access */
+
+ rule_head = &mac_recipe_list->filt_rules;
+ rule_lock = &mac_recipe_list->filt_rule_lock;
+
+ mutex_lock(rule_lock);
+ list_for_each_entry(list_itr, rule_head, list_entry) {
+ u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
+
+ if (ether_addr_equal(existing_mac, umac)) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+
+ mutex_unlock(rule_lock);
+
+ return false;
+}
+
+/**
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -3406,25 +3872,41 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
+ if (is_multicast_ether_addr(mac)) {
netdev_err(netdev, "%pM not a valid unicast address\n", mac);
return -EINVAL;
}
vf = &pf->vf[vf_id];
+ /* nothing left to do, unicast MAC already set */
+ if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
+ return 0;
+
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
return ret;
- /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
- * flow will use the updated dflt_lan_addr and add a MAC filter
- * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
- * set the MAC address for this VF.
+ if (ice_unicast_mac_exists(pf, mac)) {
+ netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
+ mac, vf_id, mac);
+ return -EINVAL;
+ }
+
+ /* VF is notified of its new MAC via the PF's response to the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
- vf->pf_set_mac = true;
- netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
- vf_id, mac);
+ if (is_zero_ether_addr(mac)) {
+ /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
+ vf->pf_set_mac = false;
+ netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
+ } else {
+ /* PF will add MAC rule for the VF */
+ vf->pf_set_mac = true;
+ netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
+ }
ice_vc_reset_vf(vf);
return 0;
@@ -3554,6 +4036,24 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
}
/**
+ * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dflt_lan_addr.addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
* ice_print_vfs_mdd_event - print VFs malicious driver detect event
* @pf: pointer to the PF structure
*
@@ -3582,12 +4082,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
vf->mdd_rx_events.last_printed =
vf->mdd_rx_events.count;
-
- dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
- vf->mdd_rx_events.count, hw->pf_id, i,
- vf->dflt_lan_addr.addr,
- test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
- ? "on" : "off");
+ ice_print_vf_rx_mdd_event(vf);
}
/* only print Tx MDD event message if there are new events */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 3f9464269bd2..67aa9110fdd1 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -7,7 +7,10 @@
/* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */
#define ICE_MAX_VLAN_PER_VF 8
-#define ICE_MAX_MACADDR_PER_VF 12
+/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
+ * broadcast, and 16 for additional unicast/multicast filters
+ */
+#define ICE_MAX_MACADDR_PER_VF 18
/* Malicious Driver Detection */
#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
@@ -64,7 +67,7 @@ struct ice_mdd_vf_events {
struct ice_vf {
struct ice_pf *pf;
- s16 vf_id; /* VF ID in the PF space */
+ u16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
/* first vector index of this VF in the PF space */
int first_vector_idx;
@@ -128,9 +131,11 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -140,6 +145,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf);
#define ice_set_vf_state_qs_dis(vf) do {} while (0)
#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
#define ice_print_vfs_mdd_events(pf) do {} while (0)
+#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
@@ -219,5 +225,10 @@ ice_get_vf_stats(struct net_device __always_unused *netdev,
{
return -EOPNOTSUPP;
}
+
+static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf)
+{
+ return false;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8279db15e870..b6f928c9e9c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
@@ -280,28 +280,6 @@ static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
}
/**
- * ice_xsk_add_umem - add a UMEM region for XDP sockets
- * @vsi: VSI to which the UMEM will be added
- * @umem: pointer to a requested UMEM region
- * @qid: queue ID
- *
- * Returns 0 on success, negative on error
- */
-static int ice_xsk_add_umem(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
-{
- int err;
-
- err = ice_xsk_alloc_umems(vsi);
- if (err)
- return err;
-
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
-
- return 0;
-}
-
-/**
* ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid
* @vsi: VSI from which the VSI will be removed
* @qid: Ring/qid associated with the UMEM
@@ -318,65 +296,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
}
}
-/**
- * ice_xsk_umem_dma_map - DMA map UMEM region for XDP sockets
- * @vsi: VSI to map the UMEM region
- * @umem: UMEM to map
- *
- * Returns 0 on success, negative on error
- */
-static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- unsigned int i;
-
- dev = ice_pf_to_dev(pf);
- for (i = 0; i < umem->npgs; i++) {
- dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
- PAGE_SIZE,
- DMA_BIDIRECTIONAL,
- ICE_RX_DMA_ATTR);
- if (dma_mapping_error(dev, dma)) {
- dev_dbg(dev, "XSK UMEM DMA mapping error on page num %d\n",
- i);
- goto out_unmap;
- }
-
- umem->pages[i].dma = dma;
- }
-
- return 0;
-
-out_unmap:
- for (; i > 0; i--) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
- umem->pages[i].dma = 0;
- }
-
- return -EFAULT;
-}
-
-/**
- * ice_xsk_umem_dma_unmap - DMA unmap UMEM region for XDP sockets
- * @vsi: VSI from which the UMEM will be unmapped
- * @umem: UMEM to unmap
- */
-static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- unsigned int i;
-
- dev = ice_pf_to_dev(pf);
- for (i = 0; i < umem->npgs; i++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
-
- umem->pages[i].dma = 0;
- }
-}
/**
* ice_xsk_umem_disable - disable a UMEM region
@@ -391,7 +310,7 @@ static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
!vsi->xsk_umems[qid])
return -EINVAL;
- ice_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
+ xsk_buff_dma_unmap(vsi->xsk_umems[qid], ICE_RX_DMA_ATTR);
ice_xsk_remove_umem(vsi, qid);
return 0;
@@ -408,7 +327,6 @@ static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
static int
ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
{
- struct xdp_umem_fq_reuse *reuseq;
int err;
if (vsi->type != ICE_VSI_PF)
@@ -419,20 +337,18 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (qid >= vsi->num_xsk_umems)
return -EINVAL;
+ err = ice_xsk_alloc_umems(vsi);
+ if (err)
+ return err;
+
if (vsi->xsk_umems && vsi->xsk_umems[qid])
return -EBUSY;
- reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
- if (!reuseq)
- return -ENOMEM;
-
- xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
- err = ice_xsk_umem_dma_map(vsi, umem);
- if (err)
- return err;
+ vsi->xsk_umems[qid] = umem;
+ vsi->num_xsk_umems_used++;
- err = ice_xsk_add_umem(vsi, umem, qid);
+ err = xsk_buff_dma_map(vsi->xsk_umems[qid], ice_pf_to_dev(vsi->back),
+ ICE_RX_DMA_ATTR);
if (err)
return err;
@@ -484,137 +400,22 @@ xsk_umem_if_up:
}
/**
- * ice_zca_free - Callback for MEM_TYPE_ZERO_COPY allocations
- * @zca: zero-cpoy allocator
- * @handle: Buffer handle
- */
-void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
-{
- struct ice_rx_buf *rx_buf;
- struct ice_ring *rx_ring;
- struct xdp_umem *umem;
- u64 hr, mask;
- u16 nta;
-
- rx_ring = container_of(zca, struct ice_ring, zca);
- umem = rx_ring->xsk_umem;
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- mask = umem->chunk_mask;
-
- nta = rx_ring->next_to_alloc;
- rx_buf = &rx_ring->rx_buf[nta];
-
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- handle &= mask;
-
- rx_buf->dma = xdp_umem_get_dma(umem, handle);
- rx_buf->dma += hr;
-
- rx_buf->addr = xdp_umem_get_data(umem, handle);
- rx_buf->addr += hr;
-
- rx_buf->handle = (u64)handle + umem->headroom;
-}
-
-/**
- * ice_alloc_buf_fast_zc - Retrieve buffer address from XDP umem
- * @rx_ring: ring with an xdp_umem bound to it
- * @rx_buf: buffer to which xsk page address will be assigned
- *
- * This function allocates an Rx buffer in the hot path.
- * The buffer can come from fill queue or recycle queue.
- *
- * Returns true if an assignment was successful, false if not.
- */
-static __always_inline bool
-ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- void *addr = rx_buf->addr;
- u64 handle, hr;
-
- if (addr) {
- rx_ring->rx_stats.page_reuse_count++;
- return true;
- }
-
- if (!xsk_umem_peek_addr(umem, &handle)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- rx_buf->dma = xdp_umem_get_dma(umem, handle);
- rx_buf->dma += hr;
-
- rx_buf->addr = xdp_umem_get_data(umem, handle);
- rx_buf->addr += hr;
-
- rx_buf->handle = handle + umem->headroom;
-
- xsk_umem_release_addr(umem);
- return true;
-}
-
-/**
- * ice_alloc_buf_slow_zc - Retrieve buffer address from XDP umem
- * @rx_ring: ring with an xdp_umem bound to it
- * @rx_buf: buffer to which xsk page address will be assigned
- *
- * This function allocates an Rx buffer in the slow path.
- * The buffer can come from fill queue or recycle queue.
- *
- * Returns true if an assignment was successful, false if not.
- */
-static __always_inline bool
-ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- u64 handle, headroom;
-
- if (!xsk_umem_peek_addr_rq(umem, &handle)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- handle &= umem->chunk_mask;
- headroom = umem->headroom + XDP_PACKET_HEADROOM;
-
- rx_buf->dma = xdp_umem_get_dma(umem, handle);
- rx_buf->dma += headroom;
-
- rx_buf->addr = xdp_umem_get_data(umem, handle);
- rx_buf->addr += headroom;
-
- rx_buf->handle = handle + umem->headroom;
-
- xsk_umem_release_addr_rq(umem);
- return true;
-}
-
-/**
* ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
* @count: The number of buffers to allocate
- * @alloc: the function pointer to call for allocation
*
* This function allocates a number of Rx buffers from the fill ring
* or the internal recycle mechanism and places them on the Rx ring.
*
* Returns false if all allocations were successful, true if any fail.
*/
-static bool
-ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
- bool (*alloc)(struct ice_ring *, struct ice_rx_buf *))
+bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
struct ice_rx_buf *rx_buf;
bool ret = false;
+ dma_addr_t dma;
if (!count)
return false;
@@ -623,16 +424,14 @@ ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
rx_buf = &rx_ring->rx_buf[ntu];
do {
- if (!alloc(rx_ring, rx_buf)) {
+ rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ if (!rx_buf->xdp) {
ret = true;
break;
}
- dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, 0,
- rx_ring->rx_buf_len,
- DMA_BIDIRECTIONAL);
-
- rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma);
+ dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
rx_desc++;
@@ -653,32 +452,6 @@ ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
}
/**
- * ice_alloc_rx_bufs_fast_zc - allocate zero copy bufs in the hot path
- * @rx_ring: Rx ring
- * @count: number of bufs to allocate
- *
- * Returns false on success, true on failure.
- */
-static bool ice_alloc_rx_bufs_fast_zc(struct ice_ring *rx_ring, u16 count)
-{
- return ice_alloc_rx_bufs_zc(rx_ring, count,
- ice_alloc_buf_fast_zc);
-}
-
-/**
- * ice_alloc_rx_bufs_slow_zc - allocate zero copy bufs in the slow path
- * @rx_ring: Rx ring
- * @count: number of bufs to allocate
- *
- * Returns false on success, true on failure.
- */
-bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count)
-{
- return ice_alloc_rx_bufs_zc(rx_ring, count,
- ice_alloc_buf_slow_zc);
-}
-
-/**
* ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
* @rx_ring: Rx ring
*/
@@ -692,76 +465,21 @@ static void ice_bump_ntc(struct ice_ring *rx_ring)
}
/**
- * ice_get_rx_buf_zc - Fetch the current Rx buffer
- * @rx_ring: Rx ring
- * @size: size of a buffer
- *
- * This function returns the current, received Rx buffer and does
- * DMA synchronization.
- *
- * Returns a pointer to the received Rx buffer.
- */
-static struct ice_rx_buf *ice_get_rx_buf_zc(struct ice_ring *rx_ring, int size)
-{
- struct ice_rx_buf *rx_buf;
-
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
-
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 0,
- size, DMA_BIDIRECTIONAL);
-
- return rx_buf;
-}
-
-/**
- * ice_reuse_rx_buf_zc - reuse an Rx buffer
- * @rx_ring: Rx ring
- * @old_buf: The buffer to recycle
- *
- * This function recycles a finished Rx buffer, and places it on the recycle
- * queue (next_to_alloc).
- */
-static void
-ice_reuse_rx_buf_zc(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
-{
- unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
- u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
- u16 nta = rx_ring->next_to_alloc;
- struct ice_rx_buf *new_buf;
-
- new_buf = &rx_ring->rx_buf[nta++];
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- new_buf->dma = old_buf->dma & mask;
- new_buf->dma += hr;
-
- new_buf->addr = (void *)((unsigned long)old_buf->addr & mask);
- new_buf->addr += hr;
-
- new_buf->handle = old_buf->handle & mask;
- new_buf->handle += rx_ring->xsk_umem->headroom;
-
- old_buf->addr = NULL;
-}
-
-/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
* @rx_buf: zero-copy Rx buffer
- * @xdp: XDP buffer
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int datasize_hard = xdp->data_end -
- xdp->data_hard_start;
+ unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
+ unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
+ unsigned int datasize_hard = rx_buf->xdp->data_end -
+ rx_buf->xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -769,13 +487,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
- ice_reuse_rx_buf_zc(rx_ring, rx_buf);
-
+ xsk_buff_free(rx_buf->xdp);
+ rx_buf->xdp = NULL;
return skb;
}
@@ -802,7 +520,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
}
act = bpf_prog_run_xdp(xdp_prog, xdp);
- xdp->handle += xdp->data - xdp->data_hard_start;
switch (act) {
case XDP_PASS:
break;
@@ -842,9 +559,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int xdp_xmit = 0;
bool failure = false;
- struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -856,8 +570,8 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
u8 rx_ptype;
if (cleaned_count >= ICE_RX_BUF_WRITE) {
- failure |= ice_alloc_rx_bufs_fast_zc(rx_ring,
- cleaned_count);
+ failure |= ice_alloc_rx_bufs_zc(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -878,25 +592,19 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
- rx_buf = ice_get_rx_buf_zc(rx_ring, size);
- if (!rx_buf->addr)
- break;
- xdp.data = rx_buf->addr;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + size;
- xdp.handle = rx_buf->handle;
+ rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+ rx_buf->xdp->data_end = rx_buf->xdp->data + size;
+ xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
- xdp_res = ice_run_xdp_zc(rx_ring, &xdp);
+ xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
if (xdp_res) {
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
- rx_buf->addr = NULL;
- } else {
- ice_reuse_rx_buf_zc(rx_ring, rx_buf);
- }
+ else
+ xsk_buff_free(rx_buf->xdp);
+ rx_buf->xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
cleaned_count++;
@@ -906,7 +614,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
}
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, rx_buf, &xdp);
+ skb = ice_construct_skb_zc(rx_ring, rx_buf);
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
break;
@@ -977,17 +685,16 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
break;
- dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
-
- dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
- DMA_BIDIRECTIONAL);
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ desc.len);
tx_buf->bytecount = desc.len;
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD,
- 0, desc.len, 0);
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
@@ -1163,11 +870,10 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
- if (!rx_buf->addr)
+ if (!rx_buf->xdp)
continue;
- xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_buf->handle);
- rx_buf->addr = NULL;
+ rx_buf->xdp = NULL;
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 8a4ba7c6d549..fc1a06b4df36 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -10,11 +10,10 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
-void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
@@ -27,12 +26,6 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
return -EOPNOTSUPP;
}
-static inline void
-ice_zca_free(struct zero_copy_allocator __always_unused *zca,
- unsigned long __always_unused handle)
-{
-}
-
static inline int
ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
int __always_unused budget)
@@ -48,8 +41,8 @@ ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
}
static inline bool
-ice_alloc_rx_bufs_slow_zc(struct ice_ring __always_unused *rx_ring,
- u16 __always_unused count)
+ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
+ u16 __always_unused count)
{
return false;
}