Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3pf')
 drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile      |    2
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h   |  147
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c   |   14
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c   | 1088
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h   |   83
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  | 2480
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h  |  377
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c   |   58
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c  |    4
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c    |   18
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h    |    6
 11 files changed, 3314 insertions, 963 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index cb8ddd043476..580e81743681 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -6,6 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 821d4c2f84bd..872cd4bdd70d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -175,21 +175,22 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
+ HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
- HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
-
- /* Multicast linear table commands */
- HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
- HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021,
- HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022,
- HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023,
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+ /* Flow Director commands */
+ HCLGE_OPC_FD_MODE_CTRL = 0x1200,
+ HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
+ HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
+ HCLGE_OPC_FD_TCAM_OP = 0x1203,
+ HCLGE_OPC_FD_AD_OP = 0x1204,
+
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
@@ -208,6 +209,28 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+
+ /* Error INT commands */
+ HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CMD = 0x082d,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CE = 0x082f,
+ HCLGE_TM_SCH_ECC_ERR_RINT_NFE = 0x0830,
+ HCLGE_TM_SCH_ECC_ERR_RINT_FE = 0x0831,
+ HCLGE_TM_SCH_MBIT_ECC_INFO_CMD = 0x0833,
+ HCLGE_COMMON_ECC_INT_CFG = 0x1505,
+ HCLGE_IGU_EGU_TNL_INT_QUERY = 0x1802,
+ HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
+ HCLGE_IGU_EGU_TNL_INT_CLR = 0x1804,
+ HCLGE_IGU_COMMON_INT_QUERY = 0x1805,
+ HCLGE_IGU_COMMON_INT_EN = 0x1806,
+ HCLGE_IGU_COMMON_INT_CLR = 0x1807,
+ HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
+ HCLGE_TM_QCN_MEM_INT_INFO_CMD = 0x1A17,
+ HCLGE_PPP_CMD0_INT_CMD = 0x2100,
+ HCLGE_PPP_CMD1_INT_CMD = 0x2101,
+ HCLGE_NCSI_INT_QUERY = 0x2400,
+ HCLGE_NCSI_INT_EN = 0x2401,
+ HCLGE_NCSI_INT_CLR = 0x2402,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -395,6 +418,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
#define HCLGE_CFG_SPEED_ABILITY_S 0
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HCLGE_CFG_UMV_TBL_SPACE_S 16
+#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
struct hclge_cfg_param_cmd {
__le32 offset;
@@ -584,13 +609,12 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
-#define HCLGE_VLAN_MASK_EN_B 0
-struct hclge_mac_vlan_mask_entry_cmd {
- u8 rsv0[2];
- u8 vlan_mask;
- u8 rsv1;
- u8 mac_mask[6];
- u8 rsv2[14];
+#define HCLGE_UMV_SPC_ALC_B 0
+struct hclge_umv_spc_alc_cmd {
+ u8 allocate;
+ u8 rsv1[3];
+ __le32 space_size;
+ u8 rsv2[16];
};
#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
@@ -615,30 +639,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-#define HCLGE_CFG_MTA_MAC_SEL_S 0
-#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
-#define HCLGE_CFG_MTA_MAC_EN_B 7
-struct hclge_mta_filter_mode_cmd {
- u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */
- u8 rsv[23];
-};
-
-#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0
-struct hclge_cfg_func_mta_filter_cmd {
- u8 accept; /* Only used lowest 1 bit */
- u8 function_id;
- u8 rsv[22];
-};
-
-#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0
-#define HCLGE_CFG_MTA_ITEM_IDX_S 0
-#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0)
-struct hclge_cfg_func_mta_item_cmd {
- __le16 item_idx; /* Only used lowest 12 bit */
- u8 accept; /* Only used lowest 1 bit */
- u8 rsv[21];
-};
-
struct hclge_mac_vlan_add_cmd {
__le16 flags;
__le16 mac_addr_hi16;
@@ -778,6 +778,7 @@ struct hclge_reset_cmd {
};
#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
+#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
#define HCLGE_CMD_SERDES_DONE_B BIT(0)
#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1)
struct hclge_serdes_lb_cmd {
@@ -818,6 +819,76 @@ struct hclge_set_led_state_cmd {
u8 rsv2[20];
};
+struct hclge_get_fd_mode_cmd {
+ u8 mode;
+ u8 enable;
+ u8 rsv[22];
+};
+
+struct hclge_get_fd_allocation_cmd {
+ __le32 stage1_entry_num;
+ __le32 stage2_entry_num;
+ __le16 stage1_counter_num;
+ __le16 stage2_counter_num;
+ u8 rsv[12];
+};
+
+struct hclge_set_fd_key_config_cmd {
+ u8 stage;
+ u8 key_select;
+ u8 inner_sipv6_word_en;
+ u8 inner_dipv6_word_en;
+ u8 outer_sipv6_word_en;
+ u8 outer_dipv6_word_en;
+ u8 rsv1[2];
+ __le32 tuple_mask;
+ __le32 meta_data_mask;
+ u8 rsv2[8];
+};
+
+#define HCLGE_FD_EPORT_SW_EN_B 0
+struct hclge_fd_tcam_config_1_cmd {
+ u8 stage;
+ u8 xy_sel;
+ u8 port_info;
+ u8 rsv1[1];
+ __le32 index;
+ u8 entry_vld;
+ u8 rsv2[7];
+ u8 tcam_data[8];
+};
+
+struct hclge_fd_tcam_config_2_cmd {
+ u8 tcam_data[24];
+};
+
+struct hclge_fd_tcam_config_3_cmd {
+ u8 tcam_data[20];
+ u8 rsv[4];
+};
+
+#define HCLGE_FD_AD_DROP_B 0
+#define HCLGE_FD_AD_DIRECT_QID_B 1
+#define HCLGE_FD_AD_QID_S 2
+#define HCLGE_FD_AD_QID_M GENMASK(12, 2)
+#define HCLGE_FD_AD_USE_COUNTER_B 12
+#define HCLGE_FD_AD_COUNTER_NUM_S 13
+#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
+#define HCLGE_FD_AD_NXT_STEP_B 20
+#define HCLGE_FD_AD_NXT_KEY_S 21
+#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21)
+#define HCLGE_FD_AD_WR_RULE_ID_B 0
+#define HCLGE_FD_AD_RULE_ID_S 1
+#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1)
+
+struct hclge_fd_ad_config_cmd {
+ u8 stage;
+ u8 rsv1[3];
+ __le32 index;
+ __le64 ad_data;
+ u8 rsv2[8];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
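
As a reading aid for the new flow-director definitions above, the following is a minimal sketch of how the ad_data word of struct hclge_fd_ad_config_cmd could be composed from the HCLGE_FD_AD_* fields; the helper name and calling context are assumptions and not part of this patch (the real caller would also store the result with cpu_to_le64()).

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Illustrative only, not part of this patch: build an FD action word that
 * forwards matching packets to a given queue, using the bit/field
 * definitions added above.
 */
static u64 hclge_fd_sketch_ad_data(u16 queue_id)
{
	u64 ad_data = 0;

	/* forward to a queue rather than dropping */
	ad_data |= BIT(HCLGE_FD_AD_DIRECT_QID_B);
	/* queue id lives in bits 12:2 of the action word */
	ad_data |= FIELD_PREP(HCLGE_FD_AD_QID_M, queue_id);
	/* a drop rule would set BIT(HCLGE_FD_AD_DROP_B) instead */

	return ad_data;
}
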
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index f08ebb7caaaf..e72f724123d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -73,6 +73,7 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
u8 *tc, bool *changed)
{
+ bool has_ets_tc = false;
u32 total_ets_bw = 0;
u8 max_tc = 0;
u8 i;
@@ -100,13 +101,14 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
*changed = true;
total_ets_bw += ets->tc_tx_bw[i];
- break;
+ has_ets_tc = true;
+ break;
default:
return -EINVAL;
}
}
- if (total_ets_bw != BW_PERCENT)
+ if (has_ets_tc && total_ets_bw != BW_PERCENT)
return -EINVAL;
*tc = max_tc + 1;
@@ -182,7 +184,9 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
return ret;
- hclge_tm_schd_info_update(hdev, num_tc);
+ ret = hclge_tm_schd_info_update(hdev, num_tc);
+ if (ret)
+ return ret;
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -308,7 +312,9 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
return -EINVAL;
}
- hclge_tm_schd_info_update(hdev, tc);
+ ret = hclge_tm_schd_info_update(hdev, tc);
+ if (ret)
+ return ret;
ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
if (ret)
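
The hclge_ets_validate() change above only enforces the 100% bandwidth sum when at least one TC actually runs ETS. A hedged, assumed example configuration that the old check would have rejected and the new one accepts:

#include <linux/dcbnl.h>

/* Assumed example, illustrative only: every TC is strict priority, so no
 * TC contributes ETS bandwidth and total_ets_bw stays 0. With the new
 * has_ets_tc flag this no longer fails the BW_PERCENT (100%) check.
 */
static const struct ieee_ets sketch_all_strict_ets = {
	.tc_tsa   = { [0 ... IEEE_8021QAZ_MAX_TCS - 1] = IEEE_8021QAZ_TSA_STRICT },
	.tc_tx_bw = { 0 },
};
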
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
new file mode 100644
index 000000000000..f7e363b90fe0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#include "hclge_err.h"
+
+static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" },
+ { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
+ { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
+ { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
+ { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_igu_com_err_int[] = {
+ { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = {
+ { .int_msk = BIT(0), .msg = "rx_buf_overflow" },
+ { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
+ { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
+ { .int_msk = BIT(3), .msg = "tx_buf_overflow" },
+ { .int_msk = BIT(4), .msg = "tx_buf_underrun" },
+ { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ncsi_err_int[] = {
+ { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int0[] = {
+ { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" },
+ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" },
+ { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" },
+ { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" },
+ { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" },
+ { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" },
+ { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" },
+ { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" },
+ { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" },
+ { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" },
+ { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" },
+ { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" },
+ { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" },
+ { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" },
+ { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_1bit_err" },
+ { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" },
+ { .int_msk = BIT(27),
+ .msg = "flow_director_ad_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(28),
+ .msg = "flow_director_ad_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(29),
+ .msg = "rx_vlan_tag_memory_ecc_1bit_err" },
+ { .int_msk = BIT(30),
+ .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int1[] = {
+ { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
+ { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" },
+ { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
+ { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
+ { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
+ { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
+ { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
+ { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
+ { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" },
+ { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
+ { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
+ { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
+ { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
+ { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
+ { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
+ { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
+ { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
+ { .int_msk = BIT(27),
+ .msg = "flow_director_ad_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(28),
+ .msg = "flow_director_ad_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(29),
+ .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
+ { .int_msk = BIT(30),
+ .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_pf_int[] = {
+ { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" },
+ { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int2[] = {
+ { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int3[] = {
+ { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
+ { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+struct hclge_tm_sch_ecc_info {
+ const char *name;
+};
+
+static const struct hclge_tm_sch_ecc_info hclge_tm_sch_ecc_err[7][15] = {
+ {
+ { .name = "QSET_QUEUE_CTRL:PRI_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPA_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPB_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRA_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRB_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPA_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPB_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRA_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRB_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:QS_LINKLIST TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPA_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPB_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRA_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRB_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:QS_DEFICITCNT TAB" },
+ },
+ {
+ { .name = "ROCE_QUEUE_CTRL:QS_LEN TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QS_TPTR TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QS_HPTR TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QLINKLIST TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QCLEN TAB" },
+ },
+ {
+ { .name = "NIC_QUEUE_CTRL:QS_LEN TAB" },
+ { .name = "NIC_QUEUE_CTRL:QS_TPTR TAB" },
+ { .name = "NIC_QUEUE_CTRL:QS_HPTR TAB" },
+ { .name = "NIC_QUEUE_CTRL:QLINKLIST TAB" },
+ { .name = "NIC_QUEUE_CTRL:QCLEN TAB" },
+ },
+ {
+ { .name = "RAM_CFG_CTRL:CSHAP TAB" },
+ { .name = "RAM_CFG_CTRL:PSHAP TAB" },
+ },
+ {
+ { .name = "SHAPER_CTRL:PSHAP TAB" },
+ },
+ {
+ { .name = "MSCH_CTRL" },
+ },
+ {
+ { .name = "TOP_CTRL" },
+ },
+};
+
+static const struct hclge_hw_error hclge_tm_sch_err_int[] = {
+ { .int_msk = BIT(0), .msg = "tm_sch_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(12),
+ .msg = "tm_sch_port_shap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(13),
+ .msg = "tm_sch_port_shap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(14),
+ .msg = "tm_sch_pg_pshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(15),
+ .msg = "tm_sch_pg_pshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(16),
+ .msg = "tm_sch_pg_cshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(17),
+ .msg = "tm_sch_pg_cshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(18),
+ .msg = "tm_sch_pri_pshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(19),
+ .msg = "tm_sch_pri_pshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(20),
+ .msg = "tm_sch_pri_cshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(21),
+ .msg = "tm_sch_pri_cshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_full_err" },
+ { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_empty_err" },
+ { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_full_err" },
+ { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_empty_err" },
+ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_full_err" },
+ { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_empty_err" },
+ { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_full_err" },
+ { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_empty_err" },
+ { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_full_err" },
+ { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_empty_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_qcn_ecc_err_int[] = {
+ { .int_msk = BIT(0), .msg = "qcn_byte_mem_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "qcn_time_mem_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "qcn_fb_mem_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "qcn_link_mem_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "qcn_rate_mem_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "qcn_tmplt_mem_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "qcn_shap_cfg_mem_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "qcn_gp0_barrel_mem_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
+ { .int_msk = BIT(16), .msg = "qcn_gp1_barrel_mem_ecc_1bit_err" },
+ { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
+ { .int_msk = BIT(18), .msg = "qcn_gp2_barrel_mem_ecc_1bit_err" },
+ { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
+ { .int_msk = BIT(20), .msg = "qcn_gp3_barral_mem_ecc_1bit_err" },
+ { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static void hclge_log_error(struct device *dev,
+ const struct hclge_hw_error *err_list,
+ u32 err_sts)
+{
+ const struct hclge_hw_error *err;
+ int i = 0;
+
+ while (err_list[i].msg) {
+ err = &err_list[i];
+ if (!(err->int_msk & err_sts)) {
+ i++;
+ continue;
+ }
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err_sts);
+ i++;
+ }
+}
+
+/* hclge_cmd_query_error: read the error information
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @cmd: command opcode
+ * @flag: flag for extended command structure
+ * @w_num: offset for setting the read interrupt type.
+ * @int_type: select which type of the interrupt for which the error
+ * info will be read(RAS-CE/RAS-NFE/RAS-FE etc).
+ *
+ * This function queries the error info from the hw register(s) using a command
+ */
+static int hclge_cmd_query_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 cmd,
+ u16 flag, u8 w_num,
+ enum hclge_err_int_type int_type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int num = 1;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ num = 2;
+ }
+ if (w_num)
+ desc[0].data[w_num] = cpu_to_le32(int_type);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "query error cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+/* hclge_cmd_clear_error: clear the error status
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @desc_src: prefilled descriptor from the previous command for reusing
+ * @cmd: command opcode
+ * @flag: flag for extended command structure
+ *
+ * This function clears the error status in the hw register(s) using a command
+ */
+static int hclge_cmd_clear_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ struct hclge_desc *desc_src,
+ u32 cmd, u16 flag)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int num = 1;
+ int ret, i;
+
+ if (cmd) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+ num = 2;
+ }
+ if (desc_src) {
+ for (i = 0; i < 6; i++) {
+ desc[0].data[i] = desc_src[0].data[i];
+ if (flag)
+ desc[1].data[i] = desc_src[1].data[i];
+ }
+ }
+ } else {
+ hclge_cmd_reuse_desc(&desc[0], false);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_reuse_desc(&desc[1], false);
+ num = 2;
+ }
+ }
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear error cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int hclge_enable_common_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
+
+ if (en) {
+ /* enable COMMON error interrupts */
+ desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
+ desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
+ HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
+ desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN);
+ desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
+ } else {
+ /* disable COMMON error interrupts */
+ desc[0].data[0] = 0;
+ desc[0].data[2] = 0;
+ desc[0].data[3] = 0;
+ desc[0].data[4] = 0;
+ desc[0].data[5] = 0;
+ }
+ desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
+ desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
+ HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
+ desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
+ desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK);
+ desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable COMMON err interrupts\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->pdev->revision < 0x21)
+ return 0;
+
+ /* enable/disable NCSI error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable NCSI error interrupts\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* enable/disable error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+ desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev,
+ "failed(%d) to enable/disable IGU common interrupts\n",
+ ret);
+ return ret;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+ desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev,
+ "failed(%d) to enable/disable IGU-EGU TNL interrupts\n",
+ ret);
+ return ret;
+ }
+
+ ret = hclge_enable_ncsi_error(hdev, en);
+ if (ret)
+ dev_err(dev, "fail(%d) to en/disable err int\n", ret);
+
+ return ret;
+}
+
+static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
+ bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* enable/disable PPP error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+
+ if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
+ } else {
+ desc[0].data[0] = 0;
+ desc[0].data[1] = 0;
+ }
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
+ } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
+ } else {
+ desc[0].data[0] = 0;
+ desc[0].data[1] = 0;
+ }
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable PPP error interrupts\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
+ en);
+ if (ret) {
+ dev_err(dev,
+ "failed(%d) to enable/disable PPP error intr 0,1\n",
+ ret);
+ return ret;
+ }
+
+ ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
+ en);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable PPP error intr 2,3\n",
+ ret);
+
+ return ret;
+}
+
+int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* enable TM SCH hw errors */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev, "failed(%d) to configure TM SCH errors\n", ret);
+ return ret;
+ }
+
+ /* enable TM QCN hw errors */
+ ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
+ 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read TM QCN CFG status\n", ret);
+ return ret;
+ }
+
+ hclge_cmd_reuse_desc(&desc, false);
+ if (en)
+ desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+ else
+ desc.data[1] = 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to configure TM QCN mem errors\n", ret);
+
+ return ret;
+}
+
+static void hclge_process_common_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ u32 err_sts;
+ int ret;
+
+ /* read err sts */
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_COMMON_ECC_INT_CFG,
+ HCLGE_CMD_FLAG_NEXT, 0, 0);
+ if (ret) {
+ dev_err(dev,
+ "failed(=%d) to query COMMON error interrupt status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts);
+
+ err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts);
+
+ err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT)
+ & HCLGE_CMDQ_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts);
+
+ if ((le32_to_cpu(desc[0].data[3])) & BIT(0))
+ dev_warn(dev, "imp_rd_data_poison_err found\n");
+
+ err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) &
+ HCLGE_TQP_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts);
+
+ err_sts = (le32_to_cpu(desc[0].data[5])) &
+ HCLGE_IMP_ITCM4_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts);
+
+ /* clear error interrupts */
+ desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK |
+ HCLGE_CMDQ_ROCEE_ECC_CLR_MASK);
+ desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK);
+ desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK);
+
+ ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
+ HCLGE_CMD_FLAG_NEXT);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to clear COMMON error interrupt status\n",
+ ret);
+}
+
+static void hclge_process_ncsi_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc_rd;
+ struct hclge_desc desc_wr;
+ u32 err_sts;
+ int ret;
+
+ if (hdev->pdev->revision < 0x21)
+ return;
+
+ /* read NCSI error status */
+ ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY,
+ 0, 1, HCLGE_NCSI_ERR_INT_TYPE);
+ if (ret) {
+ dev_err(dev,
+ "failed(=%d) to query NCSI error interrupt status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = le32_to_cpu(desc_rd.data[0]);
+ hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts);
+
+ /* clear err int */
+ ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
+ HCLGE_NCSI_INT_CLR, 0);
+ if (ret)
+ dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n",
+ ret);
+}
+
+static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type int_type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc_rd;
+ struct hclge_desc desc_wr;
+ u32 err_sts;
+ int ret;
+
+ /* read IGU common err sts */
+ ret = hclge_cmd_query_error(hdev, &desc_rd,
+ HCLGE_IGU_COMMON_INT_QUERY,
+ 0, 1, int_type);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to query IGU common int status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = le32_to_cpu(desc_rd.data[0]) &
+ HCLGE_IGU_COM_INT_MASK;
+ hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts);
+
+ /* clear err int */
+ ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
+ HCLGE_IGU_COMMON_INT_CLR, 0);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to clear IGU common int status\n",
+ ret);
+ return;
+ }
+
+ /* read IGU-EGU TNL err sts */
+ ret = hclge_cmd_query_error(hdev, &desc_rd,
+ HCLGE_IGU_EGU_TNL_INT_QUERY,
+ 0, 1, int_type);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = le32_to_cpu(desc_rd.data[0]) &
+ HCLGE_IGU_EGU_TNL_INT_MASK;
+ hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts);
+
+ /* clear err int */
+ ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
+ HCLGE_IGU_EGU_TNL_INT_CLR, 0);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n",
+ ret);
+ return;
+ }
+
+ hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE);
+}
+
+static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
+ enum hclge_err_int_type int_type)
+{
+ enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
+ struct device *dev = &hdev->pdev->dev;
+ const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3;
+ struct hclge_desc desc[2];
+ u32 err_sts;
+ int ret;
+
+ /* read PPP INT sts */
+ ret = hclge_cmd_query_error(hdev, &desc[0], cmd,
+ HCLGE_CMD_FLAG_NEXT, 5, int_type);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to query PPP interrupt status\n",
+ ret);
+ return -EIO;
+ }
+
+ /* log error */
+ if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
+ hw_err_lst1 = &hclge_ppp_mpf_int0[0];
+ hw_err_lst2 = &hclge_ppp_mpf_int1[0];
+ hw_err_lst3 = &hclge_ppp_pf_int[0];
+ } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
+ hw_err_lst1 = &hclge_ppp_mpf_int2[0];
+ hw_err_lst2 = &hclge_ppp_mpf_int3[0];
+ } else {
+ dev_err(dev, "invalid command(=%d)\n", cmd);
+ return -EINVAL;
+ }
+
+ err_sts = le32_to_cpu(desc[0].data[2]);
+ if (err_sts) {
+ hclge_log_error(dev, hw_err_lst1, err_sts);
+ reset_level = HNAE3_FUNC_RESET;
+ }
+
+ err_sts = le32_to_cpu(desc[0].data[3]);
+ if (err_sts) {
+ hclge_log_error(dev, hw_err_lst2, err_sts);
+ reset_level = HNAE3_FUNC_RESET;
+ }
+
+ err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
+ if (err_sts) {
+ hclge_log_error(dev, hw_err_lst3, err_sts);
+ reset_level = HNAE3_FUNC_RESET;
+ }
+
+ /* clear PPP INT */
+ ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
+ HCLGE_CMD_FLAG_NEXT);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to clear PPP interrupt status\n",
+ ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void hclge_process_ppp_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type int_type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ /* read PPP INT0,1 sts */
+ ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD,
+ int_type);
+ if (ret < 0) {
+ dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n",
+ ret);
+ return;
+ }
+
+ /* read err PPP INT2,3 sts */
+ ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD,
+ int_type);
+ if (ret < 0)
+ dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n",
+ ret);
+}
+
+static void hclge_process_tm_sch_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ const struct hclge_tm_sch_ecc_info *tm_sch_ecc_info;
+ struct hclge_desc desc;
+ u32 ecc_info;
+ u8 module_no;
+ u8 ram_no;
+ int ret;
+
+ /* read TM scheduler errors */
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_MBIT_ECC_INFO_CMD, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH mbit ECC err info\n", ret);
+ return;
+ }
+ ecc_info = le32_to_cpu(desc.data[0]);
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CMD, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH ECC err status\n", ret);
+ return;
+ }
+
+ /* log TM scheduler errors */
+ if (le32_to_cpu(desc.data[0])) {
+ hclge_log_error(dev, &hclge_tm_sch_err_int[0],
+ le32_to_cpu(desc.data[0]));
+ if (le32_to_cpu(desc.data[0]) & 0x2) {
+ module_no = (ecc_info >> 20) & 0xF;
+ ram_no = (ecc_info >> 16) & 0xF;
+ tm_sch_ecc_info =
+ &hclge_tm_sch_ecc_err[module_no][ram_no];
+ dev_warn(dev, "ecc err module:ram=%s\n",
+ tm_sch_ecc_info->name);
+ dev_warn(dev, "ecc memory address = 0x%x\n",
+ ecc_info & 0xFFFF);
+ }
+ }
+
+ /* clear TM scheduler errors */
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear TM SCH error status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CE, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH CE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear TM SCH CE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_NFE, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH NFE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear TM SCH NFE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_FE, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH FE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret)
+ dev_err(dev, "failed(%d) to clear TM SCH FE status\n", ret);
+}
+
+static void hclge_process_tm_qcn_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* read QCN errors */
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_QCN_MEM_INT_INFO_CMD, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read QCN ECC err status\n", ret);
+ return;
+ }
+
+ /* log QCN errors */
+ if (le32_to_cpu(desc.data[0]))
+ hclge_log_error(dev, &hclge_qcn_ecc_err_int[0],
+ le32_to_cpu(desc.data[0]));
+
+ /* clear QCN errors */
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret)
+ dev_err(dev, "failed(%d) to clear QCN error status\n", ret);
+}
+
+static void hclge_process_tm_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type type)
+{
+ hclge_process_tm_sch_error(hdev);
+ hclge_process_tm_qcn_error(hdev);
+}
+
+static const struct hclge_hw_blk hw_blk[] = {
+ { .msk = BIT(0), .name = "IGU_EGU",
+ .enable_error = hclge_enable_igu_egu_error,
+ .process_error = hclge_process_igu_egu_error, },
+ { .msk = BIT(5), .name = "COMMON",
+ .enable_error = hclge_enable_common_error,
+ .process_error = hclge_process_common_error, },
+ { .msk = BIT(4), .name = "TM",
+ .enable_error = hclge_enable_tm_hw_error,
+ .process_error = hclge_process_tm_error, },
+ { .msk = BIT(1), .name = "PPP",
+ .enable_error = hclge_enable_ppp_error,
+ .process_error = hclge_process_ppp_error, },
+ { /* sentinel */ }
+};
+
+int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret = 0;
+ int i = 0;
+
+ while (hw_blk[i].name) {
+ if (!hw_blk[i].enable_error) {
+ i++;
+ continue;
+ }
+ ret = hw_blk[i].enable_error(hdev, state);
+ if (ret) {
+ dev_err(dev, "fail(%d) to en/disable err int\n", ret);
+ return ret;
+ }
+ i++;
+ }
+
+ return ret;
+}
+
+pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ u32 sts, val;
+ int i = 0;
+
+ sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+ /* Processing Non-fatal errors */
+ if (sts & HCLGE_RAS_REG_NFE_MASK) {
+ val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF;
+ i = 0;
+ while (hw_blk[i].name) {
+ if (!(hw_blk[i].msk & val)) {
+ i++;
+ continue;
+ }
+ dev_warn(dev, "%s ras non-fatal error identified\n",
+ hw_blk[i].name);
+ if (hw_blk[i].process_error)
+ hw_blk[i].process_error(hdev,
+ HCLGE_ERR_INT_RAS_NFE);
+ i++;
+ }
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
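
For orientation, the error tables above are sentinel-terminated and decoded bit-by-bit by hclge_log_error(). A small assumed example of how one of them is consumed:

/* Illustrative only: a status word of 0x5 checked against the IGU common
 * table matches BIT(0) and BIT(2), so hclge_log_error() warns with
 * "igu_rx_buf0_ecc_mbit_err" and "igu_rx_buf1_ecc_mbit_err".
 */
hclge_log_error(dev, &hclge_igu_com_err_int[0], 0x5);
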
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
new file mode 100644
index 000000000000..e0e3b5861495
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_ERR_H
+#define __HCLGE_ERR_H
+
+#include "hclge_main.h"
+
+#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
+#define HCLGE_RAS_REG_FE_MASK 0xFF
+#define HCLGE_RAS_REG_NFE_MASK 0xFF00
+#define HCLGE_RAS_REG_NFE_SHIFT 8
+
+#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
+#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000
+#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN 0x300
+#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK 0x300
+#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN 0xFFFF
+#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK 0xFFFF
+#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN 0xFFFF0000
+#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK 0xFFFF0000
+#define HCLGE_IMP_RD_POISON_ERR_INT_EN 0x0100
+#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100
+#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF
+#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF
+#define HCLGE_IGU_ERR_INT_EN 0x0000066F
+#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F
+#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF
+#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF
+#define HCLGE_PPP_PF_ERR_INT_EN 0x0003
+#define HCLGE_PPP_PF_ERR_INT_EN_MASK 0x0003
+#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
+#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
+#define HCLGE_NCSI_ERR_INT_EN 0x3
+#define HCLGE_NCSI_ERR_INT_TYPE 0x9
+
+#define HCLGE_IMP_TCM_ECC_INT_MASK 0xFFFF
+#define HCLGE_IMP_ITCM4_ECC_INT_MASK 0x3
+#define HCLGE_CMDQ_ECC_INT_MASK 0xFFFF
+#define HCLGE_CMDQ_ROC_ECC_INT_SHIFT 16
+#define HCLGE_TQP_ECC_INT_MASK 0xFFF
+#define HCLGE_TQP_ECC_INT_SHIFT 16
+#define HCLGE_IMP_TCM_ECC_CLR_MASK 0xFFFF
+#define HCLGE_IMP_ITCM4_ECC_CLR_MASK 0x3
+#define HCLGE_CMDQ_NIC_ECC_CLR_MASK 0xFFFF
+#define HCLGE_CMDQ_ROCEE_ECC_CLR_MASK 0xFFFF0000
+#define HCLGE_TQP_IMP_ERR_CLR_MASK 0x0FFF0001
+#define HCLGE_IGU_COM_INT_MASK 0xF
+#define HCLGE_IGU_EGU_TNL_INT_MASK 0x3F
+#define HCLGE_PPP_PF_INT_MASK 0x100
+
+enum hclge_err_int_type {
+ HCLGE_ERR_INT_MSIX = 0,
+ HCLGE_ERR_INT_RAS_CE = 1,
+ HCLGE_ERR_INT_RAS_NFE = 2,
+ HCLGE_ERR_INT_RAS_FE = 3,
+};
+
+struct hclge_hw_blk {
+ u32 msk;
+ const char *name;
+ int (*enable_error)(struct hclge_dev *hdev, bool en);
+ void (*process_error)(struct hclge_dev *hdev,
+ enum hclge_err_int_type type);
+};
+
+struct hclge_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
+int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en);
+pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev);
+#endif
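
A hedged sketch of how the exported entry point above might be wired into a PCI error handler; everything except hclge_process_ras_hw_error() is an assumption and not part of this patch.

#include <linux/pci.h>
#include "hclge_err.h"

/* Sketch only -- names other than hclge_process_ras_hw_error() are assumed.
 * A pci_error_handlers .error_detected callback could hand the RAS status
 * off to the new processing path roughly like this.
 */
static pci_ers_result_t hns3_sketch_error_detected(struct pci_dev *pdev,
						   enum pci_channel_state state)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (!ae_dev)
		return PCI_ERS_RESULT_NONE;

	/* log/clear per-block RAS errors and request a reset if needed */
	return hclge_process_ras_hw_error(ae_dev);
}
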
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 8577dfc799ad..5234b5373ed3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -19,20 +19,18 @@
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
+#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
-#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
- enum hclge_mta_dmac_sel_type mta_mac_sel,
- bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size, bool is_alloc);
static struct hnae3_ae_algo ae_algo;
@@ -51,175 +49,12 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
- "Mac Loopback test",
- "Serdes Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
"Phy Loopback test"
};
-static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
- {"igu_rx_oversize_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
- {"igu_rx_undersize_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
- {"igu_rx_out_all_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
- {"igu_rx_uni_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
- {"igu_rx_multi_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
- {"igu_rx_broad_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
- {"egu_tx_out_all_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
- {"egu_tx_uni_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
- {"egu_tx_multi_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
- {"egu_tx_broad_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
- {"ssu_ppp_mac_key_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
- {"ssu_ppp_host_key_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
- {"ppp_ssu_mac_rlt_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
- {"ppp_ssu_host_rlt_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
- {"ssu_tx_in_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
- {"ssu_tx_out_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
- {"ssu_rx_in_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
- {"ssu_rx_out_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
-};
-
-static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
- {"igu_rx_err_pkt",
- HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
- {"igu_rx_no_eof_pkt",
- HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
- {"igu_rx_no_sof_pkt",
- HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
- {"egu_tx_1588_pkt",
- HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
- {"ssu_full_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
- {"ssu_part_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
- {"ppp_key_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
- {"ppp_rlt_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
- {"ssu_key_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
- {"pkt_curr_buf_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
- {"qcn_fb_rcv_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
- {"qcn_fb_drop_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
- {"qcn_fb_invaild_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
- {"rx_packet_tc0_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
- {"rx_packet_tc1_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
- {"rx_packet_tc2_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
- {"rx_packet_tc3_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
- {"rx_packet_tc4_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
- {"rx_packet_tc5_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
- {"rx_packet_tc6_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
- {"rx_packet_tc7_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
- {"rx_packet_tc0_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
- {"rx_packet_tc1_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
- {"rx_packet_tc2_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
- {"rx_packet_tc3_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
- {"rx_packet_tc4_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
- {"rx_packet_tc5_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
- {"rx_packet_tc6_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
- {"rx_packet_tc7_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
- {"tx_packet_tc0_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
- {"tx_packet_tc1_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
- {"tx_packet_tc2_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
- {"tx_packet_tc3_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
- {"tx_packet_tc4_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
- {"tx_packet_tc5_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
- {"tx_packet_tc6_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
- {"tx_packet_tc7_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
- {"tx_packet_tc0_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
- {"tx_packet_tc1_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
- {"tx_packet_tc2_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
- {"tx_packet_tc3_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
- {"tx_packet_tc4_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
- {"tx_packet_tc5_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
- {"tx_packet_tc6_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
- {"tx_packet_tc7_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
- {"pkt_curr_buf_tc0_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
- {"pkt_curr_buf_tc1_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
- {"pkt_curr_buf_tc2_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
- {"pkt_curr_buf_tc3_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
- {"pkt_curr_buf_tc4_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
- {"pkt_curr_buf_tc5_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
- {"pkt_curr_buf_tc6_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
- {"pkt_curr_buf_tc7_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
- {"mb_uncopy_num",
- HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
- {"lo_pri_unicast_rlt_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
- {"hi_pri_multicast_rlt_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
- {"lo_pri_multicast_rlt_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
- {"rx_oq_drop_pkt_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
- {"tx_oq_drop_pkt_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
- {"nic_l2_err_drop_pkt_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
- {"roc_l2_err_drop_pkt_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
-};
-
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
{"mac_tx_mac_pause_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
@@ -394,109 +229,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
-{
-#define HCLGE_64_BIT_CMD_NUM 5
-#define HCLGE_64_BIT_RTN_DATANUM 4
- u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
- struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
- __le64 *desc_data;
- int i, k, n;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
- ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get 64 bit pkt stats fail, status = %d.\n", ret);
- return ret;
- }
-
- for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
- if (unlikely(i == 0)) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_64_BIT_RTN_DATANUM - 1;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_64_BIT_RTN_DATANUM;
- }
- for (k = 0; k < n; k++) {
- *data++ += le64_to_cpu(*desc_data);
- desc_data++;
- }
- }
-
- return 0;
-}
-
-static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
-{
- stats->pkt_curr_buf_cnt = 0;
- stats->pkt_curr_buf_tc0_cnt = 0;
- stats->pkt_curr_buf_tc1_cnt = 0;
- stats->pkt_curr_buf_tc2_cnt = 0;
- stats->pkt_curr_buf_tc3_cnt = 0;
- stats->pkt_curr_buf_tc4_cnt = 0;
- stats->pkt_curr_buf_tc5_cnt = 0;
- stats->pkt_curr_buf_tc6_cnt = 0;
- stats->pkt_curr_buf_tc7_cnt = 0;
-}
-
-static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
-{
-#define HCLGE_32_BIT_CMD_NUM 8
-#define HCLGE_32_BIT_RTN_DATANUM 8
-
- struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
- struct hclge_32_bit_stats *all_32_bit_stats;
- __le32 *desc_data;
- int i, k, n;
- u64 *data;
- int ret;
-
- all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
- data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);
-
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
- ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get 32 bit pkt stats fail, status = %d.\n", ret);
-
- return ret;
- }
-
- hclge_reset_partial_32bit_counter(all_32_bit_stats);
- for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
- if (unlikely(i == 0)) {
- __le16 *desc_data_16bit;
-
- all_32_bit_stats->igu_rx_err_pkt +=
- le32_to_cpu(desc[i].data[0]);
-
- desc_data_16bit = (__le16 *)&desc[i].data[1];
- all_32_bit_stats->igu_rx_no_eof_pkt +=
- le16_to_cpu(*desc_data_16bit);
-
- desc_data_16bit++;
- all_32_bit_stats->igu_rx_no_sof_pkt +=
- le16_to_cpu(*desc_data_16bit);
-
- desc_data = &desc[i].data[2];
- n = HCLGE_32_BIT_RTN_DATANUM - 4;
- } else {
- desc_data = (__le32 *)&desc[i];
- n = HCLGE_32_BIT_RTN_DATANUM;
- }
- for (k = 0; k < n; k++) {
- *data++ += le32_to_cpu(*desc_data);
- desc_data++;
- }
- }
-
- return 0;
-}
-
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -623,7 +355,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -631,7 +363,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -675,14 +407,8 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
struct net_device_stats *net_stats)
{
net_stats->tx_dropped = 0;
- net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
- net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
- net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
-
net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
@@ -717,12 +443,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
dev_err(&hdev->pdev->dev,
"Update MAC stats fail, status = %d.\n", status);
- status = hclge_32_bit_update_stats(hdev);
- if (status)
- dev_err(&hdev->pdev->dev,
- "Update 32 bit stats fail, status = %d.\n",
- status);
-
hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
@@ -743,18 +463,6 @@ static void hclge_update_stats(struct hnae3_handle *handle,
"Update MAC stats fail, status = %d.\n",
status);
- status = hclge_32_bit_update_stats(hdev);
- if (status)
- dev_err(&hdev->pdev->dev,
- "Update 32 bit stats fail, status = %d.\n",
- status);
-
- status = hclge_64_bit_update_stats(hdev);
- if (status)
- dev_err(&hdev->pdev->dev,
- "Update 64 bit stats fail, status = %d.\n",
- status);
-
status = hclge_tqps_update_stats(handle);
if (status)
dev_err(&hdev->pdev->dev,
@@ -768,7 +476,10 @@ static void hclge_update_stats(struct hnae3_handle *handle,
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
-#define HCLGE_LOOPBACK_TEST_FLAGS 0x7
+#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
+ HNAE3_SUPPORT_PHY_LOOPBACK |\
+ HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -782,19 +493,19 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
if (stringset == ETH_SS_TEST) {
/* clear loopback bit flags at first */
handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
- if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
+ if (hdev->pdev->revision >= 0x21 ||
+ hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
count += 1;
- handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
+ handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count++;
- handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
+ count += 2;
+ handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
- ARRAY_SIZE(g_all_32bit_stats_string) +
- ARRAY_SIZE(g_all_64bit_stats_string) +
hclge_tqps_get_sset_count(handle, stringset);
}
@@ -814,33 +525,29 @@ static void hclge_get_strings(struct hnae3_handle *handle,
g_mac_stats_string,
size,
p);
- size = ARRAY_SIZE(g_all_32bit_stats_string);
- p = hclge_comm_get_strings(stringset,
- g_all_32bit_stats_string,
- size,
- p);
- size = ARRAY_SIZE(g_all_64bit_stats_string);
- p = hclge_comm_get_strings(stringset,
- g_all_64bit_stats_string,
- size,
- p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
- if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
+ if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
+ memcpy(p,
+ hns3_nic_test_strs[HNAE3_LOOP_APP],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
memcpy(p,
- hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
+ hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
+ if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
memcpy(p,
- hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
+ hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
memcpy(p,
- hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
+ hns3_nic_test_strs[HNAE3_LOOP_PHY],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -857,14 +564,6 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string),
data);
- p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
- g_all_32bit_stats_string,
- ARRAY_SIZE(g_all_32bit_stats_string),
- p);
- p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
- g_all_64bit_stats_string,
- ARRAY_SIZE(g_all_64bit_stats_string),
- p);
p = hclge_tqps_get_stats(handle, p);
}
@@ -1079,6 +778,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_M,
HCLGE_CFG_SPEED_ABILITY_S);
+ cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_UMV_TBL_SPACE_M,
+ HCLGE_CFG_UMV_TBL_SPACE_S);
+ if (!cfg->umv_space)
+ cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
@@ -1157,6 +861,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
+ hdev->wanted_umv_size = cfg.umv_space;
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
@@ -1657,11 +1362,13 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
- u32 rx_all = hdev->pkt_buf_size;
+#define HCLGE_BUF_SIZE_UNIT 128
+ u32 rx_all = hdev->pkt_buf_size, aligned_mps;
int no_pfc_priv_num, pfc_priv_num;
struct hclge_priv_buf *priv;
int i;
+ aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
/* When DCB is not supported, rx private
@@ -1680,13 +1387,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
if (hdev->hw_tc_map & BIT(i)) {
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
- priv->wl.low = hdev->mps;
- priv->wl.high = priv->wl.low + hdev->mps;
+ priv->wl.low = aligned_mps;
+ priv->wl.high = priv->wl.low + aligned_mps;
priv->buf_size = priv->wl.high +
HCLGE_DEFAULT_DV;
} else {
priv->wl.low = 0;
- priv->wl.high = 2 * hdev->mps;
+ priv->wl.high = 2 * aligned_mps;
priv->buf_size = priv->wl.high;
}
} else {
@@ -1718,11 +1425,11 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
priv->wl.low = 128;
- priv->wl.high = priv->wl.low + hdev->mps;
+ priv->wl.high = priv->wl.low + aligned_mps;
priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
} else {
priv->wl.low = 0;
- priv->wl.high = hdev->mps;
+ priv->wl.high = aligned_mps;
priv->buf_size = priv->wl.high;
}
}
@@ -2066,19 +1773,17 @@ static int hclge_init_msi(struct hclge_dev *hdev)
return 0;
}
-static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
+static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
- struct hclge_mac *mac = &hdev->hw.mac;
- if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
- mac->duplex = (u8)duplex;
- else
- mac->duplex = HCLGE_MAC_FULL;
+ if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
+ duplex = HCLGE_MAC_FULL;
- mac->speed = speed;
+ return duplex;
}
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
+ u8 duplex)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
@@ -2138,7 +1843,23 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
return ret;
}
- hclge_check_speed_dup(hdev, duplex, speed);
+ return 0;
+}
+
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+{
+ int ret;
+
+ duplex = hclge_check_speed_dup(duplex, speed);
+ if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
+ return 0;
+
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ if (ret)
+ return ret;
+
+ hdev->hw.mac.speed = speed;
+ hdev->hw.mac.duplex = duplex;
return 0;
}
@@ -2224,42 +1945,17 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
return hdev->hw.mac.autoneg;
}
-static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
- bool mask_vlan,
- u8 *mac_mask)
-{
- struct hclge_mac_vlan_mask_entry_cmd *req;
- struct hclge_desc desc;
- int status;
-
- req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
-
- hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
- mask_vlan ? 1 : 0);
- ether_addr_copy(req->mac_mask, mac_mask);
-
- status = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- dev_err(&hdev->pdev->dev,
- "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
- status);
-
- return status;
-}
-
static int hclge_mac_init(struct hclge_dev *hdev)
{
struct hnae3_handle *handle = &hdev->vport[0].nic;
struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
- u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- struct hclge_vport *vport;
int mtu;
int ret;
- int i;
- ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
+ hdev->hw.mac.duplex);
if (ret) {
dev_err(&hdev->pdev->dev,
"Config mac speed dup fail ret=%d\n", ret);
@@ -2268,39 +1964,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
- /* Initialize the MTA table work mode */
- hdev->enable_mta = true;
- hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
-
- ret = hclge_set_mta_filter_mode(hdev,
- hdev->mta_mac_sel_type,
- hdev->enable_mta);
- if (ret) {
- dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
- ret);
- return ret;
- }
-
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- vport->accept_mta_mc = false;
-
- memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
- ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "set mta filter mode fail ret=%d\n", ret);
- return ret;
- }
- }
-
- ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "set default mac_vlan_mask fail ret=%d\n", ret);
- return ret;
- }
-
if (netdev)
mtu = netdev->mtu;
else
@@ -2360,10 +2023,13 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
int mac_state;
int link_stat;
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
+ return 0;
+
mac_state = hclge_get_mac_link_status(hdev);
if (hdev->hw.mac.phydev) {
- if (!genphy_read_status(hdev->hw.mac.phydev))
+ if (hdev->hw.mac.phydev->state == PHY_RUNNING)
link_stat = mac_state &
hdev->hw.mac.phydev->link;
else
@@ -2415,13 +2081,11 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev)
return ret;
}
- if ((mac.speed != speed) || (mac.duplex != duplex)) {
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac speed/duplex config failed %d\n", ret);
- return ret;
- }
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac speed/duplex config failed %d\n", ret);
+ return ret;
}
return 0;
@@ -2520,6 +2184,8 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
case HCLGE_VECTOR0_EVENT_MBX:
hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
break;
+ default:
+ break;
}
}
@@ -2793,8 +2459,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hnae3_handle *handle;
+ /* Initialize ae_dev reset status as well, in case enet layer wants to
+ * know if device is undergoing reset
+ */
+ ae_dev->reset_type = hdev->reset_type;
/* perform reset of the stack & ae device for a client */
handle = &hdev->vport[0].nic;
rtnl_lock();
@@ -2815,14 +2486,21 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
handle->last_reset_time = jiffies;
rtnl_unlock();
+ ae_dev->reset_type = HNAE3_NONE_RESET;
}
-static void hclge_reset_event(struct hnae3_handle *handle)
+static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclge_dev *hdev = ae_dev->priv;
- /* check if this is a new reset request and we are not here just because
+	/* We might end up getting called broadly because of the 2 cases below:
+ * 1. Recoverable error was conveyed through APEI and only way to bring
+ * normalcy is to reset.
+ * 2. A new reset request from the stack due to timeout
+ *
+	 * For the first case, error event might not have ae handle available.
+	 * Check if this is a new reset request and we are not here just because
* last reset attempt did not succeed and watchdog hit us again. We will
* know this if last reset request did not occur very recently (watchdog
 	 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*HZ)
@@ -2831,6 +2509,9 @@ static void hclge_reset_event(struct hnae3_handle *handle)
* want to make sure we throttle the reset request. Therefore, we will
* not allow it again before 3*HZ times.
*/
+ if (!handle)
+ handle = &hdev->vport[0].nic;
+
if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
return;
else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
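Expressed in wall-clock terms, the throttling windows used above work out roughly as follows (a sketch, assuming the usual HZ-to-seconds conversion):

/* last_reset_time + 3 * HZ      -> a request within ~3 s of the previous one
 *                                  is dropped as a repeat of a still-pending
 *                                  reset attempt
 * last_reset_time + 4 * 5 * HZ  -> a request arriving more than ~20 s later
 *                                  (four watchdog periods of 5 * HZ) is
 *                                  treated as a brand new reset request
 */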
@@ -3102,6 +2783,22 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
return ret;
}
+static void hclge_get_rss_type(struct hclge_vport *vport)
+{
+ if (vport->rss_tuple_sets.ipv4_tcp_en ||
+ vport->rss_tuple_sets.ipv4_udp_en ||
+ vport->rss_tuple_sets.ipv4_sctp_en ||
+ vport->rss_tuple_sets.ipv6_tcp_en ||
+ vport->rss_tuple_sets.ipv6_udp_en ||
+ vport->rss_tuple_sets.ipv6_sctp_en)
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
+ else if (vport->rss_tuple_sets.ipv4_fragment_en ||
+ vport->rss_tuple_sets.ipv6_fragment_en)
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
+ else
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
+}
+
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
struct hclge_rss_input_tuple_cmd *req;
@@ -3121,6 +2818,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
+ hclge_get_rss_type(&hdev->vport[0]);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
@@ -3135,8 +2833,19 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
int i;
/* Get hash algorithm */
- if (hfunc)
- *hfunc = vport->rss_algo;
+ if (hfunc) {
+ switch (vport->rss_algo) {
+ case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGE_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
+ }
/* Get the RSS Key required by the user */
if (key)
@@ -3160,12 +2869,20 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
/* Set the RSS Hash Key if specififed by the user */
if (key) {
-
- if (hfunc == ETH_RSS_HASH_TOP ||
- hfunc == ETH_RSS_HASH_NO_CHANGE)
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- else
+ break;
+ case ETH_RSS_HASH_XOR:
+ hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+ break;
+ case ETH_RSS_HASH_NO_CHANGE:
+ hash_algo = vport->rss_algo;
+ break;
+ default:
return -EINVAL;
+ }
+
ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
if (ret)
return ret;
@@ -3283,6 +3000,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ hclge_get_rss_type(vport);
return 0;
}
@@ -3608,6 +3326,1281 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
hclge_cmd_set_promisc_mode(hdev, &param);
}
+static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
+{
+ struct hclge_get_fd_mode_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
+
+ req = (struct hclge_get_fd_mode_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ *fd_mode = req->mode;
+
+ return ret;
+}
+
+static int hclge_get_fd_allocation(struct hclge_dev *hdev,
+ u32 *stage1_entry_num,
+ u32 *stage2_entry_num,
+ u16 *stage1_counter_num,
+ u16 *stage2_counter_num)
+{
+ struct hclge_get_fd_allocation_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
+
+ req = (struct hclge_get_fd_allocation_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
+ *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
+ *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
+ *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
+
+ return ret;
+}
+
+static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
+{
+ struct hclge_set_fd_key_config_cmd *req;
+ struct hclge_fd_key_cfg *stage;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
+
+ req = (struct hclge_set_fd_key_config_cmd *)desc.data;
+ stage = &hdev->fd_cfg.key_cfg[stage_num];
+ req->stage = stage_num;
+ req->key_select = stage->key_sel;
+ req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
+ req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
+ req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
+ req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
+ req->tuple_mask = cpu_to_le32(~stage->tuple_active);
+ req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
+
+ return ret;
+}
+
+static int hclge_init_fd_config(struct hclge_dev *hdev)
+{
+#define LOW_2_WORDS 0x03
+ struct hclge_fd_key_cfg *key_cfg;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return 0;
+
+ ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
+ if (ret)
+ return ret;
+
+ switch (hdev->fd_cfg.fd_mode) {
+ case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
+ break;
+ case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "Unsupported flow director mode %d\n",
+ hdev->fd_cfg.fd_mode);
+ return -EOPNOTSUPP;
+ }
+
+ hdev->fd_cfg.fd_en = true;
+ hdev->fd_cfg.proto_support =
+ TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
+ UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
+ key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
+	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
+ key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
+ key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
+ key_cfg->outer_sipv6_word_en = 0;
+ key_cfg->outer_dipv6_word_en = 0;
+
+ key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
+ BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
+ BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+	/* With the max 400 bit key, tuples for ether type are also supported */
+ if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
+ hdev->fd_cfg.proto_support |= ETHER_FLOW;
+ key_cfg->tuple_active |=
+ BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
+ }
+
+ /* roce_type is used to filter roce frames
+ * dst_vport is used to specify the rule
+ */
+ key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
+
+ ret = hclge_get_fd_allocation(hdev,
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
+ if (ret)
+ return ret;
+
+ return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
+}
+
+static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
+ int loc, u8 *key, bool is_add)
+{
+ struct hclge_fd_tcam_config_1_cmd *req1;
+ struct hclge_fd_tcam_config_2_cmd *req2;
+ struct hclge_fd_tcam_config_3_cmd *req3;
+ struct hclge_desc desc[3];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
+
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+ req1->stage = stage;
+ req1->xy_sel = sel_x ? 1 : 0;
+ hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
+ req1->index = cpu_to_le32(loc);
+ req1->entry_vld = sel_x ? is_add : 0;
+
+ if (key) {
+ memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
+ memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
+ sizeof(req2->tcam_data));
+ memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
+ sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "config tcam key fail, ret=%d\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
+ struct hclge_fd_ad_data *action)
+{
+ struct hclge_fd_ad_config_cmd *req;
+ struct hclge_desc desc;
+ u64 ad_data = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
+
+ req = (struct hclge_fd_ad_config_cmd *)desc.data;
+ req->index = cpu_to_le32(loc);
+ req->stage = stage;
+
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
+ action->write_rule_id_to_bd);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
+ action->rule_id);
+ ad_data <<= 32;
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
+ action->forward_to_direct_queue);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
+ action->queue_id);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
+ HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
+ action->counter_id);
+
+ req->ad_data = cpu_to_le64(ad_data);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
+
+ return ret;
+}
+
+static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
+ struct hclge_fd_rule *rule)
+{
+ u16 tmp_x_s, tmp_y_s;
+ u32 tmp_x_l, tmp_y_l;
+ int i;
+
+ if (rule->unused_tuple & tuple_bit)
+ return true;
+
+ switch (tuple_bit) {
+ case 0:
+ return false;
+ case BIT(INNER_DST_MAC):
+ for (i = 0; i < 6; i++) {
+ calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
+ rule->tuples_mask.dst_mac[i]);
+ calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
+ rule->tuples_mask.dst_mac[i]);
+ }
+
+ return true;
+ case BIT(INNER_SRC_MAC):
+ for (i = 0; i < 6; i++) {
+			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
+			       rule->tuples_mask.src_mac[i]);
+			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
+			       rule->tuples_mask.src_mac[i]);
+ }
+
+ return true;
+ case BIT(INNER_VLAN_TAG_FST):
+ calc_x(tmp_x_s, rule->tuples.vlan_tag1,
+ rule->tuples_mask.vlan_tag1);
+ calc_y(tmp_y_s, rule->tuples.vlan_tag1,
+ rule->tuples_mask.vlan_tag1);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_ETH_TYPE):
+ calc_x(tmp_x_s, rule->tuples.ether_proto,
+ rule->tuples_mask.ether_proto);
+ calc_y(tmp_y_s, rule->tuples.ether_proto,
+ rule->tuples_mask.ether_proto);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_IP_TOS):
+ calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+ calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+
+ return true;
+ case BIT(INNER_IP_PROTO):
+ calc_x(*key_x, rule->tuples.ip_proto,
+ rule->tuples_mask.ip_proto);
+ calc_y(*key_y, rule->tuples.ip_proto,
+ rule->tuples_mask.ip_proto);
+
+ return true;
+ case BIT(INNER_SRC_IP):
+ calc_x(tmp_x_l, rule->tuples.src_ip[3],
+ rule->tuples_mask.src_ip[3]);
+ calc_y(tmp_y_l, rule->tuples.src_ip[3],
+ rule->tuples_mask.src_ip[3]);
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+ return true;
+ case BIT(INNER_DST_IP):
+ calc_x(tmp_x_l, rule->tuples.dst_ip[3],
+ rule->tuples_mask.dst_ip[3]);
+ calc_y(tmp_y_l, rule->tuples.dst_ip[3],
+ rule->tuples_mask.dst_ip[3]);
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+ return true;
+ case BIT(INNER_SRC_PORT):
+ calc_x(tmp_x_s, rule->tuples.src_port,
+ rule->tuples_mask.src_port);
+ calc_y(tmp_y_s, rule->tuples.src_port,
+ rule->tuples_mask.src_port);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_DST_PORT):
+ calc_x(tmp_x_s, rule->tuples.dst_port,
+ rule->tuples_mask.dst_port);
+ calc_y(tmp_y_s, rule->tuples.dst_port,
+ rule->tuples_mask.dst_port);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ default:
+ return false;
+ }
+}
+
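The calc_x()/calc_y() helpers used above (defined earlier in hclge_main.c) derive the paired X/Y bit patterns a TCAM lookup needs from each (value, mask) tuple. A minimal sketch of one common encoding, shown only to illustrate the idea and not necessarily bit-identical to the driver's macros:

/* Illustrative only -- the driver's own calc_x()/calc_y() macros are
 * authoritative.  One common TCAM X/Y encoding:
 */
#define example_calc_x(x, value, mask)	((x) = (value) & (mask))
#define example_calc_y(y, value, mask)	((y) = ~(value) & (mask))
/*
 *   mask bit  value bit | x  y | lookup behaviour
 *      1          1     | 1  0 | packet bit must be 1
 *      1          0     | 0  1 | packet bit must be 0
 *      0          -     | 0  0 | don't care
 *
 * hclge_config_key() below then writes key_y with sel_x == false and key_x
 * with sel_x == true, so both halves of the entry reach the TCAM.
 */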
+static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
+ u8 vf_id, u8 network_port_id)
+{
+ u32 port_number = 0;
+
+ if (port_type == HOST_PORT) {
+ hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
+ pf_id);
+ hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
+ vf_id);
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
+ } else {
+ hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
+ HCLGE_NETWORK_PORT_ID_S, network_port_id);
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
+ }
+
+ return port_number;
+}
+
+static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
+ __le32 *key_x, __le32 *key_y,
+ struct hclge_fd_rule *rule)
+{
+ u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
+ u8 cur_pos = 0, tuple_size, shift_bits;
+ int i;
+
+ for (i = 0; i < MAX_META_DATA; i++) {
+ tuple_size = meta_data_key_info[i].key_length;
+ tuple_bit = key_cfg->meta_data_active & BIT(i);
+
+ switch (tuple_bit) {
+ case BIT(ROCE_TYPE):
+ hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
+ cur_pos += tuple_size;
+ break;
+ case BIT(DST_VPORT):
+ port_number = hclge_get_port_number(HOST_PORT, 0,
+ rule->vf_id, 0);
+ hnae3_set_field(meta_data,
+					GENMASK(cur_pos + tuple_size - 1,
+						cur_pos),
+ cur_pos, port_number);
+ cur_pos += tuple_size;
+ break;
+ default:
+ break;
+ }
+ }
+
+ calc_x(tmp_x, meta_data, 0xFFFFFFFF);
+ calc_y(tmp_y, meta_data, 0xFFFFFFFF);
+ shift_bits = sizeof(meta_data) * 8 - cur_pos;
+
+ *key_x = cpu_to_le32(tmp_x << shift_bits);
+ *key_y = cpu_to_le32(tmp_y << shift_bits);
+}
+
+/* A complete key consists of a meta data key and a tuple key.
+ * The meta data key is stored in the MSB region and the tuple key in the
+ * LSB region; unused bits are filled with 0.
+ */
+static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
+ u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
+ u8 *cur_key_x, *cur_key_y;
+ int i, ret, tuple_size;
+ u8 meta_data_region;
+
+ memset(key_x, 0, sizeof(key_x));
+ memset(key_y, 0, sizeof(key_y));
+ cur_key_x = key_x;
+ cur_key_y = key_y;
+
+	for (i = 0; i < MAX_TUPLE; i++) {
+ bool tuple_valid;
+ u32 check_tuple;
+
+ tuple_size = tuple_key_info[i].key_length / 8;
+ check_tuple = key_cfg->tuple_active & BIT(i);
+
+ tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+ cur_key_y, rule);
+ if (tuple_valid) {
+ cur_key_x += tuple_size;
+ cur_key_y += tuple_size;
+ }
+ }
+
+ meta_data_region = hdev->fd_cfg.max_key_length / 8 -
+ MAX_META_DATA_LENGTH / 8;
+
+ hclge_fd_convert_meta_data(key_cfg,
+ (__le32 *)(key_x + meta_data_region),
+ (__le32 *)(key_y + meta_data_region),
+ rule);
+
+ ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
+ true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "fd key_y config fail, loc=%d, ret=%d\n",
+			rule->location, ret);
+ return ret;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
+ true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "fd key_x config fail, loc=%d, ret=%d\n",
+			rule->location, ret);
+ return ret;
+}
+
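To make the layout described in the comment above concrete, here is a worked example assuming the 400-bit key mode and a 32-bit meta data field (the real constants live in hclge_main.h):

/* Worked example, assuming illustrative sizes:
 *
 *   max_key_length   = 400 bits  ->  key_x / key_y are 50 bytes each
 *   meta data field  =  32 bits  ->  4 bytes
 *   meta_data_region = 400 / 8 - 32 / 8 = 46
 *
 * So the tuple key is packed from byte 0 upwards, the meta data occupies
 * bytes 46..49 at the top of the key, and any bytes in between stay zero.
 */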
+static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_ad_data ad_data;
+
+ ad_data.ad_id = rule->location;
+
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ ad_data.drop_packet = true;
+ ad_data.forward_to_direct_queue = false;
+ ad_data.queue_id = 0;
+ } else {
+ ad_data.drop_packet = false;
+ ad_data.forward_to_direct_queue = true;
+ ad_data.queue_id = rule->queue_id;
+ }
+
+ ad_data.use_counter = false;
+ ad_data.counter_id = 0;
+
+ ad_data.use_next_stage = false;
+ ad_data.next_input_key = 0;
+
+ ad_data.write_rule_id_to_bd = true;
+ ad_data.rule_id = rule->location;
+
+ return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
+}
+
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs, u32 *unused)
+{
+ struct ethtool_tcpip4_spec *tcp_ip4_spec;
+ struct ethtool_usrip4_spec *usr_ip4_spec;
+ struct ethtool_tcpip6_spec *tcp_ip6_spec;
+ struct ethtool_usrip6_spec *usr_ip6_spec;
+ struct ethhdr *ether_spec;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return -EINVAL;
+
+ if (!(fs->flow_type & hdev->fd_cfg.proto_support))
+ return -EOPNOTSUPP;
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+
+ if (!tcp_ip4_spec->ip4src)
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!tcp_ip4_spec->ip4dst)
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!tcp_ip4_spec->psrc)
+ *unused |= BIT(INNER_SRC_PORT);
+
+ if (!tcp_ip4_spec->pdst)
+ *unused |= BIT(INNER_DST_PORT);
+
+ if (!tcp_ip4_spec->tos)
+ *unused |= BIT(INNER_IP_TOS);
+
+ break;
+ case IP_USER_FLOW:
+ usr_ip4_spec = &fs->h_u.usr_ip4_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+ if (!usr_ip4_spec->ip4src)
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!usr_ip4_spec->ip4dst)
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!usr_ip4_spec->tos)
+ *unused |= BIT(INNER_IP_TOS);
+
+ if (!usr_ip4_spec->proto)
+ *unused |= BIT(INNER_IP_PROTO);
+
+ if (usr_ip4_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
+
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS);
+
+ if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
+ !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
+ !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!tcp_ip6_spec->psrc)
+ *unused |= BIT(INNER_SRC_PORT);
+
+ if (!tcp_ip6_spec->pdst)
+ *unused |= BIT(INNER_DST_PORT);
+
+ if (tcp_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ break;
+ case IPV6_USER_FLOW:
+ usr_ip6_spec = &fs->h_u.usr_ip6_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
+ BIT(INNER_DST_PORT);
+
+ if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
+ !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
+ !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!usr_ip6_spec->l4_proto)
+ *unused |= BIT(INNER_IP_PROTO);
+
+ if (usr_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ if (usr_ip6_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ break;
+ case ETHER_FLOW:
+ ether_spec = &fs->h_u.ether_spec;
+ *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+
+ if (is_zero_ether_addr(ether_spec->h_source))
+ *unused |= BIT(INNER_SRC_MAC);
+
+ if (is_zero_ether_addr(ether_spec->h_dest))
+ *unused |= BIT(INNER_DST_MAC);
+
+ if (!ether_spec->h_proto)
+ *unused |= BIT(INNER_ETH_TYPE);
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->flow_type & FLOW_EXT)) {
+ if (fs->h_ext.vlan_etype)
+ return -EOPNOTSUPP;
+ if (!fs->h_ext.vlan_tci)
+ *unused |= BIT(INNER_VLAN_TAG_FST);
+
+ if (fs->m_ext.vlan_tci) {
+ if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+ }
+ } else {
+ *unused |= BIT(INNER_VLAN_TAG_FST);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
+ return -EOPNOTSUPP;
+
+ if (is_zero_ether_addr(fs->h_ext.h_dest))
+ *unused |= BIT(INNER_DST_MAC);
+ else
+ *unused &= ~(BIT(INNER_DST_MAC));
+ }
+
+ return 0;
+}
+
+static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= location)
+ break;
+ }
+
+ return rule && rule->location == location;
+}
+
+static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
+ struct hclge_fd_rule *new_rule,
+ u16 location,
+ bool is_add)
+{
+ struct hclge_fd_rule *rule = NULL, *parent = NULL;
+ struct hlist_node *node2;
+
+ if (is_add && !new_rule)
+ return -EINVAL;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= location)
+ break;
+ parent = rule;
+ }
+
+ if (rule && rule->location == location) {
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+
+ if (!is_add)
+ return 0;
+
+ } else if (!is_add) {
+ dev_err(&hdev->pdev->dev,
+ "delete fail, rule %d is inexistent\n",
+ location);
+ return -EINVAL;
+ }
+
+ INIT_HLIST_NODE(&new_rule->rule_node);
+
+ if (parent)
+ hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
+ else
+ hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
+
+ hdev->hclge_fd_rule_num++;
+
+ return 0;
+}
+
+static int hclge_fd_get_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ rule->tuples.src_ip[3] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[3] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
+
+ rule->tuples.dst_ip[3] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[3] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
+
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
+ rule->tuples_mask.src_port =
+ be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
+
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
+ rule->tuples_mask.dst_port =
+ be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
+
+ rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
+
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case IP_USER_FLOW:
+ rule->tuples.src_ip[3] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[3] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
+
+ rule->tuples.dst_ip[3] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[3] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
+
+ rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
+
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ be32_to_cpu_array(rule->tuples.src_ip,
+ fs->h_u.tcp_ip6_spec.ip6src, 4);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ fs->m_u.tcp_ip6_spec.ip6src, 4);
+
+ be32_to_cpu_array(rule->tuples.dst_ip,
+ fs->h_u.tcp_ip6_spec.ip6dst, 4);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ fs->m_u.tcp_ip6_spec.ip6dst, 4);
+
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
+ rule->tuples_mask.src_port =
+ be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
+
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
+ rule->tuples_mask.dst_port =
+ be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case IPV6_USER_FLOW:
+ be32_to_cpu_array(rule->tuples.src_ip,
+ fs->h_u.usr_ip6_spec.ip6src, 4);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ fs->m_u.usr_ip6_spec.ip6src, 4);
+
+ be32_to_cpu_array(rule->tuples.dst_ip,
+ fs->h_u.usr_ip6_spec.ip6dst, 4);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ fs->m_u.usr_ip6_spec.ip6dst, 4);
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case ETHER_FLOW:
+ ether_addr_copy(rule->tuples.src_mac,
+ fs->h_u.ether_spec.h_source);
+ ether_addr_copy(rule->tuples_mask.src_mac,
+ fs->m_u.ether_spec.h_source);
+
+ ether_addr_copy(rule->tuples.dst_mac,
+ fs->h_u.ether_spec.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac,
+ fs->m_u.ether_spec.h_dest);
+
+ rule->tuples.ether_proto =
+ be16_to_cpu(fs->h_u.ether_spec.h_proto);
+ rule->tuples_mask.ether_proto =
+ be16_to_cpu(fs->m_u.ether_spec.h_proto);
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_SCTP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_TCP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_UDP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ default:
+ break;
+ }
+
+ if ((fs->flow_type & FLOW_EXT)) {
+ rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
+ rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
+ }
+
+ return 0;
+}
+
+static int hclge_add_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 dst_vport_id = 0, q_index = 0;
+ struct ethtool_rx_flow_spec *fs;
+ struct hclge_fd_rule *rule;
+ u32 unused = 0;
+ u8 action;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ if (!hdev->fd_cfg.fd_en) {
+ dev_warn(&hdev->pdev->dev,
+ "Please enable flow director first\n");
+ return -EOPNOTSUPP;
+ }
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ ret = hclge_fd_check_spec(hdev, fs, &unused);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
+ return ret;
+ }
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+ action = HCLGE_FD_ACTION_DROP_PACKET;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ u16 tqps;
+
+ dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
+ tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
+
+ if (ring >= tqps) {
+ dev_err(&hdev->pdev->dev,
+ "Error: queue id (%d) > max tqp num (%d)\n",
+ ring, tqps - 1);
+ return -EINVAL;
+ }
+
+ if (vf > hdev->num_req_vfs) {
+ dev_err(&hdev->pdev->dev,
+ "Error: vf id (%d) > max vf num (%d)\n",
+ vf, hdev->num_req_vfs);
+ return -EINVAL;
+ }
+
+ action = HCLGE_FD_ACTION_ACCEPT_PACKET;
+ q_index = ring;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = hclge_fd_get_tuple(hdev, fs, rule);
+ if (ret)
+ goto free_rule;
+
+ rule->flow_type = fs->flow_type;
+
+ rule->location = fs->location;
+ rule->unused_tuple = unused;
+ rule->vf_id = dst_vport_id;
+ rule->queue_id = q_index;
+ rule->action = action;
+
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto free_rule;
+
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto free_rule;
+
+ ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
+ if (ret)
+ goto free_rule;
+
+ return ret;
+
+free_rule:
+ kfree(rule);
+ return ret;
+}
+
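For reference, a minimal sketch of the kind of ethtool_rx_flow_spec that reaches hclge_add_fd_entry(); the struct layout comes from <linux/ethtool.h>, while the helper name and the values are made up for illustration:

static void example_build_fd_spec(struct ethtool_rx_flow_spec *fs)
{
	memset(fs, 0, sizeof(*fs));
	fs->flow_type = TCP_V4_FLOW;
	fs->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(0xc0a80101);	/* 192.168.1.1 */
	fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(80);
	fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0U);		/* full match */
	fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(0xffff);
	fs->ring_cookie = 3;	/* steer matches to queue 3 on the PF */
	fs->location = 0;	/* TCAM index, < rule_num[HCLGE_FD_STAGE_1] */
}

From user space the same rule would typically be installed with something like "ethtool -N <dev> flow-type tcp4 src-ip 192.168.1.1 dst-port 80 action 3 loc 0" and removed again with "ethtool -N <dev> delete 0", which lands in hclge_del_fd_entry() below.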
+static int hclge_del_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return -EINVAL;
+
+ if (!hclge_fd_rule_exist(hdev, fs->location)) {
+ dev_err(&hdev->pdev->dev,
+ "Delete fail, rule %d is inexistent\n",
+ fs->location);
+ return -ENOENT;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ fs->location, NULL, false);
+ if (ret)
+ return ret;
+
+ return hclge_fd_update_rule_list(hdev, NULL, fs->location,
+ false);
+}
+
+static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+ bool clear_list)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return;
+
+ if (clear_list) {
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
+ rule_node) {
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+ }
+ } else {
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
+ rule_node)
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ }
+}
+
+static int hclge_restore_fd_entries(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (!ret)
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+
+ if (ret) {
+ dev_warn(&hdev->pdev->dev,
+ "Restore rule %d failed, remove it\n",
+ rule->location);
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+ }
+ }
+ return 0;
+}
+
+static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ cmd->rule_cnt = hdev->hclge_fd_rule_num;
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+ return 0;
+}
+
+static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule *rule = NULL;
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+ struct hlist_node *node2;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= fs->location)
+ break;
+ }
+
+ if (!rule || fs->location != rule->location)
+ return -ENOENT;
+
+ fs->flow_type = rule->flow_type;
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs->h_u.tcp_ip4_spec.ip4src =
+ cpu_to_be32(rule->tuples.src_ip[3]);
+ fs->m_u.tcp_ip4_spec.ip4src =
+ rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+
+ fs->h_u.tcp_ip4_spec.ip4dst =
+ cpu_to_be32(rule->tuples.dst_ip[3]);
+ fs->m_u.tcp_ip4_spec.ip4dst =
+ rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+
+ fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
+ fs->m_u.tcp_ip4_spec.psrc =
+ rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
+ fs->m_u.tcp_ip4_spec.pdst =
+ rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
+ fs->m_u.tcp_ip4_spec.tos =
+ rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ break;
+ case IP_USER_FLOW:
+ fs->h_u.usr_ip4_spec.ip4src =
+ cpu_to_be32(rule->tuples.src_ip[3]);
+		fs->m_u.usr_ip4_spec.ip4src =
+ rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+
+ fs->h_u.usr_ip4_spec.ip4dst =
+ cpu_to_be32(rule->tuples.dst_ip[3]);
+ fs->m_u.usr_ip4_spec.ip4dst =
+ rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+
+ fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
+ fs->m_u.usr_ip4_spec.tos =
+ rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
+ fs->m_u.usr_ip4_spec.proto =
+ rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
+ rule->tuples.src_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
+ rule->tuples_mask.src_ip, 4);
+
+ cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
+ rule->tuples.dst_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
+ rule->tuples_mask.dst_ip, 4);
+
+ fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
+ fs->m_u.tcp_ip6_spec.psrc =
+ rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
+ fs->m_u.tcp_ip6_spec.pdst =
+ rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ break;
+ case IPV6_USER_FLOW:
+ cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
+ rule->tuples.src_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
+ rule->tuples_mask.src_ip, 4);
+
+ cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
+ rule->tuples.dst_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
+ rule->tuples_mask.dst_ip, 4);
+
+ fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
+ fs->m_u.usr_ip6_spec.l4_proto =
+ rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ break;
+ case ETHER_FLOW:
+ ether_addr_copy(fs->h_u.ether_spec.h_source,
+ rule->tuples.src_mac);
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_source);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_source,
+ rule->tuples_mask.src_mac);
+
+ ether_addr_copy(fs->h_u.ether_spec.h_dest,
+ rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_dest);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_dest,
+ rule->tuples_mask.dst_mac);
+
+ fs->h_u.ether_spec.h_proto =
+ cpu_to_be16(rule->tuples.ether_proto);
+ fs->m_u.ether_spec.h_proto =
+ rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (fs->flow_type & FLOW_EXT) {
+ fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
+ fs->m_ext.vlan_tci =
+ rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
+ cpu_to_be16(VLAN_VID_MASK) :
+ cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+			eth_zero_addr(fs->m_ext.h_dest);
+		else
+			ether_addr_copy(fs->m_ext.h_dest,
+					rule->tuples_mask.dst_mac);
+ }
+
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else {
+ u64 vf_id;
+
+ fs->ring_cookie = rule->queue_id;
+ vf_id = rule->vf_id;
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fs->ring_cookie |= vf_id;
+ }
+
+ return 0;
+}
+
+static int hclge_get_all_rules(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node2;
+ int cnt = 0;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+ hlist_for_each_entry_safe(rule, node2,
+ &hdev->fd_rule_list, rule_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = rule->location;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hdev->fd_cfg.fd_en = enable;
+ if (!enable)
+ hclge_del_all_fd_entries(handle, false);
+ else
+ hclge_restore_fd_entries(handle);
+}
+
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
struct hclge_desc desc;
@@ -3639,7 +4632,7 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
"mac enable fail, ret =%d.\n", ret);
}
-static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
+static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
@@ -3659,6 +4652,8 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
/* 2 Then setup the loopback flag */
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
@@ -3673,22 +4668,37 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
+static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
struct hclge_serdes_lb_cmd *req;
struct hclge_desc desc;
int ret, i = 0;
+ u8 loop_mode_b;
- req = (struct hclge_serdes_lb_cmd *)&desc.data[0];
+ req = (struct hclge_serdes_lb_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+ switch (loop_mode) {
+ case HNAE3_LOOP_SERIAL_SERDES:
+ loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ break;
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "unsupported serdes loopback mode %d\n", loop_mode);
+ return -ENOTSUPP;
+ }
+
if (en) {
- req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
- req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ req->enable = loop_mode_b;
+ req->mask = loop_mode_b;
} else {
- req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ req->mask = loop_mode_b;
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3719,33 +4729,10 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
return -EIO;
}
+ hclge_cfg_mac_mode(hdev, en);
return 0;
}
-static int hclge_set_loopback(struct hnae3_handle *handle,
- enum hnae3_loop loop_mode, bool en)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- int ret;
-
- switch (loop_mode) {
- case HNAE3_MAC_INTER_LOOP_MAC:
- ret = hclge_set_mac_loopback(hdev, en);
- break;
- case HNAE3_MAC_INTER_LOOP_SERDES:
- ret = hclge_set_serdes_loopback(hdev, en);
- break;
- default:
- ret = -ENOTSUPP;
- dev_err(&hdev->pdev->dev,
- "loop_mode %d is not supported\n", loop_mode);
- break;
- }
-
- return ret;
-}
-
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
int stream_id, bool enable)
{
@@ -3766,6 +4753,37 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
return ret;
}
+static int hclge_set_loopback(struct hnae3_handle *handle,
+ enum hnae3_loop loop_mode, bool en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int i, ret;
+
+ switch (loop_mode) {
+ case HNAE3_LOOP_APP:
+ ret = hclge_set_app_loopback(hdev, en);
+ break;
+ case HNAE3_LOOP_SERIAL_SERDES:
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(&hdev->pdev->dev,
+ "loop_mode %d is not supported\n", loop_mode);
+ break;
+ }
+
+ for (i = 0; i < vport->alloc_tqps; i++) {
+ ret = hclge_tqp_enable(hdev, i, 0, en);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -3809,6 +4827,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
struct hclge_dev *hdev = vport->back;
int i;
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
del_timer_sync(&hdev->service_timer);
cancel_work_sync(&hdev->service_task);
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
@@ -3950,174 +4970,6 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
-static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
- const u8 *addr)
-{
- u16 high_val = addr[1] | (addr[0] << 8);
- struct hclge_dev *hdev = vport->back;
- u32 rsh = 4 - hdev->mta_mac_sel_type;
- u16 ret_val = (high_val >> rsh) & 0xfff;
-
- return ret_val;
-}
-
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
- enum hclge_mta_dmac_sel_type mta_mac_sel,
- bool enable)
-{
- struct hclge_mta_filter_mode_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_mta_filter_mode_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
-
- hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
- enable);
- hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
- HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Config mat filter mode failed for cmd_send, ret =%d.\n",
- ret);
-
- return ret;
-}
-
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
- u8 func_id,
- bool enable)
-{
- struct hclge_cfg_func_mta_filter_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
-
- hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
- enable);
- req->function_id = func_id;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Config func_id enable failed for cmd_send, ret =%d.\n",
- ret);
-
- return ret;
-}
-
-static int hclge_set_mta_table_item(struct hclge_vport *vport,
- u16 idx,
- bool enable)
-{
- struct hclge_dev *hdev = vport->back;
- struct hclge_cfg_func_mta_item_cmd *req;
- struct hclge_desc desc;
- u16 item_idx = 0;
- int ret;
-
- req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
- hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
-
- hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
- HCLGE_CFG_MTA_ITEM_IDX_S, idx);
- req->item_idx = cpu_to_le16(item_idx);
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mta table item failed for cmd_send, ret =%d.\n",
- ret);
- return ret;
- }
-
- if (enable)
- set_bit(idx, vport->mta_shadow);
- else
- clear_bit(idx, vport->mta_shadow);
-
- return 0;
-}
-
-static int hclge_update_mta_status(struct hnae3_handle *handle)
-{
- unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct net_device *netdev = handle->kinfo.netdev;
- struct netdev_hw_addr *ha;
- u16 tbl_idx;
-
- memset(mta_status, 0, sizeof(mta_status));
-
- /* update mta_status from mc addr list */
- netdev_for_each_mc_addr(ha, netdev) {
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
- set_bit(tbl_idx, mta_status);
- }
-
- return hclge_update_mta_status_common(vport, mta_status,
- 0, HCLGE_MTA_TBL_SIZE, true);
-}
-
-int hclge_update_mta_status_common(struct hclge_vport *vport,
- unsigned long *status,
- u16 idx,
- u16 count,
- bool update_filter)
-{
- struct hclge_dev *hdev = vport->back;
- u16 update_max = idx + count;
- u16 check_max;
- int ret = 0;
- bool used;
- u16 i;
-
- /* setup mta check range */
- if (update_filter) {
- i = 0;
- check_max = HCLGE_MTA_TBL_SIZE;
- } else {
- i = idx;
- check_max = update_max;
- }
-
- used = false;
- /* check and update all mta item */
- for (; i < check_max; i++) {
- /* ignore unused item */
- if (!test_bit(i, vport->mta_shadow))
- continue;
-
- /* if i in update range then update it */
- if (i >= idx && i < update_max)
- if (!test_bit(i - idx, status))
- hclge_set_mta_table_item(vport, i, false);
-
- if (!used && test_bit(i, vport->mta_shadow))
- used = true;
- }
-
- /* no longer use mta, disable it */
- if (vport->accept_mta_mc && update_filter && !used) {
- ret = hclge_cfg_func_mta_filter(hdev,
- vport->vport_id,
- false);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "disable func mta filter fail ret=%d\n",
- ret);
- else
- vport->accept_mta_mc = false;
- }
-
- return ret;
-}
-
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
struct hclge_mac_vlan_tbl_entry_cmd *req)
{
@@ -4241,6 +5093,118 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
return cfg_status;
}
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+ u16 allocated_size = 0;
+ int ret;
+
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
+ true);
+ if (ret)
+ return ret;
+
+ if (allocated_size < hdev->wanted_umv_size)
+ dev_warn(&hdev->pdev->dev,
+ "Alloc umv space failed, want %d, get %d\n",
+ hdev->wanted_umv_size, allocated_size);
+
+ mutex_init(&hdev->umv_mutex);
+ hdev->max_umv_size = allocated_size;
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_req_vfs + 2);
+
+ return 0;
+}
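A quick worked example of the split above (an editorial sketch, not part of the patch; it assumes the firmware grants the full HCLGE_DEFAULT_UMV_SPACE_PER_PF request and that the PF was created with 5 VFs):

/*
 * wanted_umv_size = HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM = 3072 / 8 = 384
 * divisor         = num_req_vfs + 2 = 5 + 2 = 7
 * priv_umv_size   = 384 / 7        = 54   (per function: PF and each VF)
 * share_umv_size  = 54 + (384 % 7) = 54 + 6 = 60
 *
 * Sanity check: 6 functions (PF + 5 VFs) * 54 private entries + 60 shared
 * entries = 324 + 60 = 384, i.e. the shared pool is one private-sized
 * chunk plus the division remainder.
 */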
+
+static int hclge_uninit_umv_space(struct hclge_dev *hdev)
+{
+ int ret;
+
+ if (hdev->max_umv_size > 0) {
+ ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
+ false);
+ if (ret)
+ return ret;
+ hdev->max_umv_size = 0;
+ }
+ mutex_destroy(&hdev->umv_mutex);
+
+ return 0;
+}
+
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size, bool is_alloc)
+{
+ struct hclge_umv_spc_alc_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_umv_spc_alc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
+ hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
+ req->space_size = cpu_to_le32(space_size);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "%s umv space failed for cmd_send, ret =%d\n",
+ is_alloc ? "allocate" : "free", ret);
+ return ret;
+ }
+
+ if (is_alloc && allocated_size)
+ *allocated_size = le32_to_cpu(desc.data[1]);
+
+ return 0;
+}
+
+static void hclge_reset_umv_space(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->used_umv_num = 0;
+ }
+
+ mutex_lock(&hdev->umv_mutex);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_req_vfs + 2);
+ mutex_unlock(&hdev->umv_mutex);
+}
+
+static bool hclge_is_umv_space_full(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ bool is_full;
+
+ mutex_lock(&hdev->umv_mutex);
+ is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
+ hdev->share_umv_size == 0);
+ mutex_unlock(&hdev->umv_mutex);
+
+ return is_full;
+}
+
+static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ mutex_lock(&hdev->umv_mutex);
+ if (is_free) {
+ if (vport->used_umv_num > hdev->priv_umv_size)
+ hdev->share_umv_size++;
+ vport->used_umv_num--;
+ } else {
+ if (vport->used_umv_num >= hdev->priv_umv_size)
+ hdev->share_umv_size--;
+ vport->used_umv_num++;
+ }
+ mutex_unlock(&hdev->umv_mutex);
+}
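Continuing the numbers from the sketch above, the accounting works out as follows (illustration only):

/*
 * - A vport's first 54 unicast entries are charged against priv_umv_size
 *   (used_umv_num goes 1..54, share_umv_size stays at 60).
 * - Entry 55 and beyond: used_umv_num >= priv_umv_size, so each addition
 *   also decrements share_umv_size.
 * - hclge_is_umv_space_full() reports "full" only once the vport has used
 *   its private quota and share_umv_size has dropped to 0.
 * - On deletion the shared credit is paid back first (used_umv_num >
 *   priv_umv_size), keeping the counters consistent with the allocation
 *   path above.
 */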
+
static int hclge_add_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
@@ -4286,8 +5250,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
* is not allowed in the mac vlan table.
*/
ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
- if (ret == -ENOENT)
- return hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ if (ret == -ENOENT) {
+ if (!hclge_is_umv_space_full(vport)) {
+ ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ if (!ret)
+ hclge_update_umv_space(vport, false);
+ return ret;
+ }
+
+ dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+ hdev->priv_umv_size);
+
+ return -ENOSPC;
+ }
/* check if we just hit the duplicate */
if (!ret)
@@ -4330,6 +5305,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
+ if (!ret)
+ hclge_update_umv_space(vport, true);
return ret;
}
@@ -4348,7 +5325,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
- u16 tbl_idx;
int status;
/* mac addr check */
@@ -4362,7 +5338,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4378,25 +5354,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
}
- /* If mc mac vlan table is full, use MTA table */
- if (status == -ENOSPC) {
- if (!vport->accept_mta_mc) {
- status = hclge_cfg_func_mta_filter(hdev,
- vport->vport_id,
- true);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "set mta filter mode fail ret=%d\n",
- status);
- return status;
- }
- vport->accept_mta_mc = true;
- }
-
- /* Set MTA table for this MAC address */
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
- status = hclge_set_mta_table_item(vport, tbl_idx, true);
- }
+ if (status == -ENOSPC)
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
return status;
}
@@ -4429,7 +5388,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4598,8 +5557,20 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
return 0;
}
+static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
+ int cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!hdev->hw.mac.phydev)
+ return -EOPNOTSUPP;
+
+ return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
+}
+
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
- bool filter_en)
+ u8 fe_type, bool filter_en)
{
struct hclge_vlan_filter_ctrl_cmd *req;
struct hclge_desc desc;
@@ -4609,7 +5580,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = vlan_type;
- req->vlan_fe = filter_en;
+ req->vlan_fe = filter_en ? fe_type : 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -4621,13 +5592,34 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
#define HCLGE_FILTER_TYPE_VF 0
#define HCLGE_FILTER_TYPE_PORT 1
+#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_INGRESS_B)
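For reference, the composite values work out as follows (worked expansion, not part of the patch):

/*
 * HCLGE_FILTER_FE_EGRESS  = BIT(1) | BIT(3) = 0x0A  (NIC + RoCE egress)
 * HCLGE_FILTER_FE_INGRESS = BIT(0) | BIT(2) = 0x05  (NIC + RoCE ingress)
 *
 * hclge_set_vlan_filter_ctrl() writes fe_type into vlan_fe when the
 * filter is enabled and 0 when it is disabled; on revisions below 0x21
 * only the single HCLGE_FILTER_FE_EGRESS_V1_B bit is used, as the
 * function below shows.
 */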
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+ if (hdev->pdev->revision >= 0x21) {
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, enable);
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, enable);
+ } else {
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B, enable);
+ }
+ if (enable)
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
+ else
+ handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
@@ -4686,9 +5678,17 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
"Add vf vlan filter fail, ret =%d.\n",
req0->resp_code);
} else {
+#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
if (!req0->resp_code)
return 0;
+ if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
+ dev_warn(&hdev->pdev->dev,
+ "vlan %d filter is not in vf vlan table\n",
+ vlan);
+ return 0;
+ }
+
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%d.\n",
req0->resp_code);
@@ -4732,6 +5732,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
u16 vport_idx, vport_num = 0;
int ret;
+ if (is_kill && !vlan_id)
+ return 0;
+
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
0, proto);
if (ret) {
@@ -4761,7 +5764,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return -EINVAL;
}
- for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
+ for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
vport_num++;
if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
@@ -4896,7 +5899,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
- tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
@@ -4913,18 +5916,30 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE 0x8100
- struct hnae3_handle *handle;
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_vport *vport;
int ret;
int i;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
- if (ret)
- return ret;
+ if (hdev->pdev->revision >= 0x21) {
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, true);
+ if (ret)
+ return ret;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
- if (ret)
- return ret;
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true);
+ if (ret)
+ return ret;
+ } else {
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true);
+ if (ret)
+ return ret;
+ }
+
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
@@ -4970,7 +5985,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return ret;
}
- handle = &hdev->vport[0].nic;
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
@@ -5187,20 +6201,6 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
-static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
- u32 *flowctrl_adv)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct phy_device *phydev = hdev->hw.mac.phydev;
-
- if (!phydev)
- return;
-
- *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
- (phydev->advertising & ADVERTISED_Asym_Pause);
-}
-
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
@@ -5208,13 +6208,7 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
if (!phydev)
return;
- phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
-
- if (rx_en)
- phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-
- if (tx_en)
- phydev->advertising ^= ADVERTISED_Asym_Pause;
+ phy_set_asym_pause(phydev, rx_en, tx_en);
}
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
@@ -5256,11 +6250,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
if (!phydev->link || !phydev->autoneg)
return 0;
- if (phydev->advertising & ADVERTISED_Pause)
- local_advertising = ADVERTISE_PAUSE_CAP;
-
- if (phydev->advertising & ADVERTISED_Asym_Pause)
- local_advertising |= ADVERTISE_PAUSE_ASYM;
+ local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
if (phydev->pause)
remote_advertising = LPA_PAUSE_CAP;
@@ -5444,26 +6434,31 @@ static int hclge_init_client_instance(struct hnae3_client *client,
vport->nic.client = client;
ret = client->ops->init_instance(&vport->nic);
if (ret)
- return ret;
+ goto clear_nic;
ret = hclge_init_instance_hw(hdev);
if (ret) {
client->ops->uninit_instance(&vport->nic,
0);
- return ret;
+ goto clear_nic;
}
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
ret = hclge_init_roce_base_info(vport);
if (ret)
- return ret;
+ goto clear_roce;
ret = rc->ops->init_instance(&vport->roce);
if (ret)
- return ret;
+ goto clear_roce;
+
+ hnae3_set_client_init_flag(hdev->roce_client,
+ ae_dev, 1);
}
break;
@@ -5473,7 +6468,9 @@ static int hclge_init_client_instance(struct hnae3_client *client,
ret = client->ops->init_instance(&vport->nic);
if (ret)
- return ret;
+ goto clear_nic;
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
break;
case HNAE3_CLIENT_ROCE:
@@ -5485,16 +6482,31 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (hdev->roce_client && hdev->nic_client) {
ret = hclge_init_roce_base_info(vport);
if (ret)
- return ret;
+ goto clear_roce;
ret = client->ops->init_instance(&vport->roce);
if (ret)
- return ret;
+ goto clear_roce;
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
}
+
+ break;
+ default:
+ return -EINVAL;
}
}
return 0;
+
+clear_nic:
+ hdev->nic_client = NULL;
+ vport->nic.client = NULL;
+ return ret;
+clear_roce:
+ hdev->roce_client = NULL;
+ vport->roce.client = NULL;
+ return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
@@ -5514,7 +6526,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
}
if (client->type == HNAE3_CLIENT_ROCE)
return;
- if (client->ops->uninit_instance) {
+ if (hdev->nic_client && client->ops->uninit_instance) {
hclge_uninit_instance_hw(hdev);
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
@@ -5697,6 +6709,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
}
+ ret = hclge_init_umv_space(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
+ goto err_msi_irq_uninit;
+ }
+
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -5734,6 +6752,20 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fd table init fail, ret=%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_hw_error_set_state(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "hw error interrupts enable failed, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
@@ -5810,6 +6842,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ hclge_reset_umv_space(hdev);
+
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -5840,6 +6874,19 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fd table init fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Re-enable the TM hw error interrupts because
+ * they get disabled on core/global reset.
+ */
+ if (hclge_enable_tm_hw_error(hdev, true))
+ dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -5856,10 +6903,13 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
+ hclge_uninit_umv_space(hdev);
+
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
+ hclge_hw_error_set_state(hdev, false);
hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -5887,18 +6937,12 @@ static void hclge_get_channels(struct hnae3_handle *handle,
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
- u16 *free_tqps, u16 *max_rss_size)
+ u16 *alloc_tqps, u16 *max_rss_size)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u16 temp_tqps = 0;
- int i;
- for (i = 0; i < hdev->num_tqps; i++) {
- if (!hdev->htqp[i].alloced)
- temp_tqps++;
- }
- *free_tqps = temp_tqps;
+ *alloc_tqps = vport->alloc_tqps;
*max_rss_size = hdev->rss_size_max;
}
@@ -6228,27 +7272,6 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
}
}
-static void hclge_get_port_type(struct hnae3_handle *handle,
- u8 *port_type)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u8 media_type = hdev->hw.mac.media_type;
-
- switch (media_type) {
- case HNAE3_MEDIA_TYPE_FIBER:
- *port_type = PORT_FIBRE;
- break;
- case HNAE3_MEDIA_TYPE_COPPER:
- *port_type = PORT_TP;
- break;
- case HNAE3_MEDIA_TYPE_UNKNOWN:
- default:
- *port_type = PORT_OTHER;
- break;
- }
-}
-
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -6276,11 +7299,11 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_tc_size = hclge_get_tc_size,
.get_mac_addr = hclge_get_mac_addr,
.set_mac_addr = hclge_set_mac_addr,
+ .do_ioctl = hclge_do_ioctl,
.add_uc_addr = hclge_add_uc_addr,
.rm_uc_addr = hclge_rm_uc_addr,
.add_mc_addr = hclge_add_mc_addr,
.rm_mc_addr = hclge_rm_mc_addr,
- .update_mta_status = hclge_update_mta_status,
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
@@ -6301,12 +7324,19 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
.set_channels = hclge_set_channels,
.get_channels = hclge_get_channels,
- .get_flowctrl_adv = hclge_get_flowctrl_adv,
.get_regs_len = hclge_get_regs_len,
.get_regs = hclge_get_regs,
.set_led_id = hclge_set_led_id,
.get_link_mode = hclge_get_link_mode,
- .get_port_type = hclge_get_port_type,
+ .add_fd_entry = hclge_add_fd_entry,
+ .del_fd_entry = hclge_del_fd_entry,
+ .del_all_fd_entries = hclge_del_all_fd_entries,
+ .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
+ .get_fd_rule_info = hclge_get_fd_rule_info,
+ .get_fd_all_rules = hclge_get_all_rules,
+ .restore_fd_rules = hclge_restore_fd_entries,
+ .enable_fd = hclge_enable_fd,
+ .process_hw_error = hclge_process_ras_hw_error,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 1528fb3fa6be..e3dfd654eca9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -14,6 +14,8 @@
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
+#define HCLGE_MAX_PF_NUM 8
+
#define HCLGE_INVALID_VPORT 0xffff
#define HCLGE_PF_CFG_BLOCK_SIZE 32
@@ -53,7 +55,9 @@
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128
-#define HCLGE_MTA_TBL_SIZE 4096
+#define HCLGE_UMV_TBL_SIZE 3072
+#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
+ (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
#define HCLGE_TQP_RESET_TRY_TIMES 10
@@ -79,6 +83,19 @@
#define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_VF_NUM_PER_BYTE 8
+enum HLCGE_PORT_TYPE {
+ HOST_PORT,
+ NETWORK_PORT
+};
+
+#define HCLGE_PF_ID_S 0
+#define HCLGE_PF_ID_M GENMASK(2, 0)
+#define HCLGE_VF_ID_S 3
+#define HCLGE_VF_ID_M GENMASK(10, 3)
+#define HCLGE_PORT_TYPE_B 11
+#define HCLGE_NETWORK_PORT_ID_S 0
+#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
+
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
@@ -149,13 +166,6 @@ enum HCLGE_MAC_DUPLEX {
HCLGE_MAC_FULL
};
-enum hclge_mta_dmac_sel_type {
- HCLGE_MAC_ADDR_47_36,
- HCLGE_MAC_ADDR_46_35,
- HCLGE_MAC_ADDR_45_34,
- HCLGE_MAC_ADDR_44_33,
-};
-
struct hclge_mac {
u8 phy_addr;
u8 flag;
@@ -238,6 +248,7 @@ struct hclge_cfg {
u8 default_speed;
u32 numa_node_map;
u8 speed_ability;
+ u16 umv_space;
};
struct hclge_tm_info {
@@ -256,109 +267,6 @@ struct hclge_comm_stats_str {
unsigned long offset;
};
-/* all 64bit stats, opcode id: 0x0030 */
-struct hclge_64_bit_stats {
- /* query_igu_stat */
- u64 igu_rx_oversize_pkt;
- u64 igu_rx_undersize_pkt;
- u64 igu_rx_out_all_pkt;
- u64 igu_rx_uni_pkt;
- u64 igu_rx_multi_pkt;
- u64 igu_rx_broad_pkt;
- u64 rsv0;
-
- /* query_egu_stat */
- u64 egu_tx_out_all_pkt;
- u64 egu_tx_uni_pkt;
- u64 egu_tx_multi_pkt;
- u64 egu_tx_broad_pkt;
-
- /* ssu_ppp packet stats */
- u64 ssu_ppp_mac_key_num;
- u64 ssu_ppp_host_key_num;
- u64 ppp_ssu_mac_rlt_num;
- u64 ppp_ssu_host_rlt_num;
-
- /* ssu_tx_in_out_dfx_stats */
- u64 ssu_tx_in_num;
- u64 ssu_tx_out_num;
- /* ssu_rx_in_out_dfx_stats */
- u64 ssu_rx_in_num;
- u64 ssu_rx_out_num;
-};
-
-/* all 32bit stats, opcode id: 0x0031 */
-struct hclge_32_bit_stats {
- u64 igu_rx_err_pkt;
- u64 igu_rx_no_eof_pkt;
- u64 igu_rx_no_sof_pkt;
- u64 egu_tx_1588_pkt;
- u64 egu_tx_err_pkt;
- u64 ssu_full_drop_num;
- u64 ssu_part_drop_num;
- u64 ppp_key_drop_num;
- u64 ppp_rlt_drop_num;
- u64 ssu_key_drop_num;
- u64 pkt_curr_buf_cnt;
- u64 qcn_fb_rcv_cnt;
- u64 qcn_fb_drop_cnt;
- u64 qcn_fb_invaild_cnt;
- u64 rsv0;
- u64 rx_packet_tc0_in_cnt;
- u64 rx_packet_tc1_in_cnt;
- u64 rx_packet_tc2_in_cnt;
- u64 rx_packet_tc3_in_cnt;
- u64 rx_packet_tc4_in_cnt;
- u64 rx_packet_tc5_in_cnt;
- u64 rx_packet_tc6_in_cnt;
- u64 rx_packet_tc7_in_cnt;
- u64 rx_packet_tc0_out_cnt;
- u64 rx_packet_tc1_out_cnt;
- u64 rx_packet_tc2_out_cnt;
- u64 rx_packet_tc3_out_cnt;
- u64 rx_packet_tc4_out_cnt;
- u64 rx_packet_tc5_out_cnt;
- u64 rx_packet_tc6_out_cnt;
- u64 rx_packet_tc7_out_cnt;
-
- /* Tx packet level statistics */
- u64 tx_packet_tc0_in_cnt;
- u64 tx_packet_tc1_in_cnt;
- u64 tx_packet_tc2_in_cnt;
- u64 tx_packet_tc3_in_cnt;
- u64 tx_packet_tc4_in_cnt;
- u64 tx_packet_tc5_in_cnt;
- u64 tx_packet_tc6_in_cnt;
- u64 tx_packet_tc7_in_cnt;
- u64 tx_packet_tc0_out_cnt;
- u64 tx_packet_tc1_out_cnt;
- u64 tx_packet_tc2_out_cnt;
- u64 tx_packet_tc3_out_cnt;
- u64 tx_packet_tc4_out_cnt;
- u64 tx_packet_tc5_out_cnt;
- u64 tx_packet_tc6_out_cnt;
- u64 tx_packet_tc7_out_cnt;
-
- /* packet buffer statistics */
- u64 pkt_curr_buf_tc0_cnt;
- u64 pkt_curr_buf_tc1_cnt;
- u64 pkt_curr_buf_tc2_cnt;
- u64 pkt_curr_buf_tc3_cnt;
- u64 pkt_curr_buf_tc4_cnt;
- u64 pkt_curr_buf_tc5_cnt;
- u64 pkt_curr_buf_tc6_cnt;
- u64 pkt_curr_buf_tc7_cnt;
-
- u64 mb_uncopy_num;
- u64 lo_pri_unicast_rlt_drop_num;
- u64 hi_pri_multicast_rlt_drop_num;
- u64 lo_pri_multicast_rlt_drop_num;
- u64 rx_oq_drop_pkt_cnt;
- u64 tx_oq_drop_pkt_cnt;
- u64 nic_l2_err_drop_pkt_cnt;
- u64 roc_l2_err_drop_pkt_cnt;
-};
-
/* mac stats ,opcode id: 0x0032 */
struct hclge_mac_stats {
u64 mac_tx_mac_pause_num;
@@ -450,8 +358,6 @@ struct hclge_mac_stats {
#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
struct hclge_hw_stats {
struct hclge_mac_stats mac_stats;
- struct hclge_64_bit_stats all_64_bit_stats;
- struct hclge_32_bit_stats all_32_bit_stats;
u32 stats_timer;
};
@@ -464,6 +370,221 @@ struct hclge_vlan_type_cfg {
u16 tx_in_vlan_type;
};
+enum HCLGE_FD_MODE {
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
+ HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
+ HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
+};
+
+enum HCLGE_FD_KEY_TYPE {
+ HCLGE_FD_KEY_BASE_ON_PTYPE,
+ HCLGE_FD_KEY_BASE_ON_TUPLE,
+};
+
+enum HCLGE_FD_STAGE {
+ HCLGE_FD_STAGE_1,
+ HCLGE_FD_STAGE_2,
+};
+
+/* OUTER_XXX indicates tuples in tunnel header of tunnel packet
+ * INNER_XXX indicate tuples in tunneled header of tunnel packet or
+ * tuples of non-tunnel packet
+ */
+enum HCLGE_FD_TUPLE {
+ OUTER_DST_MAC,
+ OUTER_SRC_MAC,
+ OUTER_VLAN_TAG_FST,
+ OUTER_VLAN_TAG_SEC,
+ OUTER_ETH_TYPE,
+ OUTER_L2_RSV,
+ OUTER_IP_TOS,
+ OUTER_IP_PROTO,
+ OUTER_SRC_IP,
+ OUTER_DST_IP,
+ OUTER_L3_RSV,
+ OUTER_SRC_PORT,
+ OUTER_DST_PORT,
+ OUTER_L4_RSV,
+ OUTER_TUN_VNI,
+ OUTER_TUN_FLOW_ID,
+ INNER_DST_MAC,
+ INNER_SRC_MAC,
+ INNER_VLAN_TAG_FST,
+ INNER_VLAN_TAG_SEC,
+ INNER_ETH_TYPE,
+ INNER_L2_RSV,
+ INNER_IP_TOS,
+ INNER_IP_PROTO,
+ INNER_SRC_IP,
+ INNER_DST_IP,
+ INNER_L3_RSV,
+ INNER_SRC_PORT,
+ INNER_DST_PORT,
+ INNER_L4_RSV,
+ MAX_TUPLE,
+};
+
+enum HCLGE_FD_META_DATA {
+ PACKET_TYPE_ID,
+ IP_FRAGEMENT,
+ ROCE_TYPE,
+ NEXT_KEY,
+ VLAN_NUMBER,
+ SRC_VPORT,
+ DST_VPORT,
+ TUNNEL_PACKET,
+ MAX_META_DATA,
+};
+
+struct key_info {
+ u8 key_type;
+ u8 key_length;
+};
+
+static const struct key_info meta_data_key_info[] = {
+ { PACKET_TYPE_ID, 6},
+ { IP_FRAGEMENT, 1},
+ { ROCE_TYPE, 1},
+ { NEXT_KEY, 5},
+ { VLAN_NUMBER, 2},
+ { SRC_VPORT, 12},
+ { DST_VPORT, 12},
+ { TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+ { OUTER_DST_MAC, 48},
+ { OUTER_SRC_MAC, 48},
+ { OUTER_VLAN_TAG_FST, 16},
+ { OUTER_VLAN_TAG_SEC, 16},
+ { OUTER_ETH_TYPE, 16},
+ { OUTER_L2_RSV, 16},
+ { OUTER_IP_TOS, 8},
+ { OUTER_IP_PROTO, 8},
+ { OUTER_SRC_IP, 32},
+ { OUTER_DST_IP, 32},
+ { OUTER_L3_RSV, 16},
+ { OUTER_SRC_PORT, 16},
+ { OUTER_DST_PORT, 16},
+ { OUTER_L4_RSV, 32},
+ { OUTER_TUN_VNI, 24},
+ { OUTER_TUN_FLOW_ID, 8},
+ { INNER_DST_MAC, 48},
+ { INNER_SRC_MAC, 48},
+ { INNER_VLAN_TAG_FST, 16},
+ { INNER_VLAN_TAG_SEC, 16},
+ { INNER_ETH_TYPE, 16},
+ { INNER_L2_RSV, 16},
+ { INNER_IP_TOS, 8},
+ { INNER_IP_PROTO, 8},
+ { INNER_SRC_IP, 32},
+ { INNER_DST_IP, 32},
+ { INNER_L3_RSV, 16},
+ { INNER_SRC_PORT, 16},
+ { INNER_DST_PORT, 16},
+ { INNER_L4_RSV, 32},
+};
+
+#define MAX_KEY_LENGTH 400
+#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
+#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
+#define MAX_META_DATA_LENGTH 32
+
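Expanding the size arithmetic above (editorial note, not part of the patch):

/*
 * MAX_KEY_LENGTH is 400 bits = 50 bytes, so
 * MAX_KEY_DWORDS = DIV_ROUND_UP(50, 4) = 13 and
 * MAX_KEY_BYTES  = 13 * 4 = 52.
 */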
+enum HCLGE_FD_PACKET_TYPE {
+ NIC_PACKET,
+ ROCE_PACKET,
+};
+
+enum HCLGE_FD_ACTION {
+ HCLGE_FD_ACTION_ACCEPT_PACKET,
+ HCLGE_FD_ACTION_DROP_PACKET,
+};
+
+struct hclge_fd_key_cfg {
+ u8 key_sel;
+ u8 inner_sipv6_word_en;
+ u8 inner_dipv6_word_en;
+ u8 outer_sipv6_word_en;
+ u8 outer_dipv6_word_en;
+ u32 tuple_active;
+ u32 meta_data_active;
+};
+
+struct hclge_fd_cfg {
+ u8 fd_mode;
+ u8 fd_en;
+ u16 max_key_length;
+ u32 proto_support;
+ u32 rule_num[2]; /* rule entry number */
+ u16 cnt_num[2]; /* rule hit counter number */
+ struct hclge_fd_key_cfg key_cfg[2];
+};
+
+struct hclge_fd_rule_tuples {
+ u8 src_mac[6];
+ u8 dst_mac[6];
+ u32 src_ip[4];
+ u32 dst_ip[4];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_tag1;
+ u16 ether_proto;
+ u8 ip_tos;
+ u8 ip_proto;
+};
+
+struct hclge_fd_rule {
+ struct hlist_node rule_node;
+ struct hclge_fd_rule_tuples tuples;
+ struct hclge_fd_rule_tuples tuples_mask;
+ u32 unused_tuple;
+ u32 flow_type;
+ u8 action;
+ u16 vf_id;
+ u16 queue_id;
+ u16 location;
+};
+
+struct hclge_fd_ad_data {
+ u16 ad_id;
+ u8 drop_packet;
+ u8 forward_to_direct_queue;
+ u16 queue_id;
+ u8 use_counter;
+ u8 counter_id;
+ u8 use_next_stage;
+ u8 write_rule_id_to_bd;
+ u8 next_input_key;
+ u16 rule_id;
+};
+
+/* For each bit of TCAM entry, it uses a pair of 'x' and
+ * 'y' to indicate which value to match, like below:
+ * ----------------------------------
+ * | bit x | bit y | search value |
+ * ----------------------------------
+ * | 0 | 0 | always hit |
+ * ----------------------------------
+ * | 1 | 0 | match '0' |
+ * ----------------------------------
+ * | 0 | 1 | match '1' |
+ * ----------------------------------
+ * | 1 | 1 | invalid |
+ * ----------------------------------
+ * Then for input key(k) and mask(v), we can calculate the value by
+ * the formulae:
+ * x = (~k) & v
+ * y = (k ^ ~v) & k
+ */
+#define calc_x(x, k, v) ((x) = (~(k) & (v)))
+#define calc_y(y, k, v) \
+ do { \
+ const typeof(k) _k_ = (k); \
+ const typeof(v) _v_ = (v); \
+ (y) = (_k_ ^ ~_v_) & (_k_); \
+ } while (0)
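As a concrete example of the formulae above (a worked illustration, not part of the patch), matching only the upper nibble of a byte:

/*
 * key k = 0xA5, mask v = 0xF0 (compare bits 7..4, ignore bits 3..0):
 *
 *   calc_x(x, k, v):  x = (~0xA5) & 0xF0        = 0x50
 *   calc_y(y, k, v):  y = (0xA5 ^ ~0xF0) & 0xA5 = 0xA0
 *
 * Bit by bit this matches the table: masked-out bits get x = y = 0
 * ("always hit"), key bits that are 0 set only x ("match '0'"), and key
 * bits that are 1 set only y ("match '1'").
 */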
+
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
struct pci_dev *pdev;
@@ -547,12 +668,22 @@ struct hclge_dev {
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
u32 mps; /* Max packet size */
- enum hclge_mta_dmac_sel_type mta_mac_sel_type;
- bool enable_mta; /* Multicast filter enable */
-
struct hclge_vlan_type_cfg vlan_type_cfg;
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
+ struct hclge_fd_cfg fd_cfg;
+ struct hlist_head fd_rule_list;
+ u16 hclge_fd_rule_num;
+
+ u16 wanted_umv_size;
+ /* max available unicast mac vlan space */
+ u16 max_umv_size;
+ /* private unicast mac vlan space, it's same for PF and its VFs */
+ u16 priv_umv_size;
+ /* unicast mac vlan space shared by PF and its VFs */
+ u16 share_umv_size;
+ struct mutex umv_mutex; /* protect share_umv_size */
};
/* VPort level vlan tag configuration for TX direction */
@@ -605,13 +736,12 @@ struct hclge_vport {
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
+ u16 used_umv_num;
+
int vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
-
- bool accept_mta_mc; /* whether to accept mta filter multicast */
- unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -626,15 +756,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr);
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
- u8 func_id,
- bool enable);
-int hclge_update_mta_status_common(struct hclge_vport *vport,
- unsigned long *status,
- u16 idx,
- u16 count,
- bool update_filter);
-
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f34851c91eb3..04462a347a94 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -233,43 +233,6 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return 0;
}
-static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
- u8 *msg, u8 idx, bool is_end)
-{
-#define HCLGE_MTA_STATUS_MSG_SIZE 13
-#define HCLGE_MTA_STATUS_MSG_BITS \
- (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
-#define HCLGE_MTA_STATUS_MSG_END_BITS \
- (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
- unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
- u16 tbl_cnt;
- u16 tbl_idx;
- u8 msg_ofs;
- u8 msg_bit;
-
- tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
- HCLGE_MTA_STATUS_MSG_BITS;
-
- /* set msg field */
- msg_ofs = 0;
- msg_bit = 0;
- memset(status, 0, sizeof(status));
- for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
- if (msg[msg_ofs] & BIT(msg_bit))
- set_bit(tbl_idx, status);
-
- msg_bit++;
- if (msg_bit == BITS_PER_BYTE) {
- msg_bit = 0;
- msg_ofs++;
- }
- }
-
- return hclge_update_mta_status_common(vport,
- status, idx * HCLGE_MTA_STATUS_MSG_BITS,
- tbl_cnt, is_end);
-}
-
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -284,27 +247,6 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
status = hclge_add_mc_addr_common(vport, mac_addr);
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
status = hclge_rm_mc_addr_common(vport, mac_addr);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
- u8 func_id = vport->vport_id;
- bool enable = mbx_req->msg[2];
-
- status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) {
- resp_data = hdev->mta_mac_sel_type;
- resp_len = sizeof(u8);
- gen_resp = true;
- status = 0;
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) {
- /* mta status update msg format
- * msg[2.6 : 2.0] msg index
- * msg[2.7] msg is end
- * msg[15 : 3] mta status bits[103 : 0]
- */
- bool is_end = (mbx_req->msg[2] & 0x80) ? true : false;
-
- status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3],
- mbx_req->msg[2] & 0x7F,
- is_end);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 398971a062f4..24b1f2a0c32a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -10,8 +10,6 @@
#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
- SUPPORTED_Pause | \
- SUPPORTED_Asym_Pause | \
PHY_10BT_FEATURES | \
PHY_100BT_FEATURES | \
PHY_1000BT_FEATURES)
@@ -213,7 +211,7 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
}
phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
- phydev->advertising = phydev->supported;
+ phy_support_asym_pause(phydev);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 5db70a1451c5..aa5cb9834d73 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -172,7 +172,7 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
u8 pfc_bitmap)
{
struct hclge_desc desc;
- struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;
+ struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
@@ -188,11 +188,12 @@ static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
- pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
ether_addr_copy(pause_param->mac_addr, addr);
+ ether_addr_copy(pause_param->mac_addr_extra, addr);
pause_param->pause_trans_gap = pause_trans_gap;
pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
@@ -207,7 +208,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
u8 trans_gap;
int ret;
- pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
@@ -297,7 +298,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
}
static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
- u8 q_id, u16 qs_id)
+ u16 q_id, u16 qs_id)
{
struct hclge_nq_to_qs_link_cmd *map;
struct hclge_desc desc;
@@ -1279,10 +1280,15 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
return 0;
}
-void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
u8 i, bit_map = 0;
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ if (num_tc > hdev->vport[i].alloc_tqps)
+ return -EINVAL;
+ }
+
hdev->tm_info.num_tc = num_tc;
for (i = 0; i < hdev->tm_info.num_tc; i++)
@@ -1296,6 +1302,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hdev->hw_tc_map = bit_map;
hclge_tm_schd_info_init(hdev);
+
+ return 0;
}
int hclge_tm_init_hw(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index dd4c194747c1..25eef13a3e14 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -106,6 +106,10 @@ struct hclge_cfg_pause_param_cmd {
u8 pause_trans_gap;
u8 rsvd;
__le16 pause_trans_time;
+ u8 rsvd1[6];
+ /* extra mac address to do double check for pause frame */
+ u8 mac_addr_extra[ETH_ALEN];
+ u16 rsvd2;
};
struct hclge_pfc_stats_cmd {
@@ -128,7 +132,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_pause_setup_hw(struct hclge_dev *hdev);
int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
-void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);