Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  2187
1 file changed, 1164 insertions(+), 1023 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 24f7afacae02..cf8abbe01840 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6,6 +6,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -13,19 +14,23 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
-#include <net/ipv6.h>
+
#include <net/rtnetlink.h>
+
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
+#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"
+#include "hclge_trace.h"
+
#define HCLGE_NAME "hclge"
#define HCLGE_BUF_SIZE_UNIT 256U
@@ -40,20 +45,6 @@
#define HCLGE_PF_RESET_SYNC_TIME 20
#define HCLGE_PF_RESET_SYNC_CNT 1500
-/* Get DFX BD number offset */
-#define HCLGE_DFX_BIOS_BD_OFFSET 1
-#define HCLGE_DFX_SSU_0_BD_OFFSET 2
-#define HCLGE_DFX_SSU_1_BD_OFFSET 3
-#define HCLGE_DFX_IGU_BD_OFFSET 4
-#define HCLGE_DFX_RPU_0_BD_OFFSET 5
-#define HCLGE_DFX_RPU_1_BD_OFFSET 6
-#define HCLGE_DFX_NCSI_BD_OFFSET 7
-#define HCLGE_DFX_RTC_BD_OFFSET 8
-#define HCLGE_DFX_PPP_BD_OFFSET 9
-#define HCLGE_DFX_RCB_BD_OFFSET 10
-#define HCLGE_DFX_TQP_BD_OFFSET 11
-#define HCLGE_DFX_SSU_2_BD_OFFSET 12
-
#define HCLGE_LINK_STATUS_MS 10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
@@ -71,6 +62,10 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
+static void hclge_update_fec_stats(struct hclge_dev *hdev);
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt);
+static int hclge_update_port_info(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -91,67 +86,12 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
- HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
- HCLGE_COMM_NIC_CSQ_DEPTH_REG,
- HCLGE_COMM_NIC_CSQ_TAIL_REG,
- HCLGE_COMM_NIC_CSQ_HEAD_REG,
- HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
- HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
- HCLGE_COMM_NIC_CRQ_DEPTH_REG,
- HCLGE_COMM_NIC_CRQ_TAIL_REG,
- HCLGE_COMM_NIC_CRQ_HEAD_REG,
- HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
- HCLGE_COMM_CMDQ_INTR_STS_REG,
- HCLGE_COMM_CMDQ_INTR_EN_REG,
- HCLGE_COMM_CMDQ_INTR_GEN_REG};
-
-static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
- HCLGE_PF_OTHER_INT_REG,
- HCLGE_MISC_RESET_STS_REG,
- HCLGE_MISC_VECTOR_INT_STS,
- HCLGE_GLOBAL_RESET_REG,
- HCLGE_FUN_RST_ING,
- HCLGE_GRO_EN_REG};
-
-static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
- HCLGE_RING_RX_ADDR_H_REG,
- HCLGE_RING_RX_BD_NUM_REG,
- HCLGE_RING_RX_BD_LENGTH_REG,
- HCLGE_RING_RX_MERGE_EN_REG,
- HCLGE_RING_RX_TAIL_REG,
- HCLGE_RING_RX_HEAD_REG,
- HCLGE_RING_RX_FBD_NUM_REG,
- HCLGE_RING_RX_OFFSET_REG,
- HCLGE_RING_RX_FBD_OFFSET_REG,
- HCLGE_RING_RX_STASH_REG,
- HCLGE_RING_RX_BD_ERR_REG,
- HCLGE_RING_TX_ADDR_L_REG,
- HCLGE_RING_TX_ADDR_H_REG,
- HCLGE_RING_TX_BD_NUM_REG,
- HCLGE_RING_TX_PRIORITY_REG,
- HCLGE_RING_TX_TC_REG,
- HCLGE_RING_TX_MERGE_EN_REG,
- HCLGE_RING_TX_TAIL_REG,
- HCLGE_RING_TX_HEAD_REG,
- HCLGE_RING_TX_FBD_NUM_REG,
- HCLGE_RING_TX_OFFSET_REG,
- HCLGE_RING_TX_EBD_NUM_REG,
- HCLGE_RING_TX_EBD_OFFSET_REG,
- HCLGE_RING_TX_BD_ERR_REG,
- HCLGE_RING_EN_REG};
-
-static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
- HCLGE_TQP_INTR_GL0_REG,
- HCLGE_TQP_INTR_GL1_REG,
- HCLGE_TQP_INTR_GL2_REG,
- HCLGE_TQP_INTR_RL_REG};
-
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
- "App Loopback test",
- "Serdes serial Loopback test",
- "Serdes parallel Loopback test",
- "Phy Loopback test"
+ "External Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
+ "Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
@@ -371,36 +311,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static const u32 hclge_dfx_bd_offset_list[] = {
- HCLGE_DFX_BIOS_BD_OFFSET,
- HCLGE_DFX_SSU_0_BD_OFFSET,
- HCLGE_DFX_SSU_1_BD_OFFSET,
- HCLGE_DFX_IGU_BD_OFFSET,
- HCLGE_DFX_RPU_0_BD_OFFSET,
- HCLGE_DFX_RPU_1_BD_OFFSET,
- HCLGE_DFX_NCSI_BD_OFFSET,
- HCLGE_DFX_RTC_BD_OFFSET,
- HCLGE_DFX_PPP_BD_OFFSET,
- HCLGE_DFX_RCB_BD_OFFSET,
- HCLGE_DFX_TQP_BD_OFFSET,
- HCLGE_DFX_SSU_2_BD_OFFSET
-};
-
-static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
- HCLGE_OPC_DFX_BIOS_COMMON_REG,
- HCLGE_OPC_DFX_SSU_REG_0,
- HCLGE_OPC_DFX_SSU_REG_1,
- HCLGE_OPC_DFX_IGU_EGU_REG,
- HCLGE_OPC_DFX_RPU_REG_0,
- HCLGE_OPC_DFX_RPU_REG_1,
- HCLGE_OPC_DFX_NCSI_REG,
- HCLGE_OPC_DFX_RTC_REG,
- HCLGE_OPC_DFX_PPP_REG,
- HCLGE_OPC_DFX_RCB_REG,
- HCLGE_OPC_DFX_TQP_REG,
- HCLGE_OPC_DFX_SSU_REG_2
-};
-
static const struct key_info meta_data_key_info[] = {
{ PACKET_TYPE_ID, 6 },
{ IP_FRAGEMENT, 1 },
@@ -485,6 +395,48 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
return hclge_comm_cmd_send(&hw->hw, desc, num);
}
+static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ trace_hclge_pf_cmd_send(hw, desc, 0, num);
+
+ if (!is_special) {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
+ } else {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i],
+ i, num);
+ }
+}
+
+static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ return;
+
+ trace_hclge_pf_cmd_get(hw, desc, 0, num);
+
+ if (!is_special) {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_cmd_get(hw, &desc[i], i, num);
+ } else {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i],
+ i, num);
+ }
+}
+
+static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
+ .trace_cmd_send = hclge_trace_cmd_send,
+ .trace_cmd_get = hclge_trace_cmd_get,
+};
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
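The hclge_cmq_ops vtable above hands PF-side tracing hooks to the shared command-queue layer: standard descriptors are traced one by one, while "special" descriptors carry raw payload instead of the usual descriptor layout, so they are traced as bare __le32 words. A minimal sketch of the hook site, assuming the common layer stores the registered ops on its cmq and checks them before calling (the function below, and the ops field location, are assumptions for illustration):

static void hclge_comm_trace_send(struct hclge_comm_hw *hw,
				  struct hclge_desc *desc, int num,
				  bool is_special)
{
	/* ops may be NULL if the driver never registered trace hooks */
	if (hw->cmq.ops && hw->cmq.ops->trace_cmd_send)
		hw->cmq.ops->trace_cmd_send(hw, desc, num, is_special);
}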
@@ -538,7 +490,7 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
- * so GFP_ATOMIC is more suitalbe here
+ * so GFP_ATOMIC is more suitable here
*/
desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
@@ -630,7 +582,7 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
int size, u64 *data)
{
u64 *buf = data;
- u32 i;
+ int i;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
@@ -643,25 +595,21 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
return buf;
}
-static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
- const struct hclge_comm_stats_str strs[],
- int size, u8 *data)
+static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
+ const struct hclge_comm_stats_str strs[],
+ int size, u8 **data)
{
- char *buff = (char *)data;
- u32 i;
+ int i;
if (stringset != ETH_SS_STATS)
- return buff;
+ return;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
continue;
- snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
- buff = buff + ETH_GSTRING_LEN;
+ ethtool_puts(data, strs[i].desc);
}
-
- return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
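Replacing the snprintf()-and-advance pattern, ethtool_puts() takes the string cursor by reference and moves it one ETH_GSTRING_LEN slot per call, which is why hclge_comm_get_strings() now returns void and takes u8 **data. Roughly what the helper does, as a sketch (the real one lives in linux/ethtool.h):

static inline void ethtool_puts_sketch(u8 **data, const char *str)
{
	/* copy one fixed-width name slot, then advance the cursor */
	strscpy((char *)*data, str, ETH_GSTRING_LEN);
	*data += ETH_GSTRING_LEN;
}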
@@ -679,14 +627,15 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
}
}
+ hclge_update_fec_stats(hdev);
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
"Update MAC stats fail, status = %d.\n", status);
}
-static void hclge_update_stats(struct hnae3_handle *handle,
- struct net_device_stats *net_stats)
+static void hclge_update_stats(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -715,7 +664,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
HNAE3_SUPPORT_PHY_LOOPBACK | \
HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
- HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
+ HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -737,9 +687,16 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 2;
- handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ if (hdev->ae_dev->dev_specs.hilink_version !=
+ HCLGE_HILINK_H60) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ }
+
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
hdev->hw.mac.phydev->drv->set_loopback) ||
@@ -757,39 +714,38 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
- u8 *data)
+ u8 **data)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u8 *p = (char *)data;
+ const char *str;
int size;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
- size, p);
- p = hclge_comm_tqps_get_strings(handle, p);
+ hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
+ size, data);
+ hclge_comm_tqps_get_strings(handle, data);
} else if (stringset == ETH_SS_TEST) {
+ if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
+ str = hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL];
+ ethtool_puts(data, str);
+ }
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_APP];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_PHY];
+ ethtool_puts(data, str);
}
}
}
@@ -811,7 +767,7 @@ static void hclge_get_mac_stat(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- hclge_update_stats(handle, NULL);
+ hclge_update_stats(handle);
mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
@@ -966,9 +922,9 @@ static const struct hclge_speed_bit_map speed_bit_map[] = {
{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
- {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
- {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
- {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
+ {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
+ {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
+ {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS},
};
static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
@@ -1003,132 +959,161 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
+static void hclge_update_fec_support(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+
+ if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->supported);
+}
+
+static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = {
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
+ {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+};
+
+static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = {
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
+};
+
+static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = {
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
+ {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+};
+
+static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = {
+ {HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+ {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+};
+
static void hclge_convert_setting_sr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_lr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(
- ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_cr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_kr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_1G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
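The four open-coded if-chains in the hclge_convert_setting_*() helpers above collapse into data tables walked by a single loop each, so a new speed or lane variant (such as the 200G R4_EXT entries) becomes a one-line table addition rather than another branch. A sketch of the pair type the tables assume; the real definition sits in the driver headers and the field widths may differ:

struct hclge_link_mode_bmap {
	u16 support_bit;	/* HCLGE_SUPPORT_* bit from the firmware speed_ability word */
	enum ethtool_link_mode_bit_indices link_mode;	/* matching ETHTOOL_LINK_MODE_*_BIT */
};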
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ /* If firmware has reported fec_ability, don't need to convert by speed */
+ if (mac->fec_ability)
+ goto out;
switch (mac->speed) {
case HCLGE_MAC_SPEED_10G:
case HCLGE_MAC_SPEED_40G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_25G:
case HCLGE_MAC_SPEED_50G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
- BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_100G:
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
+ break;
case HCLGE_MAC_SPEED_200G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
- mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_LLRS);
break;
default:
mac->fec_ability = 0;
break;
}
+
+out:
+ hclge_update_fec_support(mac);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
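After this refactor a firmware-reported fec_ability wins outright and the per-speed switch is only a fallback; either way hclge_update_fec_support() projects the ability bits onto ethtool link modes. A worked example for the 200G fallback, assuming HNAE3_FEC_* are bit indices:

/* from the 200G case of the switch above */
u32 ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_LLRS);

/* hclge_update_fec_support() then sets ETHTOOL_LINK_MODE_FEC_RS_BIT and
 * ETHTOOL_LINK_MODE_FEC_LLRS_BIT in mac->supported and leaves BASER and
 * NONE cleared; AUTO has no supported-mode bit, so it drops out.
 */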
@@ -1217,13 +1202,13 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
static u32 hclge_get_max_speed(u16 speed_ability)
{
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ if (speed_ability & HCLGE_SUPPORT_200G_BITS)
return HCLGE_MAC_SPEED_200G;
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ if (speed_ability & HCLGE_SUPPORT_100G_BITS)
return HCLGE_MAC_SPEED_100G;
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ if (speed_ability & HCLGE_SUPPORT_50G_BITS)
return HCLGE_MAC_SPEED_50G;
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
@@ -1388,6 +1373,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+ ae_dev->dev_specs.tnl_num = 0;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1411,6 +1397,8 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
+ ae_dev->dev_specs.tnl_num = req1->tnl_num;
+ ae_dev->dev_specs.hilink_version = req1->hilink_version;
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1546,9 +1534,8 @@ static void hclge_init_tc_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
- int node, ret;
+ int ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret)
@@ -1575,7 +1562,7 @@ static int hclge_configure(struct hclge_dev *hdev)
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
- if (hnae3_dev_fd_supported(hdev)) {
+ if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
hdev->fd_en = true;
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}
@@ -1586,6 +1573,9 @@ static int hclge_configure(struct hclge_dev *hdev)
cfg.default_speed, ret);
return ret;
}
+ hdev->hw.mac.req_speed = hdev->hw.mac.speed;
+ hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
+ hdev->hw.mac.req_duplex = DUPLEX_FULL;
hclge_parse_link_mode(hdev, cfg.speed_ability);
@@ -1594,13 +1584,6 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_tc_config(hdev);
hclge_init_kdump_kernel_config(hdev);
- /* Set the affinity based on numa node */
- node = dev_to_node(&hdev->pdev->dev);
- if (node != NUMA_NO_NODE)
- cpumask = cpumask_of_node(node);
-
- cpumask_copy(&hdev->affinity_mask, cpumask);
-
return ret;
}
@@ -1625,7 +1608,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
@@ -1643,6 +1626,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_comm_tqp *tqp;
int i;
@@ -1676,6 +1660,14 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
(i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
HCLGE_TQP_REG_SIZE;
+ /* when device supports tx push and has device memory,
+ * the queue can execute push mode or doorbell mode on
+ * device memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGE_TQP_MEM_OFFSET(hdev, i);
+
tqp++;
}
@@ -1813,7 +1805,8 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
- nic->numa_node_mask = hdev->numa_node_mask;
+ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
+ MAX_NUMNODES);
nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
@@ -1863,6 +1856,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ vport->port_base_vlan_cfg.tbl_sta = true;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
vport->req_vlan_fltr_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
@@ -2188,8 +2182,8 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
-static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
- struct hclge_pkt_buf_alloc *buf_alloc)
+static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER 0x3C00
#define COMPENSATE_HALF_MPS_NUM 5
@@ -2364,7 +2358,7 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
for (i = 0; i < 2; i++) {
hclge_cmd_setup_basic_desc(&desc[i],
HCLGE_OPC_RX_COM_THRD_ALLOC, false);
- req = (struct hclge_rx_com_thrd *)&desc[i].data;
+ req = (struct hclge_rx_com_thrd *)desc[i].data;
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
@@ -2504,7 +2498,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
- roce->numa_node_mask = nic->numa_node_mask;
+ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
+ MAX_NUMNODES);
return 0;
}
@@ -2587,7 +2582,7 @@ static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
@@ -2611,6 +2606,7 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
+ req->lane_num = lane_num;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2622,33 +2618,44 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
return 0;
}
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
- if (!mac->support_autoneg && mac->speed == speed &&
- mac->duplex == duplex)
+ if (!mac->support_autoneg && mac->speed == (u32)speed &&
+ mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
if (ret)
return ret;
hdev->hw.mac.speed = speed;
hdev->hw.mac.duplex = duplex;
+ if (!lane_num)
+ hdev->hw.mac.lane_num = lane_num;
return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+
+ if (ret)
+ return ret;
+
+ hdev->hw.mac.req_speed = (u32)speed;
+ hdev->hw.mac.req_duplex = duplex;
+
+ return 0;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2728,6 +2735,157 @@ static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
return 0;
}
+static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
+ u32 desc_index = 0;
+ u32 data_index = 0;
+ u32 i;
+
+ for (i = 0; i < lane_size; i++) {
+ if (data_index >= HCLGE_DESC_DATA_LEN) {
+ desc_index++;
+ data_index = 0;
+ }
+
+ if (desc_index >= desc_len)
+ return;
+
+ hdev->fec_stats.per_lanes[i] +=
+ le32_to_cpu(desc[desc_index].data[data_index]);
+ data_index++;
+ }
+}
+
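hclge_parse_fec_stats_lanes() consumes the per-lane counters as one flat run of __le32 words spilling across descriptors, HCLGE_DESC_DATA_LEN words per descriptor payload, and bails out early if firmware returned fewer descriptors than expected; the caller passes &desc[1] because desc[0] carries the aggregate counters. The same flat-to-2D index math, isolated as a sketch:

static inline void hclge_fec_lane_pos(u32 i, u32 *desc_index, u32 *data_index)
{
	/* i counts __le32 words across the whole lane-stats run */
	*desc_index = i / HCLGE_DESC_DATA_LEN;
	*data_index = i % HCLGE_DESC_DATA_LEN;
}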
+static void hclge_parse_fec_stats(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ struct hclge_query_fec_stats_cmd *req;
+
+ req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
+
+ hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
+ hdev->fec_stats.rs_corr_blocks +=
+ le32_to_cpu(req->rs_fec_corr_blocks);
+ hdev->fec_stats.rs_uncorr_blocks +=
+ le32_to_cpu(req->rs_fec_uncorr_blocks);
+ hdev->fec_stats.rs_error_blocks +=
+ le32_to_cpu(req->rs_fec_error_blocks);
+ hdev->fec_stats.base_r_corr_blocks +=
+ le32_to_cpu(req->base_r_fec_corr_blocks);
+ hdev->fec_stats.base_r_uncorr_blocks +=
+ le32_to_cpu(req->base_r_fec_uncorr_blocks);
+
+ hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
+}
+
+static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
+ int ret;
+ u32 i;
+
+ for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
+ true);
+ if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
+
+ return 0;
+}
+
+static void hclge_update_fec_stats(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
+ test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_update_fec_stats_hw(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to update fec stats, ret = %d\n", ret);
+
+ clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
+}
+
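hclge_update_fec_stats() can be entered concurrently from the ethtool path and the periodic stats work, so test_and_set_bit() serves as a lock-free single-flight guard: a late arrival sees the bit already set and skips the refresh instead of blocking. The pattern in isolation, with a hypothetical state bit standing in for HCLGE_STATE_FEC_STATS_UPDATING:

static void refresh_once(unsigned long *state)
{
	if (test_and_set_bit(MY_UPDATING_BIT, state))
		return;		/* another context is already refreshing */

	/* ... issue the firmware query, accumulate counters ... */

	clear_bit(MY_UPDATING_BIT, state);
}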
+static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
+ fec_stats->uncorrectable_blocks.total =
+ hdev->fec_stats.rs_uncorr_blocks;
+}
+
+static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 i;
+
+ if (hdev->fec_stats.base_r_lane_num == 0 ||
+ hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
+ dev_err(&hdev->pdev->dev,
+ "fec stats lane number(%llu) is invalid\n",
+ hdev->fec_stats.base_r_lane_num);
+ return;
+ }
+
+ for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
+ fec_stats->corrected_blocks.lanes[i] =
+ hdev->fec_stats.base_r_corr_per_lanes[i];
+ fec_stats->uncorrectable_blocks.lanes[i] =
+ hdev->fec_stats.base_r_uncorr_per_lanes[i];
+ }
+}
+
+static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ switch (fec_mode) {
+ case BIT(HNAE3_FEC_RS):
+ case BIT(HNAE3_FEC_LLRS):
+ hclge_get_fec_stats_total(hdev, fec_stats);
+ break;
+ case BIT(HNAE3_FEC_BASER):
+ hclge_get_fec_stats_lanes(hdev, fec_stats);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fec stats is not supported by current fec mode(0x%x)\n",
+ fec_mode);
+ break;
+ }
+}
+
+static void hclge_get_fec_stats(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ if (fec_mode == BIT(HNAE3_FEC_NONE) ||
+ fec_mode == BIT(HNAE3_FEC_AUTO) ||
+ fec_mode == BIT(HNAE3_FEC_USER_DEF))
+ return;
+
+ hclge_update_fec_stats(hdev);
+
+ hclge_comm_get_fec_stats(hdev, fec_stats);
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2742,6 +2900,9 @@ static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
if (fec_mode & BIT(HNAE3_FEC_RS))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_LLRS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
if (fec_mode & BIT(HNAE3_FEC_BASER))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
@@ -2792,11 +2953,9 @@ static int hclge_mac_init(struct hclge_dev *hdev)
int ret;
hdev->support_sfp_query = true;
- hdev->hw.mac.duplex = HCLGE_MAC_FULL;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex);
- if (ret)
- return ret;
+
+ if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
@@ -2804,6 +2963,14 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
+ if (!hdev->hw.mac.autoneg) {
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed,
+ hdev->hw.mac.req_duplex,
+ hdev->hw.mac.lane_num);
+ if (ret)
+ return ret;
+ }
+
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
@@ -2923,9 +3090,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
static void hclge_update_link_status(struct hclge_dev *hdev)
{
- struct hnae3_handle *rhandle = &hdev->vport[0].roce;
struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
int state;
int ret;
@@ -2944,10 +3109,20 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (state != hdev->hw.mac.link) {
hdev->hw.mac.link = state;
+ if (state == HCLGE_LINK_STATUS_UP)
+ hclge_update_port_info(hdev);
+
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
- if (rclient && rclient->ops->link_status_change)
- rclient->ops->link_status_change(rhandle, state);
+
+ if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
+ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_client *rclient = hdev->roce_client;
+
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+ state);
+ }
hclge_push_link_status(hdev);
}
@@ -2986,6 +3161,9 @@ static void hclge_update_fec_advertising(struct hclge_mac *mac)
if (mac->fec_mode & BIT(HNAE3_FEC_RS))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->advertising);
else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
mac->advertising);
@@ -3035,7 +3213,6 @@ static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
if (hnae3_dev_fec_supported(hdev))
- /* update fec ability by speed */
hclge_convert_setting_fec(mac);
/* firmware can not identify back plane type, the media type
@@ -3117,10 +3294,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
mac->speed_type = QUERY_ACTIVE_SPEED;
+ mac->lane_num = resp->lane_num;
if (!resp->active_fec)
mac->fec_mode = 0;
else
mac->fec_mode = BIT(resp->active_fec);
+ mac->fec_ability = resp->fec_ability;
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -3221,9 +3400,9 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
return ret;
}
- hdev->hw.mac.autoneg = cmd->base.autoneg;
- hdev->hw.mac.speed = cmd->base.speed;
- hdev->hw.mac.duplex = cmd->base.duplex;
+ hdev->hw.mac.req_autoneg = cmd->base.autoneg;
+ hdev->hw.mac.req_speed = cmd->base.speed;
+ hdev->hw.mac.req_duplex = cmd->base.duplex;
linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
return 0;
@@ -3244,6 +3423,7 @@ static int hclge_update_tp_port_info(struct hclge_dev *hdev)
hdev->hw.mac.autoneg = cmd.base.autoneg;
hdev->hw.mac.speed = cmd.base.speed;
hdev->hw.mac.duplex = cmd.base.duplex;
+ linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
return 0;
}
@@ -3255,9 +3435,9 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
if (!hnae3_dev_phy_imp_supported(hdev))
return 0;
- cmd.base.autoneg = hdev->hw.mac.autoneg;
- cmd.base.speed = hdev->hw.mac.speed;
- cmd.base.duplex = hdev->hw.mac.duplex;
+ cmd.base.autoneg = hdev->hw.mac.req_autoneg;
+ cmd.base.speed = hdev->hw.mac.req_speed;
+ cmd.base.duplex = hdev->hw.mac.req_duplex;
linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
@@ -3266,7 +3446,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed = HCLGE_MAC_SPEED_UNKNOWN;
+ u32 speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -3277,10 +3457,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (!hdev->support_sfp_query)
return 0;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ speed = mac->speed;
ret = hclge_get_sfp_info(hdev, mac);
- else
+ } else {
+ speed = HCLGE_MAC_SPEED_UNKNOWN;
ret = hclge_get_sfp_speed(hdev, &speed);
+ }
if (ret == -EOPNOTSUPP) {
hdev->support_sfp_query = false;
@@ -3292,16 +3475,18 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
hclge_update_port_capability(hdev, mac);
+ if (mac->speed != speed)
+ (void)hclge_tm_port_shaper_cfg(hdev);
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
- HCLGE_MAC_FULL);
+ HCLGE_MAC_FULL, mac->lane_num);
} else {
if (speed == HCLGE_MAC_SPEED_UNKNOWN)
return 0; /* do nothing if no SFP */
/* must config full duplex for SFP */
- return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
}
}
@@ -3315,7 +3500,7 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
-static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
+struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
if (!pci_num_vf(hdev->pdev)) {
dev_err(&hdev->pdev->dev,
@@ -3374,6 +3559,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
link_state_old = vport->vf_info.link_state;
vport->vf_info.link_state = link_state;
+ /* return success directly if the VF is unalive, VF will
+ * query link state itself when it starts work.
+ */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ return 0;
+
ret = hclge_push_vf_link_status(vport);
if (ret) {
vport->vf_info.link_state = link_state_old;
@@ -3384,6 +3575,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
return ret;
}
+static void hclge_set_reset_pending(struct hclge_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+ * type do not need to pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3404,7 +3606,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
@@ -3414,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3449,9 +3651,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
+#define HCLGE_IMP_RESET_DELAY 5
+
switch (event_type) {
case HCLGE_VECTOR0_EVENT_PTP:
case HCLGE_VECTOR0_EVENT_RST:
+ if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
+ mdelay(HCLGE_IMP_RESET_DELAY);
+
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
case HCLGE_VECTOR0_EVENT_MBX:
@@ -3554,17 +3761,6 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
-static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
-{
- irq_set_affinity_hint(hdev->misc_vector.vector_irq,
- &hdev->affinity_mask);
-}
-
-static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
-{
- irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
-}
-
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
int ret;
@@ -3575,7 +3771,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, hdev->misc_vector.name, hdev);
+ IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
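IRQF_NO_AUTOEN makes request_irq() install the handler without enabling the interrupt line, closing the window in which the misc vector could fire against half-initialised state; the driver then enables it explicitly once setup is done. The intended sequence, sketched (exactly where enable_irq() lands in the real init path is an assumption here):

ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
		  IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
if (ret)
	return ret;

/* ... finish initialising everything the handler touches ... */

enable_irq(hdev->misc_vector.vector_irq);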
@@ -3710,8 +3906,16 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return ret;
}
- if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ if (!reset ||
+ !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
+ continue;
+
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
+ hdev->reset_type == HNAE3_FUNC_RESET) {
+ set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
+ &vport->need_notify);
continue;
+ }
/* Inform VF to process the reset.
* hclge_inform_reset_assert_to_vf may fail if VF
@@ -3860,7 +4064,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
- set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
hclge_reset_task_schedule(hdev);
break;
default:
@@ -3894,6 +4098,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
if (hdev->reset_type != HNAE3_NONE_RESET &&
rst_level < hdev->reset_type)
return HNAE3_NONE_RESET;
@@ -4035,7 +4241,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
return false;
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->rst_stats.reset_fail_cnt++;
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, hdev->reset_type);
dev_info(&hdev->pdev->dev,
"re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
@@ -4278,14 +4484,26 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGE_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
+ BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
+
struct hclge_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
+ /* To prevent reset triggered by hclge_reset_event */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
+
set_bit(rst_type, &hdev->default_reset_request);
}
static void hclge_reset_timer(struct timer_list *t)
{
- struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclge_dev *hdev = timer_container_of(hdev, t, reset_timer);
/* if default_reset_request has no value, it means that this reset
* request has already be handled, so just return here
@@ -4351,6 +4569,7 @@ static void hclge_handle_err_recovery(struct hclge_dev *hdev)
if (hclge_find_error_source(hdev)) {
hclge_handle_error_info_log(ae_dev);
hclge_handle_mac_tnl(hdev);
+ hclge_handle_vf_queue_err_ras(hdev);
}
hclge_handle_err_reset_request(hdev);
@@ -4409,18 +4628,25 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
+#define HCLGE_ALIVE_SECONDS_NORMAL 8
+
+ unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
int i;
/* start from vport 1 for PF is always alive */
for (i = 1; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
- if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+ if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
+ !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ continue;
+ if (time_after(jiffies, vport->last_active_jiffies +
+ alive_time)) {
clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
-
- /* If vf is not alive, set to default value */
- if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
- vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ dev_warn(&hdev->pdev->dev,
+ "VF %u heartbeat timeout\n",
+ i - HCLGE_VF_VPORT_START_NUM);
+ }
}
}
@@ -4646,7 +4872,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4660,12 +4886,11 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return ret;
}
- hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);
return 0;
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
u8 tuple_sets;
@@ -4751,9 +4976,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic,
- &hdev->hw.hw, true,
- &hdev->rss_cfg);
+ ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg);
if (ret)
return ret;
@@ -5332,7 +5555,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
struct hclge_fd_key_cfg *key_cfg;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
@@ -6029,8 +6252,7 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
}
-static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs,
+static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule, u8 ip_proto)
{
rule->tuples.src_ip[IPV4_INDEX] =
@@ -6059,8 +6281,7 @@ static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
rule->tuples_mask.ip_proto = 0xFF;
}
-static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs,
+static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
rule->tuples.src_ip[IPV4_INDEX] =
@@ -6083,19 +6304,18 @@ static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
rule->tuples_mask.ether_proto = 0xFFFF;
}
-static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs,
+static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule, u8 ip_proto)
{
- be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ fs->h_u.tcp_ip6_spec.ip6src);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ fs->m_u.tcp_ip6_spec.ip6src);
- be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ fs->h_u.tcp_ip6_spec.ip6dst);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ fs->m_u.tcp_ip6_spec.ip6dst);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
@@ -6113,19 +6333,18 @@ static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
rule->tuples_mask.ip_proto = 0xFF;
}
-static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs,
+static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
- be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ fs->h_u.usr_ip6_spec.ip6src);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ fs->m_u.usr_ip6_spec.ip6src);
- be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ fs->h_u.usr_ip6_spec.ip6dst);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ fs->m_u.usr_ip6_spec.ip6dst);
rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
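The be32_to_cpu_array()/IPV6_SIZE pairs are replaced by dedicated ipv6_addr_be32_to_cpu()/ipv6_addr_cpu_to_be32() helpers that bake in the four-word IPv6 address length. A sketch of what such a helper reduces to, assuming IPV6_ADDR_WORDS is the 4-word length a later hunk in this diff uses:

static inline void ipv6_addr_be32_to_cpu_sketch(u32 *dst, const __be32 *src)
{
	int i;

	for (i = 0; i < IPV6_ADDR_WORDS; i++)	/* 4 x 32-bit words = 128 bits */
		dst[i] = be32_to_cpu(src[i]);
}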
@@ -6137,8 +6356,7 @@ static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
rule->tuples_mask.ether_proto = 0xFFFF;
}
-static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs,
+static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
@@ -6174,8 +6392,7 @@ static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
rule->ep.user_def = *info;
}
-static int hclge_fd_get_tuple(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs,
+static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule,
struct hclge_fd_user_def_info *info)
{
@@ -6183,31 +6400,31 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
switch (flow_type) {
case SCTP_V4_FLOW:
- hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
+ hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP);
break;
case TCP_V4_FLOW:
- hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
+ hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP);
break;
case UDP_V4_FLOW:
- hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
+ hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP);
break;
case IP_USER_FLOW:
- hclge_fd_get_ip4_tuple(hdev, fs, rule);
+ hclge_fd_get_ip4_tuple(fs, rule);
break;
case SCTP_V6_FLOW:
- hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
+ hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP);
break;
case TCP_V6_FLOW:
- hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
+ hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP);
break;
case UDP_V6_FLOW:
- hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
+ hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP);
break;
case IPV6_USER_FLOW:
- hclge_fd_get_ip6_tuple(hdev, fs, rule);
+ hclge_fd_get_ip6_tuple(fs, rule);
break;
case ETHER_FLOW:
- hclge_fd_get_ether_tuple(hdev, fs, rule);
+ hclge_fd_get_ether_tuple(fs, rule);
break;
default:
return -EOPNOTSUPP;
@@ -6337,7 +6554,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 action;
int ret;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"flow table director is not supported\n");
return -EOPNOTSUPP;
@@ -6364,7 +6581,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (!rule)
return -ENOMEM;
- ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
+ ret = hclge_fd_get_tuple(fs, rule, &info);
if (ret) {
kfree(rule);
return ret;
@@ -6393,7 +6610,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
struct ethtool_rx_flow_spec *fs;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6429,9 +6646,6 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
struct hlist_node *node;
u16 location;
- if (!hnae3_dev_fd_supported(hdev))
- return;
-
spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
@@ -6456,6 +6670,9 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
hclge_clear_fd_rules_in_list(hdev, true);
hclge_fd_disable_user_def(hdev);
}
@@ -6471,7 +6688,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
* return value. If error is returned here, the reset process will
* fail.
*/
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
/* if fd is disabled, should not restore it when reset */
@@ -6495,7 +6712,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
return -EOPNOTSUPP;
cmd->rule_cnt = hdev->hclge_fd_rule_num;
@@ -6556,21 +6773,19 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
struct ethtool_tcpip6_spec *spec,
struct ethtool_tcpip6_spec *spec_mask)
{
- cpu_to_be32_array(spec->ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- cpu_to_be32_array(spec->ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
+ ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
- cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
- IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6src,
+ rule->tuples_mask.src_ip);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
- cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
- IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -6589,19 +6804,19 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
struct ethtool_usrip6_spec *spec,
struct ethtool_usrip6_spec *spec_mask)
{
- cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
- cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
+ ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
- cpu_to_be32_array(spec_mask->ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6src,
+ rule->tuples_mask.src_ip);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
- cpu_to_be32_array(spec_mask->ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -6713,7 +6928,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6774,9 +6989,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node2;
- int cnt = 0;
+ u32 cnt = 0;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
@@ -6819,7 +7034,7 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
} else {
int i;
- for (i = 0; i < IPV6_SIZE; i++) {
+ for (i = 0; i < IPV6_ADDR_WORDS; i++) {
tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
}
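The hunks above replace open-coded cpu_to_be32_array()/be32_to_cpu_array() calls, each with an explicit IPV6_SIZE word count, by dedicated IPv6 helpers sized by IPV6_ADDR_WORDS. A minimal userspace sketch of the assumed helper semantics, with htonl()/ntohl() standing in for the kernel's cpu_to_be32()/be32_to_cpu():

#include <stdint.h>
#include <arpa/inet.h>		/* htonl()/ntohl() */

#define IPV6_ADDR_WORDS	4	/* four 32-bit words per IPv6 address */

/* assumed shape of ipv6_addr_cpu_to_be32(): convert each word to BE */
static void ipv6_addr_cpu_to_be32_sketch(uint32_t dst[IPV6_ADDR_WORDS],
					 const uint32_t src[IPV6_ADDR_WORDS])
{
	int i;

	for (i = 0; i < IPV6_ADDR_WORDS; i++)
		dst[i] = htonl(src[i]);
}

/* assumed shape of ipv6_addr_be32_to_cpu(): the inverse conversion */
static void ipv6_addr_be32_to_cpu_sketch(uint32_t dst[IPV6_ADDR_WORDS],
					 const uint32_t src[IPV6_ADDR_WORDS])
{
	int i;

	for (i = 0; i < IPV6_ADDR_WORDS; i++)
		dst[i] = ntohl(src[i]);
}

Either direction copies exactly IPV6_ADDR_WORDS words, which is what lets the IPV6_SIZE argument disappear from every call site.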
@@ -6876,7 +7091,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
struct hclge_fd_rule *rule;
u16 bit_id;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
/* when an fd rule added by the user already exists,
@@ -7044,8 +7259,9 @@ static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
}
}
-static void hclge_get_cls_key_ip(const struct flow_rule *flow,
- struct hclge_fd_rule *rule)
+static int hclge_get_cls_key_ip(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule,
+ struct netlink_ext_ack *extack)
{
u16 addr_type = 0;
@@ -7054,6 +7270,9 @@ static void hclge_get_cls_key_ip(const struct flow_rule *flow,
flow_rule_match_control(flow, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags, extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -7070,18 +7289,20 @@ static void hclge_get_cls_key_ip(const struct flow_rule *flow,
struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(flow, &match);
- be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip,
- match.mask->src.s6_addr32, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip,
- match.mask->dst.s6_addr32, IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ match.key->src.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ match.mask->src.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ match.key->dst.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ match.mask->dst.s6_addr32);
} else {
rule->unused_tuple |= BIT(INNER_SRC_IP);
rule->unused_tuple |= BIT(INNER_DST_IP);
}
+
+ return 0;
}
static void hclge_get_cls_key_port(const struct flow_rule *flow,
@@ -7107,17 +7328,19 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
struct flow_dissector *dissector = flow->match.dissector;
+ int ret;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
- dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
+ dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
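The BIT() to BIT_ULL() switch above tracks used_keys growing to 64 bits: the kernel's BIT(n) expands to an unsigned-long shift, which is only 32 bits wide on 32-bit builds, so masks for dissector keys numbered 32 and above need the explicit unsigned-long-long form (and %#llx in the log). A compressed, self-contained demonstration:

#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))	/* as in <linux/bits.h> */

int main(void)
{
	/* hypothetical dissector key id above 31; a 32-bit 1UL shift
	 * here would be undefined behaviour
	 */
	unsigned long long used_keys = BIT_ULL(40);

	printf("mask for key 40: %#llx\n", BIT_ULL(40));
	printf("unsupported bits: %#llx\n", used_keys & ~BIT_ULL(40));
	return 0;
}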
@@ -7125,7 +7348,11 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev,
hclge_get_cls_key_basic(flow, rule);
hclge_get_cls_key_mac(flow, rule);
hclge_get_cls_key_vlan(flow, rule);
- hclge_get_cls_key_ip(flow, rule);
+
+ ret = hclge_get_cls_key_ip(flow, rule, extack);
+ if (ret)
+ return ret;
+
hclge_get_cls_key_port(flow, rule);
return 0;
@@ -7165,6 +7392,12 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "cls flower is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = hclge_check_cls_flower(hdev, cls_flower, tc);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -7218,6 +7451,9 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
spin_lock_bh(&hdev->fd_rule_lock);
rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
@@ -7229,6 +7465,12 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
NULL, false);
if (ret) {
+ /* if tcam config fails, set the rule state to TO_DEL,
+ * so the rule will be deleted when the periodic
+ * task is scheduled.
+ */
+ hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
spin_unlock_bh(&hdev->fd_rule_lock);
return ret;
}
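The recovery path added above is worth calling out: rather than leaking a hardware entry when the TCAM write fails, the rule is re-queued as TO_DEL and a table-changed flag is raised, so the periodic service task retries the delete. A reduced sketch of that defer-and-reap pattern (names are illustrative, not the driver's):

#include <stdbool.h>

enum rule_state { RULE_ACTIVE, RULE_TO_DEL };

struct rule { int location; enum rule_state state; };

static bool table_changed;

/* stand-in for the TCAM write; pretend it fails transiently */
static int hw_delete(struct rule *r) { (void)r; return -1; }

static int delete_rule(struct rule *r)
{
	int ret = hw_delete(r);

	if (ret) {
		r->state = RULE_TO_DEL;	/* leave it for the reaper */
		table_changed = true;
		return ret;
	}
	r->state = RULE_ACTIVE;		/* slot free for reuse */
	return 0;
}

/* periodic service task: retry every rule still marked TO_DEL */
static void reap_rules(struct rule *rules, int n)
{
	int i;

	if (!table_changed)
		return;
	table_changed = false;
	for (i = 0; i < n; i++)
		if (rules[i].state == RULE_TO_DEL && hw_delete(&rules[i]))
			table_changed = true;	/* try again next tick */
}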
@@ -7280,6 +7522,9 @@ out:
static void hclge_sync_fd_table(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
@@ -7341,6 +7586,8 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
+#define HCLGE_LINK_STATUS_WAIT_CNT 3
+
struct hclge_desc desc;
struct hclge_config_mac_mode_cmd *req =
(struct hclge_config_mac_mode_cmd *)desc.data;
@@ -7365,9 +7612,15 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"mac enable fail, ret =%d.\n", ret);
+ return;
+ }
+
+ if (!enable)
+ hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
+ HCLGE_LINK_STATUS_WAIT_CNT);
}
static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
@@ -7430,10 +7683,9 @@ static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}
-static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt)
{
-#define HCLGE_MAC_LINK_STATUS_NUM 100
-
int link_status;
int i = 0;
int ret;
@@ -7446,13 +7698,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
return 0;
msleep(HCLGE_LINK_STATUS_MS);
- } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ } while (++i < wait_cnt);
return -EBUSY;
}
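With the retry budget now a parameter, hclge_mac_link_status_wait() is plain bounded polling: the MAC disable path above passes a short 3-tick budget, while the loopback path below keeps the old 100-tick HCLGE_MAC_LINK_STATUS_NUM. A self-contained sketch of the loop, with usleep() standing in for msleep() and a stubbed status query:

#include <errno.h>
#include <unistd.h>

#define POLL_INTERVAL_MS	10	/* HCLGE_LINK_STATUS_MS */

/* stand-in for the firmware link query; reports "link up" */
static int query_status(int *status)
{
	*status = 1;
	return 0;
}

static int wait_for_status(int want, int wait_cnt)
{
	int status, i = 0;

	do {
		if (!query_status(&status) && status == want)
			return 0;
		usleep(POLL_INTERVAL_MS * 1000);
	} while (++i < wait_cnt);

	return -EBUSY;	/* never reached the wanted state */
}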
static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
bool is_phy)
{
+#define HCLGE_MAC_LINK_STATUS_NUM 100
+
int link_ret;
link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
@@ -7460,7 +7714,8 @@ static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
if (is_phy)
hclge_phy_link_status_wait(hdev, link_ret);
- return hclge_mac_link_status_wait(hdev, link_ret);
+ return hclge_mac_link_status_wait(hdev, link_ret,
+ HCLGE_MAC_LINK_STATUS_NUM);
}
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
@@ -7620,7 +7875,7 @@ static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
if (ret)
return ret;
- return phy_loopback(phydev, true);
+ return phy_loopback(phydev, true, 0);
}
static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
@@ -7628,7 +7883,7 @@ static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
{
int ret;
- ret = phy_loopback(phydev, false);
+ ret = phy_loopback(phydev, false, 0);
if (ret)
return ret;
@@ -7703,7 +7958,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int ret;
+ int ret = 0;
/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
* default, SSU loopback is enabled, so if the SMAC and the DMAC are
@@ -7730,6 +7985,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PHY:
ret = hclge_set_phy_loopback(hdev, en);
break;
+ case HNAE3_LOOP_EXTERNAL:
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -7743,7 +8000,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
ret = hclge_tqp_enable(handle, en);
if (ret)
dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -7788,8 +8045,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
/* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- /* flush memory to make sure DOWN is seen by service task */
- smp_mb__before_atomic();
+ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
hclge_flush_link_update(hdev);
}
}
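The barrier swap above reflects the intent of the comment: the full barrier belongs after the set_bit(), so the DOWN flag is visible to the service task before the subsequent flush. A compressed illustration of that publish/observe pairing, using C11 atomics in place of the kernel's bitops and smp_mb__after_atomic():

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool down_flag;

static void publish_down(void)
{
	atomic_store_explicit(&down_flag, true, memory_order_relaxed);
	/* plays the role of smp_mb__after_atomic() */
	atomic_thread_fence(memory_order_seq_cst);
	/* ... now flush/cancel the link-update work ... */
}

static bool service_task_sees_down(void)
{
	return atomic_load_explicit(&down_flag, memory_order_relaxed);
}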
@@ -7825,12 +8081,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* If it is not PF reset or FLR, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
- hdev->reset_type != HNAE3_FUNC_RESET &&
- hdev->reset_type != HNAE3_FLR_RESET) {
- hclge_mac_stop_phy(hdev);
- hclge_update_link_status(hdev);
- return;
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
+ HCLGE_PFC_DISABLE);
+ if (hdev->reset_type != HNAE3_FUNC_RESET &&
+ hdev->reset_type != HNAE3_FLR_RESET) {
+ hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
+ return;
+ }
}
hclge_reset_tqp(handle);
@@ -7851,9 +8110,11 @@ int hclge_vport_start(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
+ set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_active_jiffies = jiffies;
+ vport->need_notify = 0;
if (test_bit(vport->vport_id, hdev->vport_config_block)) {
if (vport->vport_id) {
@@ -7871,7 +8132,9 @@ int hclge_vport_start(struct hclge_vport *vport)
void hclge_vport_stop(struct hclge_vport *vport)
{
+ clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->need_notify = 0;
}
static int hclge_client_start(struct hnae3_handle *handle)
@@ -7960,14 +8223,14 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
- desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32;
if (clr)
- desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
}
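The 1U in the clear path above is a correctness fix, not cosmetics: for bit_num 31, (1 << 31) shifts into the sign bit of a signed int, which is undefined behaviour, and the complement can sign-extend on widening. A two-line demonstration of the well-defined unsigned form:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int bit_num = 31;
	uint32_t mask = ~(1U << bit_num);	/* defined: 0x7fffffff */

	printf("clear mask: %#x\n", mask);
	return 0;
}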
@@ -8429,12 +8692,11 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, false);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
- if (!ret) {
+ if (!ret || ret == -ENOENT) {
mutex_lock(&hdev->vport_lock);
hclge_update_umv_space(vport, true);
mutex_unlock(&hdev->vport_lock);
- } else if (ret == -ENOENT) {
- ret = 0;
+ return 0;
}
return ret;
@@ -8684,7 +8946,7 @@ static void hclge_update_overflow_flags(struct hclge_vport *vport,
if (mac_type == HCLGE_MAC_ADDR_UC) {
if (is_all_added)
vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
- else
+ else if (hclge_is_umv_space_full(vport, true))
vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
} else {
if (is_all_added)
@@ -8984,14 +9246,20 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
ether_addr_copy(vport->vf_info.mac, mac_addr);
+ /* there is a time window before the PF knows the VF is unalive,
+ * which may cause the mailbox send to fail; it doesn't matter,
+ * the VF will query it when it reinitializes.
+ */
if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
dev_info(&hdev->pdev->dev,
"MAC of VF %d has been set to %s, and it will be reinitialized!\n",
vf, format_mac_addr);
- return hclge_inform_reset_assert_to_vf(vport);
+ (void)hclge_inform_reset_assert_to_vf(vport);
+ return 0;
}
- dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %s, will be active after VF reset\n",
vf, format_mac_addr);
return 0;
}
@@ -9024,7 +9292,7 @@ static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
static int init_mgr_tbl(struct hclge_dev *hdev)
{
int ret;
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
@@ -9161,8 +9429,7 @@ static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
/* this command reads phy id and register at the same time */
fallthrough;
case SIOCGMIIREG:
- data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
- return 0;
+ return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out);
case SIOCSMIIREG:
return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
@@ -9177,15 +9444,8 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return hclge_ptp_get_cfg(hdev, ifr);
- case SIOCSHWTSTAMP:
- return hclge_ptp_set_cfg(hdev, ifr);
- default:
- if (!hdev->hw.mac.phydev)
- return hclge_mii_ioctl(hdev, ifr, cmd);
- }
+ if (!hdev->hw.mac.phydev)
+ return hclge_mii_ioctl(hdev, ifr, cmd);
return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
@@ -9308,33 +9568,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
return false;
}
-int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport,
+ bool request_en)
{
- struct hclge_dev *hdev = vport->back;
bool need_en;
int ret;
- mutex_lock(&hdev->vport_lock);
-
- vport->req_vlan_fltr_en = request_en;
-
need_en = hclge_need_enable_vport_vlan_filter(vport);
- if (need_en == vport->cur_vlan_fltr_en) {
- mutex_unlock(&hdev->vport_lock);
+ if (need_en == vport->cur_vlan_fltr_en)
return 0;
- }
ret = hclge_set_vport_vlan_filter(vport, need_en);
- if (ret) {
- mutex_unlock(&hdev->vport_lock);
+ if (ret)
return ret;
- }
vport->cur_vlan_fltr_en = need_en;
+ return 0;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+ vport->req_vlan_fltr_en = request_en;
+ ret = __hclge_enable_vport_vlan_filter(vport, request_en);
mutex_unlock(&hdev->vport_lock);
- return 0;
+ return ret;
}
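The refactor above is the classic lock-split idiom: the public hclge_enable_vport_vlan_filter() takes the vport mutex once and delegates to a double-underscore helper that assumes the lock is held, so callers that already hold the mutex can use the helper without recursive acquisition. A pthread sketch of the shape (names are illustrative):

#include <pthread.h>

static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;
static int cur_state;

/* caller must hold vport_lock */
static int __set_filter(int want)
{
	if (want == cur_state)
		return 0;
	/* ... hardware update elided ... */
	cur_state = want;
	return 0;
}

static int set_filter(int want)
{
	int ret;

	pthread_mutex_lock(&vport_lock);
	ret = __set_filter(want);
	pthread_mutex_unlock(&vport_lock);
	return ret;
}

hclge_sync_vlan_fltr_state() further down uses exactly this: it takes the mutex itself and calls the __ variant directly.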
static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
@@ -9730,6 +9993,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
struct hclge_vport *vport;
+ bool enable = true;
int ret;
int i;
@@ -9749,8 +10013,12 @@ static int hclge_init_vlan_filter(struct hclge_dev *hdev)
vport->cur_vlan_fltr_en = true;
}
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
+ !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
+ enable = false;
+
return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true, 0);
+ HCLGE_FILTER_FE_INGRESS, enable, 0);
}
static int hclge_init_vlan_type(struct hclge_dev *hdev)
@@ -9809,19 +10077,28 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool writen_to_tbl)
{
struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
- if (vlan->vlan_id == vlan_id)
+ mutex_lock(&hdev->vport_lock);
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
+ }
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
+ if (!vlan) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
vlan->hd_tbl_status = writen_to_tbl;
vlan->vlan_id = vlan_id;
list_add_tail(&vlan->node, &vport->vlan_list);
+ mutex_unlock(&hdev->vport_lock);
}
static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
@@ -9830,6 +10107,8 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
int ret;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (!vlan->hd_tbl_status) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
@@ -9839,12 +10118,16 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
dev_err(&hdev->pdev->dev,
"restore vport vlan list failed, ret=%d\n",
ret);
+
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
}
vlan->hd_tbl_status = true;
}
+ mutex_unlock(&hdev->vport_lock);
+
return 0;
}
@@ -9875,6 +10158,8 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->hd_tbl_status)
hclge_set_vlan_filter_hw(hdev,
@@ -9890,6 +10175,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
}
}
clear_bit(vport->vport_id, hdev->vf_vlan_full);
+ mutex_unlock(&hdev->vport_lock);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
@@ -9898,6 +10184,8 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
+ mutex_lock(&hdev->vport_lock);
+
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
@@ -9905,37 +10193,61 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
kfree(vlan);
}
}
+
+ mutex_unlock(&hdev->vport_lock);
}
-void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
+void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
{
- struct hclge_vport_vlan_cfg *vlan, *tmp;
- struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info *vlan_info;
+ struct hclge_vport *vport;
u16 vlan_proto;
u16 vlan_id;
u16 state;
+ int vf_id;
int ret;
- vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
- vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- state = vport->port_base_vlan_cfg.state;
+ /* PF should restore the port base vlan of all VFs */
+ for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
+ vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
+ vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
+ &vport->port_base_vlan_cfg.vlan_info :
+ &vport->port_base_vlan_cfg.old_vlan_info;
- if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
- clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
- hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id,
- false);
- return;
+ vlan_id = vlan_info->vlan_tag;
+ vlan_proto = vlan_info->vlan_proto;
+ state = vport->port_base_vlan_cfg.state;
+
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
+ ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id,
+ vlan_id, false);
+ vport->port_base_vlan_cfg.tbl_sta = ret == 0;
+ }
}
+}
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
- vport->vport_id,
- vlan->vlan_id, false);
- if (ret)
- break;
- vlan->hd_tbl_status = true;
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
+ vlan->hd_tbl_status = true;
+ }
}
+
+ mutex_unlock(&hdev->vport_lock);
}
/* For global reset and imp reset, hardware will clear the mac table,
@@ -9975,6 +10287,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev)
struct hnae3_handle *handle = &vport->nic;
hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_port_base_vlan_config(hdev);
hclge_restore_vport_vlan_table(vport);
set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
hclge_restore_fd_entries(handle);
@@ -10031,6 +10344,8 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
false);
}
+ vport->port_base_vlan_cfg.tbl_sta = false;
+
/* force add VLAN 0 */
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
if (ret)
@@ -10071,6 +10386,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
if (ret)
return ret;
+ vport->port_base_vlan_cfg.tbl_sta = false;
/* remove old VLAN tag */
if (old_info->vlan_tag == 0)
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
@@ -10120,7 +10436,9 @@ out:
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
vport->port_base_vlan_cfg.vlan_info = *vlan_info;
+ vport->port_base_vlan_cfg.tbl_sta = true;
hclge_set_vport_vlan_fltr_change(vport);
return 0;
@@ -10188,15 +10506,22 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
return ret;
}
- /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
+ /* there is a time window before the PF knows the VF is unalive,
+ * which may cause the mailbox send to fail; it doesn't matter,
+ * the VF will query it when it reinitializes.
+ * for DEVICE_VERSION_V3, vf doesn't need to know about the port based
* VLAN state.
*/
- if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
- test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
- hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
- vport->vport_id, state,
- &vlan_info);
-
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id,
+ state,
+ &vlan_info);
+ else
+ set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
+ &vport->need_notify);
+ }
return 0;
}
@@ -10234,11 +10559,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
* handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
+ mutex_lock(&hdev->vport_lock);
if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ mutex_unlock(&hdev->vport_lock);
return -EBUSY;
+ } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
}
+ mutex_unlock(&hdev->vport_lock);
/* when port base vlan enabled, we use port base vlan as the vlan
* filter entry. In this case, we don't update vlan filter table
@@ -10253,17 +10583,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
}
if (!ret) {
- if (is_kill)
- hclge_rm_vport_vlan_table(vport, vlan_id, false);
- else
+ if (!is_kill) {
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
+ } else if (is_kill && vlan_id != 0) {
+ mutex_lock(&hdev->vport_lock);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ mutex_unlock(&hdev->vport_lock);
+ }
} else if (is_kill) {
/* when removing the hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistent
* with the stack
*/
+ mutex_lock(&hdev->vport_lock);
set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ mutex_unlock(&hdev->vport_lock);
}
hclge_set_vport_vlan_fltr_change(vport);
@@ -10283,16 +10618,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
&vport->state))
continue;
- ret = hclge_enable_vport_vlan_filter(vport,
- vport->req_vlan_fltr_en);
+ mutex_lock(&hdev->vport_lock);
+ ret = __hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to sync vlan filter state for vport%u, ret = %d\n",
vport->vport_id, ret);
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
+ mutex_unlock(&hdev->vport_lock);
return;
}
+ mutex_unlock(&hdev->vport_lock);
}
}
@@ -10303,6 +10641,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
int i, ret, sync_cnt = 0;
u16 vlan_id;
+ mutex_lock(&hdev->vport_lock);
/* start from vport 1 for PF is always alive */
for (i = 0; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
@@ -10313,21 +10652,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vlan_id,
true);
- if (ret && ret != -EINVAL)
+ if (ret && ret != -EINVAL) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
hclge_rm_vport_vlan_table(vport, vlan_id, false);
hclge_set_vport_vlan_fltr_change(vport);
sync_cnt++;
- if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
+ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
VLAN_N_VID);
}
}
+ mutex_unlock(&hdev->vport_lock);
hclge_sync_vlan_fltr_state(hdev);
}
@@ -10367,7 +10711,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
mutex_lock(&hdev->vport_lock);
/* VF's mps must fit within hdev->mps */
- if (vport->vport_id && max_frm_size > hdev->mps) {
+ if (vport->vport_id && (u32)max_frm_size > hdev->mps) {
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
} else if (vport->vport_id) {
@@ -10378,7 +10722,10 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater than VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
- if (max_frm_size < hdev->vport[i].mps) {
+ if ((u32)max_frm_size < hdev->vport[i].mps) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set pf mtu for less than vport %d, mps = %u.\n",
+ i, hdev->vport[i].mps);
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
}
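Both branches above enforce one invariant from opposite directions: a VF's maximum frame size must fit within the PF's mps, and the PF may not shrink below any VF's configured mps. A minimal standalone check capturing the rule:

#include <errno.h>
#include <stdint.h>

static int check_frame_size(uint32_t new_frm, uint32_t pf_mps,
			    const uint32_t *vf_mps, int num_vf, int is_vf)
{
	int i;

	if (is_vf)
		return new_frm > pf_mps ? -EINVAL : 0;	/* VF fits in PF */

	for (i = 0; i < num_vf; i++)
		if (new_frm < vf_mps[i])
			return -EINVAL;	/* PF must cover every VF */
	return 0;
}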
@@ -10587,6 +10934,24 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
+int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version)
+{
+ struct hclge_comm_query_scc_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_SCC_VER, 1);
+ resp = (struct hclge_comm_query_scc_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ *scc_version = le32_to_cpu(resp->scc_version);
+
+ return 0;
+}
+
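The new SCC-version query follows the driver's recurring single-descriptor command pattern: build a descriptor for an opcode, send it, then pull a little-endian field out of the response. A reduced userspace sketch of that flow, under the assumption that the version sits in the first response word (the real layout comes from struct hclge_comm_query_scc_cmd); all names below are stand-ins:

#include <stdint.h>
#include <string.h>

struct desc { uint32_t data[6]; };	/* reduced hclge_desc stand-in */

static void setup_desc(struct desc *d) { memset(d, 0, sizeof(*d)); }
static int send_desc(struct desc *d) { d->data[0] = 0x10203; return 0; }

/* little-endian host assumed; the kernel code uses le32_to_cpu() */
static uint32_t le32_get(uint32_t v) { return v; }

static int query_scc_version_sketch(uint32_t *scc_version)
{
	struct desc desc;
	int ret;

	setup_desc(&desc);		/* opcode elided */
	ret = send_desc(&desc);
	if (ret)
		return ret;

	*scc_version = le32_get(desc.data[0]);
	return 0;
}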
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
@@ -10620,9 +10985,12 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
u32 rx_pause, tx_pause;
u8 flowctl;
- if (!phydev->link || !phydev->autoneg)
+ if (!phydev->link)
return 0;
+ if (!phydev->autoneg)
+ return hclge_mac_pause_setup_hw(hdev);
+
local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
if (phydev->pause)
@@ -10727,7 +11095,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex)
+ u8 *auto_neg, u32 *speed, u8 *duplex,
+ u32 *lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -10738,6 +11106,8 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
*duplex = hdev->hw.mac.duplex;
if (auto_neg)
*auto_neg = hdev->hw.mac.autoneg;
+ if (lane_num)
+ *lane_num = hdev->hw.mac.lane_num;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
@@ -10811,6 +11181,7 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
static void hclge_info_show(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport->nic;
struct device *dev = &hdev->pdev->dev;
dev_info(dev, "PF info begin:\n");
@@ -10827,9 +11198,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.dcb_ets_active));
dev_info(dev, "MQPRIO %s\n",
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.mqprio_active));
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
@@ -10841,7 +11212,7 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.reset_cnt;
+ u32 rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
ret = client->ops->init_instance(&vport->nic);
@@ -10885,7 +11256,7 @@ static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hclge_dev *hdev = ae_dev->priv;
struct hnae3_client *client;
- int rst_cnt;
+ u32 rst_cnt;
int ret;
if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
@@ -10978,6 +11349,12 @@ clear_roce:
return ret;
}
+static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
+{
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+}
+
static void hclge_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -10986,7 +11363,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (hdev->roce_client) {
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ while (hclge_uninit_need_wait(hdev))
msleep(HCLGE_WAIT_RESET_DONE);
hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
@@ -11008,8 +11385,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
-#define HCLGE_MEM_BAR 4
-
struct pci_dev *pdev = hdev->pdev;
struct hclge_hw *hw = &hdev->hw;
@@ -11046,7 +11421,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev,
- "can't set consistent PCI DMA");
+ "can't set consistent PCI DMA\n");
goto err_disable_device;
}
dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
@@ -11064,7 +11439,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
if (!hw->hw.io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
ret = -ENOMEM;
- goto err_clr_master;
+ goto err_release_regions;
}
ret = hclge_dev_mem_map(hdev);
@@ -11077,8 +11452,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
err_unmap_io_base:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
-err_clr_master:
- pci_clear_master(pdev);
+err_release_regions:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -11095,8 +11469,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
- pci_clear_master(pdev);
- pci_release_mem_regions(pdev);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
}
@@ -11117,7 +11490,7 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_REMOVING, &hdev->state);
if (hdev->reset_timer.function)
- del_timer_sync(&hdev->reset_timer);
+ timer_delete_sync(&hdev->reset_timer);
if (hdev->service_task.work.func)
cancel_delayed_work_sync(&hdev->service_task);
}
@@ -11168,8 +11541,8 @@ static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
hdev->reset_type = HNAE3_NONE_RESET;
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
+ if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
@@ -11223,6 +11596,124 @@ static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}
+static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return &vport->back->hw.mac.wol;
+}
+
+static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
+ u32 *wol_supported)
+{
+ struct hclge_query_wol_supported_cmd *wol_supported_cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
+ true);
+ wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query wol supported, ret = %d\n", ret);
+ return ret;
+ }
+
+ *wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);
+
+ return 0;
+}
+
+static int hclge_set_wol_cfg(struct hclge_dev *hdev,
+ struct hclge_wol_info *wol_info)
+{
+ struct hclge_wol_cfg_cmd *wol_cfg_cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
+ wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
+ wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
+ wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
+ memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set wol config, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_update_wol(struct hclge_dev *hdev)
+{
+ struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
+
+ if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
+ return 0;
+
+ return hclge_set_wol_cfg(hdev, wol_info);
+}
+
+static int hclge_init_wol(struct hclge_dev *hdev)
+{
+ struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
+ int ret;
+
+ if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
+ return 0;
+
+ memset(wol_info, 0, sizeof(struct hclge_wol_info));
+ ret = hclge_get_wol_supported_mode(hdev,
+ &wol_info->wol_support_mode);
+ if (ret) {
+ wol_info->wol_support_mode = 0;
+ return ret;
+ }
+
+ return hclge_update_wol(hdev);
+}
+
+static void hclge_get_wol(struct hnae3_handle *handle,
+ struct ethtool_wolinfo *wol)
+{
+ struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
+
+ wol->supported = wol_info->wol_support_mode;
+ wol->wolopts = wol_info->wol_current_mode;
+ if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
+}
+
+static int hclge_set_wol(struct hnae3_handle *handle,
+ struct ethtool_wolinfo *wol)
+{
+ struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u32 wol_mode;
+ int ret;
+
+ wol_mode = wol->wolopts;
+ if (wol_mode & ~wol_info->wol_support_mode)
+ return -EINVAL;
+
+ wol_info->wol_current_mode = wol_mode;
+ if (wol_mode & WAKE_MAGICSECURE) {
+ memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
+ wol_info->wol_sopass_size = SOPASS_MAX;
+ } else {
+ wol_info->wol_sopass_size = 0;
+ }
+
+ ret = hclge_set_wol_cfg(vport->back, wol_info);
+ if (ret)
+ wol_info->wol_current_mode = 0;
+
+ return ret;
+}
+
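The set path above enforces two rules worth stating plainly: a requested mode outside the firmware-reported supported set is rejected with -EINVAL, and the SecureOn password is only retained while WAKE_MAGICSECURE is active. A minimal userspace sketch of that policy, using the WAKE_* flags and SOPASS_MAX from <linux/ethtool.h> and a reduced stand-in for hclge_wol_info:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <linux/ethtool.h>

struct wol_state {
	uint32_t supported;		/* firmware-reported modes */
	uint32_t current_mode;
	uint8_t  sopass[SOPASS_MAX];
	uint8_t  sopass_size;
};

static int set_wol_sketch(struct wol_state *st, uint32_t wolopts,
			  const uint8_t sopass[SOPASS_MAX])
{
	if (wolopts & ~st->supported)
		return -EINVAL;			/* mode not supported */

	st->current_mode = wolopts;
	if (wolopts & WAKE_MAGICSECURE) {
		memcpy(st->sopass, sopass, SOPASS_MAX);
		st->sopass_size = SOPASS_MAX;
	} else {
		st->sopass_size = 0;		/* drop stale password */
	}
	return 0;				/* then push to firmware */
}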
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -11250,16 +11741,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto out;
- ret = hclge_devlink_init(hdev);
- if (ret)
- goto err_pci_uninit;
-
/* Firmware command queue initialize */
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
- goto err_devlink_uninit;
+ goto err_pci_uninit;
/* Firmware command initialize */
+ hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops);
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
true, hdev->reset_pending);
if (ret)
@@ -11310,9 +11798,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_msi_irq_uninit;
- if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
- !hnae3_dev_phy_imp_supported(hdev)) {
- ret = hclge_mac_mdio_config(hdev);
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ if (hnae3_dev_phy_imp_supported(hdev))
+ ret = hclge_update_tp_port_info(hdev);
+ else
+ ret = hclge_mac_mdio_config(hdev);
+
if (ret)
goto err_msi_irq_uninit;
}
@@ -11379,6 +11871,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_mdiobus_unreg;
+ ret = hclge_update_port_info(hdev);
+ if (ret)
+ goto err_ptp_uninit;
+
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -11386,11 +11882,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
- /* Setup affinity after service timer setup because add_timer_on
- * is called in affinity notify.
- */
- hclge_misc_affinity_setup(hdev);
-
hclge_clear_all_event_cause(hdev);
hclge_clear_resetting_state(hdev);
@@ -11414,19 +11905,30 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_init_rxd_adv_layout(hdev);
- /* Enable MISC vector(vector0) */
- hclge_enable_vector(&hdev->misc_vector, true);
+ ret = hclge_init_wol(hdev);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "failed to wake on lan init, ret = %d\n", ret);
+
+ ret = hclge_devlink_init(hdev);
+ if (ret)
+ goto err_ptp_uninit;
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
+ /* Enable MISC vector(vector0) */
+ enable_irq(hdev->misc_vector.vector_irq);
+ hclge_enable_vector(&hdev->misc_vector, true);
+
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
-
return 0;
+err_ptp_uninit:
+ hclge_ptp_uninit(hdev);
err_mdiobus_unreg:
if (hdev->hw.mac.phydev)
mdiobus_unregister(hdev->hw.mac.mdio_bus);
@@ -11436,11 +11938,8 @@ err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
-err_devlink_uninit:
- hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
- pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
out:
@@ -11451,6 +11950,7 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+ memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -11474,7 +11974,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret) {
dev_err(&hdev->pdev->dev,
"Set vf %d mac spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -11482,7 +11982,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret)
dev_err(&hdev->pdev->dev,
"Set vf %d vlan spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -11586,7 +12086,7 @@ static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
- max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ max_tx_rate < 0 || (u32)max_tx_rate > hdev->hw.mac.max_speed) {
dev_err(&hdev->pdev->dev,
"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
@@ -11611,7 +12111,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
if (!vport)
return -EINVAL;
- if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ if (!force && (u32)max_tx_rate == vport->vf_info.max_tx_rate)
return 0;
ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
@@ -11661,7 +12161,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev)
int i;
for (i = 0; i < hdev->num_alloc_vport; i++) {
- hclge_vport_stop(vport);
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
vport++;
}
}
@@ -11728,6 +12228,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ hclge_reset_tc_config(hdev);
+
ret = hclge_tm_init_hw(hdev, true);
if (ret) {
dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
@@ -11795,6 +12297,11 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_init_rxd_adv_layout(hdev);
+ ret = hclge_update_wol(hdev);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "failed to update wol config, ret = %d\n", ret);
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -11808,7 +12315,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_reset_vf_rate(hdev);
hclge_clear_vf_vlan(hdev);
- hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
hclge_ptp_uninit(hdev);
hclge_uninit_rxd_adv_layout(hdev);
@@ -11820,7 +12326,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- synchronize_irq(hdev->misc_vector.vector_irq);
+ disable_irq(hdev->misc_vector.vector_irq);
/* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
@@ -11831,8 +12337,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_misc_irq_uninit(hdev);
hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
- mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_vlan_table(hdev);
+ mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL;
}
@@ -11946,463 +12452,6 @@ out:
return ret;
}
-static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
- u32 *regs_num_64_bit)
-{
- struct hclge_desc desc;
- u32 total_num;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query register number cmd failed, ret = %d.\n", ret);
- return ret;
- }
-
- *regs_num_32_bit = le32_to_cpu(desc.data[0]);
- *regs_num_64_bit = le32_to_cpu(desc.data[1]);
-
- total_num = *regs_num_32_bit + *regs_num_64_bit;
- if (!total_num)
- return -EINVAL;
-
- return 0;
-}
-
-static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
- void *data)
-{
-#define HCLGE_32_BIT_REG_RTN_DATANUM 8
-#define HCLGE_32_BIT_DESC_NODATA_LEN 2
-
- struct hclge_desc *desc;
- u32 *reg_val = data;
- __le32 *desc_data;
- int nodata_num;
- int cmd_num;
- int i, k, n;
- int ret;
-
- if (regs_num == 0)
- return 0;
-
- nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
- cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
- HCLGE_32_BIT_REG_RTN_DATANUM);
- desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
- ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query 32 bit register cmd failed, ret = %d.\n", ret);
- kfree(desc);
- return ret;
- }
-
- for (i = 0; i < cmd_num; i++) {
- if (i == 0) {
- desc_data = (__le32 *)(&desc[i].data[0]);
- n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
- } else {
- desc_data = (__le32 *)(&desc[i]);
- n = HCLGE_32_BIT_REG_RTN_DATANUM;
- }
- for (k = 0; k < n; k++) {
- *reg_val++ = le32_to_cpu(*desc_data++);
-
- regs_num--;
- if (!regs_num)
- break;
- }
- }
-
- kfree(desc);
- return 0;
-}
-
-static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
- void *data)
-{
-#define HCLGE_64_BIT_REG_RTN_DATANUM 4
-#define HCLGE_64_BIT_DESC_NODATA_LEN 1
-
- struct hclge_desc *desc;
- u64 *reg_val = data;
- __le64 *desc_data;
- int nodata_len;
- int cmd_num;
- int i, k, n;
- int ret;
-
- if (regs_num == 0)
- return 0;
-
- nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
- cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
- HCLGE_64_BIT_REG_RTN_DATANUM);
- desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
- ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query 64 bit register cmd failed, ret = %d.\n", ret);
- kfree(desc);
- return ret;
- }
-
- for (i = 0; i < cmd_num; i++) {
- if (i == 0) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_64_BIT_REG_RTN_DATANUM;
- }
- for (k = 0; k < n; k++) {
- *reg_val++ = le64_to_cpu(*desc_data++);
-
- regs_num--;
- if (!regs_num)
- break;
- }
- }
-
- kfree(desc);
- return 0;
-}
-
-#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFDFCFBFA
-#define REG_NUM_PER_LINE 4
-#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
-#define REG_SEPARATOR_LINE 1
-#define REG_NUM_REMAIN_MASK 3
-
-int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
-{
- int i;
-
- /* initialize command BD except the last one */
- for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
- hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
- true);
- desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
- }
-
- /* initialize the last command BD */
- hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
-
- return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
-}
-
-static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
- int *bd_num_list,
- u32 type_num)
-{
- u32 entries_per_desc, desc_index, index, offset, i;
- struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
- int ret;
-
- ret = hclge_query_bd_num_cmd_send(hdev, desc);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx bd num fail, status is %d.\n", ret);
- return ret;
- }
-
- entries_per_desc = ARRAY_SIZE(desc[0].data);
- for (i = 0; i < type_num; i++) {
- offset = hclge_dfx_bd_offset_list[i];
- index = offset % entries_per_desc;
- desc_index = offset / entries_per_desc;
- bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
- }
-
- return ret;
-}
-
-static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
- struct hclge_desc *desc_src, int bd_num,
- enum hclge_opcode_type cmd)
-{
- struct hclge_desc *desc = desc_src;
- int i, ret;
-
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- for (i = 0; i < bd_num - 1; i++) {
- desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
- desc++;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- }
-
- desc = desc_src;
- ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
- cmd, ret);
-
- return ret;
-}
-
-static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
- void *data)
-{
- int entries_per_desc, reg_num, separator_num, desc_index, index, i;
- struct hclge_desc *desc = desc_src;
- u32 *reg = data;
-
- entries_per_desc = ARRAY_SIZE(desc->data);
- reg_num = entries_per_desc * bd_num;
- separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < reg_num; i++) {
- index = i % entries_per_desc;
- desc_index = i / entries_per_desc;
- *reg++ = le32_to_cpu(desc[desc_index].data[index]);
- }
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- return reg_num + separator_num;
-}
-
-static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
-{
- u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
- int data_len_per_desc, bd_num, i;
- int *bd_num_list;
- u32 data_len;
- int ret;
-
- bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
- if (!bd_num_list)
- return -ENOMEM;
-
- ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg bd num fail, status is %d.\n", ret);
- goto out;
- }
-
- data_len_per_desc = sizeof_field(struct hclge_desc, data);
- *len = 0;
- for (i = 0; i < dfx_reg_type_num; i++) {
- bd_num = bd_num_list[i];
- data_len = data_len_per_desc * bd_num;
- *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
- }
-
-out:
- kfree(bd_num_list);
- return ret;
-}
-
-static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
-{
- u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
- int bd_num, bd_num_max, buf_len, i;
- struct hclge_desc *desc_src;
- int *bd_num_list;
- u32 *reg = data;
- int ret;
-
- bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
- if (!bd_num_list)
- return -ENOMEM;
-
- ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg bd num fail, status is %d.\n", ret);
- goto out;
- }
-
- bd_num_max = bd_num_list[0];
- for (i = 1; i < dfx_reg_type_num; i++)
- bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
-
- buf_len = sizeof(*desc_src) * bd_num_max;
- desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < dfx_reg_type_num; i++) {
- bd_num = bd_num_list[i];
- ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
- hclge_dfx_reg_opcode_list[i]);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg fail, status is %d.\n", ret);
- break;
- }
-
- reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
- }
-
- kfree(desc_src);
-out:
- kfree(bd_num_list);
- return ret;
-}
-
-static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
- struct hnae3_knic_private_info *kinfo)
-{
-#define HCLGE_RING_REG_OFFSET 0x200
-#define HCLGE_RING_INT_REG_OFFSET 0x4
-
- int i, j, reg_num, separator_num;
- int data_num_sum;
- u32 *reg = data;
-
- /* fetching per-PF registers valus from PF PCIe register space */
- reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- data_num_sum = reg_num + separator_num;
-
- reg_num = ARRAY_SIZE(common_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- data_num_sum += reg_num + separator_num;
-
- reg_num = ARRAY_SIZE(ring_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (j = 0; j < kinfo->num_tqps; j++) {
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGE_RING_REG_OFFSET * j);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- }
- data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
-
- reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (j = 0; j < hdev->num_msi_used - 1; j++) {
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- tqp_intr_reg_addr_list[i] +
- HCLGE_RING_INT_REG_OFFSET * j);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- }
- data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
-
- return data_num_sum;
-}
-
-static int hclge_get_regs_len(struct hnae3_handle *handle)
-{
- int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
- int regs_lines_32_bit, regs_lines_64_bit;
- int ret;
-
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return ret;
- }
-
- ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg len failed, ret = %d.\n", ret);
- return ret;
- }
-
- cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
-
- return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
- tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
- regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
-}
-
-static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
- void *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
- int i, reg_num, separator_num, ret;
- u32 *reg = data;
-
- *version = hdev->fw_version;
-
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return;
- }
-
- reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
-
- ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get 32 bit register failed, ret = %d.\n", ret);
- return;
- }
- reg_num = regs_num_32_bit;
- reg += reg_num;
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get 64 bit register failed, ret = %d.\n", ret);
- return;
- }
- reg_num = regs_num_64_bit * 2;
- reg += reg_num;
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- ret = hclge_get_dfx_reg(hdev, reg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Get dfx register failed, ret = %d.\n", ret);
-}
-
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
struct hclge_set_led_state_cmd *req;
@@ -12475,60 +12524,71 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
return ret;
}
-static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
{
- struct hclge_vport *vport = &hdev->vport[0];
struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ bool uc_en = false;
+ bool mc_en = false;
u8 tmp_flags;
+ bool bc_en;
int ret;
- u16 i;
if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_promisc_flags = vport->overflow_promisc_flags;
}
- if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state))
+ return 0;
+
+ /* for PF */
+ if (!vport->vport_id) {
tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
tmp_flags & HNAE3_MPE);
- if (!ret) {
- clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
- &vport->state);
+ if (!ret)
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
- }
+ else
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state);
+ return ret;
}
- for (i = 1; i < hdev->num_alloc_vport; i++) {
- bool uc_en = false;
- bool mc_en = false;
- bool bc_en;
+ /* for VF */
+ if (vport->vf_info.trusted) {
+ uc_en = vport->vf_info.request_uc_en > 0 ||
+ vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
+ mc_en = vport->vf_info.request_mc_en > 0 ||
+ vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
+ }
+ bc_en = vport->vf_info.request_bc_en > 0;
- vport = &hdev->vport[i];
+ ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
+ mc_en, bc_en);
+ if (ret) {
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ return ret;
+ }
+ hclge_set_vport_vlan_fltr_change(vport);
- if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
- &vport->state))
- continue;
+ return 0;
+}
- if (vport->vf_info.trusted) {
- uc_en = vport->vf_info.request_uc_en > 0 ||
- vport->overflow_promisc_flags &
- HNAE3_OVERFLOW_UPE;
- mc_en = vport->vf_info.request_mc_en > 0 ||
- vport->overflow_promisc_flags &
- HNAE3_OVERFLOW_MPE;
- }
- bc_en = vport->vf_info.request_bc_en > 0;
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
- ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
- mc_en, bc_en);
- if (ret) {
- set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
- &vport->state);
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+
+ ret = hclge_sync_vport_promisc_mode(vport);
+ if (ret)
return;
- }
- hclge_set_vport_vlan_fltr_change(vport);
}
}
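The refactor above replaces the PF-special-cased loop with a single per-vport helper; the subtle part is the retry discipline. The PROMISC_CHANGE bit is consumed with test_and_clear_bit() up front and re-armed with set_bit() only when programming the hardware fails, so the periodic service task retries on its next pass. A generic sketch of that pattern (the helper name and callback are illustrative, not driver API):

	#include <linux/bitops.h>

	/* Consume a "work pending" bit, apply the change, and re-arm the
	 * bit on failure so a later service-task pass retries it.
	 */
	static int sync_pending_flag(unsigned long *state, int bit,
				     int (*apply)(void *arg), void *arg)
	{
		int ret;

		if (!test_and_clear_bit(bit, state))
			return 0;	/* nothing pending for this vport */

		ret = apply(arg);
		if (ret)
			set_bit(bit, state);	/* retry on the next pass */

		return ret;
	}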
@@ -12656,6 +12716,77 @@ static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
return 0;
}
+/* After SR-IOV is disabled, the VF still holds some configuration and
+ * state set up by the PF, which needs to be cleaned.
+ */
+static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info vlan_info;
+ int ret;
+
+ clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->need_notify = 0;
+ vport->mps = 0;
+
+	/* after SR-IOV is disabled, clean the VF rate configured by the PF */
+ ret = hclge_tm_qs_shaper_cfg(vport, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d rate config, ret = %d\n",
+ vfid, ret);
+
+ vlan_info.vlan_tag = 0;
+ vlan_info.qos = 0;
+ vlan_info.vlan_proto = ETH_P_8021Q;
+ ret = hclge_update_port_base_vlan_cfg(vport,
+ HNAE3_PORT_BASE_VLAN_DISABLE,
+ &vlan_info);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d port base vlan, ret = %d\n",
+ vfid, ret);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d spoof config, ret = %d\n",
+ vfid, ret);
+
+ memset(&vport->vf_info, 0, sizeof(vport->vf_info));
+}
+
+static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < num_vfs; i++) {
+ vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
+
+ hclge_clear_vport_vf_info(vport, i);
+ }
+}
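hclge_clean_vport_config() is exported through the new .clean_vf_config hook in hclge_ops below. Its caller is not part of this diff; presumably the hns3 core invokes it from its PCI sriov_configure path once the VFs are gone. A hedged sketch of such a caller (the function and flow are assumptions; only the hook's signature comes from this patch):

	static int hns3_sriov_configure_sketch(struct pci_dev *pdev,
					       int num_vfs)
	{
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

		if (!num_vfs) {
			int vfs_used = pci_num_vf(pdev);

			pci_disable_sriov(pdev);
			/* let the PF drop the per-VF state it programmed */
			if (ae_dev->ops->clean_vf_config)
				ae_dev->ops->clean_vf_config(ae_dev, vfs_used);
			return 0;
		}

		return pci_enable_sriov(pdev, num_vfs);
	}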
+
+static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
+ u8 *priority)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+
+ if (dscp >= HNAE3_MAX_DSCP)
+ return -EINVAL;
+
+ if (tc_mode)
+ *tc_mode = vport->nic.kinfo.tc_map_mode;
+ if (priority)
+ *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
+ vport->nic.kinfo.dscp_prio[dscp];
+
+ return 0;
+}
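The new getter resolves a DSCP code point to the vport's TC map mode and its configured priority, reporting priority 0 for DSCP values that were never mapped (HNAE3_PRIO_ID_INVALID). An illustrative caller (hypothetical, since the function is static to this file):

	static void print_dscp46_prio(struct hnae3_handle *h)
	{
		u8 tc_mode, prio;

		/* DSCP 46 is the Expedited Forwarding code point */
		if (!hclge_get_dscp_prio(h, 46, &tc_mode, &prio))
			pr_info("dscp 46: tc_map_mode %u, priority %u\n",
				tc_mode, prio);
	}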
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -12679,6 +12810,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
.get_media_type = hclge_get_media_type,
.check_port_speed = hclge_check_port_speed,
+ .get_fec_stats = hclge_get_fec_stats,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_comm_get_rss_key_size,
@@ -12730,7 +12862,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
- .dbg_read_cmd = hclge_dbg_read_cmd,
+ .dbg_get_read_func = hclge_dbg_get_read_func,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
@@ -12757,6 +12889,12 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_rx_hwts = hclge_ptp_get_rx_hwts,
.get_ts_info = hclge_ptp_get_ts_info,
.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
+ .clean_vf_config = hclge_clean_vport_config,
+ .get_dscp_prio = hclge_get_dscp_prio,
+ .get_wol = hclge_get_wol,
+ .set_wol = hclge_set_wol,
+ .hwtstamp_get = hclge_ptp_get_cfg,
+ .hwtstamp_set = hclge_ptp_set_cfg,
};
static struct hnae3_ae_algo ae_algo = {
@@ -12764,11 +12902,12 @@ static struct hnae3_ae_algo ae_algo = {
.pdev_id_table = ae_algo_pci_tbl,
};
-static int hclge_init(void)
+static int __init hclge_init(void)
{
- pr_info("%s is initializing\n", HCLGE_NAME);
+ pr_debug("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0,
+ HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
@@ -12779,11 +12918,13 @@ static int hclge_init(void)
return 0;
}
-static void hclge_exit(void)
+static void __exit hclge_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
+ hnae3_release_unload_lock();
}
module_init(hclge_init);
module_exit(hclge_exit);
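Two independent fixes land in the init/exit hunks: hclge_init()/hclge_exit() gain the __init/__exit annotations they should always have had, letting the kernel discard the init text once the module is loaded, and module unload is now bracketed by hnae3_acquire_unload_lock()/hnae3_release_unload_lock() to serialize teardown against concurrent hnae3 activity. The lock's implementation is not in this diff; a plain mutex in the hnae3 core is one plausible shape (an assumption, not the actual hnae3 code):

	#include <linux/mutex.h>

	static DEFINE_MUTEX(hnae3_unload_lock);	/* assumed implementation */

	void hnae3_acquire_unload_lock(void)
	{
		mutex_lock(&hnae3_unload_lock);
	}

	void hnae3_release_unload_lock(void)
	{
		mutex_unlock(&hnae3_unload_lock);
	}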