Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h  11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h  42
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c  14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h  6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c  28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c  13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  103
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c  109
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h  23
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c  89
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c  66
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  327
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h  25
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c  415
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c  50
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h  5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c  8
19 files changed, 1097 insertions(+), 242 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 7d4ae467f3ad..abcd7877f7d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -233,6 +233,17 @@ struct hclgevf_mbx_arq_ring {
__le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
};
+struct hclge_dev;
+
+#define HCLGE_MBX_OPCODE_MAX 256
+struct hclge_mbx_ops_param {
+ struct hclge_vport *vport;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_respond_to_vf_msg *resp_msg;
+};
+
+typedef int (*hclge_mbx_ops_fn)(struct hclge_mbx_ops_param *param);
+
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
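The new hclge_mbx_ops_param structure and hclge_mbx_ops_fn typedef imply an opcode-indexed handler table for VF-to-PF mailbox requests. Below is a minimal sketch of such a dispatch scheme, not part of the patch: the table and function names prefixed with example_ are assumptions, while the handler names reused from hclge_mbx.c later in this series are real.

/* Sketch only: dispatch a mailbox request through an opcode-indexed table.
 * example_mbx_ops[] and example_mbx_dispatch() are assumed names; the real
 * table lives in hclge_mbx.c.
 */
static const hclge_mbx_ops_fn example_mbx_ops[HCLGE_MBX_OPCODE_MAX] = {
	[HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
	[HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
};

static int example_mbx_dispatch(struct hclge_mbx_ops_param *param, u16 code)
{
	if (code >= HCLGE_MBX_OPCODE_MAX || !example_mbx_ops[code])
		return -EOPNOTSUPP;

	return example_mbx_ops[code](param);
}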
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 94f80e1c4020..0179fc288f5f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -97,13 +97,15 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
HNAE3_DEV_SUPPORT_CQ_B,
+ HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ HNAE3_DEV_SUPPORT_LANE_NUM_B,
};
-#define hnae3_dev_fd_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_FD_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_fd_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, (ae_dev)->caps)
-#define hnae3_dev_gro_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_GRO_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_gro_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, (ae_dev)->caps)
#define hnae3_dev_fec_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
@@ -159,6 +161,12 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_cq_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+#define hnae3_ae_dev_fec_stats_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_STATS_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_lane_num_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_LANE_NUM_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -187,6 +195,7 @@ struct hns3_mac_stats {
/* hnae3 loop mode */
enum hnae3_loop {
+ HNAE3_LOOP_EXTERNAL,
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
HNAE3_LOOP_PARALLEL_SERDES,
@@ -223,6 +232,8 @@ enum hnae3_fec_mode {
HNAE3_FEC_AUTO = 0,
HNAE3_FEC_BASER,
HNAE3_FEC_RS,
+ HNAE3_FEC_LLRS,
+ HNAE3_FEC_NONE,
HNAE3_FEC_USER_DEF,
};
@@ -270,6 +281,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_TC_SCH_INFO,
HNAE3_DBG_CMD_QOS_PAUSE_CFG,
HNAE3_DBG_CMD_QOS_PRI_MAP,
+ HNAE3_DBG_CMD_QOS_DSCP_MAP,
HNAE3_DBG_CMD_QOS_BUF_CFG,
HNAE3_DBG_CMD_DEV_INFO,
HNAE3_DBG_CMD_TX_BD,
@@ -308,6 +320,11 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+enum hnae3_tc_map_mode {
+ HNAE3_TC_MAP_MODE_PRIO,
+ HNAE3_TC_MAP_MODE_DSCP,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -560,14 +577,17 @@ struct hnae3_ae_ops {
void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex);
+ u8 *auto_neg, u32 *speed, u8 *duplex,
+ u32 *lane_num);
int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
- u8 duplex);
+ u8 duplex, u8 lane_num);
void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type,
u8 *module_type);
int (*check_port_speed)(struct hnae3_handle *handle, u32 speed);
+ void (*get_fec_stats)(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats);
void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability,
u8 *fec_mode);
int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode);
@@ -737,6 +757,8 @@ struct hnae3_ae_ops {
int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
u32 *status_code);
void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
+ int (*get_dscp_prio)(struct hnae3_handle *handle, u8 dscp,
+ u8 *tc_map_mode, u8 *priority);
};
struct hnae3_dcb_ops {
@@ -745,6 +767,8 @@ struct hnae3_dcb_ops {
int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *);
int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *);
int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *);
+ int (*ieee_setapp)(struct hnae3_handle *h, struct dcb_app *app);
+ int (*ieee_delapp)(struct hnae3_handle *h, struct dcb_app *app);
/* DCBX configuration */
u8 (*getdcbx)(struct hnae3_handle *);
@@ -774,6 +798,8 @@ struct hnae3_tc_info {
bool mqprio_active;
};
+#define HNAE3_MAX_DSCP 64
+#define HNAE3_PRIO_ID_INVALID 0xff
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
@@ -784,6 +810,9 @@ struct hnae3_knic_private_info {
u32 tx_spare_buf_size;
struct hnae3_tc_info tc_info;
+ u8 tc_map_mode;
+ u8 dscp_app_cnt;
+ u8 dscp_prio[HNAE3_MAX_DSCP];
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
@@ -815,6 +844,7 @@ struct hnae3_roce_private_info {
#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
#define HNAE3_SUPPORT_VF BIT(3)
#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+#define HNAE3_SUPPORT_EXTERNAL_LOOPBACK BIT(5)
#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
#define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index c8b151d29f53..f671a63cecde 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -52,9 +52,9 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
bool is_pf)
{
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ if (is_pf) {
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
}
@@ -91,6 +91,7 @@ int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);
req->compat = cpu_to_le32(compat);
}
@@ -150,6 +151,10 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
+ {HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
+ {HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
+ {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -162,6 +167,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};
static void
@@ -220,8 +226,10 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
HNAE3_PCI_REVISION_BIT_SIZE;
ae_dev->dev_version |= ae_dev->pdev->revision;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
hclge_comm_set_default_capability(ae_dev, is_pf);
+ return 0;
+ }
hclge_comm_parse_capability(ae_dev, is_pf, resp);
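For context, the capability tables above pair a firmware-reported bit with a driver-side HNAE3 bit, and hclge_comm_parse_capability() copies one into the other roughly as sketched below. This is illustrative only; the example_ function name and the imp_bit/local_bit field names are assumptions, and the real code reads the bits out of the firmware response descriptor rather than a ready-made bitmap.

/* Sketch only: translate firmware capability bits into ae_dev->caps. */
static void example_parse_caps(struct hnae3_ae_dev *ae_dev,
			       const unsigned long *fw_caps,
			       const struct hclge_comm_caps_bit_map *map,
			       u32 size)
{
	u32 i;

	for (i = 0; i < size; i++)
		if (test_bit(map[i].imp_bit, fw_caps))
			set_bit(map[i].local_bit, ae_dev->caps);
}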
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 7a7d4cf9bf35..b1f9383b418f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -20,6 +20,7 @@
#define HCLGE_COMM_PHY_IMP_EN_B 2
#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3
#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4
+#define HCLGE_COMM_LLRS_FEC_EN_B 5
#define hclge_comm_dev_phy_imp_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps)
@@ -102,6 +103,7 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
+ HCLGE_OPC_QUERY_FEC_STATS = 0x0316,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
@@ -339,6 +341,10 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
HCLGE_COMM_CAP_CQ_B = 18,
+ HCLGE_COMM_CAP_GRO_B = 20,
+ HCLGE_COMM_CAP_FD_B = 21,
+ HCLGE_COMM_CAP_FEC_STATS_B = 25,
+ HCLGE_COMM_CAP_LANE_NUM_B = 27,
};
enum HCLGE_COMM_API_CAP_BITS {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index d2ec4c573bf8..3b6dbf158b98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -56,6 +56,32 @@ static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP;
}
+static int hns3_dcbnl_ieee_setapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setapp)
+ return h->kinfo.dcb_ops->ieee_setapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setapp)
+ return h->kinfo.dcb_ops->ieee_delapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
/* DCBX configuration */
static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
{
@@ -83,6 +109,8 @@ static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = {
.ieee_setets = hns3_dcbnl_ieee_setets,
.ieee_getpfc = hns3_dcbnl_ieee_getpfc,
.ieee_setpfc = hns3_dcbnl_ieee_setpfc,
+ .ieee_setapp = hns3_dcbnl_ieee_setapp,
+ .ieee_delapp = hns3_dcbnl_ieee_delapp,
.getdcbx = hns3_dcbnl_getdcbx,
.setdcbx = hns3_dcbnl_setdcbx,
};
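The new ieee_setapp/ieee_delapp hooks consume standard dcbnl APP-table entries with the DSCP selector, where the protocol field carries the DSCP value. A sketch of such an entry follows; the variable name and the DSCP/priority values are arbitrary examples, not part of the patch.

/* Sketch only: an APP entry as passed to ieee_setapp()/ieee_delapp(). */
struct dcb_app example_app = {
	.selector = IEEE_8021QAZ_APP_SEL_DSCP,
	.protocol = 46,	/* DSCP value (EF), must be < HNAE3_MAX_DSCP */
	.priority = 5,	/* user priority, must be < HNAE3_MAX_USER_PRIO */
};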
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 93aeb615191d..66feb23f7b7b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -106,6 +106,13 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.init = hns3_dbg_common_file_init,
},
{
+ .name = "qos_dscp_map",
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
@@ -395,6 +402,12 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}, {
.name = "support modify vlan filter state",
.cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ }, {
+ .name = "support FEC statistics",
+ .cap_bit = HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ }, {
+ .name = "support lane num",
+ .cap_bit = HNAE3_DEV_SUPPORT_LANE_NUM_B,
}
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 35d70041b9e8..4cb2421e71a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2963,6 +2963,48 @@ static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
}
+#define HNS3_INVALID_DSCP 0xff
+#define HNS3_DSCP_SHIFT 2
+
+static u8 hns3_get_skb_dscp(struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+ u8 dscp = HNS3_INVALID_DSCP;
+
+ if (protocol == htons(ETH_P_8021Q))
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT;
+ else if (protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT;
+
+ return dscp;
+}
+
+static u16 hns3_nic_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 dscp;
+
+ if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP ||
+ !h->ae_algo->ops->get_dscp_prio)
+ goto out;
+
+ dscp = hns3_get_skb_dscp(skb);
+ if (unlikely(dscp >= HNAE3_MAX_DSCP))
+ goto out;
+
+ skb->priority = h->kinfo.dscp_prio[dscp];
+ if (skb->priority == HNAE3_PRIO_ID_INVALID)
+ skb->priority = 0;
+
+out:
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -2988,6 +3030,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
.ndo_set_vf_rate = hns3_nic_set_vf_rate,
.ndo_set_vf_mac = hns3_nic_set_vf_mac,
+ .ndo_select_queue = hns3_nic_select_queue,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -3271,12 +3314,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ if (hnae3_ae_dev_gro_supported(ae_dev))
netdev->features |= NETIF_F_GRO_HW;
- if (!(h->flags & HNAE3_SUPPORT_VF))
- netdev->features |= NETIF_F_NTUPLE;
- }
+ if (hnae3_ae_dev_fd_supported(ae_dev))
+ netdev->features |= NETIF_F_NTUPLE;
if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
netdev->features |= NETIF_F_GSO_UDP_L4;
@@ -4650,7 +4692,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
goto map_ring_fail;
netif_napi_add(priv->netdev, &tqp_vector->napi,
- hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+ hns3_nic_common_poll);
}
return 0;
@@ -5782,6 +5824,57 @@ int hns3_set_channels(struct net_device *netdev,
return 0;
}
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during reset process, because driver may not be able
+ * to disable the ring through firmware when downing the netdev.
+ */
+ if (!hns3_nic_resetting(ndev))
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ hns3_reset_tx_queue(priv->ae_handle);
+}
+
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (h->ae_algo->ops->get_status(h))
+ netif_carrier_on(ndev);
+}
+
static const struct hns3_hw_error_info hns3_hw_err[] = {
{ .type = HNAE3_PPU_POISON_ERROR,
.msg = "PPU poison" },
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 4a3253692dcc..133a054af6b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -744,4 +744,7 @@ u16 hns3_get_max_available_channels(struct hnae3_handle *h);
void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
enum dim_cq_period_mode tx_mode,
enum dim_cq_period_mode rx_mode);
+
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 4c7988e308a2..cdf76fb58d45 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -69,7 +69,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TYPE_NUM 4
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -95,6 +94,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
case HNAE3_LOOP_PARALLEL_SERDES:
case HNAE3_LOOP_APP:
case HNAE3_LOOP_PHY:
+ case HNAE3_LOOP_EXTERNAL:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
@@ -304,6 +304,10 @@ out:
static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
{
+ st_param[HNAE3_LOOP_EXTERNAL][0] = HNAE3_LOOP_EXTERNAL;
+ st_param[HNAE3_LOOP_EXTERNAL][1] =
+ h->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
+
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -322,17 +326,11 @@ static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
}
-static void hns3_selftest_prepare(struct net_device *ndev,
- bool if_running, int (*st_param)[2])
+static void hns3_selftest_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test start\n");
-
- hns3_set_selftest_param(h, st_param);
-
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
@@ -371,18 +369,15 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
-
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test end\n");
}
static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
struct ethtool_test *eth_test, u64 *data)
{
- int test_index = 0;
+ int test_index = HNAE3_LOOP_APP;
u32 i;
- for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
+ for (i = HNAE3_LOOP_APP; i < HNAE3_LOOP_NONE; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
if (!st_param[i][1])
@@ -401,6 +396,20 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
}
}
+static void hns3_do_external_lb(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_up(ndev, HNAE3_LOOP_EXTERNAL);
+ if (!data[HNAE3_LOOP_EXTERNAL])
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_run_test(ndev, HNAE3_LOOP_EXTERNAL);
+ hns3_lp_down(ndev, HNAE3_LOOP_EXTERNAL);
+
+ if (data[HNAE3_LOOP_EXTERNAL])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+}
+
/**
* hns3_self_test - self test
* @ndev: net device
@@ -410,7 +419,9 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
static void hns3_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
- int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int st_param[HNAE3_LOOP_NONE][2];
bool if_running = netif_running(ndev);
if (hns3_nic_resetting(ndev)) {
@@ -418,13 +429,29 @@ static void hns3_self_test(struct net_device *ndev,
return;
}
- /* Only do offline selftest, or pass by default */
- if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+ if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
return;
- hns3_selftest_prepare(ndev, if_running, st_param);
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+
+ hns3_set_selftest_param(h, st_param);
+
+ /* external loopback test requires that the link is up and the duplex is
+ * full, do external test first to reduce the whole test time
+ */
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ hns3_external_lb_prepare(ndev, if_running);
+ hns3_do_external_lb(ndev, eth_test, data);
+ hns3_external_lb_restore(ndev, if_running);
+ }
+
+ hns3_selftest_prepare(ndev, if_running);
hns3_do_selftest(ndev, st_param, eth_test, data);
hns3_selftest_restore(ndev, if_running);
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
@@ -712,7 +739,8 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
ops->get_ksettings_an_result(h,
&cmd->base.autoneg,
&cmd->base.speed,
- &cmd->base.duplex);
+ &cmd->base.duplex,
+ &cmd->lanes);
/* 2.get link mode */
if (ops->get_link_mode)
@@ -794,6 +822,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u32 lane_num;
u8 autoneg;
u32 speed;
u8 duplex;
@@ -806,9 +835,9 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
return 0;
if (ops->get_ksettings_an_result) {
- ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex, &lane_num);
if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
- cmd->base.duplex == duplex)
+ cmd->base.duplex == duplex && cmd->lanes == lane_num)
return 0;
}
@@ -845,10 +874,14 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
+ if (cmd->lanes && !hnae3_ae_dev_lane_num_supported(ae_dev))
+ return -EOPNOTSUPP;
+
netif_dbg(handle, drv, netdev,
- "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u, lanes=%u\n",
netdev->phydev ? "phy" : "mac",
- cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex,
+ cmd->lanes);
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev) {
@@ -886,7 +919,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (ops->cfg_mac_speed_dup_h)
ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
- cmd->base.duplex);
+ cmd->base.duplex, (u8)(cmd->lanes));
return ret;
}
@@ -1612,6 +1645,19 @@ static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
h->msg_enable = msg_level;
}
+static void hns3_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
+ return;
+
+ ops->get_fec_stats(handle, fec_stats);
+}
+
/* Translate local fec value into ethtool value. */
static unsigned int loc_to_eth_fec(u8 loc_fec)
{
@@ -1621,12 +1667,12 @@ static unsigned int loc_to_eth_fec(u8 loc_fec)
eth_fec |= ETHTOOL_FEC_AUTO;
if (loc_fec & BIT(HNAE3_FEC_RS))
eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_LLRS))
+ eth_fec |= ETHTOOL_FEC_LLRS;
if (loc_fec & BIT(HNAE3_FEC_BASER))
eth_fec |= ETHTOOL_FEC_BASER;
-
- /* if nothing is set, then FEC is off */
- if (!eth_fec)
- eth_fec = ETHTOOL_FEC_OFF;
+ if (loc_fec & BIT(HNAE3_FEC_NONE))
+ eth_fec |= ETHTOOL_FEC_OFF;
return eth_fec;
}
@@ -1637,12 +1683,13 @@ static unsigned int eth_to_loc_fec(unsigned int eth_fec)
u32 loc_fec = 0;
if (eth_fec & ETHTOOL_FEC_OFF)
- return loc_fec;
-
+ loc_fec |= BIT(HNAE3_FEC_NONE);
if (eth_fec & ETHTOOL_FEC_AUTO)
loc_fec |= BIT(HNAE3_FEC_AUTO);
if (eth_fec & ETHTOOL_FEC_RS)
loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_LLRS)
+ loc_fec |= BIT(HNAE3_FEC_LLRS);
if (eth_fec & ETHTOOL_FEC_BASER)
loc_fec |= BIT(HNAE3_FEC_BASER);
@@ -1668,6 +1715,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
fec->fec = loc_to_eth_fec(fec_ability);
fec->active_fec = loc_to_eth_fec(fec_mode);
+ if (!fec->active_fec)
+ fec->active_fec = ETHTOOL_FEC_OFF;
return 0;
}
@@ -2051,6 +2100,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
static const struct ethtool_ops hns3_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.supported_ring_params = HNS3_ETHTOOL_RING,
+ .cap_link_lanes_supported = true,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
@@ -2081,6 +2131,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_fecparam = hns3_get_fecparam,
.set_fecparam = hns3_set_fecparam,
+ .get_fec_stats = hns3_get_fec_stats,
.get_module_info = hns3_get_module_info,
.get_module_eeprom = hns3_get_module_eeprom,
.get_priv_flags = hns3_get_priv_flags,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index f9d89511eb32..43cada51d8cb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -321,7 +321,9 @@ struct hclge_config_mac_speed_dup_cmd {
#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
u8 mac_change_fec_en;
- u8 rsv[22];
+ u8 rsv[4];
+ u8 lane_num;
+ u8 rsv1[17];
};
#define HCLGE_TQP_ENABLE_B 0
@@ -347,7 +349,9 @@ struct hclge_sfp_info_cmd {
u8 autoneg_ability; /* whether support autoneg */
__le32 speed_ability; /* speed ability for current media */
__le32 module_type;
- u8 rsv[8];
+ u8 fec_ability;
+ u8 lane_num;
+ u8 rsv[6];
};
#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
@@ -359,12 +363,27 @@ struct hclge_sfp_info_cmd {
#define HCLGE_MAC_FEC_OFF 0
#define HCLGE_MAC_FEC_BASER 1
#define HCLGE_MAC_FEC_RS 2
+#define HCLGE_MAC_FEC_LLRS 3
struct hclge_config_fec_cmd {
u8 fec_mode;
u8 default_config;
u8 rsv[22];
};
+#define HCLGE_FEC_STATS_CMD_NUM 4
+
+struct hclge_query_fec_stats_cmd {
+ /* fec rs mode total stats */
+ __le32 rs_fec_corr_blocks;
+ __le32 rs_fec_uncorr_blocks;
+ __le32 rs_fec_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u8 base_r_lane_num;
+ u8 rsv[3];
+ __le32 base_r_fec_corr_blocks;
+ __le32 base_r_fec_uncorr_blocks;
+};
+
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 69b8673436ca..c4aded65e848 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -359,6 +359,93 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
+static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ struct dcb_app old_app;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO)
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ if (app->priority == h->kinfo.dscp_prio[app->protocol])
+ return 0;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = h->kinfo.dscp_prio[app->protocol];
+
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = old_app.priority;
+ (void)dcb_ieee_delapp(netdev, app);
+ return ret;
+ }
+
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
+ if (old_app.priority == HNAE3_PRIO_ID_INVALID)
+ h->kinfo.dscp_app_cnt++;
+ else
+ ret = dcb_ieee_delapp(netdev, &old_app);
+
+ return ret;
+}
+
+static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO ||
+ app->priority != h->kinfo.dscp_prio[app->protocol])
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to del dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ (void)dcb_ieee_setapp(netdev, app);
+ return ret;
+ }
+
+ if (h->kinfo.dscp_app_cnt)
+ h->kinfo.dscp_app_cnt--;
+
+ if (!h->kinfo.dscp_app_cnt) {
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ ret = hclge_up_to_tc_map(hdev);
+ }
+
+ return ret;
+}
+
/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
@@ -543,6 +630,8 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = {
.ieee_setets = hclge_ieee_setets,
.ieee_getpfc = hclge_ieee_getpfc,
.ieee_setpfc = hclge_ieee_setpfc,
+ .ieee_setapp = hclge_ieee_setapp,
+ .ieee_delapp = hclge_ieee_delapp,
.getdcbx = hclge_getdcbx,
.setdcbx = hclge_setdcbx,
.setup_tc = hclge_setup_tc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 9b870e79c290..142415c84c6b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -14,6 +14,8 @@ static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
+static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
@@ -1115,10 +1117,11 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
return 0;
}
+#define HCLGE_DBG_TC_MASK 0x0F
+
static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
int len)
{
-#define HCLGE_DBG_TC_MASK 0x0F
#define HCLGE_DBG_TC_BIT_WIDTH 4
struct hclge_qos_pri_map_cmd *pri_map;
@@ -1152,6 +1155,58 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
return 0;
}
+static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 dscp_tc[HNAE3_MAX_DSCP];
+ int pos, ret;
+ u8 i, j;
+
+ pos = scnprintf(buf, len, "tc map mode: %s\n",
+ tc_map_mode_str[kinfo->tc_map_mode]);
+
+ if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos dscp map, ret = %d\n", ret);
+ return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+
+ /* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ /* Each dscp setting has 4 bits, so each byte saves two dscp
+ * setting
+ */
+ dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[i] &= HCLGE_DBG_TC_MASK;
+ dscp_tc[j] &= HCLGE_DBG_TC_MASK;
+ }
+
+ for (i = 0; i < HNAE3_MAX_DSCP; i++) {
+ if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
+ continue;
+
+ pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
+ i, kinfo->dscp_prio[i], dscp_tc[i]);
+ }
+
+ return 0;
+}
+
static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
@@ -1517,7 +1572,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
char *tcam_buf;
int pos = 0;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"Only FD-supported dev supports dump fd tcam\n");
return -EOPNOTSUPP;
@@ -1585,6 +1640,9 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
u64 cnt;
u8 i;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
pos += scnprintf(buf + pos, len - pos,
"func_id\thit_times\n");
@@ -2374,6 +2432,10 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
.dbg_dump = hclge_dbg_dump_qos_pri_map,
},
{
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ },
+ {
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
},
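For reference, the unpacking in hclge_dbg_dump_qos_dscp_map() above relies on each DSCP-to-TC mapping being 4 bits wide, two entries per descriptor byte. The sketch below shows the per-entry extraction under the assumption that HCLGE_DSCP_TC_SHIFT() selects the high nibble for odd DSCP indices; the example_ function name is illustrative.

/* Sketch only: two 4-bit DSCP->TC entries packed into each byte. */
static u8 example_get_dscp_tc(const u8 *data, u8 dscp)
{
	u8 shift = (dscp & 1) ? 4 : 0;	/* odd DSCP -> high nibble */

	return (data[dscp >> 1] >> shift) & 0x0F;
}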
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index fae79764dc44..6962a9d69cf8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -71,6 +71,7 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
+static void hclge_update_fec_stats(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -148,10 +149,11 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
- "App Loopback test",
- "Serdes serial Loopback test",
- "Serdes parallel Loopback test",
- "Phy Loopback test"
+ "External Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
+ "Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
@@ -679,6 +681,8 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
}
}
+ hclge_update_fec_stats(hdev);
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -715,7 +719,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
HNAE3_SUPPORT_PHY_LOOPBACK | \
HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
- HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
+ HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -737,9 +742,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 2;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
hdev->hw.mac.phydev->drv->set_loopback) ||
@@ -770,6 +778,11 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
size, p);
p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
+ if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
@@ -1003,6 +1016,27 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
+static void hclge_update_fec_support(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+
+ if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->supported);
+}
+
static void hclge_convert_setting_sr(u16 speed_ability,
unsigned long *link_mode)
{
@@ -1101,34 +1135,36 @@ static void hclge_convert_setting_kr(u16 speed_ability,
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ /* If firmware has reported fec_ability, don't need to convert by speed */
+ if (mac->fec_ability)
+ goto out;
switch (mac->speed) {
case HCLGE_MAC_SPEED_10G:
case HCLGE_MAC_SPEED_40G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_25G:
case HCLGE_MAC_SPEED_50G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
- BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_100G:
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
+ break;
case HCLGE_MAC_SPEED_200G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
- mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_LLRS);
break;
default:
mac->fec_ability = 0;
break;
}
+
+out:
+ hclge_update_fec_support(mac);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
@@ -1574,7 +1610,7 @@ static int hclge_configure(struct hclge_dev *hdev)
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
- if (hnae3_dev_fd_supported(hdev)) {
+ if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
hdev->fd_en = true;
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}
@@ -1617,7 +1653,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
@@ -2589,7 +2625,7 @@ static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
@@ -2613,6 +2649,7 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
+ req->lane_num = lane_num;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2624,33 +2661,35 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
return 0;
}
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
if (!mac->support_autoneg && mac->speed == speed &&
- mac->duplex == duplex)
+ mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
if (ret)
return ret;
hdev->hw.mac.speed = speed;
hdev->hw.mac.duplex = duplex;
+ if (!lane_num)
+ hdev->hw.mac.lane_num = lane_num;
return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2730,6 +2769,157 @@ static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
return 0;
}
+static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
+ u32 desc_index = 0;
+ u32 data_index = 0;
+ u32 i;
+
+ for (i = 0; i < lane_size; i++) {
+ if (data_index >= HCLGE_DESC_DATA_LEN) {
+ desc_index++;
+ data_index = 0;
+ }
+
+ if (desc_index >= desc_len)
+ return;
+
+ hdev->fec_stats.per_lanes[i] +=
+ le32_to_cpu(desc[desc_index].data[data_index]);
+ data_index++;
+ }
+}
+
+static void hclge_parse_fec_stats(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ struct hclge_query_fec_stats_cmd *req;
+
+ req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
+
+ hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
+ hdev->fec_stats.rs_corr_blocks +=
+ le32_to_cpu(req->rs_fec_corr_blocks);
+ hdev->fec_stats.rs_uncorr_blocks +=
+ le32_to_cpu(req->rs_fec_uncorr_blocks);
+ hdev->fec_stats.rs_error_blocks +=
+ le32_to_cpu(req->rs_fec_error_blocks);
+ hdev->fec_stats.base_r_corr_blocks +=
+ le32_to_cpu(req->base_r_fec_corr_blocks);
+ hdev->fec_stats.base_r_uncorr_blocks +=
+ le32_to_cpu(req->base_r_fec_uncorr_blocks);
+
+ hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
+}
+
+static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
+ int ret;
+ u32 i;
+
+ for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
+ true);
+ if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
+
+ return 0;
+}
+
+static void hclge_update_fec_stats(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
+ test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_update_fec_stats_hw(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to update fec stats, ret = %d\n", ret);
+
+ clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
+}
+
+static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
+ fec_stats->uncorrectable_blocks.total =
+ hdev->fec_stats.rs_uncorr_blocks;
+}
+
+static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 i;
+
+ if (hdev->fec_stats.base_r_lane_num == 0 ||
+ hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
+ dev_err(&hdev->pdev->dev,
+ "fec stats lane number(%llu) is invalid\n",
+ hdev->fec_stats.base_r_lane_num);
+ return;
+ }
+
+ for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
+ fec_stats->corrected_blocks.lanes[i] =
+ hdev->fec_stats.base_r_corr_per_lanes[i];
+ fec_stats->uncorrectable_blocks.lanes[i] =
+ hdev->fec_stats.base_r_uncorr_per_lanes[i];
+ }
+}
+
+static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ switch (fec_mode) {
+ case BIT(HNAE3_FEC_RS):
+ case BIT(HNAE3_FEC_LLRS):
+ hclge_get_fec_stats_total(hdev, fec_stats);
+ break;
+ case BIT(HNAE3_FEC_BASER):
+ hclge_get_fec_stats_lanes(hdev, fec_stats);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fec stats is not supported by current fec mode(0x%x)\n",
+ fec_mode);
+ break;
+ }
+}
+
+static void hclge_get_fec_stats(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ if (fec_mode == BIT(HNAE3_FEC_NONE) ||
+ fec_mode == BIT(HNAE3_FEC_AUTO) ||
+ fec_mode == BIT(HNAE3_FEC_USER_DEF))
+ return;
+
+ hclge_update_fec_stats(hdev);
+
+ hclge_comm_get_fec_stats(hdev, fec_stats);
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2744,6 +2934,9 @@ static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
if (fec_mode & BIT(HNAE3_FEC_RS))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_LLRS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
if (fec_mode & BIT(HNAE3_FEC_BASER))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
@@ -2796,7 +2989,7 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex);
+ hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
return ret;
@@ -2988,6 +3181,9 @@ static void hclge_update_fec_advertising(struct hclge_mac *mac)
if (mac->fec_mode & BIT(HNAE3_FEC_RS))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->advertising);
else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
mac->advertising);
@@ -3037,7 +3233,6 @@ static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
if (hnae3_dev_fec_supported(hdev))
- /* update fec ability by speed */
hclge_convert_setting_fec(mac);
/* firmware can not identify back plane type, the media type
@@ -3119,10 +3314,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
mac->speed_type = QUERY_ACTIVE_SPEED;
+ mac->lane_num = resp->lane_num;
if (!resp->active_fec)
mac->fec_mode = 0;
else
mac->fec_mode = BIT(resp->active_fec);
+ mac->fec_ability = resp->fec_ability;
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -3302,13 +3499,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
- HCLGE_MAC_FULL);
+ HCLGE_MAC_FULL, mac->lane_num);
} else {
if (speed == HCLGE_MAC_SPEED_UNKNOWN)
return 0; /* do nothing if no SFP */
/* must config full duplex for SFP */
- return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
}
}
@@ -5334,7 +5531,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
struct hclge_fd_key_cfg *key_cfg;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
@@ -6339,7 +6536,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 action;
int ret;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"flow table director is not supported\n");
return -EOPNOTSUPP;
@@ -6395,7 +6592,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
struct ethtool_rx_flow_spec *fs;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6431,9 +6628,6 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
struct hlist_node *node;
u16 location;
- if (!hnae3_dev_fd_supported(hdev))
- return;
-
spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
@@ -6458,6 +6652,9 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
hclge_clear_fd_rules_in_list(hdev, true);
hclge_fd_disable_user_def(hdev);
}
@@ -6473,7 +6670,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
* return value. If error is returned here, the reset process will
* fail.
*/
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
/* if fd is disabled, should not restore it when reset */
@@ -6497,7 +6694,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
return -EOPNOTSUPP;
cmd->rule_cnt = hdev->hclge_fd_rule_num;
@@ -6715,7 +6912,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6778,7 +6975,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hlist_node *node2;
int cnt = 0;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
@@ -6878,7 +7075,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
struct hclge_fd_rule *rule;
u16 bit_id;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
/* when there is already fd rule existed add by user,
@@ -7167,6 +7364,12 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "cls flower is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = hclge_check_cls_flower(hdev, cls_flower, tc);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -7220,6 +7423,9 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
spin_lock_bh(&hdev->fd_rule_lock);
rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
@@ -7282,6 +7488,9 @@ out:
static void hclge_sync_fd_table(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
@@ -7705,7 +7914,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int ret;
+ int ret = 0;
/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
* default, SSU loopback is enabled, so if the SMAC and the DMAC are
@@ -7732,6 +7941,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PHY:
ret = hclge_set_phy_loopback(hdev, en);
break;
+ case HNAE3_LOOP_EXTERNAL:
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -10793,7 +11004,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex)
+ u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -10804,6 +11015,8 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
*duplex = hdev->hw.mac.duplex;
if (auto_neg)
*auto_neg = hdev->hw.mac.autoneg;
+ if (lane_num)
+ *lane_num = hdev->hw.mac.lane_num;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
@@ -11443,6 +11656,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_mdiobus_unreg;
+ ret = hclge_update_port_info(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -11510,6 +11727,7 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+ memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -12763,6 +12981,21 @@ static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
}
}
+static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
+ u8 *priority)
+{
+ if (dscp >= HNAE3_MAX_DSCP)
+ return -EINVAL;
+
+ if (tc_mode)
+ *tc_mode = h->kinfo.tc_map_mode;
+ if (priority)
+ *priority = h->kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
+ h->kinfo.dscp_prio[dscp];
+
+ return 0;
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -12786,6 +13019,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
.get_media_type = hclge_get_media_type,
.check_port_speed = hclge_check_port_speed,
+ .get_fec_stats = hclge_get_fec_stats,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_comm_get_rss_key_size,
@@ -12865,6 +13099,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_ts_info = hclge_ptp_get_ts_info,
.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
.clean_vf_config = hclge_clean_vport_config,
+ .get_dscp_prio = hclge_get_dscp_prio,
};
static struct hnae3_ae_algo ae_algo = {
@@ -12872,7 +13107,7 @@ static struct hnae3_ae_algo ae_algo = {
.pdev_id_table = ae_algo_pci_tbl,
};
-static int hclge_init(void)
+static int __init hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
@@ -12887,7 +13122,7 @@ static int hclge_init(void)
return 0;
}
-static void hclge_exit(void)
+static void __exit hclge_exit(void)
{
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 18caddd541f8..495b639b0dc2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -216,6 +216,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_FD_USER_DEF_CHANGED,
HCLGE_STATE_PTP_EN,
HCLGE_STATE_PTP_TX_HANDLING,
+ HCLGE_STATE_FEC_STATS_UPDATING,
HCLGE_STATE_MAX
};
@@ -258,6 +259,7 @@ struct hclge_mac {
u8 duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
+ u8 lane_num;
u32 speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
@@ -488,6 +490,26 @@ struct hclge_mac_stats {
#define HCLGE_STATS_TIMER_INTERVAL 300UL
+/* fec stats, opcode id: 0x0316 */
+#define HCLGE_FEC_STATS_MAX_LANES 8
+struct hclge_fec_stats {
+ /* fec rs mode total stats */
+ u64 rs_corr_blocks;
+ u64 rs_uncorr_blocks;
+ u64 rs_error_blocks;
+ /* fec base-r mode per-lane stats */
+ u64 base_r_lane_num;
+ u64 base_r_corr_blocks;
+ u64 base_r_uncorr_blocks;
+ union {
+ struct {
+ u64 base_r_corr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ u64 base_r_uncorr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ };
+ u64 per_lanes[HCLGE_FEC_STATS_MAX_LANES * 2];
+ };
+};
+
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
u16 rx_ot_sec_vlan_type;
@@ -826,6 +848,7 @@ struct hclge_dev {
struct hclge_hw hw;
struct hclge_misc_vector misc_vector;
struct hclge_mac_stats mac_stats;
+ struct hclge_fec_stats fec_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
@@ -1070,7 +1093,7 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
}
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
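[Editor's note] struct hclge_fec_stats above overlays the named per-lane counters with a flat per_lanes[] array through an anonymous union, so a command response can be filled linearly while readers keep the structured view. A standalone sketch of the same layout trick follows; the type, field, and constant names here are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define MAX_LANES 8 /* stand-in for HCLGE_FEC_STATS_MAX_LANES */

/* Same aliasing idea as struct hclge_fec_stats: write through the flat
 * per_lanes[] view, read through the named corrected/uncorrected halves.
 */
struct fec_lane_stats {
	union {
		struct {
			uint64_t corr_per_lane[MAX_LANES];
			uint64_t uncorr_per_lane[MAX_LANES];
		};
		uint64_t per_lanes[MAX_LANES * 2];
	};
};

int main(void)
{
	struct fec_lane_stats stats = { 0 };
	int i;

	/* fill the flat view, e.g. straight from a command response */
	for (i = 0; i < MAX_LANES * 2; i++)
		stats.per_lanes[i] = i;

	/* read back through the structured view */
	printf("lane0 corr=%llu uncorr=%llu\n",
	       (unsigned long long)stats.corr_per_lane[0],
	       (unsigned long long)stats.uncorr_per_lane[0]);
	return 0;
}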
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index e1012f7f9b73..a7b06c63143c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -779,17 +779,284 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport,
}
}
+static int
+hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, true,
+ param->req);
+}
+
+static int
+hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, false,
+ param->req);
+}
+
+static int
+hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_get_vf_ring_vector_map(param->vport, param->req,
+ param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to get VF ring vector map\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_set_vf_promisc_mode(param->vport, param->req);
+ return 0;
+}
+
+static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_uc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_alive(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_depth(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_basic_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_push_vf_link_status(param->vport);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "failed to inform link stat to VF, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_mbx_reset_vf_queue(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_reset_vf(param->vport);
+}
+
+static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_vf_keep_alive(param->vport);
+ return 0;
+}
+
+static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mtu(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_queue_id_in_pf(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_rss_key(param->vport, param->req, param->resp_msg);
+}
+
+static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_link_mode(param->vport, param->req);
+ return 0;
+}
+
+static int
+hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, false);
+ return 0;
+}
+
+static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, true);
+ return 0;
+}
+
+static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_media_type(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_link_change_event(param->vport->back, param->req);
+ return 0;
+}
+
+static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_mac_addr(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_ncsi_error(param->vport->back);
+ return 0;
+}
+
+static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_vf_tbl(param->vport, param->req);
+ return 0;
+}
+
+static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
+ [HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
+ [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
+ [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
+ [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
+ [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
+ [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
+ [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
+ [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
+ [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
+ [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
+ [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
+ [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
+ [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
+ [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
+ [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
+ [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
+ [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
+ [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
+ [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
+ [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
+ [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
+ [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
+ [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
+ [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
+ [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
+ [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
+};
+
+static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
+{
+ hclge_mbx_ops_fn cmd_func = NULL;
+ struct hclge_dev *hdev;
+ int ret = 0;
+
+ hdev = param->vport->back;
+ cmd_func = hclge_mbx_ops_list[param->req->msg.code];
+ if (cmd_func)
+ ret = cmd_func(param);
+ else
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %u\n",
+ param->req->msg.code);
+
+ /* PF driver should not reply to IMP */
+ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ param->resp_msg->status = ret;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ param->req->mbx_src_vfid,
+ param->req->msg.code,
+ param->req->msg.subcode);
+
+ hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg);
+ }
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclge_vport *vport;
+ struct hclge_mbx_ops_param param;
struct hclge_desc *desc;
- bool is_del = false;
unsigned int flag;
- int ret = 0;
+ param.resp_msg = &resp_msg;
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
@@ -814,152 +1081,16 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
continue;
}
- vport = &hdev->vport[req->mbx_src_vfid];
-
trace_hclge_pf_mbx_get(hdev, req);
/* clear the resp_msg before processing every mailbox message */
memset(&resp_msg, 0, sizeof(resp_msg));
-
- switch (req->msg.code) {
- case HCLGE_MBX_MAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
- req);
- break;
- case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
- req);
- break;
- case HCLGE_MBX_GET_RING_VECTOR_MAP:
- ret = hclge_get_vf_ring_vector_map(vport, req,
- &resp_msg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get VF ring vector map\n",
- ret);
- break;
- case HCLGE_MBX_SET_PROMISC_MODE:
- hclge_set_vf_promisc_mode(vport, req);
- break;
- case HCLGE_MBX_SET_UNICAST:
- ret = hclge_set_vf_uc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF UC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_MULTICAST:
- ret = hclge_set_vf_mc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF MC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to config VF's VLAN\n",
- ret);
- break;
- case HCLGE_MBX_SET_ALIVE:
- ret = hclge_set_vf_alive(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to set VF's ALIVE\n",
- ret);
- break;
- case HCLGE_MBX_GET_QINFO:
- hclge_get_vf_queue_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_QDEPTH:
- hclge_get_vf_queue_depth(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_BASIC_INFO:
- hclge_get_basic_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_STATUS:
- ret = hclge_push_vf_link_status(vport);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "failed to inform link stat to VF, ret = %d\n",
- ret);
- break;
- case HCLGE_MBX_QUEUE_RESET:
- ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_RESET:
- ret = hclge_reset_vf(vport);
- break;
- case HCLGE_MBX_KEEP_ALIVE:
- hclge_vf_keep_alive(vport);
- break;
- case HCLGE_MBX_SET_MTU:
- ret = hclge_set_vf_mtu(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "VF fail(%d) to set mtu\n", ret);
- break;
- case HCLGE_MBX_GET_QID_IN_PF:
- ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_RSS_KEY:
- ret = hclge_get_rss_key(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_MODE:
- hclge_get_link_mode(vport, req);
- break;
- case HCLGE_MBX_GET_VF_FLR_STATUS:
- case HCLGE_MBX_VF_UNINIT:
- is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_UC);
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_MC);
- hclge_rm_vport_all_vlan_table(vport, is_del);
- break;
- case HCLGE_MBX_GET_MEDIA_TYPE:
- hclge_get_vf_media_type(vport, &resp_msg);
- break;
- case HCLGE_MBX_PUSH_LINK_STATUS:
- hclge_handle_link_change_event(hdev, req);
- break;
- case HCLGE_MBX_GET_MAC_ADDR:
- hclge_get_vf_mac_addr(vport, &resp_msg);
- break;
- case HCLGE_MBX_NCSI_ERROR:
- hclge_handle_ncsi_error(hdev);
- break;
- case HCLGE_MBX_HANDLE_VF_TBL:
- hclge_handle_vf_tbl(vport, req);
- break;
- default:
- dev_err(&hdev->pdev->dev,
- "un-supported mailbox message, code = %u\n",
- req->msg.code);
- break;
- }
-
- /* PF driver should not reply IMP */
- if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
- req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
- resp_msg.status = ret;
- if (time_is_before_jiffies(hdev->last_mbx_scheduled +
- HCLGE_MBX_SCHED_TIMEOUT))
- dev_warn(&hdev->pdev->dev,
- "resp vport%u mbx(%u,%u) late\n",
- req->mbx_src_vfid,
- req->msg.code,
- req->msg.subcode);
-
- hclge_gen_resp_to_vf(vport, req, &resp_msg);
- }
+ param.vport = &hdev->vport[req->mbx_src_vfid];
+ param.req = req;
+ hclge_mbx_request_handling(&param);
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
-
- /* reinitialize ret after complete the mbx message processing */
- ret = 0;
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
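[Editor's note] The mailbox rework above replaces the long switch in hclge_mbx_handler() with hclge_mbx_ops_list[], a sparse array of handlers indexed by opcode where a NULL slot means the code is unsupported. A compact user-space sketch of that dispatch-table pattern follows; the opcode values, handler names, and message struct are made up for illustration.

#include <stdio.h>

#define OPCODE_MAX 256 /* stand-in for HCLGE_MBX_OPCODE_MAX */

struct msg_param {
	unsigned int code;
};

typedef int (*mbx_handler_fn)(struct msg_param *param);

static int handle_reset(struct msg_param *param)
{
	printf("reset request, code %u\n", param->code);
	return 0;
}

static int handle_keep_alive(struct msg_param *param)
{
	printf("keep-alive, code %u\n", param->code);
	return 0;
}

/* Sparse table indexed by opcode; unlisted codes stay NULL, which maps to
 * the "un-supported mailbox message" branch in the driver.
 */
static const mbx_handler_fn handlers[OPCODE_MAX] = {
	[1] = handle_reset,      /* hypothetical opcode values */
	[2] = handle_keep_alive,
};

static int dispatch(struct msg_param *param)
{
	mbx_handler_fn fn;

	if (param->code >= OPCODE_MAX)
		return -1;

	fn = handlers[param->code];
	if (!fn) {
		printf("un-supported code %u\n", param->code);
		return -1;
	}
	return fn(param);
}

int main(void)
{
	struct msg_param p = { .code = 2 };

	return dispatch(&p);
}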
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 03d63b6a9b2b..85fb11de43a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -187,7 +187,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
speed = netdev->phydev->speed;
duplex = netdev->phydev->duplex;
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, 0);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 2f33b036a47a..4a33f65190e2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -248,7 +248,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
return 0;
}
-static int hclge_up_to_tc_map(struct hclge_dev *hdev)
+int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
struct hclge_desc desc;
u8 *pri = (u8 *)desc.data;
@@ -266,6 +266,47 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
+{
+ u8 i;
+
+ hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
+ for (i = 0; i < HNAE3_MAX_DSCP; i++)
+ hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
+}
+
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 pri_id, tc_id, i, j;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);
+
+ /* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ /* Each dscp setting has 4 bits, so each byte stores two dscp
+ * settings
+ */
+ req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+ }
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+}
+
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
u8 pg_id, u8 pri_bit_map)
{
@@ -1275,6 +1316,12 @@ static int hclge_tm_map_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
+ if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret)
+ return ret;
+ }
+
ret = hclge_tm_pg_to_pri_map(hdev);
if (ret)
return ret;
@@ -1646,6 +1693,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
return -EINVAL;
hclge_tm_schd_info_init(hdev);
+ hclge_dscp_to_prio_map_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index d943943912f7..68f28a98e380 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -30,6 +30,9 @@ enum hclge_opcode_type;
#define HCLGE_TM_PF_MAX_PRI_NUM 8
#define HCLGE_TM_PF_MAX_QSET_NUM 8
+#define HCLGE_DSCP_MAP_TC_BD_NUM 2
+#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -262,4 +265,6 @@ int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
struct hclge_tm_shaper_para *para);
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
struct hclge_tm_shaper_para *para);
+int hclge_up_to_tc_map(struct hclge_dev *hdev);
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
#endif
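[Editor's note] hclge_dscp_to_tc_map() above packs one 4-bit TC id per DSCP value, two per byte, with the low 32 DSCP entries in bd0 and the high 32 in bd1; HCLGE_DSCP_TC_SHIFT(n) selects the nibble. The following standalone sketch reproduces that packing outside the driver; the buffer layout and the toy DSCP-to-TC mapping are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define MAX_DSCP 64
#define BD_NUM 2                         /* stand-in for HCLGE_DSCP_MAP_TC_BD_NUM */
#define DSCP_TC_SHIFT(n) (((n) & 1) * 4) /* same nibble select as HCLGE_DSCP_TC_SHIFT */

int main(void)
{
	uint8_t bd0[MAX_DSCP / BD_NUM / 2] = { 0 }; /* low 32 DSCP values */
	uint8_t bd1[MAX_DSCP / BD_NUM / 2] = { 0 }; /* high 32 DSCP values */
	uint8_t tc_of_dscp[MAX_DSCP];
	int i, j;

	/* toy mapping: spread DSCP values over 4 TCs */
	for (i = 0; i < MAX_DSCP; i++)
		tc_of_dscp[i] = i % 4;

	/* pack two 4-bit TC ids per byte, mirroring hclge_dscp_to_tc_map() */
	for (i = 0; i < MAX_DSCP / BD_NUM; i++) {
		j = i + MAX_DSCP / BD_NUM;
		bd0[i >> 1] |= tc_of_dscp[i] << DSCP_TC_SHIFT(i);
		bd1[i >> 1] |= tc_of_dscp[j] << DSCP_TC_SHIFT(i);
	}

	/* DSCP 5 lives in bd0 byte 2, upper nibble */
	printf("dscp 5 -> tc %u\n", (bd0[5 >> 1] >> DSCP_TC_SHIFT(5)) & 0xf);
	return 0;
}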
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 26f87330173e..db6f7cdba958 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2125,7 +2125,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
@@ -3177,7 +3177,7 @@ static int hclgevf_get_status(struct hnae3_handle *handle)
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed,
- u8 *duplex)
+ u8 *duplex, u32 *lane_num)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -3429,7 +3429,7 @@ static struct hnae3_ae_algo ae_algovf = {
.pdev_id_table = ae_algovf_pci_tbl,
};
-static int hclgevf_init(void)
+static int __init hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
@@ -3444,7 +3444,7 @@ static int hclgevf_init(void)
return 0;
}
-static void hclgevf_exit(void)
+static void __exit hclgevf_exit(void)
{
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);