author     Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 14:31:10 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 14:31:10 -0800
commit     b2fe5fa68642860e7de76167c3111623aa0d5de1 (patch)
tree       b7f9b89b7039ecefbc35fe3c8e73a6ff972641dd /drivers/net/ethernet/hisilicon
parent     a103950e0dd2058df5e8a8d4a915707bdcf205f0 (diff)
parent     a54667f6728c2714a400f3c884727da74b6d1717 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Significantly shrink the core networking routing structures. Result of
    http://vger.kernel.org/~davem/seoul2017_netdev_keynote.pdf
 2) Add netdevsim driver for testing various offloads, from Jakub Kicinski.
 3) Support cross-chip FDB operations in DSA, from Vivien Didelot.
 4) Add a 2nd listener hash table for TCP, similar to what was done for UDP.
    From Martin KaFai Lau.
 5) Add eBPF based queue selection to tun, from Jason Wang.
 6) Lockless qdisc support, from John Fastabend.
 7) SCTP stream interleave support, from Xin Long.
 8) Smoother TCP receive autotuning, from Eric Dumazet.
 9) Lots of erspan tunneling enhancements, from William Tu.
10) Add true function call support to BPF, from Alexei Starovoitov.
11) Add explicit support for GRO HW offloading, from Michael Chan.
12) Support extack generation in more netlink subsystems. From Alexander
    Aring, Quentin Monnet, and Jakub Kicinski.
13) Add 1000BaseX, flow control, and EEE support to mvneta driver. From
    Russell King.
14) Add flow table abstraction to netfilter, from Pablo Neira Ayuso.
15) Many improvements and simplifications to the NFP driver bpf JIT, from
    Jakub Kicinski.
16) Support for ipv6 non-equal cost multipath routing, from Ido Schimmel.
17) Add resource abstraction to devlink, from Arkadi Sharshevsky.
18) Packet scheduler classifier shared filter block support, from Jiri Pirko.
19) Avoid locking in act_csum, from Davide Caratti.
20) devinet_ioctl() simplifications from Al Viro.
21) More TCP bpf improvements from Lawrence Brakmo.
22) Add support for onlink ipv6 route flag, similar to ipv4, from David Ahern.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1925 commits)
  tls: Add support for encryption using async offload accelerator
  ip6mr: fix stale iterator
  net/sched: kconfig: Remove blank help texts
  openvswitch: meter: Use 64-bit arithmetic instead of 32-bit
  tcp_nv: fix potential integer overflow in tcpnv_acked
  r8169: fix RTL8168EP take too long to complete driver initialization.
  qmi_wwan: Add support for Quectel EP06
  rtnetlink: enable IFLA_IF_NETNSID for RTM_NEWLINK
  ipmr: Fix ptrdiff_t print formatting
  ibmvnic: Wait for device response when changing MAC
  qlcnic: fix deadlock bug
  tcp: release sk_frag.page in tcp_disconnect
  ipv4: Get the address of interface correctly.
  net_sched: gen_estimator: fix lockdep splat
  net: macb: Handle HRESP error
  net/mlx5e: IPoIB, Fix copy-paste bug in flow steering refactoring
  ipv6: addrconf: break critical section in addrconf_verify_rtnl()
  ipv6: change route cache aging logic
  i40e/i40evf: Update DESC_NEEDED value to reflect larger value
  bnxt_en: cleanup DIM work on device shutdown
  ...
Diffstat (limited to 'drivers/net/ethernet/hisilicon')
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 109
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 88
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 32
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c) | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c) | 405
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h) | 28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c) | 347
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 106
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1588
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 129
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 418
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 64
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 342
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 248
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1505
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 164
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 181
26 files changed, 5435 insertions, 413 deletions
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 30000b6aa7b8..8bcf470ff5f3 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -94,15 +94,6 @@ config HNS3_HCLGE
compatibility layer. The engine would be used in Hisilicon hip08 family of
SoCs and further upcoming SoCs.
-config HNS3_ENET
- tristate "Hisilicon HNS3 Ethernet Device Support"
- depends on 64BIT && PCI
- depends on HNS3 && HNS3_HCLGE
- ---help---
- This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
- family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
- devices and their associated operations.
-
config HNS3_DCB
bool "Hisilicon HNS3 Data Center Bridge Support"
default n
@@ -112,4 +103,23 @@ config HNS3_DCB
If unsure, say N.
+config HNS3_HCLGEVF
+ tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+ depends on PCI_MSI
+ depends on HNS3
+ depends on HNS3_HCLGE
+ ---help---
+ This selects the HNS3 VF drivers network acceleration engine & its hardware
+ compatibility layer. The engine would be used in Hisilicon hip08 family of
+ SoCs and further upcoming SoCs.
+
+config HNS3_ENET
+ tristate "Hisilicon HNS3 Ethernet Device Support"
+ depends on 64BIT && PCI
+ depends on HNS3
+ ---help---
+ This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
+ family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
+ devices and their associated operations.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 8b5cdf490850..cac86e9ae0dd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1168,7 +1168,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
- if (!mac_cb || !mac_cb->cpld_ctrl)
+ if (!mac_cb)
return 0;
return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 408b63faf9a8..acf29633ec79 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -18,6 +18,7 @@ enum _dsm_op_index {
HNS_OP_LED_SET_FUNC = 0x3,
HNS_OP_GET_PORT_TYPE_FUNC = 0x4,
HNS_OP_GET_SFP_STAT_FUNC = 0x5,
+ HNS_OP_LOCATE_LED_SET_FUNC = 0x6,
};
enum _dsm_rst_type {
@@ -43,12 +44,17 @@ static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
{
- u32 ret;
-
- if (dsaf_dev->sub_ctrl)
- ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg);
- else
+ u32 ret = 0;
+ int err;
+
+ if (dsaf_dev->sub_ctrl) {
+ err = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg, &ret);
+ if (err)
+ dev_err(dsaf_dev->dev, "dsaf_read_syscon error %d!\n",
+ err);
+ } else {
ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+ }
return ret;
}
@@ -81,6 +87,33 @@ static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
ACPI_FREE(obj);
}
+static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb,
+ u8 op_type, u32 locate,
+ u32 port)
+{
+ union acpi_object obj_args[2], argv4;
+ union acpi_object *obj;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = locate;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n",
+ locate, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
u16 speed, int data)
{
@@ -160,15 +193,23 @@ static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
+ u32 val = 0;
+ int ret;
+
+ if (!mac_cb->cpld_ctrl)
+ return 0;
+
switch (status) {
case HNAE_LED_ACTIVE:
- mac_cb->cpld_led_value =
- dsaf_read_syscon(mac_cb->cpld_ctrl,
- mac_cb->cpld_ctrl_reg);
- dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
- CPLD_LED_ON_VALUE);
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ &val);
+ if (ret)
+ return ret;
+
+ dsaf_set_bit(val, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE);
dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
- mac_cb->cpld_led_value);
+ val);
+ mac_cb->cpld_led_value = val;
break;
case HNAE_LED_INACTIVE:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
@@ -184,6 +225,30 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
return 0;
}
+static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ switch (status) {
+ case HNAE_LED_ACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_ON_VALUE,
+ mac_cb->mac_id);
+ break;
+ case HNAE_LED_INACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_DEFAULT_VALUE,
+ mac_cb->mac_id);
+ break;
+ default:
+ dev_err(mac_cb->dev, "invalid led state: %d!", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
#define RESET_REQ_OR_DREQ 1
static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
@@ -505,12 +570,19 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
+ u32 val = 0;
+ int ret;
+
if (!mac_cb->cpld_ctrl)
return -ENODEV;
- *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg
- + MAC_SFP_PORT_OFFSET);
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg + MAC_SFP_PORT_OFFSET,
+ &val);
+ if (ret)
+ return ret;
+ *sfp_prsnt = !val;
return 0;
}
@@ -560,7 +632,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
#define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
- int sfp_prsnt;
+ int sfp_prsnt = 0;
int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
if (!mac_cb->phy_dev) {
@@ -572,7 +644,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
}
if (mac_cb->serdes_ctrl) {
- u32 origin;
+ u32 origin = 0;
if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
#define HILINK_ACCESS_SEL_CFG 0x40008
@@ -589,7 +661,10 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
HILINK_ACCESS_SEL_CFG, 3);
}
- origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+ ret = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset,
+ &origin);
+ if (ret)
+ return ret;
dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
@@ -660,7 +735,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
} else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
misc_op->cpld_set_led = hns_cpld_set_led_acpi;
misc_op->cpld_reset_led = cpld_led_reset_acpi;
- misc_op->cpld_set_led_id = cpld_set_led_id;
+ misc_op->cpld_set_led_id = cpld_set_led_id_acpi;
misc_op->dsaf_reset = hns_dsaf_rst_acpi;
misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 46a52d9bb196..886cbbf25761 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1034,12 +1034,9 @@ static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
regmap_write(base, reg, value);
}
-static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
+static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
{
- unsigned int val;
-
- regmap_read(base, reg, &val);
- return val;
+ return regmap_read(base, reg, val);
}
#define dsaf_read_dev(a, reg) \
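With dsaf_read_syscon() now returning the regmap error code and handing the value back through a pointer, every caller turns into a check-then-use sequence, as the cpld_set_led_id() and loopback changes above show. A minimal sketch of that read-modify-write pattern follows; set_syscon_bit() is a hypothetical helper, not part of this patch, and it builds on the dsaf_read_syscon()/dsaf_write_syscon() wrappers above.

#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Hypothetical helper illustrating the caller pattern the reworked
 * dsaf_read_syscon() imposes: check the read, modify, then write back.
 */
static int set_syscon_bit(struct regmap *base, u32 reg, u32 bit, bool on)
{
	u32 val = 0;
	int ret;

	ret = dsaf_read_syscon(base, reg, &val);	/* may now fail */
	if (ret)
		return ret;			/* propagate the regmap error */

	if (on)
		val |= BIT(bit);
	else
		val &= ~BIT(bit);

	dsaf_write_syscon(base, reg, val);	/* write path is unchanged */
	return 0;
}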
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index a9349e1f3e51..002534f12b66 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -1,7 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0+
#
# Makefile for the HISILICON network device drivers.
#
obj-$(CONFIG_HNS3) += hns3pf/
+obj-$(CONFIG_HNS3) += hns3vf/
obj-$(CONFIG_HNS3) += hnae3.o
+
+obj-$(CONFIG_HNS3_ENET) += hns3.o
+hns3-objs = hns3_enet.o hns3_ethtool.o
+
+hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
new file mode 100644
index 000000000000..3e9203ea42a6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_MBX_H
+#define __HCLGE_MBX_H
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define HCLGE_MBX_VF_MSG_DATA_NUM 16
+
+enum HCLGE_MBX_OPCODE {
+ HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */
+ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */
+ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */
+ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */
+ HCLGE_MBX_MAP_RING_TO_VECTOR, /* (VF -> PF) map ring-to-vector */
+	HCLGE_MBX_UNMAP_RING_TO_VECTOR,	/* (VF -> PF) unmap ring-to-vector */
+ HCLGE_MBX_SET_PROMISC_MODE, /* (VF -> PF) set promiscuous mode */
+ HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */
+ HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */
+ HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */
+ HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */
+ HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
+ HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
+ HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
+	HCLGE_MBX_PF_VF_RESP,		/* (PF -> VF) generate response to VF */
+ HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
+ HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
+ HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
+ HCLGE_MBX_SET_AESTART, /* (VF -> PF) start ae */
+ HCLGE_MBX_SET_TSOSTATS, /* (VF -> PF) get tso stats */
+ HCLGE_MBX_LINK_STAT_CHANGE, /* (PF -> VF) link status has changed */
+ HCLGE_MBX_GET_BASE_CONFIG, /* (VF -> PF) get config */
+ HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
+ HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
+ HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+};
+
+/* below are per-VF mac-vlan subcodes */
+enum hclge_mbx_mac_vlan_subcode {
+ HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0, /* modify UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_ADD, /* add a new UC mac addr */
+	HCLGE_MBX_MAC_VLAN_UC_REMOVE,	/* remove a UC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
+};
+
+/* below are per-VF vlan cfg subcodes */
+enum hclge_mbx_vlan_cfg_subcode {
+ HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
+ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
+ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
+};
+
+#define HCLGE_MBX_MAX_MSG_SIZE 16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
+
+struct hclgevf_mbx_resp_status {
+ struct mutex mbx_mutex; /* protects against contending sync cmd resp */
+ u32 origin_mbx_msg;
+ bool received_resp;
+ int resp_status;
+ u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
+struct hclge_mbx_vf_to_pf_cmd {
+ u8 rsv;
+ u8 mbx_src_vfid; /* Auto filled by IMP */
+ u8 rsv1[2];
+ u8 msg_len;
+ u8 rsv2[3];
+ u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
+};
+
+struct hclge_mbx_pf_to_vf_cmd {
+ u8 dest_vfid;
+ u8 rsv[3];
+ u8 msg_len;
+ u8 rsv1[3];
+ u16 msg[8];
+};
+
+#define hclge_mbx_ring_ptr_move_crq(crq) \
+ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#endif
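For orientation, here is a sketch of how a VF-to-PF request might be packed into the descriptor defined above. Only the structure layout comes from this header; the convention of putting the opcode and sub-code in msg[0]/msg[1], the field order, and the helper name are assumptions for illustration only.

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical example: build a "set VLAN filter" request. Field placement
 * inside msg[] is assumed, not taken from this patch.
 */
static void example_build_vlan_filter_req(struct hclge_mbx_vf_to_pf_cmd *req,
					  bool enable, u16 vlan_id, __be16 proto)
{
	memset(req, 0, sizeof(*req));

	req->msg[0] = HCLGE_MBX_SET_VLAN;	/* mailbox opcode */
	req->msg[1] = HCLGE_MBX_VLAN_FILTER;	/* per-opcode sub-code */
	req->msg[2] = enable;
	memcpy(&req->msg[3], &vlan_id, sizeof(vlan_id));
	memcpy(&req->msg[5], &proto, sizeof(proto));

	req->msg_len = 7;	/* bytes used; must stay <= HCLGE_MBX_MAX_MSG_SIZE */
}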
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 5bcb2238acb2..02145f2de820 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -196,9 +196,18 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0;
+ int ret = 0, lock_acquired;
+
+	/* We can get deadlocked if SRIOV is being enabled in the context of a
+	 * probe and probe gets called again in the same context. This can
+	 * happen when pci_enable_sriov() is called to create VFs from the
+	 * PF's probe context. Therefore, for simplicity, uniformly defer
+	 * further probing in all cases where we detect contention.
+	 */
+ lock_acquired = mutex_trylock(&hnae3_common_lock);
+ if (!lock_acquired)
+ return -EPROBE_DEFER;
- mutex_lock(&hnae3_common_lock);
list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
/* Check if there are matched ae_algo */
@@ -211,6 +220,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!ae_dev->ops) {
dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
+ ret = -EOPNOTSUPP;
goto out_err;
}
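The trylock-or-defer change above follows a generic pattern: a registration path that can re-enter itself (here via pci_enable_sriov() creating VFs during the PF's own probe) must not block on a lock that may already be held further up the same call chain. A minimal sketch of the pattern, with hypothetical names:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

/* Sketch: take the lock opportunistically; if it is already held (possibly by
 * ourselves, higher up the probe call chain), ask the driver core to retry
 * the probe later instead of deadlocking.
 */
static int example_register(void)
{
	if (!mutex_trylock(&example_lock))
		return -EPROBE_DEFER;

	/* ... registration work protected by example_lock ... */

	mutex_unlock(&example_lock);
	return 0;
}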
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 67c59e1039f2..fd06bc78c58e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -133,11 +133,16 @@ struct hnae3_vector_info {
#define HNAE3_RING_TYPE_B 0
#define HNAE3_RING_TYPE_TX 0
#define HNAE3_RING_TYPE_RX 1
+#define HNAE3_RING_GL_IDX_S 0
+#define HNAE3_RING_GL_IDX_M GENMASK(1, 0)
+#define HNAE3_RING_GL_RX 0
+#define HNAE3_RING_GL_TX 1
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
u32 flag;
+ u32 int_gl_idx;
};
#define HNAE3_IS_TX_RING(node) \
@@ -274,10 +279,14 @@ struct hnae3_ae_dev {
* Get firmware version
* get_mdix_mode()
 * Get media type of phy
+ * enable_vlan_filter()
+ * Enable vlan filter
* set_vlan_filter()
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
+ * enable_hw_strip_rxvtag()
+ * Enable/disable hardware strip vlan tag of packets received
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -347,7 +356,8 @@ struct hnae3_ae_ops {
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
- void (*get_regs)(struct hnae3_handle *handle, void *data);
+ void (*get_regs)(struct hnae3_handle *handle, u32 *version,
+ void *data);
int (*get_regs_len)(struct hnae3_handle *handle);
u32 (*get_rss_key_size)(struct hnae3_handle *handle);
@@ -380,12 +390,23 @@ struct hnae3_ae_ops {
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
+ void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto);
+ int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct hnae3_handle *handle,
enum hnae3_reset_type reset);
+ void (*get_channels)(struct hnae3_handle *handle,
+ struct ethtool_channels *ch);
+ void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
+ u16 *free_tqps, u16 *max_rss_size);
+ int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
+ void (*get_flowctrl_adv)(struct hnae3_handle *handle,
+ u32 *flowctrl_adv);
+ int (*set_led_id)(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status);
};
struct hnae3_dcb_ops {
@@ -435,6 +456,8 @@ struct hnae3_knic_private_info {
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
const struct hnae3_dcb_ops *dcb_ops;
+
+ u16 int_rl_setting;
};
struct hnae3_roce_private_info {
@@ -452,9 +475,10 @@ struct hnae3_unic_private_info {
struct hnae3_queue **tqp; /* array base of all TQPs of this instance */
};
-#define HNAE3_SUPPORT_MAC_LOOPBACK 1
-#define HNAE3_SUPPORT_PHY_LOOPBACK 2
-#define HNAE3_SUPPORT_SERDES_LOOPBACK 4
+#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0)
+#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1)
+#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2)
+#define HNAE3_SUPPORT_VF BIT(3)
struct hnae3_handle {
struct hnae3_client *client;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 925619a7c50a..eb82700da7d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -93,7 +93,7 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle)
{
struct net_device *dev = handle->kinfo.netdev;
- if (!handle->kinfo.dcb_ops)
+ if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF))
return;
dev->dcbnl_ops = &hns3_dcbnl_ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 59415090ff0f..601b6295d3f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -52,6 +52,8 @@ static const struct pci_device_id hns3_pci_tbl[] = {
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
/* required last entry */
{0, }
};
@@ -156,43 +158,68 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
napi_disable(&tqp_vector->napi);
}
-static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
- u32 gl_value)
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value)
{
- /* this defines the configuration for GL (Interrupt Gap Limiter)
- * GL defines inter interrupt gap.
- * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
- */
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
-}
+ u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
-static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
- u32 rl_value)
-{
/* this defines the configuration for RL (Interrupt Rate Limiter).
* Rl defines rate of interrupts i.e. number of interrupts-per-second
* GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
*/
- writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+
+ if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
+ !tqp_vector->rx_group.gl_adapt_enable)
+ /* According to the hardware, the range of rl_reg is
+ * 0-59 and the unit is 4.
+ */
+ rl_reg |= HNS3_INT_RL_ENABLE_MASK;
+
+ writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+}
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
-static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
{
+ struct hnae3_handle *h = priv->ae_handle;
+
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
*/
- /* Default :enable interrupt coalesce */
- tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+ /* Default: enable interrupt coalescing self-adaptive and GL */
+ tqp_vector->tx_group.gl_adapt_enable = 1;
+ tqp_vector->rx_group.gl_adapt_enable = 1;
+
tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
- hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
- /* for now we are disabling Interrupt RL - we
- * will re-enable later
- */
- hns3_set_vector_coalesc_rl(tqp_vector, 0);
+ tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tqp_vector->tx_group.int_gl);
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ tqp_vector->rx_group.int_gl);
+
+ /* Default: disable RL */
+ h->kinfo.int_rl_setting = 0;
+ hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+
tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
}
@@ -245,6 +272,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
goto out_start_err;
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
return 0;
out_start_err:
@@ -284,6 +313,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
const struct hnae3_ae_ops *ops;
int i;
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -721,6 +753,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
+static int hns3_fill_desc_vtags(struct sk_buff *skb,
+ struct hns3_enet_ring *tx_ring,
+ u32 *inner_vlan_flag,
+ u32 *out_vlan_flag,
+ u16 *inner_vtag,
+ u16 *out_vtag)
+{
+#define HNS3_TX_VLAN_PRIO_SHIFT 13
+
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ !(tx_ring->tqp->handle->kinfo.netdev->features &
+ NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* When HW VLAN acceleration is turned off, and the stack
+		 * sets the protocol to 802.1q, the driver just needs to
+ * set the protocol to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_tag;
+
+ vlan_tag = skb_vlan_tag_get(skb);
+ vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
+
+ /* Based on hw strategy, use out_vtag in two layer tag case,
+ * and use inner_vtag in one tag case.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+ *out_vtag = vlan_tag;
+ } else {
+ hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ *inner_vtag = vlan_tag;
+ }
+ } else if (skb->protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ rc = skb_cow_head(skb, 0);
+ if (rc < 0)
+ return rc;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
+ << HNS3_TX_VLAN_PRIO_SHIFT);
+ }
+
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+}
+
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
enum hns_desc_type type)
@@ -731,6 +815,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
u16 bdtp_fe_sc_vld_ra_ri = 0;
u32 type_cs_vlan_tso = 0;
struct sk_buff *skb;
+ u16 inner_vtag = 0;
+ u16 out_vtag = 0;
u32 paylen = 0;
u16 mss = 0;
__be16 protocol;
@@ -754,15 +840,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
skb = (struct sk_buff *)priv;
paylen = skb->len;
+ ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec,
+ &inner_vtag, &out_vtag);
+ if (unlikely(ret))
+ return ret;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
protocol = skb->protocol;
- /* vlan packet*/
- if (protocol == htons(ETH_P_8021Q)) {
- protocol = vlan_get_protocol(skb);
- skb->protocol = protocol;
- }
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (ret)
return ret;
@@ -788,6 +875,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
}
/* move ring pointer to next.*/
@@ -1029,25 +1118,50 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
+ netdev_features_t changed = netdev->features ^ features;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int ret;
- if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
- priv->ops.fill_desc = hns3_fill_desc_tso;
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- } else {
- priv->ops.fill_desc = hns3_fill_desc;
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+ if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ priv->ops.fill_desc = hns3_fill_desc_tso;
+ priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+ } else {
+ priv->ops.fill_desc = hns3_fill_desc;
+ priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ h->ae_algo->ops->enable_vlan_filter) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+ else
+ h->ae_algo->ops->enable_vlan_filter(h, false);
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
+ h->ae_algo->ops->enable_hw_strip_rxvtag) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
+ else
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
+
+ if (ret)
+ return ret;
}
netdev->features = features;
return 0;
}
-static void
-hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+static void hns3_nic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hnae3_handle *handle = priv->ae_handle;
struct hns3_enet_ring *ring;
unsigned int start;
unsigned int idx;
@@ -1055,6 +1169,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
u64 rx_bytes = 0;
u64 tx_pkts = 0;
u64 rx_pkts = 0;
+ u64 tx_drop = 0;
+ u64 rx_drop = 0;
+
+ if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ handle->ae_algo->ops->update_stats(handle, &netdev->stats);
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
@@ -1063,6 +1184,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
+ tx_drop += ring->stats.tx_busy;
+ tx_drop += ring->stats.sw_err_cnt;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1071,6 +1194,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
+ rx_drop += ring->stats.non_vld_descs;
+ rx_drop += ring->stats.err_pkt_len;
+ rx_drop += ring->stats.l2_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
@@ -1086,8 +1212,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
stats->tx_errors = netdev->stats.tx_errors;
- stats->rx_dropped = netdev->stats.rx_dropped;
- stats->tx_dropped = netdev->stats.tx_dropped;
+ stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
+ stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -1317,6 +1443,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+ netdev->mtu = new_mtu;
+
/* if the netdev was running earlier, bring it up again */
if (if_running && hns3_nic_net_open(netdev))
ret = -EINVAL;
@@ -1476,6 +1604,8 @@ static struct pci_driver hns3_driver = {
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1490,6 +1620,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
@@ -1503,11 +1634,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (!(h->flags & HNAE3_SUPPORT_VF))
+ netdev->hw_features |=
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2083,6 +2218,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetchw(skb->data);
+ /* Based on hw strategy, the tag offloaded will be stored at
+ * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+ * in one layer tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(vlan_tag & VLAN_VID_MASK))
+ vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
bnum = 1;
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2301,25 +2452,22 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
- u16 rx_int_gl, tx_int_gl;
- bool rx, tx;
-
- rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
- tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
- rx_int_gl = tqp_vector->rx_group.int_gl;
- tx_int_gl = tqp_vector->tx_group.int_gl;
- if (rx && tx) {
- if (rx_int_gl > tx_int_gl) {
- tqp_vector->tx_group.int_gl = rx_int_gl;
- tqp_vector->tx_group.flow_level =
- tqp_vector->rx_group.flow_level;
- hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
- } else {
- tqp_vector->rx_group.int_gl = tx_int_gl;
- tqp_vector->rx_group.flow_level =
- tqp_vector->tx_group.flow_level;
- hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
- }
+ struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+ struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
+ bool rx_update, tx_update;
+
+ if (rx_group->gl_adapt_enable) {
+ rx_update = hns3_get_new_int_gl(rx_group);
+ if (rx_update)
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ rx_group->int_gl);
+ }
+
+ if (tx_group->gl_adapt_enable) {
+ tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
+ if (tx_update)
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tx_group->int_gl);
}
}
@@ -2380,6 +2528,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
+ hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2395,6 +2545,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
+ hnae_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2406,6 +2560,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
+ hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2419,6 +2575,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
+ hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+
cur_chain = chain;
rx_ring = rx_ring->next;
@@ -2507,7 +2666,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector->rx_group.total_packets = 0;
tqp_vector->tx_group.total_bytes = 0;
tqp_vector->tx_group.total_packets = 0;
- hns3_vector_gl_rl_init(tqp_vector);
+ hns3_vector_gl_rl_init(tqp_vector, priv);
tqp_vector->handle = h;
ret = hns3_get_vector_ring_chain(tqp_vector,
@@ -2649,6 +2808,19 @@ err:
return ret;
}
+static void hns3_put_ring_config(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
+ }
+ devm_kfree(priv->dev, priv->ring_data);
+}
+
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
int ret;
@@ -2785,8 +2957,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
h->ae_algo->ops->reset_queue(h, i);
hns3_fini_ring(priv->ring_data[i].ring);
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
}
+ devm_kfree(priv->dev, priv->ring_data);
return 0;
}
@@ -3142,8 +3318,8 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
switch (type) {
case HNAE3_UP_CLIENT:
- ret = hns3_reset_notify_up_enet(handle);
- break;
+ ret = hns3_reset_notify_up_enet(handle);
+ break;
case HNAE3_DOWN_CLIENT:
ret = hns3_reset_notify_down_enet(handle);
break;
@@ -3160,6 +3336,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
return ret;
}
+static u16 hns3_get_max_available_channels(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u16 free_tqps, max_rss_size, max_tqps;
+
+ h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
+ max_tqps = h->kinfo.num_tc * max_rss_size;
+
+ return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+}
+
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret;
+
+ ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
+ if (ret)
+ return ret;
+
+ ret = hns3_get_ring_config(priv);
+ if (ret)
+ return ret;
+
+ ret = hns3_nic_init_vector_data(priv);
+ if (ret)
+ goto err_uninit_vector;
+
+ ret = hns3_init_all_ring(priv);
+ if (ret)
+ goto err_put_ring;
+
+ return 0;
+
+err_put_ring:
+ hns3_put_ring_config(priv);
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+ return ret;
+}
+
+static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
+{
+ return (new_tqp_num / num_tc) * num_tc;
+}
+
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ bool if_running = netif_running(netdev);
+ u32 new_tqp_num = ch->combined_count;
+ u16 org_tqp_num;
+ int ret;
+
+ if (ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
+ new_tqp_num < kinfo->num_tc) {
+ dev_err(&netdev->dev,
+ "Change tqps fail, the tqp range is from %d to %d",
+ kinfo->num_tc,
+ hns3_get_max_available_channels(netdev));
+ return -EINVAL;
+ }
+
+ new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
+ if (kinfo->num_tqps == new_tqp_num)
+ return 0;
+
+ if (if_running)
+ dev_close(netdev);
+
+ hns3_clear_all_ring(h);
+
+ ret = hns3_nic_uninit_vector_data(priv);
+ if (ret) {
+ dev_err(&netdev->dev,
+ "Unbind vector with tqp fail, nothing is changed");
+ goto open_netdev;
+ }
+
+ hns3_uninit_all_ring(priv);
+
+ org_tqp_num = h->kinfo.num_tqps;
+ ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+ if (ret) {
+ ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+ if (ret) {
+ /* If revert to old tqp failed, fatal error occurred */
+ dev_err(&netdev->dev,
+ "Revert to old tqp num fail, ret=%d", ret);
+ return ret;
+ }
+ dev_info(&netdev->dev,
+ "Change tqp num fail, Revert to old tqp num");
+ }
+
+open_netdev:
+ if (if_running)
+ dev_open(netdev);
+
+ return ret;
+}
+
static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
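For reference, a small sketch of the VLAN TCI that hns3_fill_desc_vtags() above composes for the single-tag case: the VLAN ID from the skb plus the 3-bit skb->priority placed at bit 13 (HNS3_TX_VLAN_PRIO_SHIFT). The numbers are illustrative only; this is not driver code.

#include <linux/types.h>

/* Sketch: TCI = PCP (3 bits at bit 13) | DEI (bit 12, unused here) | VID (12 bits) */
static u16 example_build_tx_vtag(u16 vid, u32 priority)
{
	u16 vlan_tag = vid & 0xFFF;		/* e.g. VID 100 */

	vlan_tag |= (priority & 0x7) << 13;	/* e.g. priority 5 -> 0xA000 */

	return vlan_tag;			/* 100 | (5 << 13) == 0xA064 */
}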
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 8a9de759957b..213f501b30bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -451,10 +451,14 @@ enum hns3_link_mode_bits {
HNS3_LM_COUNT = 15
};
-#define HNS3_INT_GL_50K 0x000A
-#define HNS3_INT_GL_20K 0x0019
-#define HNS3_INT_GL_18K 0x001B
-#define HNS3_INT_GL_8K 0x003E
+#define HNS3_INT_GL_MAX 0x1FE0
+#define HNS3_INT_GL_50K 0x0014
+#define HNS3_INT_GL_20K 0x0032
+#define HNS3_INT_GL_18K 0x0036
+#define HNS3_INT_GL_8K 0x007C
+
+#define HNS3_INT_RL_MAX 0x00EC
+#define HNS3_INT_RL_ENABLE_MASK 0x40
struct hns3_enet_ring_group {
/* array of pointers to rings */
@@ -464,6 +468,7 @@ struct hns3_enet_ring_group {
u16 count;
enum hns3_flow_level_range flow_level;
u16 int_gl;
+ u8 gl_adapt_enable;
};
struct hns3_enet_tqp_vector {
@@ -594,7 +599,15 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
+#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
+
+#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
+
void hns3_ethtool_set_ops(struct net_device *netdev);
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch);
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
@@ -604,6 +617,13 @@ int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value);
+
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
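A quick worked example of the coalescing conversions declared above, assuming only the macros in this header: GL registers count 2 us units and RL registers count 4 us units, so requested microsecond values are rounded down before being programmed.

/* Sketch: convert user-requested microsecond values into the register units
 * consumed by hns3_set_vector_coalesce_*_gl()/_rl(). Values are illustrative.
 */
static void example_coalesce_conversion(void)
{
	u32 gl_usecs = 51;	/* e.g. requested rx-usecs */
	u32 rl_usecs = 50;	/* e.g. requested rx-usecs-high */
	u32 gl, gl_reg, rl, rl_reg;

	gl = hns3_gl_round_down(gl_usecs);	/* 50: must be a multiple of 2 */
	gl_reg = hns3_gl_usec_to_reg(gl);	/* 25: register counts 2 us steps */

	rl = hns3_rl_round_down(rl_usecs);	/* 48: must be a multiple of 4 */
	rl_reg = hns3_rl_usec_to_reg(rl);	/* 12: register counts 4 us steps */

	(void)gl_reg;
	(void)rl_reg;
}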
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a21470c72da3..b034c7f24eda 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -15,26 +15,25 @@
struct hns3_stats {
char stats_string[ETH_GSTRING_LEN];
- int stats_size;
int stats_offset;
};
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
.stats_string = _string, \
- .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \
- .stats_offset = offsetof(struct hns3_enet_ring, stats), \
-} \
+ .stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+ offsetof(struct ring_stats, _member), \
+}
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
- HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt),
- HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("tx_pkts", tx_pkts),
- HNS3_TQP_STAT("tx_bytes", tx_bytes),
- HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt),
- HNS3_TQP_STAT("tx_restart_queue", restart_queue),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", tx_pkts),
+ HNS3_TQP_STAT("bytes", tx_bytes),
+ HNS3_TQP_STAT("errors", tx_err_cnt),
+ HNS3_TQP_STAT("tx_wake", restart_queue),
HNS3_TQP_STAT("tx_busy", tx_busy),
};
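The reworked HNS3_TQP_STAT() above folds the ring->stats offset and the member offset into one displacement, so a counter can be read with a single byte offset from the ring pointer; the stats getter later in this file relies on exactly that. A minimal sketch of that lookup, mirroring the existing code:

/* Sketch: how a stats_offset produced by HNS3_TQP_STAT() is consumed.
 * With both offsetof() terms folded in, one addition reaches the counter.
 */
static u64 example_read_tqp_stat(struct hns3_enet_ring *ring,
				 const struct hns3_stats *entry)
{
	u8 *stat = (u8 *)ring + entry->stats_offset;

	return *(u64 *)stat;
}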
@@ -42,18 +41,18 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
- HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt),
- HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("rx_pkts", rx_pkts),
- HNS3_TQP_STAT("rx_bytes", rx_bytes),
- HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt),
- HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt),
- HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len),
- HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs),
- HNS3_TQP_STAT("rx_err_bd_num", err_bd_num),
- HNS3_TQP_STAT("rx_l2_err", l2_err),
- HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", rx_pkts),
+ HNS3_TQP_STAT("bytes", rx_bytes),
+ HNS3_TQP_STAT("errors", rx_err_cnt),
+ HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
+ HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
+ HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
+ HNS3_TQP_STAT("err_bd_num", err_bd_num),
+ HNS3_TQP_STAT("l2_err", l2_err),
+ HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -389,9 +388,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
}
static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
- u32 stat_count, u32 num_tqps)
+ u32 stat_count, u32 num_tqps, const char *prefix)
{
-#define MAX_PREFIX_SIZE (8 + 4)
+#define MAX_PREFIX_SIZE (6 + 4)
u32 size_left;
u32 i, j;
u32 n1;
@@ -401,7 +400,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i);
+ n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
+ prefix, i);
n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
size_left = (ETH_GSTRING_LEN - 1) - n1;
@@ -417,14 +417,16 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ const char tx_prefix[] = "txq";
+ const char rx_prefix[] = "rxq";
/* get strings for Tx */
data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, tx_prefix);
/* get strings for Rx */
data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, rx_prefix);
return data;
}
@@ -455,13 +457,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hns3_enet_ring *ring;
u8 *stat;
- u32 i;
+ int i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i].ring;
- for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_txq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
@@ -469,8 +471,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Rx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
- for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
@@ -559,10 +561,23 @@ static void hns3_get_pauseparam(struct net_device *netdev,
&param->rx_pause, &param->tx_pause);
}
+static int hns3_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *param)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->set_pauseparam)
+ return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
+ param->rx_pause,
+ param->tx_pause);
+ return -EOPNOTSUPP;
+}
+
static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 flowctrl_adv = 0;
u32 supported_caps;
u32 advertised_caps;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
@@ -638,6 +653,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
if (!cmd->base.autoneg)
advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
+ advertised_caps &= ~HNS3_LM_PAUSE_BIT;
+
/* now, map driver link modes to ethtool link modes */
hns3_driv_to_eth_caps(supported_caps, cmd, false);
hns3_driv_to_eth_caps(advertised_caps, cmd, true);
@@ -650,6 +667,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
/* 4.mdio_support */
cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
+	/* 5.get flow control settings */
+ if (h->ae_algo->ops->get_flowctrl_adv)
+ h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
+
+ if (flowctrl_adv & ADVERTISED_Pause)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
+
+ if (flowctrl_adv & ADVERTISED_Asym_Pause)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+
return 0;
}
@@ -730,7 +759,7 @@ static int hns3_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
+ cmd->data = h->kinfo.rss_size;
break;
case ETHTOOL_GRXFH:
return h->ae_algo->ops->get_rss_tuple(h, cmd);
@@ -849,6 +878,241 @@ static int hns3_nway_reset(struct net_device *netdev)
return genphy_restart_aneg(phy);
}
+static void hns3_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_channels)
+ h->ae_algo->ops->get_channels(h, ch);
+}
+
+static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *cmd)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ if (queue >= queue_num) {
+ netdev_err(netdev,
+ "Invalid queue value %d! Queue max id=%d\n",
+ queue, queue_num - 1);
+ return -EINVAL;
+ }
+
+ tx_vector = priv->ring_data[queue].ring->tqp_vector;
+ rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+ cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable;
+
+ cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl;
+ cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl;
+
+ cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+
+ return 0;
+}
+
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ return hns3_get_coalesce_per_queue(netdev, 0, cmd);
+}
+
+static int hns3_check_gl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rx_gl, tx_gl;
+
+ if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
+ if (rx_gl != cmd->rx_coalesce_usecs) {
+ netdev_info(netdev,
+ "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ cmd->rx_coalesce_usecs, rx_gl);
+ }
+
+ tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
+ if (tx_gl != cmd->tx_coalesce_usecs) {
+ netdev_info(netdev,
+ "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ cmd->tx_coalesce_usecs, tx_gl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_rl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rl;
+
+ if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
+ netdev_err(netdev,
+ "tx_usecs_high must be same as rx_usecs_high.\n");
+ return -EINVAL;
+ }
+
+ if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
+ netdev_err(netdev,
+ "Invalid usecs_high value, usecs_high range is 0-%d\n",
+ HNS3_INT_RL_MAX);
+ return -EINVAL;
+ }
+
+ rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ if (rl != cmd->rx_coalesce_usecs_high) {
+ netdev_info(netdev,
+ "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+ cmd->rx_coalesce_usecs_high, rl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ int ret;
+
+ ret = hns3_check_gl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check gl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hns3_check_rl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check rl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ if (cmd->use_adaptive_tx_coalesce == 1 ||
+ cmd->use_adaptive_rx_coalesce == 1) {
+ netdev_info(netdev,
+ "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+ cmd->use_adaptive_tx_coalesce,
+ cmd->use_adaptive_rx_coalesce);
+ }
+
+ return 0;
+}
+
+static void hns3_set_coalesce_per_queue(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ u32 queue)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int queue_num = h->kinfo.num_tqps;
+
+ tx_vector = priv->ring_data[queue].ring->tqp_vector;
+ rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+ tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs;
+ rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs;
+
+ hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl);
+ hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl);
+
+ hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
+ hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+}
+
+static int hns3_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret;
+ int i;
+
+ ret = hns3_check_coalesce_para(netdev, cmd);
+ if (ret)
+ return ret;
+
+ h->kinfo.int_rl_setting =
+ hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+
+ for (i = 0; i < queue_num; i++)
+ hns3_set_coalesce_per_queue(netdev, cmd, i);
+
+ return 0;
+}
+
+static int hns3_get_regs_len(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_regs_len(h);
+}
+
+static void hns3_get_regs(struct net_device *netdev,
+ struct ethtool_regs *cmd, void *data)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs)
+ return;
+
+ h->ae_algo->ops->get_regs(h, &cmd->version, data);
+}
+
+static int hns3_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_led_id(h, state);
+}
+
+static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .get_drvinfo = hns3_get_drvinfo,
+ .get_ringparam = hns3_get_ringparam,
+ .set_ringparam = hns3_set_ringparam,
+ .get_strings = hns3_get_strings,
+ .get_ethtool_stats = hns3_get_stats,
+ .get_sset_count = hns3_get_sset_count,
+ .get_rxnfc = hns3_get_rxnfc,
+ .get_rxfh_key_size = hns3_get_rss_key_size,
+ .get_rxfh_indir_size = hns3_get_rss_indir_size,
+ .get_rxfh = hns3_get_rss,
+ .set_rxfh = hns3_set_rss,
+ .get_link_ksettings = hns3_get_link_ksettings,
+ .get_channels = hns3_get_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+};
+
static const struct ethtool_ops hns3_ethtool_ops = {
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
@@ -856,6 +1120,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
.get_pauseparam = hns3_get_pauseparam,
+ .set_pauseparam = hns3_set_pauseparam,
.get_strings = hns3_get_strings,
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
@@ -868,9 +1133,21 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
+ .get_channels = hns3_get_channels,
+ .set_channels = hns3_set_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
+ .set_phys_id = hns3_set_phys_id,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &hns3_ethtool_ops;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->flags & HNAE3_SUPPORT_VF)
+ netdev->ethtool_ops = &hns3vf_ethtool_ops;
+ else
+ netdev->ethtool_ops = &hns3_ethtool_ops;
}
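
A minimal stand-alone sketch (not part of the patch) of the rounding behaviour enforced by hns3_check_gl_coalesce_para() and hns3_check_rl_coalesce_para() above, assuming hns3_gl_round_down() and hns3_rl_round_down() simply round down to a multiple of 2 and 4 respectively:

	/* Illustration only; helper names and semantics are assumptions based
	 * on the netdev_info() messages in the patch above.
	 */
	#include <stdio.h>

	static unsigned int gl_round_down(unsigned int usecs)
	{
		return usecs & ~0x1U;	/* GL values must be a multiple of 2 */
	}

	static unsigned int rl_round_down(unsigned int usecs)
	{
		return usecs & ~0x3U;	/* RL values must be a multiple of 4 */
	}

	int main(void)
	{
		/* 25us rounds down to 24us, and 102us (usecs_high) to 100us,
		 * matching what the driver would report via netdev_info().
		 */
		printf("tx_usecs    25 -> %u\n", gl_round_down(25));
		printf("usecs_high 102 -> %u\n", rl_round_down(102));
		return 0;
	}
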
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index d2b20d01a58c..cb8ddd043476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0+
#
# Makefile for the HISILICON network device drivers.
#
@@ -5,11 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
-
-obj-$(CONFIG_HNS3_ENET) += hns3.o
-hns3-objs = hns3_enet.o hns3_ethtool.o
-
-hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index ce5ed8845042..3fd10a6bec53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -102,6 +102,10 @@ enum hclge_opcode_type {
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
HCLGE_OPC_STATS_MAC = 0x0032,
+
+ HCLGE_OPC_QUERY_REG_NUM = 0x0040,
+ HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
+ HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
/* Device management command */
/* MAC commond */
@@ -111,6 +115,7 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314,
/* MACSEC command */
/* PFC/Pause CMD*/
@@ -180,6 +185,10 @@ enum hclge_opcode_type {
/* Promisuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
+ /* Vlan offload command */
+ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+ HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
/* Interrupts cmd */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
@@ -191,6 +200,7 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+ HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
/* Multicast linear table cmd */
HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
@@ -218,6 +228,9 @@ enum hclge_opcode_type {
/* Mailbox cmd */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
+
+ /* Led command */
+ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -399,6 +412,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
#define HCLGE_CFG_DEFAULT_SPEED_S 16
#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
+#define HCLGE_CFG_RSS_SIZE_S 24
+#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
struct hclge_cfg_param_cmd {
__le32 offset;
@@ -549,8 +564,6 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
-#define HCLGE_MAC_MIN_MTU 64
-#define HCLGE_MAC_MAX_MTU 9728
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
@@ -587,6 +600,37 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
+#define HCLGE_VLAN_MASK_EN_B 0x0
+struct hclge_mac_vlan_mask_entry_cmd {
+ u8 rsv0[2];
+ u8 vlan_mask;
+ u8 rsv1;
+ u8 mac_mask[6];
+ u8 rsv2[14];
+};
+
+#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
+#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1)
+#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2)
+#define HCLGE_MAC_ETHERTYPE_LLDP 0x88cc
+
+struct hclge_mac_mgr_tbl_entry_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ __le32 mac_addr_hi32;
+ __le16 mac_addr_lo16;
+ __le16 rsv1;
+ __le16 ethter_type;
+ __le16 egress_port;
+ __le16 egress_queue;
+ u8 sw_port_id_aware;
+ u8 rsv2;
+ u8 i_port_bitmap;
+ u8 i_port_direction;
+ u8 rsv3[2];
+};
+
#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
#define HCLGE_CFG_MTA_MAC_EN_B 0x7
@@ -658,6 +702,47 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
+#define HCLGE_ACCEPT_TAG_B 0
+#define HCLGE_ACCEPT_UNTAG_B 1
+#define HCLGE_PORT_INS_TAG1_EN_B 2
+#define HCLGE_PORT_INS_TAG2_EN_B 3
+#define HCLGE_CFG_NIC_ROCE_SEL_B 4
+struct hclge_vport_vtag_tx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[2];
+ __le16 def_vlan_tag1;
+ __le16 def_vlan_tag2;
+ u8 vf_bitmap[8];
+ u8 rsv2[8];
+};
+
+#define HCLGE_REM_TAG1_EN_B 0
+#define HCLGE_REM_TAG2_EN_B 1
+#define HCLGE_SHOW_TAG1_EN_B 2
+#define HCLGE_SHOW_TAG2_EN_B 3
+struct hclge_vport_vtag_rx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[6];
+ u8 vf_bitmap[8];
+ u8 rsv2[8];
+};
+
+struct hclge_tx_vlan_type_cfg_cmd {
+ __le16 ot_vlan_type;
+ __le16 in_vlan_type;
+ u8 rsv[20];
+};
+
+struct hclge_rx_vlan_type_cfg_cmd {
+ __le16 ot_fst_vlan_type;
+ __le16 ot_sec_vlan_type;
+ __le16 in_fst_vlan_type;
+ __le16 in_sec_vlan_type;
+ u8 rsv[16];
+};
+
struct hclge_cfg_com_tqp_queue_cmd {
__le16 tqp_id;
__le16 stream_id;
@@ -726,6 +811,23 @@ struct hclge_reset_cmd {
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
+#define HCLGE_LED_PORT_SPEED_STATE_S 0
+#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0)
+#define HCLGE_LED_ACTIVITY_STATE_S 0
+#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0)
+#define HCLGE_LED_LINK_STATE_S 0
+#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0)
+#define HCLGE_LED_LOCATE_STATE_S 0
+#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
+
+struct hclge_set_led_state_cmd {
+ u8 port_speed_led_config;
+ u8 link_led_config;
+ u8 activity_led_config;
+ u8 locate_led_config;
+ u8 rsv[20];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
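
A short sketch of how a field such as the new HCLGE_CFG_RSS_SIZE_S/HCLGE_CFG_RSS_SIZE_M pair above is typically consumed; hclge_parse_cfg() in hclge_main.c below does this through hnae_get_field(), and the mask-then-shift helper here is an assumption rather than the driver's own code:

	#include <stdio.h>

	#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))
	#define CFG_RSS_SIZE_S	24
	#define CFG_RSS_SIZE_M	GENMASK(31, 24)

	/* mask-then-shift, the usual pattern behind hnae_get_field() (assumed) */
	static unsigned int get_field(unsigned int word, unsigned int mask,
				      unsigned int shift)
	{
		return (word & mask) >> shift;
	}

	int main(void)
	{
		unsigned int param3 = 0x40170000;	/* hypothetical config word */

		/* bits 31:24 hold rss_size_max: 0x40 -> 64 */
		printf("rss_size_max = %u\n",
		       get_field(param3, CFG_RSS_SIZE_M, CFG_RSS_SIZE_S));
		return 0;
	}
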
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 59ed806a52c3..32bc6f68e297 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -17,10 +17,12 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-
+#include <linux/if_vlan.h>
+#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
+#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"
@@ -34,8 +36,10 @@
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
enum hclge_mta_dmac_sel_type mta_mac_sel,
bool enable);
+static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static int hclge_update_led_status(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -278,8 +282,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
{"mac_tx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
+ {"mac_tx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
{"mac_tx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
{"mac_tx_65_127_oct_pkt_num",
@@ -292,8 +296,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
{"mac_tx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
+ {"mac_tx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
+ {"mac_tx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
+ {"mac_tx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
+ {"mac_tx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
+ {"mac_tx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
+ {"mac_tx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
+ {"mac_tx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
+ {"mac_tx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
+ {"mac_tx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
{"mac_rx_total_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
{"mac_rx_total_oct_num",
@@ -314,8 +334,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
{"mac_rx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
+ {"mac_rx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
{"mac_rx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
{"mac_rx_65_127_oct_pkt_num",
@@ -328,33 +348,59 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
{"mac_rx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
-
- {"mac_trans_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
- {"mac_trans_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
- {"mac_trans_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
- {"mac_trans_err_all_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
- {"mac_trans_from_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
- {"mac_trans_from_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
- {"mac_rcv_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
- {"mac_rcv_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
- {"mac_rcv_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
- {"mac_rcv_fcs_err_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
- {"mac_rcv_send_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
- {"mac_rcv_send_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
+ {"mac_rx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
+ {"mac_rx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
+ {"mac_rx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
+ {"mac_rx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
+ {"mac_rx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
+ {"mac_rx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
+ {"mac_rx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
+ {"mac_rx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
+ {"mac_rx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
+
+ {"mac_tx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
+ {"mac_tx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
+ {"mac_tx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
+ {"mac_tx_err_all_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
+ {"mac_tx_from_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
+ {"mac_tx_from_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
+ {"mac_rx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
+ {"mac_rx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
+ {"mac_rx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
+ {"mac_rx_fcs_err_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
+ {"mac_rx_send_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
+ {"mac_rx_send_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
+};
+
+static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
+ {
+ .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
+ .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
+ .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
+ .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+ .i_port_bitmap = 0x1,
+ },
};
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
@@ -460,9 +506,41 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
return 0;
}
+static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
+{
+ struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
+ struct hclge_desc desc;
+ __le64 *desc_data;
+ int ret;
+
+	/* for fiber port, need to query the total rx/tx packet statistics,
+	 * which are used for checking data transfer.
+	 */
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return 0;
+
+ if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get MAC total pkt stats fail, ret = %d\n", ret);
+
+ return ret;
+ }
+
+ desc_data = (__le64 *)(&desc.data[0]);
+ mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
+ mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);
+
+ return 0;
+}
+
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
-#define HCLGE_MAC_CMD_NUM 17
+#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4
u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
@@ -524,7 +602,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -544,7 +622,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
return 0;
@@ -586,7 +664,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -594,7 +672,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -642,23 +720,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
- net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
- net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
- net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->rx_length_errors =
hw_stats->mac_stats.mac_rx_undersize_pkt_num;
net_stats->rx_length_errors +=
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_over_errors =
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -698,6 +775,9 @@ static void hclge_update_stats(struct hnae3_handle *handle,
struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
int status;
+ if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return;
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -723,6 +803,8 @@ static void hclge_update_stats(struct hnae3_handle *handle,
status);
hclge_update_netstat(hw_stats, net_stats);
+
+ clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
@@ -981,6 +1063,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
HCLGE_CFG_DEFAULT_SPEED_M,
HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
+
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1058,7 +1144,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
- hdev->rss_size_max = 1;
+ hdev->rss_size_max = cfg.rss_size_max;
hdev->rx_buf_len = cfg.rx_buf_len;
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
@@ -1095,10 +1181,7 @@ static int hclge_configure(struct hclge_dev *hdev)
for (i = 0; i < hdev->tm_info.num_tc; i++)
hnae_set_bit(hdev->hw_tc_map, i, 1);
- if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
- else
- hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
return ret;
}
@@ -2132,28 +2215,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
return 0;
}
-static int hclge_query_autoneg_result(struct hclge_dev *hdev)
-{
- struct hclge_mac *mac = &hdev->hw.mac;
- struct hclge_query_an_speed_dup_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "autoneg result query cmd failed %d.\n", ret);
- return ret;
- }
-
- mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
-
- return 0;
-}
-
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
struct hclge_config_auto_neg_cmd *req;
@@ -2189,15 +2250,45 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
- hclge_query_autoneg_result(hdev);
+ if (phydev)
+ return phydev->autoneg;
return hdev->hw.mac.autoneg;
}
+static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
+ bool mask_vlan,
+ u8 *mac_mask)
+{
+ struct hclge_mac_vlan_mask_entry_cmd *req;
+ struct hclge_desc desc;
+ int status;
+
+ req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
+
+ hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+ mask_vlan ? 1 : 0);
+ ether_addr_copy(req->mac_mask, mac_mask);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
+ status);
+
+ return status;
+}
+
static int hclge_mac_init(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
+ u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ int mtu;
int ret;
ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2223,7 +2314,45 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
- return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+ ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mta filter mode fail ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set default mac_vlan_mask fail ret=%d\n", ret);
+ return ret;
+ }
+
+ if (netdev)
+ mtu = netdev->mtu;
+ else
+ mtu = ETH_DATA_LEN;
+
+ ret = hclge_set_mtu(handle, mtu);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclge_reset_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->rst_service_task);
}
static void hclge_task_schedule(struct hclge_dev *hdev)
@@ -2350,6 +2479,7 @@ static void hclge_service_timer(struct timer_list *t)
struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
mod_timer(&hdev->service_timer, jiffies + HZ);
+ hdev->hw_stats.stats_timer++;
hclge_task_schedule(hdev);
}
@@ -2362,6 +2492,64 @@ static void hclge_service_complete(struct hclge_dev *hdev)
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+ u32 rst_src_reg;
+ u32 cmdq_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+
+	/* Assumption: if by any chance reset and mailbox events are reported
+	 * together then we will only process the reset event in this go and
+	 * defer the processing of the mailbox events. Since we will not have
+	 * cleared the RX CMDQ event this time, we will receive another
+	 * interrupt from the H/W just for the mailbox.
+	 */
+
+ /* check for vector0 reset event sources */
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGE_VECTOR0_EVENT_MBX;
+ }
+
+ return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+ u32 regclr)
+{
+ switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
+ break;
+ }
+}
+
static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
writel(enable ? 1 : 0, vector->addr);
@@ -2370,10 +2558,38 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ u32 event_cause;
+ u32 clearval;
hclge_enable_vector(&hdev->misc_vector, false);
- if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->service_task);
+ event_cause = hclge_check_event_cause(hdev, &clearval);
+
+	/* vector 0 interrupt is shared with reset and mailbox source events. */
+ switch (event_cause) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_reset_task_schedule(hdev);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+		/* If we are here then either:
+		 * 1. we are not handling any mbx task and none is scheduled,
+		 *    or
+		 * 2. we are handling an mbx task but nothing more is
+		 *    scheduled.
+		 * In both cases we should schedule the mbx task, as there are
+		 * more mbx messages reported by this interrupt.
+		 */
+		hclge_mbx_task_schedule(hdev);
+		break;
+
+ default:
+ dev_dbg(&hdev->pdev->dev,
+ "received unknown or unhandled event of vector0\n");
+ break;
+ }
+
+ /* we should clear the source of interrupt */
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+ hclge_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
@@ -2404,9 +2620,9 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
hclge_get_misc_vector(hdev);
- ret = devm_request_irq(&hdev->pdev->dev,
- hdev->misc_vector.vector_irq,
- hclge_misc_irq_handle, 0, "hclge_misc", hdev);
+	/* this is freed explicitly in hclge_misc_irq_uninit() */
+ ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+ 0, "hclge_misc", hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -2416,6 +2632,12 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
return ret;
}
+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclge_free_vector(hdev, 0);
+}
+
static int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
@@ -2471,12 +2693,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
cnt++;
}
- /* must clear reset status register to
- * prevent driver detect reset interrupt again
- */
- reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
-
if (cnt >= HCLGE_RESET_WAIT_CNT) {
dev_warn(&hdev->pdev->dev,
"Wait for reset timeout: %d\n", hdev->reset_type);
@@ -2505,12 +2721,12 @@ static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
return ret;
}
-static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
+static void hclge_do_reset(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
u32 val;
- switch (type) {
+ switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
@@ -2526,30 +2742,62 @@ static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
hclge_func_reset_cmd(hdev, 0);
+ /* schedule again to check later */
+ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&pdev->dev,
- "Unsupported reset type: %d\n", type);
+ "Unsupported reset type: %d\n", hdev->reset_type);
break;
}
}
-static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
+static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
+ unsigned long *addr)
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
- u32 rst_reg_val;
- rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_GLOBAL_RESET, addr))
rst_level = HNAE3_GLOBAL_RESET;
- else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
+ else if (test_bit(HNAE3_CORE_RESET, addr))
rst_level = HNAE3_CORE_RESET;
- else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
+ else if (test_bit(HNAE3_IMP_RESET, addr))
rst_level = HNAE3_IMP_RESET;
+ else if (test_bit(HNAE3_FUNC_RESET, addr))
+ rst_level = HNAE3_FUNC_RESET;
+
+ /* now, clear all other resets */
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
return rst_level;
}
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ /* perform reset of the stack & ae device for a client */
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+ if (!hclge_reset_wait(hdev)) {
+ rtnl_lock();
+ hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ hclge_reset_ae_dev(hdev->ae_dev);
+ hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ rtnl_unlock();
+ } else {
+ /* schedule again to check pending resets later */
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ }
+
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
static void hclge_reset_event(struct hnae3_handle *handle,
enum hnae3_reset_type reset)
{
@@ -2563,14 +2811,9 @@ static void hclge_reset_event(struct hnae3_handle *handle,
case HNAE3_FUNC_RESET:
case HNAE3_CORE_RESET:
case HNAE3_GLOBAL_RESET:
- if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
- dev_err(&hdev->pdev->dev, "Already in reset state");
- return;
- }
- hdev->reset_type = reset;
- set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
- set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->service_task);
+ /* request reset & schedule reset task */
+ set_bit(reset, &hdev->reset_request);
+ hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
@@ -2580,49 +2823,55 @@ static void hclge_reset_event(struct hnae3_handle *handle,
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
- bool do_reset;
+	/* Check if there is any ongoing reset in the hardware. This status can
+	 * be checked from reset_pending. If there is, we need to wait for the
+	 * hardware to complete the reset.
+	 *    a. If we can tell within a reasonable time that the hardware has
+	 *       fully reset, we can proceed with the driver and client reset.
+	 *    b. Otherwise, we come back later to check this status, so
+	 *       re-schedule now.
+	 */
+ hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_reset(hdev);
- do_reset = hdev->reset_type != HNAE3_NONE_RESET;
+ /* check if we got any *new* reset requests to be honored */
+ hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_do_reset(hdev);
- /* Reset is detected by interrupt */
- if (hdev->reset_type == HNAE3_NONE_RESET)
- hdev->reset_type = hclge_detected_reset_event(hdev);
+ hdev->reset_type = HNAE3_NONE_RESET;
+}
- if (hdev->reset_type == HNAE3_NONE_RESET)
+static void hclge_reset_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, rst_service_task);
+
+ if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
return;
- switch (hdev->reset_type) {
- case HNAE3_FUNC_RESET:
- case HNAE3_CORE_RESET:
- case HNAE3_GLOBAL_RESET:
- case HNAE3_IMP_RESET:
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
- if (do_reset)
- hclge_do_reset(hdev, hdev->reset_type);
- else
- set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+ hclge_reset_subtask(hdev);
- if (!hclge_reset_wait(hdev)) {
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- hclge_reset_ae_dev(hdev->ae_dev);
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
- }
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- break;
- default:
- dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
- hdev->reset_type);
- break;
- }
- hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}
-static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
+static void hclge_mailbox_service_task(struct work_struct *work)
{
- hclge_reset_subtask(hdev);
- hclge_enable_vector(&hdev->misc_vector, true);
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, mbx_service_task);
+
+ if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_service_task(struct work_struct *work)
@@ -2630,10 +2879,20 @@ static void hclge_service_task(struct work_struct *work)
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
- hclge_misc_irq_service_task(hdev);
+	/* The total rx/tx packet statistics need to be updated once per
+	 * second. Either hclge_update_stats_for_all() or
+	 * hclge_mac_get_traffic_stats() can do it.
+	 */
+ if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
+ hclge_update_stats_for_all(hdev);
+ hdev->hw_stats.stats_timer = 0;
+ } else {
+ hclge_mac_get_traffic_stats(hdev);
+ }
+
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
- hclge_update_stats_for_all(hdev);
+ hclge_update_led_status(hdev);
hclge_service_complete(hdev);
}
@@ -3174,49 +3433,53 @@ err:
return ret;
}
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
- struct hnae3_ring_chain_node *ring_chain)
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain_cmd *req;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
- int ret;
+ struct hclge_ctrl_vector_chain_cmd *req
+ = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ enum hclge_cmd_status status;
+ enum hclge_opcode_type op;
+ u16 tqp_type_and_id;
int i;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
-
- req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
+ hclge_cmd_setup_basic_desc(&desc, op, false);
req->int_vector_id = vector_id;
i = 0;
for (node = ring_chain; node; node = node->next) {
- u16 type_and_id = 0;
-
- hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
+ hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
- node->tqp_index);
- hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
+ hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
- req->vfid = vport->vport_id;
-
+ hnae_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
+ req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
+ req->vfid = vport->vport_id;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
dev_err(&hdev->pdev->dev,
"Map TQP fail, status is %d.\n",
- ret);
- return ret;
+ status);
+ return -EIO;
}
i = 0;
hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_ADD_RING_TO_VECTOR,
+ op,
false);
req->int_vector_id = vector_id;
}
@@ -3224,21 +3487,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
if (i > 0) {
req->int_cause_num = i;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ req->vfid = vport->vport_id;
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
dev_err(&hdev->pdev->dev,
- "Map TQP fail, status is %d.\n", ret);
- return ret;
+ "Map TQP fail, status is %d.\n", status);
+ return -EIO;
}
}
return 0;
}
-static int hclge_map_handle_ring_to_vector(
- struct hnae3_handle *handle, int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -3247,24 +3510,20 @@ static int hclge_map_handle_ring_to_vector(
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. ret =%d\n", vector_id);
+ "Get vector index fail. vector_id =%d\n", vector_id);
return vector_id;
}
- return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
+ return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
-static int hclge_unmap_ring_from_vector(
- struct hnae3_handle *handle, int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain_cmd *req;
- struct hnae3_ring_chain_node *node;
- struct hclge_desc desc;
- int i, vector_id;
- int ret;
+ int vector_id, ret;
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
@@ -3273,54 +3532,17 @@ static int hclge_unmap_ring_from_vector(
return vector_id;
}
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
-
- req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
- req->int_vector_id = vector_id;
-
- i = 0;
- for (node = ring_chain; node; node = node->next) {
- u16 type_and_id = 0;
-
- hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
- node->tqp_index);
- hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-
- req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
- req->vfid = vport->vport_id;
-
- if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
- req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Unmap TQP fail, status is %d.\n",
- ret);
- return ret;
- }
- i = 0;
- hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_DEL_RING_TO_VECTOR,
- false);
- req->int_vector_id = vector_id;
- }
+ ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
+ vector_id,
+ ret);
+ return ret;
}
- if (i > 0) {
- req->int_cause_num = i;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Unmap TQP fail, status is %d.\n", ret);
- return ret;
- }
- }
+ /* Free this MSIX or MSI vector */
+ hclge_free_vector(hdev, vector_id);
return 0;
}
@@ -4077,6 +4299,91 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
return status;
}
+static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
+ u16 cmdq_resp, u8 resp_code)
+{
+#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
+#define HCLGE_ETHERTYPE_ALREADY_ADD 1
+#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
+#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
+
+ int return_status;
+
+ if (cmdq_resp) {
+ dev_err(&hdev->pdev->dev,
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ cmdq_resp);
+ return -EIO;
+ }
+
+ switch (resp_code) {
+ case HCLGE_ETHERTYPE_SUCCESS_ADD:
+ case HCLGE_ETHERTYPE_ALREADY_ADD:
+ return_status = 0;
+ break;
+ case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for manager table overflow.\n");
+ return_status = -EIO;
+ break;
+ case HCLGE_ETHERTYPE_KEY_CONFLICT:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for key conflict.\n");
+ return_status = -EIO;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for undefined, code=%d.\n",
+ resp_code);
+ return_status = -EIO;
+ }
+
+ return return_status;
+}
+
+static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
+ const struct hclge_mac_mgr_tbl_entry_cmd *req)
+{
+ struct hclge_desc desc;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
+ memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
+}
+
+static int init_mgr_tbl(struct hclge_dev *hdev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
+ ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed, ret =%d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4090,6 +4397,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
/* mac addr check */
if (is_zero_ether_addr(new_addr) ||
@@ -4101,14 +4409,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
return -EINVAL;
}
- hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "remove old uc mac address fail, ret =%d.\n",
+ ret);
- if (!hclge_add_uc_addr(handle, new_addr)) {
- ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
- return 0;
+ ret = hclge_add_uc_addr(handle, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add uc mac address fail, ret =%d.\n",
+ ret);
+
+ ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "restore uc mac address fail, ret =%d.\n",
+ ret);
+ }
+
+ return -EIO;
}
- return -EIO;
+ ret = hclge_mac_pause_addr_cfg(hdev, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "configure mac pause address fail, ret =%d.\n",
+ ret);
+ return -EIO;
+ }
+
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+
+ return 0;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
@@ -4134,6 +4467,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
return 0;
}
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+
+static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+}
+
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
@@ -4250,43 +4594,204 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+ req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
+ vcfg->accept_tag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
+ vcfg->accept_untag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ req->vf_bitmap[req->vf_offset] =
+ 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port txvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ req->vf_bitmap[req->vf_offset] =
+ 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port rxvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+ struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+ struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+ rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+ rx_req->ot_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+ rx_req->ot_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+ rx_req->in_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+ rx_req->in_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Send rxvlan protocol type command fail, ret =%d\n",
+ status);
+ return status;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+ tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+ tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send txvlan protocol type command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
-#define HCLGE_VLAN_TYPE_VF_TABLE 0
-#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+#define HCLGE_DEF_VLAN_TYPE 0x8100
+
struct hnae3_handle *handle;
+ struct hclge_vport *vport;
int ret;
+ int i;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
- true);
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
if (ret)
return ret;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
- true);
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
if (ret)
return ret;
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+
+ ret = hclge_set_vlan_protocol_type(hdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->txvlan_cfg.accept_tag = true;
+ vport->txvlan_cfg.accept_untag = true;
+ vport->txvlan_cfg.insert_tag1_en = false;
+ vport->txvlan_cfg.insert_tag2_en = false;
+ vport->txvlan_cfg.default_tag1 = 0;
+ vport->txvlan_cfg.default_tag2 = 0;
+
+ ret = hclge_set_vlan_tx_offload_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ ret = hclge_set_vlan_rx_offload_cfg(vport);
+ if (ret)
+ return ret;
+ }
+
handle = &hdev->vport[0].nic;
return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
+static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = enable;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_config_max_frm_size_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
+ int max_frm_size;
int ret;
- if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
return -EINVAL;
- hdev->mps = new_mtu;
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(new_mtu);
+ req->max_frm_size = cpu_to_le16(max_frm_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -4294,6 +4799,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
return ret;
}
+ hdev->mps = max_frm_size;
+
return 0;
}
@@ -4341,7 +4848,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
-static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4392,6 +4899,100 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
+static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
+ u32 *flowctrl_adv)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
+ (phydev->advertising & ADVERTISED_Asym_Pause);
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ if (rx_en)
+ phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+ if (tx_en)
+ phydev->advertising ^= ADVERTISED_Asym_Pause;
+}
+
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ int ret;
+
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+ return 0;
+
+ ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+
+ return 0;
+}
+
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u16 remote_advertising = 0;
+ u16 local_advertising = 0;
+ u32 rx_pause, tx_pause;
+ u8 flowctl;
+
+ if (!phydev->link || !phydev->autoneg)
+ return 0;
+
+ if (phydev->advertising & ADVERTISED_Pause)
+ local_advertising = ADVERTISE_PAUSE_CAP;
+
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ local_advertising |= ADVERTISE_PAUSE_ASYM;
+
+ if (phydev->pause)
+ remote_advertising = LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_advertising |= LPA_PAUSE_ASYM;
+
+ flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+ remote_advertising);
+ tx_pause = flowctl & FLOW_CTRL_TX;
+ rx_pause = flowctl & FLOW_CTRL_RX;
+
+ if (phydev->duplex == HCLGE_MAC_HALF) {
+ tx_pause = 0;
+ rx_pause = 0;
+ }
+
+ return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
+
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
u32 *rx_en, u32 *tx_en)
{
@@ -4421,6 +5022,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
}
}
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+ u32 rx_en, u32 tx_en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u32 fc_autoneg;
+
+	/* Only support flow control negotiation for a netdev with a
+	 * phy attached, for now.
+	 */
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ fc_autoneg = hclge_get_autoneg(handle);
+ if (auto_neg != fc_autoneg) {
+ dev_info(&hdev->pdev->dev,
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+ dev_info(&hdev->pdev->dev,
+ "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
+
+ hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+ if (!fc_autoneg)
+ return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+ return phy_start_aneg(phydev);
+}
+
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex)
{
@@ -4661,6 +5297,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_request = 0;
+ hdev->reset_pending = 0;
ae_dev->priv = hdev;
ret = hclge_pci_init(hdev);
@@ -4768,16 +5406,28 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
+ return ret;
+ }
+
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
+ INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
+ INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -4889,25 +5539,471 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
+ if (hdev->rst_service_task.func)
+ cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- hclge_free_vector(hdev, 0);
hclge_destroy_cmd_queue(&hdev->hw);
+ hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
ae_dev->priv = NULL;
}
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ ch->max_combined = hclge_get_max_channels(handle);
+ ch->other_count = 1;
+ ch->max_other = 1;
+ ch->combined_count = vport->alloc_tqps;
+}
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+ u16 *free_tqps, u16 *max_rss_size)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 temp_tqps = 0;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ if (!hdev->htqp[i].alloced)
+ temp_tqps++;
+ }
+ *free_tqps = temp_tqps;
+ *max_rss_size = hdev->rss_size_max;
+}
+
+static void hclge_release_tqp(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_tqp, q);
+
+ tqp->q.handle = NULL;
+ tqp->q.tqp_index = 0;
+ tqp->alloced = false;
+ }
+
+ devm_kfree(&hdev->pdev->dev, kinfo->tqp);
+ kinfo->tqp = NULL;
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int cur_rss_size = kinfo->rss_size;
+ int cur_tqps = kinfo->num_tqps;
+ u16 tc_offset[HCLGE_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 tc_size[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ u32 *rss_indir;
+ int ret, i;
+
+ hclge_release_tqp(vport);
+
+ ret = hclge_knic_setup(vport, new_tqps_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_map_tqp_to_vport(hdev, vport);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_tm_schd_init(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ roundup_size = roundup_pow_of_two(kinfo->rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = kinfo->rss_size * i;
+ }
+ ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ if (ret)
+ return ret;
+
+ /* Reinitializes the rss indirect table according to the new RSS size */
+ rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ if (!rss_indir)
+ return -ENOMEM;
+
+ for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ rss_indir[i] = i % kinfo->rss_size;
+
+ ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+ ret);
+
+ kfree(rss_indir);
+
+ if (!ret)
+ dev_info(&hdev->pdev->dev,
+ "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+ cur_rss_size, kinfo->rss_size,
+ cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+ return ret;
+}
+
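To make the arithmetic in hclge_set_channels() concrete, the sketch below redoes just the TC size/offset and indirection-table math with plain integers; the table size and TC count are illustrative constants standing in for HCLGE_RSS_IND_TBL_SIZE and HCLGE_MAX_TC_NUM, not the driver's structures:

#include <stdio.h>

#define RSS_IND_TBL_SIZE 512	/* illustrative */
#define MAX_TC_NUM 8

/* round rss_size up to a power of two and take log2; the hardware wants
 * tc_size encoded as an exponent
 */
static unsigned ilog2_roundup(unsigned v)
{
	unsigned e = 0;

	while ((1u << e) < v)
		e++;
	return e;
}

int main(void)
{
	unsigned rss_size = 6, hw_tc_map = 0x3;	/* two TCs enabled */
	unsigned tc_size[MAX_TC_NUM], tc_offset[MAX_TC_NUM];
	unsigned tbl[RSS_IND_TBL_SIZE];
	unsigned i;

	for (i = 0; i < MAX_TC_NUM; i++) {
		if (!(hw_tc_map & (1u << i)))
			continue;
		tc_size[i] = ilog2_roundup(rss_size);	/* 6 -> 8 -> exponent 3 */
		tc_offset[i] = rss_size * i;		/* queues 0..5, 6..11, ... */
	}

	/* spread the indirection table round-robin over the rss_size queues */
	for (i = 0; i < RSS_IND_TBL_SIZE; i++)
		tbl[i] = i % rss_size;

	printf("tc_size=%u tc_offset[1]=%u tbl[7]=%u\n",
	       tc_size[0], tc_offset[1], tbl[7]);	/* 3, 6, 1 */
	return 0;
}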
+static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
+ u32 *regs_num_64_bit)
+{
+ struct hclge_desc desc;
+ u32 total_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query register number cmd failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *regs_num_32_bit = le32_to_cpu(desc.data[0]);
+ *regs_num_64_bit = le32_to_cpu(desc.data[1]);
+
+ total_num = *regs_num_32_bit + *regs_num_64_bit;
+ if (!total_num)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+
+ struct hclge_desc *desc;
+ u32 *reg_val = data;
+ __le32 *desc_data;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 32 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le32 *)(&desc[i].data[0]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
+ } else {
+ desc_data = (__le32 *)(&desc[i]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le32_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
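The descriptor count in hclge_get_32_bit_regs() follows from each descriptor carrying HCLGE_32_BIT_REG_RTN_DATANUM (8) 32-bit words while the first descriptor spends two of them on command metadata, hence DIV_ROUND_UP(regs_num + 2, 8). A small standalone check of that formula (plain C, not driver code):

#include <stdio.h>

#define DATANUM 8	/* 32-bit words carried per descriptor */

/* descriptors needed to return regs_num registers, given the first
 * descriptor only has DATANUM - 2 usable slots
 */
static unsigned descs_needed(unsigned regs_num)
{
	return (regs_num + 2 + DATANUM - 1) / DATANUM;	/* DIV_ROUND_UP */
}

int main(void)
{
	printf("%u %u %u\n", descs_needed(6), descs_needed(7), descs_needed(14));
	/* prints 1 2 2: 6 regs fit the first desc, 7 need a second,
	 * 14 = 6 + 8 still fit in two
	 */
	return 0;
}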
+static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+
+ struct hclge_desc *desc;
+ u64 *reg_val = data;
+ __le64 *desc_data;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 64 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le64_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return -EOPNOTSUPP;
+ }
+
+ return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+}
+
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int ret;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 32 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+
+ data = (u32 *)data + regs_num_32_bit;
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
+ data);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get 64 bit register failed, ret = %d.\n", ret);
+}
+
+static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
+ u8 act_led_status, u8 link_led_status,
+ u8 locate_led_status)
+{
+ struct hclge_set_led_state_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
+
+ req = (struct hclge_set_led_state_cmd *)desc.data;
+ hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
+ HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
+ hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
+ HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
+ hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
+ HCLGE_LED_LINK_STATE_S, link_led_status);
+ hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+ HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Send set led state cmd error, ret =%d\n", ret);
+
+ return ret;
+}
+
+enum hclge_led_status {
+ HCLGE_LED_OFF,
+ HCLGE_LED_ON,
+ HCLGE_LED_NO_CHANGE = 0xFF,
+};
+
+static int hclge_set_led_id(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status)
+{
+#define BLINK_FREQUENCY 2
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int ret = 0;
+
+ if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return -EOPNOTSUPP;
+
+ switch (status) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = hclge_set_led_status_sfp(hdev,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_ON);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = hclge_set_led_status_sfp(hdev,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_OFF);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+enum hclge_led_port_speed {
+ HCLGE_SPEED_LED_FOR_1G,
+ HCLGE_SPEED_LED_FOR_10G,
+ HCLGE_SPEED_LED_FOR_25G,
+ HCLGE_SPEED_LED_FOR_40G,
+ HCLGE_SPEED_LED_FOR_50G,
+ HCLGE_SPEED_LED_FOR_100G,
+};
+
+static u8 hclge_led_get_speed_status(u32 speed)
+{
+ u8 speed_led;
+
+ switch (speed) {
+ case HCLGE_MAC_SPEED_1G:
+ speed_led = HCLGE_SPEED_LED_FOR_1G;
+ break;
+ case HCLGE_MAC_SPEED_10G:
+ speed_led = HCLGE_SPEED_LED_FOR_10G;
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ speed_led = HCLGE_SPEED_LED_FOR_25G;
+ break;
+ case HCLGE_MAC_SPEED_40G:
+ speed_led = HCLGE_SPEED_LED_FOR_40G;
+ break;
+ case HCLGE_MAC_SPEED_50G:
+ speed_led = HCLGE_SPEED_LED_FOR_50G;
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ speed_led = HCLGE_SPEED_LED_FOR_100G;
+ break;
+ default:
+ speed_led = HCLGE_LED_NO_CHANGE;
+ }
+
+ return speed_led;
+}
+
+static int hclge_update_led_status(struct hclge_dev *hdev)
+{
+ u8 port_speed_status, link_status, activity_status;
+ u64 rx_pkts, tx_pkts;
+
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return 0;
+
+ port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);
+
+ rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
+ tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
+ if (rx_pkts != hdev->rx_pkts_for_led ||
+ tx_pkts != hdev->tx_pkts_for_led)
+ activity_status = HCLGE_LED_ON;
+ else
+ activity_status = HCLGE_LED_OFF;
+ hdev->rx_pkts_for_led = rx_pkts;
+ hdev->tx_pkts_for_led = tx_pkts;
+
+ if (hdev->hw.mac.link)
+ link_status = HCLGE_LED_ON;
+ else
+ link_status = HCLGE_LED_OFF;
+
+ return hclge_set_led_status_sfp(hdev, port_speed_status,
+ activity_status, link_status,
+ HCLGE_LED_NO_CHANGE);
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
- .map_ring_to_vector = hclge_map_handle_ring_to_vector,
- .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
+ .map_ring_to_vector = hclge_map_ring_to_vector,
+ .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
.get_vector = hclge_get_vector,
.set_promisc_mode = hclge_set_promisc_mode,
.set_loopback = hclge_set_loopback,
@@ -4934,6 +6030,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
+ .set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
@@ -4942,9 +6039,18 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_sset_count = hclge_get_sset_count,
.get_fw_version = hclge_get_fw_version,
.get_mdix_mode = hclge_get_mdix_mode,
+ .enable_vlan_filter = hclge_enable_vlan_filter,
.set_vlan_filter = hclge_set_port_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+ .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+ .set_channels = hclge_set_channels,
+ .get_channels = hclge_get_channels,
+ .get_flowctrl_adv = hclge_get_flowctrl_adv,
+ .get_regs_len = hclge_get_regs_len,
+ .get_regs = hclge_get_regs,
+ .set_led_id = hclge_set_led_id,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7027814ea5d7..d99a76a9557c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -79,6 +79,10 @@
#define HCLGE_PHY_MDIX_STATUS_B (6)
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
+/* Factor used to calculate offset and bitmap of VF num */
+#define HCLGE_VF_NUM_PER_CMD 64
+#define HCLGE_VF_NUM_PER_BYTE 8
+
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_GLOBAL_RESET_REG 0x20A00
@@ -92,6 +96,16 @@
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+
+#define HCLGE_MAC_DEFAULT_FRAME \
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define HCLGE_MAC_MIN_FRAME 64
+#define HCLGE_MAC_MAX_FRAME 9728
+
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
HCLGE_STATE_DOWN,
@@ -99,12 +113,20 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_REMOVING,
HCLGE_STATE_SERVICE_INITED,
HCLGE_STATE_SERVICE_SCHED,
+ HCLGE_STATE_RST_SERVICE_SCHED,
+ HCLGE_STATE_RST_HANDLING,
+ HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
- HCLGE_STATE_MBX_IRQ,
- HCLGE_STATE_RESET_INT,
+ HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_MAX
};
+enum hclge_evt_cause {
+ HCLGE_VECTOR0_EVENT_RST,
+ HCLGE_VECTOR0_EVENT_MBX,
+ HCLGE_VECTOR0_EVENT_OTHER,
+};
+
#define HCLGE_MPF_ENBALE 1
struct hclge_caps {
u16 num_tqp;
@@ -208,6 +230,7 @@ struct hclge_cfg {
u8 tc_num;
u16 tqp_desc_num;
u16 rx_buf_len;
+ u16 rss_size_max;
u8 phy_addr;
u8 media_type;
u8 mac_addr[ETH_ALEN];
@@ -364,14 +387,23 @@ struct hclge_mac_stats {
u64 mac_tx_multi_pkt_num;
u64 mac_tx_broad_pkt_num;
u64 mac_tx_undersize_pkt_num;
- u64 mac_tx_overrsize_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
u64 mac_tx_64_oct_pkt_num;
u64 mac_tx_65_127_oct_pkt_num;
u64 mac_tx_128_255_oct_pkt_num;
u64 mac_tx_256_511_oct_pkt_num;
u64 mac_tx_512_1023_oct_pkt_num;
u64 mac_tx_1024_1518_oct_pkt_num;
- u64 mac_tx_1519_max_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+ u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_good_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_oct_pkt_num;
+
u64 mac_rx_total_pkt_num;
u64 mac_rx_total_oct_num;
u64 mac_rx_good_pkt_num;
@@ -382,33 +414,52 @@ struct hclge_mac_stats {
u64 mac_rx_multi_pkt_num;
u64 mac_rx_broad_pkt_num;
u64 mac_rx_undersize_pkt_num;
- u64 mac_rx_overrsize_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
u64 mac_rx_64_oct_pkt_num;
u64 mac_rx_65_127_oct_pkt_num;
u64 mac_rx_128_255_oct_pkt_num;
u64 mac_rx_256_511_oct_pkt_num;
u64 mac_rx_512_1023_oct_pkt_num;
u64 mac_rx_1024_1518_oct_pkt_num;
- u64 mac_rx_1519_max_oct_pkt_num;
-
- u64 mac_trans_fragment_pkt_num;
- u64 mac_trans_undermin_pkt_num;
- u64 mac_trans_jabber_pkt_num;
- u64 mac_trans_err_all_pkt_num;
- u64 mac_trans_from_app_good_pkt_num;
- u64 mac_trans_from_app_bad_pkt_num;
- u64 mac_rcv_fragment_pkt_num;
- u64 mac_rcv_undermin_pkt_num;
- u64 mac_rcv_jabber_pkt_num;
- u64 mac_rcv_fcs_err_pkt_num;
- u64 mac_rcv_send_app_good_pkt_num;
- u64 mac_rcv_send_app_bad_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */
+ u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_good_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_oct_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
};
+#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
struct hclge_hw_stats {
struct hclge_mac_stats mac_stats;
struct hclge_64_bit_stats all_64_bit_stats;
struct hclge_32_bit_stats all_32_bit_stats;
+ u32 stats_timer;
+};
+
+struct hclge_vlan_type_cfg {
+ u16 rx_ot_fst_vlan_type;
+ u16 rx_ot_sec_vlan_type;
+ u16 rx_in_fst_vlan_type;
+ u16 rx_in_sec_vlan_type;
+ u16 tx_ot_vlan_type;
+ u16 tx_in_vlan_type;
};
struct hclge_dev {
@@ -420,6 +471,8 @@ struct hclge_dev {
unsigned long state;
enum hnae3_reset_type reset_type;
+ unsigned long reset_request; /* reset has been requested */
+ unsigned long reset_pending; /* client rst is pending to be served */
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -469,6 +522,8 @@ struct hclge_dev {
unsigned long service_timer_previous;
struct timer_list service_timer;
struct work_struct service_task;
+ struct work_struct rst_service_task;
+ struct work_struct mbx_service_task;
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
@@ -493,6 +548,29 @@ struct hclge_dev {
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
bool enable_mta; /* Multicast filter enable */
bool accept_mta_mc; /* Whether accept mta filter multicast */
+
+ struct hclge_vlan_type_cfg vlan_type_cfg;
+
+ u64 rx_pkts_for_led;
+ u64 tx_pkts_for_led;
+};
+
+/* VPort level vlan tag configuration for TX direction */
+struct hclge_tx_vtag_cfg {
+ bool accept_tag; /* Whether to accept tagged packets from the host */
+ bool accept_untag; /* Whether to accept untagged packets from the host */
+ bool insert_tag1_en; /* Whether to insert the inner vlan tag */
+ bool insert_tag2_en; /* Whether to insert the outer vlan tag */
+ u16 default_tag1; /* The default inner vlan tag to insert */
+ u16 default_tag2; /* The default outer vlan tag to insert */
+};
+
+/* VPort level vlan tag configuration for RX direction */
+struct hclge_rx_vtag_cfg {
+ bool strip_tag1_en; /* Whether to strip the inner vlan tag */
+ bool strip_tag2_en; /* Whether to strip the outer vlan tag */
+ bool vlan1_vlan_prionly; /* Inner VLAN tag up to descriptor enable */
+ bool vlan2_vlan_prionly; /* Outer VLAN tag up to descriptor enable */
};
struct hclge_vport {
@@ -507,6 +585,9 @@ struct hclge_vport {
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ struct hclge_tx_vtag_cfg txvlan_cfg;
+ struct hclge_rx_vtag_cfg rxvlan_cfg;
+
int vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
@@ -529,8 +610,10 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
u8 func_id,
bool enable);
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector,
- struct hnae3_ring_chain_node *ring_chain);
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain);
+
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
@@ -544,4 +627,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
+
+void hclge_mbx_handler(struct hclge_dev *hdev);
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_cfg_flowctrl(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
new file mode 100644
index 000000000000..f38fc5ce9f51
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
+ * receives a mailbox message from VF.
+ * @vport: pointer to struct hclge_vport
+ * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
+ * message
+ * @resp_status: indicate to VF whether its request succeeded (0) or failed.
+ */
+static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
+ int resp_status,
+ u8 *resp_data, u16 resp_data_len)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc;
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "PF fail to gen resp to VF len %d exceeds max len %d\n",
+ resp_data_len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
+ resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+
+ resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
+ resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
+ resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
+ resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;
+
+ if (resp_data && resp_data_len > 0)
+ memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(=%d) to send response to VF\n", status);
+
+ return status;
+}
+
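hclge_gen_resp_to_vf() lays the response out as a fixed four-byte header followed by optional payload: msg[0] is the response opcode, msg[1..2] echo the request code and subcode, msg[3] carries 0 for success or 1 for failure, and any data starts at msg[4]. A hedged standalone sketch of that packing (the opcode and request-code values below are made up for illustration):

#include <stdio.h>
#include <string.h>

#define MBX_PF_VF_RESP 0	/* illustrative stand-in for HCLGE_MBX_PF_VF_RESP */

/* pack a PF->VF response the way the function above lays it out */
static void pack_resp(unsigned char *msg, unsigned char req_code,
		      unsigned char req_subcode, int status,
		      const unsigned char *data, size_t len)
{
	msg[0] = MBX_PF_VF_RESP;
	msg[1] = req_code;
	msg[2] = req_subcode;
	msg[3] = status ? 1 : 0;
	if (data && len)
		memcpy(&msg[4], data, len);
}

int main(void)
{
	unsigned char msg[16] = { 0 };
	unsigned char tc_map = 0x3;

	pack_resp(msg, 9 /* made-up request code */, 0, 0, &tc_map, 1);
	printf("%u %u %u %u 0x%x\n", msg[0], msg[1], msg[2], msg[3], msg[4]);
	return 0;
}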
+static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
+ u16 mbx_opcode, u8 dest_vfid)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc;
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = dest_vfid;
+ resp_pf_to_vf->msg_len = msg_len;
+ resp_pf_to_vf->msg[0] = mbx_opcode;
+
+ memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(=%d) to send mailbox message to VF\n",
+ status);
+
+ return status;
+}
+
+static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+{
+ struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+ chain = head->next;
+
+ while (chain) {
+ chain_tmp = chain->next;
+ kzfree(chain);
+ chain = chain_tmp;
+ }
+}
+
+/* hclge_get_ring_chain_from_mbx: get ring type & tqp id from mailbox message
+ * msg[0]: opcode
+ * msg[1]: <not relevant to this function>
+ * msg[2]: ring_num
+ * msg[3]: first ring type (TX|RX)
+ * msg[4]: first tqp id
+ * msg[5] ~ msg[14]: other ring type and tqp id
+ */
+static int hclge_get_ring_chain_from_mbx(
+ struct hclge_mbx_vf_to_pf_cmd *req,
+ struct hnae3_ring_chain_node *ring_chain,
+ struct hclge_vport *vport)
+{
+#define HCLGE_RING_NODE_VARIABLE_NUM 3
+#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM 3
+ struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ int ring_num;
+ int i;
+
+ ring_num = req->msg[2];
+
+ hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ ring_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+ hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ req->msg[5]);
+
+ cur_chain = ring_chain;
+
+ for (i = 1; i < ring_num; i++) {
+ new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
+ if (!new_chain)
+ goto err;
+
+ hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]);
+
+ new_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]);
+
+ hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 2]);
+
+ cur_chain->next = new_chain;
+ cur_chain = new_chain;
+ }
+
+ return 0;
+err:
+ hclge_free_vector_ring_chain(ring_chain);
+ return -ENOMEM;
+}
+
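The msg[] layout documented above is a three-byte header followed by one three-byte tuple per ring, so ring i's fields live at offsets 3*i+3, 3*i+4 and 3*i+5. A standalone decoding sketch with made-up message contents:

#include <stdio.h>

#define BASIC_MSG_NUM 3		/* msg[0..2]: opcode, vector id, ring_num */
#define NODE_VARIABLE_NUM 3	/* per ring: type, tqp id, gl index */

int main(void)
{
	/* opcode, vector, 2 rings: (TX, tqp 4, gl 1) and (RX, tqp 4, gl 0) */
	unsigned char msg[] = { 0x05, 1, 2, 1, 4, 1, 0, 4, 0 };
	unsigned ring_num = msg[2];
	unsigned i;

	for (i = 0; i < ring_num; i++) {
		unsigned base = NODE_VARIABLE_NUM * i + BASIC_MSG_NUM;

		printf("ring %u: type=%u tqp=%u gl=%u\n",
		       i, msg[base], msg[base + 1], msg[base + 2]);
	}
	return 0;
}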
+static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ struct hnae3_ring_chain_node ring_chain;
+ int vector_id = req->msg[1];
+ int ret;
+
+ memset(&ring_chain, 0, sizeof(ring_chain));
+ ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+ if (ret)
+ return ret;
+
+ hclge_free_vector_ring_chain(&ring_chain);
+
+ return 0;
+}
+
+static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ bool en = req->msg[1] ? true : false;
+ struct hclge_promisc_param param;
+
+ /* always enable broadcast promisc bit */
+ hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
+ return hclge_cmd_set_promisc_mode(vport->back, &param);
+}
+
+static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+ const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+
+ hclge_rm_uc_addr_common(vport, old_addr);
+ status = hclge_add_uc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+ status = hclge_add_uc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+ status = hclge_rm_uc_addr_common(vport, mac_addr);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set unicast mac addr, unknown subcode %d\n",
+ mbx_req->msg[1]);
+ return -EIO;
+ }
+
+ if (gen_resp)
+ hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return 0;
+}
+
+static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+ status = hclge_add_mc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+ status = hclge_rm_mc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
+ u8 func_id = vport->vport_id;
+ bool enable = mbx_req->msg[2];
+
+ status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set mcast mac addr, unknown subcode %d\n",
+ mbx_req->msg[1]);
+ return -EIO;
+ }
+
+ if (gen_resp)
+ hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return 0;
+}
+
+static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ struct hclge_dev *hdev = vport->back;
+ int status = 0;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+ u16 vlan, proto;
+ bool is_kill;
+
+ is_kill = !!mbx_req->msg[2];
+ memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
+ memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
+ status = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ is_kill, vlan, 0,
+ cpu_to_be16(proto));
+ }
+
+ if (gen_resp)
+ status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return status;
+}
+
+static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
+ sizeof(u8));
+
+ return ret;
+}
+
+static int hclge_get_vf_queue_info(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+#define HCLGE_TQPS_RSS_INFO_LEN 8
+ u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
+ struct hclge_dev *hdev = vport->back;
+
+ /* get the queue related info */
+ memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
+ memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16));
+ memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
+ memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ HCLGE_TQPS_RSS_INFO_LEN);
+}
+
+static int hclge_get_link_info(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ u16 link_status;
+ u8 msg_data[2];
+ u8 dest_vfid;
+
+ /* mac.link can only be 0 or 1 */
+ link_status = (u16)hdev->hw.mac.link;
+ memcpy(&msg_data[0], &link_status, sizeof(u16));
+ dest_vfid = mbx_req->mbx_src_vfid;
+
+ /* send this requested info to VF */
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+ HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
+}
+
+static void hclge_reset_vf_queue(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ u16 queue_id;
+
+ memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+
+ hclge_reset_tqp(&vport->nic, queue_id);
+}
+
+void hclge_mbx_handler(struct hclge_dev *hdev)
+{
+ struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_vport *vport;
+ struct hclge_desc *desc;
+ int ret;
+
+ /* handle all the mailbox requests in the queue */
+ while (hnae_get_bit(crq->desc[crq->next_to_use].flag,
+ HCLGE_CMDQ_RX_OUTVLD_B)) {
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+
+ vport = &hdev->vport[req->mbx_src_vfid];
+
+ switch (req->msg[0]) {
+ case HCLGE_MBX_MAP_RING_TO_VECTOR:
+ ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+ req);
+ break;
+ case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
+ ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
+ req);
+ break;
+ case HCLGE_MBX_SET_PROMISC_MODE:
+ ret = hclge_set_vf_promisc_mode(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF promisc mode\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_UNICAST:
+ ret = hclge_set_vf_uc_mac_addr(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_MULTICAST:
+ ret = hclge_set_vf_mc_mac_addr(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_VLAN:
+ ret = hclge_set_vf_vlan_cfg(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_QINFO:
+ ret = hclge_get_vf_queue_info(vport, req, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get Q info for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_TCINFO:
+ ret = hclge_get_vf_tcinfo(vport, req, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get TC info for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_LINK_STATUS:
+ ret = hclge_get_link_info(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to get link stat for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_QUEUE_RESET:
+ hclge_reset_vf_queue(vport, req);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %d\n",
+ req->msg[0]);
+ break;
+ }
+ hclge_mbx_ring_ptr_move_crq(crq);
+ }
+
+ /* Write back CMDQ_RQ header pointer; M7 needs this pointer */
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 7069e9408d7d..c1dea3a47bdd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -17,6 +17,7 @@
#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
SUPPORTED_Pause | \
+ SUPPORTED_Asym_Pause | \
PHY_10BT_FEATURES | \
PHY_100BT_FEATURES | \
PHY_1000BT_FEATURES)
@@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
+
+ ret = hclge_cfg_flowctrl(hdev);
+ if (ret)
+ netdev_err(netdev, "failed to configure flow control.\n");
}
int hclge_mac_start_phy(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 7bfa2e5497cb..36bd79a77940 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -23,8 +23,8 @@ enum hclge_shaper_level {
HCLGE_SHAPER_LVL_PF = 1,
};
-#define HCLGE_SHAPER_BS_U_DEF 1
-#define HCLGE_SHAPER_BS_S_DEF 4
+#define HCLGE_SHAPER_BS_U_DEF 5
+#define HCLGE_SHAPER_BS_S_DEF 20
#define HCLGE_ETHER_MAX_RATE 100000
@@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
return 0;
}
-static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
struct hclge_desc desc;
@@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
+ u8 pause_trans_gap, u16 pause_trans_time)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
+
+ ether_addr_copy(pause_param->mac_addr, addr);
+ pause_param->pause_trans_gap = pause_trans_gap;
+ pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ u16 trans_time;
+ u8 trans_gap;
+ int ret;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ trans_gap = pause_param->pause_trans_gap;
+ trans_time = le16_to_cpu(pause_param->pause_trans_time);
+
+ return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap,
+ trans_time);
+}
+
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
u8 tc;
@@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
return hclge_tm_schd_mode_hw(hdev);
}
+static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ return hclge_mac_pause_param_cfg(hdev, mac->mac_addr,
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+}
+
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
u8 enable_bitmap = 0;
@@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
int ret;
u8 i;
- if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
- return hclge_mac_pause_setup_hw(hdev);
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_mac_pause_param_setup_hw(hdev);
+ }
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index bf59961918ab..5401e7559437 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -18,6 +18,9 @@
#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF
+#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
+
/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
@@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd {
u8 pri_en_bitmap;
};
+struct hclge_cfg_pause_param_cmd {
+ u8 mac_addr[ETH_ALEN];
+ u8 pause_trans_gap;
+ u8 rsvd;
+ __le16 pause_trans_time;
+};
+
struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
};
@@ -118,4 +128,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
new file mode 100644
index 000000000000..fb93bbd35845
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
new file mode 100644
index 000000000000..85985e731311
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
+#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
+
+static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int used;
+
+ used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
+
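hclgevf_ring_space() is the usual circular-buffer computation, always keeping one slot unused so that head == tail unambiguously means the ring is empty. A standalone sketch of the formula:

#include <stdio.h>

/* free slots in a circular descriptor ring, leaving one slot unused */
static int ring_space(int ntu, int ntc, int desc_num)
{
	int used = (ntu - ntc + desc_num) % desc_num;

	return desc_num - used - 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       ring_space(0, 0, 1024),	/* empty ring   -> 1023 */
	       ring_space(10, 4, 1024),	/* 6 in flight  -> 1017 */
	       ring_space(3, 10, 1024));	/* wrapped ring -> 6 */
	return 0;
}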
+static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
+{
+ struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
+ u16 ntc = csq->next_to_clean;
+ struct hclgevf_desc *desc;
+ int clean = 0;
+ u32 head;
+
+ desc = &csq->desc[ntc];
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+ while (head != ntc) {
+ memset(desc, 0, sizeof(*desc));
+ ntc++;
+ if (ntc == csq->desc_num)
+ ntc = 0;
+ desc = &csq->desc[ntc];
+ clean++;
+ }
+ csq->next_to_clean = ntc;
+
+ return clean;
+}
+
+static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
+{
+ u32 head;
+
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+
+ return head == hw->cmq.csq.next_to_use;
+}
+
+static bool hclgevf_is_special_opcode(u16 opcode)
+{
+ u16 spec_opcode[] = {0x30, 0x31, 0x32};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
+ if (spec_opcode[i] == opcode)
+ return true;
+ }
+
+ return false;
+}
+
+static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
+
+ ring->desc = kzalloc(size, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
+ size, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+ dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
+ ring->desc_num * sizeof(ring->desc[0]),
+ hclgevf_ring_to_dma_dir(ring));
+
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+}
+
+static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
+ struct hclgevf_cmq_ring *ring)
+{
+ struct hclgevf_hw *hw = &hdev->hw;
+ int ring_type = ring->flag;
+ u32 reg_val;
+ int ret;
+
+ ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+ spin_lock_init(&ring->lock);
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+ ring->dev = hdev;
+
+ /* allocate CSQ/CRQ descriptor */
+ ret = hclgevf_alloc_cmd_desc(ring);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
+ (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
+ return ret;
+ }
+
+ /* initialize the hardware registers with csq/crq dma-address,
+ * descriptor number, head & tail pointers
+ */
+ switch (ring_type) {
+ case HCLGEVF_TYPE_CSQ:
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ break;
+ case HCLGEVF_TYPE_CRQ:
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ break;
+ }
+
+ return 0;
+}
+
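One detail in the register programming above: the high half of the descriptor DMA address is written as (addr >> 31) >> 1 rather than addr >> 32, so the expression stays well defined even if dma_addr_t is only 32 bits wide on a given configuration. A standalone illustration of the split, assuming a 64-bit address value:

#include <stdio.h>
#include <stdint.h>

/* split a DMA address into the two 32-bit base-address register values;
 * the double shift avoids shifting a 32-bit type by its full width
 */
static void split_dma_addr(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)addr;
	*hi = (uint32_t)((addr >> 31) >> 1);
}

int main(void)
{
	uint32_t lo, hi;

	split_dma_addr(0x0000001234abcd00ULL, &lo, &hi);
	printf("hi=0x%x lo=0x%x\n", hi, lo);	/* hi=0x12 lo=0x34abcd00 */
	return 0;
}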
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+ enum hclgevf_opcode_type opcode, bool is_read)
+{
+ memset(desc, 0, sizeof(struct hclgevf_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
+ HCLGEVF_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
+}
+
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send command for the command queue: it copies the
+ * descriptors into the send queue, rings the doorbell, waits for
+ * completion and then cleans the queue.
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
+{
+ struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ struct hclgevf_desc *desc_to_use;
+ bool complete = false;
+ u32 timeout = 0;
+ int handle = 0;
+ int status = 0;
+ u16 retval;
+ u16 opcode;
+ int ntc;
+
+ spin_lock_bh(&hw->cmq.csq.lock);
+
+ if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ /* Record where these descriptors start in the ring; this is used
+ * later to read back the hardware's write-back results
+ */
+ ntc = hw->cmq.csq.next_to_use;
+ opcode = le16_to_cpu(desc[0].opcode);
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+
+ /* Write to hardware */
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
+ hw->cmq.csq.next_to_use);
+
+ /* If the command is sync, wait for the firmware to write back;
+ * if multiple descriptors were sent, use the first descriptor's flag to check
+ */
+ if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclgevf_cmd_csq_done(hw))
+ break;
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (hclgevf_cmd_csq_done(hw)) {
+ complete = true;
+ handle = 0;
+
+ while (handle < num) {
+ /* Get the result of hardware write back */
+ desc_to_use = &hw->cmq.csq.desc[ntc];
+ desc[handle] = *desc_to_use;
+
+ if (likely(!hclgevf_is_special_opcode(opcode)))
+ retval = le16_to_cpu(desc[handle].retval);
+ else
+ retval = le16_to_cpu(desc[0].retval);
+
+ if ((enum hclgevf_cmd_return_status)retval ==
+ HCLGEVF_CMD_EXEC_SUCCESS)
+ status = 0;
+ else
+ status = -EIO;
+ hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
+ ntc++;
+ handle++;
+ if (ntc == hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ }
+
+ if (!complete)
+ status = -EAGAIN;
+
+ /* Clean the command send queue */
+ handle = hclgevf_cmd_csq_clean(hw);
+ if (handle != num) {
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ }
+
+ spin_unlock_bh(&hw->cmq.csq.lock);
+
+ return status;
+}
+
+static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
+ u32 *version)
+{
+ struct hclgevf_query_version_cmd *resp;
+ struct hclgevf_desc desc;
+ int status;
+
+ resp = (struct hclgevf_query_version_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
+ status = hclgevf_cmd_send(hw, &desc, 1);
+ if (!status)
+ *version = le32_to_cpu(resp->firmware);
+
+ return status;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+ u32 version;
+ int ret;
+
+ /* setup Tx write back timeout */
+ hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+
+ /* setup queue CSQ/CRQ rings */
+ hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
+ ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize CSQ ring\n", ret);
+ return ret;
+ }
+
+ hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
+ ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize CRQ ring\n", ret);
+ goto err_csq;
+ }
+
+ /* get firmware version */
+ ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to query firmware version\n", ret);
+ goto err_crq;
+ }
+ hdev->fw_version = version;
+
+ dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+ return 0;
+err_crq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+err_csq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+
+ return ret;
+}
+
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
+{
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
new file mode 100644
index 000000000000..2caca9317f8c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_CMD_H
+#define __HCLGEVF_CMD_H
+#include <linux/io.h>
+#include <linux/types.h>
+#include "hnae3.h"
+
+#define HCLGEVF_CMDQ_TX_TIMEOUT 200
+#define HCLGEVF_CMDQ_RX_INVLD_B 0
+#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
+
+struct hclgevf_hw;
+struct hclgevf_dev;
+
+struct hclgevf_desc {
+ __le16 opcode;
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ __le32 data[6];
+};
+
+struct hclgevf_desc_cb {
+ dma_addr_t dma;
+ void *va;
+ u32 length;
+};
+
+struct hclgevf_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hclgevf_desc *desc;
+ struct hclgevf_desc_cb *desc_cb;
+ struct hclgevf_dev *dev;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 flag;
+ spinlock_t lock; /* Command queue lock */
+};
+
+enum hclgevf_cmd_return_status {
+ HCLGEVF_CMD_EXEC_SUCCESS = 0,
+ HCLGEVF_CMD_NO_AUTH = 1,
+ HCLGEVF_CMD_NOT_EXEC = 2,
+ HCLGEVF_CMD_QUEUE_FULL = 3,
+};
+
+enum hclgevf_cmd_status {
+ HCLGEVF_STATUS_SUCCESS = 0,
+ HCLGEVF_ERR_CSQ_FULL = -1,
+ HCLGEVF_ERR_CSQ_TIMEOUT = -2,
+ HCLGEVF_ERR_CSQ_ERROR = -3
+};
+
+struct hclgevf_cmq {
+ struct hclgevf_cmq_ring csq;
+ struct hclgevf_cmq_ring crq;
+ u16 tx_timeout; /* Tx timeout */
+ enum hclgevf_cmd_status last_status;
+};
+
+#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
+#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
+#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
+#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
+#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
+#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5
+
+#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
+#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
+#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
+
+enum hclgevf_opcode_type {
+ /* Generic command */
+ HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
+ /* TQP command */
+ HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
+ HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* RSS cmd */
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
+ /* Mailbox cmd */
+ HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
+};
+
+#define HCLGEVF_TQP_REG_OFFSET 0x80000
+#define HCLGEVF_TQP_REG_SIZE 0x200
+
+struct hclgevf_tqp_map {
+ __le16 tqp_id; /* Absolute tqp id in this pf */
+ u8 tqp_vf; /* VF id */
+#define HCLGEVF_TQP_MAP_TYPE_PF 0
+#define HCLGEVF_TQP_MAP_TYPE_VF 1
+#define HCLGEVF_TQP_MAP_TYPE_B 0
+#define HCLGEVF_TQP_MAP_EN_B 1
+ u8 tqp_flag; /* Indicate it's pf or vf tqp */
+ __le16 tqp_vid; /* Virtual id in this pf/vf */
+ u8 rsv[18];
+};
+
+#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD 10
+
+enum hclgevf_int_type {
+ HCLGEVF_INT_TX = 0,
+ HCLGEVF_INT_RX,
+ HCLGEVF_INT_EVENT,
+};
+
+struct hclgevf_ctrl_vector_chain {
+ u8 int_vector_id;
+ u8 int_cause_num;
+#define HCLGEVF_INT_TYPE_S 0
+#define HCLGEVF_INT_TYPE_M 0x3
+#define HCLGEVF_TQP_ID_S 2
+#define HCLGEVF_TQP_ID_M (0x3fff << HCLGEVF_TQP_ID_S)
+ __le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
+ u8 vfid;
+ u8 resv;
+};
+
+struct hclgevf_query_version_cmd {
+ __le32 firmware;
+ __le32 firmware_rsv[5];
+};
+
+#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
+#define HCLGEVF_RSS_HASH_KEY_NUM 16
+struct hclgevf_rss_config_cmd {
+ u8 hash_config;
+ u8 rsv[7];
+ u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
+};
+
+struct hclgevf_rss_input_tuple_cmd {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_stcp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_stcp_en;
+ u8 ipv6_fragment_en;
+ u8 rsv[16];
+};
+
+#define HCLGEVF_RSS_CFG_TBL_SIZE 16
+
+struct hclgevf_rss_indirection_table_cmd {
+ u16 start_table_index;
+ u16 rss_set_bitmap;
+ u8 rsv[4];
+ u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
+};
+
+#define HCLGEVF_RSS_TC_OFFSET_S 0
+#define HCLGEVF_RSS_TC_OFFSET_M (0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
+#define HCLGEVF_RSS_TC_SIZE_S 12
+#define HCLGEVF_RSS_TC_SIZE_M (0x7 << HCLGEVF_RSS_TC_SIZE_S)
+#define HCLGEVF_RSS_TC_VALID_B 15
+#define HCLGEVF_MAX_TC_NUM 8
+struct hclgevf_rss_tc_mode_cmd {
+ u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+ u8 rsv[8];
+};
+
+#define HCLGEVF_LINK_STS_B 0
+#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
+struct hclgevf_link_status_cmd {
+ u8 status;
+ u8 rsv[23];
+};
+
+#define HCLGEVF_RING_ID_MASK 0x3ff
+#define HCLGEVF_TQP_ENABLE_B 0
+
+struct hclgevf_cfg_com_tqp_queue_cmd {
+ __le16 tqp_id;
+ __le16 stream_id;
+ u8 enable;
+ u8 rsv[19];
+};
+
+struct hclgevf_cfg_tx_queue_pointer_cmd {
+ __le16 tqp_id;
+ __le16 tx_tail;
+ __le16 tx_head;
+ __le16 fbd_num;
+ __le16 ring_offset;
+ u8 rsv[14];
+};
+
+#define HCLGEVF_TYPE_CRQ 0
+#define HCLGEVF_TYPE_CSQ 1
+#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c
+#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
+#define HCLGEVF_NIC_CMQ_EN_B 16
+#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B)
+#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
+#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
+#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
+
+static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define hclgevf_write_dev(a, reg, value) \
+ hclgevf_write_reg((a)->io_base, (reg), (value))
+#define hclgevf_read_dev(a, reg) \
+ hclgevf_read_reg((a)->io_base, (reg))
+
+#define HCLGEVF_SEND_SYNC(flag) \
+ ((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev);
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+ enum hclgevf_opcode_type opcode,
+ bool is_read);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
new file mode 100644
index 000000000000..0d89965f7928
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -0,0 +1,1505 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+#define HCLGEVF_NAME "hclgevf"
+
+static struct hnae3_ae_algo ae_algovf;
+
+static const struct pci_device_id ae_algovf_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ /* required last entry */
+ {0, }
+};
+
+static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
+ struct hnae3_handle *handle)
+{
+ return container_of(handle, struct hclgevf_dev, nic);
+}
+
+static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *queue;
+ struct hclgevf_desc desc;
+ struct hclgevf_tqp *tqp;
+ int status;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclgevf_tqp, q);
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_QUERY_RX_STATUS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Query tqp stat fail, status = %d,queue = %d\n",
+ status, i);
+ return status;
+ }
+ tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Query tqp stat fail, status = %d,queue = %d\n",
+ status, i);
+ return status;
+ }
+ tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+ }
+
+ return 0;
+}
+
+static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_tqp *tqp;
+ u64 *buff = data;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+ }
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+ }
+
+ return buff;
+}
+
+static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->num_tqps * 2;
+}
+
+static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 *buff = data;
+ int i = 0;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+ tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+ tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ return buff;
+}
+
+static void hclgevf_update_stats(struct hnae3_handle *handle,
+ struct net_device_stats *net_stats)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int status;
+
+ status = hclgevf_tqps_update_stats(handle);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF update of TQPS stats fail, status = %d.\n",
+ status);
+}
+
+static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ if (strset == ETH_SS_TEST)
+ return -EOPNOTSUPP;
+ else if (strset == ETH_SS_STATS)
+ return hclgevf_tqps_get_sset_count(handle, strset);
+
+ return 0;
+}
+
+static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
+ u8 *data)
+{
+ u8 *p = (char *)data;
+
+ if (strset == ETH_SS_STATS)
+ p = hclgevf_tqps_get_strings(handle, p);
+}
+
+static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ hclgevf_tqps_get_stats(handle, data);
+}
+
+static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
+{
+ u8 resp_msg;
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
+ true, &resp_msg, sizeof(u8));
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get TC info from PF failed %d",
+ status);
+ return status;
+ }
+
+ hdev->hw_tc_map = resp_msg;
+
+ return 0;
+}
+
+static int hclge_get_queue_info(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_RSS_INFO_LEN 8
+ u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
+ true, resp_msg,
+ HCLGEVF_TQPS_RSS_INFO_LEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get tqp info from PF failed %d",
+ status);
+ return status;
+ }
+
+ memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
+ memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
+ memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
+ memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
+
+ return 0;
+}
+
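+/* The 8-byte HCLGE_MBX_GET_QINFO response parsed above consists of four
+ * 16-bit fields, copied out by the memcpy() calls in this order:
+ *
+ *	bytes 0-1: num_tqps      - task queue pairs assigned to this VF
+ *	bytes 2-3: rss_size_max  - hardware limit on the RSS queue size
+ *	bytes 4-5: num_desc      - descriptors per queue
+ *	bytes 6-7: rx_buf_len    - RX buffer length
+ */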
+static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_tqp *tqp;
+ int i;
+
+ hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
+ sizeof(struct hclgevf_tqp), GFP_KERNEL);
+ if (!hdev->htqp)
+ return -ENOMEM;
+
+ tqp = hdev->htqp;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp->dev = &hdev->pdev->dev;
+ tqp->index = i;
+
+ tqp->q.ae_algo = &ae_algovf;
+ tqp->q.buf_size = hdev->rx_buf_len;
+ tqp->q.desc_num = hdev->num_desc;
+ tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
+ i * HCLGEVF_TQP_REG_SIZE;
+
+ tqp++;
+ }
+
+ return 0;
+}
+
+static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo;
+ u16 new_tqps = hdev->num_tqps;
+ int i;
+
+ kinfo = &nic->kinfo;
+ kinfo->num_tc = 0;
+ kinfo->num_desc = hdev->num_desc;
+ kinfo->rx_buf_len = hdev->rx_buf_len;
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+ if (hdev->hw_tc_map & BIT(i))
+ kinfo->num_tc++;
+
+ kinfo->rss_size
+ = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
+ new_tqps = kinfo->rss_size * kinfo->num_tc;
+ kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
+
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+ sizeof(struct hnae3_queue *), GFP_KERNEL);
+ if (!kinfo->tqp)
+ return -ENOMEM;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ hdev->htqp[i].q.handle = &hdev->nic;
+ hdev->htqp[i].q.tqp_index = i;
+ kinfo->tqp[i] = &hdev->htqp[i].q;
+ }
+
+ return 0;
+}
+
+static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
+{
+ int status;
+ u8 resp_msg;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
+ 0, false, &resp_msg, sizeof(u8));
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed to fetch link status(%d) from PF", status);
+}
+
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
+{
+ struct hnae3_handle *handle = &hdev->nic;
+ struct hnae3_client *client;
+
+ client = handle->client;
+
+ if (link_state != hdev->hw.mac.link) {
+ client->ops->link_status_change(handle, !!link_state);
+ hdev->hw.mac.link = link_state;
+ }
+}
+
+static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+ nic->ae_algo = &ae_algovf;
+ nic->pdev = hdev->pdev;
+ nic->numa_node_mask = hdev->numa_node_mask;
+ nic->flags |= HNAE3_SUPPORT_VF;
+
+ if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
+ dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
+ hdev->ae_dev->dev_type);
+ return -EINVAL;
+ }
+
+ ret = hclgevf_knic_setup(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
+ ret);
+ return ret;
+}
+
+static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
+{
+ hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
+ hdev->num_msi_left += 1;
+ hdev->num_msi_used -= 1;
+}
+
+static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
+ struct hnae3_vector_info *vector_info)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_vector_info *vector = vector_info;
+ int alloc = 0;
+ int i, j;
+
+ vector_num = min(hdev->num_msi_left, vector_num);
+
+ for (j = 0; j < vector_num; j++) {
+ for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
+ if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
+ vector->vector = pci_irq_vector(hdev->pdev, i);
+ vector->io_addr = hdev->hw.io_base +
+ HCLGEVF_VECTOR_REG_BASE +
+ (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
+ hdev->vector_status[i] = 0;
+ hdev->vector_irq[i] = vector->vector;
+
+ vector++;
+ alloc++;
+
+ break;
+ }
+ }
+ }
+ hdev->num_msi_left -= alloc;
+ hdev->num_msi_used += alloc;
+
+ return alloc;
+}
+
+static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_msi; i++)
+ if (vector == hdev->vector_irq[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
+{
+ return HCLGEVF_RSS_KEY_SIZE;
+}
+
+static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
+{
+ return HCLGEVF_RSS_IND_TBL_SIZE;
+}
+
+static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
+{
+ const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
+ struct hclgevf_rss_indirection_table_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+ int i, j;
+
+ req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
+
+ for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
+ false);
+ req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
+ req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
+ for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
+ req->rss_result[j] =
+ indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to set RSS indirection table\n",
+ status);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
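+/* A worked example of the chunking above, assuming HCLGEVF_RSS_CFG_TBL_SIZE
+ * (defined in hclgevf_cmd.h, not shown here) is 16: with an indirection table
+ * of HCLGEVF_RSS_IND_TBL_SIZE = 512 entries the loop issues 512 / 16 = 32
+ * commands, where command i carries start_table_index = 16 * i and the next
+ * 16 queue ids from the shadow table.
+ */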
+static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
+{
+ struct hclgevf_rss_tc_mode_cmd *req;
+ u16 tc_offset[HCLGEVF_MAX_TC_NUM];
+ u16 tc_valid[HCLGEVF_MAX_TC_NUM];
+ u16 tc_size[HCLGEVF_MAX_TC_NUM];
+ struct hclgevf_desc desc;
+ u16 roundup_size;
+ int status;
+ int i;
+
+ req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+ tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
+ tc_size[i] = roundup_size;
+ tc_offset[i] = rss_size * i;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+ hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+ }
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to set rss tc mode\n", status);
+
+ return status;
+}
+
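+/* Worked example for the tc_size computation above: if rss_size is 24 then
+ * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so every enabled TC is
+ * programmed with tc_size = 5 (i.e. 2^5 = 32 entries) and tc_offset = 24 * i.
+ */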
+static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
+ u8 *key)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_config_cmd *req;
+ int lkup_times = key ? 3 : 1;
+ struct hclgevf_desc desc;
+ int key_offset;
+ int key_size;
+ int status;
+
+ req = (struct hclgevf_rss_config_cmd *)desc.data;
+ lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
+
+ for (key_offset = 0; key_offset < lkup_times; key_offset++) {
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+ true);
+ req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get hardware RSS cfg, status = %d\n",
+ status);
+ return status;
+ }
+
+ if (key_offset == 2)
+ key_size =
+ HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
+ else
+ key_size = HCLGEVF_RSS_HASH_KEY_NUM;
+
+ if (key)
+ memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
+ req->hash_key,
+ key_size);
+ }
+
+ if (hash) {
+ if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
+ *hash = ETH_RSS_HASH_TOP;
+ else
+ *hash = ETH_RSS_HASH_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
+
+ if (indir)
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ indir[i] = rss_cfg->rss_indirection_tbl[i];
+
+ return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
+}
+
+static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
+
+ /* update the shadow RSS table with user specified qids */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
+
+ /* update the hardware */
+ return hclgevf_set_rss_indir_table(hdev);
+}
+
+static int hclgevf_get_tc_size(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+
+ return rss_cfg->rss_size;
+}
+
+static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+#define HCLGEVF_RING_NODE_VARIABLE_NUM 3
+#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM 3
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ring_chain_node *node;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int i, vector_id;
+ int status;
+ u8 type;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ type = en ?
+ HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+ req->msg[0] = type;
+ req->msg[1] = vector_id; /* vector_id should be id in VF */
+
+ i = 0;
+ for (node = ring_chain; node; node = node->next) {
+ i++;
+ /* msg[2] is the number of rings carried in this message */
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
+ hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
+ node->tqp_index;
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] =
+ hnae_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
+
+ if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
+ HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
+ HCLGEVF_RING_NODE_VARIABLE_NUM) {
+ req->msg[2] = i;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n",
+ status);
+ return status;
+ }
+ i = 0;
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_MBX_VF_TO_PF,
+ false);
+ req->msg[0] = type;
+ req->msg[1] = vector_id;
+ }
+ }
+
+ if (i > 0) {
+ req->msg[2] = i;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n", status);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
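+/* The VF-to-PF message built above is laid out as three header bytes followed
+ * by HCLGEVF_RING_NODE_VARIABLE_NUM bytes per ring:
+ *
+ *	msg[0]              : HCLGE_MBX_MAP_RING_TO_VECTOR or
+ *	                      HCLGE_MBX_UNMAP_RING_TO_VECTOR
+ *	msg[1]              : vector id local to this VF
+ *	msg[2]              : number of rings carried in this message
+ *	msg[3 * n .. 3n + 2]: ring type, tqp index and GL index of ring n
+ *
+ * When the ring chain does not fit into a single descriptor the partially
+ * built message is sent and a fresh one is started, which is why the loop may
+ * call hclgevf_cmd_send() more than once.
+ */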
+static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+}
+
+static int hclgevf_unmap_ring_from_vector(
+ struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret, vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vector=%d, ret =%d\n",
+ vector_id,
+ ret);
+ return ret;
+ }
+
+ hclgevf_free_vector(hdev, vector);
+
+ return 0;
+}
+
+static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
+{
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
+ req->msg[1] = en;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Set promisc mode fail, status is %d.\n", status);
+
+ return status;
+}
+
+static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ hclgevf_cmd_set_promisc_mode(hdev, en);
+}
+
+static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
+ int stream_id, bool enable)
+{
+ struct hclgevf_cfg_com_tqp_queue_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
+ false);
+ req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
+ req->stream_id = cpu_to_le16(stream_id);
+ req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "TQP enable fail, status =%d.\n", status);
+
+ return status;
+}
+
+static int hclgevf_get_queue_id(struct hnae3_queue *queue)
+{
+ struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
+
+ return tqp->index;
+}
+
+static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *queue;
+ struct hclgevf_tqp *tqp;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclgevf_tqp, q);
+ memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+ }
+}
+
+static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg[2] = {0};
+
+ msg[0] = en;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
+ msg, 1, false, NULL, 0);
+}
+
+static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
+}
+
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+ u8 *new_mac_addr = (u8 *)p;
+ u8 msg_data[ETH_ALEN * 2];
+ int status;
+
+ ether_addr_copy(msg_data, new_mac_addr);
+ ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_MODIFY,
+ msg_data, ETH_ALEN * 2,
+ false, NULL, 0);
+ if (!status)
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
+
+ return status;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_ADD,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_ADD,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ __be16 proto, u16 vlan_id,
+ bool is_kill)
+{
+#define HCLGEVF_VLAN_MBX_MSG_LEN 5
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+
+ if (vlan_id > 4095)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ msg_data[0] = is_kill;
+ memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
+ memcpy(&msg_data[3], &proto, sizeof(proto));
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER, msg_data,
+ HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+}
+
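+/* The 5-byte HCLGE_MBX_VLAN_FILTER payload built above:
+ *
+ *	msg_data[0]   : is_kill (1 = delete the filter, 0 = add it)
+ *	msg_data[1..2]: vlan_id, copied as a raw u16
+ *	msg_data[3..4]: proto, already in __be16 form (ETH_P_8021Q)
+ */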
+static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data[2];
+
+ memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
+ NULL, 0);
+}
+
+static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->fw_version;
+}
+
+static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_misc_vector *vector = &hdev->misc_vector;
+
+ vector->vector_irq = pci_irq_vector(hdev->pdev,
+ HCLGEVF_MISC_VECTOR_NUM);
+ vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+ /* vector status always valid for Vector 0 */
+ hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
+ hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
+
+ hdev->num_msi_left -= 1;
+ hdev->num_msi_used += 1;
+}
+
+static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->service_task);
+}
+
+static void hclgevf_service_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
+
+ mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+
+ hclgevf_task_schedule(hdev);
+}
+
+static void hclgevf_mailbox_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+
+ hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
+
+ if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+ hclgevf_mbx_handler(hdev);
+
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+
+ hdev = container_of(work, struct hclgevf_dev, service_task);
+
+ /* request the link status from the PF. The PF will be able to push
+ * such updates to the VF in the future, so this polling may be
+ * removed later
+ */
+ hclgevf_request_link_info(hdev);
+
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+}
+
+static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
+{
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+}
+
+static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+{
+ u32 cmdq_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
+ HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return true;
+ }
+
+ dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
+
+ return false;
+}
+
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+ writel(en ? 1 : 0, vector->addr);
+}
+
+static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+{
+ struct hclgevf_dev *hdev = data;
+ u32 clearval;
+
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ if (!hclgevf_check_event_cause(hdev, &clearval))
+ goto skip_sched;
+
+ /* schedule the VF mailbox service task, if not already scheduled */
+ hclgevf_mbx_task_schedule(hdev);
+
+ hclgevf_clear_event_cause(hdev, clearval);
+
+skip_sched:
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return IRQ_HANDLED;
+}
+
+static int hclgevf_configure(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ /* get queue configuration from PF */
+ ret = hclge_get_queue_info(hdev);
+ if (ret)
+ return ret;
+ /* get tc configuration from PF */
+ return hclgevf_get_tc_info(hdev);
+}
+
+static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *roce = &hdev->roce;
+ struct hnae3_handle *nic = &hdev->nic;
+
+ roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
+
+ if (hdev->num_msi_left < roce->rinfo.num_vectors ||
+ hdev->num_msi_left == 0)
+ return -EINVAL;
+
+ roce->rinfo.base_vector =
+ hdev->vector_status[hdev->num_msi_used];
+
+ roce->rinfo.netdev = nic->kinfo.netdev;
+ roce->rinfo.roce_io_base = hdev->hw.io_base;
+
+ roce->pdev = nic->pdev;
+ roce->ae_algo = nic->ae_algo;
+ roce->numa_node_mask = nic->numa_node_mask;
+
+ return 0;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i, ret;
+
+ rss_cfg->rss_size = hdev->rss_size_max;
+
+ /* Initialize RSS indirect table for each vport */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
+
+ ret = hclgevf_set_rss_indir_table(hdev);
+ if (ret)
+ return ret;
+
+ return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
+}
+
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+{
+ /* other VLAN config (like VLAN TX/RX offload) will also be added
+ * here later
+ */
+ return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
+ false);
+}
+
+static int hclgevf_ae_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, queue_id;
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ /* ring enable */
+ queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+ if (queue_id < 0) {
+ dev_warn(&hdev->pdev->dev,
+ "Get invalid queue id, ignore it\n");
+ continue;
+ }
+
+ hclgevf_tqp_enable(hdev, queue_id, 0, true);
+ }
+
+ /* reset tqp stats */
+ hclgevf_reset_tqp_stats(handle);
+
+ hclgevf_request_link_info(hdev);
+
+ clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ mod_timer(&hdev->service_timer, jiffies + HZ);
+
+ return 0;
+}
+
+static void hclgevf_ae_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, queue_id;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ /* Ring disable */
+ queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+ if (queue_id < 0) {
+ dev_warn(&hdev->pdev->dev,
+ "Get invalid queue id, ignore it\n");
+ continue;
+ }
+
+ hclgevf_tqp_enable(hdev, queue_id, 0, false);
+ }
+
+ /* reset tqp stats */
+ hclgevf_reset_tqp_stats(handle);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
+ /* setup tasks for the MBX */
+ INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
+ clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+
+ /* setup tasks for service timer */
+ timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
+
+ INIT_WORK(&hdev->service_task, hclgevf_service_task);
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+
+ mutex_init(&hdev->mbx_resp.mbx_mutex);
+
+ /* bring the device down */
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+}
+
+static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
+{
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ if (hdev->service_timer.function)
+ del_timer_sync(&hdev->service_timer);
+ if (hdev->service_task.func)
+ cancel_work_sync(&hdev->service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
+
+ mutex_destroy(&hdev->mbx_resp.mbx_mutex);
+}
+
+static int hclgevf_init_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int vectors;
+ int i;
+
+ hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
+
+ vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+ "failed(%d) to allocate MSI/MSI-X vectors\n",
+ vectors);
+ return vectors;
+ }
+ if (vectors < hdev->num_msi)
+ dev_warn(&hdev->pdev->dev,
+ "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ hdev->num_msi, vectors);
+
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
+ hdev->base_msi_vector = pdev->irq;
+
+ hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(u16), GFP_KERNEL);
+ if (!hdev->vector_status) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hdev->num_msi; i++)
+ hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
+
+ hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(int), GFP_KERNEL);
+ if (!hdev->vector_irq) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ pci_free_irq_vectors(pdev);
+}
+
+static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
+{
+ int ret = 0;
+
+ hclgevf_get_misc_vector(hdev);
+
+ ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
+ 0, "hclgevf_cmd", hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
+ hdev->misc_vector.vector_irq);
+ return ret;
+ }
+
+ /* enable misc. vector(vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return ret;
+}
+
+static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
+{
+ /* disable misc vector(vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclgevf_free_vector(hdev, 0);
+}
+
+static int hclgevf_init_instance(struct hclgevf_dev *hdev,
+ struct hnae3_client *client)
+{
+ int ret;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hdev->nic_client = client;
+ hdev->nic.client = client;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+
+ if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ struct hnae3_client *rc = hdev->roce_client;
+
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+ ret = rc->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+ }
+ break;
+ case HNAE3_CLIENT_UNIC:
+ hdev->nic_client = client;
+ hdev->nic.client = client;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+ break;
+ case HNAE3_CLIENT_ROCE:
+ hdev->roce_client = client;
+ hdev->roce.client = client;
+
+ if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+
+ ret = client->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
+ struct hnae3_client *client)
+{
+ /* un-init roce, if it exists */
+ if (hdev->roce_client)
+ hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+
+ /* un-init nic/unic, if this was not called by roce client */
+ if ((client->ops->uninit_instance) &&
+ (client->type != HNAE3_CLIENT_ROCE))
+ client->ops->uninit_instance(&hdev->nic, 0);
+}
+
+static int hclgevf_register_client(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ return hclgevf_init_instance(hdev, client);
+}
+
+static void hclgevf_unregister_client(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ hclgevf_uninit_instance(hdev, client);
+}
+
+static int hclgevf_pci_init(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_hw *hw;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable PCI device\n");
+ goto err_no_drvdata;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ goto err_disable_device;
+ }
+
+ ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+ hw = &hdev->hw;
+ hw->hdev = hdev;
+ hw->io_base = pci_iomap(pdev, 2, 0);
+ if (!hw->io_base) {
+ dev_err(&pdev->dev, "can't map configuration register space\n");
+ ret = -ENOMEM;
+ goto err_clr_master;
+ }
+
+ return 0;
+
+err_clr_master:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_no_drvdata:
+ pci_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ pci_iounmap(pdev, hdev->hw.io_base);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
+ int ret;
+
+ hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->pdev = pdev;
+ hdev->ae_dev = ae_dev;
+ ae_dev->priv = hdev;
+
+ ret = hclgevf_pci_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI initialization failed\n");
+ return ret;
+ }
+
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
+ goto err_irq_init;
+ }
+
+ hclgevf_state_init(hdev);
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ goto err_misc_irq_init;
+ }
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ ret = hclgevf_configure(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_alloc_tqps(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_set_handle_info(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
+ goto err_config;
+ }
+
+ /* Initialize VF's MTA */
+ hdev->accept_mta_mc = true;
+ ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to set mta filter mode\n", ret);
+ goto err_config;
+ }
+
+ /* Initialize RSS for this VF */
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ goto err_config;
+ }
+
+ pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+
+ return 0;
+
+err_config:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
+ hclgevf_misc_irq_uninit(hdev);
+err_misc_irq_init:
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+err_irq_init:
+ hclgevf_pci_uninit(hdev);
+ return ret;
+}
+
+static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ hclgevf_cmd_uninit(hdev);
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ hclgevf_pci_uninit(hdev);
+ ae_dev->priv = NULL;
+}
+
+static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+
+ return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+/**
+ * hclgevf_get_channels - Get the current channels enabled and max supported.
+ * @handle: hardware information for network interface
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void hclgevf_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ch->max_combined = hclgevf_get_max_channels(hdev);
+ ch->other_count = 0;
+ ch->max_other = 0;
+ ch->combined_count = hdev->num_tqps;
+}
+
+static const struct hnae3_ae_ops hclgevf_ops = {
+ .init_ae_dev = hclgevf_init_ae_dev,
+ .uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .init_client_instance = hclgevf_register_client,
+ .uninit_client_instance = hclgevf_unregister_client,
+ .start = hclgevf_ae_start,
+ .stop = hclgevf_ae_stop,
+ .map_ring_to_vector = hclgevf_map_ring_to_vector,
+ .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
+ .get_vector = hclgevf_get_vector,
+ .reset_queue = hclgevf_reset_tqp,
+ .set_promisc_mode = hclgevf_set_promisc_mode,
+ .get_mac_addr = hclgevf_get_mac_addr,
+ .set_mac_addr = hclgevf_set_mac_addr,
+ .add_uc_addr = hclgevf_add_uc_addr,
+ .rm_uc_addr = hclgevf_rm_uc_addr,
+ .add_mc_addr = hclgevf_add_mc_addr,
+ .rm_mc_addr = hclgevf_rm_mc_addr,
+ .get_stats = hclgevf_get_stats,
+ .update_stats = hclgevf_update_stats,
+ .get_strings = hclgevf_get_strings,
+ .get_sset_count = hclgevf_get_sset_count,
+ .get_rss_key_size = hclgevf_get_rss_key_size,
+ .get_rss_indir_size = hclgevf_get_rss_indir_size,
+ .get_rss = hclgevf_get_rss,
+ .set_rss = hclgevf_set_rss,
+ .get_tc_size = hclgevf_get_tc_size,
+ .get_fw_version = hclgevf_get_fw_version,
+ .set_vlan_filter = hclgevf_set_vlan_filter,
+ .get_channels = hclgevf_get_channels,
+};
+
+static struct hnae3_ae_algo ae_algovf = {
+ .ops = &hclgevf_ops,
+ .name = HCLGEVF_NAME,
+ .pdev_id_table = ae_algovf_pci_tbl,
+};
+
+static int hclgevf_init(void)
+{
+ pr_info("%s is initializing\n", HCLGEVF_NAME);
+
+ return hnae3_register_ae_algo(&ae_algovf);
+}
+
+static void hclgevf_exit(void)
+{
+ hnae3_unregister_ae_algo(&ae_algovf);
+}
+module_init(hclgevf_init);
+module_exit(hclgevf_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HCLGEVF Driver");
+MODULE_VERSION(HCLGEVF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
new file mode 100644
index 000000000000..a63bee4a3674
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_MAIN_H
+#define __HCLGEVF_MAIN_H
+#include <linux/fs.h>
+#include <linux/types.h>
+#include "hclge_mbx.h"
+#include "hclgevf_cmd.h"
+#include "hnae3.h"
+
+#define HCLGEVF_MOD_VERSION "v1.0"
+#define HCLGEVF_DRIVER_NAME "hclgevf"
+
+#define HCLGEVF_ROCEE_VECTOR_NUM 0
+#define HCLGEVF_MISC_VECTOR_NUM 0
+
+#define HCLGEVF_INVALID_VPORT 0xffff
+
+/* The actual number depends upon the total number of VFs created by
+ * the physical function, but the maximum number of possible vectors
+ * per VF is {VFn(1-32), VECTn(32 + 1)}.
+ */
+#define HCLGEVF_MAX_VF_VECTOR_NUM (32 + 1)
+
+#define HCLGEVF_VECTOR_REG_BASE 0x20000
+#define HCLGEVF_MISC_VECTOR_REG_BASE 0x20400
+#define HCLGEVF_VECTOR_REG_OFFSET 0x4
+#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
+
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+
+#define HCLGEVF_TQP_RESET_TRY_TIMES 10
+
+#define HCLGEVF_RSS_IND_TBL_SIZE 512
+#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
+#define HCLGEVF_RSS_KEY_SIZE 40
+#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
+#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
+#define HCLGEVF_RSS_CFG_TBL_NUM \
+ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+
+/* states of hclgevf device & tasks */
+enum hclgevf_states {
+ /* device states */
+ HCLGEVF_STATE_DOWN,
+ HCLGEVF_STATE_DISABLED,
+ /* task states */
+ HCLGEVF_STATE_SERVICE_SCHED,
+ HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ HCLGEVF_STATE_MBX_HANDLING,
+};
+
+#define HCLGEVF_MPF_ENBALE 1
+
+struct hclgevf_mac {
+ u8 mac_addr[ETH_ALEN];
+ int link;
+};
+
+struct hclgevf_hw {
+ void __iomem *io_base;
+ int num_vec;
+ struct hclgevf_cmq cmq;
+ struct hclgevf_mac mac;
+ void *hdev; /* hclgevf device it is part of */
+};
+
+/* TQP stats */
+struct hlcgevf_tqp_stats {
+ /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+ /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclgevf_tqp {
+ struct device *dev; /* device for DMA mapping */
+ struct hnae3_queue q;
+ struct hlcgevf_tqp_stats tqp_stats;
+ u16 index; /* global index in a NIC controller */
+
+ bool alloced;
+};
+
+struct hclgevf_cfg {
+ u8 vmdq_vport_num;
+ u8 tc_num;
+ u16 tqp_desc_num;
+ u16 rx_buf_len;
+ u8 phy_addr;
+ u8 media_type;
+ u8 mac_addr[ETH_ALEN];
+ u32 numa_node_map;
+};
+
+struct hclgevf_rss_cfg {
+ u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
+ u32 hash_algo;
+ u32 rss_size;
+ u8 hw_tc_map;
+ u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+};
+
+struct hclgevf_misc_vector {
+ u8 __iomem *addr;
+ int vector_irq;
+};
+
+struct hclgevf_dev {
+ struct pci_dev *pdev;
+ struct hnae3_ae_dev *ae_dev;
+ struct hclgevf_hw hw;
+ struct hclgevf_misc_vector misc_vector;
+ struct hclgevf_rss_cfg rss_cfg;
+ unsigned long state;
+
+ u32 fw_version;
+ u16 num_tqps; /* num task queue pairs of this VF */
+
+ u16 alloc_rss_size; /* allocated RSS task queue */
+ u16 rss_size_max; /* HW defined max RSS task queue */
+
+ u16 num_alloc_vport; /* num vports this driver supports */
+ u32 numa_node_mask;
+ u16 rx_buf_len;
+ u16 num_desc;
+ u8 hw_tc_map;
+
+ u16 num_msi;
+ u16 num_msi_left;
+ u16 num_msi_used;
+ u32 base_msi_vector;
+ u16 *vector_status;
+ int *vector_irq;
+
+ bool accept_mta_mc; /* whether to accept multicast via the MTA filter */
+ struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
+
+ struct timer_list service_timer;
+ struct work_struct service_task;
+ struct work_struct mbx_service_task;
+
+ struct hclgevf_tqp *htqp;
+
+ struct hnae3_handle nic;
+ struct hnae3_handle roce;
+
+ struct hnae3_client *nic_client;
+ struct hnae3_client *roce_client;
+ u32 flag;
+};
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+ const u8 *msg_data, u8 msg_len, bool need_resp,
+ u8 *resp_data, u16 resp_len);
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
new file mode 100644
index 000000000000..e39cad285fa9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_mbx.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
+{
+ /* this function should be called with mbx_resp.mbx_mutex held
+ * to protect the received_resp flag from race conditions
+ */
+ hdev->mbx_resp.received_resp = false;
+ hdev->mbx_resp.origin_mbx_msg = 0;
+ hdev->mbx_resp.resp_status = 0;
+ memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
+}
+
+/* hclgevf_get_mbx_resp: used to get a response from the PF after the VF sends
+ * a mailbox message to the PF.
+ * @hdev: pointer to struct hclgevf_dev
+ * @code0: the message code of the originating mailbox request
+ * @code1: the message subcode of the originating mailbox request
+ * @resp_data: buffer to copy the response data into, may be NULL
+ * @resp_len: length of the resp_data buffer
+ */
+static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ u8 *resp_data, u16 resp_len)
+{
+#define HCLGEVF_MAX_TRY_TIMES 500
+#define HCLGEVF_SLEEP_USCOEND 1000
+ struct hclgevf_mbx_resp_status *mbx_resp;
+ u16 r_code0, r_code1;
+ int i = 0;
+
+ if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "VF mbx response len(=%d) exceeds maximum(=%d)\n",
+ resp_len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ return -EINVAL;
+ }
+
+ while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+ udelay(HCLGEVF_SLEEP_USCOEND);
+ i++;
+ }
+
+ if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not get mbx resp(=%d) from PF in %d tries\n",
+ hdev->mbx_resp.received_resp, i);
+ return -EIO;
+ }
+
+ mbx_resp = &hdev->mbx_resp;
+ r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
+ r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);
+ if (resp_data)
+ memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
+
+ hclgevf_reset_mbx_resp_status(hdev);
+
+ if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp code(code0=%d,code1=%d), %d",
+ code0, code1, mbx_resp->resp_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+ const u8 *msg_data, u8 msg_len, bool need_resp,
+ u8 *resp_data, u16 resp_len)
+{
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+ /* first two bytes are reserved for code & subcode */
+ if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
+ dev_err(&hdev->pdev->dev,
+ "VF send mbx msg fail, msg len %d exceeds max len %d\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EINVAL;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->msg[0] = code;
+ req->msg[1] = subcode;
+ memcpy(&req->msg[2], msg_data, msg_len);
+
+ /* synchronous send */
+ if (need_resp) {
+ mutex_lock(&hdev->mbx_resp.mbx_mutex);
+ hclgevf_reset_mbx_resp_status(hdev);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ return status;
+ }
+
+ status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
+ resp_len);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ } else {
+ /* asynchronous send */
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ return status;
+ }
+ }
+
+ return status;
+}
+
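+/* Note on the synchronous path above: with need_resp = true the caller is
+ * serialized on mbx_resp.mbx_mutex and hclgevf_get_mbx_resp() busy-waits for
+ * up to HCLGEVF_MAX_TRY_TIMES * HCLGEVF_SLEEP_USCOEND microseconds
+ * (500 * 1000 us = 500 ms) for the PF to answer, so this helper must not be
+ * called from atomic context.
+ */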
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_mbx_resp_status *resp;
+ struct hclge_mbx_pf_to_vf_cmd *req;
+ struct hclgevf_cmq_ring *crq;
+ struct hclgevf_desc *desc;
+ u16 link_status, flag;
+ u8 *temp;
+ int i;
+
+ resp = &hdev->mbx_resp;
+ crq = &hdev->hw.cmq.crq;
+
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) {
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+
+ switch (req->msg[0]) {
+ case HCLGE_MBX_PF_VF_RESP:
+ if (resp->received_resp)
+ dev_warn(&hdev->pdev->dev,
+ "VF mbx resp flag not clear(%d)\n",
+ req->msg[1]);
+ resp->received_resp = true;
+
+ resp->origin_mbx_msg = (req->msg[1] << 16);
+ resp->origin_mbx_msg |= req->msg[2];
+ resp->resp_status = req->msg[3];
+
+ temp = (u8 *)&req->msg[4];
+ for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
+ resp->additional_info[i] = *temp;
+ temp++;
+ }
+ break;
+ case HCLGE_MBX_LINK_STAT_CHANGE:
+ link_status = le16_to_cpu(req->msg[1]);
+
+ /* update upper layer with new link status */
+ hclgevf_update_link_status(hdev, link_status);
+
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "VF received unsupported(%d) mbx msg from PF\n",
+ req->msg[0]);
+ break;
+ }
+ hclge_mbx_ring_ptr_move_crq(crq);
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ }
+
+ /* Write back the CRQ head pointer, the M7 firmware needs this pointer */
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
+}