Diffstat (limited to 'drivers/net/ethernet/wangxun/libwx')
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/Makefile        |   1
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.c         |  23
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.h         |   2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c        |  60
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_mbx.c        | 243
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_mbx.h        |  22
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_ptp.c        |   2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_sriov.c      |   4
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_type.h       |  16
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf.c         | 599
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf.h         | 127
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf_common.c  | 414
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf_common.h  |  22
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c     | 280
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h     |  14
15 files changed, 1792 insertions(+), 37 deletions(-)
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 9b78b604a94e..a71b0ad77de3 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o wx_mbx.o wx_sriov.o
+libwx-objs += wx_vf.o wx_vf_lib.o wx_vf_common.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 0f4be72116b8..bcd07a715752 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -11,6 +11,7 @@
#include "wx_type.h"
#include "wx_lib.h"
#include "wx_sriov.h"
+#include "wx_vf.h"
#include "wx_hw.h"
static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
@@ -124,6 +125,11 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
{
u32 mask;
+ if (wx->pdev->is_virtfn) {
+ wr32(wx, WX_VXIMC, qmask);
+ return;
+ }
+
mask = (qmask & U32_MAX);
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
@@ -1107,7 +1113,7 @@ static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
-static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
+u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
{
u32 vector = 0;
@@ -1827,7 +1833,7 @@ void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
}
EXPORT_SYMBOL(wx_disable_rx_queue);
-static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
+void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
u8 reg_idx = ring->reg_idx;
u32 rxdctl;
@@ -1843,6 +1849,7 @@ static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
reg_idx);
}
}
+EXPORT_SYMBOL(wx_enable_rx_queue);
static void wx_configure_srrctl(struct wx *wx,
struct wx_ring *rx_ring)
@@ -1912,7 +1919,6 @@ static void wx_configure_rx_ring(struct wx *wx,
struct wx_ring *ring)
{
u16 reg_idx = ring->reg_idx;
- union wx_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
@@ -1942,9 +1948,9 @@ static void wx_configure_rx_ring(struct wx *wx,
memset(ring->rx_buffer_info, 0,
sizeof(struct wx_rx_buffer) * ring->count);
- /* initialize Rx descriptor 0 */
- rx_desc = WX_RX_DESC(ring, 0);
- rx_desc->wb.upper.length = 0;
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
/* enable receive descriptor ring */
wr32m(wx, WX_PX_RR_CFG(reg_idx),
@@ -2368,7 +2374,8 @@ int wx_sw_init(struct wx *wx)
wx->bus.device = PCI_SLOT(pdev->devfn);
wx->bus.func = PCI_FUNC(pdev->devfn);
- if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
+ if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN ||
+ pdev->is_virtfn) {
wx->subsystem_vendor_id = pdev->subsystem_vendor;
wx->subsystem_device_id = pdev->subsystem_device;
} else {
@@ -2778,6 +2785,8 @@ void wx_update_stats(struct wx *wx)
hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
+ /* qmprc is not cleared on read, manually reset it */
+ hwstats->qmprc = 0;
for (i = wx->num_vfs * wx->num_rx_queues_per_pool;
i < wx->mac.max_rx_queues; i++)
hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 26a56cba60b9..2393a743b564 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -29,6 +29,7 @@ void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool);
int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool);
void wx_flush_sw_mac_table(struct wx *wx);
+u32 wx_mta_vector(struct wx *wx, u8 *mc_addr);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
@@ -37,6 +38,7 @@ void wx_enable_sec_rx_path(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
+void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring);
void wx_configure_rx(struct wx *wx);
void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
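wx_mta_vector() is made non-static above so the VF multicast path added in wx_vf.c can reuse the PF's hash when filling WX_VF_SET_MULTICAST messages. Its body is not part of this diff; the following userspace sketch models the Intel-style MTA hashing this driver family uses, where the exact bit selection per mc_filter_type is an assumption drawn from related drivers rather than from this patch.

#include <stdint.h>

/* Hypothetical standalone model of the MTA vector hash: extract 12 bits
 * from the upper bytes of the multicast MAC address; mc_filter_type
 * (the MCSTCTRL MO field, 0..3) selects which 12 bits.
 */
static uint32_t mta_vector(int mc_filter_type, const uint8_t *mc_addr)
{
	uint32_t vector = 0;

	switch (mc_filter_type) {
	case 0:	/* bits [47:36] of the address */
		vector = (mc_addr[4] >> 4) | ((uint32_t)mc_addr[5] << 4);
		break;
	case 1:	/* bits [46:35] */
		vector = (mc_addr[4] >> 3) | ((uint32_t)mc_addr[5] << 5);
		break;
	case 2:	/* bits [45:34] */
		vector = (mc_addr[4] >> 2) | ((uint32_t)mc_addr[5] << 6);
		break;
	case 3:	/* bits [43:32] */
		vector = mc_addr[4] | ((uint32_t)mc_addr[5] << 8);
		break;
	}

	return vector & 0xFFF;	/* 12-bit MTA index */
}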
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 5c747509d56b..723785ef87bb 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -174,10 +174,6 @@ static void wx_dma_sync_frag(struct wx_ring *rx_ring,
skb_frag_off(frag),
skb_frag_size(frag),
DMA_FROM_DEVICE);
-
- /* If the page was released, just unmap it. */
- if (unlikely(WX_CB(skb)->page_released))
- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
}
static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
@@ -227,10 +223,6 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
struct sk_buff *skb,
int rx_buffer_pgcnt)
{
- if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
- /* the page has been released from the ring */
- WX_CB(skb)->page_released = true;
-
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
rx_buffer->skb = NULL;
@@ -315,7 +307,7 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
return false;
dma = page_pool_get_dma_addr(page);
- bi->page_dma = dma;
+ bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
@@ -352,7 +344,7 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
DMA_FROM_DEVICE);
rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
+ cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
@@ -365,6 +357,8 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
/* clear the status bits for the next_to_use descriptor */
rx_desc->wb.upper.status_error = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
@@ -1705,6 +1699,7 @@ static void wx_set_rss_queues(struct wx *wx)
clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ wx->ring_feature[RING_F_FDIR].indices = 1;
/* Use Flow Director in addition to RSS to ensure the best
* distribution of flows across cores, even when an FDIR flow
* isn't matched.
@@ -1746,7 +1741,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
- struct irq_affinity affd = { .pre_vectors = 1 };
+ struct irq_affinity affd = { .post_vectors = 1 };
int nvecs, i;
/* We start by asking for one vector per queue pair */
@@ -1783,16 +1778,24 @@ static int wx_acquire_msix_vectors(struct wx *wx)
return nvecs;
}
- wx->msix_entry->entry = 0;
- wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
nvecs -= 1;
for (i = 0; i < nvecs; i++) {
wx->msix_q_entries[i].entry = i;
- wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1);
+ wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i);
}
wx->num_q_vectors = nvecs;
+ wx->msix_entry->entry = nvecs;
+ wx->msix_entry->vector = pci_irq_vector(wx->pdev, nvecs);
+
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags)) {
+ wx->msix_entry->entry = 0;
+ wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
+ wx->msix_q_entries[0].entry = 0;
+ wx->msix_q_entries[0].vector = pci_irq_vector(wx->pdev, 1);
+ }
+
return 0;
}
@@ -1810,7 +1813,7 @@ static int wx_set_interrupt_capability(struct wx *wx)
/* We will try to get MSI-X interrupts first */
ret = wx_acquire_msix_vectors(wx);
- if (ret == 0 || (ret == -ENOMEM))
+ if (ret == 0 || (ret == -ENOMEM) || pdev->is_virtfn)
return ret;
/* Disable VMDq support */
@@ -2161,7 +2164,12 @@ int wx_init_interrupt_scheme(struct wx *wx)
int ret;
/* Number of supported queues */
- wx_set_num_queues(wx);
+ if (wx->pdev->is_virtfn) {
+ if (wx->set_num_queues)
+ wx->set_num_queues(wx);
+ } else {
+ wx_set_num_queues(wx);
+ }
/* Set interrupt mode */
ret = wx_set_interrupt_capability(wx);
@@ -2291,6 +2299,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
if (direction == -1) {
/* other causes */
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
+ msix_vector = 0;
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = 0;
ivar = rd32(wx, WX_PX_MISC_IVAR);
@@ -2299,8 +2309,6 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar);
} else {
/* tx or rx causes */
- if (!(wx->mac.type == wx_mac_em && wx->num_vfs == 7))
- msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
@@ -2339,7 +2347,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
itr_reg |= WX_PX_ITR_CNT_WDIS;
- wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg);
+ wr32(wx, WX_PX_ITR(v_idx), itr_reg);
}
/**
@@ -2392,9 +2400,9 @@ void wx_configure_vectors(struct wx *wx)
wx_write_eitr(q_vector);
}
- wx_set_ivar(wx, -1, 0, 0);
+ wx_set_ivar(wx, -1, 0, v_idx);
if (pdev->msix_enabled)
- wr32(wx, WX_PX_ITR(0), 1950);
+ wr32(wx, WX_PX_ITR(v_idx), 1950);
}
EXPORT_SYMBOL(wx_configure_vectors);
@@ -2414,9 +2422,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
- if (WX_CB(skb)->page_released)
- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
-
dev_kfree_skb(skb);
}
@@ -2440,6 +2445,9 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
}
}
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -2623,7 +2631,7 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring)
struct page_pool_params pp_params = {
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = 0,
- .pool_size = rx_ring->size,
+ .pool_size = rx_ring->count,
.nid = dev_to_node(rx_ring->dev),
.dev = rx_ring->dev,
.dma_dir = DMA_FROM_DEVICE,
@@ -3116,7 +3124,7 @@ EXPORT_SYMBOL(wx_service_event_complete);
void wx_service_timer(struct timer_list *t)
{
- struct wx *wx = from_timer(wx, t, service_timer);
+ struct wx *wx = timer_container_of(wx, t, service_timer);
unsigned long next_event_offset = HZ * 2;
/* Reset the timer */
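The wx_lib.c changes above renumber the MSI-X vectors: queue vectors now start at entry 0 and the misc ("other") cause moves to the last entry, which is why wx_write_eitr() and wx_configure_vectors() drop the +1 offset and WX_INTR_Q(i) in wx_type.h becomes BIT(i). When WX_FLAG_IRQ_VECTOR_SHARED is set (7 VFs on an em MAC), the misc cause is folded back onto entry 0 and queue 0's interrupt comes from PCI vector 1. A small standalone sketch of the resulting assignment, mirroring wx_acquire_msix_vectors():

#include <stdbool.h>
#include <stdio.h>

/* Model of the new layout: with nvecs queue vectors granted, queues use
 * PCI vectors [0, nvecs - 1] and misc uses vector nvecs; in the shared
 * case misc takes vector 0 and queue 0 shifts to vector 1 (its IVAR
 * entry stays 0).
 */
static void print_vector_layout(int nvecs, bool shared)
{
	for (int i = 0; i < nvecs; i++)
		printf("queue %d: ivar entry %d, pci vector %d\n",
		       i, i, (shared && i == 0) ? 1 : i);
	printf("misc   : pci vector %d\n", shared ? 0 : nvecs);
}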
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
index 73af5f11c3bd..2aa03eadf064 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
@@ -174,3 +174,246 @@ int wx_check_for_rst_pf(struct wx *wx, u16 vf)
return 0;
}
+
+static u32 wx_read_v2p_mailbox(struct wx *wx)
+{
+ u32 mailbox = rd32(wx, WX_VXMAILBOX);
+
+ mailbox |= wx->mbx.mailbox;
+ wx->mbx.mailbox |= mailbox & WX_VXMAILBOX_R2C_BITS;
+
+ return mailbox;
+}
+
+static u32 wx_mailbox_get_lock_vf(struct wx *wx)
+{
+ wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_VFU);
+ return wx_read_v2p_mailbox(wx);
+}
+
+/**
+ * wx_obtain_mbx_lock_vf - obtain mailbox lock
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+static int wx_obtain_mbx_lock_vf(struct wx *wx)
+{
+ int count = 5, ret;
+ u32 mailbox;
+
+ ret = readx_poll_timeout_atomic(wx_mailbox_get_lock_vf, wx, mailbox,
+ (mailbox & WX_VXMAILBOX_VFU),
+ 1, count);
+ if (ret)
+ wx_err(wx, "Failed to obtain mailbox lock for VF.\n");
+
+ return ret;
+}
+
+static int wx_check_for_bit_vf(struct wx *wx, u32 mask)
+{
+ u32 mailbox = wx_read_v2p_mailbox(wx);
+
+ wx->mbx.mailbox &= ~mask;
+
+ return (mailbox & mask ? 0 : -EBUSY);
+}
+
+/**
+ * wx_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the PF has set the ACK bit, else -EBUSY
+ **/
+static int wx_check_for_ack_vf(struct wx *wx)
+{
+ /* read clear the pf ack bit */
+ return wx_check_for_bit_vf(wx, WX_VXMAILBOX_PFACK);
+}
+
+/**
+ * wx_check_for_msg_vf - checks to see if the PF has sent mail
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the PF has set the status bit, else -EBUSY
+ **/
+int wx_check_for_msg_vf(struct wx *wx)
+{
+ /* read clear the pf sts bit */
+ return wx_check_for_bit_vf(wx, WX_VXMAILBOX_PFSTS);
+}
+
+/**
+ * wx_check_for_rst_vf - checks to see if the PF has reset
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the PF has signaled reset done, -EBUSY otherwise
+ **/
+int wx_check_for_rst_vf(struct wx *wx)
+{
+ /* read clear the pf reset done bit */
+ return wx_check_for_bit_vf(wx,
+ WX_VXMAILBOX_RSTD |
+ WX_VXMAILBOX_RSTI);
+}
+
+/**
+ * wx_poll_for_msg - Wait for message notification
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the VF has successfully received a message notification
+ **/
+static int wx_poll_for_msg(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 val;
+
+ return readx_poll_timeout_atomic(wx_check_for_msg_vf, wx, val,
+ (val == 0), mbx->udelay, mbx->timeout);
+}
+
+/**
+ * wx_poll_for_ack - Wait for message acknowledgment
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the VF has successfully received a message ack
+ **/
+static int wx_poll_for_ack(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 val;
+
+ return readx_poll_timeout_atomic(wx_check_for_ack_vf, wx, val,
+ (val == 0), mbx->udelay, mbx->timeout);
+}
+
+/**
+ * wx_read_posted_mbx - Wait for message notification and receive message
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+int wx_read_posted_mbx(struct wx *wx, u32 *msg, u16 size)
+{
+ int ret;
+
+ ret = wx_poll_for_msg(wx);
+ /* if a message was received, read it; otherwise we timed out */
+ if (ret)
+ return ret;
+
+ return wx_read_mbx_vf(wx, msg, size);
+}
+
+/**
+ * wx_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+int wx_write_posted_mbx(struct wx *wx, u32 *msg, u16 size)
+{
+ int ret;
+
+ /* send msg */
+ ret = wx_write_mbx_vf(wx, msg, size);
+ /* if msg sent wait until we receive an ack */
+ if (ret)
+ return ret;
+
+ return wx_poll_for_ack(wx);
+}
+
+/**
+ * wx_write_mbx_vf - Write a message to the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully copied message into the buffer
+ **/
+int wx_write_mbx_vf(struct wx *wx, u32 *msg, u16 size)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+ /* mbx->size is up to 15 */
+ if (size > mbx->size) {
+ wx_err(wx, "Invalid mailbox message size %d", size);
+ return -EINVAL;
+ }
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_vf(wx);
+ if (ret)
+ return ret;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ wx_check_for_msg_vf(wx);
+ wx_check_for_ack_vf(wx);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(wx, WX_VXMBMEM, i, msg[i]);
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_REQ);
+
+ return 0;
+}
+
+/**
+ * wx_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully copied message into the buffer
+ **/
+int wx_read_mbx_vf(struct wx *wx, u32 *msg, u16 size)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+ /* limit read to size of mailbox and mbx->size is up to 15 */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_vf(wx);
+ if (ret)
+ return ret;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(wx, WX_VXMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_ACK);
+
+ return 0;
+}
+
+int wx_init_mbx_params_vf(struct wx *wx)
+{
+ wx->vfinfo = kzalloc(sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!wx->vfinfo)
+ return -ENOMEM;
+
+ /* Initialize mailbox parameters */
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
+ wx->mbx.mailbox = WX_VXMAILBOX;
+ wx->mbx.udelay = 10;
+ wx->mbx.timeout = 1000;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_init_mbx_params_vf);
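Taken together, the VF-side helpers above implement a lock/copy/signal protocol: wx_obtain_mbx_lock_vf() polls for VFU ownership, message dwords travel through WX_VXMBMEM, and the REQ/ACK bits of WX_VXMAILBOX signal the PF. A typical request/response round trip looks like this sketch, which restates the wx_mbx_write_and_read_reply() helper that wx_vf.c (added later in this patch) builds on top of these functions:

/* Sketch of a VF->PF mailbox round trip using the helpers above; msg is
 * reused as both request and reply buffer, as the callers in wx_vf.c do.
 */
static int wx_vf_transact(struct wx *wx, u32 *msg, u16 size)
{
	int ret;

	/* copy the request into VXMBMEM, raise REQ, poll for PFACK */
	ret = wx_write_posted_mbx(wx, msg, size);
	if (ret)
		return ret;

	/* poll for PFSTS, then copy the reply out of VXMBMEM */
	return wx_read_posted_mbx(wx, msg, size);
}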
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
index 05aae138dbc3..82df9218490a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
@@ -11,6 +11,20 @@
#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */
#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
+/* VF Registers */
+#define WX_VXMAILBOX 0x600
+#define WX_VXMAILBOX_REQ BIT(0) /* Request for PF Ready bit */
+#define WX_VXMAILBOX_ACK BIT(1) /* Ack PF message received */
+#define WX_VXMAILBOX_VFU BIT(2) /* VF owns the mailbox buffer */
+#define WX_VXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
+#define WX_VXMAILBOX_PFSTS BIT(4) /* PF wrote a message in the MB */
+#define WX_VXMAILBOX_PFACK BIT(5) /* PF ack the previous VF msg */
+#define WX_VXMAILBOX_RSTI BIT(6) /* PF has reset indication */
+#define WX_VXMAILBOX_RSTD BIT(7) /* PF has indicated reset done */
+#define WX_VXMAILBOX_R2C_BITS (WX_VXMAILBOX_RSTD | \
+ WX_VXMAILBOX_PFSTS | WX_VXMAILBOX_PFACK)
+
+#define WX_VXMBMEM 0x00C00 /* 16*4B */
#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */
#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */
@@ -74,4 +88,12 @@ int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id);
int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id);
+int wx_read_posted_mbx(struct wx *wx, u32 *msg, u16 size);
+int wx_write_posted_mbx(struct wx *wx, u32 *msg, u16 size);
+int wx_check_for_rst_vf(struct wx *wx);
+int wx_check_for_msg_vf(struct wx *wx);
+int wx_read_mbx_vf(struct wx *wx, u32 *msg, u16 size);
+int wx_write_mbx_vf(struct wx *wx, u32 *msg, u16 size);
+int wx_init_mbx_params_vf(struct wx *wx);
+
#endif /* _WX_MBX_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
index 2c39b879f977..44f3e6505246 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
@@ -652,7 +652,7 @@ static int wx_ptp_set_timestamp_mode(struct wx *wx,
return 0;
}
-static u64 wx_ptp_read(const struct cyclecounter *hw_cc)
+static u64 wx_ptp_read(struct cyclecounter *hw_cc)
{
struct wx *wx = container_of(hw_cc, struct wx, hw_cc);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
index e8656d9d733b..c82ae137756c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -64,6 +64,7 @@ static void wx_sriov_clear_data(struct wx *wx)
wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
wx->ring_feature[RING_F_VMDQ].offset = 0;
+ clear_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
/* Disable VMDq flag so device will be set in NM mode */
if (wx->ring_feature[RING_F_VMDQ].limit == 1)
@@ -78,6 +79,9 @@ static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);
+ if (num_vfs == 7 && wx->mac.type == wx_mac_em)
+ set_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
+
/* Enable VMDq flag so device will be set in VM mode */
set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
if (!wx->ring_feature[RING_F_VMDQ].limit)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 7730c9fc3e02..9d5d10f9e410 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -825,6 +825,11 @@ struct wx_bus_info {
struct wx_mbx_info {
u16 size;
+ u32 mailbox;
+ u32 udelay;
+ u32 timeout;
+ /* lock mbx access */
+ spinlock_t mbx_lock;
};
struct wx_thermal_sensor_data {
@@ -909,7 +914,6 @@ enum wx_reset_type {
struct wx_cb {
dma_addr_t dma;
u16 append_cnt; /* number of skb's appended */
- bool page_released;
bool dma_released;
};
@@ -998,7 +1002,6 @@ struct wx_tx_buffer {
struct wx_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- dma_addr_t page_dma;
struct page *page;
unsigned int page_offset;
};
@@ -1191,6 +1194,7 @@ enum wx_pf_flags {
WX_FLAG_VMDQ_ENABLED,
WX_FLAG_VLAN_PROMISC,
WX_FLAG_SRIOV_ENABLED,
+ WX_FLAG_IRQ_VECTOR_SHARED,
WX_FLAG_FDIR_CAPABLE,
WX_FLAG_FDIR_HASH,
WX_FLAG_FDIR_PERFECT,
@@ -1200,6 +1204,8 @@ enum wx_pf_flags {
WX_FLAG_PTP_PPS_ENABLED,
WX_FLAG_NEED_LINK_CONFIG,
WX_FLAG_NEED_SFP_RESET,
+ WX_FLAG_NEED_UPDATE_LINK,
+ WX_FLAG_NEED_DO_RESET,
WX_PF_FLAGS_NBITS /* must be last */
};
@@ -1210,6 +1216,7 @@ struct wx {
void *priv;
u8 __iomem *hw_addr;
+ u8 __iomem *b4_addr; /* vf only */
struct pci_dev *pdev;
struct net_device *netdev;
struct wx_bus_info bus;
@@ -1284,6 +1291,8 @@ struct wx {
u32 *isb_mem;
u32 isb_tag[WX_ISB_MAX];
bool misc_irq_domain;
+ u32 eims_other;
+ u32 eims_enable_mask;
#define WX_MAX_RETA_ENTRIES 128
#define WX_RSS_INDIR_TBL_MAX 64
@@ -1315,6 +1324,7 @@ struct wx {
int (*setup_tc)(struct net_device *netdev, u8 tc);
void (*do_reset)(struct net_device *netdev);
int (*ptp_setup_sdp)(struct wx *wx);
+ void (*set_num_queues)(struct wx *wx);
bool pps_enabled;
u64 pps_width;
@@ -1343,7 +1353,7 @@ struct wx {
};
#define WX_INTR_ALL (~0ULL)
-#define WX_INTR_Q(i) BIT((i) + 1)
+#define WX_INTR_Q(i) BIT((i))
/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf.c b/drivers/net/ethernet/wangxun/libwx/wx_vf.c
new file mode 100644
index 000000000000..7567216a005f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_mbx.h"
+#include "wx_vf.h"
+
+static void wx_virt_clr_reg(struct wx *wx)
+{
+ u32 vfsrrctl, i;
+
+ /* VXRXDCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
+ vfsrrctl = WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE));
+ vfsrrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE));
+
+ /* clear all rxd ctl */
+ for (i = 0; i < WX_VF_MAX_RING_NUMS; i++)
+ wr32m(wx, WX_VXRXDCTL(i),
+ WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK,
+ vfsrrctl);
+
+ rd32(wx, WX_VXSTATUS);
+}
+
+/**
+ * wx_init_hw_vf - virtual function hardware initialization
+ * @wx: pointer to hardware structure
+ *
+ * Initialize the mac address
+ **/
+void wx_init_hw_vf(struct wx *wx)
+{
+ wx_get_mac_addr_vf(wx, wx->mac.addr);
+}
+EXPORT_SYMBOL(wx_init_hw_vf);
+
+static int wx_mbx_write_and_read_reply(struct wx *wx, u32 *req_buf,
+ u32 *resp_buf, u16 size)
+{
+ int ret;
+
+ ret = wx_write_posted_mbx(wx, req_buf, size);
+ if (ret)
+ return ret;
+
+ return wx_read_posted_mbx(wx, resp_buf, size);
+}
+
+/**
+ * wx_reset_hw_vf - Performs hardware reset
+ * @wx: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_reset_hw_vf(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 msgbuf[4] = {WX_VF_RESET};
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 b4_buf[16] = {0};
+ u32 timeout = 200;
+ int ret;
+ u32 i;
+
+ /* Call wx stop to disable tx/rx and clear interrupts */
+ wx_stop_adapter_vf(wx);
+
+ /* reset the api version */
+ wx->vfinfo->vf_api = wx_mbox_api_null;
+
+ /* backup msix vectors */
+ if (wx->b4_addr) {
+ for (i = 0; i < 16; i++)
+ b4_buf[i] = readl(wx->b4_addr + i * 4);
+ }
+
+ wr32m(wx, WX_VXCTRL, WX_VXCTRL_RST, WX_VXCTRL_RST);
+ rd32(wx, WX_VXSTATUS);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!wx_check_for_rst_vf(wx) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ /* restore msix vectors */
+ if (wx->b4_addr) {
+ for (i = 0; i < 16; i++)
+ writel(b4_buf[i], wx->b4_addr + i * 4);
+ }
+
+ /* amlite: bme */
+ if (wx->mac.type == wx_mac_aml || wx->mac.type == wx_mac_aml40)
+ wr32(wx, WX_VX_PF_BME, WX_VF_BME_ENABLE);
+
+ if (!timeout)
+ return -EBUSY;
+
+ /* Reset VF registers to initial values */
+ wx_virt_clr_reg(wx);
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = 2000;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_ACK) &&
+ msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ if (msgbuf[0] == (WX_VF_RESET | WX_VT_MSGTYPE_ACK))
+ ether_addr_copy(wx->mac.perm_addr, addr);
+
+ wx->mac.mc_filter_type = msgbuf[3];
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_reset_hw_vf);
+
+/**
+ * wx_stop_adapter_vf - Generic stop Tx/Rx units
+ * @wx: pointer to hardware structure
+ *
+ * Clears interrupts, disables transmit and receive units.
+ **/
+void wx_stop_adapter_vf(struct wx *wx)
+{
+ u32 reg_val;
+ u16 i;
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ wr32(wx, WX_VXIMS, WX_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ wr32(wx, WX_VXICR, U32_MAX);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < wx->mac.max_tx_queues; i++)
+ wr32(wx, WX_VXTXDCTL(i), WX_VXTXDCTL_FLUSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < wx->mac.max_rx_queues; i++) {
+ reg_val = rd32(wx, WX_VXRXDCTL(i));
+ reg_val &= ~WX_VXRXDCTL_ENABLE;
+ wr32(wx, WX_VXRXDCTL(i), reg_val);
+ }
+ /* Clear packet split and pool config */
+ wr32(wx, WX_VXMRQC, 0);
+
+ /* flush all queues disables */
+ rd32(wx, WX_VXSTATUS);
+}
+EXPORT_SYMBOL(wx_stop_adapter_vf);
+
+/**
+ * wx_set_rar_vf - set device MAC address
+ * @wx: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @enable_addr: set flag that address is active
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_set_rar_vf(struct wx *wx, u32 index, u8 *addr, u32 enable_addr)
+{
+ u32 msgbuf[3] = {WX_VF_SET_MAC_ADDR};
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ int ret;
+
+ memcpy(msg_addr, addr, ETH_ALEN);
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (msgbuf[0] == (WX_VF_SET_MAC_ADDR | WX_VT_MSGTYPE_NACK)) {
+ wx_get_mac_addr_vf(wx, wx->mac.addr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_rar_vf);
+
+/**
+ * wx_update_mc_addr_list_vf - Update Multicast addresses
+ * @wx: pointer to the HW structure
+ * @netdev: pointer to the net device structure
+ *
+ * Updates the Multicast Table Array.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_update_mc_addr_list_vf(struct wx *wx, struct net_device *netdev)
+{
+ u32 msgbuf[WX_VXMAILBOX_SIZE] = {WX_VF_SET_MULTICAST};
+ u16 *vector_l = (u16 *)&msgbuf[1];
+ struct netdev_hw_addr *ha;
+ u32 cnt, i;
+
+ cnt = netdev_mc_count(netdev);
+ if (cnt > 28)
+ cnt = 28;
+ msgbuf[0] |= cnt << WX_VT_MSGINFO_SHIFT;
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == cnt)
+ break;
+ if (is_link_local_ether_addr(ha->addr))
+ continue;
+
+ vector_l[i++] = wx_mta_vector(wx, ha->addr);
+ }
+
+ return wx_write_posted_mbx(wx, msgbuf, ARRAY_SIZE(msgbuf));
+}
+EXPORT_SYMBOL(wx_update_mc_addr_list_vf);
+
+/**
+ * wx_update_xcast_mode_vf - Update Multicast mode
+ * @wx: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_update_xcast_mode_vf(struct wx *wx, int xcast_mode)
+{
+ u32 msgbuf[2] = {WX_VF_UPDATE_XCAST_MODE, xcast_mode};
+ int ret = 0;
+
+ if (wx->vfinfo->vf_api < wx_mbox_api_13)
+ return -EINVAL;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (WX_VF_UPDATE_XCAST_MODE | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_update_xcast_mode_vf);
+
+/**
+ * wx_get_link_state_vf - Get VF link state from PF
+ * @wx: pointer to the HW structure
+ * @link_state: link state storage
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_get_link_state_vf(struct wx *wx, u16 *link_state)
+{
+ u32 msgbuf[2] = {WX_VF_GET_LINK_STATE};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
+ return -EINVAL;
+
+ *link_state = msgbuf[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_link_state_vf);
+
+/**
+ * wx_set_vfta_vf - Set/Unset vlan filter table address
+ * @wx: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_set_vfta_vf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass)
+{
+ u32 msgbuf[2] = {WX_VF_SET_VLAN, vlan};
+ bool vlan_offload = false;
+ int ret;
+
+ /* Setting the 8-bit MSG INFO field to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << WX_VT_MSGINFO_SHIFT;
+ /* if VF VLAN offload is disabled, allow creating a VLAN under the PF port VLAN */
+ msgbuf[0] |= BIT(vlan_offload);
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] & WX_VT_MSGTYPE_ACK)
+ return 0;
+
+ return msgbuf[0] & WX_VT_MSGTYPE_NACK;
+}
+EXPORT_SYMBOL(wx_set_vfta_vf);
+
+void wx_get_mac_addr_vf(struct wx *wx, u8 *mac_addr)
+{
+ ether_addr_copy(mac_addr, wx->mac.perm_addr);
+}
+EXPORT_SYMBOL(wx_get_mac_addr_vf);
+
+int wx_get_fw_version_vf(struct wx *wx)
+{
+ u32 msgbuf[2] = {WX_VF_GET_FW_VERSION};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
+ return -EINVAL;
+ snprintf(wx->eeprom_id, 32, "0x%08x", msgbuf[1]);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_fw_version_vf);
+
+int wx_set_uc_addr_vf(struct wx *wx, u32 index, u8 *addr)
+{
+ u32 msgbuf[3] = {WX_VF_SET_MACVLAN};
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ int ret;
+
+ /* If index is one then this is the start of a new list and needs
+ * indication to the PF so it can do its own list management.
+ * If it is zero then that tells the PF to just clear all of
+ * this VF's macvlans and there is no new list.
+ */
+ msgbuf[0] |= index << WX_VT_MSGINFO_SHIFT;
+ if (addr)
+ memcpy(msg_addr, addr, 6);
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ if (msgbuf[0] == (WX_VF_SET_MACVLAN | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_uc_addr_vf);
+
+/**
+ * wx_rlpml_set_vf - Set the maximum receive packet length
+ * @wx: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_rlpml_set_vf(struct wx *wx, u16 max_size)
+{
+ u32 msgbuf[2] = {WX_VF_SET_LPE, max_size};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+ if ((msgbuf[0] & WX_VF_SET_LPE) &&
+ (msgbuf[0] & WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_rlpml_set_vf);
+
+/**
+ * wx_negotiate_api_version - Negotiate supported API version
+ * @wx: pointer to the HW structure
+ * @api: integer containing requested API version
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_negotiate_api_version(struct wx *wx, int api)
+{
+ u32 msgbuf[2] = {WX_VF_API_NEGOTIATE, api};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ /* Store value and return 0 on success */
+ if (msgbuf[0] == (WX_VF_API_NEGOTIATE | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+ wx->vfinfo->vf_api = api;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_negotiate_api_version);
+
+int wx_get_queues_vf(struct wx *wx, u32 *num_tcs, u32 *default_tc)
+{
+ u32 msgbuf[5] = {WX_VF_GET_QUEUES};
+ int ret;
+
+ /* do nothing if API doesn't support wx_get_queues */
+ if (wx->vfinfo->vf_api < wx_mbox_api_13)
+ return -EINVAL;
+
+ /* Fetch queue configuration from the PF */
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ /* if we didn't get an ACK there must have been
+ * some sort of mailbox error so we should treat it
+ * as such
+ */
+ if (msgbuf[0] != (WX_VF_GET_QUEUES | WX_VT_MSGTYPE_ACK))
+ return -EINVAL;
+ /* record and validate values from message */
+ wx->mac.max_tx_queues = msgbuf[WX_VF_TX_QUEUES];
+ if (wx->mac.max_tx_queues == 0 ||
+ wx->mac.max_tx_queues > WX_VF_MAX_TX_QUEUES)
+ wx->mac.max_tx_queues = WX_VF_MAX_TX_QUEUES;
+
+ wx->mac.max_rx_queues = msgbuf[WX_VF_RX_QUEUES];
+ if (wx->mac.max_rx_queues == 0 ||
+ wx->mac.max_rx_queues > WX_VF_MAX_RX_QUEUES)
+ wx->mac.max_rx_queues = WX_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msgbuf[WX_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > wx->mac.max_rx_queues)
+ *num_tcs = 1;
+ *default_tc = msgbuf[WX_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= wx->mac.max_tx_queues)
+ *default_tc = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_queues_vf);
+
+static int wx_get_link_status_from_pf(struct wx *wx, u32 *msgbuf)
+{
+ u32 links_reg = msgbuf[1];
+
+ if (msgbuf[1] & WX_PF_NOFITY_VF_NET_NOT_RUNNING)
+ wx->notify_down = true;
+ else
+ wx->notify_down = false;
+
+ if (wx->notify_down) {
+ wx->link = false;
+ wx->speed = SPEED_UNKNOWN;
+ return 0;
+ }
+
+ wx->link = WX_PFLINK_STATUS(links_reg);
+ wx->speed = WX_PFLINK_SPEED(links_reg);
+
+ return 0;
+}
+
+static int wx_pf_ping_vf(struct wx *wx, u32 *msgbuf)
+{
+ if (!(msgbuf[0] & WX_VT_MSGTYPE_CTS))
+ /* msg is not CTS, we need to do reset */
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct wx_link_reg_fields wx_speed_lookup_vf[] = {
+ {wx_mac_unknown},
+ {wx_mac_sp, SPEED_10000, SPEED_1000, SPEED_100, SPEED_UNKNOWN, SPEED_UNKNOWN},
+ {wx_mac_em, SPEED_1000, SPEED_100, SPEED_10, SPEED_UNKNOWN, SPEED_UNKNOWN},
+ {wx_mac_aml, SPEED_40000, SPEED_25000, SPEED_10000, SPEED_1000, SPEED_UNKNOWN},
+ {wx_mac_aml40, SPEED_40000, SPEED_25000, SPEED_10000, SPEED_1000, SPEED_UNKNOWN},
+};
+
+static void wx_check_physical_link(struct wx *wx)
+{
+ u32 val, link_val;
+ int ret;
+
+ /* get link status from hw status reg
+ * for SFP+ modules and DA cables, it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (wx->mac.type == wx_mac_em)
+ ret = read_poll_timeout_atomic(rd32, val, val & GENMASK(4, 1),
+ 100, 500, false, wx, WX_VXSTATUS);
+ else
+ ret = read_poll_timeout_atomic(rd32, val, val & BIT(0), 100,
+ 500, false, wx, WX_VXSTATUS);
+ if (ret) {
+ wx->speed = SPEED_UNKNOWN;
+ wx->link = false;
+ return;
+ }
+
+ wx->link = true;
+ link_val = WX_VXSTATUS_SPEED(val);
+
+ if (link_val & BIT(0))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit0_f;
+ else if (link_val & BIT(1))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit1_f;
+ else if (link_val & BIT(2))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit2_f;
+ else if (link_val & BIT(3))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit3_f;
+ else
+ wx->speed = SPEED_UNKNOWN;
+}
+
+int wx_check_mac_link_vf(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 msgbuf[2] = {0};
+ int ret = 0;
+
+ if (!mbx->timeout)
+ goto out;
+
+ wx_check_for_rst_vf(wx);
+ if (!wx_check_for_msg_vf(wx))
+ ret = wx_read_mbx_vf(wx, msgbuf, 2);
+ if (ret)
+ goto out;
+
+ switch (msgbuf[0] & GENMASK(8, 0)) {
+ case WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG:
+ ret = wx_get_link_status_from_pf(wx, msgbuf);
+ goto out;
+ case WX_PF_CONTROL_MSG:
+ ret = wx_pf_ping_vf(wx, msgbuf);
+ goto out;
+ case 0:
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK) {
+ /* msg is NACK, we must have lost CTS status */
+ ret = -EBUSY;
+ goto out;
+ }
+ /* no message, check link status */
+ wx_check_physical_link(wx);
+ goto out;
+ default:
+ break;
+ }
+
+ if (!(msgbuf[0] & WX_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS and is NACK, we must have lost CTS status */
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+out:
+ return ret;
+}
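The reset handshake above fixes the layout of the WX_VF_RESET exchange: dword 0 carries the command plus an ACK/NACK type bit on reply, the permanent MAC address is packed into dwords 1-2, and dword 3 returns the PF's mc_filter_type. A minimal decode of the reply, restating what wx_reset_hw_vf() does with it:

/* Reply decode for WX_VF_RESET, as consumed by wx_reset_hw_vf() above */
static int wx_decode_reset_reply(struct wx *wx, u32 *msgbuf)
{
	u8 *addr = (u8 *)(&msgbuf[1]);

	if (msgbuf[0] == (WX_VF_RESET | WX_VT_MSGTYPE_ACK))
		ether_addr_copy(wx->mac.perm_addr, addr); /* dwords 1-2 */
	else if (msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_NACK))
		return -EINVAL; /* neither ACK nor NACK: mailbox error */

	wx->mac.mc_filter_type = msgbuf[3];
	return 0;
}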
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf.h b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
new file mode 100644
index 000000000000..fec1126703e3
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_VF_H_
+#define _WX_VF_H_
+
+#define WX_VF_MAX_RING_NUMS 8
+#define WX_VX_PF_BME 0x4B8
+#define WX_VF_BME_ENABLE BIT(0)
+#define WX_VXSTATUS 0x4
+#define WX_VXCTRL 0x8
+#define WX_VXCTRL_RST BIT(0)
+
+#define WX_VXMRQC 0x78
+#define WX_VXICR 0x100
+#define WX_VXIMS 0x108
+#define WX_VXIMC 0x10C
+#define WX_VF_IRQ_CLEAR_MASK 7
+#define WX_VF_MAX_TX_QUEUES 4
+#define WX_VF_MAX_RX_QUEUES 4
+#define WX_VXTXDCTL(r) (0x3010 + (0x40 * (r)))
+#define WX_VXRXDCTL(r) (0x1010 + (0x40 * (r)))
+#define WX_VXRXDCTL_ENABLE BIT(0)
+#define WX_VXTXDCTL_FLUSH BIT(26)
+
+#define WX_VXITR(i) (0x200 + (4 * (i))) /* i=[0,1] */
+#define WX_VXITR_MASK GENMASK(8, 0)
+#define WX_VXITR_CNT_WDIS BIT(31)
+#define WX_VXIVAR_MISC 0x260
+#define WX_VXIVAR(i) (0x240 + (4 * (i))) /* i=[0,3] */
+
+#define WX_VXRXDCTL_RSCMAX(f) FIELD_PREP(GENMASK(24, 23), f)
+#define WX_VXRXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
+#define WX_VXRXDCTL_BUFSZ(f) FIELD_PREP(GENMASK(11, 8), f)
+#define WX_VXRXDCTL_HDRSZ(f) FIELD_PREP(GENMASK(15, 12), f)
+
+#define WX_VXRXDCTL_RSCMAX_MASK GENMASK(24, 23)
+#define WX_VXRXDCTL_BUFLEN_MASK GENMASK(6, 1)
+#define WX_VXRXDCTL_BUFSZ_MASK GENMASK(11, 8)
+#define WX_VXRXDCTL_HDRSZ_MASK GENMASK(15, 12)
+
+#define wx_conf_size(v, mwidth, uwidth) ({ \
+ typeof(v) _v = (v); \
+ (_v == 2 << (mwidth) ? 0 : _v >> (uwidth)); \
+})
+#define wx_buf_len(v) wx_conf_size(v, 13, 7)
+#define wx_hdr_sz(v) wx_conf_size(v, 10, 6)
+#define wx_buf_sz(v) wx_conf_size(v, 14, 10)
+#define wx_pkt_thresh(v) wx_conf_size(v, 4, 0)
+
+#define WX_RX_HDR_SIZE 256
+#define WX_RX_BUF_SIZE 2048
+
+#define WX_RXBUFFER_2048 (2048)
+#define WX_RXBUFFER_3072 3072
+
+/* Receive Path */
+#define WX_VXRDBAL(r) (0x1000 + (0x40 * (r)))
+#define WX_VXRDBAH(r) (0x1004 + (0x40 * (r)))
+#define WX_VXRDT(r) (0x1008 + (0x40 * (r)))
+#define WX_VXRDH(r) (0x100C + (0x40 * (r)))
+
+#define WX_VXRXDCTL_RSCEN BIT(29)
+#define WX_VXRXDCTL_DROP BIT(30)
+#define WX_VXRXDCTL_VLAN BIT(31)
+
+#define WX_VXTDBAL(r) (0x3000 + (0x40 * (r)))
+#define WX_VXTDBAH(r) (0x3004 + (0x40 * (r)))
+#define WX_VXTDT(r) (0x3008 + (0x40 * (r)))
+#define WX_VXTDH(r) (0x300C + (0x40 * (r)))
+
+#define WX_VXTXDCTL_ENABLE BIT(0)
+#define WX_VXTXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
+#define WX_VXTXDCTL_PTHRESH(f) FIELD_PREP(GENMASK(11, 8), f)
+#define WX_VXTXDCTL_WTHRESH(f) FIELD_PREP(GENMASK(22, 16), f)
+
+#define WX_VXMRQC_PSR(f) FIELD_PREP(GENMASK(5, 1), f)
+#define WX_VXMRQC_PSR_MASK GENMASK(5, 1)
+#define WX_VXMRQC_PSR_L4HDR BIT(0)
+#define WX_VXMRQC_PSR_L3HDR BIT(1)
+#define WX_VXMRQC_PSR_L2HDR BIT(2)
+#define WX_VXMRQC_PSR_TUNHDR BIT(3)
+#define WX_VXMRQC_PSR_TUNMAC BIT(4)
+
+#define WX_VXRSSRK(i) (0x80 + ((i) * 4)) /* i=[0,9] */
+#define WX_VXRETA(i) (0xC0 + ((i) * 4)) /* i=[0,15] */
+
+#define WX_VXMRQC_RSS(f) FIELD_PREP(GENMASK(31, 16), f)
+#define WX_VXMRQC_RSS_MASK GENMASK(31, 16)
+#define WX_VXMRQC_RSS_ALG_IPV4_TCP BIT(0)
+#define WX_VXMRQC_RSS_ALG_IPV4 BIT(1)
+#define WX_VXMRQC_RSS_ALG_IPV6 BIT(4)
+#define WX_VXMRQC_RSS_ALG_IPV6_TCP BIT(5)
+#define WX_VXMRQC_RSS_EN BIT(8)
+#define WX_VXMRQC_RSS_HASH(f) FIELD_PREP(GENMASK(15, 13), f)
+
+#define WX_PFLINK_STATUS(g) FIELD_GET(BIT(0), g)
+#define WX_PFLINK_SPEED(g) FIELD_GET(GENMASK(31, 1), g)
+#define WX_VXSTATUS_SPEED(g) FIELD_GET(GENMASK(4, 1), g)
+
+struct wx_link_reg_fields {
+ u32 mac_type;
+ u32 bit0_f;
+ u32 bit1_f;
+ u32 bit2_f;
+ u32 bit3_f;
+ u32 bit4_f;
+};
+
+void wx_init_hw_vf(struct wx *wx);
+int wx_reset_hw_vf(struct wx *wx);
+void wx_get_mac_addr_vf(struct wx *wx, u8 *mac_addr);
+void wx_stop_adapter_vf(struct wx *wx);
+int wx_get_fw_version_vf(struct wx *wx);
+int wx_set_rar_vf(struct wx *wx, u32 index, u8 *addr, u32 enable_addr);
+int wx_update_mc_addr_list_vf(struct wx *wx, struct net_device *netdev);
+int wx_set_uc_addr_vf(struct wx *wx, u32 index, u8 *addr);
+int wx_rlpml_set_vf(struct wx *wx, u16 max_size);
+int wx_negotiate_api_version(struct wx *wx, int api);
+int wx_get_queues_vf(struct wx *wx, u32 *num_tcs, u32 *default_tc);
+int wx_update_xcast_mode_vf(struct wx *wx, int xcast_mode);
+int wx_get_link_state_vf(struct wx *wx, u16 *link_state);
+int wx_set_vfta_vf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass);
+int wx_check_mac_link_vf(struct wx *wx);
+
+#endif /* _WX_VF_H_ */
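The wx_conf_size() family above encodes a byte size into the narrow register fields of VXRXDCTL/VXTXDCTL: a value equal to the field's maximum (2 << mwidth) encodes as 0, anything smaller is right-shifted by uwidth. A self-checking userspace sketch; the typeof-based single evaluation of the original macro is dropped here for brevity:

#include <assert.h>

#define conf_size(v, mwidth, uwidth) \
	((v) == (2 << (mwidth)) ? 0 : (v) >> (uwidth))
#define buf_len(v)	conf_size(v, 13, 7)	/* wx_buf_len */
#define hdr_sz(v)	conf_size(v, 10, 6)	/* wx_hdr_sz */
#define buf_sz(v)	conf_size(v, 14, 10)	/* wx_buf_sz */

int main(void)
{
	assert(hdr_sz(256) == 4);	/* WX_RX_HDR_SIZE */
	assert(buf_sz(2048) == 2);	/* WX_RX_BUF_SIZE */
	assert(buf_len(1024) == 8);	/* a 1024-descriptor ring */
	assert(buf_len(2 << 13) == 0);	/* field maximum encodes as 0 */
	return 0;
}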
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
new file mode 100644
index 000000000000..ade2bfe563aa
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_mbx.h"
+#include "wx_lib.h"
+#include "wx_vf.h"
+#include "wx_vf_lib.h"
+#include "wx_vf_common.h"
+
+int wxvf_suspend(struct device *dev_d)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct wx *wx = pci_get_drvdata(pdev);
+
+ netif_device_detach(wx->netdev);
+ wx_clear_interrupt_scheme(wx);
+ pci_disable_device(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(wxvf_suspend);
+
+void wxvf_shutdown(struct pci_dev *pdev)
+{
+ wxvf_suspend(&pdev->dev);
+}
+EXPORT_SYMBOL(wxvf_shutdown);
+
+int wxvf_resume(struct device *dev_d)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct wx *wx = pci_get_drvdata(pdev);
+
+ pci_set_master(pdev);
+ wx_init_interrupt_scheme(wx);
+ netif_device_attach(wx->netdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(wxvf_resume);
+
+void wxvf_remove(struct pci_dev *pdev)
+{
+ struct wx *wx = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ cancel_work_sync(&wx->service_task);
+ netdev = wx->netdev;
+ unregister_netdev(netdev);
+ kfree(wx->vfinfo);
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_disable_device(pdev);
+}
+EXPORT_SYMBOL(wxvf_remove);
+
+static irqreturn_t wx_msix_misc_vf(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+
+ set_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
+ /* Clear the interrupt */
+ if (netif_running(wx->netdev))
+ wr32(wx, WX_VXIMC, wx->eims_other);
+
+ return IRQ_HANDLED;
+}
+
+int wx_request_msix_irqs_vf(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ err = request_threaded_irq(wx->msix_entry->vector, wx_msix_misc_vf,
+ NULL, IRQF_ONESHOT, netdev->name, wx);
+ if (err) {
+ wx_err(wx, "request_irq for msix_other failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_q_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+EXPORT_SYMBOL(wx_request_msix_irqs_vf);
+
+void wx_negotiate_api_vf(struct wx *wx)
+{
+ int api[] = {
+ wx_mbox_api_13,
+ wx_mbox_api_null};
+ int err = 0, idx = 0;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ while (api[idx] != wx_mbox_api_null) {
+ err = wx_negotiate_api_version(wx, api[idx]);
+ if (!err)
+ break;
+ idx++;
+ }
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+}
+EXPORT_SYMBOL(wx_negotiate_api_vf);
+
+void wx_reset_vf(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int ret = 0;
+
+ ret = wx_reset_hw_vf(wx);
+ if (!ret)
+ wx_init_hw_vf(wx);
+ wx_negotiate_api_vf(wx);
+ if (is_valid_ether_addr(wx->mac.addr)) {
+ eth_hw_addr_set(netdev, wx->mac.addr);
+ ether_addr_copy(netdev->perm_addr, wx->mac.addr);
+ }
+}
+EXPORT_SYMBOL(wx_reset_vf);
+
+void wx_set_rx_mode_vf(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ unsigned int flags = netdev->flags;
+ int xcast_mode;
+
+ /* request the most inclusive mode we need */
+ if (flags & IFF_PROMISC)
+ xcast_mode = WXVF_XCAST_MODE_PROMISC;
+ else if (flags & IFF_ALLMULTI)
+ xcast_mode = WXVF_XCAST_MODE_ALLMULTI;
+ else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
+ xcast_mode = WXVF_XCAST_MODE_MULTI;
+ else
+ xcast_mode = WXVF_XCAST_MODE_NONE;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ wx_update_xcast_mode_vf(wx, xcast_mode);
+ wx_update_mc_addr_list_vf(wx, netdev);
+ wx_write_uc_addr_list_vf(netdev);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+}
+EXPORT_SYMBOL(wx_set_rx_mode_vf);
+
+/**
+ * wx_configure_rx_vf - Configure Receive Unit after Reset
+ * @wx: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void wx_configure_rx_vf(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int i, ret;
+
+ wx_setup_psrtype_vf(wx);
+ wx_setup_vfmrqc_vf(wx);
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ ret = wx_rlpml_set_vf(wx,
+ netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+ if (ret)
+ wx_dbg(wx, "Failed to set MTU at %d\n", netdev->mtu);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ struct wx_ring *rx_ring = wx->rx_ring[i];
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
+ wx_set_rx_buffer_len_vf(wx, rx_ring);
+#endif
+ wx_configure_rx_ring_vf(wx, rx_ring);
+ }
+}
+
+void wx_configure_vf(struct wx *wx)
+{
+ wx_set_rx_mode_vf(wx->netdev);
+ wx_configure_tx_vf(wx);
+ wx_configure_rx_vf(wx);
+}
+EXPORT_SYMBOL(wx_configure_vf);
+
+int wx_set_mac_vf(struct net_device *netdev, void *p)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ ret = wx_set_rar_vf(wx, 1, (u8 *)addr->sa_data, 1);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+
+ if (ret)
+ return -EPERM;
+
+ memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
+ memcpy(wx->mac.perm_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_mac_vf);
+
+void wxvf_watchdog_update_link(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags))
+ return;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ err = wx_check_mac_link_vf(wx);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+ if (err) {
+ wx->link = false;
+ set_bit(WX_FLAG_NEED_DO_RESET, wx->flags);
+ }
+ clear_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
+}
+EXPORT_SYMBOL(wxvf_watchdog_update_link);
+
+static void wxvf_irq_enable(struct wx *wx)
+{
+ wr32(wx, WX_VXIMC, wx->eims_enable_mask);
+}
+
+static void wxvf_up_complete(struct wx *wx)
+{
+ /* Always set the carrier off */
+ netif_carrier_off(wx->netdev);
+ mod_timer(&wx->service_timer, jiffies + HZ);
+ set_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
+
+ wx_configure_msix_vf(wx);
+ smp_mb__before_atomic();
+ wx_napi_enable_all(wx);
+
+ /* clear any pending interrupts, may auto mask */
+ wr32(wx, WX_VXICR, U32_MAX);
+ wxvf_irq_enable(wx);
+ /* enable transmits */
+ netif_tx_start_all_queues(wx->netdev);
+}
+
+int wxvf_open(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int err;
+
+ err = wx_setup_resources(wx);
+ if (err)
+ goto err_reset;
+ wx_configure_vf(wx);
+
+ err = wx_request_msix_irqs_vf(wx);
+ if (err)
+ goto err_free_resources;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+ if (err)
+ goto err_free_irq;
+
+ err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
+ if (err)
+ goto err_free_irq;
+
+ wxvf_up_complete(wx);
+
+ return 0;
+err_free_irq:
+ wx_free_irq(wx);
+err_free_resources:
+ wx_free_resources(wx);
+err_reset:
+ wx_reset_vf(wx);
+ return err;
+}
+EXPORT_SYMBOL(wxvf_open);
+
+static void wxvf_down(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+
+ timer_delete_sync(&wx->service_timer);
+ netif_tx_stop_all_queues(netdev);
+ netif_tx_disable(netdev);
+ netif_carrier_off(netdev);
+ wx_napi_disable_all(wx);
+ wx_reset_vf(wx);
+
+ wx_clean_all_tx_rings(wx);
+ wx_clean_all_rx_rings(wx);
+}
+
+static void wxvf_reinit_locked(struct wx *wx)
+{
+ while (test_and_set_bit(WX_STATE_RESETTING, wx->state))
+ usleep_range(1000, 2000);
+ wxvf_down(wx);
+ wx_free_irq(wx);
+ wx_configure_vf(wx);
+ wx_request_msix_irqs_vf(wx);
+ wxvf_up_complete(wx);
+ clear_bit(WX_STATE_RESETTING, wx->state);
+}
+
+static void wxvf_reset_subtask(struct wx *wx)
+{
+ if (!test_bit(WX_FLAG_NEED_DO_RESET, wx->flags))
+ return;
+ clear_bit(WX_FLAG_NEED_DO_RESET, wx->flags);
+
+ rtnl_lock();
+ if (test_bit(WX_STATE_RESETTING, wx->state) ||
+ !(netif_running(wx->netdev))) {
+ rtnl_unlock();
+ return;
+ }
+ wxvf_reinit_locked(wx);
+ rtnl_unlock();
+}
+
+int wxvf_close(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ wxvf_down(wx);
+ wx_free_irq(wx);
+ wx_free_resources(wx);
+
+ return 0;
+}
+EXPORT_SYMBOL(wxvf_close);
+
+static void wxvf_link_config_subtask(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+
+ wxvf_watchdog_update_link(wx);
+ if (wx->link) {
+ if (netif_carrier_ok(netdev))
+ return;
+ netif_carrier_on(netdev);
+ netdev_info(netdev, "Link is Up - %s\n",
+ phy_speed_to_str(wx->speed));
+ } else {
+ if (!netif_carrier_ok(netdev))
+ return;
+ netif_carrier_off(netdev);
+ netdev_info(netdev, "Link is Down\n");
+ }
+}
+
+static void wxvf_service_task(struct work_struct *work)
+{
+ struct wx *wx = container_of(work, struct wx, service_task);
+
+ wxvf_link_config_subtask(wx);
+ wxvf_reset_subtask(wx);
+ wx_service_event_complete(wx);
+}
+
+void wxvf_init_service(struct wx *wx)
+{
+ timer_setup(&wx->service_timer, wx_service_timer, 0);
+ INIT_WORK(&wx->service_task, wxvf_service_task);
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+EXPORT_SYMBOL(wxvf_init_service);
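The exported entry points above are meant to be wired into the net_device_ops and PM callbacks of the VF drivers built on libwx. A hedged sketch of that wiring; the ops structure and where it is installed belong to the consumer driver and are not part of this patch:

/* Hypothetical consumer wiring for a VF driver using wx_vf_common */
static const struct net_device_ops wxvf_netdev_ops = {
	.ndo_open		= wxvf_open,
	.ndo_stop		= wxvf_close,
	.ndo_set_rx_mode	= wx_set_rx_mode_vf,
	.ndo_set_mac_address	= wx_set_mac_vf,
};

At probe time such a driver would also call wxvf_init_service() to arm wx_service_timer and the wxvf_service_task worker that drives link updates and resets.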
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_common.h b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.h
new file mode 100644
index 000000000000..cbbb1b178cb2
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_VF_COMMON_H_
+#define _WX_VF_COMMON_H_
+
+int wxvf_suspend(struct device *dev_d);
+void wxvf_shutdown(struct pci_dev *pdev);
+int wxvf_resume(struct device *dev_d);
+void wxvf_remove(struct pci_dev *pdev);
+int wx_request_msix_irqs_vf(struct wx *wx);
+void wx_negotiate_api_vf(struct wx *wx);
+void wx_reset_vf(struct wx *wx);
+void wx_set_rx_mode_vf(struct net_device *netdev);
+void wx_configure_vf(struct wx *wx);
+int wx_set_mac_vf(struct net_device *netdev, void *p);
+void wxvf_watchdog_update_link(struct wx *wx);
+int wxvf_open(struct net_device *netdev);
+int wxvf_close(struct net_device *netdev);
+void wxvf_init_service(struct wx *wx);
+
+#endif /* _WX_VF_COMMON_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
new file mode 100644
index 000000000000..5d48df7a849f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_lib.h"
+#include "wx_vf.h"
+#include "wx_vf_lib.h"
+
+static void wx_write_eitr_vf(struct wx_q_vector *q_vector)
+{
+ struct wx *wx = q_vector->wx;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg;
+
+ itr_reg = q_vector->itr & WX_VXITR_MASK;
+
+ /* set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= WX_VXITR_CNT_WDIS;
+
+ wr32(wx, WX_VXITR(v_idx), itr_reg);
+}
+
+static void wx_set_ivar_vf(struct wx *wx, s8 direction, u8 queue,
+ u8 msix_vector)
+{
+ u32 ivar, index;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
+ ivar = rd32(wx, WX_VXIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= msix_vector;
+ wr32(wx, WX_VXIVAR_MISC, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = rd32(wx, WX_VXIVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ wr32(wx, WX_VXIVAR(queue >> 1), ivar);
+ }
+}
+
+void wx_configure_msix_vf(struct wx *wx)
+{
+ int v_idx;
+
+ wx->eims_enable_mask = 0;
+ for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
+ struct wx_q_vector *q_vector = wx->q_vector[v_idx];
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->rx)
+ wx_set_ivar_vf(wx, 0, ring->reg_idx, v_idx);
+
+ wx_for_each_ring(ring, q_vector->tx)
+ wx_set_ivar_vf(wx, 1, ring->reg_idx, v_idx);
+
+ /* add q_vector eims value to global eims_enable_mask */
+ wx->eims_enable_mask |= BIT(v_idx);
+ wx_write_eitr_vf(q_vector);
+ }
+
+ wx_set_ivar_vf(wx, -1, 1, v_idx);
+
+ /* setup eims_other and add value to global eims_enable_mask */
+ wx->eims_other = BIT(v_idx);
+ wx->eims_enable_mask |= wx->eims_other;
+}
+
+int wx_write_uc_addr_list_vf(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int count = 0;
+
+ if (!netdev_uc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, netdev)
+ wx_set_uc_addr_vf(wx, ++count, ha->addr);
+ } else {
+ /*
+ * If the list is empty then send message to PF driver to
+ * clear all macvlans on this VF.
+ */
+ wx_set_uc_addr_vf(wx, 0, NULL);
+ }
+
+ return count;
+}
+
+/**
+ * wx_configure_tx_ring_vf - Configure Tx ring after Reset
+ * @wx: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void wx_configure_tx_ring_vf(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ u64 tdba = ring->dma;
+ u32 txdctl = 0;
+ int ret;
+
+ /* disable queue to avoid issues while updating state */
+ wr32(wx, WX_VXTXDCTL(reg_idx), WX_VXTXDCTL_FLUSH);
+ wr32(wx, WX_VXTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_VXTDBAH(reg_idx), tdba >> 32);
+
+ /* enable relaxed ordering */
+ pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
+ 0, PCI_EXP_DEVCTL_RELAX_EN);
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_VXTDH(reg_idx), 0);
+ wr32(wx, WX_VXTDT(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_VXTDT(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ txdctl |= WX_VXTXDCTL_BUFLEN(wx_buf_len(ring->count));
+ txdctl |= WX_VXTXDCTL_ENABLE;
+
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct wx_tx_buffer) * ring->count);
+
+ wr32(wx, WX_VXTXDCTL(reg_idx), txdctl);
+ /* poll to verify queue is enabled */
+ ret = read_poll_timeout(rd32, txdctl, txdctl & WX_VXTXDCTL_ENABLE,
+ 1000, 10000, true, wx, WX_VXTXDCTL(reg_idx));
+ if (ret == -ETIMEDOUT)
+ wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
+ * wx_configure_tx_vf - Configure Transmit Unit after Reset
+ * @wx: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+void wx_configure_tx_vf(struct wx *wx)
+{
+ u32 i;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_configure_tx_ring_vf(wx, wx->tx_ring[i]);
+}
+
+static void wx_configure_srrctl_vf(struct wx *wx, struct wx_ring *ring,
+ int index)
+{
+ u32 srrctl;
+
+ srrctl = rd32m(wx, WX_VXRXDCTL(index),
+ (u32)~(WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK));
+ srrctl |= WX_VXRXDCTL_DROP;
+ srrctl |= WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE));
+ srrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE));
+
+ wr32(wx, WX_VXRXDCTL(index), srrctl);
+}
+
+void wx_setup_psrtype_vf(struct wx *wx)
+{
+ /* PSRTYPE must be initialized */
+ u32 psrtype = WX_VXMRQC_PSR_L2HDR |
+ WX_VXMRQC_PSR_L3HDR |
+ WX_VXMRQC_PSR_L4HDR |
+ WX_VXMRQC_PSR_TUNHDR |
+ WX_VXMRQC_PSR_TUNMAC;
+
+ wr32m(wx, WX_VXMRQC, WX_VXMRQC_PSR_MASK, WX_VXMRQC_PSR(psrtype));
+}
+
+void wx_setup_vfmrqc_vf(struct wx *wx)
+{
+ u16 rss_i = wx->num_rx_queues;
+ u32 vfmrqc = 0, vfreta = 0;
+ u8 i, j;
+
+ /* Fill out hash function seeds */
+ netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
+ for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++)
+ wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]);
+
+ for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
+ if (j == rss_i)
+ j = 0;
+
+ wx->rss_indir_tbl[i] = j;
+
+ vfreta |= j << (i & 0x3) * 8;
+ if ((i & 3) == 3) {
+ wr32(wx, WX_VXRETA(i >> 2), vfreta);
+ vfreta = 0;
+ }
+ }
+
+ /* Perform hash on these packet types */
+ vfmrqc |= WX_VXMRQC_RSS_ALG_IPV4 |
+ WX_VXMRQC_RSS_ALG_IPV4_TCP |
+ WX_VXMRQC_RSS_ALG_IPV6 |
+ WX_VXMRQC_RSS_ALG_IPV6_TCP;
+
+ vfmrqc |= WX_VXMRQC_RSS_EN;
+
+ if (wx->num_rx_queues > 3)
+ vfmrqc |= WX_VXMRQC_RSS_HASH(2);
+ else if (wx->num_rx_queues > 1)
+ vfmrqc |= WX_VXMRQC_RSS_HASH(1);
+ wr32m(wx, WX_VXMRQC, WX_VXMRQC_RSS_MASK, WX_VXMRQC_RSS(vfmrqc));
+}
+
+void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ union wx_rx_desc *rx_desc;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = rd32(wx, WX_VXRXDCTL(reg_idx));
+ wx_disable_rx_queue(wx, ring);
+
+ wr32(wx, WX_VXRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_VXRDBAH(reg_idx), rdba >> 32);
+
+ /* enable relaxed ordering */
+ pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
+ 0, PCI_EXP_DEVCTL_RELAX_EN);
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_VXRDH(reg_idx), 0);
+ wr32(wx, WX_VXRDT(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_VXRDT(reg_idx);
+
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct wx_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = WX_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+ ring->next_to_alloc = 0;
+
+ wx_configure_srrctl_vf(wx, ring, reg_idx);
+
+ /* allow any size packet since we can handle overflow */
+ rxdctl &= ~WX_VXRXDCTL_BUFLEN_MASK;
+ rxdctl |= WX_VXRXDCTL_BUFLEN(wx_buf_len(ring->count));
+ rxdctl |= WX_VXRXDCTL_ENABLE | WX_VXRXDCTL_VLAN;
+
+ /* enable RSC */
+ rxdctl &= ~WX_VXRXDCTL_RSCMAX_MASK;
+ rxdctl |= WX_VXRXDCTL_RSCMAX(0);
+ rxdctl |= WX_VXRXDCTL_RSCEN;
+
+ wr32(wx, WX_VXRXDCTL(reg_idx), rxdctl);
+
+ /* pf/vf reuse */
+ wx_enable_rx_queue(wx, ring);
+ wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
+}
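wx_setup_vfmrqc_vf() above packs four one-byte redirection entries into each 32-bit VXRETA register, flushing a register every fourth entry while round-robining queue indices across the table. A standalone model of that packing; reta_size and num_rx_queues stand in for WX_MAX_RETA_ENTRIES and wx->num_rx_queues:

#include <stdint.h>

/* Pack RETA entries four to a register: entry i lands at byte (i & 3)
 * of register (i >> 2), exactly as the wr32(wx, WX_VXRETA(i >> 2), ...)
 * calls above do.
 */
static void pack_reta(uint32_t *regs, int reta_size, int num_rx_queues)
{
	uint32_t vfreta = 0;
	int i, j = 0;

	for (i = 0; i < reta_size; i++, j++) {
		if (j == num_rx_queues)
			j = 0;	/* round-robin queues across the table */

		vfreta |= (uint32_t)j << ((i & 0x3) * 8);
		if ((i & 3) == 3) {
			regs[i >> 2] = vfreta;
			vfreta = 0;
		}
	}
}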
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
new file mode 100644
index 000000000000..43ea126b79eb
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_VF_LIB_H_
+#define _WX_VF_LIB_H_
+
+void wx_configure_msix_vf(struct wx *wx);
+int wx_write_uc_addr_list_vf(struct net_device *netdev);
+void wx_setup_psrtype_vf(struct wx *wx);
+void wx_setup_vfmrqc_vf(struct wx *wx);
+void wx_configure_tx_vf(struct wx *wx);
+void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring);
+
+#endif /* _WX_VF_LIB_H_ */