Diffstat (limited to 'drivers/net/ethernet/freescale/enetc/enetc.c')
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c  1340
1 file changed, 1023 insertions(+), 317 deletions(-)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 09471329f3a3..3ca93adb9662 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2,88 +2,138 @@
/* Copyright 2017-2019 NXP */
#include "enetc.h"
+#include <linux/bpf_trace.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>
+#include <linux/ptp_classify.h>
+#include <net/pkt_sched.h>
-/* ENETC overhead: optional extension BD + 1 BD gap */
-#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
-/* max # of chained Tx BDs is 15, including head and extension BD */
-#define ENETC_MAX_SKB_FRAGS 13
-#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
-
-static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- int active_offloads);
-
-netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_bdr *tx_ring;
- int count;
+ int num_tx_rings = priv->num_tx_rings;
+ int i;
- tx_ring = priv->tx_ring[skb->queue_mapping];
+ for (i = 0; i < priv->num_rx_rings; i++)
+ if (priv->rx_ring[i]->xdp.prog)
+ return num_tx_rings - num_possible_cpus();
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
- if (unlikely(skb_linearize(skb)))
- goto drop_packet_err;
+ return num_tx_rings;
+}
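When any RX ring has an XDP program attached, the driver sets aside one TX ring per possible CPU for XDP_TX/XDP_REDIRECT traffic, so the queue count advertised to the network stack shrinks by that amount. A minimal sketch of the arithmetic, with hypothetical ring and CPU counts (not driver code):

	static int example_stack_tx_queues(int num_tx_rings, int cpus,
					   bool xdp_attached)
	{
		return xdp_attached ? num_tx_rings - cpus : num_tx_rings;
	}

	/* example_stack_tx_queues(8, 2, true) == 6: queues 0..5 stay with
	 * the stack, rings 6..7 are dedicated to XDP (see priv->xdp_tx_ring).
	 */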
- count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
+ struct enetc_bdr *tx_ring)
+{
+ int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
- enetc_lock_mdio();
- count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
- enetc_unlock_mdio();
+ return priv->rx_ring[index];
+}
- if (unlikely(!count))
- goto drop_packet_err;
+static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
+ return NULL;
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
- netif_stop_subqueue(ndev, tx_ring->index);
+ return tx_swbd->skb;
+}
- return NETDEV_TX_OK;
+static struct xdp_frame *
+enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->is_xdp_redirect)
+ return tx_swbd->xdp_frame;
-drop_packet_err:
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
struct enetc_tx_swbd *tx_swbd)
{
+ /* For XDP_TX, pages come from RX, whereas for the other contexts where
+ * we have is_dma_page set, those come from skb_frag_dma_map. We need
+ * to match the DMA mapping length, so we must differentiate the two.
+ */
if (tx_swbd->is_dma_page)
dma_unmap_page(tx_ring->dev, tx_swbd->dma,
- tx_swbd->len, DMA_TO_DEVICE);
+ tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
+ tx_swbd->dir);
else
dma_unmap_single(tx_ring->dev, tx_swbd->dma,
- tx_swbd->len, DMA_TO_DEVICE);
+ tx_swbd->len, tx_swbd->dir);
tx_swbd->dma = 0;
}
-static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
- struct enetc_tx_swbd *tx_swbd)
+static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
{
+ struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
+ struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
+
if (tx_swbd->dma)
enetc_unmap_tx_buff(tx_ring, tx_swbd);
- if (tx_swbd->skb) {
- dev_kfree_skb_any(tx_swbd->skb);
+ if (xdp_frame) {
+ xdp_return_frame(tx_swbd->xdp_frame);
+ tx_swbd->xdp_frame = NULL;
+ } else if (skb) {
+ dev_kfree_skb_any(skb);
tx_swbd->skb = NULL;
}
}
-static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- int active_offloads)
+/* Let H/W know BD ring has been updated */
+static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
+{
+ /* includes wmb() */
+ enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
+}
+
+static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
+ u8 *msgtype, u8 *twostep,
+ u16 *correction_offset, u16 *body_offset)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+ unsigned int type;
+ u8 *base;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ type = ptp_class & PTP_CLASS_PMASK;
+ if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
+ *udp = 1;
+ else
+ *udp = 0;
+
+ *msgtype = ptp_get_msgtype(hdr, ptp_class);
+ *twostep = hdr->flag_field[0] & 0x2;
+
+ base = skb_mac_header(skb);
+ *correction_offset = (u8 *)&hdr->correction - base;
+ *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
+
+ return 0;
+}
+
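enetc_ptp_parse() boils a candidate Sync skb down to two byte offsets relative to the MAC header: the PTP correction field (later programmed into the single-step register) and the message body, where originTimestamp begins. A sketch of the values it would yield for an untagged PTPv2-over-Ethernet Sync frame; the constants are illustrative assumptions, and IP/UDP encapsulations shift both offsets:

	#define EX_MAC_HLEN		14	/* untagged Ethernet header */
	#define EX_PTP_CORR_OFF		8	/* correction within ptp_header */
	#define EX_PTP_HLEN		34	/* sizeof(struct ptp_header) */

	static void example_ptp_offsets(u16 *correction_offset, u16 *body_offset)
	{
		*correction_offset = EX_MAC_HLEN + EX_PTP_CORR_OFF;	/* 22 */
		*body_offset = EX_MAC_HLEN + EX_PTP_HLEN;		/* 48 */
	}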
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
+ bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_tx_swbd *tx_swbd;
- skb_frag_t *frag;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
+ u8 msgtype, twostep, udp;
union enetc_tx_bd *txbd;
- bool do_vlan, do_tstamp;
+ u16 offset1, offset2;
int i, count = 0;
+ skb_frag_t *frag;
unsigned int f;
dma_addr_t dma;
u8 flags = 0;
@@ -104,15 +154,25 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
tx_swbd->dma = dma;
tx_swbd->len = len;
tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
count++;
do_vlan = skb_vlan_tag_present(skb);
- do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
- tx_swbd->do_tstamp = do_tstamp;
- tx_swbd->check_wb = tx_swbd->do_tstamp;
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
+ &offset2) ||
+ msgtype != PTP_MSGTYPE_SYNC || twostep)
+ WARN_ONCE(1, "Bad packet for one-step timestamping\n");
+ else
+ do_onestep_tstamp = true;
+ } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
+ do_twostep_tstamp = true;
+ }
- if (do_vlan || do_tstamp)
+ tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
+ tx_swbd->check_wb = tx_swbd->do_twostep_tstamp;
+
+ if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
if (tx_ring->tsd_enable)
@@ -149,7 +209,40 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
}
- if (do_tstamp) {
+ if (do_onestep_tstamp) {
+ u32 lo, hi, val;
+ u64 sec, nsec;
+ u8 *data;
+
+ lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ sec = (u64)hi << 32 | lo;
+ nsec = do_div(sec, 1000000000);
+
+ /* Configure extension BD */
+ temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
+ e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
+
+ /* Update originTimestamp field of Sync packet
+ * - 48 bits seconds field
+ * - 32 bits nanoseconds field
+ */
+ data = skb_mac_header(skb);
+ *(__be16 *)(data + offset2) =
+ htons((sec >> 32) & 0xffff);
+ *(__be32 *)(data + offset2 + 2) =
+ htonl(sec & 0xffffffff);
+ *(__be32 *)(data + offset2 + 6) = htonl(nsec);
+
+ /* Configure single-step register */
+ val = ENETC_PM0_SINGLE_STEP_EN;
+ val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
+ if (udp)
+ val |= ENETC_PM0_SINGLE_STEP_CH;
+
+ enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
+ enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
+ } else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
}
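The originTimestamp update above splits the free-running 1588 counter (SICTR1:SICTR0, counted in nanoseconds) into a 48-bit seconds field and a 32-bit nanoseconds field; do_div() stores the quotient back into its first argument and returns the remainder. A worked example with a made-up counter value:

	static u32 example_split_counter(void)
	{
		u64 sec = 0x1234567890ULL;		/* 78187493520 ns */
		u32 nsec = do_div(sec, NSEC_PER_SEC);	/* sec is now 78 */

		return nsec;				/* 187493520 */
	}

On the wire, the seconds occupy six bytes, hence the 16-bit high part written at offset2 and the 32-bit low part at offset2 + 2, followed by four bytes of nanoseconds at offset2 + 6, all big endian.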
@@ -186,6 +279,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
tx_swbd->dma = dma;
tx_swbd->len = len;
tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
count++;
}
@@ -194,6 +288,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
temp_bd.flags = flags;
*txbd = temp_bd;
+ tx_ring->tx_swbd[i].is_eof = true;
tx_ring->tx_swbd[i].skb = skb;
enetc_bdr_idx_inc(tx_ring, &i);
@@ -201,8 +296,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
skb_tx_timestamp(skb);
- /* let H/W know BD ring has been updated */
- enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
+ enetc_update_tx_ring_tail(tx_ring);
return count;
@@ -211,7 +305,7 @@ dma_err:
do {
tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_skb(tx_ring, tx_swbd);
+ enetc_free_tx_frame(tx_ring, tx_swbd);
if (i == 0)
i = tx_ring->bd_count;
i--;
@@ -220,6 +314,76 @@ dma_err:
return 0;
}
+static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_bdr *tx_ring;
+ int count;
+
+ /* Queue one-step Sync packet if already locked */
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
+ &priv->flags)) {
+ skb_queue_tail(&priv->tx_skbs, skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ tx_ring = priv->tx_ring[skb->queue_mapping];
+
+ if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_linearize(skb)))
+ goto drop_packet_err;
+
+ count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+
+ if (unlikely(!count))
+ goto drop_packet_err;
+
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ netif_stop_subqueue(ndev, tx_ring->index);
+
+ return NETDEV_TX_OK;
+
+drop_packet_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u8 udp, msgtype, twostep;
+ u16 offset1, offset2;
+
+ /* Mark the Tx timestamp type in skb->cb[0] if required */
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
+ skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
+ } else {
+ skb->cb[0] = 0;
+ }
+
+ /* Fall back to two-step timestamp if not one-step Sync packet */
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
+ &offset1, &offset2) ||
+ msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
+ skb->cb[0] = ENETC_F_TX_TSTAMP;
+ }
+
+ return enetc_start_xmit(skb, ndev);
+}
+
static irqreturn_t enetc_msix(int irq, void *data)
{
struct enetc_int_vector *v = data;
@@ -241,10 +405,6 @@ static irqreturn_t enetc_msix(int irq, void *data)
return IRQ_HANDLED;
}
-static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
-static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
- struct napi_struct *napi, int work_limit);
-
static void enetc_rx_dim_work(struct work_struct *w)
{
struct dim *dim = container_of(w, struct dim, work);
@@ -273,55 +433,30 @@ static void enetc_rx_net_dim(struct enetc_int_vector *v)
net_dim(&v->rx_dim, dim_sample);
}
-static int enetc_poll(struct napi_struct *napi, int budget)
+static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
- struct enetc_int_vector
- *v = container_of(napi, struct enetc_int_vector, napi);
- bool complete = true;
- int work_done;
- int i;
-
- enetc_lock_mdio();
-
- for (i = 0; i < v->count_tx_rings; i++)
- if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
- complete = false;
-
- work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
- if (work_done == budget)
- complete = false;
- if (work_done)
- v->rx_napi_work = true;
-
- if (!complete) {
- enetc_unlock_mdio();
- return budget;
- }
-
- napi_complete_done(napi, work_done);
-
- if (likely(v->rx_dim_en))
- enetc_rx_net_dim(v);
-
- v->rx_napi_work = false;
-
- /* enable interrupts */
- enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
-
- for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
- enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
- ENETC_TBIER_TXTIE);
+ int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
- enetc_unlock_mdio();
+ return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+}
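enetc_bd_ready_count() measures how far the hardware consumer index (read from TBCIR) has advanced past the driver's clean index, modulo the ring size. An illustrative standalone version with a hypothetical 64-BD ring:

	static int example_ready_count(int pi, int ci, int bd_count)
	{
		return pi >= ci ? pi - ci : bd_count - ci + pi;
	}

	/* example_ready_count(20, 12, 64) == 8  (no wrap)
	 * example_ready_count(5, 60, 64)  == 9  (wrapped past the end)
	 */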
- return work_done;
+static bool enetc_page_reusable(struct page *page)
+{
+ return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
}
-static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
+static void enetc_reuse_page(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *old)
{
- int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+ struct enetc_rx_swbd *new;
- return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+ new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
+
+ /* next buf that may reuse a page */
+ enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
+
+ /* copy page reference */
+ *new = *old;
}
static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
@@ -344,23 +479,58 @@ static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
- /* Ensure skb_mstamp_ns, which might have been populated with
- * the txtime, is not mistaken for a software timestamp,
- * because this will prevent the dispatch of our hardware
- * timestamp to the socket.
- */
- skb->tstamp = ktime_set(0, 0);
+ skb_txtime_consumed(skb);
skb_tstamp_tx(skb, &shhwtstamps);
}
}
+static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
+ struct enetc_rx_swbd rx_swbd = {
+ .dma = tx_swbd->dma,
+ .page = tx_swbd->page,
+ .page_offset = tx_swbd->page_offset,
+ .dir = tx_swbd->dir,
+ .len = tx_swbd->len,
+ };
+ struct enetc_bdr *rx_ring;
+
+ rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);
+
+ if (likely(enetc_swbd_unused(rx_ring))) {
+ enetc_reuse_page(rx_ring, &rx_swbd);
+
+ /* sync for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
+ rx_swbd.page_offset,
+ ENETC_RXB_DMA_SIZE_XDP,
+ rx_swbd.dir);
+
+ rx_ring->stats.recycles++;
+ } else {
+ /* The RX ring is already full, so we need to unmap and free the
+ * page, since there's nothing useful we can do with it.
+ */
+ rx_ring->stats.recycle_failures++;
+
+ dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
+ rx_swbd.dir);
+ __free_page(rx_swbd.page);
+ }
+
+ rx_ring->xdp.xdp_tx_in_flight--;
+}
+
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
struct net_device *ndev = tx_ring->ndev;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
int tx_frm_cnt = 0, tx_byte_cnt = 0;
struct enetc_tx_swbd *tx_swbd;
int i, bds_to_clean;
- bool do_tstamp;
+ bool do_twostep_tstamp;
u64 tstamp = 0;
i = tx_ring->next_to_clean;
@@ -368,10 +538,12 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
- do_tstamp = false;
+ do_twostep_tstamp = false;
while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
- bool is_eof = !!tx_swbd->skb;
+ struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
+ struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
+ bool is_eof = tx_swbd->is_eof;
if (unlikely(tx_swbd->check_wb)) {
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -380,26 +552,40 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
txbd = ENETC_TXBD(*tx_ring, i);
if (txbd->flags & ENETC_TXBD_FLAGS_W &&
- tx_swbd->do_tstamp) {
+ tx_swbd->do_twostep_tstamp) {
enetc_get_tx_tstamp(&priv->si->hw, txbd,
&tstamp);
- do_tstamp = true;
+ do_twostep_tstamp = true;
}
}
- if (likely(tx_swbd->dma))
+ if (tx_swbd->is_xdp_tx)
+ enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
+ else if (likely(tx_swbd->dma))
enetc_unmap_tx_buff(tx_ring, tx_swbd);
- if (is_eof) {
- if (unlikely(do_tstamp)) {
- enetc_tstamp_tx(tx_swbd->skb, tstamp);
- do_tstamp = false;
+ if (xdp_frame) {
+ xdp_return_frame(xdp_frame);
+ } else if (skb) {
+ if (unlikely(tx_swbd->skb->cb[0] &
+ ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ /* Start work to release the lock for the next
+ * one-step timestamping packet, and to send one
+ * skb from the tx_skbs queue if there is one.
+ */
+ schedule_work(&priv->tx_onestep_tstamp);
+ } else if (unlikely(do_twostep_tstamp)) {
+ enetc_tstamp_tx(skb, tstamp);
+ do_twostep_tstamp = false;
}
- napi_consume_skb(tx_swbd->skb, napi_budget);
- tx_swbd->skb = NULL;
+ napi_consume_skb(skb, napi_budget);
}
tx_byte_cnt += tx_swbd->len;
+ /* Scrub the swbd here so we don't have to do that
+ * when we reuse it during xmit
+ */
+ memset(tx_swbd, 0, sizeof(*tx_swbd));
bds_to_clean--;
tx_swbd++;
@@ -437,6 +623,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
static bool enetc_new_page(struct enetc_bdr *rx_ring,
struct enetc_rx_swbd *rx_swbd)
{
+ bool xdp = !!(rx_ring->xdp.prog);
struct page *page;
dma_addr_t addr;
@@ -444,7 +631,10 @@ static bool enetc_new_page(struct enetc_bdr *rx_ring,
if (unlikely(!page))
return false;
- addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ /* For XDP_TX, we forgo dma_unmap -> dma_map */
+ rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+
+ addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
__free_page(page);
@@ -453,7 +643,7 @@ static bool enetc_new_page(struct enetc_bdr *rx_ring,
rx_swbd->dma = addr;
rx_swbd->page = page;
- rx_swbd->page_offset = ENETC_RXB_PAD;
+ rx_swbd->page_offset = rx_ring->buffer_offset;
return true;
}
@@ -483,18 +673,16 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
/* clear 'R' as well */
rxbd->r.lstatus = 0;
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- rx_swbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
- i = 0;
- rx_swbd = rx_ring->rx_swbd;
- }
+ enetc_rxbd_next(rx_ring, &rxbd, &i);
+ rx_swbd = &rx_ring->rx_swbd[i];
}
if (likely(j)) {
rx_ring->next_to_alloc = i; /* keep track for page reuse */
rx_ring->next_to_use = i;
+
+ /* update ENETC's consumer index */
+ enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
}
return j;
@@ -570,32 +758,10 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
#endif
}
-static void enetc_process_skb(struct enetc_bdr *rx_ring,
- struct sk_buff *skb)
-{
- skb_record_rx_queue(skb, rx_ring->index);
- skb->protocol = eth_type_trans(skb, rx_ring->ndev);
-}
-
-static bool enetc_page_reusable(struct page *page)
-{
- return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
-}
-
-static void enetc_reuse_page(struct enetc_bdr *rx_ring,
- struct enetc_rx_swbd *old)
-{
- struct enetc_rx_swbd *new;
-
- new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
-
- /* next buf that may reuse a page */
- enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
-
- /* copy page reference */
- *new = *old;
-}
-
+/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
+ * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
+ * mapped buffers.
+ */
static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
int i, u16 size)
{
@@ -603,30 +769,39 @@ static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
rx_swbd->page_offset,
- size, DMA_FROM_DEVICE);
+ size, rx_swbd->dir);
return rx_swbd;
}
+/* Reuse the current page without performing half-page buffer flipping */
static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
struct enetc_rx_swbd *rx_swbd)
{
+ size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;
+
+ enetc_reuse_page(rx_ring, rx_swbd);
+
+ dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
+ rx_swbd->page_offset,
+ buffer_size, rx_swbd->dir);
+
+ rx_swbd->page = NULL;
+}
+
+/* Reuse the current page by performing half-page buffer flipping */
+static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *rx_swbd)
+{
if (likely(enetc_page_reusable(rx_swbd->page))) {
rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
page_ref_inc(rx_swbd->page);
- enetc_reuse_page(rx_ring, rx_swbd);
-
- /* sync for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
- rx_swbd->page_offset,
- ENETC_RXB_DMA_SIZE,
- DMA_FROM_DEVICE);
+ enetc_put_rx_buff(rx_ring, rx_swbd);
} else {
- dma_unmap_page(rx_ring->dev, rx_swbd->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
+ rx_swbd->page = NULL;
}
-
- rx_swbd->page = NULL;
}
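The flip works because ENETC_RXB_TRUESIZE is half a page, so XOR-ing page_offset with it toggles between the two halves of the same page: one half is handed to the stack while the other, protected by the extra page reference, is recycled into the ring. A sketch of the toggle, assuming the driver's 2048-byte truesize and 4 KB pages:

	static u16 example_flip(u16 page_offset)
	{
		return page_offset ^ 2048;	/* 0 <-> 2048 */
	}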
static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
@@ -637,16 +812,16 @@ static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
void *ba;
ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
- skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
+ skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
if (unlikely(!skb)) {
rx_ring->stats.rx_alloc_errs++;
return NULL;
}
- skb_reserve(skb, ENETC_RXB_PAD);
+ skb_reserve(skb, rx_ring->buffer_offset);
__skb_put(skb, size);
- enetc_put_rx_buff(rx_ring, rx_swbd);
+ enetc_flip_rx_buff(rx_ring, rx_swbd);
return skb;
}
@@ -659,7 +834,72 @@ static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
- enetc_put_rx_buff(rx_ring, rx_swbd);
+ enetc_flip_rx_buff(rx_ring, rx_swbd);
+}
+
+static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
+ u32 bd_status,
+ union enetc_rx_bd **rxbd, int *i)
+{
+ if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
+ return false;
+
+ enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
+ enetc_rxbd_next(rx_ring, rxbd, i);
+
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ dma_rmb();
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+
+ enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+
+ rx_ring->ndev->stats.rx_dropped++;
+ rx_ring->ndev->stats.rx_errors++;
+
+ return true;
+}
+
+static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
+ u32 bd_status, union enetc_rx_bd **rxbd,
+ int *i, int *cleaned_cnt, int buffer_size)
+{
+ struct sk_buff *skb;
+ u16 size;
+
+ size = le16_to_cpu((*rxbd)->r.buf_len);
+ skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
+ if (!skb)
+ return NULL;
+
+ enetc_get_offloads(rx_ring, *rxbd, skb);
+
+ (*cleaned_cnt)++;
+
+ enetc_rxbd_next(rx_ring, rxbd, i);
+
+ /* not last BD in frame? */
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+ size = buffer_size;
+
+ if (bd_status & ENETC_RXBD_LSTATUS_F) {
+ dma_rmb();
+ size = le16_to_cpu((*rxbd)->r.buf_len);
+ }
+
+ enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
+
+ (*cleaned_cnt)++;
+
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+
+ skb_record_rx_queue(skb, rx_ring->index);
+ skb->protocol = eth_type_trans(skb, rx_ring->ndev);
+
+ return skb;
}
#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
@@ -678,15 +918,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
u32 bd_status;
- u16 size;
- if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
- int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
-
- /* update ENETC's consumer index */
- enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
- cleaned_cnt -= count;
- }
+ if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
+ cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
+ cleaned_cnt);
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -695,73 +930,511 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
dma_rmb(); /* for reading other rxbd fields */
- size = le16_to_cpu(rxbd->r.buf_len);
- skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+
+ if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
+ &rxbd, &i))
+ break;
+
+ skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
+ &cleaned_cnt, ENETC_RXB_DMA_SIZE);
if (!skb)
break;
- enetc_get_offloads(rx_ring, rxbd, skb);
+ rx_byte_cnt += skb->len;
+ rx_frm_cnt++;
- cleaned_cnt++;
+ napi_gro_receive(napi, skb);
+ }
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- if (unlikely(++i == rx_ring->bd_count))
- i = 0;
+ rx_ring->next_to_clean = i;
- if (unlikely(bd_status &
- ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
- dev_kfree_skb(skb);
- while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
- dma_rmb();
- bd_status = le32_to_cpu(rxbd->r.lstatus);
+ rx_ring->stats.packets += rx_frm_cnt;
+ rx_ring->stats.bytes += rx_byte_cnt;
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- if (unlikely(++i == rx_ring->bd_count))
- i = 0;
- }
+ return rx_frm_cnt;
+}
+
+static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
+ struct enetc_tx_swbd *tx_swbd,
+ int frm_len)
+{
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
+
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(txbd);
+ txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
+ txbd->buf_len = cpu_to_le16(tx_swbd->len);
+ txbd->frm_len = cpu_to_le16(frm_len);
+
+ memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
+}
+
+/* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer
+ * descriptors.
+ */
+static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
+{
+ struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
+ int i, k, frm_len = tmp_tx_swbd->len;
+
+ if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
+ return false;
+
+ while (unlikely(!tmp_tx_swbd->is_eof)) {
+ tmp_tx_swbd++;
+ frm_len += tmp_tx_swbd->len;
+ }
+
+ i = tx_ring->next_to_use;
+
+ for (k = 0; k < num_tx_swbd; k++) {
+ struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
+
+ enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
+
+ /* last BD needs 'F' bit set */
+ if (xdp_tx_swbd->is_eof) {
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
+
+ txbd->flags = ENETC_TXBD_FLAGS_F;
+ }
+
+ enetc_bdr_idx_inc(tx_ring, &i);
+ }
+
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *xdp_tx_arr,
+ struct xdp_frame *xdp_frame)
+{
+ struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
+ struct skb_shared_info *shinfo;
+ void *data = xdp_frame->data;
+ int len = xdp_frame->len;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ unsigned int f;
+ int n = 0;
+
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -1;
+ }
+
+ xdp_tx_swbd->dma = dma;
+ xdp_tx_swbd->dir = DMA_TO_DEVICE;
+ xdp_tx_swbd->len = len;
+ xdp_tx_swbd->is_xdp_redirect = true;
+ xdp_tx_swbd->is_eof = false;
+ xdp_tx_swbd->xdp_frame = NULL;
+
+ n++;
+ xdp_tx_swbd = &xdp_tx_arr[n];
+
+ shinfo = xdp_get_shared_info_from_frame(xdp_frame);
+
+ for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
+ f++, frag++) {
+ data = skb_frag_address(frag);
+ len = skb_frag_size(frag);
+
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
+ /* Undo the DMA mapping for all fragments */
+ while (--n >= 0)
+ enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
+
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -1;
+ }
+
+ xdp_tx_swbd->dma = dma;
+ xdp_tx_swbd->dir = DMA_TO_DEVICE;
+ xdp_tx_swbd->len = len;
+ xdp_tx_swbd->is_xdp_redirect = true;
+ xdp_tx_swbd->is_eof = false;
+ xdp_tx_swbd->xdp_frame = NULL;
+
+ n++;
+ xdp_tx_swbd = &xdp_tx_arr[n];
+ }
+
+ xdp_tx_arr[n - 1].is_eof = true;
+ xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
+
+ return n;
+}
+
+int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_bdr *tx_ring;
+ int xdp_tx_bd_cnt, i, k;
+ int xdp_tx_frm_cnt = 0;
+
+ enetc_lock_mdio();
+
+ tx_ring = priv->xdp_tx_ring[smp_processor_id()];
- rx_ring->ndev->stats.rx_dropped++;
- rx_ring->ndev->stats.rx_errors++;
+ prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
+ for (k = 0; k < num_frames; k++) {
+ xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
+ xdp_redirect_arr,
+ frames[k]);
+ if (unlikely(xdp_tx_bd_cnt < 0))
+ break;
+
+ if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
+ xdp_tx_bd_cnt))) {
+ for (i = 0; i < xdp_tx_bd_cnt; i++)
+ enetc_unmap_tx_buff(tx_ring,
+ &xdp_redirect_arr[i]);
+ tx_ring->stats.xdp_tx_drops++;
break;
}
- /* not last BD in frame? */
- while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
- bd_status = le32_to_cpu(rxbd->r.lstatus);
- size = ENETC_RXB_DMA_SIZE;
+ xdp_tx_frm_cnt++;
+ }
- if (bd_status & ENETC_RXBD_LSTATUS_F) {
- dma_rmb();
- size = le16_to_cpu(rxbd->r.buf_len);
- }
+ if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
+ enetc_update_tx_ring_tail(tx_ring);
+
+ tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
+
+ enetc_unlock_mdio();
+
+ return xdp_tx_frm_cnt;
+}
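enetc_xdp_xmit() implements the ndo_xdp_xmit hook; it runs on the CPU that performed the redirect, which is why the TX ring is picked as priv->xdp_tx_ring[smp_processor_id()]. A sketch of how the hooks would be published (the actual netdev_ops table lives in the PF-specific file, outside this diff):

	static const struct net_device_ops example_ndev_ops = {
		/* ... ndo_open, ndo_stop, ndo_start_xmit, ... */
		.ndo_bpf	= enetc_setup_bpf,
		.ndo_xdp_xmit	= enetc_xdp_xmit,
	};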
+
+static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
+ struct xdp_buff *xdp_buff, u16 size)
+{
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
+ struct skb_shared_info *shinfo;
+
+ /* To be used for XDP_TX */
+ rx_swbd->len = size;
- enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
+ xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
+ rx_ring->buffer_offset, size, false);
- cleaned_cnt++;
+ shinfo = xdp_get_shared_info_from_buff(xdp_buff);
+ shinfo->nr_frags = 0;
+}
+
+static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
+ u16 size, struct xdp_buff *xdp_buff)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
+
+ /* To be used for XDP_TX */
+ rx_swbd->len = size;
+
+ skb_frag_off_set(frag, rx_swbd->page_offset);
+ skb_frag_size_set(frag, size);
+ __skb_frag_set_page(frag, rx_swbd->page);
+
+ shinfo->nr_frags++;
+}
+
+static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
+ union enetc_rx_bd **rxbd, int *i,
+ int *cleaned_cnt, struct xdp_buff *xdp_buff)
+{
+ u16 size = le16_to_cpu((*rxbd)->r.buf_len);
+
+ xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
+
+ enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
+ (*cleaned_cnt)++;
+ enetc_rxbd_next(rx_ring, rxbd, i);
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- if (unlikely(++i == rx_ring->bd_count))
- i = 0;
+ /* not last BD in frame? */
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+ size = ENETC_RXB_DMA_SIZE_XDP;
+
+ if (bd_status & ENETC_RXBD_LSTATUS_F) {
+ dma_rmb();
+ size = le16_to_cpu((*rxbd)->r.buf_len);
}
- rx_byte_cnt += skb->len;
+ enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
+ (*cleaned_cnt)++;
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+}
- enetc_process_skb(rx_ring, skb);
+/* Convert RX buffer descriptors to TX buffer descriptors. These will be
+ * recycled back into the RX ring in enetc_clean_tx_ring.
+ */
+static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
+ struct enetc_bdr *rx_ring,
+ int rx_ring_first, int rx_ring_last)
+{
+ int n = 0;
+
+ for (; rx_ring_first != rx_ring_last;
+ n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
+ struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
+
+ /* No need to dma_map, we already have DMA_BIDIRECTIONAL */
+ tx_swbd->dma = rx_swbd->dma;
+ tx_swbd->dir = rx_swbd->dir;
+ tx_swbd->page = rx_swbd->page;
+ tx_swbd->page_offset = rx_swbd->page_offset;
+ tx_swbd->len = rx_swbd->len;
+ tx_swbd->is_dma_page = true;
+ tx_swbd->is_xdp_tx = true;
+ tx_swbd->is_eof = false;
+ }
- napi_gro_receive(napi, skb);
+ /* We rely on the caller providing rx_ring_last > rx_ring_first */
+ xdp_tx_arr[n - 1].is_eof = true;
+
+ return n;
+}
+
+static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ enetc_put_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+ rx_ring->stats.xdp_drops++;
+}
+
+static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
+
+ if (rx_swbd->page) {
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
+ __free_page(rx_swbd->page);
+ rx_swbd->page = NULL;
+ }
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+ rx_ring->stats.xdp_redirect_failures++;
+}
+
+static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ struct napi_struct *napi, int work_limit,
+ struct bpf_prog *prog)
+{
+ int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
+ struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
+ struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
+ int rx_frm_cnt = 0, rx_byte_cnt = 0;
+ struct enetc_bdr *tx_ring;
+ int cleaned_cnt, i;
+ u32 xdp_act;
+
+ cleaned_cnt = enetc_bd_unused(rx_ring);
+ /* next descriptor to process */
+ i = rx_ring->next_to_clean;
+
+ while (likely(rx_frm_cnt < work_limit)) {
+ union enetc_rx_bd *rxbd, *orig_rxbd;
+ int orig_i, orig_cleaned_cnt;
+ struct xdp_buff xdp_buff;
+ struct sk_buff *skb;
+ int tmp_orig_i, err;
+ u32 bd_status;
+
+ rxbd = enetc_rxbd(rx_ring, i);
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ if (!bd_status)
+ break;
+
+ enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
+ dma_rmb(); /* for reading other rxbd fields */
+
+ if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
+ &rxbd, &i))
+ break;
+
+ orig_rxbd = rxbd;
+ orig_cleaned_cnt = cleaned_cnt;
+ orig_i = i;
+
+ enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
+ &cleaned_cnt, &xdp_buff);
+
+ xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
+
+ switch (xdp_act) {
+ default:
+ bpf_warn_invalid_xdp_action(xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ break;
+ case XDP_PASS:
+ rxbd = orig_rxbd;
+ cleaned_cnt = orig_cleaned_cnt;
+ i = orig_i;
+
+ skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
+ &i, &cleaned_cnt,
+ ENETC_RXB_DMA_SIZE_XDP);
+ if (unlikely(!skb))
+ goto out;
+
+ napi_gro_receive(napi, skb);
+ break;
+ case XDP_TX:
+ tx_ring = priv->xdp_tx_ring[rx_ring->index];
+ xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
+ rx_ring,
+ orig_i, i);
+
+ if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ tx_ring->stats.xdp_tx_drops++;
+ } else {
+ tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
+ xdp_tx_frm_cnt++;
+ /* The XDP_TX enqueue was successful, so we
+ * need to scrub the RX software BDs because
+ * the ownership of the buffers no longer
+ * belongs to the RX ring, and we must prevent
+ * enetc_refill_rx_ring() from reusing
+ * rx_swbd->page.
+ */
+ while (orig_i != i) {
+ rx_ring->rx_swbd[orig_i].page = NULL;
+ enetc_bdr_idx_inc(rx_ring, &orig_i);
+ }
+ }
+ break;
+ case XDP_REDIRECT:
+ /* xdp_return_frame does not support S/G in the sense
+ * that it leaks the fragments (__xdp_return should not
+ * call page_frag_free only for the initial buffer).
+ * Until XDP_REDIRECT gains support for S/G let's keep
+ * the code structure in place, but dead. We drop the
+ * S/G frames ourselves to avoid memory leaks which
+ * would otherwise drive the kernel out of memory.
+ */
+ if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_redirect_sg++;
+ break;
+ }
+
+ tmp_orig_i = orig_i;
+
+ while (orig_i != i) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[orig_i]);
+ enetc_bdr_idx_inc(rx_ring, &orig_i);
+ }
+
+ err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ if (unlikely(err)) {
+ enetc_xdp_free(rx_ring, tmp_orig_i, i);
+ } else {
+ xdp_redirect_frm_cnt++;
+ rx_ring->stats.xdp_redirect++;
+ }
+ }
rx_frm_cnt++;
}
+out:
rx_ring->next_to_clean = i;
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
+ if (xdp_redirect_frm_cnt)
+ xdp_do_flush_map();
+
+ if (xdp_tx_frm_cnt)
+ enetc_update_tx_ring_tail(tx_ring);
+
+ if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
+ enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
+ rx_ring->xdp.xdp_tx_in_flight);
+
return rx_frm_cnt;
}
+static int enetc_poll(struct napi_struct *napi, int budget)
+{
+ struct enetc_int_vector
+ *v = container_of(napi, struct enetc_int_vector, napi);
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ struct bpf_prog *prog;
+ bool complete = true;
+ int work_done;
+ int i;
+
+ enetc_lock_mdio();
+
+ for (i = 0; i < v->count_tx_rings; i++)
+ if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+ complete = false;
+
+ prog = rx_ring->xdp.prog;
+ if (prog)
+ work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
+ else
+ work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
+ if (work_done == budget)
+ complete = false;
+ if (work_done)
+ v->rx_napi_work = true;
+
+ if (!complete) {
+ enetc_unlock_mdio();
+ return budget;
+ }
+
+ napi_complete_done(napi, work_done);
+
+ if (likely(v->rx_dim_en))
+ enetc_rx_net_dim(v);
+
+ v->rx_napi_work = false;
+
+ /* enable interrupts */
+ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
+
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
+ ENETC_TBIER_TXTIE);
+
+ enetc_unlock_mdio();
+
+ return work_done;
+}
+
/* Probing and Init */
#define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
@@ -836,7 +1509,7 @@ static void enetc_free_txbdr(struct enetc_bdr *txr)
int size, i;
for (i = 0; i < txr->bd_count; i++)
- enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
+ enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
size = txr->bd_count * sizeof(union enetc_tx_bd);
@@ -953,7 +1626,7 @@ static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
for (i = 0; i < tx_ring->bd_count; i++) {
struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_skb(tx_ring, tx_swbd);
+ enetc_free_tx_frame(tx_ring, tx_swbd);
}
tx_ring->next_to_clean = 0;
@@ -973,8 +1646,8 @@ static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
if (!rx_swbd->page)
continue;
- dma_unmap_page(rx_ring->dev, rx_swbd->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
__free_page(rx_swbd->page);
rx_swbd->page = NULL;
}
@@ -995,60 +1668,6 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
enetc_free_tx_ring(priv->tx_ring[i]);
}
-int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
-{
- int size = cbdr->bd_count * sizeof(struct enetc_cbd);
-
- cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
- GFP_KERNEL);
- if (!cbdr->bd_base)
- return -ENOMEM;
-
- /* h/w requires 128B alignment */
- if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
- dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
- return -EINVAL;
- }
-
- cbdr->next_to_clean = 0;
- cbdr->next_to_use = 0;
-
- return 0;
-}
-
-void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
-{
- int size = cbdr->bd_count * sizeof(struct enetc_cbd);
-
- dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
- cbdr->bd_base = NULL;
-}
-
-void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
-{
- /* set CBDR cache attributes */
- enetc_wr(hw, ENETC_SICAR2,
- ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
-
- enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
- enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
- enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
-
- enetc_wr(hw, ENETC_SICBDRPIR, 0);
- enetc_wr(hw, ENETC_SICBDRCIR, 0);
-
- /* enable ring */
- enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
-
- cbdr->pir = hw->reg + ENETC_SICBDRPIR;
- cbdr->cir = hw->reg + ENETC_SICBDRCIR;
-}
-
-void enetc_clear_cbdr(struct enetc_hw *hw)
-{
- enetc_wr(hw, ENETC_SICBDRMR, 0);
-}
-
static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
{
int *rss_table;
@@ -1108,45 +1727,22 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
priv->bdr_int_num = cpus;
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
priv->tx_ictt = ENETC_TXIC_TIMETHR;
-
- /* SI specific */
- si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
}
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
- int err;
-
- err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
- if (err)
- return err;
-
- enetc_setup_cbdr(&si->hw, &si->cbd_ring);
priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
GFP_KERNEL);
- if (!priv->cls_rules) {
- err = -ENOMEM;
- goto err_alloc_cls;
- }
+ if (!priv->cls_rules)
+ return -ENOMEM;
return 0;
-
-err_alloc_cls:
- enetc_clear_cbdr(&si->hw);
- enetc_free_cbdr(priv->dev, &si->cbd_ring);
-
- return err;
}
void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
- struct enetc_si *si = priv->si;
-
- enetc_clear_cbdr(&si->hw);
- enetc_free_cbdr(priv->dev, &si->cbd_ring);
-
kfree(priv->cls_rules);
}
@@ -1199,7 +1795,10 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
ENETC_RTBLENR_LEN(rx_ring->bd_count));
- enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+ if (rx_ring->xdp.prog)
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
+ else
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
@@ -1217,9 +1816,9 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+ enetc_lock_mdio();
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
- /* update ENETC's consumer index */
- enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use);
+ enetc_unlock_mdio();
/* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
@@ -1408,6 +2007,29 @@ static int enetc_phylink_connect(struct net_device *ndev)
return 0;
}
+static void enetc_tx_onestep_tstamp(struct work_struct *work)
+{
+ struct enetc_ndev_priv *priv;
+ struct sk_buff *skb;
+
+ priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
+
+ netif_tx_lock(priv->ndev);
+
+ clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
+ skb = skb_dequeue(&priv->tx_skbs);
+ if (skb)
+ enetc_start_xmit(skb, priv->ndev);
+
+ netif_tx_unlock(priv->ndev);
+}
+
+static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
+{
+ INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
+ skb_queue_head_init(&priv->tx_skbs);
+}
+
void enetc_start(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1434,6 +2056,7 @@ void enetc_start(struct net_device *ndev)
int enetc_open(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int num_stack_tx_queues;
int err;
err = enetc_setup_irqs(priv);
@@ -1452,7 +2075,9 @@ int enetc_open(struct net_device *ndev)
if (err)
goto err_alloc_rx;
- err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
if (err)
goto err_set_queues;
@@ -1460,6 +2085,7 @@ int enetc_open(struct net_device *ndev)
if (err)
goto err_set_queues;
+ enetc_tx_onestep_tstamp_init(priv);
enetc_setup_bdrs(priv);
enetc_start(ndev);
@@ -1524,15 +2150,17 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
struct enetc_bdr *tx_ring;
+ int num_stack_tx_queues;
u8 num_tc;
int i;
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
num_tc = mqprio->num_tc;
if (!num_tc) {
netdev_reset_tc(ndev);
- netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+ netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
/* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) {
@@ -1544,7 +2172,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
}
/* Check if we have enough BD rings available to accommodate all TCs */
- if (num_tc > priv->num_tx_rings) {
+ if (num_tc > num_stack_tx_queues) {
netdev_err(ndev, "Max %d traffic classes supported\n",
priv->num_tx_rings);
return -EINVAL;
@@ -1590,6 +2218,54 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
}
}
+static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ bool is_up;
+ int i;
+
+ /* The buffer layout is changing, so we need to drain the old
+ * RX buffers and seed new ones.
+ */
+ is_up = netif_running(dev);
+ if (is_up)
+ dev_close(dev);
+
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ struct enetc_bdr *rx_ring = priv->rx_ring[i];
+
+ rx_ring->xdp.prog = prog;
+
+ if (prog)
+ rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
+ else
+ rx_ring->buffer_offset = ENETC_RXB_PAD;
+ }
+
+ if (is_up)
+ return dev_open(dev, extack);
+
+ return 0;
+}
+
+int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
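enetc_setup_bpf() is reached via dev_change_xdp_fd() when userspace attaches a program. A minimal userspace sketch, assuming libbpf 0.8+ and an already loaded program; "eth0" and prog_fd are placeholders:

	#include <bpf/libbpf.h>
	#include <linux/if_link.h>
	#include <net/if.h>

	static int attach_xdp(int prog_fd)
	{
		int ifindex = if_nametoindex("eth0");	/* hypothetical port */

		if (!ifindex)
			return -1;
		/* Native mode lands in enetc_setup_xdp_prog() via XDP_SETUP_PROG */
		return bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_DRV_MODE, NULL);
	}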
+
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1710,11 +2386,16 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
case HWTSTAMP_TX_ON:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
priv->active_offloads |= ENETC_F_TX_TSTAMP;
break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
+ break;
default:
return -ERANGE;
}
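The new HWTSTAMP_TX_ONESTEP_SYNC case is selected through the standard SIOCSHWTSTAMP ioctl. A userspace sketch using only kernel UAPI; "eth0" is a placeholder:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_onestep_sync(int sk)
	{
		struct hwtstamp_config cfg = {
			.tx_type = HWTSTAMP_TX_ONESTEP_SYNC,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		return ioctl(sk, SIOCSHWTSTAMP, &ifr);
	}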
@@ -1745,7 +2426,9 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
config.flags = 0;
- if (priv->active_offloads & ENETC_F_TX_TSTAMP)
+ if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
+ config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
config.tx_type = HWTSTAMP_TX_ON;
else
config.tx_type = HWTSTAMP_TX_OFF;
@@ -1777,8 +2460,9 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
- int v_tx_rings;
+ int first_xdp_tx_ring;
int i, n, err, nvec;
+ int v_tx_rings;
nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
/* allocate MSIX for both messaging and Rx/Tx interrupts */
@@ -1806,6 +2490,28 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
priv->int_vector[i] = v;
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ bdr->buffer_offset = ENETC_RXB_PAD;
+ priv->rx_ring[i] = bdr;
+
+ err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+ if (err) {
+ kfree(v);
+ goto fail;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&bdr->xdp.rxq);
+ kfree(v);
+ goto fail;
+ }
+
/* init defaults for adaptive IC */
if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
v->rx_ictt = 0x1;
@@ -1820,11 +2526,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
int idx;
/* default tx ring mapping policy */
- if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
- idx = 2 * j + i; /* 2 CPUs */
- else
- idx = j + i * v_tx_rings; /* default */
-
+ idx = priv->bdr_int_num * j + i;
__set_bit(idx, &v->tx_rings_map);
bdr = &v->tx_ring[j];
bdr->index = idx;
@@ -1833,22 +2535,23 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
bdr->bd_count = priv->tx_bd_count;
priv->tx_ring[idx] = bdr;
}
-
- bdr = &v->rx_ring;
- bdr->index = i;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->rx_bd_count;
- priv->rx_ring[i] = bdr;
}
+ first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
+ priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
+
return 0;
fail:
while (i--) {
- netif_napi_del(&priv->int_vector[i]->napi);
- cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
- kfree(priv->int_vector[i]);
+ struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+ netif_napi_del(&v->napi);
+ cancel_work_sync(&v->rx_dim.work);
+ kfree(v);
}
pci_free_irq_vectors(pdev);
@@ -1862,7 +2565,10 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
for (i = 0; i < priv->bdr_int_num; i++) {
struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
netif_napi_del(&v->napi);
cancel_work_sync(&v->rx_dim.work);
}