Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 1112
 1 file changed, 487 insertions(+), 625 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 086f0b3ab68d..ad76768a4232 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -7,6 +7,8 @@
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/rx.h>
+#include <net/libeth/xdp.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
@@ -20,7 +22,6 @@
#define ICE_RX_HDR_SIZE 256
-#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10
/**
@@ -85,7 +86,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
ICE_TX_DESC_CMD_RE;
- tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
+ tx_buf->type = ICE_TX_BUF_DUMMY;
tx_buf->raw_buf = raw_packet;
tx_desc->cmd_type_offset_bsz =
@@ -112,27 +113,29 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
- if (tx_buf->skb) {
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
- devm_kfree(ring->dev, tx_buf->raw_buf);
- else if (ice_ring_is_xdp(ring))
- page_frag_free(tx_buf->raw_buf);
- else
- dev_kfree_skb_any(tx_buf->skb);
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buf, len)) {
+ if (tx_buf->type != ICE_TX_BUF_XDP_TX && dma_unmap_len(tx_buf, len))
dma_unmap_page(ring->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
+
+ switch (tx_buf->type) {
+ case ICE_TX_BUF_DUMMY:
+ devm_kfree(ring->dev, tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_SKB:
+ dev_kfree_skb_any(tx_buf->skb);
+ break;
+ case ICE_TX_BUF_XDP_TX:
+ libeth_xdp_return_va(tx_buf->raw_buf, false);
+ break;
+ case ICE_TX_BUF_XDP_XMIT:
+ xdp_return_frame(tx_buf->xdpf);
+ break;
}
tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
dma_unmap_len_set(tx_buf, len, 0);
/* tx_buf must be completely set up in the transmit path */
}
@@ -143,6 +146,56 @@ static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
}
/**
+ * ice_clean_tstamp_ring - clean time stamp ring
+ * @tx_ring: Tx ring to clean the Time Stamp ring for
+ */
+static void ice_clean_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ memset(tstamp_ring->desc, 0, size);
+ tstamp_ring->next_to_use = 0;
+}
+
+/**
+ * ice_free_tstamp_ring - free time stamp resources per queue
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ ice_clean_tstamp_ring(tx_ring);
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size, tstamp_ring->desc,
+ tstamp_ring->dma);
+ tstamp_ring->desc = NULL;
+}
+
+/**
+ * ice_free_tx_tstamp_ring - free time stamp resources per Tx ring
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ ice_free_tstamp_ring(tx_ring);
+ kfree_rcu(tx_ring->tstamp_ring, rcu);
+ tx_ring->tstamp_ring = NULL;
+ tx_ring->flags &= ~ICE_TX_FLAGS_TXTIME;
+}
+
+/**
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
@@ -174,14 +227,15 @@ tx_skip_free:
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
- tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
if (!tx_ring->netdev)
return;
/* cleanup Tx queue statistics */
netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ if (ice_is_txtime_cfg(tx_ring))
+ ice_free_tx_tstamp_ring(tx_ring);
}
/**
@@ -267,7 +321,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
DMA_TO_DEVICE);
/* clear tx_buf data */
- tx_buf->skb = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
dma_unmap_len_set(tx_buf, len, 0);
/* unmap remaining buffers */
@@ -333,6 +387,84 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
}
/**
+ * ice_alloc_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to allocate the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_alloc_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ tstamp_ring = kzalloc(sizeof(*tstamp_ring), GFP_KERNEL);
+ if (!tstamp_ring)
+ return -ENOMEM;
+
+ tstamp_ring->tx_ring = tx_ring;
+ tx_ring->tstamp_ring = tstamp_ring;
+ tstamp_ring->desc = NULL;
+ tstamp_ring->count = ice_calc_ts_ring_count(tx_ring);
+ tx_ring->flags |= ICE_TX_FLAGS_TXTIME;
+ return 0;
+}
+
+/**
+ * ice_setup_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to set up the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct device *dev = tx_ring->dev;
+ u32 size;
+
+ /* round up to nearest page */
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ tstamp_ring->desc = dmam_alloc_coherent(dev, size, &tstamp_ring->dma,
+ GFP_KERNEL);
+ if (!tstamp_ring->desc) {
+ dev_err(dev, "Unable to allocate memory for Time stamp Ring, size=%d\n",
+ size);
+ return -ENOMEM;
+ }
+
+ tstamp_ring->next_to_use = 0;
+ return 0;
+}
+
+/**
+ * ice_alloc_setup_tstamp_ring - Allocate and setup the Time Stamp ring
+ * @tx_ring: Tx ring to allocate and setup the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int err;
+
+ err = ice_alloc_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to allocate Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ return err;
+ }
+
+ err = ice_setup_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to setup Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ ice_free_tx_tstamp_ring(tx_ring);
+ return err;
+ }
+ return 0;
+}
+
+/**
* ice_setup_tx_ring - Allocate the Tx descriptors
* @tx_ring: the Tx ring to set up
*
@@ -376,60 +508,67 @@ err:
return -ENOMEM;
}
+void ice_rxq_pp_destroy(struct ice_rx_ring *rq)
+{
+ struct libeth_fq fq = {
+ .fqes = rq->rx_fqes,
+ .pp = rq->pp,
+ };
+
+ libeth_rx_fq_destroy(&fq);
+ rq->rx_fqes = NULL;
+ rq->pp = NULL;
+
+ if (!rq->hdr_pp)
+ return;
+
+ fq.fqes = rq->hdr_fqes;
+ fq.pp = rq->hdr_pp;
+
+ libeth_rx_fq_destroy(&fq);
+ rq->hdr_fqes = NULL;
+ rq->hdr_pp = NULL;
+}
+
/**
* ice_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
*/
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
u32 size;
- u16 i;
-
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buf)
- return;
-
- if (rx_ring->skb) {
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
- }
if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_fqes)
+ return;
+
+ libeth_xdp_return_stash(&rx_ring->xdp);
+
/* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+ for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
+ libeth_rx_recycle_slow(rx_ring->rx_fqes[i].netmem);
- if (!rx_buf->page)
- continue;
+ if (rx_ring->hdr_pp)
+ libeth_rx_recycle_slow(rx_ring->hdr_fqes[i].netmem);
- /* Invalidate cache lines that may have been written to by
- * device so that we avoid corrupting memory.
- */
- dma_sync_single_range_for_cpu(dev, rx_buf->dma,
- rx_buf->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
-
- /* free resources associated with mapping */
- dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
-
- rx_buf->page = NULL;
- rx_buf->page_offset = 0;
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
}
-rx_skip_free:
- if (rx_ring->xsk_pool)
- memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
- else
- memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
+ if (rx_ring->vsi->type == ICE_VSI_PF &&
+ xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) {
+ xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+
+ ice_rxq_pp_destroy(rx_ring);
+rx_skip_free:
/* Zero out the descriptor ring */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
@@ -448,26 +587,20 @@ rx_skip_free:
*/
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
+ struct device *dev = ice_pf_to_dev(rx_ring->vsi->back);
u32 size;
ice_clean_rx_ring(rx_ring);
- if (rx_ring->vsi->type == ICE_VSI_PF)
- if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- rx_ring->xdp_prog = NULL;
+ WRITE_ONCE(rx_ring->xdp_prog, NULL);
if (rx_ring->xsk_pool) {
kfree(rx_ring->xdp_buf);
rx_ring->xdp_buf = NULL;
- } else {
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
}
if (rx_ring->desc) {
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
- dmam_free_coherent(rx_ring->dev, size,
- rx_ring->desc, rx_ring->dma);
+ dmam_free_coherent(dev, size, rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
}
@@ -480,19 +613,9 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
*/
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
+ struct device *dev = ice_pf_to_dev(rx_ring->vsi->back);
u32 size;
- if (!dev)
- return -ENOMEM;
-
- /* warn if we are about to overwrite the pointer */
- WARN_ON(rx_ring->rx_buf);
- rx_ring->rx_buf =
- kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
- if (!rx_ring->rx_buf)
- return -ENOMEM;
-
/* round up to nearest page */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
@@ -501,7 +624,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (!rx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
size);
- goto err;
+ return -ENOMEM;
}
rx_ring->next_to_use = 0;
@@ -510,33 +633,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
- if (rx_ring->vsi->type == ICE_VSI_PF &&
- !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
- goto err;
return 0;
-
-err:
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
- return -ENOMEM;
-}
-
-static unsigned int
-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
-{
- unsigned int truesize;
-
-#if (PAGE_SIZE < 8192)
- truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
-#else
- truesize = rx_ring->rx_offset ?
- SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
- SKB_DATA_ALIGN(size);
-#endif
- return truesize;
}
/**
@@ -545,34 +642,41 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused s
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
+ * @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static int
-ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+static u32
+ice_run_xdp(struct ice_rx_ring *rx_ring, struct libeth_xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+ union ice_32b_rx_flex_desc *eop_desc)
{
- int err;
+ unsigned int ret = ICE_XDP_PASS;
u32 act;
- act = bpf_prog_run_xdp(xdp_prog, xdp);
+ if (!xdp_prog)
+ goto exit;
+
+ xdp->desc = eop_desc;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp->base);
switch (act) {
case XDP_PASS:
- return ICE_XDP_PASS;
+ break;
case XDP_TX:
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
- err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+ ret = __ice_xmit_xdp_ring(&xdp->base, xdp_ring, false);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
- if (err == ICE_XDP_CONSUMED)
+ if (ret == ICE_XDP_CONSUMED)
goto out_failure;
- return err;
+ break;
case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
+ if (xdp_do_redirect(rx_ring->netdev, &xdp->base, xdp_prog))
goto out_failure;
- return ICE_XDP_REDIR;
+ ret = ICE_XDP_REDIR;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
@@ -581,8 +685,31 @@ out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- return ICE_XDP_CONSUMED;
+ libeth_xdp_return_buff(xdp);
+ ret = ICE_XDP_CONSUMED;
}
+
+exit:
+ return ret;
+}
+
+/**
+ * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
+ * @xdpf: XDP frame that will be converted to XDP buff
+ * @xdp_ring: XDP ring for transmission
+ */
+static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
+ struct ice_tx_ring *xdp_ring)
+{
+ struct xdp_buff xdp;
+
+ xdp.data_hard_start = (void *)xdpf;
+ xdp.data = xdpf->data;
+ xdp.data_end = xdp.data + xdpf->len;
+ xdp.frame_sz = xdpf->frame_sz;
+ xdp.flags = xdpf->flags;
+
+ return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
}
/**
@@ -605,6 +732,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *xdp_ring;
+ struct ice_tx_buf *tx_buf;
int nxmit = 0, i;
if (test_bit(ICE_VSI_DOWN, vsi->state))
@@ -627,16 +755,18 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdp_ring = vsi->xdp_rings[queue_index];
}
+ tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
for (i = 0; i < n; i++) {
- struct xdp_frame *xdpf = frames[i];
+ const struct xdp_frame *xdpf = frames[i];
int err;
- err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ err = ice_xmit_xdp_ring(xdpf, xdp_ring);
if (err != ICE_XDP_TX)
break;
nxmit++;
}
+ tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
@@ -647,50 +777,34 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
/**
- * ice_alloc_mapped_page - recycle or make a new page
- * @rx_ring: ring to use
- * @bi: rx_buf struct to modify
- *
- * Returns true if the page was successfully allocated or
- * reused.
+ * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi.
+ * @rx_ring: ring to init descriptors on
+ * @count: number of descriptors to initialize
*/
-static bool
-ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page))
- return true;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u32 ntu = rx_ring->next_to_use;
- /* alloc new page for storage */
- page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- return false;
- }
+ if (!count)
+ return;
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, ice_rx_pg_order(rx_ring));
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- return false;
- }
+ do {
+ rx_desc++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ ntu = 0;
+ }
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = rx_ring->rx_offset;
- page_ref_add(page, USHRT_MAX - 1);
- bi->pagecnt_bias = USHRT_MAX;
+ rx_desc->wb.status_error0 = 0;
+ count--;
+ } while (count);
- return true;
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
}
/**
@@ -706,43 +820,62 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/
-bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
+ const struct libeth_fq_fp hdr_fq = {
+ .pp = rx_ring->hdr_pp,
+ .fqes = rx_ring->hdr_fqes,
+ .truesize = rx_ring->hdr_truesize,
+ .count = rx_ring->count,
+ };
+ const struct libeth_fq_fp fq = {
+ .pp = rx_ring->pp,
+ .fqes = rx_ring->rx_fqes,
+ .truesize = rx_ring->truesize,
+ .count = rx_ring->count,
+ };
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
- struct ice_rx_buf *bi;
/* do nothing if no valid netdev defined */
- if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
- !cleaned_count)
+ if (!rx_ring->netdev || !cleaned_count)
return false;
/* get the Rx descriptor and buffer based on next_to_use */
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_buf[ntu];
do {
- /* if we fail here, we have work remaining */
- if (!ice_alloc_mapped_page(rx_ring, bi))
- break;
+ dma_addr_t addr;
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
+ addr = libeth_rx_alloc(&fq, ntu);
+ if (addr == DMA_MAPPING_ERROR) {
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ break;
+ }
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(addr);
+
+ if (!hdr_fq.pp)
+ goto next;
+
+ addr = libeth_rx_alloc(&hdr_fq, ntu);
+ if (addr == DMA_MAPPING_ERROR) {
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+
+ libeth_rx_recycle_slow(fq.fqes[ntu].netmem);
+ break;
+ }
+ rx_desc->read.hdr_addr = cpu_to_le64(addr);
+
+next:
rx_desc++;
- bi++;
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = ICE_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_buf;
ntu = 0;
}
@@ -759,341 +892,41 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
}
/**
- * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
- * @rx_buf: Rx buffer to adjust
- * @size: Size of adjustment
- *
- * Update the offset within page so that Rx buf will be ready to be reused.
- * For systems with PAGE_SIZE < 8192 this function will flip the page offset
- * so the second half of page assigned to Rx buffer will be used, otherwise
- * the offset is moved by "size" bytes
- */
-static void
-ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
-{
-#if (PAGE_SIZE < 8192)
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= size;
-#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += size;
-#endif
-}
-
-/**
- * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
- * @rx_buf: buffer containing the page
- * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
- *
- * If page is reusable, we have a green light for calling ice_reuse_rx_page,
- * which will assign the current buffer to the buffer that next_to_alloc is
- * pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
- */
-static bool
-ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
-{
- unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
- struct page *page = rx_buf->page;
-
- /* avoid re-using remote and pfmemalloc pages */
- if (!dev_page_is_reusable(page))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
- return false;
-#else
-#define ICE_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
- if (rx_buf->page_offset > ICE_LAST_OFFSET)
- return false;
-#endif /* PAGE_SIZE < 8192) */
-
- /* If we have drained the page fragment pool we need to update
- * the pagecnt_bias and page count so that we fully restock the
- * number of references the driver holds.
- */
- if (unlikely(pagecnt_bias == 1)) {
- page_ref_add(page, USHRT_MAX - 1);
- rx_buf->pagecnt_bias = USHRT_MAX;
- }
-
- return true;
-}
-
-/**
- * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
- * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: buffer containing page to add
- * @skb: sk_buff to place the data into
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buf->page to the skb.
- * It will just attach the page as a frag to the skb.
- * The function will then update the page offset.
- */
-static void
-ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct sk_buff *skb, unsigned int size)
-{
-#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
-#else
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#endif
-
- if (!size)
- return;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->page_offset, size, truesize);
-
- /* page is being used so we must update the page offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
-}
-
-/**
- * ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: Rx descriptor ring to store buffers on
- * @old_buf: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- */
-static void
-ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
-{
- u16 nta = rx_ring->next_to_alloc;
- struct ice_rx_buf *new_buf;
-
- new_buf = &rx_ring->rx_buf[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* Transfer page from old buffer to new buffer.
- * Move each member individually to avoid possible store
- * forwarding stalls and unnecessary copy of skb.
- */
- new_buf->dma = old_buf->dma;
- new_buf->page = old_buf->page;
- new_buf->page_offset = old_buf->page_offset;
- new_buf->pagecnt_bias = old_buf->pagecnt_bias;
-}
-
-/**
- * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
- * @rx_ring: Rx descriptor ring to transact packets on
- * @size: size of buffer to add to skb
- * @rx_buf_pgcnt: rx_buf page refcount
+ * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
+ * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
*
- * This function will pull an Rx buffer from the ring and synchronize it
- * for use by the CPU.
+ * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
+ * to set flow director rules on VFs.
*/
-static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
- int *rx_buf_pgcnt)
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
{
- struct ice_rx_buf *rx_buf;
-
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- *rx_buf_pgcnt =
-#if (PAGE_SIZE < 8192)
- page_count(rx_buf->page);
-#else
- 0;
-#endif
- prefetchw(rx_buf->page);
-
- if (!size)
- return rx_buf;
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
- rx_buf->page_offset, size,
- DMA_FROM_DEVICE);
-
- /* We have pulled a buffer for use, so decrement pagecnt_bias */
- rx_buf->pagecnt_bias--;
-
- return rx_buf;
-}
+ u32 ntc = rx_ring->next_to_clean;
+ unsigned int total_rx_pkts = 0;
+ u32 cnt = rx_ring->count;
-/**
- * ice_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
- * @xdp: xdp_buff pointing to the data
- *
- * This function builds an skb around an existing Rx buffer, taking care
- * to set up the skb correctly and avoid any memcpy overhead.
- */
-static struct sk_buff *
-ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
-{
- u8 metasize = xdp->data - xdp->data_meta;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(xdp->data_end -
- xdp->data_hard_start);
-#endif
- struct sk_buff *skb;
-
- /* Prefetch first cache line of first page. If xdp->data_meta
- * is unused, this points exactly as xdp->data, otherwise we
- * likely have a consumer accessing first few bytes of meta
- * data, and then actual data.
- */
- net_prefetch(xdp->data_meta);
- /* build an skb around the page buffer */
- skb = napi_build_skb(xdp->data_hard_start, truesize);
- if (unlikely(!skb))
- return NULL;
-
- /* must to record Rx queue, otherwise OS features such as
- * symmetric queue won't work
- */
- skb_record_rx_queue(skb, rx_ring->q_index);
-
- /* update pointers within the skb to store the data */
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- __skb_put(skb, xdp->data_end - xdp->data);
- if (metasize)
- skb_metadata_set(skb, metasize);
+ while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
+ struct ice_vsi *ctrl_vsi = rx_ring->vsi;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 stat_err_bits;
- /* buffer is used by skb, update page_offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
- return skb;
-}
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+ break;
-/**
- * ice_construct_skb - Allocate skb and populate it
- * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
- * @xdp: xdp_buff pointing to the data
- *
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
- */
-static struct sk_buff *
-ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
-{
- unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int size = xdp->data_end - xdp->data;
- unsigned int headlen;
- struct sk_buff *skb;
+ dma_rmb();
- /* prefetch first cache line of first page */
- net_prefetch(xdp->data_meta);
-
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- ICE_RX_HDR_SIZE + metasize,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb))
- return NULL;
-
- skb_record_rx_queue(skb, rx_ring->q_index);
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > ICE_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
- ALIGN(headlen + metasize, sizeof(long)));
-
- if (metasize) {
- skb_metadata_set(skb, metasize);
- __skb_pull(skb, metasize);
- }
+ if (ctrl_vsi->vf)
+ ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- /* if we exhaust the linear part then add what is left as a frag */
- size -= headlen;
- if (size) {
-#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#else
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#endif
- skb_add_rx_frag(skb, 0, rx_buf->page,
- rx_buf->page_offset + headlen, size, truesize);
- /* buffer is used by skb, update page_offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
- } else {
- /* buffer is unused, reset bias back to rx_buf; data was copied
- * onto skb's linear part so there's no need for adjusting
- * page offset and we can reuse this buffer as-is
- */
- rx_buf->pagecnt_bias++;
+ if (++ntc == cnt)
+ ntc = 0;
+ total_rx_pkts++;
}
- return skb;
-}
-
-/**
- * ice_put_rx_buf - Clean up used buffer and either recycle or free
- * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
- * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
- *
- * This function will update next_to_clean and then clean up the contents
- * of the rx_buf. It will either recycle the buffer or unmap it and free
- * the associated resources.
- */
-static void
-ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- int rx_buf_pgcnt)
-{
- u16 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;
-
- if (!rx_buf)
- return;
-
- if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
- /* hand second half of page back to the ring */
- ice_reuse_rx_page(rx_ring, rx_buf);
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
- ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
- ICE_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
- }
-
- /* clear contents of buffer_info */
- rx_buf->page = NULL;
-}
-
-/**
- * ice_is_non_eop - process handling of non-EOP buffers
- * @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- *
- * If the buffer is an EOP buffer, this function exits returning false,
- * otherwise return true indicating that this is in fact a non-EOP buffer.
- */
-static bool
-ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
-{
- /* if we are the last buffer then there is nothing else to do */
-#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
- if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
- return false;
-
- rx_ring->ring_stats->rx_stats.non_eop_descs++;
-
- return true;
+ ice_init_ctrl_rx_descs(rx_ring, ICE_DESC_UNUSED(rx_ring));
}
/**
@@ -1108,46 +941,44 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
*
* Returns amount of work completed
*/
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
- u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
- unsigned int offset = rx_ring->rx_offset;
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
struct ice_tx_ring *xdp_ring = NULL;
- unsigned int xdp_res, xdp_xmit = 0;
- struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
- struct xdp_buff xdp;
+ u32 ntc = rx_ring->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+ u32 cached_ntu, xdp_verdict;
+ u32 cnt = rx_ring->count;
+ u32 xdp_xmit = 0;
bool failure;
- /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
-#if (PAGE_SIZE < 8192)
- frame_sz = ice_rx_frame_truesize(rx_ring, 0);
-#endif
- xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ libeth_xdp_init_buff(xdp, &rx_ring->xdp, &rx_ring->xdp_rxq);
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
- if (xdp_prog)
+ if (xdp_prog) {
xdp_ring = rx_ring->xdp_ring;
+ cached_ntu = xdp_ring->next_to_use;
+ }
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
- struct ice_rx_buf *rx_buf;
- unsigned char *hard_start;
+ struct libeth_fqe *rx_buf;
+ struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
- int rx_buf_pgcnt;
- u16 vlan_tag = 0;
- u16 rx_ptype;
+ u16 vlan_tci;
+ bool rxe;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
- rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
- /* status_error_len will always be zero for unused descriptors
- * because it's cleared in cleanup, and overlaps with hdr_addr
- * which is always zero because packet split isn't used, if the
- * hardware wrote DD then it will be non-zero
+ /*
+ * The DD bit will always be zero for unused descriptors
+ * because it's cleared in cleanup or when setting the DMA
+ * address of the header buffer, which never uses the DD bit.
+ * If the hardware wrote the descriptor, it will be non-zero.
*/
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
@@ -1160,120 +991,88 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
dma_rmb();
ice_trace(clean_rx_irq, rx_ring, rx_desc);
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- struct ice_vsi *ctrl_vsi = rx_ring->vsi;
-
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
- ctrl_vsi->vf)
- ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- ice_put_rx_buf(rx_ring, NULL, 0);
- cleaned_count++;
- continue;
- }
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_HBO_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
+ rxe = ice_test_staterr(rx_desc->wb.status_error0,
+ stat_err_bits);
+
+ if (!rx_ring->hdr_pp)
+ goto payload;
+
+ size = le16_get_bits(rx_desc->wb.hdr_len_sph_flex_flags1,
+ ICE_RX_FLEX_DESC_HDR_LEN_M);
+ if (unlikely(rxe))
+ size = 0;
+
+ rx_buf = &rx_ring->hdr_fqes[ntc];
+ libeth_xdp_process_buff(xdp, rx_buf, size);
+ rx_buf->netmem = 0;
+
+payload:
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
+ if (unlikely(rxe))
+ size = 0;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
+ rx_buf = &rx_ring->rx_fqes[ntc];
+ libeth_xdp_process_buff(xdp, rx_buf, size);
- if (!size) {
- xdp.data = NULL;
- xdp.data_end = NULL;
- xdp.data_hard_start = NULL;
- xdp.data_meta = NULL;
- goto construct_skb;
- }
+ if (++ntc == cnt)
+ ntc = 0;
- hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
- offset;
- xdp_prepare_buff(&xdp, hard_start, offset, size, true);
-#if (PAGE_SIZE > 4096)
- /* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
-#endif
+ /* skip if it is NOP desc */
+ if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!xdp->data))
+ continue;
- if (!xdp_prog)
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
- xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
- if (!xdp_res)
- goto construct_skb;
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- xdp_xmit |= xdp_res;
- ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
- } else {
- rx_buf->pagecnt_bias++;
- }
- total_rx_bytes += size;
+ if (xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR))
+ xdp_xmit |= xdp_verdict;
+ total_rx_bytes += xdp_get_buff_len(&xdp->base);
total_rx_pkts++;
- cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
+ xdp->data = NULL;
continue;
+
construct_skb:
- if (skb) {
- ice_add_rx_frag(rx_ring, rx_buf, skb, size);
- } else if (likely(xdp.data)) {
- if (ice_ring_uses_build_skb(rx_ring))
- skb = ice_build_skb(rx_ring, rx_buf, &xdp);
- else
- skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
- }
+ skb = xdp_build_skb_from_buff(&xdp->base);
+ xdp->data = NULL;
+
/* exit if we failed to retrieve a buffer */
if (!skb) {
+ libeth_xdp_return_buff_slow(xdp);
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
- if (rx_buf)
- rx_buf->pagecnt_bias++;
- break;
- }
-
- ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
- cleaned_count++;
-
- /* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc))
- continue;
-
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
- if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
- stat_err_bits))) {
- dev_kfree_skb_any(skb);
continue;
}
- vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
-
- /* pad the skb if needed, to make a valid ethernet frame */
- if (eth_skb_pad(skb)) {
- skb = NULL;
- continue;
- }
+ vlan_tci = ice_get_vlan_tci(rx_desc);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
/* populate checksum, VLAN, and protocol */
- rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
- ICE_RX_FLEX_DESC_PTYPE_M;
-
- ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_process_skb_fields(rx_ring, rx_desc, skb);
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
- ice_receive_skb(rx_ring, skb, vlan_tag);
- skb = NULL;
+ ice_receive_skb(rx_ring, skb, vlan_tci);
/* update budget accounting */
total_rx_pkts++;
}
+ rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
- failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
+ failure = ice_alloc_rx_bufs(rx_ring, ICE_DESC_UNUSED(rx_ring));
+
+ if (xdp_xmit)
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
- if (xdp_prog)
- ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
- rx_ring->skb = skb;
+ libeth_xdp_save_buff(&rx_ring->xdp, xdp);
if (rx_ring->ring_stats)
ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
@@ -1346,14 +1145,14 @@ static void ice_net_dim(struct ice_q_vector *q_vector)
struct dim_sample dim_sample;
__ice_update_sample(q_vector, tx, &dim_sample, true);
- net_dim(&tx->dim, dim_sample);
+ net_dim(&tx->dim, &dim_sample);
}
if (ITR_IS_DYNAMIC(rx)) {
struct dim_sample dim_sample;
__ice_update_sample(q_vector, rx, &dim_sample, false);
- net_dim(&rx->dim, dim_sample);
+ net_dim(&rx->dim, &dim_sample);
}
}
@@ -1446,9 +1245,9 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
* be static in non-adaptive mode (user configured)
*/
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
- ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
- GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
- GLINT_DYN_CTL_WB_ON_ITR_M);
+ FIELD_PREP(GLINT_DYN_CTL_ITR_INDX_M, ICE_ITR_NONE) |
+ FIELD_PREP(GLINT_DYN_CTL_INTENA_MSK_M, 1) |
+ FIELD_PREP(GLINT_DYN_CTL_WB_ON_ITR_M, 1));
q_vector->wb_on_itr = true;
}
@@ -1476,10 +1275,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
bool wd;
- if (tx_ring->xsk_pool)
- wd = ice_xmit_zc(tx_ring);
+ if (xsk_pool)
+ wd = ice_xmit_zc(tx_ring, xsk_pool);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -1505,6 +1305,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget;
ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+ struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
int cleaned;
/* A dedicated path for zero-copy allows making a single
@@ -1512,7 +1313,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
cleaned = rx_ring->xsk_pool ?
- ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+ ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
@@ -1616,8 +1417,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
- td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
- ICE_TX_FLAGS_VLAN_S;
+ td_tag = first->vid;
}
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -1682,6 +1482,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
DMA_TO_DEVICE);
tx_buf = &tx_ring->tx_buf[i];
+ tx_buf->type = ICE_TX_BUF_FRAG;
}
/* record SW timestamp if HW timestamp is not available */
@@ -1714,10 +1515,46 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
/* notify HW of packet */
kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
netdev_xmit_more());
- if (kick)
- /* notify HW of packet */
- writel(i, tx_ring->tail);
+ if (!kick)
+ return;
+ if (ice_is_txtime_cfg(tx_ring)) {
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 tstamp_count = tstamp_ring->count;
+ u32 j = tstamp_ring->next_to_use;
+ struct ice_ts_desc *ts_desc;
+ struct timespec64 ts;
+ u32 tstamp;
+
+ ts = ktime_to_timespec64(first->skb->tstamp);
+ tstamp = ts.tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS;
+
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp = ice_build_tstamp_desc(i, tstamp);
+
+ j++;
+ if (j == tstamp_count) {
+ u32 fetch = tstamp_count - tx_ring->count;
+
+ j = 0;
+
+ /* To prevent an MDD, when wrapping the tstamp ring
+ * create additional TS descriptors equal to the number
+ * of the fetch TS descriptors value. HW will merge the
+ * TS descriptors with the same timestamp value into a
+ * single descriptor.
+ */
+ for (; j < fetch; j++) {
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp =
+ ice_build_tstamp_desc(i, tstamp);
+ }
+ }
+ tstamp_ring->next_to_use = j;
+ writel_relaxed(j, tstamp_ring->tail);
+ } else {
+ writel_relaxed(i, tx_ring->tail);
+ }
return;
dma_error:
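
[Editor's note: the wrap handling above relies on the timestamp ring being allocated deeper than the Tx ring by the hardware fetch size (ice_calc_ts_ring_count() in the patch). A minimal standalone sketch of that wrap arithmetic follows, in plain C with hypothetical ring sizes; it is not part of the patch.]

#include <stdio.h>

/* Standalone sketch, not driver code: when next_to_use reaches the end of
 * the timestamp ring, slots [0, fetch) are all written with the same
 * (Tx descriptor index, timestamp) pair, mirroring the duplicate-descriptor
 * loop in ice_tx_map() above.  The ring sizes here are hypothetical.
 */
int main(void)
{
	unsigned int tx_count = 256;
	unsigned int ts_count = 288;			/* tstamp ring is deeper than the Tx ring */
	unsigned int fetch = ts_count - tx_count;	/* extra depth = HW fetch size */
	unsigned int j = ts_count;			/* pretend next_to_use just hit the end */
	unsigned int tx_idx = 17, tstamp = 0x1234;	/* last descriptor written */

	if (j == ts_count)
		for (j = 0; j < fetch; j++)
			printf("ts_desc[%u] = (idx=%u, tstamp=0x%x)\n",
			       j, tx_idx, tstamp);

	printf("next_to_use = %u\n", j);	/* value written to the tail register */
	return 0;
}
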
@@ -1745,6 +1582,7 @@ dma_error:
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
+ const struct ice_tx_ring *tx_ring = off->tx_ring;
u32 l4_len = 0, l3_len = 0, l2_len = 0;
struct sk_buff *skb = first->skb;
union {
@@ -1894,6 +1732,30 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l3_len = l4.hdr - ip.hdr;
offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+ if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
+ !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
+ !skb_csum_is_sctp(skb)) {
+ /* Set GCS */
+ u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
+ u16 csum_offset = skb->csum_offset / 2;
+ u16 gcs_params;
+
+ gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
+ FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
+ FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
+ ICE_TX_GCS_DESC_CSUM_PSH);
+
+ /* Unlike legacy HW checksums, GCS requires a context
+ * descriptor.
+ */
+ off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
+ off->cd_gcs_params = gcs_params;
+ /* Fill out CSO info in data descriptors */
+ off->td_offset |= offset;
+ off->td_cmd |= cmd;
+ return 1;
+ }
+
/* Enable L4 checksum offloads */
switch (l4_proto) {
case IPPROTO_TCP:
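
[Editor's note: the GCS start/offset fields in the hunk above are expressed in 2-byte words, which is why the byte offsets derived from the skb are divided by two before being packed with FIELD_PREP. A small standalone illustration of that conversion follows, in plain C with hypothetical offsets; it is not driver code.]

#include <stdio.h>

/* Standalone sketch, not driver code: generic checksum (GCS) offload takes
 * the checksum start and the checksum field offset in 2-byte words, so the
 * byte offsets taken from the skb are halved before being packed into the
 * context descriptor.  The example offsets below are hypothetical.
 */
int main(void)
{
	unsigned int mac_header = 0;	/* byte offset of the Ethernet header */
	unsigned int csum_start = 34;	/* e.g. Ethernet (14) + IPv4 (20) = L4 start */
	unsigned int csum_offset = 6;	/* checksum field offset inside a UDP header */

	unsigned int start_words = (csum_start - mac_header) / 2;
	unsigned int offset_words = csum_offset / 2;

	printf("GCS: start = %u words, checksum field at %u words\n",
	       start_words, offset_words);
	return 0;
}
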
@@ -1949,7 +1811,7 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
* VLAN offloads exclusively so we only care about the VLAN ID here
*/
if (skb_vlan_tag_present(skb)) {
- first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
+ first->vid = skb_vlan_tag_get(skb);
if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
else
@@ -1996,7 +1858,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (err < 0)
return err;
- /* cppcheck-suppress unreadVariable */
protocol = vlan_get_protocol(skb);
if (eth_p_mpls(protocol))
@@ -2033,8 +1894,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
}
/* reset pointers to inner headers */
-
- /* cppcheck-suppress unreadVariable */
ip.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
@@ -2261,9 +2120,6 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
return;
- if (!tx_ring->ptp_tx)
- return;
-
/* Tx timestamps cannot be sampled when doing TSO */
if (first->tx_flags & ICE_TX_FLAGS_TSO)
return;
@@ -2300,6 +2156,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ice_trace(xmit_frame_ring, tx_ring, skb);
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto out_drop;
+
count = ice_xmit_desc_count(skb);
if (ice_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
@@ -2328,6 +2187,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buf[tx_ring->next_to_use];
first->skb = skb;
+ first->type = ICE_TX_BUF_SKB;
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
first->gso_segs = 1;
first->tx_flags = 0;
@@ -2338,8 +2198,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_IL2TAG2 <<
ICE_TXD_CTX_QW1_CMD_S));
- offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
- ICE_TX_FLAGS_VLAN_S;
+ offload.cd_l2tag2 = first->vid;
}
/* set up TSO offload */
@@ -2354,17 +2213,20 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
eth = (struct ethhdr *)skb_mac_header(skb);
- if (unlikely((skb->priority == TC_PRIO_CONTROL ||
- eth->h_proto == htons(ETH_P_LLDP)) &&
- vsi->type == ICE_VSI_PF &&
- vsi->port_info->qos_cfg.is_sw_lldp))
+
+ if ((ice_is_switchdev_running(vsi->back) ||
+ ice_lag_is_switchdev_running(vsi->back)) &&
+ vsi->type != ICE_VSI_SF)
+ ice_eswitch_set_target_vsi(skb, &offload);
+ else if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back))
- ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
@@ -2378,7 +2240,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* setup context descriptor */
cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
- cdesc->rsvd = cpu_to_le16(0);
+ cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
}
@@ -2500,11 +2362,11 @@ void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ if (tx_buf->type == ICE_TX_BUF_DUMMY)
devm_kfree(tx_ring->dev, tx_buf->raw_buf);
/* clear next_to_watch to prevent false hangs */
- tx_buf->raw_buf = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
tx_buf->tx_flags = 0;
tx_buf->next_to_watch = NULL;
dma_unmap_len_set(tx_buf, len, 0);