| author | Alexander Lobakin <alexandr.lobakin@intel.com> | 2023-02-10 18:06:16 +0100 |
|---|---|---|
| committer | Daniel Borkmann <daniel@iogearbox.net> | 2023-02-13 19:13:12 +0100 |
| commit | aa1d3faf71a6a46f9b859daa8ffa5b86fa07217c (patch) | |
| tree | 202149afecf648a168e8ff053015c447bef952fd /drivers/net/ethernet/intel/ice/ice_txrx.c | |
| parent | 923096b5cec3a68cc4b3816b1b9a50139df62ac7 (diff) | |
ice: Robustify cleaning/completing XDP Tx buffers
When queueing frames from a Page Pool for redirecting to a device backed
by the ice driver, `perf top` shows heavy load on page_alloc() and
page_frag_free(), even though on a properly working system this path
should be fully, or at least almost, zero-alloc. The problem actually
runs a bit deeper and arises from how ice cleans up completed Tx buffers.
The story so far: when cleaning/freeing the resources related to a
particular completed Tx frame (skbs, DMA mappings, etc.), ice relies on
heuristics alone, without setting any type explicitly (except for dummy
Flow Director packets, which are marked via ice_tx_buf::tx_flags). This
kinda works, but only up to a point. For example, ice currently assumes
that each frame coming to __ice_xmit_xdp_ring() is backed by either a
plain order-0 page or a plain page frag, while it may also be backed by
a Page Pool or any other memory model introduced in the future. This
means any &xdp_frame must be freed properly via the xdp_return_frame()
family, with no assumptions.
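As a minimal sketch of what type-aware freeing enables (this is not code
from this patch: %ICE_TX_BUF_XDP_XMIT and the ::xdpf member are
assumptions anticipating the follow-up change that frees redirected
frames):

```c
#include <net/xdp.h>

/* Sketch: with an explicit type, a redirected &xdp_frame can be returned
 * through the XDP core, which dispatches on the memory model recorded in
 * the frame (MEM_TYPE_PAGE_POOL, MEM_TYPE_PAGE_SHARED, ...), instead of
 * being passed unconditionally to page_frag_free().
 */
static void ice_put_xdp_tx_buf(struct ice_tx_buf *tx_buf)
{
	switch (tx_buf->type) {
	case ICE_TX_BUF_XDP_TX:
		/* frame built locally from an Rx buffer: plain page frag */
		page_frag_free(tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_XDP_XMIT:
		/* frame redirected from another device: may be Page Pool */
		xdp_return_frame(tx_buf->xdpf);
		break;
	default:
		break;
	}
}
```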
In order to do that, the heuristics must be replaced with setting the
Tx buffer/frame type explicitly, just as it's always been done via an
enum. Let us reuse 16 bits from ::tx_flags -- one extra bit-and
instruction won't hurt much -- especially given that sometimes there was
a check for %ICE_TX_FLAGS_DUMMY_PKT, which now turns from a flag into an
enum member. The rest of the changes are straightforward; most of them
are just a conversion to rely on the type set in &ice_tx_buf rather than
on secondary properties.
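The matching header change lives outside this file-limited view; below
is a sketch of how the enum and the 16/16 split of ::tx_flags could
look, reconstructed from how the fields are used in the diff further
down (the exact member layout is an assumption):

```c
/* Sketch only: reconstructed from the usage in ice_txrx.c below; the
 * real struct in ice_txrx.h may order/union its members differently.
 */
enum ice_tx_buf_type {
	ICE_TX_BUF_EMPTY = 0,	/* unused slot, nothing to free */
	ICE_TX_BUF_DUMMY,	/* dummy Flow Director packet: devm_kfree() */
	ICE_TX_BUF_FRAG,	/* mapped fragment: only unmap DMA */
	ICE_TX_BUF_SKB,		/* &sk_buff: unmap and dev_kfree_skb_any() */
	ICE_TX_BUF_XDP_TX,	/* XDP frame: unmap and page_frag_free() */
};

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;	/* ICE_TX_BUF_SKB */
		void *raw_buf;		/* XDP / Flow Director buffers */
	};
	unsigned int bytecount;
	unsigned int gso_segs;
	u32 type:16;			/* &ice_tx_buf_type */
	u32 tx_flags:16;		/* remaining %ICE_TX_FLAGS_* */
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};
```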
For now, no functional changes are intended; this only prepares the
ground for freeing XDP frames properly in the next step. And that has to
be done atomically/synchronously to not break things.
Signed-off-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Link: https://lore.kernel.org/bpf/20230210170618.1973430-5-alexandr.lobakin@intel.com
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_txrx.c | 38 |
|---|---|---|

1 file changed, 19 insertions(+), 19 deletions(-)
```diff
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 6b99adb695e7..d7e8a3f81e20 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -85,7 +85,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
 	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
 		 ICE_TX_DESC_CMD_RE;
 
-	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
+	tx_buf->type = ICE_TX_BUF_DUMMY;
 	tx_buf->raw_buf = raw_packet;
 
 	tx_desc->cmd_type_offset_bsz =
@@ -112,28 +112,26 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
 static void
 ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
 {
-	if (tx_buf->skb) {
-		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) {
-			devm_kfree(ring->dev, tx_buf->raw_buf);
-		} else if (ice_ring_is_xdp(ring)) {
-			page_frag_free(tx_buf->raw_buf);
-		} else {
-			dev_kfree_skb_any(tx_buf->skb);
-		}
-		if (dma_unmap_len(tx_buf, len))
-			dma_unmap_single(ring->dev,
-					 dma_unmap_addr(tx_buf, dma),
-					 dma_unmap_len(tx_buf, len),
-					 DMA_TO_DEVICE);
-	} else if (dma_unmap_len(tx_buf, len)) {
+	if (dma_unmap_len(tx_buf, len))
 		dma_unmap_page(ring->dev,
 			       dma_unmap_addr(tx_buf, dma),
 			       dma_unmap_len(tx_buf, len),
 			       DMA_TO_DEVICE);
+
+	switch (tx_buf->type) {
+	case ICE_TX_BUF_DUMMY:
+		devm_kfree(ring->dev, tx_buf->raw_buf);
+		break;
+	case ICE_TX_BUF_SKB:
+		dev_kfree_skb_any(tx_buf->skb);
+		break;
+	case ICE_TX_BUF_XDP_TX:
+		page_frag_free(tx_buf->raw_buf);
+		break;
 	}
 
 	tx_buf->next_to_watch = NULL;
-	tx_buf->skb = NULL;
+	tx_buf->type = ICE_TX_BUF_EMPTY;
 	dma_unmap_len_set(tx_buf, len, 0);
 	/* tx_buf must be completely set up in the transmit path */
 }
@@ -266,7 +264,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
 				 DMA_TO_DEVICE);
 
 		/* clear tx_buf data */
-		tx_buf->skb = NULL;
+		tx_buf->type = ICE_TX_BUF_EMPTY;
 		dma_unmap_len_set(tx_buf, len, 0);
 
 		/* unmap remaining buffers */
@@ -1709,6 +1707,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
 				       DMA_TO_DEVICE);
 
 		tx_buf = &tx_ring->tx_buf[i];
+		tx_buf->type = ICE_TX_BUF_FRAG;
 	}
 
 	/* record SW timestamp if HW timestamp is not available */
@@ -2352,6 +2351,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
 	first->skb = skb;
+	first->type = ICE_TX_BUF_SKB;
 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
 	first->gso_segs = 1;
 	first->tx_flags = 0;
@@ -2524,11 +2524,11 @@ void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
 					 dma_unmap_addr(tx_buf, dma),
 					 dma_unmap_len(tx_buf, len),
 					 DMA_TO_DEVICE);
-		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+		if (tx_buf->type == ICE_TX_BUF_DUMMY)
 			devm_kfree(tx_ring->dev, tx_buf->raw_buf);
 
 		/* clear next_to_watch to prevent false hangs */
-		tx_buf->raw_buf = NULL;
+		tx_buf->type = ICE_TX_BUF_EMPTY;
 		tx_buf->tx_flags = 0;
 		tx_buf->next_to_watch = NULL;
 		dma_unmap_len_set(tx_buf, len, 0);
```