| author | Jesse Brandeburg <jesse.brandeburg@intel.com> | 2021-10-27 12:38:36 -0700 |
|---|---|---|
| committer | Tony Nguyen <anthony.l.nguyen@intel.com> | 2021-12-15 08:46:28 -0800 |
| commit | cc14db11c8a40f930fc1e4b254a37e0fce745236 (patch) | |
| tree | 053e5a007c65310ad7a8e47c4af24fde75f0fa6c /drivers/net/ethernet | |
| parent | 1c96c16858bacb8e77a506b9493a8e4e517b669b (diff) | |
ice: use prefetch methods
The kernel provides some prefetch mechanisms to speed up accesses to
commonly cold cache lines during transmit processing. Since these are
software structures, it helps to place these prefetches strategically.
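The mechanisms referenced here come from <linux/prefetch.h>: prefetch() warms a cache line for an upcoming read and prefetchw() warms it for an upcoming write. As a minimal illustration (not part of the patch; the wrapper name below is hypothetical), this is the pattern behind the skb refcount prefetch added in ice_clean_tx_irq():

```c
#include <linux/prefetch.h>
#include <linux/skbuff.h>

/* Hypothetical wrapper: warm the skb refcount cache line for write
 * before the Tx completion path drops its reference, mirroring the
 * prefetchw(&tx_buf->skb->users) added by this patch.
 */
static inline void warm_skb_refcount(struct sk_buff *skb)
{
	if (skb)
		prefetchw(&skb->users);
}
```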
Be careful to call the BQL completion prefetch only for non-XDP queues,
since XDP Tx rings do not go through BQL accounting.
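For context, the two BQL prefetch helpers used by the patch are netdev_txq_bql_enqueue_prefetchw() and netdev_txq_bql_complete_prefetchw() from include/linux/netdevice.h. A simplified sketch of what they do is shown below; the function names here are illustrative, and the exact dql fields touched may differ between kernel versions:

```c
#include <linux/netdevice.h>
#include <linux/prefetch.h>

/* Simplified sketch: both helpers are no-ops unless CONFIG_BQL is set,
 * and both only warm the queue's dynamic queue limit (dql) state for
 * an upcoming write.
 */
static inline void bql_enqueue_prefetchw_sketch(struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	prefetchw(&txq->dql.num_queued);	/* updated by netdev_tx_sent_queue() */
#endif
}

static inline void bql_complete_prefetchw_sketch(struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	prefetchw(&txq->dql.limit);		/* updated by netdev_tx_completed_queue() */
#endif
}
```

The enqueue-side call in ice_xmit_frame_ring() needs no XDP check, presumably because that function only runs for regular stack transmits, while ice_clean_tx_irq() can also service XDP Tx rings, which skip the BQL update.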
Co-developed-by: Piotr Raczynski <piotr.raczynski@intel.com>
Signed-off-by: Piotr Raczynski <piotr.raczynski@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_txrx.c | 13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 12a2edd13877..de9247d45c39 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -3,8 +3,9 @@
 
 /* The driver transmit and receive code */
 
-#include <linux/prefetch.h>
 #include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/prefetch.h>
 #include <linux/bpf_trace.h>
 #include <net/dsfield.h>
 #include <net/xdp.h>
@@ -219,6 +220,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
 	struct ice_tx_desc *tx_desc;
 	struct ice_tx_buf *tx_buf;
 
+	/* get the bql data ready */
+	if (!ice_ring_is_xdp(tx_ring))
+		netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+
 	tx_buf = &tx_ring->tx_buf[i];
 	tx_desc = ICE_TX_DESC(tx_ring, i);
 	i -= tx_ring->count;
@@ -232,6 +237,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
 		if (!eop_desc)
 			break;
 
+		/* follow the guidelines of other drivers */
+		prefetchw(&tx_buf->skb->users);
+
 		smp_rmb();	/* prevent any other reads prior to eop_desc */
 
 		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
@@ -2265,6 +2273,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
 		return NETDEV_TX_BUSY;
 	}
 
+	/* prefetch for bql data which is infrequently used */
+	netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
+
 	offload.tx_ring = tx_ring;
 
 	/* record the location of the first descriptor for this packet */