author    Magnus Karlsson <magnus.karlsson@intel.com>  2020-08-28 10:26:24 +0200
committer Daniel Borkmann <daniel@iogearbox.net>  2020-08-31 21:15:04 +0200
commit    9647c57b11e563f5b33a49ef72b347753917c21c (patch)
tree      b0f10d39c70160b836327b848855499a0594931c /drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
parent    8ef4e27eb3f03edfbfbe5657b8061f2a47757037 (diff)
xsk: i40e: ice: ixgbe: mlx5: Test for dma_need_sync earlier for better performance
Test for dma_need_sync earlier to increase performance. xsk_buff_dma_sync_for_cpu() takes an xdp_buff as a parameter, and from that the xsk_buff_pool reference is dug out. Perf shows that this dereference causes a lot of cache misses. But as the buffer pool is now sent down to the driver at zero-copy initialization time, we might as well use this pointer directly instead of going via the xsk_buff, and we can do so already in xsk_buff_dma_sync_for_cpu() instead of in xp_dma_sync_for_cpu(). This gets rid of these cache misses. Throughput increases by 3% for the xdpsock l2fwd sample application on my machine.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-11-git-send-email-magnus.karlsson@intel.com
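For context, the change moves the dma_need_sync test out of xp_dma_sync_for_cpu() and into xsk_buff_dma_sync_for_cpu(), which now takes the pool pointer the driver already caches. Below is a minimal before/after sketch of the helper, assuming the kernel-internal names used by this series (struct xsk_buff_pool::dma_need_sync, xp_dma_sync_for_cpu_slow(), struct xdp_buff_xsk); it is an illustration, not a verbatim copy of the header.

/* Before: the pool is reached through the xdp_buff on every packet,
 * costing an extra dereference (and cache miss) in the hot path.
 */
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);	/* tests xskb->pool->dma_need_sync internally */
}

/* After: the driver passes the xsk_buff_pool it received at zero-copy
 * setup time (rq->xsk_pool in mlx5), so dma_need_sync can be tested
 * directly and the sync skipped without touching the xskb's pool pointer.
 */
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp,
					     struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu_slow(xskb);
}

Callers such as the two mlx5 sites in this diff simply forward the pool pointer they already hold, so the common case where no sync is needed reduces to a single flag test.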
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index a33a1f762c70..902ce77d0178 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -48,7 +48,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	xdp->data_end = xdp->data + cqe_bcnt32;
 	xdp_set_data_meta_invalid(xdp);
-	xsk_buff_dma_sync_for_cpu(xdp);
+	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
 	prefetch(xdp->data);
 	rcu_read_lock();
@@ -99,7 +99,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 	xdp->data_end = xdp->data + cqe_bcnt;
 	xdp_set_data_meta_invalid(xdp);
-	xsk_buff_dma_sync_for_cpu(xdp);
+	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
 	prefetch(xdp->data);
 	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {