path: root/net/xdp/xsk_buff_pool.c
author     Maciej Fijalkowski <maciej.fijalkowski@intel.com>  2024-10-07 14:24:53 +0200
committer  Daniel Borkmann <daniel@iogearbox.net>  2024-10-14 17:22:38 +0200
commit     b692bf9a7543af7ad11a59d182a3757578f0ba53 (patch)
tree       5552d4412e8e07a324a94fcc358961f2ee8a3fbf /net/xdp/xsk_buff_pool.c
parent     a8057ed23a368abad2e8b8f53438b98170a82ebc (diff)
xsk: Get rid of xdp_buff_xsk::xskb_list_node
Let's bring xdp_buff_xsk back to occupying 2 cachelines by removing
xskb_list_node. For the purpose of gathering the xskb frags,
free_list_node can be used instead; the head of the list
(xsk_buff_pool::xskb_list) stays as-is, only the node pointer is reused.
This is safe to do, as a single xdp_buff_xsk can never reside in two of
the pool's lists simultaneously.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20241007122458.282590-2-maciej.fijalkowski@intel.com
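
As a rough illustration of the idea (a sketch only, not the exact kernel
definition; fields other than free_list_node, xskb_list_node and the
pool's xskb_list are abbreviated), the struct drops from two list nodes
to one, and the surviving node serves both the pool's free list and the
frag-gathering list, since a given xskb only ever sits on one of them at
a time:

/* Sketch: xdp_buff_xsk before the patch - two list nodes, pushing the
 * struct past two cachelines.
 */
struct xdp_buff_xsk_before {
	struct xdp_buff xdp;
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	struct list_head free_list_node; /* linked into the pool's free list */
	struct list_head xskb_list_node; /* linked into pool->xskb_list (frags) */
};

/* Sketch: after the patch only one node remains. It links the xskb
 * either into the free list (when unused) or into pool->xskb_list
 * (while gathering frags), never both at once, so one node suffices.
 */
struct xdp_buff_xsk_after {
	struct xdp_buff xdp;
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	struct list_head free_list_node; /* reused for pool->xskb_list as well */
};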
Diffstat (limited to 'net/xdp/xsk_buff_pool.c')
-rw-r--r--  net/xdp/xsk_buff_pool.c  1
1 file changed, 0 insertions(+), 1 deletion(-)
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 521a2938e50a..e5368db7d18e 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -102,7 +102,6 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
xskb->pool = pool;
xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
INIT_LIST_HEAD(&xskb->free_list_node);
- INIT_LIST_HEAD(&xskb->xskb_list_node);
if (pool->unaligned)
pool->free_heads[i] = xskb;
else
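
For context, a minimal sketch of how a frag-gathering path can now chain
xskbs onto the pool's xskb_list through the surviving node. The helper
names below are hypothetical illustrations, not the actual xsk.c
functions; only pool->xskb_list and free_list_node come from the patch:

/* Hypothetical example: link an xskb into the pool's frag list via
 * free_list_node, which doubles as the xskb_list link after this patch.
 */
static void example_add_frag(struct xdp_buff_xsk *xskb)
{
	list_add_tail(&xskb->free_list_node, &xskb->pool->xskb_list);
}

/* Hypothetical example: walk and unlink the gathered frags. */
static void example_release_frags(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb, *tmp;

	list_for_each_entry_safe(xskb, tmp, &pool->xskb_list, free_list_node) {
		list_del(&xskb->free_list_node);
		/* hand the buffer back to the pool's free list here */
	}
}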