author:    Jakub Kicinski <kuba@kernel.org>  2023-11-20 16:00:35 -0800
committer: Jakub Kicinski <kuba@kernel.org>  2023-11-21 17:22:30 -0800
commit:    2da0cac1e9494f34c5a3438e5c4c7e662e1b7445
tree:      7426b591fcfd20c14af1a71aec38adce9086ab25  /net/core/page_pool.c
parent:    5027ec19f1049a07df5b0a37b1f462514cf2724b
net: page_pool: avoid touching slow on the fastpath
To fully benefit from the previous commit, add one byte of state in the
first cache line, recording whether we need to look at the slow part.

The packing isn't all that impressive right now; we create a 7B hole.
I'm expecting Olek's rework will reshuffle this, anyway.

Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Link: https://lore.kernel.org/r/20231121000048.789613-3-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
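The trick is easiest to see in isolation: derive a boolean from the cold configuration once at setup and store it at the hot front of the struct, so the per-page path only ever reads the first cache line. Below is a minimal userspace C sketch of that idea, not the kernel code: only has_init_callback, slow.init_callback and slow.init_arg are taken from the diff below; the struct name, the other fields and the helper functions are invented for illustration (the real layout lives in include/net/page_pool/types.h).

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct page. */
struct page { int id; };

struct pool_sketch {
	/* Hot: touched on every allocation/recycle. */
	unsigned int alloc_count;
	bool has_init_callback;	/* cached copy of !!slow.init_callback */

	/* Cold: written once at setup, possibly cache lines away. */
	struct {
		void (*init_callback)(struct page *page, void *arg);
		void *init_arg;
	} slow;
};

/* Mirrors the page_pool_init() hunk: derive the flag once at setup. */
static void pool_init(struct pool_sketch *pool)
{
	pool->has_init_callback = !!pool->slow.init_callback;
}

/* Mirrors page_pool_set_pp_info(): the fast path tests the hot flag and
 * only dereferences into ->slow when a callback really is installed.
 */
static void pool_set_pp_info(struct pool_sketch *pool, struct page *page)
{
	if (pool->has_init_callback)
		pool->slow.init_callback(page, pool->slow.init_arg);
}

static void count_init(struct page *page, void *arg)
{
	(void)page;
	++*(unsigned int *)arg;
}

int main(void)
{
	unsigned int inits = 0;
	struct pool_sketch pool = {
		.slow.init_callback = count_init,
		.slow.init_arg = &inits,
	};
	struct page page = { .id = 1 };

	pool_init(&pool);
	pool_set_pp_info(&pool, &page);
	printf("init callbacks run: %u\n", inits);	/* prints 1 */
	return 0;
}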
Diffstat (limited to 'net/core/page_pool.c')
-rw-r--r--  net/core/page_pool.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ab22a2fdae57..df2a06d7da52 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -212,6 +212,8 @@ static int page_pool_init(struct page_pool *pool,
 		 */
 	}
 
+	pool->has_init_callback = !!pool->slow.init_callback;
+
 #ifdef CONFIG_PAGE_POOL_STATS
 	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
 	if (!pool->recycle_stats)
@@ -389,7 +391,7 @@ static void page_pool_set_pp_info(struct page_pool *pool,
 	 * the overhead is negligible.
 	 */
 	page_pool_fragment_page(page, 1);
-	if (pool->slow.init_callback)
+	if (pool->has_init_callback)
 		pool->slow.init_callback(page, pool->slow.init_arg);
 }
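Note the asymmetry in the second hunk: the branch now tests pool->has_init_callback, which sits in the hot first cache line, while pool->slow.init_callback and pool->slow.init_arg are still read from the slow part. Those cold reads only happen on the rare path where a callback is actually installed, so the common no-callback case never touches the slow cache line at all.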