summary refs log tree commit diff
path: root/net/core/page_pool.c
diff options
context:
space:
mode:
author	Jakub Kicinski <kuba@kernel.org>	2023-04-19 11:20:06 -0700
committer	Jakub Kicinski <kuba@kernel.org>	2023-04-20 19:13:37 -0700
commit	dd64b232deb8d48812a2ea739d1fedaeaffb59ed (patch)
tree	6c10b69c274d4057732e42ee8946f0479b8897e2 /net/core/page_pool.c
parent	4bb7aac70b5d8a4bddf4ee0791b834f9f56883d2 (diff)
page_pool: unlink from napi during destroy
Jesper points out that we must prevent recycling into cache after page_pool_destroy() is called, because page_pool_destroy() is not synchronized with recycling (some pages may still be outstanding when destroy() gets called). I assumed this will not happen because NAPI can't be scheduled if its page pool is being destroyed. But I missed the fact that NAPI may get reused. For instance when user changes ring configuration driver may allocate a new page pool, stop NAPI, swap, start NAPI, and then destroy the old pool. The NAPI is running so old page pool will think it can recycle to the cache, but the consumer at that point is the destroy() path, not NAPI. To avoid extra synchronization let the drivers do "unlinking" during the "swap" stage while NAPI is indeed disabled. Fixes: 8c48eea3adf3 ("page_pool: allow caching from safely localized NAPI") Reported-by: Jesper Dangaard Brouer <jbrouer@redhat.com> Link: https://lore.kernel.org/all/e8df2654-6a5b-3c92-489d-2fe5e444135f@redhat.com/ Acked-by: Jesper Dangaard Brouer <brouer@redhat.com> Link: https://lore.kernel.org/r/20230419182006.719923-1-kuba@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net/core/page_pool.c')
-rw-r--r--	net/core/page_pool.c	18
1 files changed, 17 insertions, 1 deletions
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 97f20f7ff4fc..e212e9d7edcb 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -839,6 +839,21 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
pool->xdp_mem_id = mem->id;
}
+void page_pool_unlink_napi(struct page_pool *pool)
+{
+ if (!pool->p.napi)
+ return;
+
+ /* To avoid races with recycling and additional barriers make sure
+ * pool and NAPI are unlinked when NAPI is disabled.
+ */
+ WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
+ READ_ONCE(pool->p.napi->list_owner) != -1);
+
+ WRITE_ONCE(pool->p.napi, NULL);
+}
+EXPORT_SYMBOL(page_pool_unlink_napi);
+
void page_pool_destroy(struct page_pool *pool)
{
if (!pool)
@@ -847,6 +862,7 @@ void page_pool_destroy(struct page_pool *pool)
if (!page_pool_put(pool))
return;
+ page_pool_unlink_napi(pool);
page_pool_free_frag(pool);
if (!page_pool_release(pool))
@@ -900,7 +916,7 @@ bool page_pool_return_skb_page(struct page *page, bool napi_safe)
* in the same context as the consumer would run, so there's
* no possible race.
*/
- napi = pp->p.napi;
+ napi = READ_ONCE(pp->p.napi);
allow_direct = napi_safe && napi &&
READ_ONCE(napi->list_owner) == smp_processor_id();