path: root/net/core/page_pool.c
author     Lorenzo Bianconi <lorenzo@kernel.org>     2020-11-13 12:48:29 +0100
committer  Daniel Borkmann <daniel@iogearbox.net>    2020-11-14 02:29:00 +0100
commit     7886244736a4dbb49987f330772842130493e050 (patch)
tree       b77bc5437fc0787c4d38d58742c2f4294a20ca05 /net/core/page_pool.c
parent     8965398713d831f6b893805880c249e62e9059ae (diff)
net: page_pool: Add bulk support for ptr_ring
Introduce the capability to batch page_pool ptr_ring refill since it is
usually run inside the driver NAPI tx completion loop.

Suggested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Co-developed-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Link: https://lore.kernel.org/bpf/08dd249c9522c001313f520796faa777c4089e1c.1605267335.git.lorenzo@kernel.org
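For context, a minimal sketch of how a driver's NAPI TX completion path could use the new API: completed buffers are collected into a small on-stack array and handed back with a single page_pool_put_page_bulk() call instead of one page_pool_put_page() per frame. Everything prefixed mydrv_ (the ring structure, buffer layout, index handling) is hypothetical; only page_pool_put_page_bulk() comes from this patch.

#include <net/page_pool.h>

#define MYDRV_BULK_SIZE	16

/* Hypothetical TX-completion helper: 'done' descriptors have completed,
 * and each buffer's data pointer comes from a page allocated out of
 * ring->page_pool. Note the bulk API may overwrite the data array.
 */
static void mydrv_clean_tx(struct mydrv_tx_ring *ring, int done)
{
	void *bulk[MYDRV_BULK_SIZE];
	int i, n = 0;

	for (i = 0; i < done; i++) {
		struct mydrv_tx_buf *buf = &ring->buf[ring->next_to_clean];

		ring->next_to_clean = (ring->next_to_clean + 1) & ring->mask;

		bulk[n++] = buf->data;
		if (n == MYDRV_BULK_SIZE) {
			page_pool_put_page_bulk(ring->page_pool, bulk, n);
			n = 0;
		}
	}

	/* Flush any remainder in one last bulk call */
	if (n)
		page_pool_put_page_bulk(ring->page_pool, bulk, n);
}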
Diffstat (limited to 'net/core/page_pool.c')
-rw-r--r--  net/core/page_pool.c  | 70
1 file changed, 60 insertions(+), 10 deletions(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ef98372facf6..f3c690b8c8e3 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -11,6 +11,8 @@
#include <linux/device.h>
#include <net/page_pool.h>
+#include <net/xdp.h>
+
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
@@ -362,8 +364,9 @@ static bool pool_page_reusable(struct page_pool *pool, struct page *page)
* If the page refcnt != 1, then the page will be returned to memory
* subsystem.
*/
-void page_pool_put_page(struct page_pool *pool, struct page *page,
- unsigned int dma_sync_size, bool allow_direct)
+static __always_inline struct page *
+__page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct)
{
/* This allocator is optimized for the XDP mode that uses
* one-frame-per-page, but have fallbacks that act like the
@@ -379,15 +382,12 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
page_pool_dma_sync_for_device(pool, page,
dma_sync_size);
- if (allow_direct && in_serving_softirq())
- if (page_pool_recycle_in_cache(page, pool))
- return;
+ if (allow_direct && in_serving_softirq() &&
+ page_pool_recycle_in_cache(page, pool))
+ return NULL;
- if (!page_pool_recycle_in_ring(pool, page)) {
- /* Cache full, fallback to free pages */
- page_pool_return_page(pool, page);
- }
- return;
+ /* Page found as candidate for recycling */
+ return page;
}
/* Fallback/non-XDP mode: API user have elevated refcnt.
*
@@ -405,9 +405,59 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
/* Do not replace this with page_pool_return_page() */
page_pool_release_page(pool, page);
put_page(page);
+
+ return NULL;
+}
+
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct)
+{
+ page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
+ if (page && !page_pool_recycle_in_ring(pool, page)) {
+ /* Cache full, fallback to free pages */
+ page_pool_return_page(pool, page);
+ }
}
EXPORT_SYMBOL(page_pool_put_page);
+/* Caller must not use data area after call, as this function overwrites it */
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ int count)
+{
+ int i, bulk_len = 0;
+
+ for (i = 0; i < count; i++) {
+ struct page *page = virt_to_head_page(data[i]);
+
+ page = __page_pool_put_page(pool, page, -1, false);
+ /* Approved for bulk recycling in ptr_ring cache */
+ if (page)
+ data[bulk_len++] = page;
+ }
+
+ if (unlikely(!bulk_len))
+ return;
+
+ /* Bulk producer into ptr_ring page_pool cache */
+ page_pool_ring_lock(pool);
+ for (i = 0; i < bulk_len; i++) {
+ if (__ptr_ring_produce(&pool->ring, data[i]))
+ break; /* ring full */
+ }
+ page_pool_ring_unlock(pool);
+
+ /* Hopefully all pages were returned into the ptr_ring */
+ if (likely(i == bulk_len))
+ return;
+
+ /* ptr_ring cache full, free remaining pages outside producer lock
+ * since put_page() with refcnt == 1 can be an expensive operation
+ */
+ for (; i < bulk_len; i++)
+ page_pool_return_page(pool, data[i]);
+}
+EXPORT_SYMBOL(page_pool_put_page_bulk);
+
static void page_pool_empty_ring(struct page_pool *pool)
{
struct page *page;
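
Most drivers are expected to reach page_pool_put_page_bulk() indirectly, through the xdp_frame_bulk helpers introduced by the parent commit (8965398713d8), rather than by calling it themselves. A sketch of that caller side, assuming those helpers; the mydrv_ names and ring layout are again hypothetical:

#include <net/xdp.h>

/* Hypothetical XDP TX completion loop: frames are queued into a local
 * xdp_frame_bulk and flushed once at the end, which lets the core batch
 * the page_pool returns instead of recycling one page at a time.
 */
static void mydrv_clean_xdp_tx(struct mydrv_xdp_ring *ring, int done)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);

	for (i = 0; i < done; i++) {
		struct xdp_frame *xdpf = ring->frames[ring->next_to_clean];

		ring->next_to_clean = (ring->next_to_clean + 1) & ring->mask;

		/* Defers the return; page_pool pages are batched internally */
		xdp_return_frame_bulk(xdpf, &bq);
	}

	/* Hand any still-queued frames back in one bulk operation */
	xdp_flush_frame_bulk(&bq);
}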