author     Jakub Kicinski <kuba@kernel.org>    2023-11-26 15:07:30 -0800
committer  Paolo Abeni <pabeni@redhat.com>     2023-11-28 15:48:39 +0100
commit     083772c9f972dcc248913b52a0dec1025baa1e16 (patch)
tree       571696450af19199654e4531877b3bd6afd258ff /include/net/page_pool
parent     f17c69649c698e4df3cfe0010b7bbf142dec3e40 (diff)
net: page_pool: record pools per netdev
Link the page pools with netdevs. This needs to be netns compatible, so we
have two options: either we record the pools per netns and have to worry
about moving them as the netdev gets moved, or we record them directly on
the netdev so that they move with the netdev without any extra work.
Implement the latter option. Since pools may outlast the netdev, we need a
place to store orphans; in time honored tradition, use loopback for this
purpose.

Reviewed-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
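For illustration only, a minimal sketch of how a driver's RX setup path might fill in the new netdev member when creating a pool. The driver function, its parameters, and values such as pool_size are hypothetical; page_pool_create() and the page_pool_params fields are the existing API plus the field this patch adds.

/* Hypothetical driver helper: ties a freshly created page pool to the
 * netdev it will serve via the page_pool_params::netdev member added by
 * this patch. The sizes and flags chosen here are illustrative.
 */
#include <linux/numa.h>
#include <net/page_pool/types.h>

static struct page_pool *example_create_rx_pool(struct net_device *netdev,
						struct device *dma_dev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP,	/* pool handles DMA mapping */
		.order		= 0,			/* single pages */
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.netdev		= netdev,	/* new: lets the core track this pool per netdev */
	};

	return page_pool_create(&pp_params);	/* returns ERR_PTR() on failure */
}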
Diffstat (limited to 'include/net/page_pool')
-rw-r--r--   include/net/page_pool/types.h   4
1 file changed, 4 insertions, 0 deletions
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index c19f0df3bf0b..b258a571201e 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -5,6 +5,7 @@
 #include <linux/dma-direction.h>
 #include <linux/ptr_ring.h>
+#include <linux/types.h>
 
 #define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
@@ -48,6 +49,7 @@ struct pp_alloc_cache {
  * @pool_size:	size of the ptr_ring
  * @nid:	NUMA node id to allocate pages from
  * @dev:	device, for DMA pre-mapping purposes
+ * @netdev:	netdev this pool will serve (leave as NULL if none or multiple)
  * @napi:	NAPI which is the sole consumer of pages, otherwise NULL
  * @dma_dir:	DMA mapping direction
  * @max_len:	max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
@@ -66,6 +68,7 @@ struct page_pool_params {
 		unsigned int	offset;
 	);
 	struct_group_tagged(page_pool_params_slow, slow,
+		struct net_device *netdev;
 		/* private: used by test code only */
 		void (*init_callback)(struct page *page, void *arg);
 		void *init_arg;
@@ -189,6 +192,7 @@ struct page_pool {
 	struct page_pool_params_slow slow;
 	/* User-facing fields, protected by page_pools_lock */
 	struct {
+		struct hlist_node list;
 		u32 id;
 	} user;
 };
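To picture how the pieces added above fit together: the hlist_node in the user struct lets the core thread each pool onto a list owned by the netdev it serves, and pools that outlive their netdev are re-homed onto loopback as the commit message describes. The sketch below is an illustration under stated assumptions, not the patch's actual code: only pool->slow.netdev, pool->user.list, and the page_pools_lock name appear in this diff, while the netdev->page_pools list head (added outside this path-limited diffstat), the mutex type, and the helper names are assumptions.

/* Rough sketch of the per-netdev tracking scheme from the commit message.
 * ASSUMPTIONS: struct net_device is taken to carry a "page_pools" hlist_head
 * (not visible in this diffstat), page_pools_lock is modeled as a mutex, and
 * the helper names are made up for illustration.
 */
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <net/page_pool/types.h>

static DEFINE_MUTEX(page_pools_lock);

/* On pool creation: hang the pool off the netdev it will serve, if any. */
static void example_page_pool_list(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	if (pool->slow.netdev)
		hlist_add_head(&pool->user.list,
			       &pool->slow.netdev->page_pools);
	mutex_unlock(&page_pools_lock);
}

/* On netdev unregistration: pools that outlast the netdev are moved onto the
 * netns loopback device ("time honored tradition" per the commit message) so
 * they remain reachable for user-space reporting.
 */
static void example_page_pool_unreg_netdev(struct net_device *netdev)
{
	struct net_device *lo = dev_net(netdev)->loopback_dev;
	struct hlist_node *tmp;
	struct page_pool *pool;

	mutex_lock(&page_pools_lock);
	hlist_for_each_entry_safe(pool, tmp, &netdev->page_pools, user.list) {
		hlist_del(&pool->user.list);
		pool->slow.netdev = lo;
		hlist_add_head(&pool->user.list, &lo->page_pools);
	}
	mutex_unlock(&page_pools_lock);
}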