author | Mina Almasry <almasrymina@google.com> | 2024-09-10 17:14:50 +0000
---|---|---
committer | Jakub Kicinski <kuba@kernel.org> | 2024-09-11 20:44:31 -0700
commit | 0f921404689398943257793f7240db239a23b609 (patch) |
tree | f3d5b954252652f9f0de55644b6c330764784025 /net/core/page_pool_user.c |
parent | 8ab79ed50cf10f338465c296012500de1081646f (diff) |
memory-provider: dmabuf devmem memory provider
Implement a memory provider that allocates dmabuf devmem in the form of
net_iov.
The provider receives a reference to the struct netdev_dmabuf_binding
via the pool->mp_priv pointer. The driver needs to set this pointer for
the provider in the net_iov.
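As an illustrative, non-authoritative sketch (not part of this patch): a driver creating an rx page_pool that this provider can serve might fill in page_pool_params roughly as below. The helper name, the pool_size value, and the assumption that the core copies the rx queue's configured binding into pool->mp_priv during page_pool_create() are illustrative only.

    #include <linux/netdevice.h>
    #include <net/page_pool/types.h>

    /* Hypothetical driver-side sketch, not part of this patch: create an rx
     * page_pool that the dmabuf devmem provider can back. PP_FLAG_DMA_MAP and
     * order 0 match the constraints described below; how the binding reaches
     * pool->mp_priv (assumed: taken from the rx queue's configuration during
     * page_pool_create()) is an assumption about the wider series.
     */
    static struct page_pool *sketch_setup_rx_pool(struct net_device *netdev,
                                                  struct device *dma_dev,
                                                  unsigned int rxq_idx)
    {
            struct page_pool_params pp = {
                    .flags          = PP_FLAG_DMA_MAP,  /* pool maps dma-addrs for the driver */
                    .order          = 0,                /* order != 0 is not supported */
                    .pool_size      = 256,              /* arbitrary example size */
                    .nid            = NUMA_NO_NODE,
                    .dev            = dma_dev,
                    .dma_dir        = DMA_FROM_DEVICE,
                    .netdev         = netdev,           /* slow params: used to match pool to queue */
                    .queue_idx      = rxq_idx,
            };

            return page_pool_create(&pp);
    }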
The provider obtains a reference on the netdev_dmabuf_binding, which guarantees
that the binding and the underlying mapping remain alive until the provider is
destroyed.
Usage of PP_FLAG_DMA_MAP is required for this memory provider so that the
page_pool can provide the driver with the dma-addrs of the devmem.
Support for PP_FLAG_DMA_SYNC_DEV and for p.order != 0 is omitted for
simplicity.
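The provider implementation itself lives outside page_pool_user.c and is not shown in this page's diff. As a rough sketch only, its init/destroy hooks could enforce the constraints above and pin the binding roughly as follows; the function names, the pool->dma_map/pool->dma_sync fields and the net_devmem_dmabuf_binding_get/put helpers are assumptions drawn from the broader series, not confirmed by this diff.

    /* Rough sketch of devmem provider init/destroy hooks, conceptually in
     * net/core/page_pool.c with access to page_pool internals. Names and
     * fields here are assumptions, not the literal upstream implementation.
     */
    static int sketch_mp_dmabuf_devmem_init(struct page_pool *pool)
    {
            struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

            if (!binding)
                    return -EINVAL;

            /* The pool must DMA-map on behalf of the driver ... */
            if (!pool->dma_map)
                    return -EOPNOTSUPP;

            /* ... while DMA_SYNC_DEV and high-order allocations are unsupported. */
            if (pool->dma_sync)
                    return -EOPNOTSUPP;
            if (pool->p.order != 0)
                    return -E2BIG;

            /* Pin the binding (and the underlying dma-buf mapping) for the
             * lifetime of this provider instance.
             */
            net_devmem_dmabuf_binding_get(binding);
            return 0;
    }

    static void sketch_mp_dmabuf_devmem_destroy(struct page_pool *pool)
    {
            struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

            /* Drop the reference taken in init; only now may the binding and
             * its mapping be torn down.
             */
            net_devmem_dmabuf_binding_put(binding);
    }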
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Kaiyuan Zhang <kaiyuanz@google.com>
Signed-off-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Link: https://patch.msgid.link/20240910171458.219195-7-almasrymina@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net/core/page_pool_user.c')
-rw-r--r-- | net/core/page_pool_user.c | 27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 3a3277ba167b..cd6267ba6fa3 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -4,8 +4,9 @@
 #include <linux/netdevice.h>
 #include <linux/xarray.h>
 #include <net/net_debug.h>
-#include <net/page_pool/types.h>
+#include <net/netdev_rx_queue.h>
 #include <net/page_pool/helpers.h>
+#include <net/page_pool/types.h>
 #include <net/sock.h>

 #include "page_pool_priv.h"
@@ -344,6 +345,30 @@ void page_pool_unlist(struct page_pool *pool)
 	mutex_unlock(&page_pools_lock);
 }

+int page_pool_check_memory_provider(struct net_device *dev,
+				    struct netdev_rx_queue *rxq)
+{
+	struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv;
+	struct page_pool *pool;
+	struct hlist_node *n;
+
+	if (!binding)
+		return 0;
+
+	mutex_lock(&page_pools_lock);
+	hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
+		if (pool->mp_priv != binding)
+			continue;
+
+		if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
+			mutex_unlock(&page_pools_lock);
+			return 0;
+		}
+	}
+	mutex_unlock(&page_pools_lock);
+	return -ENODATA;
+}
+
 static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
 {
 	struct page_pool *pool;
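page_pool_check_memory_provider() lets the core verify, after the driver has (re)created its page_pools for a queue, that some pool on that queue actually picked up the configured binding. A hedged sketch of a possible control-path caller follows; the restart helper and the surrounding error handling are assumptions, only page_pool_check_memory_provider() and rxq->mp_params come from this patch series.

    /* Hypothetical control-path sketch: attach a dmabuf binding to rx queue
     * rxq_idx, restart the queue so the driver recreates its page_pool, then
     * verify the new pool is really backed by the binding.
     */
    static int sketch_attach_binding(struct net_device *dev, unsigned int rxq_idx,
                                     struct net_devmem_dmabuf_binding *binding)
    {
            struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
            int err;

            rxq->mp_params.mp_priv = binding;

            err = netdev_rx_queue_restart(dev, rxq_idx);    /* assumed helper from this series */
            if (err)
                    goto err_clear;

            /* -ENODATA means no page_pool on this queue uses the binding,
             * i.e. the driver did not honor the memory provider.
             */
            err = page_pool_check_memory_provider(dev, rxq);
            if (err)
                    goto err_clear;

            return 0;

    err_clear:
            rxq->mp_params.mp_priv = NULL;
            return err;
    }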