summary refs log tree commit diff
path: root/tools/net/ynl/generated/netdev-user.h
diff options
context:
space:
mode:
author	Paolo Abeni <pabeni@redhat.com>	2023-11-28 15:48:42 +0100
committer	Paolo Abeni <pabeni@redhat.com>	2023-11-28 15:48:43 +0100
commit	a379972973a80924b1d03443e20f113ff76a94c7 (patch)
tree	03d795a2591a1f3ce239993abf09790f3bc8a799	/tools/net/ynl/generated/netdev-user.h
parent	a214724554aee8f6a5953dccab51ceff448c08cd (diff)
parent	637567e4a3ef6f6a5ffa48781207d270265f7e68 (diff)
Merge branch 'net-page_pool-add-netlink-based-introspection'
Jakub Kicinski says: ==================== net: page_pool: add netlink-based introspection We recently started to deploy newer kernels / drivers at Meta, making significant use of page pools for the first time. We immediately run into page pool leaks both real and false positive warnings. As Eric pointed out/predicted there's no guarantee that applications will read / close their sockets so a page pool page may be stuck in a socket (but not leaked) forever. This happens a lot in our fleet. Most of these are obviously due to application bugs but we should not be printing kernel warnings due to minor application resource leaks. Conversely the page pool memory may get leaked at runtime, and we have no way to detect / track that, unless someone reconfigures the NIC and destroys the page pools which leaked the pages. The solution presented here is to expose the memory use of page pools via netlink. This allows for continuous monitoring of memory used by page pools, regardless if they were destroyed or not. Sample in patch 15 can print the memory use and recycling efficiency: $ ./page-pool eth0[2] page pools: 10 (zombies: 0) refs: 41984 bytes: 171966464 (refs: 0 bytes: 0) recycling: 90.3% (alloc: 656:397681 recycle: 89652:270201) v4: - use dev_net(netdev)->loopback_dev - extend inflight doc v3: https://lore.kernel.org/all/20231122034420.1158898-1-kuba@kernel.org/ - ID is still here, can't decide if it matters - rename destroyed -> detach-time, good enough? - fix build for netsec v2: https://lore.kernel.org/r/20231121000048.789613-1-kuba@kernel.org - hopefully fix build with PAGE_POOL=n v1: https://lore.kernel.org/all/20231024160220.3973311-1-kuba@kernel.org/ - The main change compared to the RFC is that the API now exposes outstanding references and byte counts even for "live" page pools. The warning is no longer printed if page pool is accessible via netlink. 
RFC: https://lore.kernel.org/all/20230816234303.3786178-1-kuba@kernel.org/ ==================== Link: https://lore.kernel.org/r/20231126230740.2148636-1-kuba@kernel.org Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Diffstat (limited to 'tools/net/ynl/generated/netdev-user.h')
-rw-r--r--	tools/net/ynl/generated/netdev-user.h	171
1 files changed, 171 insertions, 0 deletions
diff --git a/tools/net/ynl/generated/netdev-user.h b/tools/net/ynl/generated/netdev-user.h
index 4fafac879df3..4093602c9b6c 100644
--- a/tools/net/ynl/generated/netdev-user.h
+++ b/tools/net/ynl/generated/netdev-user.h
@@ -21,6 +21,16 @@ const char *netdev_xdp_act_str(enum netdev_xdp_act value);
const char *netdev_xdp_rx_metadata_str(enum netdev_xdp_rx_metadata value);
/* Common nested types */
+struct netdev_page_pool_info {
+ struct {
+ __u32 id:1;
+ __u32 ifindex:1;
+ } _present;
+
+ __u64 id;
+ __u32 ifindex;
+};
+
/* ============== NETDEV_CMD_DEV_GET ============== */
/* NETDEV_CMD_DEV_GET - do */
struct netdev_dev_get_req {
@@ -87,4 +97,165 @@ struct netdev_dev_get_ntf {
void netdev_dev_get_ntf_free(struct netdev_dev_get_ntf *rsp);
+/* ============== NETDEV_CMD_PAGE_POOL_GET ============== */
+/* NETDEV_CMD_PAGE_POOL_GET - do */
+struct netdev_page_pool_get_req {
+ struct {
+ __u32 id:1;
+ } _present;
+
+ __u64 id;
+};
+
+static inline struct netdev_page_pool_get_req *
+netdev_page_pool_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct netdev_page_pool_get_req));
+}
+void netdev_page_pool_get_req_free(struct netdev_page_pool_get_req *req);
+
+static inline void
+netdev_page_pool_get_req_set_id(struct netdev_page_pool_get_req *req, __u64 id)
+{
+ req->_present.id = 1;
+ req->id = id;
+}
+
+struct netdev_page_pool_get_rsp {
+ struct {
+ __u32 id:1;
+ __u32 ifindex:1;
+ __u32 napi_id:1;
+ __u32 inflight:1;
+ __u32 inflight_mem:1;
+ __u32 detach_time:1;
+ } _present;
+
+ __u64 id;
+ __u32 ifindex;
+ __u64 napi_id;
+ __u64 inflight;
+ __u64 inflight_mem;
+ __u64 detach_time;
+};
+
+void netdev_page_pool_get_rsp_free(struct netdev_page_pool_get_rsp *rsp);
+
+/*
+ * Get / dump information about Page Pools.
+(Only Page Pools associated with a net_device can be listed.)
+
+ */
+struct netdev_page_pool_get_rsp *
+netdev_page_pool_get(struct ynl_sock *ys, struct netdev_page_pool_get_req *req);
+
+/* NETDEV_CMD_PAGE_POOL_GET - dump */
+struct netdev_page_pool_get_list {
+ struct netdev_page_pool_get_list *next;
+ struct netdev_page_pool_get_rsp obj __attribute__((aligned(8)));
+};
+
+void netdev_page_pool_get_list_free(struct netdev_page_pool_get_list *rsp);
+
+struct netdev_page_pool_get_list *
+netdev_page_pool_get_dump(struct ynl_sock *ys);
+
+/* NETDEV_CMD_PAGE_POOL_GET - notify */
+struct netdev_page_pool_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct netdev_page_pool_get_ntf *ntf);
+ struct netdev_page_pool_get_rsp obj __attribute__((aligned(8)));
+};
+
+void netdev_page_pool_get_ntf_free(struct netdev_page_pool_get_ntf *rsp);
+
+/* ============== NETDEV_CMD_PAGE_POOL_STATS_GET ============== */
+/* NETDEV_CMD_PAGE_POOL_STATS_GET - do */
+struct netdev_page_pool_stats_get_req {
+ struct {
+ __u32 info:1;
+ } _present;
+
+ struct netdev_page_pool_info info;
+};
+
+static inline struct netdev_page_pool_stats_get_req *
+netdev_page_pool_stats_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct netdev_page_pool_stats_get_req));
+}
+void
+netdev_page_pool_stats_get_req_free(struct netdev_page_pool_stats_get_req *req);
+
+static inline void
+netdev_page_pool_stats_get_req_set_info_id(struct netdev_page_pool_stats_get_req *req,
+ __u64 id)
+{
+ req->_present.info = 1;
+ req->info._present.id = 1;
+ req->info.id = id;
+}
+static inline void
+netdev_page_pool_stats_get_req_set_info_ifindex(struct netdev_page_pool_stats_get_req *req,
+ __u32 ifindex)
+{
+ req->_present.info = 1;
+ req->info._present.ifindex = 1;
+ req->info.ifindex = ifindex;
+}
+
+struct netdev_page_pool_stats_get_rsp {
+ struct {
+ __u32 info:1;
+ __u32 alloc_fast:1;
+ __u32 alloc_slow:1;
+ __u32 alloc_slow_high_order:1;
+ __u32 alloc_empty:1;
+ __u32 alloc_refill:1;
+ __u32 alloc_waive:1;
+ __u32 recycle_cached:1;
+ __u32 recycle_cache_full:1;
+ __u32 recycle_ring:1;
+ __u32 recycle_ring_full:1;
+ __u32 recycle_released_refcnt:1;
+ } _present;
+
+ struct netdev_page_pool_info info;
+ __u64 alloc_fast;
+ __u64 alloc_slow;
+ __u64 alloc_slow_high_order;
+ __u64 alloc_empty;
+ __u64 alloc_refill;
+ __u64 alloc_waive;
+ __u64 recycle_cached;
+ __u64 recycle_cache_full;
+ __u64 recycle_ring;
+ __u64 recycle_ring_full;
+ __u64 recycle_released_refcnt;
+};
+
+void
+netdev_page_pool_stats_get_rsp_free(struct netdev_page_pool_stats_get_rsp *rsp);
+
+/*
+ * Get page pool statistics.
+ */
+struct netdev_page_pool_stats_get_rsp *
+netdev_page_pool_stats_get(struct ynl_sock *ys,
+ struct netdev_page_pool_stats_get_req *req);
+
+/* NETDEV_CMD_PAGE_POOL_STATS_GET - dump */
+struct netdev_page_pool_stats_get_list {
+ struct netdev_page_pool_stats_get_list *next;
+ struct netdev_page_pool_stats_get_rsp obj __attribute__((aligned(8)));
+};
+
+void
+netdev_page_pool_stats_get_list_free(struct netdev_page_pool_stats_get_list *rsp);
+
+struct netdev_page_pool_stats_get_list *
+netdev_page_pool_stats_get_dump(struct ynl_sock *ys);
+
#endif /* _LINUX_NETDEV_GEN_H */