author     Paolo Abeni <pabeni@redhat.com>   2023-11-28 15:48:42 +0100
committer  Paolo Abeni <pabeni@redhat.com>   2023-11-28 15:48:43 +0100
commit     a379972973a80924b1d03443e20f113ff76a94c7 (patch)
tree       03d795a2591a1f3ce239993abf09790f3bc8a799 /tools/net/ynl/generated/netdev-user.c
parent     a214724554aee8f6a5953dccab51ceff448c08cd (diff)
parent     637567e4a3ef6f6a5ffa48781207d270265f7e68 (diff)
Merge branch 'net-page_pool-add-netlink-based-introspection'
Jakub Kicinski says:
====================
net: page_pool: add netlink-based introspection
We recently started to deploy newer kernels / drivers at Meta,
making significant use of page pools for the first time.
We immediately ran into page pool leaks, both real ones and
false-positive warnings. As Eric pointed out / predicted, there's
no guarantee that applications will read / close their sockets, so
a page pool page may be stuck in a socket (but not leaked) forever.
This happens a lot in our fleet. Most of these cases are obviously
due to application bugs, but we should not be printing kernel
warnings for minor application resource leaks.
Conversely, page pool memory may get leaked at runtime, and
we have no way to detect / track that, unless someone reconfigures
the NIC and destroys the page pools which leaked the pages.
The solution presented here is to expose the memory use of page
pools via netlink. This allows for continuous monitoring of memory
used by page pools, regardless of whether they have been destroyed
or not.
The sample in patch 15 can print the memory use and recycling
efficiency:
$ ./page-pool
eth0[2] page pools: 10 (zombies: 0)
refs: 41984 bytes: 171966464 (refs: 0 bytes: 0)
recycling: 90.3% (alloc: 656:397681 recycle: 89652:270201)
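As a minimal sketch (not the actual sample from patch 15), a consumer of
the generated API in the diff below could dump all page pools like this;
the <ynl.h> / "netdev-user.h" includes and the ynl_sock_create() /
ynl_sock_destroy() helpers are assumed to come from tools/net/ynl:

#include <stdio.h>

#include <ynl.h>

#include "netdev-user.h"

int main(void)
{
	struct netdev_page_pool_get_list *pools, *pp;
	struct ynl_error yerr;
	struct ynl_sock *ys;

	ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return 1;
	}

	pools = netdev_page_pool_get_dump(ys);
	if (pools) {
		/* Dump lists are terminated with YNL_LIST_END, not NULL,
		 * matching the generated *_list_free() helpers below.
		 */
		for (pp = pools; (void *)pp != YNL_LIST_END; pp = pp->next)
			printf("pool %llu: ifindex %u inflight %llu pages / %llu bytes\n",
			       (unsigned long long)pp->obj.id,
			       pp->obj.ifindex,
			       (unsigned long long)pp->obj.inflight,
			       (unsigned long long)pp->obj.inflight_mem);
		netdev_page_pool_get_list_free(pools);
	}

	ynl_sock_destroy(ys);
	return 0;
}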
v4:
- use dev_net(netdev)->loopback_dev
- extend inflight doc
v3: https://lore.kernel.org/all/20231122034420.1158898-1-kuba@kernel.org/
- ID is still here, can't decide if it matters
- rename destroyed -> detach-time, good enough?
- fix build for netsec
v2: https://lore.kernel.org/r/20231121000048.789613-1-kuba@kernel.org
- hopefully fix build with PAGE_POOL=n
v1: https://lore.kernel.org/all/20231024160220.3973311-1-kuba@kernel.org/
- The main change compared to the RFC is that the API now exposes
outstanding references and byte counts even for "live" page pools.
The warning is no longer printed if the page pool is accessible via
netlink.
RFC: https://lore.kernel.org/all/20230816234303.3786178-1-kuba@kernel.org/
====================
Link: https://lore.kernel.org/r/20231126230740.2148636-1-kuba@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Diffstat (limited to 'tools/net/ynl/generated/netdev-user.c')
-rw-r--r--  tools/net/ynl/generated/netdev-user.c  419
1 file changed, 419 insertions, 0 deletions
diff --git a/tools/net/ynl/generated/netdev-user.c b/tools/net/ynl/generated/netdev-user.c
index b5ffe8cd1144..a7b7019d00f1 100644
--- a/tools/net/ynl/generated/netdev-user.c
+++ b/tools/net/ynl/generated/netdev-user.c
@@ -18,6 +18,11 @@ static const char * const netdev_op_strmap[] = {
 	[NETDEV_CMD_DEV_ADD_NTF] = "dev-add-ntf",
 	[NETDEV_CMD_DEV_DEL_NTF] = "dev-del-ntf",
 	[NETDEV_CMD_DEV_CHANGE_NTF] = "dev-change-ntf",
+	[NETDEV_CMD_PAGE_POOL_GET] = "page-pool-get",
+	[NETDEV_CMD_PAGE_POOL_ADD_NTF] = "page-pool-add-ntf",
+	[NETDEV_CMD_PAGE_POOL_DEL_NTF] = "page-pool-del-ntf",
+	[NETDEV_CMD_PAGE_POOL_CHANGE_NTF] = "page-pool-change-ntf",
+	[NETDEV_CMD_PAGE_POOL_STATS_GET] = "page-pool-stats-get",
 };
 
 const char *netdev_op_str(int op)
@@ -59,6 +64,16 @@ const char *netdev_xdp_rx_metadata_str(enum netdev_xdp_rx_metadata value)
 }
 
 /* Policies */
+struct ynl_policy_attr netdev_page_pool_info_policy[NETDEV_A_PAGE_POOL_MAX + 1] = {
+	[NETDEV_A_PAGE_POOL_ID] = { .name = "id", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest netdev_page_pool_info_nest = {
+	.max_attr = NETDEV_A_PAGE_POOL_MAX,
+	.table = netdev_page_pool_info_policy,
+};
+
 struct ynl_policy_attr netdev_dev_policy[NETDEV_A_DEV_MAX + 1] = {
 	[NETDEV_A_DEV_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
 	[NETDEV_A_DEV_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
@@ -72,7 +87,85 @@ struct ynl_policy_nest netdev_dev_nest = {
 	.table = netdev_dev_policy,
 };
 
+struct ynl_policy_attr netdev_page_pool_policy[NETDEV_A_PAGE_POOL_MAX + 1] = {
+	[NETDEV_A_PAGE_POOL_ID] = { .name = "id", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
+	[NETDEV_A_PAGE_POOL_NAPI_ID] = { .name = "napi-id", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_INFLIGHT] = { .name = "inflight", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_INFLIGHT_MEM] = { .name = "inflight-mem", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_DETACH_TIME] = { .name = "detach-time", .type = YNL_PT_UINT, },
+};
+
+struct ynl_policy_nest netdev_page_pool_nest = {
+	.max_attr = NETDEV_A_PAGE_POOL_MAX,
+	.table = netdev_page_pool_policy,
+};
+
+struct ynl_policy_attr netdev_page_pool_stats_policy[NETDEV_A_PAGE_POOL_STATS_MAX + 1] = {
+	[NETDEV_A_PAGE_POOL_STATS_INFO] = { .name = "info", .type = YNL_PT_NEST, .nest = &netdev_page_pool_info_nest, },
+	[NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST] = { .name = "alloc-fast", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW] = { .name = "alloc-slow", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER] = { .name = "alloc-slow-high-order", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY] = { .name = "alloc-empty", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL] = { .name = "alloc-refill", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE] = { .name = "alloc-waive", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED] = { .name = "recycle-cached", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL] = { .name = "recycle-cache-full", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING] = { .name = "recycle-ring", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL] = { .name = "recycle-ring-full", .type = YNL_PT_UINT, },
+	[NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT] = { .name = "recycle-released-refcnt", .type = YNL_PT_UINT, },
+};
+
+struct ynl_policy_nest netdev_page_pool_stats_nest = {
+	.max_attr = NETDEV_A_PAGE_POOL_STATS_MAX,
+	.table = netdev_page_pool_stats_policy,
+};
+
 /* Common nested types */
+void netdev_page_pool_info_free(struct netdev_page_pool_info *obj)
+{
+}
+
+int netdev_page_pool_info_put(struct nlmsghdr *nlh, unsigned int attr_type,
+			      struct netdev_page_pool_info *obj)
+{
+	struct nlattr *nest;
+
+	nest = mnl_attr_nest_start(nlh, attr_type);
+	if (obj->_present.id)
+		mnl_attr_put_uint(nlh, NETDEV_A_PAGE_POOL_ID, obj->id);
+	if (obj->_present.ifindex)
+		mnl_attr_put_u32(nlh, NETDEV_A_PAGE_POOL_IFINDEX, obj->ifindex);
+	mnl_attr_nest_end(nlh, nest);
+
+	return 0;
+}
+
+int netdev_page_pool_info_parse(struct ynl_parse_arg *yarg,
+				const struct nlattr *nested)
+{
+	struct netdev_page_pool_info *dst = yarg->data;
+	const struct nlattr *attr;
+
+	mnl_attr_for_each_nested(attr, nested) {
+		unsigned int type = mnl_attr_get_type(attr);
+
+		if (type == NETDEV_A_PAGE_POOL_ID) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.id = 1;
+			dst->id = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_IFINDEX) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.ifindex = 1;
+			dst->ifindex = mnl_attr_get_u32(attr);
+		}
+	}
+
+	return 0;
+}
+
 /* ============== NETDEV_CMD_DEV_GET ============== */
 /* NETDEV_CMD_DEV_GET - do */
 void netdev_dev_get_req_free(struct netdev_dev_get_req *req)
@@ -197,6 +290,314 @@ void netdev_dev_get_ntf_free(struct netdev_dev_get_ntf *rsp)
 	free(rsp);
 }
 
+/* ============== NETDEV_CMD_PAGE_POOL_GET ============== */
+/* NETDEV_CMD_PAGE_POOL_GET - do */
+void netdev_page_pool_get_req_free(struct netdev_page_pool_get_req *req)
+{
+	free(req);
+}
+
+void netdev_page_pool_get_rsp_free(struct netdev_page_pool_get_rsp *rsp)
+{
+	free(rsp);
+}
+
+int netdev_page_pool_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+	struct netdev_page_pool_get_rsp *dst;
+	struct ynl_parse_arg *yarg = data;
+	const struct nlattr *attr;
+
+	dst = yarg->data;
+
+	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+		unsigned int type = mnl_attr_get_type(attr);
+
+		if (type == NETDEV_A_PAGE_POOL_ID) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.id = 1;
+			dst->id = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_IFINDEX) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.ifindex = 1;
+			dst->ifindex = mnl_attr_get_u32(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_NAPI_ID) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.napi_id = 1;
+			dst->napi_id = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_INFLIGHT) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.inflight = 1;
+			dst->inflight = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_INFLIGHT_MEM) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.inflight_mem = 1;
+			dst->inflight_mem = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_DETACH_TIME) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.detach_time = 1;
+			dst->detach_time = mnl_attr_get_uint(attr);
+		}
+	}
+
+	return MNL_CB_OK;
+}
+
+struct netdev_page_pool_get_rsp *
+netdev_page_pool_get(struct ynl_sock *ys, struct netdev_page_pool_get_req *req)
+{
+	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+	struct netdev_page_pool_get_rsp *rsp;
+	struct nlmsghdr *nlh;
+	int err;
+
+	nlh = ynl_gemsg_start_req(ys, ys->family_id, NETDEV_CMD_PAGE_POOL_GET, 1);
+	ys->req_policy = &netdev_page_pool_nest;
+	yrs.yarg.rsp_policy = &netdev_page_pool_nest;
+
+	if (req->_present.id)
+		mnl_attr_put_uint(nlh, NETDEV_A_PAGE_POOL_ID, req->id);
+
+	rsp = calloc(1, sizeof(*rsp));
+	yrs.yarg.data = rsp;
+	yrs.cb = netdev_page_pool_get_rsp_parse;
+	yrs.rsp_cmd = NETDEV_CMD_PAGE_POOL_GET;
+
+	err = ynl_exec(ys, nlh, &yrs);
+	if (err < 0)
+		goto err_free;
+
+	return rsp;
+
+err_free:
+	netdev_page_pool_get_rsp_free(rsp);
+	return NULL;
+}
+
+/* NETDEV_CMD_PAGE_POOL_GET - dump */
+void netdev_page_pool_get_list_free(struct netdev_page_pool_get_list *rsp)
+{
+	struct netdev_page_pool_get_list *next = rsp;
+
+	while ((void *)next != YNL_LIST_END) {
+		rsp = next;
+		next = rsp->next;
+
+		free(rsp);
+	}
+}
+
+struct netdev_page_pool_get_list *
+netdev_page_pool_get_dump(struct ynl_sock *ys)
+{
+	struct ynl_dump_state yds = {};
+	struct nlmsghdr *nlh;
+	int err;
+
+	yds.ys = ys;
+	yds.alloc_sz = sizeof(struct netdev_page_pool_get_list);
+	yds.cb = netdev_page_pool_get_rsp_parse;
+	yds.rsp_cmd = NETDEV_CMD_PAGE_POOL_GET;
+	yds.rsp_policy = &netdev_page_pool_nest;
+
+	nlh = ynl_gemsg_start_dump(ys, ys->family_id, NETDEV_CMD_PAGE_POOL_GET, 1);
+
+	err = ynl_exec_dump(ys, nlh, &yds);
+	if (err < 0)
+		goto free_list;
+
+	return yds.first;
+
+free_list:
+	netdev_page_pool_get_list_free(yds.first);
+	return NULL;
+}
+
+/* NETDEV_CMD_PAGE_POOL_GET - notify */
+void netdev_page_pool_get_ntf_free(struct netdev_page_pool_get_ntf *rsp)
+{
+	free(rsp);
+}
+
+/* ============== NETDEV_CMD_PAGE_POOL_STATS_GET ============== */
+/* NETDEV_CMD_PAGE_POOL_STATS_GET - do */
+void
+netdev_page_pool_stats_get_req_free(struct netdev_page_pool_stats_get_req *req)
+{
+	netdev_page_pool_info_free(&req->info);
+	free(req);
+}
+
+void
+netdev_page_pool_stats_get_rsp_free(struct netdev_page_pool_stats_get_rsp *rsp)
+{
+	netdev_page_pool_info_free(&rsp->info);
+	free(rsp);
+}
+
+int netdev_page_pool_stats_get_rsp_parse(const struct nlmsghdr *nlh,
+					 void *data)
+{
+	struct netdev_page_pool_stats_get_rsp *dst;
+	struct ynl_parse_arg *yarg = data;
+	const struct nlattr *attr;
+	struct ynl_parse_arg parg;
+
+	dst = yarg->data;
+	parg.ys = yarg->ys;
+
+	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+		unsigned int type = mnl_attr_get_type(attr);
+
+		if (type == NETDEV_A_PAGE_POOL_STATS_INFO) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.info = 1;
+
+			parg.rsp_policy = &netdev_page_pool_info_nest;
+			parg.data = &dst->info;
+			if (netdev_page_pool_info_parse(&parg, attr))
+				return MNL_CB_ERROR;
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.alloc_fast = 1;
+			dst->alloc_fast = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.alloc_slow = 1;
+			dst->alloc_slow = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.alloc_slow_high_order = 1;
+			dst->alloc_slow_high_order = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.alloc_empty = 1;
+			dst->alloc_empty = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.alloc_refill = 1;
+			dst->alloc_refill = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.alloc_waive = 1;
+			dst->alloc_waive = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.recycle_cached = 1;
+			dst->recycle_cached = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.recycle_cache_full = 1;
+			dst->recycle_cache_full = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.recycle_ring = 1;
+			dst->recycle_ring = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.recycle_ring_full = 1;
+			dst->recycle_ring_full = mnl_attr_get_uint(attr);
+		} else if (type == NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT) {
+			if (ynl_attr_validate(yarg, attr))
+				return MNL_CB_ERROR;
+			dst->_present.recycle_released_refcnt = 1;
+			dst->recycle_released_refcnt = mnl_attr_get_uint(attr);
+		}
+	}
+
+	return MNL_CB_OK;
+}
+
+struct netdev_page_pool_stats_get_rsp *
+netdev_page_pool_stats_get(struct ynl_sock *ys,
+			   struct netdev_page_pool_stats_get_req *req)
+{
+	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+	struct netdev_page_pool_stats_get_rsp *rsp;
+	struct nlmsghdr *nlh;
+	int err;
+
+	nlh = ynl_gemsg_start_req(ys, ys->family_id, NETDEV_CMD_PAGE_POOL_STATS_GET, 1);
+	ys->req_policy = &netdev_page_pool_stats_nest;
+	yrs.yarg.rsp_policy = &netdev_page_pool_stats_nest;
+
+	if (req->_present.info)
+		netdev_page_pool_info_put(nlh, NETDEV_A_PAGE_POOL_STATS_INFO, &req->info);
+
+	rsp = calloc(1, sizeof(*rsp));
+	yrs.yarg.data = rsp;
+	yrs.cb = netdev_page_pool_stats_get_rsp_parse;
+	yrs.rsp_cmd = NETDEV_CMD_PAGE_POOL_STATS_GET;
+
+	err = ynl_exec(ys, nlh, &yrs);
+	if (err < 0)
+		goto err_free;
+
+	return rsp;
+
+err_free:
+	netdev_page_pool_stats_get_rsp_free(rsp);
+	return NULL;
+}
+
+/* NETDEV_CMD_PAGE_POOL_STATS_GET - dump */
+void
+netdev_page_pool_stats_get_list_free(struct netdev_page_pool_stats_get_list *rsp)
+{
+	struct netdev_page_pool_stats_get_list *next = rsp;
+
+	while ((void *)next != YNL_LIST_END) {
+		rsp = next;
+		next = rsp->next;
+
+		netdev_page_pool_info_free(&rsp->obj.info);
+		free(rsp);
+	}
+}
+
+struct netdev_page_pool_stats_get_list *
+netdev_page_pool_stats_get_dump(struct ynl_sock *ys)
+{
+	struct ynl_dump_state yds = {};
+	struct nlmsghdr *nlh;
+	int err;
+
+	yds.ys = ys;
+	yds.alloc_sz = sizeof(struct netdev_page_pool_stats_get_list);
+	yds.cb = netdev_page_pool_stats_get_rsp_parse;
+	yds.rsp_cmd = NETDEV_CMD_PAGE_POOL_STATS_GET;
+	yds.rsp_policy = &netdev_page_pool_stats_nest;
+
+	nlh = ynl_gemsg_start_dump(ys, ys->family_id, NETDEV_CMD_PAGE_POOL_STATS_GET, 1);
+
+	err = ynl_exec_dump(ys, nlh, &yds);
+	if (err < 0)
+		goto free_list;
+
+	return yds.first;
+
+free_list:
+	netdev_page_pool_stats_get_list_free(yds.first);
+	return NULL;
+}
+
 static const struct ynl_ntf_info netdev_ntf_info[] = {
 	[NETDEV_CMD_DEV_ADD_NTF] =  {
 		.alloc_sz	= sizeof(struct netdev_dev_get_ntf),
@@ -216,6 +617,24 @@ static const struct ynl_ntf_info netdev_ntf_info[] = {
 		.policy		= &netdev_dev_nest,
 		.free		= (void *)netdev_dev_get_ntf_free,
 	},
+	[NETDEV_CMD_PAGE_POOL_ADD_NTF] =  {
+		.alloc_sz	= sizeof(struct netdev_page_pool_get_ntf),
+		.cb		= netdev_page_pool_get_rsp_parse,
+		.policy		= &netdev_page_pool_nest,
+		.free		= (void *)netdev_page_pool_get_ntf_free,
+	},
+	[NETDEV_CMD_PAGE_POOL_DEL_NTF] =  {
+		.alloc_sz	= sizeof(struct netdev_page_pool_get_ntf),
+		.cb		= netdev_page_pool_get_rsp_parse,
+		.policy		= &netdev_page_pool_nest,
+		.free		= (void *)netdev_page_pool_get_ntf_free,
+	},
+	[NETDEV_CMD_PAGE_POOL_CHANGE_NTF] =  {
+		.alloc_sz	= sizeof(struct netdev_page_pool_get_ntf),
+		.cb		= netdev_page_pool_get_rsp_parse,
+		.policy		= &netdev_page_pool_nest,
+		.free		= (void *)netdev_page_pool_get_ntf_free,
+	},
 };
 
 const struct ynl_family ynl_netdev_family = {
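The recycling percentage in the sample output above is consistent with
recycled pages divided by allocated pages. A hypothetical helper (not
part of this series; the exact arithmetic of the patch 15 sample is an
assumption) could derive it from the stats response parsed above:

#include "netdev-user.h"

/* Recycling efficiency as recycled / allocated; with the sample's
 * numbers this gives (89652 + 270201) / (656 + 397681) = 90.3%.
 */
static double page_pool_recycling_pct(const struct netdev_page_pool_stats_get_rsp *s)
{
	unsigned long long alloc = s->alloc_fast + s->alloc_slow;
	unsigned long long recycle = s->recycle_cached + s->recycle_ring;

	return alloc ? 100.0 * (double)recycle / (double)alloc : 0.0;
}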
