path: root/net/core/sock_map.c
author     Yafang Shao <laoar.shao@gmail.com>       2023-03-05 12:46:12 +0000
committer  Alexei Starovoitov <ast@kernel.org>      2023-03-07 09:33:43 -0800
commit     73d2c61919e9aeffad1b826ec23b1a4a07c1e0dd
tree       f89e1cba1f765b1724cd07292174a9328478ebbd /net/core/sock_map.c
parent     7490b7f1c02ef825ef98f7230662049d4a464a21
bpf, net: sock_map memory usage
sockmap and sockhash don't have anything in common in allocation, so let's introduce different helpers to calculate their memory usage. The result is as follows,

- before
28: sockmap  name count_map  flags 0x0
        key 4B  value 4B  max_entries 65536  memlock 524288B
29: sockhash  name count_map  flags 0x0
        key 4B  value 4B  max_entries 65536  memlock 524288B

- after
28: sockmap  name count_map  flags 0x0
        key 4B  value 4B  max_entries 65536  memlock 524608B
29: sockhash  name count_map  flags 0x0    <<<< no updated elements
        key 4B  value 4B  max_entries 65536  memlock 1048896B

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Link: https://lore.kernel.org/r/20230305124615.12358-16-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
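For reference, the new memlock numbers line up with the helpers added in the diff below. Assuming a 64-bit kernel where a struct sock * is 8 bytes and a struct bpf_shtab_bucket is 16 bytes, and with both map headers working out to roughly 320 bytes on this configuration (the exact header sizes are config-dependent, so these figures are only illustrative), the accounting is:

    sockmap:  sizeof(struct bpf_stab)  + max_entries * sizeof(struct sock *)
              ~ 320 + 65536 * 8  = 524608 B
    sockhash: sizeof(struct bpf_shtab) + buckets_num * sizeof(struct bpf_shtab_bucket)
                                       + count * elem_size
              ~ 320 + 65536 * 16 + 0   = 1048896 B   (no elements were updated, so count is 0)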
Diffstat (limited to 'net/core/sock_map.c')
-rw-r--r--   net/core/sock_map.c   20
1 file changed, 20 insertions, 0 deletions
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index a68a7290a3b2..9b854e236d23 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -797,6 +797,14 @@ static void sock_map_fini_seq_private(void *priv_data)
bpf_map_put_with_uref(info->map);
}
+static u64 sock_map_mem_usage(const struct bpf_map *map)
+{
+ u64 usage = sizeof(struct bpf_stab);
+
+ usage += (u64)map->max_entries * sizeof(struct sock *);
+ return usage;
+}
+
static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
.seq_ops = &sock_map_seq_ops,
.init_seq_private = sock_map_init_seq_private,
@@ -816,6 +824,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_lookup_elem = sock_map_lookup,
.map_release_uref = sock_map_release_progs,
.map_check_btf = map_check_no_btf,
+ .map_mem_usage = sock_map_mem_usage,
.map_btf_id = &sock_map_btf_ids[0],
.iter_seq_info = &sock_map_iter_seq_info,
};
@@ -1397,6 +1406,16 @@ static void sock_hash_fini_seq_private(void *priv_data)
bpf_map_put_with_uref(info->map);
}
+static u64 sock_hash_mem_usage(const struct bpf_map *map)
+{
+ struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
+ u64 usage = sizeof(*htab);
+
+ usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
+ usage += atomic_read(&htab->count) * (u64)htab->elem_size;
+ return usage;
+}
+
static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
.seq_ops = &sock_hash_seq_ops,
.init_seq_private = sock_hash_init_seq_private,
@@ -1416,6 +1435,7 @@ const struct bpf_map_ops sock_hash_ops = {
.map_lookup_elem_sys_only = sock_hash_lookup_sys,
.map_release_uref = sock_hash_release_progs,
.map_check_btf = map_check_no_btf,
+ .map_mem_usage = sock_hash_mem_usage,
.map_btf_id = &sock_hash_map_btf_ids[0],
.iter_seq_info = &sock_hash_iter_seq_info,
};
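One way to observe the new accounting from user space is to create an empty sockmap and read the memlock field the kernel exposes in the map's fdinfo, which is the same value bpftool prints in the commit message above. The following is only a minimal sketch, assuming libbpf >= 1.0, sufficient privileges (CAP_BPF/CAP_NET_ADMIN), and a kernel carrying this patch; the map name and geometry simply mirror the commit message:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <bpf/bpf.h>

    int main(void)
    {
            char path[64], line[256];
            FILE *f;
            /* Empty BPF_MAP_TYPE_SOCKMAP with the same geometry as in the
             * commit message: 4-byte key, 4-byte value, 65536 entries.
             */
            int fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, "count_map",
                                    sizeof(__u32), sizeof(__u32), 65536, NULL);

            if (fd < 0)
                    return 1;

            /* The kernel reports the map's memory usage as "memlock:" in
             * /proc/<pid>/fdinfo/<map fd>.
             */
            snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
            f = fopen(path, "r");
            while (f && fgets(line, sizeof(line), f))
                    if (!strncmp(line, "memlock:", 8))
                            fputs(line, stdout); /* ~524608 B with this patch applied */
            if (f)
                    fclose(f);
            close(fd);
            return 0;
    }

Running the same check against a sockhash instead of a sockmap should show the larger bucket-based figure, growing further as elements are added, since sock_hash_mem_usage() also counts atomic_read(&htab->count) * elem_size.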