author    Hou Tao <houtao1@huawei.com>    2023-07-05 20:34:47 -0700
committer Daniel Borkmann <daniel@iogearbox.net>    2023-07-12 23:45:23 +0200
commit    4ed8b5bcfada6687baa478bf8febe891d9107118 (patch)
tree      0f87eddd06a2878f6c0e69e3d8c137e6eb8386c1 /kernel/bpf/memalloc.c
parent    8e07bb9ebcd9b9ddf58337253ee5522d86b99370 (diff)
bpf: Add object leak check.
The object leak check is cheap. Do it unconditionally to spot difficult races in bpf_mem_alloc.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20230706033447.54696-15-alexei.starovoitov@gmail.com
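The idea is simple enough to show outside the kernel. The following is a minimal user-space sketch of the same pattern, not the patch itself: before the allocator's per-CPU backing storage is freed, every cache's free lists must already be empty, and anything still on them is reported as a leak. The struct layout, names, and fixed CPU/cache counts here are illustrative assumptions; the real code in the diff below iterates with for_each_possible_cpu()/per_cpu_ptr() and reports via WARN_ON_ONCE() on llist_empty().

/*
 * User-space analogue of the leak check (a sketch, not kernel code).
 * All names and sizes below are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS     4
#define NUM_CACHES  11

struct llist_node { struct llist_node *next; };

struct mem_cache {
	struct llist_node *free_llist;     /* objects ready for reuse */
	struct llist_node *free_by_rcu;    /* queued for an RCU grace period */
	struct llist_node *waiting_for_gp; /* in flight during a grace period */
};

/* Report a cache whose lists are not empty at teardown time. */
static void check_mem_cache(const struct mem_cache *c)
{
	if (c->free_llist || c->free_by_rcu || c->waiting_for_gp)
		fprintf(stderr, "leak: cache %p still has pending objects\n",
			(const void *)c);
}

/* Walk every per-CPU cache before the backing storage is freed. */
static void check_leaked_objs(struct mem_cache caches[NR_CPUS][NUM_CACHES])
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		for (int i = 0; i < NUM_CACHES; i++)
			check_mem_cache(&caches[cpu][i]);
}

int main(void)
{
	static struct mem_cache caches[NR_CPUS][NUM_CACHES]; /* all empty */

	check_leaked_objs(caches);   /* silent: nothing pending */
	caches[1][3].free_by_rcu = malloc(sizeof(struct llist_node));
	check_leaked_objs(caches);   /* warns about caches[1][3] */
	free(caches[1][3].free_by_rcu);
	return 0;
}

Because the check is only a handful of pointer reads per cache, it is cheap enough to run on every teardown rather than only under a debug config, which is what the patch does.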
Diffstat (limited to 'kernel/bpf/memalloc.c')
-rw-r--r-- kernel/bpf/memalloc.c | 35
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 17ef2e9b278a..51d6389e5152 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -567,8 +567,43 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
free_all(llist_del_all(&c->waiting_for_gp), percpu);
}
+static void check_mem_cache(struct bpf_mem_cache *c)
+{
+ WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace));
+ WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
+ WARN_ON_ONCE(!llist_empty(&c->free_llist));
+ WARN_ON_ONCE(!llist_empty(&c->free_llist_extra));
+ WARN_ON_ONCE(!llist_empty(&c->free_by_rcu));
+ WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu));
+ WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
+}
+
+static void check_leaked_objs(struct bpf_mem_alloc *ma)
+{
+ struct bpf_mem_caches *cc;
+ struct bpf_mem_cache *c;
+ int cpu, i;
+
+ if (ma->cache) {
+ for_each_possible_cpu(cpu) {
+ c = per_cpu_ptr(ma->cache, cpu);
+ check_mem_cache(c);
+ }
+ }
+ if (ma->caches) {
+ for_each_possible_cpu(cpu) {
+ cc = per_cpu_ptr(ma->caches, cpu);
+ for (i = 0; i < NUM_CACHES; i++) {
+ c = &cc->cache[i];
+ check_mem_cache(c);
+ }
+ }
+ }
+}
+
static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
+ check_leaked_objs(ma);
free_percpu(ma->cache);
free_percpu(ma->caches);
ma->cache = NULL;