author	Arnd Bergmann <arnd@arndb.de>	2023-07-25 22:26:40 +0200
committer	Alexei Starovoitov <ast@kernel.org>	2023-07-25 17:14:18 -0700
commit	63e2da3b7f7f63f881aa508825b0c4241e9910e1 (patch)
tree	f9165139f432a4abf2a82afb9af74e9f41840fa6	/kernel/bpf/memalloc.c
parent	13fd5e14afa57ba82189d5fd9ceb0435e5693bcc (diff)
bpf: work around -Wuninitialized warning
Splitting these out into separate helper functions means that we
actually pass an uninitialized variable into another function call
if dec_active() happens to not be inlined, and CONFIG_PREEMPT_RT is
disabled:

kernel/bpf/memalloc.c: In function 'add_obj_to_free_list':
kernel/bpf/memalloc.c:200:9: error: 'flags' is used uninitialized [-Werror=uninitialized]
  200 |         dec_active(c, flags);

Avoid this by passing the flags by reference, so they either get
initialized and dereferenced through a pointer, or the pointer never
gets accessed at all.

Fixes: 18e027b1c7c6d ("bpf: Factor out inc/dec of active flag into helpers.")
Suggested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20230725202653.2905259-1-arnd@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
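The idea can be reproduced outside the kernel. The following is a minimal,
stand-alone sketch of the same pattern, not the kernel code itself: the helper
names fake_irq_save()/fake_irq_restore() and the USE_IRQ_FLAGS macro (standing
in for IS_ENABLED(CONFIG_PREEMPT_RT)) are made up for illustration. Because the
callee receives a pointer, the flags value is only ever read through *flags on
the same configuration path that also wrote it, so the compiler no longer sees
a possibly-uninitialized value flowing into a call that may not be inlined.

/* Stand-alone sketch of the pass-flags-by-reference pattern; helper
 * names are hypothetical, not the kernel APIs.
 */
#include <stdio.h>

#define USE_IRQ_FLAGS 0	/* stands in for IS_ENABLED(CONFIG_PREEMPT_RT) */

static void fake_irq_save(unsigned long *flags)   { *flags = 1; }
static void fake_irq_restore(unsigned long flags) { (void)flags; }

static void inc_active(unsigned long *flags)
{
	if (USE_IRQ_FLAGS)
		fake_irq_save(flags);		/* *flags is written only here */
}

static void dec_active(unsigned long *flags)
{
	if (USE_IRQ_FLAGS)
		fake_irq_restore(*flags);	/* *flags is read only if it was written */
}

int main(void)
{
	unsigned long flags;	/* may stay uninitialized; then it is never read */

	inc_active(&flags);
	/* ... critical section ... */
	dec_active(&flags);
	puts("ok");
	return 0;
}

Had dec_active() taken the flags by value, the uninitialized variable itself
would be passed at the call site whenever the helper is not inlined, which is
exactly what -Wuninitialized flagged in add_obj_to_free_list().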
Diffstat (limited to 'kernel/bpf/memalloc.c')
-rw-r--r--	kernel/bpf/memalloc.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 51d6389e5152..14d9b1a9a4ca 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -183,11 +183,11 @@ static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
WARN_ON_ONCE(local_inc_return(&c->active) != 1);
}
-static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
+static void dec_active(struct bpf_mem_cache *c, unsigned long *flags)
{
local_dec(&c->active);
if (IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_restore(flags);
+ local_irq_restore(*flags);
}
static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
@@ -197,7 +197,7 @@ static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
inc_active(c, &flags);
__llist_add(obj, &c->free_llist);
c->free_cnt++;
- dec_active(c, flags);
+ dec_active(c, &flags);
}
/* Mostly runs from irq_work except __init phase. */
@@ -344,7 +344,7 @@ static void free_bulk(struct bpf_mem_cache *c)
cnt = --c->free_cnt;
else
cnt = 0;
- dec_active(c, flags);
+ dec_active(c, &flags);
if (llnode)
enque_to_free(tgt, llnode);
} while (cnt > (c->high_watermark + c->low_watermark) / 2);
@@ -384,7 +384,7 @@ static void check_free_by_rcu(struct bpf_mem_cache *c)
llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
if (__llist_add(llnode, &c->free_by_rcu))
c->free_by_rcu_tail = llnode;
- dec_active(c, flags);
+ dec_active(c, &flags);
}
if (llist_empty(&c->free_by_rcu))
@@ -408,7 +408,7 @@ static void check_free_by_rcu(struct bpf_mem_cache *c)
inc_active(c, &flags);
WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
c->waiting_for_gp_tail = c->free_by_rcu_tail;
- dec_active(c, flags);
+ dec_active(c, &flags);
if (unlikely(READ_ONCE(c->draining))) {
free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);