author		Hou Tao <houtao1@huawei.com>	2023-10-21 09:49:59 +0800
committer	Daniel Borkmann <daniel@iogearbox.net>	2023-10-26 14:23:33 +0200
commit		c421c12586b3f00fb96b5c9af15c9a051a9090b1 (patch)
tree		93527c11d4684d0c5dc602a575bd7dc90d2cf320 /kernel/bpf/memalloc.c
parent		22360fad5889cbefe1eca695b0cc0273ab280b56 (diff)
bpf: Add more WARN_ON_ONCE checks for mismatched alloc and free
There are two possible mismatched alloc and free cases in the BPF memory
allocator:

1) allocate from cache X but free by cache Y with a different unit_size
2) allocate from per-cpu cache but free by kmalloc cache, or vice versa

So add more WARN_ON_ONCE checks in free_bulk() and __free_by_rcu() to
spot these mismatched allocs and frees early.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20231021014959.3563841-1-houtao@huaweicloud.com
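To make the two failure modes concrete, below is a minimal userspace sketch of the check pattern the patch adds. struct mem_cache, check_free() and the WARN_ON_ONCE() model are hypothetical stand-ins for illustration, not the kernel's struct bpf_mem_cache or its actual free paths:

#include <stdbool.h>
#include <stdio.h>

/* Userspace model of the kernel's WARN_ON_ONCE(): report the first time
 * the condition is true at a given call site, then stay silent. */
#define WARN_ON_ONCE(cond)						\
	({								\
		static bool __warned;					\
		bool __ret = (cond);					\
		if (__ret && !__warned) {				\
			__warned = true;				\
			fprintf(stderr, "WARNING: %s:%d: %s\n",		\
				__FILE__, __LINE__, #cond);		\
		}							\
		__ret;							\
	})

/* Hypothetical stand-in for struct bpf_mem_cache, reduced to the two
 * fields the new checks compare. */
struct mem_cache {
	unsigned int unit_size;		/* object size served by this cache */
	unsigned int percpu_size;	/* non-zero only for per-cpu caches */
};

/* Model of the free path: tgt is the cache the object is being returned
 * to, c is the cache doing the freeing. */
static void check_free(const struct mem_cache *tgt, const struct mem_cache *c)
{
	WARN_ON_ONCE(tgt->unit_size != c->unit_size);		/* case 1 */
	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);	/* case 2 */
}

int main(void)
{
	struct mem_cache kmalloc96 = { .unit_size = 96, .percpu_size = 0 };
	struct mem_cache kmalloc128 = { .unit_size = 128, .percpu_size = 0 };
	struct mem_cache percpu96 = { .unit_size = 96, .percpu_size = 8 };

	check_free(&kmalloc96, &kmalloc96);	/* matched: silent */
	check_free(&kmalloc128, &kmalloc96);	/* case 1: unit_size differs */
	check_free(&percpu96, &kmalloc96);	/* case 2: per-cpu vs. kmalloc */
	return 0;
}

Built with gcc or clang (statement-expression macros are a GNU extension, as in the kernel), the second and third calls each print one warning, mirroring case 1 and case 2 from the commit message; repeated mismatches at the same call site stay silent, which is the point of the _ONCE variant on hot free paths.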
Diffstat (limited to 'kernel/bpf/memalloc.c')
-rw-r--r--	kernel/bpf/memalloc.c	4
1 file changed, 4 insertions, 0 deletions
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 5308e386380a..63b909d277d4 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -340,6 +340,7 @@ static void free_bulk(struct bpf_mem_cache *c)
 	int cnt;
 
 	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
+	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);
 
 	do {
 		inc_active(c, &flags);
@@ -365,6 +366,9 @@ static void __free_by_rcu(struct rcu_head *head)
 	struct bpf_mem_cache *tgt = c->tgt;
 	struct llist_node *llnode;
 
+	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
+	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);
+
 	llnode = llist_del_all(&c->waiting_for_gp);
 	if (!llnode)
 		goto out;