author	Alexei Starovoitov <ast@kernel.org>	2023-07-05 20:34:37 -0700
committer	Daniel Borkmann <daniel@iogearbox.net>	2023-07-12 23:45:22 +0200
commit	05ae68656a8e9d9386ce4243fe992122fd29bb51 (patch)
tree	ddc8bded5f3ff06cda13e7c7087ac9abceb25bbc	/kernel/bpf/memalloc.c
parent	9de3e81521b4d943c9ec27ae2c871292c12f1409 (diff)
bpf: Refactor alloc_bulk().
Factor out inner body of alloc_bulk into separate helper.
No functional changes.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-5-alexei.starovoitov@gmail.com
Diffstat (limited to 'kernel/bpf/memalloc.c')
-rw-r--r--	kernel/bpf/memalloc.c	46
1 file changed, 26 insertions, 20 deletions
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 693651d2648b..9693b1f8cbda 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -154,11 +154,35 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+	unsigned long flags;
+
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		/* In RT irq_work runs in per-cpu kthread, so disable
+		 * interrupts to avoid preemption and interrupts and
+		 * reduce the chance of bpf prog executing on this cpu
+		 * when active counter is busy.
+		 */
+		local_irq_save(flags);
+	/* alloc_bulk runs from irq_work which will not preempt a bpf
+	 * program that does unit_alloc/unit_free since IRQs are
+	 * disabled there. There is no race to increment 'active'
+	 * counter. It protects free_llist from corruption in case NMI
+	 * bpf prog preempted this loop.
+	 */
+	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+	__llist_add(obj, &c->free_llist);
+	c->free_cnt++;
+	local_dec(&c->active);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_restore(flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
 	struct mem_cgroup *memcg = NULL, *old_memcg;
-	unsigned long flags;
 	void *obj;
 	int i;
 
@@ -188,25 +212,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 			if (!obj)
 				break;
 		}
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			/* In RT irq_work runs in per-cpu kthread, so disable
-			 * interrupts to avoid preemption and interrupts and
-			 * reduce the chance of bpf prog executing on this cpu
-			 * when active counter is busy.
-			 */
-			local_irq_save(flags);
-		/* alloc_bulk runs from irq_work which will not preempt a bpf
-		 * program that does unit_alloc/unit_free since IRQs are
-		 * disabled there. There is no race to increment 'active'
-		 * counter. It protects free_llist from corruption in case NMI
-		 * bpf prog preempted this loop.
-		 */
-		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-		__llist_add(obj, &c->free_llist);
-		c->free_cnt++;
-		local_dec(&c->active);
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_restore(flags);
+		add_obj_to_free_list(c, obj);
 	}
 	set_active_memcg(old_memcg);
 	mem_cgroup_put(memcg);
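
For orientation, here is a minimal sketch of how alloc_bulk()'s loop reads once this patch is applied. It is reconstructed from the hunks above rather than quoted from the tree: the object-acquisition step is collapsed into a placeholder, and the memcg setup calls are assumed to mirror the teardown visible in the context lines.

/* Sketch only (not part of the commit): post-patch shape of alloc_bulk().
 * The placeholder stands in for the actual object reuse/allocation logic
 * shown in the hunks above.
 */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	void *obj;
	int i;

	memcg = get_memcg(c);			/* assumed setup, mirroring the teardown below */
	old_memcg = set_active_memcg(memcg);
	for (i = 0; i < cnt; i++) {
		obj = /* acquire an object (reuse or allocate) */ NULL;
		if (!obj)
			break;
		/* all free_llist bookkeeping now lives in the new helper */
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}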