| author | Stanislav Fomichev <sdf@google.com> | 2022-06-28 10:43:07 -0700 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2022-06-29 13:21:52 -0700 |
| commit | c0e19f2c9a3edd38e4b1bdae98eb44555d02bc31 (patch) | |
| tree | b2198708a1d7ee3dccb5c7ba5d40a84643a66e28 /kernel/bpf/core.c | |
| parent | 69fd337a975c7e690dfe49d9cb4fe5ba1e6db44e (diff) | |
bpf: minimize number of allocated lsm slots per program
The previous patch adds a 1:1 mapping between all 211 LSM hooks
and the bpf_cgroup program array. Instead of reserving a slot per
possible hook, reserve 10 slots per cgroup for LSM programs.
Those slots are allocated dynamically on demand and reclaimed
when no longer used.
struct cgroup_bpf {
	struct bpf_prog_array *    effective[33];        /*     0   264 */
	/* --- cacheline 4 boundary (256 bytes) was 8 bytes ago --- */
	struct hlist_head          progs[33];            /*   264   264 */
	/* --- cacheline 8 boundary (512 bytes) was 16 bytes ago --- */
	u8                         flags[33];            /*   528    33 */

	/* XXX 7 bytes hole, try to pack */

	struct list_head           storages;             /*   568    16 */
	/* --- cacheline 9 boundary (576 bytes) was 8 bytes ago --- */
	struct bpf_prog_array *    inactive;             /*   584     8 */
	struct percpu_ref          refcnt;               /*   592    16 */
	struct work_struct         release_work;         /*   608    72 */

	/* size: 680, cachelines: 11, members: 7 */
	/* sum members: 673, holes: 1, sum holes: 7 */
	/* last cacheline: 40 bytes */
};
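For illustration, here is a minimal userspace sketch of the refcounted slot scheme the commit message describes: a small fixed pool of per-cgroup LSM slots, looked up by the BTF ID of the hook a program attaches to, allocated on first use and reclaimed when the last user goes away. The names (atype_get(), atype_put(), lsm_slot) and the simple array are illustrative stand-ins, not the kernel's data structures; the kernel-side counterpart of the release step, bpf_cgroup_atype_put(), is visible in the diff below.

```c
/*
 * Simplified model of per-cgroup LSM slot ("atype") allocation.
 * Assumption: a fixed pool of 10 slots keyed by the BTF ID of the attached
 * LSM hook, reclaimed by refcount. This mirrors the idea in the commit
 * message, not the kernel's exact implementation.
 */
#include <stdio.h>
#include <stdint.h>

#define LSM_SLOT_NUM  10
#define ATYPE_INVALID (-1)

struct lsm_slot {
	uint32_t btf_id;   /* LSM hook this slot is bound to */
	int refcnt;        /* number of programs using the slot; 0 == free */
};

static struct lsm_slot slots[LSM_SLOT_NUM];

/* Return a slot index for @btf_id, reusing an existing slot or taking a
 * free one; ATYPE_INVALID if the pool is exhausted. */
static int atype_get(uint32_t btf_id)
{
	int free_idx = ATYPE_INVALID;

	for (int i = 0; i < LSM_SLOT_NUM; i++) {
		if (slots[i].refcnt && slots[i].btf_id == btf_id) {
			slots[i].refcnt++;
			return i;
		}
		if (!slots[i].refcnt && free_idx == ATYPE_INVALID)
			free_idx = i;
	}
	if (free_idx != ATYPE_INVALID) {
		slots[free_idx].btf_id = btf_id;
		slots[free_idx].refcnt = 1;
	}
	return free_idx;
}

/* Drop a reference; the slot becomes reusable once the count hits zero. */
static void atype_put(int atype)
{
	if (atype != ATYPE_INVALID && slots[atype].refcnt > 0)
		slots[atype].refcnt--;
}

int main(void)
{
	int a = atype_get(1001);  /* first program on hook 1001 */
	int b = atype_get(1001);  /* second program shares the slot */
	int c = atype_get(2002);  /* a different hook gets a new slot */

	printf("a=%d b=%d c=%d\n", a, b, c);  /* prints: a=0 b=0 c=1 */
	atype_put(a);
	atype_put(b);                          /* slot 0 is free again */
	atype_put(c);
	return 0;
}
```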
Reviewed-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/r/20220628174314.1216643-5-sdf@google.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r-- | kernel/bpf/core.c | 7 |
1 file changed, 7 insertions, 0 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 4cc10b942a3c..805c2ad5c793 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -107,6 +107,9 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 	fp->aux->prog = fp;
 	fp->jit_requested = ebpf_jit_enabled();
 	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
+#ifdef CONFIG_CGROUP_BPF
+	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
+#endif

 	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
 	mutex_init(&fp->aux->used_maps_mutex);
@@ -2570,6 +2573,10 @@ static void bpf_prog_free_deferred(struct work_struct *work)
 #ifdef CONFIG_BPF_SYSCALL
 	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
 #endif
+#ifdef CONFIG_CGROUP_BPF
+	if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
+		bpf_cgroup_atype_put(aux->cgroup_atype);
+#endif
 	bpf_free_used_maps(aux);
 	bpf_free_used_btfs(aux);
 	if (bpf_prog_is_dev_bound(aux))
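The two hunks follow a sentinel pattern: cgroup_atype is set to an invalid value when the program object is allocated, so the deferred free path can tell whether an LSM slot was ever taken before releasing it. Below is a hedged sketch of that lifecycle; prog_alloc()/prog_free() and the struct are hypothetical stand-ins for bpf_prog_alloc_no_stats()/bpf_prog_free_deferred(), not kernel code.

```c
/* Sketch of the alloc/free lifecycle the patch wires up. The struct and
 * helpers are illustrative stand-ins, not kernel definitions. */
#include <stdio.h>
#include <stdlib.h>

#define ATYPE_INVALID (-1)  /* stands in for CGROUP_BPF_ATTACH_TYPE_INVALID */

struct prog {
	int cgroup_atype;       /* slot index, or ATYPE_INVALID if none held */
};

static void atype_put(int atype)
{
	printf("releasing slot %d\n", atype);
}

static struct prog *prog_alloc(void)
{
	struct prog *p = calloc(1, sizeof(*p));

	if (p)
		p->cgroup_atype = ATYPE_INVALID;  /* mirrors the first hunk */
	return p;
}

static void prog_free(struct prog *p)
{
	/* Mirrors the second hunk: only release a slot that was taken. */
	if (p->cgroup_atype != ATYPE_INVALID)
		atype_put(p->cgroup_atype);
	free(p);
}

int main(void)
{
	struct prog *p = prog_alloc();

	p->cgroup_atype = 3;  /* pretend an LSM attach assigned slot 3 */
	prog_free(p);         /* prints "releasing slot 3" */
	return 0;
}
```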