author     Andrii Nakryiko <andriin@fb.com>        2019-11-17 09:28:03 -0800
committer  Daniel Borkmann <daniel@iogearbox.net>  2019-11-18 11:41:59 +0100
commit     85192dbf4de08795afe2b88e52a36fc6abfc3dba
tree       52379ced52c2c2ed5f023073a1aeeb52779e376d /kernel/events
parent     1e0bd5a091e5d9e0f1d5b0e6329b87bb1792f784
bpf: Convert bpf_prog refcnt to atomic64_t
Similarly to bpf_map's refcnt/usercnt, convert bpf_prog's refcnt to
atomic64 and remove the artificial 32k limit. This makes bpf_prog's
refcounting non-failing and simplifies the logic of
bpf_prog_add/bpf_prog_inc users.

Compilation was validated with an allyesconfig kernel build.

Suggested-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20191117172806.2195367-3-andriin@fb.com
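Editor's note: as background for the "non-failing" claim, here is a rough sketch of the helper-side shape of this conversion. The actual helper changes live elsewhere in the commit (outside the kernel/events diffstat shown below); the _before/_after suffixes are added purely for contrast, the bodies are simplified rather than copied from the kernel, and the two variants assume different types for prog->aux->refcnt, so they would never coexist in one tree.

#include <linux/atomic.h>
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/filter.h>

/* Before: a 32-bit atomic_t refcnt capped at BPF_MAX_REFCNT (32k), so
 * taking a reference can fail and every caller must check for ERR_PTR().
 */
static struct bpf_prog *bpf_prog_inc_before(struct bpf_prog *prog)
{
	if (atomic_add_return(1, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(1, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}

/* After: an atomic64_t refcnt cannot realistically be overflowed, so the
 * increment never fails, the helper returns void, and callers need no
 * error path.
 */
static void bpf_prog_inc_after(struct bpf_prog *prog)
{
	atomic64_inc(&prog->aux->refcnt);
}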
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index aec8dba2bea4..73c616876597 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10477,12 +10477,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		context = parent_event->overflow_handler_context;
 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
 		if (overflow_handler == bpf_overflow_handler) {
-			struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
+			struct bpf_prog *prog = parent_event->prog;
 
-			if (IS_ERR(prog)) {
-				err = PTR_ERR(prog);
-				goto err_ns;
-			}
+			bpf_prog_inc(prog);
 			event->prog = prog;
 			event->orig_overflow_handler =
 				parent_event->orig_overflow_handler;
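Editor's note: the hunk above is the caller-side effect of that conversion. Because taking a reference can no longer fail, perf_event_alloc() drops the IS_ERR()/goto err_ns handling and simply bumps the refcount on the parent event's program before reusing it for the new event.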