author     Thomas Gleixner <tglx@linutronix.de>        2020-02-24 15:01:35 +0100
committer  Alexei Starovoitov <ast@kernel.org>         2020-02-24 16:12:20 -0800
commit     f03efe49bd16c017107ff5079d08ea428e390dde (patch)
tree       50cd6a079cead0ca2eb56fd805f860599238eaaf /kernel/trace
parent     dbca151cad736c99f4817076daf9fd02ed0c2daa (diff)
bpf/tracing: Remove redundant preempt_disable() in __bpf_trace_run()
__bpf_trace_run() disables preemption around the BPF_PROG_RUN() invocation. This is redundant because __bpf_trace_run() is invoked from a trace point via __DO_TRACE(), which already disables preemption _before_ invoking any of the functions which are attached to a trace point.

Remove it and add a cant_sleep() check.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145642.847220186@linutronix.de
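For context, the tracepoint dispatch macro already wraps every attached callback in a preemption-disabled section. The following is a simplified, paraphrased sketch of __DO_TRACE() from include/linux/tracepoint.h (the cond/rcuidle parameters and the SRCU handling are omitted, so this is illustrative rather than the exact upstream macro):

#define __DO_TRACE(tp, proto, args)					\
	do {								\
		struct tracepoint_func *it_func_ptr;			\
		void *it_func;						\
		void *__data;						\
									\
		/* Preemption is disabled before any attached		\
		 * callback, including the BPF one, is invoked.		\
		 */							\
		preempt_disable_notrace();				\
		it_func_ptr = rcu_dereference_raw((tp)->funcs);		\
		if (it_func_ptr) {					\
			do {						\
				it_func = (it_func_ptr)->func;		\
				__data = (it_func_ptr)->data;		\
				((void(*)(proto))(it_func))(args);	\
			} while ((++it_func_ptr)->func);		\
		}							\
		preempt_enable_notrace();				\
	} while (0)

Because __bpf_trace_run() is reached only as one of these callbacks, its own preempt_disable()/preempt_enable() pair merely nests inside an already preemption-disabled region and can be dropped.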
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/bpf_trace.c  |  3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index b8661bd0d028..4d42a5d05ec9 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1516,10 +1516,9 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
+ cant_sleep();
rcu_read_lock();
- preempt_disable();
(void) BPF_PROG_RUN(prog, args);
- preempt_enable();
rcu_read_unlock();
}
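The cant_sleep() annotation that replaces the preempt_disable() documents the expectation that this path never runs in a sleepable context, and turns it into a runtime check when CONFIG_DEBUG_ATOMIC_SLEEP is enabled. Roughly (paraphrased from include/linux/kernel.h of that era; the exact definition and the warning logic inside __cant_sleep() may differ in detail):

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
extern void __cant_sleep(const char *file, int line, int preempt_offset);
/* Warns if the caller could in fact sleep here (debug builds only). */
# define cant_sleep() \
	do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
#else
# define cant_sleep()	do { } while (0)
#endif

So the check costs nothing in production builds while still catching a future caller that reaches __bpf_trace_run() from a sleepable context.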