author		Masami Hiramatsu <mhiramat@kernel.org>	2018-01-13 02:54:33 +0900
committer	Alexei Starovoitov <ast@kernel.org>	2018-01-12 17:33:38 -0800
commit		66665ad2f1023d3ffb0c12eea9e0a6d0b613ecb3 (patch)
tree		2ccb32066d1acf7d5d9f56d881cd80133dba8f15 /kernel/trace/trace_kprobe.c
parent		b4da3340eae2c3932144be3e81ccfd4e424d87b7 (diff)
tracing/kprobe: bpf: Compare instruction pointer with original one
Compare the instruction pointer with the original one on the stack instead
of using the per-cpu bpf_kprobe_override flag.

This patch also consolidates the reset_current_kprobe() and
preempt_enable_no_resched() blocks; those can be done in one place.

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
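The core idea is flag-free change detection: snapshot the instruction
pointer before handing regs to the BPF program, then detect an override by
comparing afterwards. A minimal user-space sketch of that pattern follows;
every name in it (fake_regs, run_bpf_prog, handler) is a hypothetical
stand-in for illustration, not a kernel API:

	#include <stdio.h>

	struct fake_regs {
		unsigned long ip;	/* stand-in for instruction_pointer(regs) */
	};

	/* Hypothetical callback that may redirect execution, the way a
	 * BPF program with bpf_override_return() changes the saved ip. */
	static int run_bpf_prog(struct fake_regs *regs)
	{
		regs->ip = 0xdeadbeef;	/* simulate an override */
		return 0;
	}

	static int handler(struct fake_regs *regs)
	{
		unsigned long orig_ip = regs->ip;	/* snapshot up front */
		int ret = run_bpf_prog(regs);

		/* No side-channel flag needed: a changed ip is detected
		 * purely by comparing against the snapshot. */
		if (orig_ip != regs->ip) {
			/* ...clean up probe state, skip single-stepping... */
			return 1;
		}
		return ret;
	}

	int main(void)
	{
		struct fake_regs regs = { .ip = 0x1000 };

		printf("handler returned %d\n", handler(&regs));
		return 0;
	}

The comparison makes the per-cpu flag redundant: the only writer of the
flag was the code path that also changed the ip, so the changed ip itself
carries the same information, with no per-cpu state to reset on the next
pass through.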
Diffstat (limited to 'kernel/trace/trace_kprobe.c')
-rw-r--r--	kernel/trace/trace_kprobe.c	21
1 file changed, 7 insertions, 14 deletions
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3c8deb977a8b..b8c90441bc87 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -42,8 +42,6 @@ struct trace_kprobe {
 	(offsetof(struct trace_kprobe, tp.args) +	\
 	(sizeof(struct probe_arg) * (n)))
 
-DEFINE_PER_CPU(int, bpf_kprobe_override);
-
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
 	return tk->rp.handler != NULL;
@@ -1205,6 +1203,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	int rctx;
 
 	if (bpf_prog_array_valid(call)) {
+		unsigned long orig_ip = instruction_pointer(regs);
 		int ret;
 
 		ret = trace_call_bpf(call, regs);
@@ -1212,12 +1211,13 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 		/*
 		 * We need to check and see if we modified the pc of the
 		 * pt_regs, and if so clear the kprobe and return 1 so that we
-		 * don't do the instruction skipping.  Also reset our state so
-		 * we are clean the next pass through.
+		 * don't do the single stepping.
+		 * The ftrace kprobe handler leaves it up to us to re-enable
+		 * preemption here before returning if we've modified the ip.
 		 */
-		if (__this_cpu_read(bpf_kprobe_override)) {
-			__this_cpu_write(bpf_kprobe_override, 0);
+		if (orig_ip != instruction_pointer(regs)) {
 			reset_current_kprobe();
+			preempt_enable_no_resched();
 			return 1;
 		}
 		if (!ret)
@@ -1325,15 +1325,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 	if (tk->tp.flags & TP_FLAG_TRACE)
 		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
-	if (tk->tp.flags & TP_FLAG_PROFILE) {
+	if (tk->tp.flags & TP_FLAG_PROFILE)
 		ret = kprobe_perf_func(tk, regs);
-		/*
-		 * The ftrace kprobe handler leaves it up to us to re-enable
-		 * preemption here before returning if we've modified the ip.
-		 */
-		if (ret)
-			preempt_enable_no_resched();
-	}
#endif
 	return ret;
 }
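Taken together, the two kprobe_perf_func() hunks leave its BPF branch
looking roughly as follows after the patch; this is assembled from the
hunks above for readability and is not a verbatim copy of the resulting
file:

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/* A changed ip means the BPF program overrode the probed
		 * function: undo the kprobe state, re-enable preemption on
		 * behalf of the ftrace kprobe handler, and return 1 to skip
		 * single-stepping. */
		if (orig_ip != instruction_pointer(regs)) {
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
		if (!ret)
			return 0;
	}

Consolidating reset_current_kprobe() and preempt_enable_no_resched() in
this one place is what lets kprobe_dispatcher() drop its conditional
re-enable, as the last hunk shows.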