Diffstat (limited to 'arch/x86/kernel/kprobes/core.c')
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 29
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6290712cb36d..8ef933c03afa 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -52,6 +52,7 @@
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
+#include <asm/ibt.h>
#include "common.h"
@@ -193,17 +194,10 @@ static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
struct kprobe *kp;
- unsigned long faddr;
+ bool faddr;
kp = get_kprobe((void *)addr);
- faddr = ftrace_location(addr);
- /*
- * Addresses inside the ftrace location are refused by
- * arch_check_ftrace_location(). Something went terribly wrong
- * if such an address is checked here.
- */
- if (WARN_ON(faddr && faddr != addr))
- return 0UL;
+ faddr = ftrace_location(addr) == addr;
/*
* Use the current code if it is not modified by Kprobe
* and it cannot be modified by ftrace.
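With IBT enabled, a function begins with a 4-byte ENDBR and the ftrace patch site follows it, so ftrace_location() may legitimately resolve addr to a different address; the old WARN_ON(faddr && faddr != addr) would then fire on perfectly valid probes, and faddr is reduced to a flag meaning "addr is itself the ftrace location". A self-contained user-space sketch of the old versus new check, with a stubbed ftrace_location() standing in for an IBT-enabled layout (ENDBR_LEN and the stub are assumptions for the sketch, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define ENDBR_LEN 4UL	/* size of ENDBR64; assumption for this sketch */

/* Stub: on an IBT kernel the fentry patch site sits after the ENDBR. */
static unsigned long ftrace_location(unsigned long sym)
{
	return sym + ENDBR_LEN;
}

int main(void)
{
	unsigned long addr = 0x1000;	/* pretend sym+0 of a probed function */

	/* Old logic: any mismatch between patch site and addr was a WARN. */
	unsigned long faddr_old = ftrace_location(addr);
	if (faddr_old && faddr_old != addr)
		printf("old check: spurious WARN_ON at sym+0\n");

	/* New logic: just record whether addr itself is the patch site. */
	bool faddr = ftrace_location(addr) == addr;
	printf("new check: faddr = %d (addr is not the fentry site)\n", faddr);
	return 0;
}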
@@ -301,6 +295,22 @@ static int can_probe(unsigned long paddr)
return (addr == paddr);
}
+/* If x86 supports IBT (ENDBR), the leading ENDBR instruction must be skipped. */
+kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
+ bool *on_func_entry)
+{
+ if (is_endbr(*(u32 *)addr)) {
+ *on_func_entry = !offset || offset == 4;
+ if (*on_func_entry)
+ offset = 4;
+
+ } else {
+ *on_func_entry = !offset;
+ }
+
+ return (kprobe_opcode_t *)(addr + offset);
+}
+
/*
* Copy an instruction with recovering modified instruction by kprobes
* and adjust the displacement if the instruction uses the %rip-relative
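The new arch_adjust_kprobe_addr() lets a probe requested at sym+0 (or explicitly at sym+4) land just past the ENDBR while still being treated as a function-entry probe. A user-space model of the adjustment, using a simplified is_endbr() that only matches the raw ENDBR64 encoding f3 0f 1e fa (the kernel helper also knows about the poisoned form), is sketched below:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's is_endbr(): match raw ENDBR64. */
static bool is_endbr(const uint8_t *p)
{
	uint32_t val;

	memcpy(&val, p, sizeof(val));
	return val == 0xfa1e0ff3u;	/* f3 0f 1e fa in little-endian */
}

/* Model of arch_adjust_kprobe_addr(): skip the ENDBR for entry probes. */
static unsigned long adjust_kprobe_addr(const uint8_t *func, unsigned long offset,
					bool *on_func_entry)
{
	if (is_endbr(func)) {
		*on_func_entry = !offset || offset == 4;
		if (*on_func_entry)
			offset = 4;
	} else {
		*on_func_entry = !offset;
	}

	return offset;
}

int main(void)
{
	/* Fake function body: ENDBR64 followed by NOPs. */
	const uint8_t func[] = { 0xf3, 0x0f, 0x1e, 0xfa, 0x90, 0x90, 0x90, 0x90 };
	bool entry;

	unsigned long off = adjust_kprobe_addr(func, 0, &entry);
	printf("sym+0 -> sym+%lu, on_func_entry=%d\n", off, entry);	/* sym+4, 1 */

	off = adjust_kprobe_addr(func, 6, &entry);
	printf("sym+6 -> sym+%lu, on_func_entry=%d\n", off, entry);	/* sym+6, 0 */
	return 0;
}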
@@ -1023,6 +1033,7 @@ asm(
".type __kretprobe_trampoline, @function\n"
"__kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
+ ANNOTATE_NOENDBR
/* Push a fake return address to tell the unwinder it's a kretprobe. */
" pushq $__kretprobe_trampoline\n"
UNWIND_HINT_FUNC
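With the adjustment in place, a kprobe registered at sym+0 on an IBT kernel is transparently moved past the ENDBR and still reported as sitting on the function entry. A minimal module sketch showing the unchanged registration API (the probed symbol and handler are illustrative only, not part of this patch):

#include <linux/module.h>
#include <linux/kprobes.h>

/* Illustrative target: any traceable function entry; sym+0 works with IBT. */
static struct kprobe kp = {
	.symbol_name	= "kernel_clone",
	.offset		= 0,
};

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* p->addr already points past the ENDBR on IBT-enabled kernels. */
	pr_info("pre_handler: probe hit at %pS\n", p->addr);
	return 0;
}

static int __init probe_init(void)
{
	int ret;

	kp.pre_handler = handler_pre;
	ret = register_kprobe(&kp);
	if (ret < 0)
		pr_err("register_kprobe failed: %d\n", ret);
	return ret;
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");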