From 4b33dadf37666c0860b88f9e52a16d07bf6d0b03 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 1 May 2019 15:11:17 +0200
Subject: x86_64: Allow breakpoints to emulate call instructions

In order to allow breakpoints to emulate call instructions, they need to
push the return address onto the stack. The x86_64 int3 handler adds a
small gap to allow the stack to grow. Use this gap to add the return
address, making it possible to emulate a call instruction at the
breakpoint location.

These helper functions are added:

  int3_emulate_jmp():  changes regs->ip so execution continues at the
                       given address
 (The next two are only for x86_64)
  int3_emulate_push(): pushes a value into the gap in the stack
  int3_emulate_call(): pushes the return address and changes regs->ip

Cc: Andy Lutomirski
Cc: Nicolai Stange
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Borislav Petkov
Cc: "H. Peter Anvin"
Cc: the arch/x86 maintainers
Cc: Josh Poimboeuf
Cc: Jiri Kosina
Cc: Miroslav Benes
Cc: Petr Mladek
Cc: Joe Lawrence
Cc: Shuah Khan
Cc: Konrad Rzeszutek Wilk
Cc: Tim Chen
Cc: Sebastian Andrzej Siewior
Cc: Mimi Zohar
Cc: Juergen Gross
Cc: Nick Desaulniers
Cc: Nayna Jain
Cc: Masahiro Yamada
Cc: Joerg Roedel
Cc: "open list:KERNEL SELFTEST FRAMEWORK"
Cc: stable@vger.kernel.org
Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching")
Tested-by: Nicolai Stange
Reviewed-by: Nicolai Stange
Reviewed-by: Masami Hiramatsu
Signed-off-by: Peter Zijlstra (Intel)
[ Modified to only work for x86_64 and added comment to int3_emulate_push() ]
Signed-off-by: Steven Rostedt (VMware)
---
 arch/x86/include/asm/text-patching.h | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index e85ff65c43c3..05861cc08787 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -39,4 +39,32 @@ extern int poke_int3_handler(struct pt_regs *regs);
 extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
 extern int after_bootmem;
 
+static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+{
+	regs->ip = ip;
+}
+
+#define INT3_INSN_SIZE 1
+#define CALL_INSN_SIZE 5
+
+#ifdef CONFIG_X86_64
+static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
+{
+	/*
+	 * The int3 handler in entry_64.S adds a gap between the
+	 * stack where the break point happened, and the saving of
+	 * pt_regs. We can extend the original stack because of
+	 * this gap. See the idtentry macro's create_gap option.
+	 */
+	regs->sp -= sizeof(unsigned long);
+	*(unsigned long *)regs->sp = val;
+}
+
+static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+{
+	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+	int3_emulate_jmp(regs, func);
+}
+#endif
+
 #endif /* _ASM_X86_TEXT_PATCHING_H */
--
cgit
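For context, a minimal sketch of how the helpers above might be used from a
text_poke_bp()-style int3 callback; example_int3_handler(), patched_site and
new_target are hypothetical names for illustration and are not part of the
patch.

/*
 * Hypothetical sketch: an int3 callback that makes a breakpoint planted
 * on a 5-byte call site behave as if the original call had executed,
 * redirected to new_target.
 */
static unsigned long patched_site;	/* address of the patched call insn */
static void *new_target;		/* function the emulated call should reach */

static int example_int3_handler(struct pt_regs *regs)
{
	/* at the trap, regs->ip points just past the int3 byte */
	if (regs->ip - INT3_INSN_SIZE != patched_site)
		return 0;		/* not our breakpoint */

	/* push the return address and branch, exactly as the call would */
	int3_emulate_call(regs, (unsigned long)new_target);
	return 1;
}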
From 562e14f72292249e52e6346a9e3a30be652b0cf6 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)"
Date: Thu, 9 May 2019 15:32:05 -0400
Subject: ftrace/x86: Remove mcount support

There are two methods of enabling function tracing in Linux on x86. One is
with just "gcc -pg" and the other is "gcc -pg -mfentry". The former uses
calls to a special function "mcount" after the frame is set up in all C
functions. The latter adds calls to a special function called "fentry" as
the very first instruction of all C functions.

At compile time, there is a check to see if gcc supports -mfentry, and if
it does, it is used, because it is more versatile and less error prone for
function tracing.

Starting with v4.19, the minimum gcc version supported to build the Linux
kernel was raised to 4.6. That also happens to be the first gcc version to
support -mfentry. Since gcc 4.6 and later unconditionally enable -mfentry
on x86, the kernel no longer uses mcount as the method for inserting calls
into its C functions. This means that there is no point in continuing to
maintain mcount support on x86.

Remove support for using mcount. This makes the code less complex, and
will also allow it to be simplified in the future.

Acked-by: Peter Zijlstra (Intel)
Acked-by: Jiri Kosina
Acked-by: Josh Poimboeuf
Signed-off-by: Steven Rostedt (VMware)
---
 arch/x86/include/asm/ftrace.h    | 8 +++-----
 arch/x86/include/asm/livepatch.h | 3 ---
 2 files changed, 3 insertions(+), 8 deletions(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index cf350639e76d..287f1f7b2e52 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,12 +3,10 @@
 #define _ASM_X86_FTRACE_H
 
 #ifdef CONFIG_FUNCTION_TRACER
-#ifdef CC_USING_FENTRY
-# define MCOUNT_ADDR		((unsigned long)(__fentry__))
-#else
-# define MCOUNT_ADDR		((unsigned long)(mcount))
-# define HAVE_FUNCTION_GRAPH_FP_TEST
+#ifndef CC_USING_FENTRY
+# error Compiler does not support fentry?
 #endif
+# define MCOUNT_ADDR		((unsigned long)(__fentry__))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index ed80003ce3e2..2f2bdf0662f8 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -26,9 +26,6 @@
 
 static inline int klp_check_compiler_support(void)
 {
-#ifndef CC_USING_FENTRY
-	return 1;
-#endif
 	return 0;
 }
 
--
cgit

From 56e33afd7757d5da2664fb797f2544ce167176be Mon Sep 17 00:00:00 2001
From: Jiri Kosina
Date: Fri, 10 May 2019 23:47:50 +0200
Subject: livepatch: Remove klp_check_compiler_support()

The only purpose of klp_check_compiler_support() is to make sure that we
are not using ftrace on x86 via mcount (because that is executed only
after the prologue has already been set up, which is too late for
livepatching purposes).

Now that mcount is no longer supported by ftrace, there is no need for
klp_check_compiler_support() either.

Link: http://lkml.kernel.org/r/nycvar.YFH.7.76.1905102346100.17054@cbobk.fhfr.pm

Reported-by: Linus Torvalds
Signed-off-by: Jiri Kosina
Signed-off-by: Steven Rostedt (VMware)
---
 arch/x86/include/asm/livepatch.h | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 2f2bdf0662f8..a66f6706c2de 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -24,11 +24,6 @@
 #include
 #include
 
-static inline int klp_check_compiler_support(void)
-{
-	return 0;
-}
-
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {
 	regs->ip = ip;
--
cgit
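Both commits above hinge on the 5-byte call that "gcc -mfentry" emits as a
function's very first instruction. As a side note, here is a self-contained
sketch of how such a call is encoded, which is why MCOUNT_INSN_SIZE is 5;
make_fentry_call() and FENTRY_CALL_SIZE are illustrative names and this is
not the kernel's own ftrace code:

/*
 * Illustrative sketch only: encode "call <target>" as the 5 bytes that sit
 * at a function's entry when built with -mfentry (opcode 0xe8 followed by
 * a 32-bit displacement).
 */
#include <string.h>

#define FENTRY_CALL_SIZE 5

static void make_fentry_call(unsigned char insn[FENTRY_CALL_SIZE],
			     unsigned long site, unsigned long target)
{
	/* the displacement is relative to the end of the call instruction */
	int rel32 = (int)(target - (site + FENTRY_CALL_SIZE));

	insn[0] = 0xe8;				/* call rel32 */
	memcpy(&insn[1], &rel32, sizeof(rel32));
}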
From 693713cbdb3a4bda5a8a678c31f06560bbb14657 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)"
Date: Sat, 11 May 2019 08:32:40 -0400
Subject: x86: Hide the int3_emulate_call/jmp functions from UML

User Mode Linux does not have access to the ip or sp fields of the
pt_regs, and accessing them causes UML to fail to build.

Hide the int3_emulate_jmp() and int3_emulate_call() functions from UML,
as it doesn't need them anyway.

Reported-by: kbuild test robot
Signed-off-by: Steven Rostedt (VMware)
---
 arch/x86/include/asm/text-patching.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 05861cc08787..0bbb07eaed6b 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -39,6 +39,7 @@ extern int poke_int3_handler(struct pt_regs *regs);
 extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
 extern int after_bootmem;
 
+#ifndef CONFIG_UML_X86
 static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
 {
 	regs->ip = ip;
@@ -65,6 +66,7 @@ static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
 	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
 	int3_emulate_jmp(regs, func);
 }
-#endif
+#endif /* CONFIG_X86_64 */
+#endif /* !CONFIG_UML_X86 */
 
 #endif /* _ASM_X86_TEXT_PATCHING_H */
--
cgit
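To round out the picture, a hedged sketch of the complementary use of
int3_emulate_jmp(): while a 5-byte call site is being rewritten, a handler
may want the breakpoint to act as a NOP and simply resume after the
instruction. example_skip_patched_insn() is an illustrative name, not a
kernel function:

/*
 * Hypothetical sketch: treat a breakpoint on a 5-byte call/jmp site as a
 * NOP by resuming execution just past the instruction being patched.
 */
static void example_skip_patched_insn(struct pt_regs *regs)
{
	/* back up over the int3 byte, then continue after the 5-byte site */
	int3_emulate_jmp(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
}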