From e6d6c071f22de29e4993784fc00cd2202b7ba149 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Tue, 18 Aug 2020 15:57:44 +0200
Subject: x86/static_call: Add out-of-line static call implementation

Add the x86 out-of-line static call implementation. For each key, a
permanent trampoline is created which is the destination for all static
calls for the given key. The trampoline has a direct jump which gets
patched by static_call_update() when the destination function changes.

[peterz: fixed trampoline, rewrote patching code]

Signed-off-by: Josh Poimboeuf
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Cc: Linus Torvalds
Link: https://lore.kernel.org/r/20200818135804.804315175@infradead.org
---
 arch/x86/kernel/static_call.c | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)
 create mode 100644 arch/x86/kernel/static_call.c

diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
new file mode 100644
index 000000000000..0565825970af
--- /dev/null
+++ b/arch/x86/kernel/static_call.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/static_call.h>
+#include <linux/memory.h>
+#include <linux/bug.h>
+#include <asm/text-patching.h>
+
+static void __static_call_transform(void *insn, u8 opcode, void *func)
+{
+	const void *code = text_gen_insn(opcode, insn, func);
+
+	if (WARN_ONCE(*(u8 *)insn != opcode,
+		      "unexpected static call insn opcode 0x%x at %pS\n",
+		      opcode, insn))
+		return;
+
+	if (memcmp(insn, code, CALL_INSN_SIZE) == 0)
+		return;
+
+	text_poke_bp(insn, code, CALL_INSN_SIZE, NULL);
+}
+
+void arch_static_call_transform(void *site, void *tramp, void *func)
+{
+	mutex_lock(&text_mutex);
+
+	if (tramp)
+		__static_call_transform(tramp, JMP32_INSN_OPCODE, func);
+
+	mutex_unlock(&text_mutex);
+}
+EXPORT_SYMBOL_GPL(arch_static_call_transform);
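
For context, a rough usage sketch (not part of this patch): it assumes the
generic out-of-line static call API (DEFINE_STATIC_CALL(), static_call(),
static_call_update()) introduced earlier in this series, and the handler
names below are made up for illustration. static_call_update() is what
ultimately reaches arch_static_call_transform() above to re-patch the
trampoline's direct jump.

#include <linux/static_call.h>

static int default_handler(int arg)
{
	return 0;
}

static int fast_handler(int arg)
{
	return arg;
}

/* Defines the key and the permanent per-key trampoline. */
DEFINE_STATIC_CALL(my_handler, default_handler);

static void example(void)
{
	/* Out-of-line: this call site jumps through the trampoline. */
	static_call(my_handler)(1);

	/*
	 * Re-targets the trampoline's direct JMP; on x86 this ends up in
	 * arch_static_call_transform() via the generic static call code.
	 */
	static_call_update(my_handler, &fast_handler);
}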