Diffstat (limited to 'arch/x86/kernel/jump_label.c')
-rw-r--r--  arch/x86/kernel/jump_label.c  141
1 file changed, 115 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 2889b3d43882..a7949a54a0ff 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* jump label x86 support
*
@@ -13,47 +14,135 @@
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
+#include <asm/text-patching.h>
+#include <asm/insn.h>
-#ifdef HAVE_JUMP_LABEL
+int arch_jump_entry_size(struct jump_entry *entry)
+{
+ struct insn insn = {};
+
+ insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
+ BUG_ON(insn.length != 2 && insn.length != 5);
+
+ return insn.length;
+}
-union jump_code_union {
- char code[JUMP_LABEL_NOP_SIZE];
- struct {
- char jump;
- int offset;
- } __attribute__((packed));
+struct jump_label_patch {
+ const void *code;
+ int size;
};
-static void __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type,
- void *(*poker)(void *, const void *, size_t))
+static struct jump_label_patch
+__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
+{
+ const void *expect, *code, *nop;
+ const void *addr, *dest;
+ int size;
+
+ addr = (void *)jump_entry_code(entry);
+ dest = (void *)jump_entry_target(entry);
+
+ size = arch_jump_entry_size(entry);
+ switch (size) {
+ case JMP8_INSN_SIZE:
+ code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
+ nop = x86_nops[size];
+ break;
+
+ case JMP32_INSN_SIZE:
+ code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
+ nop = x86_nops[size];
+ break;
+
+ default: BUG();
+ }
+
+ if (type == JUMP_LABEL_JMP)
+ expect = nop;
+ else
+ expect = code;
+
+ if (memcmp(addr, expect, size)) {
+ /*
+ * The location is not an op that we were expecting.
+ * Something went wrong. Crash the box, as something could be
+ * corrupting the kernel.
+ */
+ pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
+ addr, addr, addr, expect, size, type);
+ BUG();
+ }
+
+ if (type == JUMP_LABEL_NOP)
+ code = nop;
+
+ return (struct jump_label_patch){.code = code, .size = size};
+}
+
+static __always_inline void
+__jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ int init)
{
- union jump_code_union code;
+ const struct jump_label_patch jlp = __jump_label_patch(entry, type);
- if (type == JUMP_LABEL_ENABLE) {
- code.jump = 0xe9;
- code.offset = entry->target -
- (entry->code + JUMP_LABEL_NOP_SIZE);
- } else
- memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
+ /*
+ * As long as only a single processor is running and the code is still
+ * not marked as RO, text_poke_early() can be used; Checking that
+ * system_state is SYSTEM_BOOTING guarantees it. It will be set to
+ * SYSTEM_SCHEDULING before other cores are awaken and before the
+ * code is write-protected.
+ *
+ * At the time the change is being done, just ignore whether we
+ * are doing nop -> jump or jump -> nop transition, and assume
+ * always nop being the 'currently valid' instruction
+ */
+ if (init || system_state == SYSTEM_BOOTING) {
+ text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
+ return;
+ }
- (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
+ smp_text_poke_single((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
+}
+
+static void __ref jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ int init)
+{
+ mutex_lock(&text_mutex);
+ __jump_label_transform(entry, type, init);
+ mutex_unlock(&text_mutex);
}
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- get_online_cpus();
+ jump_label_transform(entry, type, 0);
+}
+
+bool arch_jump_label_transform_queue(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ struct jump_label_patch jlp;
+
+ if (system_state == SYSTEM_BOOTING) {
+ /*
+ * Fallback to the non-batching mode.
+ */
+ arch_jump_label_transform(entry, type);
+ return true;
+ }
+
mutex_lock(&text_mutex);
- __jump_label_transform(entry, type, text_poke_smp);
+ jlp = __jump_label_patch(entry, type);
+ smp_text_poke_batch_add((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
mutex_unlock(&text_mutex);
- put_online_cpus();
+ return true;
}
-__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
- enum jump_label_type type)
+void arch_jump_label_transform_apply(void)
{
- __jump_label_transform(entry, type, text_poke_early);
+ mutex_lock(&text_mutex);
+ smp_text_poke_batch_finish();
+ mutex_unlock(&text_mutex);
}
-
-#endif
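
For context (this note is not part of the patch above): the queue/apply pair added
here is consumed by the generic jump-label core in kernel/jump_label.c. Below is a
minimal sketch of that caller pattern, assuming the generic helpers jump_entry_key(),
jump_label_type() and jump_label_can_update(); it simplifies the real
__jump_label_update() loop and is not the verbatim upstream code.

	#include <linux/jump_label.h>
	#include <linux/bug.h>

	/*
	 * Queue every site belonging to 'key', then flush the whole batch with a
	 * single apply, so the text-poke synchronization runs once per batch
	 * rather than once per patched call site. If the architecture queue ever
	 * refuses an entry, flush what is pending and retry; the x86
	 * implementation in this patch always accepts.
	 */
	static void update_key_sites(struct static_key *key,
				     struct jump_entry *entry,
				     struct jump_entry *stop, bool init)
	{
		for (; entry < stop && jump_entry_key(entry) == key; entry++) {
			if (!jump_label_can_update(entry, init))
				continue;

			if (!arch_jump_label_transform_queue(entry,
							     jump_label_type(entry))) {
				/* Queue full: flush pending pokes, then retry. */
				arch_jump_label_transform_apply();
				BUG_ON(!arch_jump_label_transform_queue(entry,
							jump_label_type(entry)));
			}
		}
		/* One flush for the whole batch. */
		arch_jump_label_transform_apply();
	}

The point of the batch interface is that smp_text_poke_batch_finish() performs the
expensive cross-CPU serialization once per static_key update instead of once per
site, which matters for keys with many call sites.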