Diffstat (limited to 'arch/x86/power')
-rw-r--r--  arch/x86/power/Makefile              11
-rw-r--r--  arch/x86/power/cpu.c                476
-rw-r--r--  arch/x86/power/hibernate.c          216
-rw-r--r--  arch/x86/power/hibernate_32.c        74
-rw-r--r--  arch/x86/power/hibernate_64.c       167
-rw-r--r--  arch/x86/power/hibernate_asm_32.S    51
-rw-r--r--  arch/x86/power/hibernate_asm_64.S   171
7 files changed, 813 insertions, 353 deletions
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index a6a198c33623..e0cd7afd5302 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -1,7 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
# __restore_processor_state() restores %gs after S3 resume and so should not
# itself be stack-protected
-nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_cpu.o := $(nostackp)
+CFLAGS_cpu.o := -fno-stack-protector
+
+# Clang may incorrectly inline functions with stack protector enabled into
+# __restore_processor_state(): https://llvm.org/pr47479
+CFLAGS_REMOVE_cpu.o := $(CC_FLAGS_LTO)
obj-$(CONFIG_PM_SLEEP) += cpu.o
-obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o
+obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o hibernate.o
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 1cf5b300305e..916441f5e85c 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Suspend support specific for i386/x86-64.
*
- * Distribute under GPLv2
- *
* Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
* Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
* Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
@@ -12,40 +11,72 @@
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
+#include <linux/tboot.h>
+#include <linux/dmi.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
-#include <asm/xcr.h>
#include <asm/suspend.h>
+#include <asm/fpu/api.h>
#include <asm/debugreg.h>
-#include <asm/fpu-internal.h> /* pcntxt_mask */
#include <asm/cpu.h>
+#include <asm/cacheinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/cpu_device_id.h>
+#include <asm/microcode.h>
+#include <asm/msr.h>
+#include <asm/fred.h>
#ifdef CONFIG_X86_32
-unsigned long saved_context_ebx;
-unsigned long saved_context_esp, saved_context_ebp;
-unsigned long saved_context_esi, saved_context_edi;
-unsigned long saved_context_eflags;
+__visible unsigned long saved_context_ebx;
+__visible unsigned long saved_context_esp, saved_context_ebp;
+__visible unsigned long saved_context_esi, saved_context_edi;
+__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;
+static void msr_save_context(struct saved_context *ctxt)
+{
+ struct saved_msr *msr = ctxt->saved_msrs.array;
+ struct saved_msr *end = msr + ctxt->saved_msrs.num;
+
+ while (msr < end) {
+ if (msr->valid)
+ rdmsrq(msr->info.msr_no, msr->info.reg.q);
+ msr++;
+ }
+}
+
+static void msr_restore_context(struct saved_context *ctxt)
+{
+ struct saved_msr *msr = ctxt->saved_msrs.array;
+ struct saved_msr *end = msr + ctxt->saved_msrs.num;
+
+ while (msr < end) {
+ if (msr->valid)
+ wrmsrq(msr->info.msr_no, msr->info.reg.q);
+ msr++;
+ }
+}
+
/**
- * __save_processor_state - save CPU registers before creating a
- * hibernation image and before restoring the memory state from it
- * @ctxt - structure to store the registers contents in
+ * __save_processor_state() - Save CPU registers before creating a
+ * hibernation image and before restoring
+ * the memory state from it
+ * @ctxt: Structure to store the registers contents in.
*
- * NOTE: If there is a CPU register the modification of which by the
- * boot kernel (ie. the kernel used for loading the hibernation image)
- * might affect the operations of the restored target kernel (ie. the one
- * saved in the hibernation image), then its contents must be saved by this
- * function. In other words, if kernel A is hibernated and different
- * kernel B is used for loading the hibernation image into memory, the
- * kernel A's __save_processor_state() function must save all registers
- * needed by kernel A, so that it can operate correctly after the resume
- * regardless of what kernel B does in the meantime.
+ * NOTE: If there is a CPU register the modification of which by the
+ * boot kernel (ie. the kernel used for loading the hibernation image)
+ * might affect the operations of the restored target kernel (ie. the one
+ * saved in the hibernation image), then its contents must be saved by this
+ * function. In other words, if kernel A is hibernated and different
+ * kernel B is used for loading the hibernation image into memory, the
+ * kernel A's __save_processor_state() function must save all registers
+ * needed by kernel A, so that it can operate correctly after the resume
+ * regardless of what kernel B does in the meantime.
*/
static void __save_processor_state(struct saved_context *ctxt)
{
@@ -57,12 +88,8 @@ static void __save_processor_state(struct saved_context *ctxt)
/*
* descriptor tables
*/
-#ifdef CONFIG_X86_32
store_idt(&ctxt->idt);
-#else
-/* CONFIG_X86_64 */
- store_idt((struct desc_ptr *)&ctxt->idt_limit);
-#endif
+
/*
* We save it here, but restore it only in the hibernate case.
* For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
@@ -70,7 +97,7 @@ static void __save_processor_state(struct saved_context *ctxt)
* 'pmode_gdt' in wakeup_start.
*/
ctxt->gdt_desc.size = GDT_SIZE - 1;
- ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_table(smp_processor_id());
+ ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());
store_tr(ctxt->tr);
@@ -78,25 +105,18 @@ static void __save_processor_state(struct saved_context *ctxt)
/*
* segment registers
*/
-#ifdef CONFIG_X86_32
- savesegment(es, ctxt->es);
- savesegment(fs, ctxt->fs);
savesegment(gs, ctxt->gs);
- savesegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
- asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
- asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
- asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
- asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
- asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
-
- rdmsrl(MSR_FS_BASE, ctxt->fs_base);
- rdmsrl(MSR_GS_BASE, ctxt->gs_base);
- rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+#ifdef CONFIG_X86_64
+ savesegment(fs, ctxt->fs);
+ savesegment(ds, ctxt->ds);
+ savesegment(es, ctxt->es);
+
+ rdmsrq(MSR_FS_BASE, ctxt->fs_base);
+ rdmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+ rdmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
mtrr_save_fixed_ranges(NULL);
- rdmsrl(MSR_EFER, ctxt->efer);
+ rdmsrq(MSR_EFER, ctxt->efer);
#endif
/*
@@ -104,16 +124,11 @@ static void __save_processor_state(struct saved_context *ctxt)
*/
ctxt->cr0 = read_cr0();
ctxt->cr2 = read_cr2();
- ctxt->cr3 = read_cr3();
-#ifdef CONFIG_X86_32
- ctxt->cr4 = read_cr4_safe();
-#else
-/* CONFIG_X86_64 */
- ctxt->cr4 = read_cr4();
- ctxt->cr8 = read_cr8();
-#endif
- ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
+ ctxt->cr3 = __read_cr3();
+ ctxt->cr4 = __read_cr4();
+ ctxt->misc_enable_saved = !rdmsrq_safe(MSR_IA32_MISC_ENABLE,
&ctxt->misc_enable);
+ msr_save_context(ctxt);
}
/* Needed by apm.c */
@@ -137,17 +152,19 @@ static void do_fpu_end(void)
static void fix_processor_context(void)
{
int cpu = smp_processor_id();
- struct tss_struct *t = &per_cpu(init_tss, cpu);
#ifdef CONFIG_X86_64
- struct desc_struct *desc = get_cpu_gdt_table(cpu);
+ struct desc_struct *desc = get_cpu_gdt_rw(cpu);
tss_desc tss;
#endif
- set_tss_desc(cpu, t); /*
- * This just modifies memory; should not be
- * necessary. But... This is necessary, because
- * 386 hardware has concept of busy TSS or some
- * similar stupidity.
- */
+
+ /*
+ * We need to reload TR, which requires that we change the
+ * GDT entry to indicate "available" first.
+ *
+ * XXX: This could probably all be replaced by a call to
+ * force_reload_TR().
+ */
+ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
#ifdef CONFIG_X86_64
memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
@@ -155,91 +172,129 @@ static void fix_processor_context(void)
write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
syscall_init(); /* This sets MSR_*STAR and related */
+#else
+ if (boot_cpu_has(X86_FEATURE_SEP))
+ enable_sep_cpu();
#endif
load_TR_desc(); /* This does ltr */
- load_LDT(&current->active_mm->context); /* This does lldt */
+ load_mm_ldt(current->active_mm); /* This does lldt */
+ initialize_tlbstate_and_flush();
+
+ fpu__resume_cpu();
+
+ /* The processor is back on the direct GDT, load back the fixmap */
+ load_fixmap_gdt(cpu);
}
/**
- * __restore_processor_state - restore the contents of CPU registers saved
- * by __save_processor_state()
- * @ctxt - structure to load the registers contents from
+ * __restore_processor_state() - Restore the contents of CPU registers saved
+ * by __save_processor_state()
+ * @ctxt: Structure to load the registers contents from.
+ *
+ * The asm code that gets us here will have restored a usable GDT, although
+ * it will be pointing to the wrong alias.
*/
-static void __restore_processor_state(struct saved_context *ctxt)
+static void notrace __restore_processor_state(struct saved_context *ctxt)
{
+ struct cpuinfo_x86 *c;
+
if (ctxt->misc_enable_saved)
- wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
+ wrmsrq(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
/*
* control registers
*/
/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
if (ctxt->cr4)
- write_cr4(ctxt->cr4);
+ __write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
- wrmsrl(MSR_EFER, ctxt->efer);
- write_cr8(ctxt->cr8);
- write_cr4(ctxt->cr4);
+ wrmsrq(MSR_EFER, ctxt->efer);
+ __write_cr4(ctxt->cr4);
#endif
write_cr3(ctxt->cr3);
write_cr2(ctxt->cr2);
write_cr0(ctxt->cr0);
+ /* Restore the IDT. */
+ load_idt(&ctxt->idt);
+
/*
- * now restore the descriptor tables to their proper values
- * ltr is done i fix_processor_context().
+ * Just in case the asm code got us here with the SS, DS, or ES
+ * out of sync with the GDT, update them.
*/
-#ifdef CONFIG_X86_32
- load_idt(&ctxt->idt);
-#else
-/* CONFIG_X86_64 */
- load_idt((const struct desc_ptr *)&ctxt->idt_limit);
-#endif
+ loadsegment(ss, __KERNEL_DS);
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
/*
- * segment registers
+ * Restore percpu access. Percpu access can happen in exception
+ * handlers or in complicated helpers like load_gs_index().
*/
-#ifdef CONFIG_X86_32
- loadsegment(es, ctxt->es);
- loadsegment(fs, ctxt->fs);
- loadsegment(gs, ctxt->gs);
- loadsegment(ss, ctxt->ss);
+#ifdef CONFIG_X86_64
+ wrmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base);
/*
- * sysenter MSRs
+ * Reinitialize FRED to ensure the FRED MSRs contain the same values
+ * as before hibernation.
+ *
+ * Note, the setup of FRED RSPs requires access to percpu data
+ * structures. Therefore, FRED reinitialization can only occur after
+ * the percpu access pointer (i.e., MSR_GS_BASE) is restored.
*/
- if (boot_cpu_has(X86_FEATURE_SEP))
- enable_sep_cpu();
+ if (ctxt->cr4 & X86_CR4_FRED) {
+ cpu_init_fred_exceptions();
+ cpu_init_fred_rsps();
+ }
#else
-/* CONFIG_X86_64 */
- asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
- asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
- asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
- load_gs_index(ctxt->gs);
- asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
-
- wrmsrl(MSR_FS_BASE, ctxt->fs_base);
- wrmsrl(MSR_GS_BASE, ctxt->gs_base);
- wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+ loadsegment(fs, __KERNEL_PERCPU);
#endif
+ /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
+ fix_processor_context();
+
/*
- * restore XCR0 for xsave capable cpu's.
+ * Now that we have descriptor tables fully restored and working
+ * exception handling, restore the usermode segments.
*/
- if (cpu_has_xsave)
- xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
+#ifdef CONFIG_X86_64
+ loadsegment(ds, ctxt->es);
+ loadsegment(es, ctxt->es);
+ loadsegment(fs, ctxt->fs);
+ load_gs_index(ctxt->gs);
- fix_processor_context();
+ /*
+ * Restore FSBASE and GSBASE after restoring the selectors, since
+ * restoring the selectors clobbers the bases. Keep in mind
+ * that MSR_KERNEL_GS_BASE is horribly misnamed.
+ */
+ wrmsrq(MSR_FS_BASE, ctxt->fs_base);
+ wrmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+#else
+ loadsegment(gs, ctxt->gs);
+#endif
do_fpu_end();
+ tsc_verify_tsc_adjust(true);
x86_platform.restore_sched_clock_state();
- mtrr_bp_restore();
+ cache_bp_restore();
perf_restore_debug_store();
+
+ c = &cpu_data(smp_processor_id());
+ if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
+ init_ia32_feat_ctl(c);
+
+ microcode_bsp_resume();
+
+ /*
+ * This needs to happen after the microcode has been updated upon resume
+ * because some of the MSRs are "emulated" in microcode.
+ */
+ msr_restore_context(ctxt);
}
/* Needed by apm.c */
-void restore_processor_state(void)
+void notrace restore_processor_state(void)
{
__restore_processor_state(&saved_context);
}
@@ -247,9 +302,48 @@ void restore_processor_state(void)
EXPORT_SYMBOL(restore_processor_state);
#endif
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
+static void __noreturn resume_play_dead(void)
+{
+ play_dead_common();
+ tboot_shutdown(TB_SHUTDOWN_WFS);
+ hlt_play_dead();
+}
+
+int hibernate_resume_nonboot_cpu_disable(void)
+{
+ void (*play_dead)(void) = smp_ops.play_dead;
+ int ret;
+
+ /*
+ * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
+ * during hibernate image restoration, because it is likely that the
+ * monitored address will be actually written to at that time and then
+ * the "dead" CPU will attempt to execute instructions again, but the
+ * address in its instruction pointer may not be possible to resolve
+ * any more at that point (the page tables used by it previously may
+ * have been overwritten by hibernate image data).
+ *
+ * First, make sure that we wake up all the potentially disabled SMT
+ * threads which have been initially brought up and then put into
+ * mwait/cpuidle sleep.
+ * Those will be put to proper (not interfering with hibernation
+ * resume) sleep afterwards, and the resumed kernel will decide itself
+ * what to do with them.
+ */
+ ret = cpuhp_smt_enable();
+ if (ret)
+ return ret;
+ smp_ops.play_dead = resume_play_dead;
+ ret = freeze_secondary_cpus(0);
+ smp_ops.play_dead = play_dead;
+ return ret;
+}
+#endif
+
/*
* When bsp_check() is called in hibernate and suspend, cpu hotplug
- * is disabled already. So it's unnessary to handle race condition between
+ * is disabled already. So it's unnecessary to handle race condition between
* cpumask query and cpu hotplug.
*/
static int bsp_check(void)
@@ -272,43 +366,6 @@ static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
case PM_HIBERNATION_PREPARE:
ret = bsp_check();
break;
-#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
- case PM_RESTORE_PREPARE:
- /*
- * When system resumes from hibernation, online CPU0 because
- * 1. it's required for resume and
- * 2. the CPU was online before hibernation
- */
- if (!cpu_online(0))
- _debug_hotplug_cpu(0, 1);
- break;
- case PM_POST_RESTORE:
- /*
- * When a resume really happens, this code won't be called.
- *
- * This code is called only when user space hibernation software
- * prepares for snapshot device during boot time. So we just
- * call _debug_hotplug_cpu() to restore to CPU0's state prior to
- * preparing the snapshot device.
- *
- * This works for normal boot case in our CPU0 hotplug debug
- * mode, i.e. CPU0 is offline and user mode hibernation
- * software initializes during boot time.
- *
- * If CPU0 is online and user application accesses snapshot
- * device after boot time, this will offline CPU0 and user may
- * see different CPU0 state before and after accessing
- * the snapshot device. But hopefully this is not a case when
- * user debugging CPU0 hotplug. Even if users hit this case,
- * they can easily online CPU0 back.
- *
- * To simplify this debug code, we only consider normal boot
- * case. Otherwise we need to remember CPU0's state and restore
- * to that state and resolve racy conditions etc.
- */
- _debug_hotplug_cpu(0, 0);
- break;
-#endif
default:
break;
}
@@ -327,3 +384,140 @@ static int __init bsp_pm_check_init(void)
}
core_initcall(bsp_pm_check_init);
+
+static int msr_build_context(const u32 *msr_id, const int num)
+{
+ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
+ struct saved_msr *msr_array;
+ int total_num;
+ int i, j;
+
+ total_num = saved_msrs->num + num;
+
+ msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
+ if (!msr_array) {
+ pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
+ return -ENOMEM;
+ }
+
+ if (saved_msrs->array) {
+ /*
+ * Multiple callbacks can invoke this function, so copy any
+ * MSR save requests from previous invocations.
+ */
+ memcpy(msr_array, saved_msrs->array,
+ sizeof(struct saved_msr) * saved_msrs->num);
+
+ kfree(saved_msrs->array);
+ }
+
+ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+ u64 dummy;
+
+ msr_array[i].info.msr_no = msr_id[j];
+ msr_array[i].valid = !rdmsrq_safe(msr_id[j], &dummy);
+ msr_array[i].info.reg.q = 0;
+ }
+ saved_msrs->num = total_num;
+ saved_msrs->array = msr_array;
+
+ return 0;
+}
+
+/*
+ * The following sections are a quirk framework for problematic BIOSen:
+ * Sometimes MSRs are modified by the BIOSen after suspended to
+ * RAM, this might cause unexpected behavior after wakeup.
+ * Thus we save/restore these specified MSRs across suspend/resume
+ * in order to work around it.
+ *
+ * For any further problematic BIOSen/platforms,
+ * please add your own function similar to msr_initialize_bdw.
+ */
+static int msr_initialize_bdw(const struct dmi_system_id *d)
+{
+ /* Add any extra MSR ids into this array. */
+ u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
+
+ pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
+ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+}
+
+static const struct dmi_system_id msr_save_dmi_table[] = {
+ {
+ .callback = msr_initialize_bdw,
+ .ident = "BROADWELL BDX_EP",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
+ },
+ },
+ {}
+};
+
+static int msr_save_cpuid_features(const struct x86_cpu_id *c)
+{
+ u32 cpuid_msr_id[] = {
+ MSR_AMD64_CPUID_FN_1,
+ };
+
+ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
+ c->family);
+
+ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
+}
+
+static const struct x86_cpu_id msr_save_cpu_table[] = {
+ X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
+ X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
+ {}
+};
+
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
+static int pm_cpu_check(const struct x86_cpu_id *c)
+{
+ const struct x86_cpu_id *m;
+ int ret = 0;
+
+ m = x86_match_cpu(msr_save_cpu_table);
+ if (m) {
+ pm_cpu_match_t fn;
+
+ fn = (pm_cpu_match_t)m->driver_data;
+ ret = fn(m);
+ }
+
+ return ret;
+}
+
+static void pm_save_spec_msr(void)
+{
+ struct msr_enumeration {
+ u32 msr_no;
+ u32 feature;
+ } msr_enum[] = {
+ { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
+ { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
+ { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
+ { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
+ { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
+ { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
+ if (boot_cpu_has(msr_enum[i].feature))
+ msr_build_context(&msr_enum[i].msr_no, 1);
+ }
+}
+
+static int pm_check_save_msr(void)
+{
+ dmi_check_system(msr_save_dmi_table);
+ pm_cpu_check(msr_save_cpu_table);
+ pm_save_spec_msr();
+
+ return 0;
+}
+
+device_initcall(pm_check_save_msr);
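
Note: the quirk framework added to cpu.c above (msr_build_context() plus msr_save_dmi_table[]) is intended to be extended with one DMI callback per affected platform, as the in-code comment suggests. A minimal sketch of such an extension follows; the platform name, DMI string, and MSR list are hypothetical illustrations modeled on msr_initialize_bdw(), not part of this patch.

/* Hypothetical quirk: save extra MSRs on an imaginary "EXAMPLEBOARD" platform. */
static int msr_initialize_exampleboard(const struct dmi_system_id *d)
{
	/* List the MSRs this platform's firmware is known to clobber across S3. */
	u32 exampleboard_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
		d->ident);
	return msr_build_context(exampleboard_msr_id,
				 ARRAY_SIZE(exampleboard_msr_id));
}

/*
 * In practice the entry below would be appended to msr_save_dmi_table[]
 * rather than defined in a separate table; it is split out here only to
 * keep the sketch self-contained.
 */
static const struct dmi_system_id exampleboard_dmi_entry[] = {
	{
		.callback = msr_initialize_exampleboard,
		.ident = "EXAMPLEBOARD",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLEBOARD"),
		},
	},
	{}
};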
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
new file mode 100644
index 000000000000..a2294c1649f6
--- /dev/null
+++ b/arch/x86/power/hibernate.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hibernation support for x86
+ *
+ * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
+ * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
+ * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
+ */
+#include <linux/gfp.h>
+#include <linux/smp.h>
+#include <linux/suspend.h>
+#include <linux/scatterlist.h>
+#include <linux/kdebug.h>
+#include <linux/cpu.h>
+#include <linux/pgtable.h>
+#include <linux/types.h>
+#include <linux/crc32.h>
+
+#include <asm/e820/api.h>
+#include <asm/init.h>
+#include <asm/proto.h>
+#include <asm/page.h>
+#include <asm/mtrr.h>
+#include <asm/sections.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Address to jump to in the last phase of restore in order to get to the image
+ * kernel's text (this value is passed in the image header).
+ */
+unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
+
+/*
+ * Value of the cr3 register from before the hibernation (this value is passed
+ * in the image header).
+ */
+unsigned long restore_cr3 __visible;
+unsigned long temp_pgt __visible;
+unsigned long relocated_restore_code __visible;
+
+/**
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
+ * @pfn: the page frame number to check.
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn;
+ unsigned long nosave_end_pfn;
+
+ nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
+ nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
+
+ return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
+}
+
+struct restore_data_record {
+ unsigned long jump_address;
+ unsigned long jump_address_phys;
+ unsigned long cr3;
+ unsigned long magic;
+ unsigned long e820_checksum;
+};
+
+/**
+ * compute_e820_crc32 - calculate crc32 of a given e820 table
+ *
+ * @table: the e820 table to be calculated
+ *
+ * Return: the resulting checksum
+ */
+static inline u32 compute_e820_crc32(struct e820_table *table)
+{
+ int size = offsetof(struct e820_table, entries) +
+ sizeof(struct e820_entry) * table->nr_entries;
+
+ return ~crc32_le(~0, (unsigned char const *)table, size);
+}
+
+#ifdef CONFIG_X86_64
+#define RESTORE_MAGIC 0x23456789ABCDEF02UL
+#else
+#define RESTORE_MAGIC 0x12345679UL
+#endif
+
+/**
+ * arch_hibernation_header_save - populate the architecture specific part
+ * of a hibernation image header
+ * @addr: address where architecture specific header data will be saved.
+ * @max_size: maximum size of architecture specific data in hibernation header.
+ *
+ * Return: 0 on success, -EOVERFLOW if max_size is insufficient.
+ */
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+ struct restore_data_record *rdr = addr;
+
+ if (max_size < sizeof(struct restore_data_record))
+ return -EOVERFLOW;
+ rdr->magic = RESTORE_MAGIC;
+ rdr->jump_address = (unsigned long)restore_registers;
+ rdr->jump_address_phys = __pa_symbol(restore_registers);
+
+ /*
+ * The restore code fixes up CR3 and CR4 in the following sequence:
+ *
+ * [in hibernation asm]
+ * 1. CR3 <= temporary page tables
+ * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
+ * 3. CR3 <= rdr->cr3
+ * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
+ * [in restore_processor_state()]
+ * 5. CR4 <= saved CR4
+ * 6. CR3 <= saved CR3
+ *
+ * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
+ * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
+ * rdr->cr3 needs to point to valid page tables but must not
+ * have any of the PCID bits set.
+ */
+ rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
+
+ rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
+ return 0;
+}
+
+/**
+ * arch_hibernation_header_restore - read the architecture specific data
+ * from the hibernation image header
+ * @addr: address to read the data from
+ */
+int arch_hibernation_header_restore(void *addr)
+{
+ struct restore_data_record *rdr = addr;
+
+ if (rdr->magic != RESTORE_MAGIC) {
+ pr_crit("Unrecognized hibernate image header format!\n");
+ return -EINVAL;
+ }
+
+ restore_jump_address = rdr->jump_address;
+ jump_address_phys = rdr->jump_address_phys;
+ restore_cr3 = rdr->cr3;
+
+ if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
+ pr_crit("Hibernate inconsistent memory map detected!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int relocate_restore_code(void)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ relocated_restore_code = get_safe_page(GFP_ATOMIC);
+ if (!relocated_restore_code)
+ return -ENOMEM;
+
+ __memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);
+
+ /* Make the page containing the relocated code executable */
+ pgd = (pgd_t *)__va(read_cr3_pa()) +
+ pgd_index(relocated_restore_code);
+ p4d = p4d_offset(pgd, relocated_restore_code);
+ if (p4d_leaf(*p4d)) {
+ set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
+ goto out;
+ }
+ pud = pud_offset(p4d, relocated_restore_code);
+ if (pud_leaf(*pud)) {
+ set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+ goto out;
+ }
+ pmd = pmd_offset(pud, relocated_restore_code);
+ if (pmd_leaf(*pmd)) {
+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+ goto out;
+ }
+ pte = pte_offset_kernel(pmd, relocated_restore_code);
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+out:
+ __flush_tlb_all();
+ return 0;
+}
+
+int arch_resume_nosmt(void)
+{
+ int ret;
+
+ /*
+ * We reached this while coming out of hibernation. This means
+ * that SMT siblings are sleeping in hlt, as mwait is not safe
+ * against control transition during resume (see comment in
+ * hibernate_resume_nonboot_cpu_disable()).
+ *
+ * If the resumed kernel has SMT disabled, we have to take all the
+ * SMT siblings out of hlt, and offline them again so that they
+ * end up in mwait proper.
+ *
+ * Called with hotplug disabled.
+ */
+ cpu_hotplug_enable();
+
+ ret = arch_cpu_rescan_dead_smt_siblings();
+
+ cpu_hotplug_disable();
+
+ return ret;
+}
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index 7d28c885d238..223d5bca29b8 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -1,24 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Hibernation support specific for i386 - temporary page tables
*
- * Distribute under GPLv2
- *
* Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*/
#include <linux/gfp.h>
#include <linux/suspend.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmzone.h>
-
-/* Defined in hibernate_asm_32.S */
-extern int restore_image(void);
-
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
+#include <asm/sections.h>
+#include <asm/suspend.h>
/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;
@@ -34,6 +29,7 @@ pgd_t *resume_pg_dir;
*/
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd_table;
@@ -43,11 +39,13 @@ static pmd_t *resume_one_md_table_init(pgd_t *pgd)
return NULL;
set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
- pud = pud_offset(pgd, 0);
+ p4d = p4d_offset(pgd, 0);
+ pud = pud_offset(p4d, 0);
BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
- pud = pud_offset(pgd, 0);
+ p4d = p4d_offset(pgd, 0);
+ pud = pud_offset(p4d, 0);
pmd_table = pmd_offset(pud, 0);
#endif
@@ -108,7 +106,7 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
* normal page tables.
* NOTE: We can mark everything as executable here
*/
- if (cpu_has_pse) {
+ if (boot_cpu_has(X86_FEATURE_PSE)) {
set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
pfn += PTRS_PER_PTE;
} else {
@@ -144,7 +142,33 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
#endif
}
-int swsusp_arch_resume(void)
+static int set_up_temporary_text_mapping(pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgd_base + pgd_index(restore_jump_address);
+
+ pmd = resume_one_md_table_init(pgd);
+ if (!pmd)
+ return -ENOMEM;
+
+ if (boot_cpu_has(X86_FEATURE_PSE)) {
+ set_pmd(pmd + pmd_index(restore_jump_address),
+ __pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
+ } else {
+ pte = resume_one_page_table_init(pmd);
+ if (!pte)
+ return -ENOMEM;
+ set_pte(pte + pte_index(restore_jump_address),
+ __pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
+ }
+
+ return 0;
+}
+
+asmlinkage int swsusp_arch_resume(void)
{
int error;
@@ -153,22 +177,22 @@ int swsusp_arch_resume(void)
return -ENOMEM;
resume_init_first_level_page_table(resume_pg_dir);
+
+ error = set_up_temporary_text_mapping(resume_pg_dir);
+ if (error)
+ return error;
+
error = resume_physical_mapping_init(resume_pg_dir);
if (error)
return error;
+ temp_pgt = __pa(resume_pg_dir);
+
+ error = relocate_restore_code();
+ if (error)
+ return error;
+
/* We have got enough memory and from now on we cannot recover */
restore_image();
return 0;
}
-
-/*
- * pfn_is_nosave - check if given pfn is in the 'nosave' section
- */
-
-int pfn_is_nosave(unsigned long pfn)
-{
- unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
- unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
- return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
-}
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index a0fde91c16cf..a595953f1d6d 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Hibernation support for x86-64
*
- * Distribute under GPLv2
- *
* Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
* Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
* Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
@@ -11,35 +10,79 @@
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
+#include <linux/scatterlist.h>
+#include <linux/kdebug.h>
+#include <linux/pgtable.h>
+
+#include <crypto/hash.h>
+#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mtrr.h>
+#include <asm/sections.h>
#include <asm/suspend.h>
+#include <asm/tlbflush.h>
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
-/* Defined in hibernate_asm_64.S */
-extern int restore_image(void);
+static int set_up_temporary_text_mapping(pgd_t *pgd)
+{
+ pmd_t *pmd;
+ pud_t *pud;
+ p4d_t *p4d = NULL;
+ pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
+ pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);
+
+ /* Filter out unsupported __PAGE_KERNEL* bits: */
+ pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
+ pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;
+
+ /*
+ * The new mapping only has to cover the page containing the image
+ * kernel's entry point (jump_address_phys), because the switch over to
+ * it is carried out by relocated code running from a page allocated
+ * specifically for this purpose and covered by the identity mapping, so
+ * the temporary kernel text mapping is only needed for the final jump.
+ * Moreover, in that mapping the virtual address of the image kernel's
+ * entry point must be the same as its virtual address in the image
+ * kernel (restore_jump_address), so the image kernel's
+ * restore_registers() code doesn't find itself in a different area of
+ * the virtual address space after switching over to the original page
+ * tables used by the image kernel.
+ */
+
+ if (pgtable_l5_enabled()) {
+ p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
+ if (!p4d)
+ return -ENOMEM;
+ }
-/*
- * Address to jump to in the last phase of restore in order to get to the image
- * kernel's text (this value is passed in the image header).
- */
-unsigned long restore_jump_address;
+ pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ if (!pud)
+ return -ENOMEM;
-/*
- * Value of the cr3 register from before the hibernation (this value is passed
- * in the image header).
- */
-unsigned long restore_cr3;
+ pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!pmd)
+ return -ENOMEM;
-pgd_t *temp_level4_pgt;
+ set_pmd(pmd + pmd_index(restore_jump_address),
+ __pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
+ set_pud(pud + pud_index(restore_jump_address),
+ __pud(__pa(pmd) | pgprot_val(pgtable_prot)));
+ if (p4d) {
+ p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
+ pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));
+
+ set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
+ set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
+ } else {
+ /* No p4d for 4-level paging: point the pgd to the pud page table */
+ pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
+ set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
+ }
-void *relocated_restore_code;
+ return 0;
+}
static void *alloc_pgt_page(void *context)
{
@@ -50,100 +93,50 @@ static int set_up_temporary_mappings(void)
{
struct x86_mapping_info info = {
.alloc_pgt_page = alloc_pgt_page,
- .pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
- .kernel_mapping = true,
+ .page_flag = __PAGE_KERNEL_LARGE_EXEC,
+ .offset = __PAGE_OFFSET,
};
unsigned long mstart, mend;
+ pgd_t *pgd;
int result;
int i;
- temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
- if (!temp_level4_pgt)
+ pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!pgd)
return -ENOMEM;
- /* It is safe to reuse the original kernel mapping */
- set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
- init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+ /* Prepare a temporary mapping for the kernel text */
+ result = set_up_temporary_text_mapping(pgd);
+ if (result)
+ return result;
/* Set up the direct mapping from scratch */
for (i = 0; i < nr_pfn_mapped; i++) {
mstart = pfn_mapped[i].start << PAGE_SHIFT;
mend = pfn_mapped[i].end << PAGE_SHIFT;
- result = kernel_ident_mapping_init(&info, temp_level4_pgt,
- mstart, mend);
-
+ result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
if (result)
return result;
}
+ temp_pgt = __pa(pgd);
return 0;
}
-int swsusp_arch_resume(void)
+asmlinkage int swsusp_arch_resume(void)
{
int error;
/* We have got enough memory and from now on we cannot recover */
- if ((error = set_up_temporary_mappings()))
+ error = set_up_temporary_mappings();
+ if (error)
return error;
- relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
- if (!relocated_restore_code)
- return -ENOMEM;
- memcpy(relocated_restore_code, &core_restore_code,
- &restore_registers - &core_restore_code);
+ error = relocate_restore_code();
+ if (error)
+ return error;
restore_image();
return 0;
}
-
-/*
- * pfn_is_nosave - check if given pfn is in the 'nosave' section
- */
-
-int pfn_is_nosave(unsigned long pfn)
-{
- unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
- unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
- return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
-}
-
-struct restore_data_record {
- unsigned long jump_address;
- unsigned long cr3;
- unsigned long magic;
-};
-
-#define RESTORE_MAGIC 0x0123456789ABCDEFUL
-
-/**
- * arch_hibernation_header_save - populate the architecture specific part
- * of a hibernation image header
- * @addr: address to save the data at
- */
-int arch_hibernation_header_save(void *addr, unsigned int max_size)
-{
- struct restore_data_record *rdr = addr;
-
- if (max_size < sizeof(struct restore_data_record))
- return -EOVERFLOW;
- rdr->jump_address = restore_jump_address;
- rdr->cr3 = restore_cr3;
- rdr->magic = RESTORE_MAGIC;
- return 0;
-}
-
-/**
- * arch_hibernation_header_restore - read the architecture specific data
- * from the hibernation image header
- * @addr: address to read the data from
- */
-int arch_hibernation_header_restore(void *addr)
-{
- struct restore_data_record *rdr = addr;
-
- restore_jump_address = rdr->jump_address;
- restore_cr3 = rdr->cr3;
- return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
-}
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 1d0fa0e24070..fb910d9f8471 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This may not use any stack, nor any variable that is not "NoSave":
*
@@ -11,10 +12,11 @@
#include <asm/page_types.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
+#include <asm/frame.h>
.text
-ENTRY(swsusp_arch_suspend)
+SYM_FUNC_START(swsusp_arch_suspend)
movl %esp, saved_context_esp
movl %ebx, saved_context_ebx
movl %ebp, saved_context_ebp
@@ -23,13 +25,31 @@ ENTRY(swsusp_arch_suspend)
pushfl
popl saved_context_eflags
+ /* save cr3 */
+ movl %cr3, %eax
+ movl %eax, restore_cr3
+
+ FRAME_BEGIN
call swsusp_save
- ret
+ FRAME_END
+ RET
+SYM_FUNC_END(swsusp_arch_suspend)
+
+SYM_CODE_START(restore_image)
+ /* prepare to jump to the image kernel */
+ movl restore_jump_address, %ebx
+ movl restore_cr3, %ebp
-ENTRY(restore_image)
movl mmu_cr4_features, %ecx
- movl resume_pg_dir, %eax
- subl $__PAGE_OFFSET, %eax
+
+ /* jump to relocated restore code */
+ movl relocated_restore_code, %eax
+ jmpl *%eax
+SYM_CODE_END(restore_image)
+
+/* code below has been relocated to a safe page */
+SYM_CODE_START(core_restore_code)
+ movl temp_pgt, %eax
movl %eax, %cr3
jecxz 1f # cr4 Pentium and higher, skip if zero
@@ -48,19 +68,22 @@ copy_loop:
movl pbe_address(%edx), %esi
movl pbe_orig_address(%edx), %edi
- movl $1024, %ecx
- rep
- movsl
+ movl $(PAGE_SIZE >> 2), %ecx
+ rep movsl
movl pbe_next(%edx), %edx
jmp copy_loop
.p2align 4,,7
done:
+ jmpl *%ebx
+SYM_CODE_END(core_restore_code)
+
+ /* code below belongs to the image kernel */
+ .align PAGE_SIZE
+SYM_FUNC_START(restore_registers)
/* go back to the original page tables */
- movl $swapper_pg_dir, %eax
- subl $__PAGE_OFFSET, %eax
- movl %eax, %cr3
+ movl %ebp, %cr3
movl mmu_cr4_features, %ecx
jecxz 1f # cr4 Pentium and higher, skip if zero
movl %ecx, %cr4; # turn PGE back on
@@ -81,4 +104,8 @@ done:
xorl %eax, %eax
- ret
+ /* tell the hibernation core that we've just restored the memory */
+ movl %eax, in_suspend
+
+ RET
+SYM_FUNC_END(restore_registers)
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 3c4469a7a929..c73be0a02a6c 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Hibernation support for x86-64
*
- * Distribute under GPLv2.
- *
* Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl>
* Copyright 2005 Andi Kleen <ak@suse.de>
* Copyright 2004 Pavel Machek <pavel@suse.cz>
@@ -21,8 +20,57 @@
#include <asm/page_types.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
+#include <asm/frame.h>
+#include <asm/nospec-branch.h>
+
+ /* code below belongs to the image kernel */
+ .align PAGE_SIZE
+SYM_FUNC_START(restore_registers)
+ ENDBR
+ /* go back to the original page tables */
+ movq %r9, %cr3
+
+ /* Flush TLB, including "global" things (vmalloc) */
+ movq mmu_cr4_features(%rip), %rax
+ movq %rax, %rdx
+ andq $~(X86_CR4_PGE), %rdx
+ movq %rdx, %cr4; # turn off PGE
+ movq %cr3, %rcx; # flush TLB
+ movq %rcx, %cr3
+ movq %rax, %cr4; # turn PGE back on
+
+ /* We don't restore %rax, it must be 0 anyway */
+ movq $saved_context, %rax
+ movq pt_regs_sp(%rax), %rsp
+ movq pt_regs_bp(%rax), %rbp
+ movq pt_regs_si(%rax), %rsi
+ movq pt_regs_di(%rax), %rdi
+ movq pt_regs_bx(%rax), %rbx
+ movq pt_regs_cx(%rax), %rcx
+ movq pt_regs_dx(%rax), %rdx
+ movq pt_regs_r8(%rax), %r8
+ movq pt_regs_r9(%rax), %r9
+ movq pt_regs_r10(%rax), %r10
+ movq pt_regs_r11(%rax), %r11
+ movq pt_regs_r12(%rax), %r12
+ movq pt_regs_r13(%rax), %r13
+ movq pt_regs_r14(%rax), %r14
+ movq pt_regs_r15(%rax), %r15
+ pushq pt_regs_flags(%rax)
+ popfq
+
+ /* Saved in save_processor_state. */
+ lgdt saved_context_gdt_desc(%rax)
+
+ xorl %eax, %eax
-ENTRY(swsusp_arch_suspend)
+ /* tell the hibernation core that we've just restored the memory */
+ movq %rax, in_suspend(%rip)
+
+ RET
+SYM_FUNC_END(restore_registers)
+
+SYM_FUNC_START(swsusp_arch_suspend)
movq $saved_context, %rax
movq %rsp, pt_regs_sp(%rax)
movq %rbp, pt_regs_bp(%rax)
@@ -42,109 +90,62 @@ ENTRY(swsusp_arch_suspend)
pushfq
popq pt_regs_flags(%rax)
- /* save the address of restore_registers */
- movq $restore_registers, %rax
- movq %rax, restore_jump_address(%rip)
/* save cr3 */
movq %cr3, %rax
movq %rax, restore_cr3(%rip)
+ FRAME_BEGIN
call swsusp_save
- ret
-
-ENTRY(restore_image)
- /* switch to temporary page tables */
- movq $__PAGE_OFFSET, %rdx
- movq temp_level4_pgt(%rip), %rax
- subq %rdx, %rax
- movq %rax, %cr3
- /* Flush TLB */
- movq mmu_cr4_features(%rip), %rax
- movq %rax, %rdx
- andq $~(X86_CR4_PGE), %rdx
- movq %rdx, %cr4; # turn off PGE
- movq %cr3, %rcx; # flush TLB
- movq %rcx, %cr3;
- movq %rax, %cr4; # turn PGE back on
+ FRAME_END
+ RET
+SYM_FUNC_END(swsusp_arch_suspend)
+SYM_FUNC_START(restore_image)
/* prepare to jump to the image kernel */
- movq restore_jump_address(%rip), %rax
- movq restore_cr3(%rip), %rbx
+ movq restore_jump_address(%rip), %r8
+ movq restore_cr3(%rip), %r9
+
+ /* prepare to switch to temporary page tables */
+ movq temp_pgt(%rip), %rax
+ movq mmu_cr4_features(%rip), %rbx
/* prepare to copy image data to their original locations */
movq restore_pblist(%rip), %rdx
+
+ /* jump to relocated restore code */
movq relocated_restore_code(%rip), %rcx
+ ANNOTATE_RETPOLINE_SAFE
jmpq *%rcx
+SYM_FUNC_END(restore_image)
/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
-loop:
+SYM_FUNC_START(core_restore_code)
+ ENDBR
+ /* switch to temporary page tables */
+ movq %rax, %cr3
+ /* flush TLB */
+ movq %rbx, %rcx
+ andq $~(X86_CR4_PGE), %rcx
+ movq %rcx, %cr4; # turn off PGE
+ movq %cr3, %rcx; # flush TLB
+ movq %rcx, %cr3;
+ movq %rbx, %cr4; # turn PGE back on
+.Lloop:
testq %rdx, %rdx
- jz done
+ jz .Ldone
/* get addresses from the pbe and copy the page */
movq pbe_address(%rdx), %rsi
movq pbe_orig_address(%rdx), %rdi
movq $(PAGE_SIZE >> 3), %rcx
- rep
- movsq
+ rep movsq
/* progress to the next pbe */
movq pbe_next(%rdx), %rdx
- jmp loop
-done:
- /* jump to the restore_registers address from the image header */
- jmpq *%rax
- /*
- * NOTE: This assumes that the boot kernel's text mapping covers the
- * image kernel's page containing restore_registers and the address of
- * this page is the same as in the image kernel's text mapping (it
- * should always be true, because the text mapping is linear, starting
- * from 0, and is supposed to cover the entire kernel text for every
- * kernel).
- *
- * code below belongs to the image kernel
- */
-
-ENTRY(restore_registers)
- /* go back to the original page tables */
- movq %rbx, %cr3
-
- /* Flush TLB, including "global" things (vmalloc) */
- movq mmu_cr4_features(%rip), %rax
- movq %rax, %rdx
- andq $~(X86_CR4_PGE), %rdx
- movq %rdx, %cr4; # turn off PGE
- movq %cr3, %rcx; # flush TLB
- movq %rcx, %cr3
- movq %rax, %cr4; # turn PGE back on
+ jmp .Lloop
- /* We don't restore %rax, it must be 0 anyway */
- movq $saved_context, %rax
- movq pt_regs_sp(%rax), %rsp
- movq pt_regs_bp(%rax), %rbp
- movq pt_regs_si(%rax), %rsi
- movq pt_regs_di(%rax), %rdi
- movq pt_regs_bx(%rax), %rbx
- movq pt_regs_cx(%rax), %rcx
- movq pt_regs_dx(%rax), %rdx
- movq pt_regs_r8(%rax), %r8
- movq pt_regs_r9(%rax), %r9
- movq pt_regs_r10(%rax), %r10
- movq pt_regs_r11(%rax), %r11
- movq pt_regs_r12(%rax), %r12
- movq pt_regs_r13(%rax), %r13
- movq pt_regs_r14(%rax), %r14
- movq pt_regs_r15(%rax), %r15
- pushq pt_regs_flags(%rax)
- popfq
-
- /* Saved in save_processor_state. */
- lgdt saved_context_gdt_desc(%rax)
-
- xorq %rax, %rax
-
- /* tell the hibernation core that we've just restored the memory */
- movq %rax, in_suspend(%rip)
-
- ret
+.Ldone:
+ /* jump to the restore_registers address from the image header */
+ ANNOTATE_RETPOLINE_SAFE
+ jmpq *%r8
+SYM_FUNC_END(core_restore_code)