Diffstat (limited to 'arch/x86/power')
-rw-r--r--   arch/x86/power/Makefile              2
-rw-r--r--   arch/x86/power/cpu.c                80
-rw-r--r--   arch/x86/power/hibernate.c          31
-rw-r--r--   arch/x86/power/hibernate_asm_32.S    3
-rw-r--r--   arch/x86/power/hibernate_asm_64.S    5
5 files changed, 48 insertions, 73 deletions
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index 379777572bc9..e0cd7afd5302 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -5,7 +5,7 @@
 CFLAGS_cpu.o	:= -fno-stack-protector
 
 # Clang may incorrectly inline functions with stack protector enabled into
-# __restore_processor_state(): https://bugs.llvm.org/show_bug.cgi?id=47479
+# __restore_processor_state(): https://llvm.org/pr47479
 CFLAGS_REMOVE_cpu.o := $(CC_FLAGS_LTO)
 
 obj-$(CONFIG_PM_SLEEP)	+= cpu.o
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 236447ee9beb..916441f5e85c 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -27,6 +27,8 @@
 #include <asm/mmu_context.h>
 #include <asm/cpu_device_id.h>
 #include <asm/microcode.h>
+#include <asm/msr.h>
+#include <asm/fred.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -43,7 +45,7 @@ static void msr_save_context(struct saved_context *ctxt)
 
 	while (msr < end) {
 		if (msr->valid)
-			rdmsrl(msr->info.msr_no, msr->info.reg.q);
+			rdmsrq(msr->info.msr_no, msr->info.reg.q);
 		msr++;
 	}
 }
@@ -55,7 +57,7 @@ static void msr_restore_context(struct saved_context *ctxt)
 
 	while (msr < end) {
 		if (msr->valid)
-			wrmsrl(msr->info.msr_no, msr->info.reg.q);
+			wrmsrq(msr->info.msr_no, msr->info.reg.q);
 		msr++;
 	}
 }
@@ -109,12 +111,12 @@ static void __save_processor_state(struct saved_context *ctxt)
 	savesegment(ds, ctxt->ds);
 	savesegment(es, ctxt->es);
 
-	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
-	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
-	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+	rdmsrq(MSR_FS_BASE, ctxt->fs_base);
+	rdmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+	rdmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
 	mtrr_save_fixed_ranges(NULL);
 
-	rdmsrl(MSR_EFER, ctxt->efer);
+	rdmsrq(MSR_EFER, ctxt->efer);
 #endif
 
 	/*
@@ -124,7 +126,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 	ctxt->cr2 = read_cr2();
 	ctxt->cr3 = __read_cr3();
 	ctxt->cr4 = __read_cr4();
-	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
+	ctxt->misc_enable_saved = !rdmsrq_safe(MSR_IA32_MISC_ENABLE,
 					       &ctxt->misc_enable);
 	msr_save_context(ctxt);
 }
@@ -197,7 +199,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	struct cpuinfo_x86 *c;
 
 	if (ctxt->misc_enable_saved)
-		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
+		wrmsrq(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
 	/*
 	 * control registers
 	 */
@@ -207,7 +209,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	__write_cr4(ctxt->cr4);
 #else
 /* CONFIG X86_64 */
-	wrmsrl(MSR_EFER, ctxt->efer);
+	wrmsrq(MSR_EFER, ctxt->efer);
 	__write_cr4(ctxt->cr4);
 #endif
 	write_cr3(ctxt->cr3);
@@ -230,7 +232,20 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	 * handlers or in complicated helpers like load_gs_index().
 	 */
 #ifdef CONFIG_X86_64
-	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+	wrmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+
+	/*
+	 * Reinitialize FRED to ensure the FRED MSRs contain the same values
+	 * as before hibernation.
+	 *
+	 * Note, the setup of FRED RSPs requires access to percpu data
+	 * structures.  Therefore, FRED reinitialization can only occur after
+	 * the percpu access pointer (i.e., MSR_GS_BASE) is restored.
+	 */
+	if (ctxt->cr4 & X86_CR4_FRED) {
+		cpu_init_fred_exceptions();
+		cpu_init_fred_rsps();
+	}
 #else
 	loadsegment(fs, __KERNEL_PERCPU);
 #endif
@@ -253,8 +268,8 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	 * restoring the selectors clobbers the bases.  Keep in mind
 	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
 	 */
-	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+	wrmsrq(MSR_FS_BASE, ctxt->fs_base);
+	wrmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#else
 	loadsegment(gs, ctxt->gs);
 #endif
@@ -288,7 +303,7 @@ EXPORT_SYMBOL(restore_processor_state);
 #endif
 
 #if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
-static void resume_play_dead(void)
+static void __noreturn resume_play_dead(void)
 {
 	play_dead_common();
 	tboot_shutdown(TB_SHUTDOWN_WFS);
@@ -351,43 +366,6 @@ static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
 	case PM_HIBERNATION_PREPARE:
 		ret = bsp_check();
 		break;
-#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
-	case PM_RESTORE_PREPARE:
-		/*
-		 * When system resumes from hibernation, online CPU0 because
-		 * 1. it's required for resume and
-		 * 2. the CPU was online before hibernation
-		 */
-		if (!cpu_online(0))
-			_debug_hotplug_cpu(0, 1);
-		break;
-	case PM_POST_RESTORE:
-		/*
-		 * When a resume really happens, this code won't be called.
-		 *
-		 * This code is called only when user space hibernation software
-		 * prepares for snapshot device during boot time. So we just
-		 * call _debug_hotplug_cpu() to restore to CPU0's state prior to
-		 * preparing the snapshot device.
-		 *
-		 * This works for normal boot case in our CPU0 hotplug debug
-		 * mode, i.e. CPU0 is offline and user mode hibernation
-		 * software initializes during boot time.
-		 *
-		 * If CPU0 is online and user application accesses snapshot
-		 * device after boot time, this will offline CPU0 and user may
-		 * see different CPU0 state before and after accessing
-		 * the snapshot device. But hopefully this is not a case when
-		 * user debugging CPU0 hotplug. Even if users hit this case,
-		 * they can easily online CPU0 back.
-		 *
-		 * To simplify this debug code, we only consider normal boot
-		 * case. Otherwise we need to remember CPU0's state and restore
-		 * to that state and resolve racy conditions etc.
-		 */
-		_debug_hotplug_cpu(0, 0);
-		break;
-#endif
 	default:
 		break;
 	}
@@ -437,7 +415,7 @@ static int msr_build_context(const u32 *msr_id, const int num)
 		u64 dummy;
 
 		msr_array[i].info.msr_no	= msr_id[j];
-		msr_array[i].valid		= !rdmsrl_safe(msr_id[j], &dummy);
+		msr_array[i].valid		= !rdmsrq_safe(msr_id[j], &dummy);
 		msr_array[i].info.reg.q		= 0;
 	}
 	saved_msrs->num   = total_num;
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 6f955eb1e163..a2294c1649f6 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -42,6 +42,7 @@ unsigned long relocated_restore_code __visible;
 
 /**
  * pfn_is_nosave - check if given pfn is in the 'nosave' section
+ * @pfn: the page frame number to check.
  */
 int pfn_is_nosave(unsigned long pfn)
 {
@@ -86,7 +87,10 @@ static inline u32 compute_e820_crc32(struct e820_table *table)
 /**
  * arch_hibernation_header_save - populate the architecture specific part
  *	of a hibernation image header
- * @addr: address to save the data at
+ * @addr: address where architecture specific header data will be saved.
+ * @max_size: maximum size of architecture specific data in hibernation header.
+ *
+ * Return: 0 on success, -EOVERFLOW if max_size is insufficient.
  */
 int arch_hibernation_header_save(void *addr, unsigned int max_size)
 {
@@ -165,17 +169,17 @@ int relocate_restore_code(void)
 	pgd = (pgd_t *)__va(read_cr3_pa()) +
 		pgd_index(relocated_restore_code);
 	p4d = p4d_offset(pgd, relocated_restore_code);
-	if (p4d_large(*p4d)) {
+	if (p4d_leaf(*p4d)) {
 		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
 		goto out;
 	}
 	pud = pud_offset(p4d, relocated_restore_code);
-	if (pud_large(*pud)) {
+	if (pud_leaf(*pud)) {
 		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
 		goto out;
 	}
 	pmd = pmd_offset(pud, relocated_restore_code);
-	if (pmd_large(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
 		goto out;
 	}
@@ -188,7 +192,8 @@ out:
 
 int arch_resume_nosmt(void)
 {
-	int ret = 0;
+	int ret;
+
 	/*
 	 * We reached this while coming out of hibernation. This means
 	 * that SMT siblings are sleeping in hlt, as mwait is not safe
@@ -202,18 +207,10 @@ int arch_resume_nosmt(void)
 	 * Called with hotplug disabled.
 	 */
 	cpu_hotplug_enable();
-	if (cpu_smt_control == CPU_SMT_DISABLED ||
-	    cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
-		enum cpuhp_smt_control old = cpu_smt_control;
-
-		ret = cpuhp_smt_enable();
-		if (ret)
-			goto out;
-		ret = cpuhp_smt_disable(old);
-		if (ret)
-			goto out;
-	}
-out:
+
+	ret = arch_cpu_rescan_dead_smt_siblings();
+
 	cpu_hotplug_disable();
+
 	return ret;
 }
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 5606a15cf9a1..fb910d9f8471 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -69,8 +69,7 @@ copy_loop:
 	movl	pbe_orig_address(%edx), %edi
 
 	movl	$(PAGE_SIZE >> 2), %ecx
-	rep
-	movsl
+	rep movsl
 
 	movl	pbe_next(%edx), %edx
 	jmp	copy_loop
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 0a0539e1cc81..c73be0a02a6c 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -26,6 +26,7 @@
 	 /* code below belongs to the image kernel */
 	.align PAGE_SIZE
 SYM_FUNC_START(restore_registers)
+	ENDBR
 	/* go back to the original page tables */
 	movq	%r9, %cr3
 
@@ -119,6 +120,7 @@ SYM_FUNC_END(restore_image)
 
 /* code below has been relocated to a safe page */
 SYM_FUNC_START(core_restore_code)
+	ENDBR
 	/* switch to temporary page tables */
 	movq	%rax, %cr3
 	/* flush TLB */
@@ -136,8 +138,7 @@ SYM_FUNC_START(core_restore_code)
 	movq	pbe_address(%rdx), %rsi
 	movq	pbe_orig_address(%rdx), %rdi
 	movq	$(PAGE_SIZE >> 3), %rcx
-	rep
-	movsq
+	rep movsq
 
 	/* progress to the next pbe */
 	movq	pbe_next(%rdx), %rdx
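Aside on the cpu.c hunks above: the rdmsrl/wrmsrl to rdmsrq/wrmsrq rename is mechanical, but the surrounding save/restore machinery follows a three-phase pattern that is easier to see in isolation. The sketch below is illustrative only and not part of the patch; fake_rdmsr_safe(), fake_rdmsr() and fake_wrmsr() are hypothetical stand-ins for the kernel's rdmsrq_safe()/rdmsrq()/wrmsrq(), and the struct is a simplified saved_msr. It mirrors msr_build_context() (probe each MSR once and record validity), msr_save_context() (read only the valid entries) and msr_restore_context() (write them back):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct saved_msr {
	uint32_t msr_no;
	uint64_t value;
	bool     valid;
};

/* Hypothetical accessors standing in for rdmsrq_safe()/rdmsrq()/wrmsrq(). */
extern int  fake_rdmsr_safe(uint32_t msr, uint64_t *val);	/* 0 on success */
extern void fake_rdmsr(uint32_t msr, uint64_t *val);
extern void fake_wrmsr(uint32_t msr, uint64_t val);

/* Probe phase: record which MSRs actually exist on this CPU. */
static void build_context(struct saved_msr *arr, const uint32_t *ids, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		uint64_t dummy;

		arr[i].msr_no = ids[i];
		arr[i].value  = 0;
		arr[i].valid  = !fake_rdmsr_safe(ids[i], &dummy);
	}
}

/* Save phase: read only the MSRs that probed as present. */
static void save_context(struct saved_msr *arr, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (arr[i].valid)
			fake_rdmsr(arr[i].msr_no, &arr[i].value);
}

/* Restore phase: write back exactly what was saved. */
static void restore_context(const struct saved_msr *arr, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (arr[i].valid)
			fake_wrmsr(arr[i].msr_no, arr[i].value);
}

Probing with the _safe variant first means the save and restore phases never fault on an MSR the CPU does not implement, which is exactly why msr_build_context() discards the probe value into a dummy.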
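A note on the rep movsl / rep movsq folding in the assembly hunks: the one-line form is byte-for-byte the same operation as the two-line form (the rep prefix and the string instruction were always a single instruction), and the iteration counts follow from the operand size. movsl copies 4 bytes, so one 4096-byte page takes PAGE_SIZE >> 2 = 1024 iterations; movsq copies 8 bytes, so it takes PAGE_SIZE >> 3 = 512. A hedged C equivalent of the 64-bit per-page copy, assuming 4 KiB pages (copy_page_quads() is a hypothetical name, not a kernel helper):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* One page per call: 512 quadword moves, as in "rep movsq" with %rcx = 512. */
static void copy_page_quads(uint64_t *dst, const uint64_t *src)
{
	for (size_t i = 0; i < (PAGE_SIZE >> 3); i++)
		dst[i] = src[i];
}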
