Diffstat (limited to 'arch/x86/boot/compressed/head_64.S')
-rw-r--r-- | arch/x86/boot/compressed/head_64.S | 64
1 file changed, 44 insertions, 20 deletions
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index dea95301196b..d33f060900d2 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -189,11 +189,11 @@ SYM_FUNC_START(startup_32)
 	subl	$32, %eax	/* Encryption bit is always above bit 31 */
 	bts	%eax, %edx	/* Set encryption mask for page tables */
 	/*
-	 * Mark SEV as active in sev_status so that startup32_check_sev_cbit()
-	 * will do a check. The sev_status memory will be fully initialized
-	 * with the contents of MSR_AMD_SEV_STATUS later in
-	 * set_sev_encryption_mask(). For now it is sufficient to know that SEV
-	 * is active.
+	 * Set MSR_AMD64_SEV_ENABLED_BIT in sev_status so that
+	 * startup32_check_sev_cbit() will do a check. sev_enable() will
+	 * initialize sev_status with all the bits reported by
+	 * MSR_AMD_SEV_STATUS later, but only MSR_AMD64_SEV_ENABLED_BIT
+	 * needs to be set for now.
 	 */
 	movl	$1, rva(sev_status)(%ebp)
 1:
@@ -289,7 +289,7 @@ SYM_FUNC_START(startup_32)
 	pushl	%eax
 
 	/* Enter paged protected Mode, activating Long Mode */
-	movl	$(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
+	movl	$CR0_STATE, %eax
 	movl	%eax, %cr0
 
 	/* Jump from 32bit compatibility mode into 64bit mode. */
@@ -447,6 +447,23 @@ SYM_CODE_START(startup_64)
 	call	load_stage1_idt
 	popq	%rsi
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/*
+	 * Now that the stage1 interrupt handlers are set up, #VC exceptions from
+	 * CPUID instructions can be properly handled for SEV-ES guests.
+	 *
+	 * For SEV-SNP, the CPUID table also needs to be set up in advance of any
+	 * CPUID instructions being issued, so go ahead and do that now via
+	 * sev_enable(), which will also handle the rest of the SEV-related
+	 * detection/setup to ensure that has been done in advance of any dependent
+	 * code.
+	 */
+	pushq	%rsi
+	movq	%rsi, %rdi		/* real mode address */
+	call	sev_enable
+	popq	%rsi
+#endif
+
 	/*
 	 * paging_prepare() sets up the trampoline and checks if we need to
 	 * enable 5-level paging.
@@ -558,17 +575,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	shrq	$3, %rcx
 	rep	stosq
 
-/*
- * If running as an SEV guest, the encryption mask is required in the
- * page-table setup code below. When the guest also has SEV-ES enabled
- * set_sev_encryption_mask() will cause #VC exceptions, but the stage2
- * handler can't map its GHCB because the page-table is not set up yet.
- * So set up the encryption mask here while still on the stage1 #VC
- * handler. Then load stage2 IDT and switch to the kernel's own
- * page-table.
- */
 	pushq	%rsi
-	call	set_sev_encryption_mask
 	call	load_stage2_idt
 
 	/* Pass boot_params to initialize_identity_maps() */
@@ -642,12 +649,28 @@ SYM_CODE_START(trampoline_32bit_src)
 	movl	$MSR_EFER, %ecx
 	rdmsr
 	btsl	$_EFER_LME, %eax
+	/* Avoid writing EFER if no change was made (for TDX guest) */
+	jc	1f
 	wrmsr
-	popl	%edx
+1:	popl	%edx
 	popl	%ecx
 
+#ifdef CONFIG_X86_MCE
+	/*
+	 * Preserve CR4.MCE if the kernel will enable #MC support.
+	 * Clearing MCE may fault in some environments (that also force #MC
+	 * support). Any machine check that occurs before #MC support is fully
+	 * configured will crash the system regardless of the CR4.MCE value set
+	 * here.
+	 */
+	movl	%cr4, %eax
+	andl	$X86_CR4_MCE, %eax
+#else
+	movl	$0, %eax
+#endif
+
 	/* Enable PAE and LA57 (if required) paging modes */
-	movl	$X86_CR4_PAE, %eax
+	orl	$X86_CR4_PAE, %eax
 	testl	%edx, %edx
 	jz	1f
 	orl	$X86_CR4_LA57, %eax
@@ -661,8 +684,9 @@ SYM_CODE_START(trampoline_32bit_src)
 	pushl	$__KERNEL_CS
 	pushl	%eax
 
-	/* Enable paging again */
-	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
+	/* Enable paging again. */
+	movl	%cr0, %eax
+	btsl	$X86_CR0_PG_BIT, %eax
 	movl	%eax, %cr0
 
 	lret
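[Annotation; not part of the diff above.] The "jc 1f" added after "btsl $_EFER_LME, %eax" works because BTS copies the bit's previous value into the carry flag before setting it, so the WRMSR is skipped whenever EFER.LME was already set and nothing actually changed; per the in-diff comment, that matters for TDX guests, where the write would otherwise trap. A minimal C model of the sequence, assuming only the architectural EFER layout (LME is bit 8); the helper names bts32() and set_lme() are hypothetical and exist purely for this illustration:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define _EFER_LME 8	/* EFER.LME bit position */

	/* btsl: set a bit and return its previous value (what CF receives). */
	static bool bts32(uint32_t *val, unsigned int bit)
	{
		bool old = (*val >> bit) & 1;

		*val |= UINT32_C(1) << bit;
		return old;
	}

	/* The patched sequence: write the MSR only when LME actually changed. */
	static bool set_lme(uint32_t *efer_lo)
	{
		if (bts32(efer_lo, _EFER_LME))
			return false;	/* jc 1f: bit already set, skip wrmsr */
		return true;		/* wrmsr would run here */
	}

	int main(void)
	{
		uint32_t efer = UINT32_C(1) << _EFER_LME; /* entered with LME set */

		printf("wrmsr needed: %d\n", set_lme(&efer));	/* prints 0 */
		return 0;
	}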
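[Annotation; not part of the diff above.] The CR4 handling likewise changes from loading a constant to a read-modify-write: only CR4.MCE is carried over from the current value (and only under CONFIG_X86_MCE), then PAE and, when 5-level paging is wanted, LA57 are ORed in; CR0 keeps its current contents and only has the PG bit set on top. A C sketch of the resulting values, using the architectural bit positions (PAE=5, MCE=6, LA57=12, PG=31); trampoline_cr4() and trampoline_cr0() are hypothetical names for this illustration:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define X86_CR4_PAE	(UINT32_C(1) << 5)	/* Physical Address Extension */
	#define X86_CR4_MCE	(UINT32_C(1) << 6)	/* Machine Check Enable */
	#define X86_CR4_LA57	(UINT32_C(1) << 12)	/* 57-bit linear addresses */
	#define X86_CR0_PG_BIT	31			/* Paging enable */

	/* CR4 as the patched trampoline computes it. */
	static uint32_t trampoline_cr4(uint32_t old_cr4, bool la57, bool config_mce)
	{
		/* Preserve only CR4.MCE, and only when #MC support is built in. */
		uint32_t cr4 = config_mce ? (old_cr4 & X86_CR4_MCE) : 0;

		cr4 |= X86_CR4_PAE;		/* orl $X86_CR4_PAE, %eax */
		if (la57)
			cr4 |= X86_CR4_LA57;	/* orl $X86_CR4_LA57, %eax */
		return cr4;
	}

	/* CR0 after "movl %cr0, %eax; btsl $X86_CR0_PG_BIT, %eax". */
	static uint32_t trampoline_cr0(uint32_t old_cr0)
	{
		return old_cr0 | (UINT32_C(1) << X86_CR0_PG_BIT);
	}

	int main(void)
	{
		/* Example: firmware left MCE set; paging was just switched off. */
		printf("CR4 = %#x\n",
		       (unsigned)trampoline_cr4(X86_CR4_MCE | X86_CR4_PAE, true, true));
		printf("CR0 = %#x\n", (unsigned)trampoline_cr0(0x00050033u));
		return 0;
	}

The 0x00050033 sample input is CR0_STATE with PG clear (CR0_STATE is 0x80050033 per arch/x86/include/asm/processor-flags.h), so the modeled CR0 result matches the $CR0_STATE that startup_32 now loads; the point of the read-modify-write, per the in-diff comments, is to avoid clearing bits that some environments force on.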