From d07f46f9f51afc7fc9f021eae19eba3c2e7870ac Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 7 Sep 2020 15:15:03 +0200 Subject: KVM: SVM: Add GHCB definitions Extend the vmcb_save_area with SEV-ES fields and add a new 'struct ghcb' which will be used for guest-hypervisor communication. Signed-off-by: Tom Lendacky Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-3-joro@8bytes.org --- arch/x86/include/asm/svm.h | 51 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 8a1f5382a4ea..acac55d6f941 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -200,13 +200,60 @@ struct __attribute__ ((__packed__)) vmcb_save_area { u64 br_to; u64 last_excp_from; u64 last_excp_to; + + /* + * The following part of the save area is valid only for + * SEV-ES guests when referenced through the GHCB. + */ + u8 reserved_7[104]; + u64 reserved_8; /* rax already available at 0x01f8 */ + u64 rcx; + u64 rdx; + u64 rbx; + u64 reserved_9; /* rsp already available at 0x01d8 */ + u64 rbp; + u64 rsi; + u64 rdi; + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; + u8 reserved_10[16]; + u64 sw_exit_code; + u64 sw_exit_info_1; + u64 sw_exit_info_2; + u64 sw_scratch; + u8 reserved_11[56]; + u64 xcr0; + u8 valid_bitmap[16]; + u64 x87_state_gpa; }; +struct ghcb { + struct vmcb_save_area save; + u8 reserved_save[2048 - sizeof(struct vmcb_save_area)]; + + u8 shared_buffer[2032]; + + u8 reserved_1[10]; + u16 protocol_version; /* negotiated SEV-ES/GHCB protocol version */ + u32 ghcb_usage; +} __packed; + + +#define EXPECTED_VMCB_SAVE_AREA_SIZE 1032 +#define EXPECTED_VMCB_CONTROL_AREA_SIZE 256 +#define EXPECTED_GHCB_SIZE PAGE_SIZE static inline void __unused_size_checks(void) { - BUILD_BUG_ON(sizeof(struct vmcb_save_area) != 0x298); - BUILD_BUG_ON(sizeof(struct vmcb_control_area) != 256); + BUILD_BUG_ON(sizeof(struct vmcb_save_area) != EXPECTED_VMCB_SAVE_AREA_SIZE); + BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE); + BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE); } struct __attribute__ ((__packed__)) vmcb { -- cgit From 3702c2f4eed2188440f65ecdfc89165106fe565d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:04 +0200 Subject: KVM: SVM: Add GHCB Accessor functions Building a correct GHCB for the hypervisor requires setting valid bits in the GHCB. Simplify that process by providing accessor functions to set values and update the valid bitmap, and to check the valid bitmap in KVM.
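[ Illustration, not part of the patch: a sketch of how the generated accessors are meant to be used by a caller such as the #VC exception handler added later in this series. The function name and the ghcb pointer are hypothetical; SVM_EXIT_CPUID is the existing exit code from <uapi/asm/svm.h>:

	/* Hypothetical caller: each ghcb_set_*() stores the value and also sets
	 * the corresponding bit in save.valid_bitmap */
	static void ghcb_prepare_cpuid(struct ghcb *ghcb, struct pt_regs *regs)
	{
		ghcb_set_rax(ghcb, regs->ax);
		ghcb_set_rcx(ghcb, regs->cx);
		ghcb_set_sw_exit_code(ghcb, SVM_EXIT_CPUID);
		ghcb_set_sw_exit_info_1(ghcb, 0);
		ghcb_set_sw_exit_info_2(ghcb, 0);
	}

On the KVM side, ghcb_rax_is_valid(ghcb) and friends can then verify that the guest actually supplied a field before consuming it. ]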
Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-4-joro@8bytes.org --- arch/x86/include/asm/svm.h | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index acac55d6f941..06e52585aed3 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -345,4 +345,47 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) +/* GHCB Accessor functions */ + +#define GHCB_BITMAP_IDX(field) \ + (offsetof(struct vmcb_save_area, field) / sizeof(u64)) + +#define DEFINE_GHCB_ACCESSORS(field) \ + static inline bool ghcb_##field##_is_valid(const struct ghcb *ghcb) \ + { \ + return test_bit(GHCB_BITMAP_IDX(field), \ + (unsigned long *)&ghcb->save.valid_bitmap); \ + } \ + \ + static inline void ghcb_set_##field(struct ghcb *ghcb, u64 value) \ + { \ + __set_bit(GHCB_BITMAP_IDX(field), \ + (unsigned long *)&ghcb->save.valid_bitmap); \ + ghcb->save.field = value; \ + } + +DEFINE_GHCB_ACCESSORS(cpl) +DEFINE_GHCB_ACCESSORS(rip) +DEFINE_GHCB_ACCESSORS(rsp) +DEFINE_GHCB_ACCESSORS(rax) +DEFINE_GHCB_ACCESSORS(rcx) +DEFINE_GHCB_ACCESSORS(rdx) +DEFINE_GHCB_ACCESSORS(rbx) +DEFINE_GHCB_ACCESSORS(rbp) +DEFINE_GHCB_ACCESSORS(rsi) +DEFINE_GHCB_ACCESSORS(rdi) +DEFINE_GHCB_ACCESSORS(r8) +DEFINE_GHCB_ACCESSORS(r9) +DEFINE_GHCB_ACCESSORS(r10) +DEFINE_GHCB_ACCESSORS(r11) +DEFINE_GHCB_ACCESSORS(r12) +DEFINE_GHCB_ACCESSORS(r13) +DEFINE_GHCB_ACCESSORS(r14) +DEFINE_GHCB_ACCESSORS(r15) +DEFINE_GHCB_ACCESSORS(sw_exit_code) +DEFINE_GHCB_ACCESSORS(sw_exit_info_1) +DEFINE_GHCB_ACCESSORS(sw_exit_info_2) +DEFINE_GHCB_ACCESSORS(sw_scratch) +DEFINE_GHCB_ACCESSORS(xcr0) + #endif -- cgit From 976bc5e2aceedef13e0ba1f0e6e372a22164aa0c Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 7 Sep 2020 15:15:05 +0200 Subject: KVM: SVM: Use __packed shorthand Use the shorthand to make it more readable. No functional changes. 
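[ Context for the cleanup below, as an illustrative snippet that is not from the patch: __packed is the kernel's shorthand from <linux/compiler_attributes.h>, so the two spellings are equivalent:

	/* Both declarations produce an identically packed layout */
	struct seg_old { u16 selector; u64 base; } __attribute__ ((__packed__));
	struct seg_new { u16 selector; u64 base; } __packed;
]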
Signed-off-by: Borislav Petkov Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-5-joro@8bytes.org --- arch/x86/include/asm/svm.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 06e52585aed3..cf13f9e78585 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -150,14 +150,14 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define SVM_NESTED_CTL_NP_ENABLE BIT(0) #define SVM_NESTED_CTL_SEV_ENABLE BIT(1) -struct __attribute__ ((__packed__)) vmcb_seg { +struct vmcb_seg { u16 selector; u16 attrib; u32 limit; u64 base; -}; +} __packed; -struct __attribute__ ((__packed__)) vmcb_save_area { +struct vmcb_save_area { struct vmcb_seg es; struct vmcb_seg cs; struct vmcb_seg ss; @@ -231,7 +231,7 @@ struct __attribute__ ((__packed__)) vmcb_save_area { u64 xcr0; u8 valid_bitmap[16]; u64 x87_state_gpa; -}; +} __packed; struct ghcb { struct vmcb_save_area save; @@ -256,11 +256,11 @@ static inline void __unused_size_checks(void) BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE); } -struct __attribute__ ((__packed__)) vmcb { +struct vmcb { struct vmcb_control_area control; u8 reserved_control[1024 - sizeof(struct vmcb_control_area)]; struct vmcb_save_area save; -}; +} __packed; #define SVM_CPUID_FUNC 0x8000000a -- cgit From 360e7c5c4ca4fd8e627781ed42f95d58bc3bb732 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 7 Sep 2020 15:15:06 +0200 Subject: x86/cpufeatures: Add SEV-ES CPU feature Add CPU feature detection for Secure Encrypted Virtualization with Encrypted State. This feature enhances SEV by also encrypting the guest register state, making it inaccessible to the hypervisor. Signed-off-by: Tom Lendacky Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-6-joro@8bytes.org --- arch/x86/include/asm/cpufeatures.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 83fc9d38eb1f..1205c1b6991a 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -236,6 +236,7 @@ #define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */ #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */ #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */ +#define X86_FEATURE_SEV_ES ( 8*32+20) /* AMD Secure Encrypted Virtualization - Encrypted State */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ -- cgit From 05a2fdf3230306daee1def019b8f52cd06bd2e48 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:07 +0200 Subject: x86/traps: Move pf error codes to <asm/trap_pf.h> Move the definition of the x86 page-fault error code bits to a new header file asm/trap_pf.h. This makes it easier to include them into pre-decompression boot code. No functional changes.
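[ Illustration of the bit semantics being moved, not part of the patch; a hypothetical helper classifying a fault with the X86_PF_* constants:

	/* Illustrative only: true for a write fault taken in user mode */
	static bool is_user_write_fault(unsigned long error_code)
	{
		return (error_code & X86_PF_USER) && (error_code & X86_PF_WRITE);
	}
]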
Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-7-joro@8bytes.org --- arch/x86/include/asm/trap_pf.h | 24 ++++++++++++++++++++++++ arch/x86/include/asm/traps.h | 19 +------------------ 2 files changed, 25 insertions(+), 18 deletions(-) create mode 100644 arch/x86/include/asm/trap_pf.h (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/trap_pf.h b/arch/x86/include/asm/trap_pf.h new file mode 100644 index 000000000000..305bc1214aef --- /dev/null +++ b/arch/x86/include/asm/trap_pf.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_TRAP_PF_H +#define _ASM_X86_TRAP_PF_H + +/* + * Page fault error code bits: + * + * bit 0 == 0: no page found 1: protection fault + * bit 1 == 0: read access 1: write access + * bit 2 == 0: kernel-mode access 1: user-mode access + * bit 3 == 1: use of reserved bit detected + * bit 4 == 1: fault was an instruction fetch + * bit 5 == 1: protection keys block access + */ +enum x86_pf_error_code { + X86_PF_PROT = 1 << 0, + X86_PF_WRITE = 1 << 1, + X86_PF_USER = 1 << 2, + X86_PF_RSVD = 1 << 3, + X86_PF_INSTR = 1 << 4, + X86_PF_PK = 1 << 5, +}; + +#endif /* _ASM_X86_TRAP_PF_H */ diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 714b1a30e7b0..6a308355ea29 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -8,6 +8,7 @@ #include <asm/debugreg.h> #include <asm/idtentry.h> #include <asm/siginfo.h> /* TRAP_TRACE, ... */ +#include <asm/trap_pf.h> #ifdef CONFIG_X86_64 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs); @@ -41,22 +42,4 @@ void __noreturn handle_stack_overflow(const char *message, unsigned long fault_address); #endif -/* - * Page fault error code bits: - * - * bit 0 == 0: no page found 1: protection fault - * bit 1 == 0: read access 1: write access - * bit 2 == 0: kernel-mode access 1: user-mode access - * bit 3 == 1: use of reserved bit detected - * bit 4 == 1: fault was an instruction fetch - * bit 5 == 1: protection keys block access - */ -enum x86_pf_error_code { - X86_PF_PROT = 1 << 0, - X86_PF_WRITE = 1 << 1, - X86_PF_USER = 1 << 2, - X86_PF_RSVD = 1 << 3, - X86_PF_INSTR = 1 << 4, - X86_PF_PK = 1 << 5, -}; #endif /* _ASM_X86_TRAPS_H */ -- cgit From 172b75e56b08846e6fb07a88e5685ce4e24f4620 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:09 +0200 Subject: x86/umip: Factor out instruction fetch Factor out the code to fetch the instruction from user-space to a helper function. No functional changes.
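[ Sketch of the intended call pattern for the helper declared in the diff below; the wrapper is hypothetical, and it assumes insn_fetch_from_user() returns the number of instruction bytes copied (zero or less meaning failure):

	/* Hypothetical caller: fetch the bytes of the faulting user instruction */
	static bool fetch_user_insn(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
	{
		return insn_fetch_from_user(regs, buf) > 0;
	}
]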
Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-9-joro@8bytes.org --- arch/x86/include/asm/insn-eval.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h index 2b6ccf2c49f1..b8b9ef1bbd06 100644 --- a/arch/x86/include/asm/insn-eval.h +++ b/arch/x86/include/asm/insn-eval.h @@ -19,5 +19,7 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs); int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs); unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx); int insn_get_code_seg_params(struct pt_regs *regs); +int insn_fetch_from_user(struct pt_regs *regs, + unsigned char buf[MAX_INSN_SIZE]); #endif /* _ASM_X86_INSN_EVAL_H */ -- cgit From 172639d79977ca7b5ce6f84f6606262f4081718f Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:10 +0200 Subject: x86/umip: Factor out instruction decoding Factor out the code used to decode an instruction with the correct address and operand sizes to a helper function. No functional changes. Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-10-joro@8bytes.org --- arch/x86/include/asm/insn-eval.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h index b8b9ef1bbd06..392b4fe377f9 100644 --- a/arch/x86/include/asm/insn-eval.h +++ b/arch/x86/include/asm/insn-eval.h @@ -21,5 +21,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx); int insn_get_code_seg_params(struct pt_regs *regs); int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE]); +bool insn_decode(struct insn *insn, struct pt_regs *regs, + unsigned char buf[MAX_INSN_SIZE], int buf_size); #endif /* _ASM_X86_INSN_EVAL_H */ -- cgit From 7af1bd822dd45a669fc178a35cc8183922333d56 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:11 +0200 Subject: x86/insn: Add insn_get_modrm_reg_off() Add a function to the instruction decoder which returns the pt_regs offset of the register specified in the reg field of the modrm byte. Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Acked-by: Masami Hiramatsu Link: https://lkml.kernel.org/r/20200907131613.12703-11-joro@8bytes.org --- arch/x86/include/asm/insn-eval.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h index 392b4fe377f9..f748f57f1491 100644 --- a/arch/x86/include/asm/insn-eval.h +++ b/arch/x86/include/asm/insn-eval.h @@ -17,6 +17,7 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs); int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs); +int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs); unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx); int insn_get_code_seg_params(struct pt_regs *regs); int insn_fetch_from_user(struct pt_regs *regs, -- cgit From 5901781a11175a5e5ee91746ec8627f18d47eebd Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:12 +0200 Subject: x86/insn: Add insn_has_rep_prefix() helper Add a function to check whether an instruction has a REP prefix. 
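[ Putting the helpers from these patches together -- a hypothetical decoder sketch; insn_decode() here is the insn-eval variant declared above, and insn_has_rep_prefix() is added by the diff that follows:

	/* Hypothetical: fetch and decode the current user instruction, then
	 * test it for a REP prefix */
	static bool user_insn_has_rep(struct pt_regs *regs)
	{
		unsigned char buf[MAX_INSN_SIZE];
		struct insn insn;
		int nr_copied = insn_fetch_from_user(regs, buf);

		if (nr_copied <= 0)
			return false;

		if (!insn_decode(&insn, regs, buf, nr_copied))
			return false;

		return insn_has_rep_prefix(&insn);
	}
]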
Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Reviewed-by: Masami Hiramatsu Link: https://lkml.kernel.org/r/20200907131613.12703-12-joro@8bytes.org --- arch/x86/include/asm/insn-eval.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h index f748f57f1491..a0f839aa144d 100644 --- a/arch/x86/include/asm/insn-eval.h +++ b/arch/x86/include/asm/insn-eval.h @@ -15,6 +15,7 @@ #define INSN_CODE_SEG_OPND_SZ(params) (params & 0xf) #define INSN_CODE_SEG_PARAMS(oper_sz, addr_sz) (oper_sz | (addr_sz << 4)) +bool insn_has_rep_prefix(struct insn *insn); void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs); int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs); int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs); -- cgit From 64e682638eb51070ba6044535b250aad43c5564e Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:14 +0200 Subject: x86/boot/compressed/64: Add IDT Infrastructure Add code needed to set up an IDT in the early pre-decompression boot code. The IDT is loaded first in startup_64, which is after EfiExitBootServices() has been called, and later reloaded when the kernel image has been relocated to the end of the decompression area. This allows setting up different IDT handlers before and after the relocation. Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-14-joro@8bytes.org --- arch/x86/include/asm/desc_defs.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h index a91f3b6e4f2a..5621fb3f2d1a 100644 --- a/arch/x86/include/asm/desc_defs.h +++ b/arch/x86/include/asm/desc_defs.h @@ -109,6 +109,9 @@ struct desc_ptr { #endif /* !__ASSEMBLY__ */ +/* Boot IDT definitions */ +#define BOOT_IDT_ENTRIES 32 + /* Access rights as returned by LAR */ #define AR_TYPE_RODATA (0 * (1 << 9)) #define AR_TYPE_RWDATA (1 * (1 << 9)) -- cgit From 29dcc60f6a19fb0aaee97bd1ae2ed8a7dc6f0cfe Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:20 +0200 Subject: x86/boot/compressed/64: Add stage1 #VC handler Add the first handler for #VC exceptions. At stage 1 there is no GHCB yet because the kernel might still be running on the EFI page table. The stage 1 handler is limited to the MSR-based protocol to talk to the hypervisor and can only support CPUID exit-codes, but that is enough to get to stage 2. [ bp: Zap superfluous newlines after rd/wrmsr instruction mnemonics. 
] Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-20-joro@8bytes.org --- arch/x86/include/asm/msr-index.h | 1 + arch/x86/include/asm/sev-es.h | 37 +++++++++++++++++++++++++++++++++++++ arch/x86/include/asm/trapnr.h | 1 + 3 files changed, 39 insertions(+) create mode 100644 arch/x86/include/asm/sev-es.h (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 2859ee4f39a8..da34fdba7c5a 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -466,6 +466,7 @@ #define MSR_AMD64_IBSBRTARGET 0xc001103b #define MSR_AMD64_IBSOPDATA4 0xc001103d #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ +#define MSR_AMD64_SEV_ES_GHCB 0xc0010130 #define MSR_AMD64_SEV 0xc0010131 #define MSR_AMD64_SEV_ENABLED_BIT 0 #define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT) diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h new file mode 100644 index 000000000000..48a44038b5d1 --- /dev/null +++ b/arch/x86/include/asm/sev-es.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AMD Encrypted Register State Support + * + * Author: Joerg Roedel + */ + +#ifndef __ASM_ENCRYPTED_STATE_H +#define __ASM_ENCRYPTED_STATE_H + +#include <linux/types.h> + +#define GHCB_SEV_CPUID_REQ 0x004UL +#define GHCB_CPUID_REQ_EAX 0 +#define GHCB_CPUID_REQ_EBX 1 +#define GHCB_CPUID_REQ_ECX 2 +#define GHCB_CPUID_REQ_EDX 3 +#define GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \ + (((unsigned long)reg & 3) << 30) | \ + (((unsigned long)fn) << 32)) + +#define GHCB_SEV_CPUID_RESP 0x005UL +#define GHCB_SEV_TERMINATE 0x100UL + +#define GHCB_SEV_GHCB_RESP_CODE(v) ((v) & 0xfff) +#define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); } + +void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code); + +static inline u64 lower_bits(u64 val, unsigned int bits) +{ + u64 mask = (1ULL << bits) - 1; + + return (val & mask); +} + +#endif diff --git a/arch/x86/include/asm/trapnr.h b/arch/x86/include/asm/trapnr.h index 082f45631fa9..f5d2325aa0b7 100644 --- a/arch/x86/include/asm/trapnr.h +++ b/arch/x86/include/asm/trapnr.h @@ -26,6 +26,7 @@ #define X86_TRAP_XF 19 /* SIMD Floating-Point Exception */ #define X86_TRAP_VE 20 /* Virtualization Exception */ #define X86_TRAP_CP 21 /* Control Protection Exception */ +#define X86_TRAP_VC 29 /* VMM Communication Exception */ #define X86_TRAP_IRET 32 /* IRET Exception */ #endif -- cgit From 597cfe48212a3f110ab0f918bf59791f453e65b7 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:24 +0200 Subject: x86/boot/compressed/64: Setup a GHCB-based VC Exception handler Install an exception handler for #VC exceptions that uses a GHCB. Also add the infrastructure for handling different exit-codes by decoding the instruction that caused the exception and error handling. 
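[ Illustration of the MSR-based GHCB protocol the stage-1 handler is limited to, built from the constants in sev-es.h above. The function name is hypothetical, and wrmsrl()/rdmsrl() stand in for the raw MSR helpers the pre-decompression code has to carry itself:

	/* Sketch: ask the hypervisor for one CPUID register via the GHCB MSR */
	static bool vc_cpuid_msr_proto(unsigned int fn, unsigned int reg, u32 *val)
	{
		u64 resp;

		wrmsrl(MSR_AMD64_SEV_ES_GHCB, GHCB_CPUID_REQ(fn, reg));
		VMGEXIT();				/* exit to the hypervisor */
		rdmsrl(MSR_AMD64_SEV_ES_GHCB, resp);

		if (GHCB_SEV_GHCB_RESP_CODE(resp) != GHCB_SEV_CPUID_RESP)
			return false;			/* unexpected response */

		*val = resp >> 32;			/* result lives in bits 63:32 */
		return true;
	}
]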
Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-24-joro@8bytes.org --- arch/x86/include/asm/sev-es.h | 39 +++++++++++++++++++++++++++++++++++++++ arch/x86/include/uapi/asm/svm.h | 1 + 2 files changed, 40 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h index 48a44038b5d1..6dc52440c4b4 100644 --- a/arch/x86/include/asm/sev-es.h +++ b/arch/x86/include/asm/sev-es.h @@ -9,7 +9,14 @@ #define __ASM_ENCRYPTED_STATE_H #include <linux/types.h> +#include <asm/insn.h> +#define GHCB_SEV_INFO 0x001UL +#define GHCB_SEV_INFO_REQ 0x002UL +#define GHCB_INFO(v) ((v) & 0xfffUL) +#define GHCB_PROTO_MAX(v) (((v) >> 48) & 0xffffUL) +#define GHCB_PROTO_MIN(v) (((v) >> 32) & 0xffffUL) +#define GHCB_PROTO_OUR 0x0001UL #define GHCB_SEV_CPUID_REQ 0x004UL #define GHCB_CPUID_REQ_EAX 0 #define GHCB_CPUID_REQ_EBX 1 @@ -19,12 +26,44 @@ (((unsigned long)reg & 3) << 30) | \ (((unsigned long)fn) << 32)) +#define GHCB_PROTOCOL_MAX 0x0001UL +#define GHCB_DEFAULT_USAGE 0x0000UL + #define GHCB_SEV_CPUID_RESP 0x005UL #define GHCB_SEV_TERMINATE 0x100UL +#define GHCB_SEV_TERMINATE_REASON(reason_set, reason_val) \ + (((((u64)reason_set) & 0x7) << 12) | \ + ((((u64)reason_val) & 0xff) << 16)) +#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0 +#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1 #define GHCB_SEV_GHCB_RESP_CODE(v) ((v) & 0xfff) #define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); } +enum es_result { + ES_OK, /* All good */ + ES_UNSUPPORTED, /* Requested operation not supported */ + ES_VMM_ERROR, /* Unexpected state from the VMM */ + ES_DECODE_FAILED, /* Instruction decoding failed */ + ES_EXCEPTION, /* Instruction caused exception */ + ES_RETRY, /* Retry instruction emulation */ +}; + +struct es_fault_info { + unsigned long vector; + unsigned long error_code; + unsigned long cr2; +}; + +struct pt_regs; + +/* ES instruction emulation context */ +struct es_em_ctxt { + struct pt_regs *regs; + struct insn insn; + struct es_fault_info fi; +}; + void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code); static inline u64 lower_bits(u64 val, unsigned int bits) diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h index 2e8a30f06c74..c68d1618c9b0 100644 --- a/arch/x86/include/uapi/asm/svm.h +++ b/arch/x86/include/uapi/asm/svm.h @@ -29,6 +29,7 @@ #define SVM_EXIT_WRITE_DR6 0x036 #define SVM_EXIT_WRITE_DR7 0x037 #define SVM_EXIT_EXCP_BASE 0x040 +#define SVM_EXIT_LAST_EXCP 0x05f #define SVM_EXIT_INTR 0x060 #define SVM_EXIT_NMI 0x061 #define SVM_EXIT_SMI 0x062 -- cgit From 1b4fb8545f2b00f2844c4b7619d64d98440a477c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:27 +0200 Subject: x86/fpu: Move xgetbv()/xsetbv() into a separate header The xgetbv() function is needed in the pre-decompression boot code, but asm/fpu/internal.h can't be included there directly. Doing so opens the door to include-hell due to various include-magic in boot/compressed/misc.h. Avoid that by moving xgetbv()/xsetbv() to a separate header file and including it instead. 
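[ For reference, an illustrative caller of the moved helper (not from the patch):

	/* Illustrative only: check whether AVX state (XCR0 bit 2) is enabled */
	static bool avx_state_enabled(void)
	{
		return xgetbv(XCR_XFEATURE_ENABLED_MASK) & (1ULL << 2);
	}
]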
Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-27-joro@8bytes.org --- arch/x86/include/asm/fpu/internal.h | 30 +----------------------------- arch/x86/include/asm/fpu/xcr.h | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 29 deletions(-) create mode 100644 arch/x86/include/asm/fpu/xcr.h (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 21a8b5259477..ceeba9f63172 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -19,6 +19,7 @@ #include <asm/user.h> #include <asm/fpu/api.h> #include <asm/fpu/xstate.h> +#include <asm/fpu/xcr.h> #include <asm/cpufeature.h> #include <asm/trace/fpu.h> @@ -585,33 +586,4 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) __write_pkru(pkru_val); } -/* - * MXCSR and XCR definitions: - */ - -static inline void ldmxcsr(u32 mxcsr) -{ - asm volatile("ldmxcsr %0" :: "m" (mxcsr)); -} - -extern unsigned int mxcsr_feature_mask; - -#define XCR_XFEATURE_ENABLED_MASK 0x00000000 - -static inline u64 xgetbv(u32 index) -{ - u32 eax, edx; - - asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index)); - return eax + ((u64)edx << 32); -} - -static inline void xsetbv(u32 index, u64 value) -{ - u32 eax = value; - u32 edx = value >> 32; - - asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index)); -} - #endif /* _ASM_X86_FPU_INTERNAL_H */ diff --git a/arch/x86/include/asm/fpu/xcr.h b/arch/x86/include/asm/fpu/xcr.h new file mode 100644 index 000000000000..1c7ab8d95da5 --- /dev/null +++ b/arch/x86/include/asm/fpu/xcr.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_FPU_XCR_H +#define _ASM_X86_FPU_XCR_H + +/* + * MXCSR and XCR definitions: + */ + +static inline void ldmxcsr(u32 mxcsr) +{ + asm volatile("ldmxcsr %0" :: "m" (mxcsr)); +} + +extern unsigned int mxcsr_feature_mask; + +#define XCR_XFEATURE_ENABLED_MASK 0x00000000 + +static inline u64 xgetbv(u32 index) +{ + u32 eax, edx; + + asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index)); + return eax + ((u64)edx << 32); +} + +static inline void xsetbv(u32 index, u64 value) +{ + u32 eax = value; + u32 edx = value >> 32; + + asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index)); +} + +#endif /* _ASM_X86_FPU_XCR_H */ -- cgit From 866b556efa1295934ed0bc20c2f208c93a873fb0 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:30 +0200 Subject: x86/head/64: Install startup GDT Handling exceptions during boot requires a working GDT. The kernel GDT can't be used on the direct mapping, so load a startup GDT and set up segments. 
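[ A rough sketch of the idea, modeled on the upstream implementation of this patch; startup_gdt and the descriptor below are illustrative, not declarations from this diff:

	/* Load a boot GDT while still running on the identity mapping */
	static struct desc_ptr startup_gdt_descr = {
		.size		= sizeof(startup_gdt) - 1,
		.address	= 0,	/* filled at runtime, relocations don't apply yet */
	};

	void startup_64_setup_env(unsigned long physbase)
	{
		startup_gdt_descr.address = (unsigned long)startup_gdt + physbase;
		native_load_gdt(&startup_gdt_descr);

		/* New GDT is live - reload the data segment registers */
		asm volatile("movl %%eax, %%ds\n"
			     "movl %%eax, %%ss\n"
			     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
	}
]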
Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-30-joro@8bytes.org --- arch/x86/include/asm/setup.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 84b645cc8bc9..5c2fd05bd52c 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -48,6 +48,7 @@ extern void reserve_standard_io_resources(void); extern void i386_reserve_resources(void); extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp); extern unsigned long __startup_secondary_64(void); +extern void startup_64_setup_env(unsigned long physbase); extern int early_make_pgtable(unsigned long address); #ifdef CONFIG_X86_INTEL_MID -- cgit From f5963ba7a45fc6ff298a34976064354be437e1d8 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:34 +0200 Subject: x86/head/64: Install a CPU bringup IDT Add a separate bringup IDT for the CPU bringup code that will be used until the kernel switches to the idt_table. There are two reasons for a separate IDT: 1) When the idt_table is set up and the secondary CPUs are booted, it contains entries (e.g. IST entries) which require certain CPU state to be set up. This includes a working TSS (for IST), MSR_GS_BASE (for the stack protector) or CR4.FSGSBASE (for the paranoid_entry path). By using a dedicated IDT for early boot this state need not be set up early. 2) The idt_table is static to idt.c, so any function using or modifying it must be in idt.c too. That means that all compiler-driven instrumentation like tracing or KASAN is also active in this code. But during early CPU bringup the environment is not set up for this instrumentation to work correctly. To avoid all of these hassles and make early exception handling robust, use a dedicated bringup IDT. The IDT is loaded twice, first on the boot CPU while the kernel is still running on direct mapped addresses, and again later after the switch to kernel addresses has happened. The second IDT load happens on the boot and secondary CPUs. Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-34-joro@8bytes.org --- arch/x86/include/asm/setup.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 5c2fd05bd52c..4b3ca5ade2fd 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -50,6 +50,7 @@ extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp extern unsigned long __startup_secondary_64(void); extern void startup_64_setup_env(unsigned long physbase); extern int early_make_pgtable(unsigned long address); +extern void early_setup_idt(void); #ifdef CONFIG_X86_INTEL_MID extern void x86_intel_mid_early_setup(void); -- cgit From 097ee5b778b8970e1c2ed3ca1631b297d90acd61 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:35 +0200 Subject: x86/idt: Make IDT init functions static inlines Move these two functions from kernel/idt.c to include/asm/desc.h: * init_idt_data() * idt_init_desc() These functions are needed to set up IDT entries very early and need to be called from head64.c. To be usable this early, these functions need to be compiled without instrumentation and the stack-protector feature. These features need to be kept enabled for kernel/idt.c, so head64.c must use its own versions. 
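[ A hypothetical early-boot user of the two helpers (they become static inlines in asm/desc.h in the diff below); bringup_idt_table and the wrapper are illustrative:

	static gate_desc bringup_idt_table[BOOT_IDT_ENTRIES];

	static void set_bringup_idt_handler(int n, const void *handler)
	{
		struct idt_data data;
		gate_desc desc;

		init_idt_data(&data, n, handler);	/* vector, __KERNEL_CS, gate type */
		idt_init_desc(&desc, &data);		/* encode into the hardware format */
		native_write_idt_entry(bringup_idt_table, n, &desc);
	}
]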
[ bp: Take Kees' suggested patch title and add his Rev-by. ] Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20200907131613.12703-35-joro@8bytes.org --- arch/x86/include/asm/desc.h | 27 +++++++++++++++++++++++++++ arch/x86/include/asm/desc_defs.h | 7 +++++++ 2 files changed, 34 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 1ced11d31932..476082a83d1c 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -383,6 +383,33 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) void alloc_intr_gate(unsigned int n, const void *addr); +static inline void init_idt_data(struct idt_data *data, unsigned int n, + const void *addr) +{ + BUG_ON(n > 0xFF); + + memset(data, 0, sizeof(*data)); + data->vector = n; + data->addr = addr; + data->segment = __KERNEL_CS; + data->bits.type = GATE_INTERRUPT; + data->bits.p = 1; +} + +static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d) +{ + unsigned long addr = (unsigned long) d->addr; + + gate->offset_low = (u16) addr; + gate->segment = (u16) d->segment; + gate->bits = d->bits; + gate->offset_middle = (u16) (addr >> 16); +#ifdef CONFIG_X86_64 + gate->offset_high = (u32) (addr >> 32); + gate->reserved = 0; +#endif +} + extern unsigned long system_vectors[]; extern void load_current_idt(void); diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h index 5621fb3f2d1a..f7e7099af595 100644 --- a/arch/x86/include/asm/desc_defs.h +++ b/arch/x86/include/asm/desc_defs.h @@ -74,6 +74,13 @@ struct idt_bits { p : 1; } __attribute__((packed)); +struct idt_data { + unsigned int vector; + unsigned int segment; + struct idt_bits bits; + const void *addr; +}; + struct gate_struct { u16 offset_low; u16 segment; -- cgit From 4b47cdbda6f1ad73b08dc7d497bac12b8f26ae0d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:36 +0200 Subject: x86/head/64: Move early exception dispatch to C code Move the assembly coded dispatch between page-faults and all other exceptions to C code to make it easier to maintain and extend. Also change the return-type of early_make_pgtable() to bool and make it static. 
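[ Sketch of the C-side dispatch this patch introduces, based on the upstream head64.c version; early_make_pgtable() is the now-static page-table fixup and early_fixup_exception() the existing exception-table path:

	void __init do_early_exception(struct pt_regs *regs, int trapnr)
	{
		if (trapnr == X86_TRAP_PF &&
		    early_make_pgtable(native_read_cr2()))
			return;

		early_fixup_exception(regs, trapnr);
	}
]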
Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-36-joro@8bytes.org --- arch/x86/include/asm/pgtable.h | 2 +- arch/x86/include/asm/setup.h | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index b836138ce852..7b8f2127de37 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -28,7 +28,7 @@ #include extern pgd_t early_top_pgt[PTRS_PER_PGD]; -int __init __early_make_pgtable(unsigned long address, pmdval_t pmd); +bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd); void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm); void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm, diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 4b3ca5ade2fd..7d7a064af6ff 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -39,6 +39,8 @@ void vsmp_init(void); static inline void vsmp_init(void) { } #endif +struct pt_regs; + void setup_bios_corruption_check(void); void early_platform_quirks(void); @@ -49,8 +51,8 @@ extern void i386_reserve_resources(void); extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp); extern unsigned long __startup_secondary_64(void); extern void startup_64_setup_env(unsigned long physbase); -extern int early_make_pgtable(unsigned long address); extern void early_setup_idt(void); +extern void __init do_early_exception(struct pt_regs *regs, int trapnr); #ifdef CONFIG_X86_INTEL_MID extern void x86_intel_mid_early_setup(void); -- cgit From b57de6cd16395be1ebdaa9b489ffbf462bb585c4 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:37 +0200 Subject: x86/sev-es: Add SEV-ES Feature Detection Add a sev_es_active() function for checking whether SEV-ES is enabled. Also cache the value of MSR_AMD64_SEV at boot to speed up the feature checking in the running code. [ bp: Remove "!!" in sev_active() too. 
] Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20200907131613.12703-37-joro@8bytes.org --- arch/x86/include/asm/mem_encrypt.h | 3 +++ arch/x86/include/asm/msr-index.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 5049f6c22683..4e72b73a9cb5 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -19,6 +19,7 @@ #ifdef CONFIG_AMD_MEM_ENCRYPT extern u64 sme_me_mask; +extern u64 sev_status; extern bool sev_enabled; void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr, @@ -50,6 +51,7 @@ void __init mem_encrypt_init(void); bool sme_active(void); bool sev_active(void); +bool sev_es_active(void); #define __bss_decrypted __attribute__((__section__(".bss..decrypted"))) @@ -72,6 +74,7 @@ static inline void __init sme_enable(struct boot_params *bp) { } static inline bool sme_active(void) { return false; } static inline bool sev_active(void) { return false; } +static inline bool sev_es_active(void) { return false; } static inline int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; } diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index da34fdba7c5a..249a4147c4b2 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -469,7 +469,9 @@ #define MSR_AMD64_SEV_ES_GHCB 0xc0010130 #define MSR_AMD64_SEV 0xc0010131 #define MSR_AMD64_SEV_ENABLED_BIT 0 +#define MSR_AMD64_SEV_ES_ENABLED_BIT 1 #define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT) +#define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT) #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f -- cgit From 74d8d9d531b4cc945a9f75aa2fc21d99ca5a9fe3 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 8 Sep 2020 14:35:17 +0200 Subject: x86/sev-es: Setup an early #VC handler Setup an early handler for #VC exceptions. There is no GHCB mapped yet, so just re-use the vc_no_ghcb_handler(). It can only handle CPUID exit-codes, but that should be enough to get the kernel through verify_cpu() and __startup_64() until it runs on virtual addresses. Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov [ boot failure Error: kernel_ident_mapping_init() failed. ] Reported-by: kernel test robot Link: https://lkml.kernel.org/r/20200908123517.GA3764@8bytes.org --- arch/x86/include/asm/sev-es.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h index 6dc52440c4b4..7175d432ebfe 100644 --- a/arch/x86/include/asm/sev-es.h +++ b/arch/x86/include/asm/sev-es.h @@ -73,4 +73,7 @@ static inline u64 lower_bits(u64 val, unsigned int bits) return (val & mask); } +/* Early IDT entry points for #VC handler */ +extern void vc_no_ghcb(void); + #endif -- cgit From 1aa9aa8ee517e0443b06e816a4fd2d15f2113615 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 8 Sep 2020 14:38:16 +0200 Subject: x86/sev-es: Setup GHCB-based boot #VC handler Add the infrastructure to handle #VC exceptions when the kernel runs on virtual addresses and has mapped a GHCB. This handler will be used until the runtime #VC handler takes over. Since the handler runs very early, disable instrumentation for sev-es.c. [ bp: Make vc_ghcb_invalidate() __always_inline so that it can be inlined in noinstr functions like __sev_es_nmi_complete(). 
] Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200908123816.GB3764@8bytes.org --- arch/x86/include/asm/realmode.h | 3 +++ arch/x86/include/asm/segment.h | 2 +- arch/x86/include/asm/sev-es.h | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index b35030eeec36..96118fb041b8 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -57,6 +57,9 @@ extern unsigned char real_mode_blob_end[]; extern unsigned long initial_code; extern unsigned long initial_gs; extern unsigned long initial_stack; +#ifdef CONFIG_AMD_MEM_ENCRYPT +extern unsigned long initial_vc_handler; +#endif extern unsigned char real_mode_blob[]; extern unsigned char real_mode_relocs[]; diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 9646c300f128..4e8dec387112 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -230,7 +230,7 @@ #define NUM_EXCEPTION_VECTORS 32 /* Bitmask of exception vectors which push an error code on the stack: */ -#define EXCEPTION_ERRCODE_MASK 0x00027d00 +#define EXCEPTION_ERRCODE_MASK 0x20027d00 #define GDT_SIZE (GDT_ENTRIES*8) #define GDT_ENTRY_TLS_ENTRIES 3 diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h index 7175d432ebfe..9fbeedaa66ee 100644 --- a/arch/x86/include/asm/sev-es.h +++ b/arch/x86/include/asm/sev-es.h @@ -75,5 +75,7 @@ static inline u64 lower_bits(u64 val, unsigned int bits) /* Early IDT entry points for #VC handler */ extern void vc_no_ghcb(void); +extern void vc_boot_ghcb(void); +extern bool handle_vc_boot_ghcb(struct pt_regs *regs); #endif -- cgit From 885689e47dfa1499b756a07237eb645234d93cf9 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 7 Sep 2020 15:15:42 +0200 Subject: x86/sev-es: Setup per-CPU GHCBs for the runtime handler The runtime handler needs one GHCB per-CPU. Set them up and map them unencrypted. [ bp: Touchups and simplification. ] Signed-off-by: Tom Lendacky Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-42-joro@8bytes.org --- arch/x86/include/asm/mem_encrypt.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 4e72b73a9cb5..c9f5df0a1c10 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -49,6 +49,7 @@ void __init mem_encrypt_free_decrypted_mem(void); /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void); +void __init sev_es_init_vc_handling(void); bool sme_active(void); bool sev_active(void); bool sev_es_active(void); @@ -72,6 +73,7 @@ static inline void __init sme_early_init(void) { } static inline void __init sme_encrypt_kernel(struct boot_params *bp) { } static inline void __init sme_enable(struct boot_params *bp) { } +static inline void sev_es_init_vc_handling(void) { } static inline bool sme_active(void) { return false; } static inline bool sev_active(void) { return false; } static inline bool sev_es_active(void) { return false; } -- cgit From 02772fb9b68e6a72a5e17f994048df832fe2b15e Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:43 +0200 Subject: x86/sev-es: Allocate and map an IST stack for #VC handler Allocate and map an IST stack and an additional fall-back stack for the #VC handler. 
The memory for the stacks is allocated only when SEV-ES is active. The #VC handler needs to use an IST stack because a #VC exception can be raised from kernel space with unsafe stack, e.g. in the SYSCALL entry path. Since the #VC exception can be nested, the #VC handler switches back to the interrupted stack when entered from kernel space. If switching back is not possible, the fall-back stack is used. Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-43-joro@8bytes.org --- arch/x86/include/asm/cpu_entry_area.h | 33 +++++++++++++++++++++------------ arch/x86/include/asm/page_64_types.h | 1 + 2 files changed, 22 insertions(+), 12 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h index 8902fdb7de13..3d52b094850a 100644 --- a/arch/x86/include/asm/cpu_entry_area.h +++ b/arch/x86/include/asm/cpu_entry_area.h @@ -11,25 +11,29 @@ #ifdef CONFIG_X86_64 /* Macro to enforce the same ordering and stack sizes */ -#define ESTACKS_MEMBERS(guardsize) \ - char DF_stack_guard[guardsize]; \ - char DF_stack[EXCEPTION_STKSZ]; \ - char NMI_stack_guard[guardsize]; \ - char NMI_stack[EXCEPTION_STKSZ]; \ - char DB_stack_guard[guardsize]; \ - char DB_stack[EXCEPTION_STKSZ]; \ - char MCE_stack_guard[guardsize]; \ - char MCE_stack[EXCEPTION_STKSZ]; \ - char IST_top_guard[guardsize]; \ +#define ESTACKS_MEMBERS(guardsize, optional_stack_size) \ + char DF_stack_guard[guardsize]; \ + char DF_stack[EXCEPTION_STKSZ]; \ + char NMI_stack_guard[guardsize]; \ + char NMI_stack[EXCEPTION_STKSZ]; \ + char DB_stack_guard[guardsize]; \ + char DB_stack[EXCEPTION_STKSZ]; \ + char MCE_stack_guard[guardsize]; \ + char MCE_stack[EXCEPTION_STKSZ]; \ + char VC_stack_guard[guardsize]; \ + char VC_stack[optional_stack_size]; \ + char VC2_stack_guard[guardsize]; \ + char VC2_stack[optional_stack_size]; \ + char IST_top_guard[guardsize]; \ /* The exception stacks' physical storage. No guard pages required */ struct exception_stacks { - ESTACKS_MEMBERS(0) + ESTACKS_MEMBERS(0, 0) }; /* The effective cpu entry area mapping with guard pages. */ struct cea_exception_stacks { - ESTACKS_MEMBERS(PAGE_SIZE) + ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ) }; /* @@ -40,6 +44,8 @@ enum exception_stack_ordering { ESTACK_NMI, ESTACK_DB, ESTACK_MCE, + ESTACK_VC, + ESTACK_VC2, N_EXCEPTION_STACKS }; @@ -139,4 +145,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu) #define __this_cpu_ist_top_va(name) \ CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name) +#define __this_cpu_ist_bottom_va(name) \ + CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name) + #endif diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 288b065955b7..d0c6c10c18a0 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -28,6 +28,7 @@ #define IST_INDEX_NMI 1 #define IST_INDEX_DB 2 #define IST_INDEX_MCE 3 +#define IST_INDEX_VC 4 /* * Set __PAGE_OFFSET to the most negative possible address + -- cgit From 315562c9af3d583502b35c4b223a08d95ce69864 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:44 +0200 Subject: x86/sev-es: Adjust #VC IST Stack on entering NMI handler When an NMI hits in the #VC handler entry code before it has switched to another stack, any subsequent #VC exception in the NMI code-path will overwrite the interrupted #VC handler's stack. 
Make sure this doesn't happen by explicitly adjusting the #VC IST entry in the NMI handler for the time it can cause #VC exceptions. [ bp: Touchups, spelling fixes. ] Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-44-joro@8bytes.org --- arch/x86/include/asm/sev-es.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h index 9fbeedaa66ee..59176e8c6b81 100644 --- a/arch/x86/include/asm/sev-es.h +++ b/arch/x86/include/asm/sev-es.h @@ -78,4 +78,23 @@ extern void vc_no_ghcb(void); extern void vc_boot_ghcb(void); extern bool handle_vc_boot_ghcb(struct pt_regs *regs); +#ifdef CONFIG_AMD_MEM_ENCRYPT +extern struct static_key_false sev_es_enable_key; +extern void __sev_es_ist_enter(struct pt_regs *regs); +extern void __sev_es_ist_exit(void); +static __always_inline void sev_es_ist_enter(struct pt_regs *regs) +{ + if (static_branch_unlikely(&sev_es_enable_key)) + __sev_es_ist_enter(regs); +} +static __always_inline void sev_es_ist_exit(void) +{ + if (static_branch_unlikely(&sev_es_enable_key)) + __sev_es_ist_exit(); +} +#else +static inline void sev_es_ist_enter(struct pt_regs *regs) { } +static inline void sev_es_ist_exit(void) { } +#endif + #endif -- cgit From 6b27edd74a5e9669120f7bd0ae1f475d124c1042 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:45 +0200 Subject: x86/dumpstack/64: Add noinstr version of get_stack_info() The get_stack_info() functionality is needed in the entry code for the #VC exception handler. Provide a version of it in the .text.noinstr section which can be called safely from there. Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-45-joro@8bytes.org --- arch/x86/include/asm/stacktrace.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index 5ae5a68e469d..49600643faba 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -35,6 +35,8 @@ bool in_entry_stack(unsigned long *stack, struct stack_info *info); int get_stack_info(unsigned long *stack, struct task_struct *task, struct stack_info *info, unsigned long *visit_mask); +bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task, + struct stack_info *info); const char *stack_type_name(enum stack_type type); -- cgit From a13644f3a53de4e95a7bce6459f834e832ea44c5 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:15:46 +0200 Subject: x86/entry/64: Add entry code for #VC handler The #VC handler needs special entry code because: 1. It runs on an IST stack 2. It needs to be able to handle nested #VC exceptions To make this work, the entry code is implemented to pretend it doesn't use an IST stack. When entered from user-mode or early SYSCALL entry path it switches to the task stack. If entered from kernel-mode it tries to switch back to the previous stack in the IRET frame. The stack found in the IRET frame is validated first, and if it is not safe to use it for the #VC handler, the code will switch to a fall-back stack (the #VC2 IST stack). From there, it can cause nested exceptions again. 
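[ How the sev_es_ist_enter()/sev_es_ist_exit() helpers added earlier in this series are meant to bracket NMI processing -- a simplified sketch, not the actual do_nmi() wiring:

	static void nmi_with_vc_protection(struct pt_regs *regs)
	{
		sev_es_ist_enter(regs);		/* move the #VC IST entry out of the way */

		/* ... NMI processing that may itself raise #VC exceptions ... */

		sev_es_ist_exit();		/* restore the original #VC IST entry */
	}
]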
Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-46-joro@8bytes.org --- arch/x86/include/asm/idtentry.h | 44 +++++++++++++++++++++++++++++++++++++++++ arch/x86/include/asm/proto.h | 1 + arch/x86/include/asm/traps.h | 1 + 3 files changed, 46 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index a43366191212..840faaf57708 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -308,6 +308,18 @@ static __always_inline void __##func(struct pt_regs *regs) DECLARE_IDTENTRY_RAW(vector, func); \ __visible void noist_##func(struct pt_regs *regs) +/** + * DECLARE_IDTENTRY_VC - Declare functions for the VC entry point + * @vector: Vector number (ignored for C) + * @func: Function name of the entry point + * + * Maps to DECLARE_IDTENTRY_RAW_ERRORCODE, but declares also the + * safe_stack C handler. + */ +#define DECLARE_IDTENTRY_VC(vector, func) \ + DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \ + __visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code) + /** * DEFINE_IDTENTRY_IST - Emit code for IST entry points * @func: Function name of the entry point @@ -347,6 +359,35 @@ static __always_inline void __##func(struct pt_regs *regs) #define DEFINE_IDTENTRY_DF(func) \ DEFINE_IDTENTRY_RAW_ERRORCODE(func) +/** + * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler + which runs on a safe stack. + * @func: Function name of the entry point + * + * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE + */ +#define DEFINE_IDTENTRY_VC_SAFE_STACK(func) \ + DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func) + +/** + * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler + which runs on the VC fall-back stack + * @func: Function name of the entry point + * + * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE + */ +#define DEFINE_IDTENTRY_VC_IST(func) \ + DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func) + +/** + * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler + * @func: Function name of the entry point + * + * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE + */ +#define DEFINE_IDTENTRY_VC(func) \ + DEFINE_IDTENTRY_RAW_ERRORCODE(func) + #else /* CONFIG_X86_64 */ /** @@ -433,6 +474,9 @@ __visible noinstr void func(struct pt_regs *regs, \ # define DECLARE_IDTENTRY_XENCB(vector, func) \ DECLARE_IDTENTRY(vector, func) +# define DECLARE_IDTENTRY_VC(vector, func) \ + idtentry_vc vector asm_##func func + #else # define DECLARE_IDTENTRY_MCE(vector, func) \ DECLARE_IDTENTRY(vector, func) diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 28996fe19301..2c35f1c01a2d 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h @@ -10,6 +10,7 @@ void syscall_init(void); #ifdef CONFIG_X86_64 void entry_SYSCALL_64(void); +void entry_SYSCALL_64_safe_stack(void); long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2); #endif diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 6a308355ea29..1b86bb3abc56 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -15,6 +15,7 @@ asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs); asmlinkage __visible notrace struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s); void __init trap_init(void); +asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *eregs); #endif #ifdef CONFIG_X86_F00F_BUG -- cgit From 
0786138c78e79343c7b015d77507cbf9d5f15d00 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 7 Sep 2020 15:15:47 +0200 Subject: x86/sev-es: Add a Runtime #VC Exception Handler Add the handlers for #VC exceptions invoked at runtime. Signed-off-by: Tom Lendacky Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-47-joro@8bytes.org --- arch/x86/include/asm/idtentry.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index 840faaf57708..58a793b46a00 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -318,6 +318,7 @@ static __always_inline void __##func(struct pt_regs *regs) */ #define DECLARE_IDTENTRY_VC(vector, func) \ DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \ + __visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code); \ __visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code) /** @@ -608,6 +609,11 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug); /* #DF */ DECLARE_IDTENTRY_DF(X86_TRAP_DF, exc_double_fault); +/* #VC */ +#ifdef CONFIG_AMD_MEM_ENCRYPT +DECLARE_IDTENTRY_VC(X86_TRAP_VC, exc_vmm_communication); +#endif + #ifdef CONFIG_XEN_PV DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback); #endif -- cgit From 51ee7d6e3d2b70a3f232cceffab5084a2abd6719 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 7 Sep 2020 15:15:50 +0200 Subject: x86/sev-es: Handle MMIO events Add a handler for #VC exceptions caused by MMIO intercepts. These intercepts come along as nested page faults on pages with reserved bits set. Signed-off-by: Tom Lendacky [ jroedel@suse.de: Adapt to VC handling framework ] Co-developed-by: Joerg Roedel Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-50-joro@8bytes.org --- arch/x86/include/uapi/asm/svm.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h index c68d1618c9b0..8f36ae021a7f 100644 --- a/arch/x86/include/uapi/asm/svm.h +++ b/arch/x86/include/uapi/asm/svm.h @@ -81,6 +81,11 @@ #define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402 +/* SEV-ES software-defined VMGEXIT events */ +#define SVM_VMGEXIT_MMIO_READ 0x80000001 +#define SVM_VMGEXIT_MMIO_WRITE 0x80000002 +#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff + #define SVM_EXIT_ERR -1 #define SVM_EXIT_REASONS \ -- cgit From f6a9f8a45810d2914ea422ff39bfe2e0251c50f2 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:16:03 +0200 Subject: x86/paravirt: Allow hypervisor-specific VMMCALL handling under SEV-ES Add two new paravirt callbacks to provide hypervisor-specific processor state in the GHCB and to copy state from the hypervisor back to the processor. 
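[ Sketch of the two callbacks, modeled on the KVM guest implementation added later in this series; the GHCB accessors come from earlier in this series:

	static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
	{
		/* KVM hypercall arguments travel in RBX/RCX/RDX/RSI */
		ghcb_set_rbx(ghcb, regs->bx);
		ghcb_set_rcx(ghcb, regs->cx);
		ghcb_set_rdx(ghcb, regs->dx);
		ghcb_set_rsi(ghcb, regs->si);
	}

	static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
	{
		/* Nothing to copy back or verify for KVM */
		return true;
	}

A guest registers them via x86_platform.hyper.sev_es_hcall_prepare and .sev_es_hcall_finish. ]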
Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-63-joro@8bytes.org --- arch/x86/include/asm/x86_init.h | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 6807153c0410..0304e2931cd3 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -4,8 +4,10 @@ #include +struct ghcb; struct mpc_bus; struct mpc_cpu; +struct pt_regs; struct mpc_table; struct cpuinfo_x86; @@ -236,10 +238,22 @@ struct x86_legacy_features { /** * struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks * - * @pin_vcpu: pin current vcpu to specified physical cpu (run rarely) + * @pin_vcpu: pin current vcpu to specified physical + * cpu (run rarely) + * @sev_es_hcall_prepare: Load additional hypervisor-specific + * state into the GHCB when doing a VMMCALL under + * SEV-ES. Called from the #VC exception handler. + * @sev_es_hcall_finish: Copies state from the GHCB back into the + * processor (or pt_regs). Also runs checks on the + * state returned from the hypervisor after a + * VMMCALL under SEV-ES. Needs to return 'false' + * if the checks fail. Called from the #VC + * exception handler. */ struct x86_hyper_runtime { void (*pin_vcpu)(int cpu); + void (*sev_es_hcall_prepare)(struct ghcb *ghcb, struct pt_regs *regs); + bool (*sev_es_hcall_finish)(struct ghcb *ghcb, struct pt_regs *regs); }; /** -- cgit From bf5ff276448f64f1f9ef9ffc9e231026e3887d3d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 7 Sep 2020 15:16:06 +0200 Subject: x86/realmode: Add SEV-ES specific trampoline entry point The code at the trampoline entry point is executed in real-mode. In real-mode, #VC exceptions can't be handled so anything that might cause such an exception must be avoided. In the standard trampoline entry code this is the WBINVD instruction and the call to verify_cpu(), which are both not needed anyway when running as an SEV-ES guest. Signed-off-by: Joerg Roedel Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200907131613.12703-66-joro@8bytes.org --- arch/x86/include/asm/realmode.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index 96118fb041b8..4d4d853f6841 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -21,6 +21,9 @@ struct real_mode_header { /* SMP trampoline */ u32 trampoline_start; u32 trampoline_header; +#ifdef CONFIG_AMD_MEM_ENCRYPT + u32 sev_es_trampoline_start; +#endif #ifdef CONFIG_X86_64 u32 trampoline_pgd; #endif -- cgit From 8940ac9ced8bc1c48c4e28b0784e3234c9d14469 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 7 Sep 2020 15:16:07 +0200 Subject: x86/realmode: Setup AP jump table As part of the GHCB specification, the booting of APs under SEV-ES requires an AP jump table when transitioning from one layer of code to another (e.g. when going from UEFI to the OS). As a result, each layer that parks an AP must provide the physical address of an AP jump table to the next layer via the hypervisor. Upon booting of the kernel, read the AP jump table address from the hypervisor. Under SEV-ES, APs are started using the INIT-SIPI-SIPI sequence. Before issuing the first SIPI request for an AP, the start CS and IP is programmed into the AP jump table. 
From bf5ff276448f64f1f9ef9ffc9e231026e3887d3d Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Mon, 7 Sep 2020 15:16:06 +0200
Subject: x86/realmode: Add SEV-ES specific trampoline entry point

The code at the trampoline entry point is executed in real-mode. In
real-mode, #VC exceptions can't be handled, so anything that might
cause such an exception must be avoided. In the standard trampoline
entry code these are the WBINVD instruction and the call to
verify_cpu(), both of which are not needed anyway when running as an
SEV-ES guest.

Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20200907131613.12703-66-joro@8bytes.org
---
 arch/x86/include/asm/realmode.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 96118fb041b8..4d4d853f6841 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -21,6 +21,9 @@ struct real_mode_header {
 	/* SMP trampoline */
 	u32	trampoline_start;
 	u32	trampoline_header;
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	u32	sev_es_trampoline_start;
+#endif
 #ifdef CONFIG_X86_64
 	u32	trampoline_pgd;
 #endif
--
cgit

From 8940ac9ced8bc1c48c4e28b0784e3234c9d14469 Mon Sep 17 00:00:00 2001
From: Tom Lendacky
Date: Mon, 7 Sep 2020 15:16:07 +0200
Subject: x86/realmode: Setup AP jump table

As part of the GHCB specification, the booting of APs under SEV-ES
requires an AP jump table when transitioning from one layer of code to
another (e.g. when going from UEFI to the OS). As a result, each layer
that parks an AP must provide the physical address of an AP jump table
to the next layer via the hypervisor.

Upon booting of the kernel, read the AP jump table address from the
hypervisor. Under SEV-ES, APs are started using the INIT-SIPI-SIPI
sequence. Before issuing the first SIPI request for an AP, the start
CS and IP are programmed into the AP jump table. Upon issuing the SIPI
request, the AP will awaken and jump to that start CS:IP address.

Signed-off-by: Tom Lendacky
[ jroedel@suse.de: - Adapted to different code base
		   - Moved AP table setup from SIPI sending path to
		     real-mode setup code
		   - Fix sparse warnings ]
Co-developed-by: Joerg Roedel
Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20200907131613.12703-67-joro@8bytes.org
---
 arch/x86/include/asm/sev-es.h   | 5 +++++
 arch/x86/include/uapi/asm/svm.h | 3 +++
 2 files changed, 8 insertions(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h
index 59176e8c6b81..db88e1c3442d 100644
--- a/arch/x86/include/asm/sev-es.h
+++ b/arch/x86/include/asm/sev-es.h
@@ -73,6 +73,9 @@ static inline u64 lower_bits(u64 val, unsigned int bits)
 	return (val & mask);
 }

+struct real_mode_header;
+enum stack_type;
+
 /* Early IDT entry points for #VC handler */
 extern void vc_no_ghcb(void);
 extern void vc_boot_ghcb(void);
@@ -92,9 +95,11 @@ static __always_inline void sev_es_ist_exit(void)
 	if (static_branch_unlikely(&sev_es_enable_key))
 		__sev_es_ist_exit();
 }
+extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
+static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
 #endif

 #endif

diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 8f36ae021a7f..346b8a7155e8 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -84,6 +84,9 @@
 /* SEV-ES software-defined VMGEXIT events */
 #define SVM_VMGEXIT_MMIO_READ			0x80000001
 #define SVM_VMGEXIT_MMIO_WRITE			0x80000002
+#define SVM_VMGEXIT_AP_JUMP_TABLE		0x80000005
+#define SVM_VMGEXIT_SET_AP_JUMP_TABLE		0
+#define SVM_VMGEXIT_GET_AP_JUMP_TABLE		1
 #define SVM_VMGEXIT_UNSUPPORTED_EVENT		0x8000ffff

 #define SVM_EXIT_ERR -1
--
cgit
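The declarations above are consumed by the real-mode setup code. A
condensed sketch of that consumer follows; get_jump_table_addr(), which
issues the SVM_VMGEXIT_AP_JUMP_TABLE request with
SVM_VMGEXIT_GET_AP_JUMP_TABLE, lives on the sev-es.c side and is only
assumed here.

/* Condensed sketch of sev_es_setup_ap_jump_table(); error paths trimmed. */
int sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
{
	u16 __iomem *jump_table;
	phys_addr_t pa;
	u16 startup_cs, startup_ip;

	/* Ask the hypervisor where the AP jump table lives. */
	pa = get_jump_table_addr();
	if (!pa || (pa & ~PAGE_MASK))
		return -EINVAL;

	/* Real-mode CS:IP of the SEV-ES trampoline entry point. */
	startup_cs = (u16)(rmh->trampoline_start >> 4);
	startup_ip = (u16)(rmh->sev_es_trampoline_start -
			   rmh->trampoline_start);

	jump_table = ioremap_encrypted(pa, PAGE_SIZE);
	if (!jump_table)
		return -EIO;

	writew(startup_ip, &jump_table[0]);	/* IP in the first word */
	writew(startup_cs, &jump_table[1]);	/* CS in the second word */

	iounmap(jump_table);

	return 0;
}

With the table programmed this way, an AP woken by the SIPI lands on
sev_es_trampoline_start, skipping the WBINVD and verify_cpu() steps it
cannot survive.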
From 520d030852b4c9babfce9a79d8b5320b6b5545e6 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Mon, 7 Sep 2020 15:16:08 +0200
Subject: x86/smpboot: Load TSS and getcpu GDT entry before loading IDT

The IDT on 64-bit contains vectors which use paranoid_entry() and/or
IST stacks. To make these vectors work, the TSS and the getcpu GDT
entry need to be set up before the IDT is loaded.

Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20200907131613.12703-68-joro@8bytes.org
---
 arch/x86/include/asm/processor.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 97143d87994c..615dd440bed8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -696,6 +696,7 @@ extern void load_direct_gdt(int);
 extern void load_fixmap_gdt(int);
 extern void load_percpu_segment(int);
 extern void cpu_init(void);
+extern void cpu_init_exception_handling(void);
 extern void cr4_init(void);

 static inline unsigned long get_debugctlmsr(void)
--
cgit

From 3ecacdbd23956a549d93023f86adc87b4a9d6520 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Mon, 7 Sep 2020 15:16:09 +0200
Subject: x86/head/64: Don't call verify_cpu() on starting APs

The APs are not ready to handle exceptions when verify_cpu() is called
in secondary_startup_64().

Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Reviewed-by: Kees Cook
Link: https://lkml.kernel.org/r/20200907131613.12703-69-joro@8bytes.org
---
 arch/x86/include/asm/realmode.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 4d4d853f6841..5db5d083c873 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -72,6 +72,7 @@ extern unsigned char startup_32_smp[];
 extern unsigned char boot_gdt[];
 #else
 extern unsigned char secondary_startup_64[];
+extern unsigned char secondary_startup_64_no_verify[];
 #endif

 static inline size_t real_mode_size_needed(void)
--
cgit

From 094794f59720d7e877a1eeb372ecedeed6b441ab Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Mon, 7 Sep 2020 15:16:10 +0200
Subject: x86/sev-es: Support CPU offline/online

Add a play_dead handler when running under SEV-ES. This is needed
because the hypervisor can't deliver an SIPI request to restart the
AP. Instead, the kernel has to issue a VMGEXIT to halt the VCPU until
the hypervisor wakes it up again.

Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20200907131613.12703-70-joro@8bytes.org
---
 arch/x86/include/uapi/asm/svm.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 346b8a7155e8..c1dcf3e114e4 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -84,6 +84,7 @@
 /* SEV-ES software-defined VMGEXIT events */
 #define SVM_VMGEXIT_MMIO_READ			0x80000001
 #define SVM_VMGEXIT_MMIO_WRITE			0x80000002
+#define SVM_VMGEXIT_AP_HLT_LOOP			0x80000004
 #define SVM_VMGEXIT_AP_JUMP_TABLE		0x80000005
 #define SVM_VMGEXIT_SET_AP_JUMP_TABLE		0
 #define SVM_VMGEXIT_GET_AP_JUMP_TABLE		1
--
cgit
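A minimal sketch of the play_dead path that uses the new event code
follows; this_cpu_ghcb() is a hypothetical stand-in for the real per-CPU
GHCB accessors in sev-es.c, and the open-coded 'rep; vmmcall' stands for
VMGEXIT.

/* Hypothetical sketch of parking an offlined AP under SEV-ES. */
static void sev_es_play_dead(void)
{
	struct ghcb *ghcb = this_cpu_ghcb();	/* hypothetical accessor */

	/* Ask the hypervisor to park this vCPU in a HLT loop. */
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	asm volatile("rep; vmmcall" ::: "memory");	/* VMGEXIT */

	/*
	 * The hypervisor halts the vCPU here. Execution resumes only
	 * when the CPU is onlined again via the AP jump table.
	 */
}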
From 4ca68e023b11e4d5908bf9ee326fab01111d77d5 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Mon, 7 Sep 2020 15:16:11 +0200
Subject: x86/sev-es: Handle NMI State

When running under SEV-ES, the kernel has to tell the hypervisor when
to open the NMI window again after an NMI was injected. This is done
with an NMI-complete message to the hypervisor.

Add code to the kernel's NMI handler to send this message right at the
beginning of do_nmi(). This always allows NMIs to nest.

[ bp: Mark __sev_es_nmi_complete() noinstr:
  vmlinux.o: warning: objtool: exc_nmi()+0x17: call to
  __sev_es_nmi_complete() leaves .noinstr.text section
  While at it, use __pa_nodebug() for the same reason due to
  CONFIG_DEBUG_VIRTUAL=y:
  vmlinux.o: warning: objtool: __sev_es_nmi_complete()+0xd9: call to
  __phys_addr() leaves .noinstr.text section ]

Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20200907131613.12703-71-joro@8bytes.org
---
 arch/x86/include/asm/sev-es.h   | 7 +++++++
 arch/x86/include/uapi/asm/svm.h | 1 +
 2 files changed, 8 insertions(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h
index db88e1c3442d..e919f09ae33c 100644
--- a/arch/x86/include/asm/sev-es.h
+++ b/arch/x86/include/asm/sev-es.h
@@ -96,10 +96,17 @@ static __always_inline void sev_es_ist_exit(void)
 		__sev_es_ist_exit();
 }
 extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
+extern void __sev_es_nmi_complete(void);
+static __always_inline void sev_es_nmi_complete(void)
+{
+	if (static_branch_unlikely(&sev_es_enable_key))
+		__sev_es_nmi_complete();
+}
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
 static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
+static inline void sev_es_nmi_complete(void) { }
 #endif

 #endif

diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index c1dcf3e114e4..a7a3403645e5 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -84,6 +84,7 @@
 /* SEV-ES software-defined VMGEXIT events */
 #define SVM_VMGEXIT_MMIO_READ			0x80000001
 #define SVM_VMGEXIT_MMIO_WRITE			0x80000002
+#define SVM_VMGEXIT_NMI_COMPLETE		0x80000003
 #define SVM_VMGEXIT_AP_HLT_LOOP			0x80000004
 #define SVM_VMGEXIT_AP_JUMP_TABLE		0x80000005
 #define SVM_VMGEXIT_SET_AP_JUMP_TABLE		0
--
cgit

From 39336f4ffb2478ad384075cf4ba7ef2e5db2bbd7 Mon Sep 17 00:00:00 2001
From: Tom Lendacky
Date: Mon, 7 Sep 2020 15:16:12 +0200
Subject: x86/efi: Add GHCB mappings when SEV-ES is active

Calling down to EFI runtime services can result in the firmware
performing VMGEXIT calls. The firmware is likely to use the GHCB of
the OS (e.g., for setting EFI variables), so each GHCB in the system
needs to be identity-mapped in the EFI page tables, as unencrypted, to
avoid page faults.

Signed-off-by: Tom Lendacky
[ jroedel@suse.de: Moved GHCB mapping loop to sev-es.c ]
Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Acked-by: Ard Biesheuvel
Link: https://lkml.kernel.org/r/20200907131613.12703-72-joro@8bytes.org
---
 arch/x86/include/asm/sev-es.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h
index e919f09ae33c..cf1d957c7091 100644
--- a/arch/x86/include/asm/sev-es.h
+++ b/arch/x86/include/asm/sev-es.h
@@ -102,11 +102,13 @@ static __always_inline void sev_es_nmi_complete(void)
 	if (static_branch_unlikely(&sev_es_enable_key))
 		__sev_es_nmi_complete();
 }
+extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
 static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
 static inline void sev_es_nmi_complete(void) { }
+static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
 #endif

 #endif
--
cgit
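For orientation, the mapping loop behind this declaration sits on the
sev-es.c side; in condensed form it looks roughly like the following,
where runtime_data is the per-CPU structure holding each GHCB and is
only assumed here, not part of this header change.

/* Condensed sketch of the sev-es.c implementation. */
int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
{
	unsigned long address, pflags;
	int cpu;

	if (!sev_es_active())
		return 0;

	/* No _PAGE_ENC: the GHCBs must stay unencrypted (shared). */
	pflags = _PAGE_NX | _PAGE_RW;

	for_each_possible_cpu(cpu) {
		address = __pa(&per_cpu(runtime_data, cpu)->ghcb_page);

		/* Identity-map one page per GHCB into the EFI page table. */
		if (kernel_map_pages_in_pgd(pgd, address >> PAGE_SHIFT,
					    address, 1, pflags))
			return 1;
	}

	return 0;
}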