Diffstat (limited to 'arch/x86/coco')
| -rw-r--r-- | arch/x86/coco/Makefile | 1 |
| -rw-r--r-- | arch/x86/coco/core.c | 155 |
| -rw-r--r-- | arch/x86/coco/sev/Makefile | 10 |
| -rw-r--r-- | arch/x86/coco/sev/core.c | 2431 |
| -rw-r--r-- | arch/x86/coco/sev/noinstr.c | 182 |
| -rw-r--r-- | arch/x86/coco/sev/vc-handle.c | 1080 |
| -rw-r--r-- | arch/x86/coco/sev/vc-shared.c | 656 |
| -rw-r--r-- | arch/x86/coco/tdx/Makefile | 2 |
| -rw-r--r-- | arch/x86/coco/tdx/debug.c | 69 |
| -rw-r--r-- | arch/x86/coco/tdx/tdcall.S | 208 |
| -rw-r--r-- | arch/x86/coco/tdx/tdx-shared.c | 91 |
| -rw-r--r-- | arch/x86/coco/tdx/tdx.c | 706 |
12 files changed, 5223 insertions, 368 deletions
diff --git a/arch/x86/coco/Makefile b/arch/x86/coco/Makefile index c816acf78b6a..eabdc7486538 100644 --- a/arch/x86/coco/Makefile +++ b/arch/x86/coco/Makefile @@ -6,3 +6,4 @@ CFLAGS_core.o += -fno-stack-protector obj-y += core.o obj-$(CONFIG_INTEL_TDX_GUEST) += tdx/ +obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev/ diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c index 49b44f881484..989ca9f72ba3 100644 --- a/arch/x86/coco/core.c +++ b/arch/x86/coco/core.c @@ -3,24 +3,50 @@ * Confidential Computing Platform Capability checks * * Copyright (C) 2021 Advanced Micro Devices, Inc. + * Copyright (C) 2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * * Author: Tom Lendacky <thomas.lendacky@amd.com> */ #include <linux/export.h> #include <linux/cc_platform.h> +#include <linux/string.h> +#include <linux/random.h> +#include <asm/archrandom.h> #include <asm/coco.h> #include <asm/processor.h> -static enum cc_vendor vendor __ro_after_init; -static u64 cc_mask __ro_after_init; +enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE; +SYM_PIC_ALIAS(cc_vendor); +u64 cc_mask __ro_after_init; +SYM_PIC_ALIAS(cc_mask); -static bool intel_cc_platform_has(enum cc_attr attr) +static struct cc_attr_flags { + __u64 host_sev_snp : 1, + __resv : 63; +} cc_flags; + +static bool noinstr intel_cc_platform_has(enum cc_attr attr) { switch (attr) { case CC_ATTR_GUEST_UNROLL_STRING_IO: - case CC_ATTR_HOTPLUG_DISABLED: + case CC_ATTR_GUEST_MEM_ENCRYPT: + case CC_ATTR_MEM_ENCRYPT: + return true; + default: + return false; + } +} + +/* + * Handle the SEV-SNP vTOM case where sme_me_mask is zero, and + * the other levels of SME/SEV functionality, including C-bit + * based SEV-SNP, are not enabled. + */ +static __maybe_unused __always_inline bool amd_cc_platform_vtom(enum cc_attr attr) +{ + switch (attr) { case CC_ATTR_GUEST_MEM_ENCRYPT: case CC_ATTR_MEM_ENCRYPT: return true; @@ -41,9 +67,13 @@ static bool intel_cc_platform_has(enum cc_attr attr) * up under SME the trampoline area cannot be encrypted, whereas under SEV * the trampoline area must be encrypted. */ -static bool amd_cc_platform_has(enum cc_attr attr) +static bool noinstr amd_cc_platform_has(enum cc_attr attr) { #ifdef CONFIG_AMD_MEM_ENCRYPT + + if (sev_status & MSR_AMD64_SNP_VTOM) + return amd_cc_platform_vtom(attr); + switch (attr) { case CC_ATTR_MEM_ENCRYPT: return sme_me_mask; @@ -68,6 +98,15 @@ static bool amd_cc_platform_has(enum cc_attr attr) case CC_ATTR_GUEST_SEV_SNP: return sev_status & MSR_AMD64_SEV_SNP_ENABLED; + case CC_ATTR_GUEST_SNP_SECURE_TSC: + return sev_status & MSR_AMD64_SNP_SECURE_TSC; + + case CC_ATTR_HOST_SEV_SNP: + return cc_flags.host_sev_snp; + + case CC_ATTR_SNP_SECURE_AVIC: + return sev_status & MSR_AMD64_SNP_SECURE_AVIC; + default: return false; } @@ -76,20 +115,13 @@ static bool amd_cc_platform_has(enum cc_attr attr) #endif } -static bool hyperv_cc_platform_has(enum cc_attr attr) +bool noinstr cc_platform_has(enum cc_attr attr) { - return attr == CC_ATTR_GUEST_MEM_ENCRYPT; -} - -bool cc_platform_has(enum cc_attr attr) -{ - switch (vendor) { + switch (cc_vendor) { case CC_VENDOR_AMD: return amd_cc_platform_has(attr); case CC_VENDOR_INTEL: return intel_cc_platform_has(attr); - case CC_VENDOR_HYPERV: - return hyperv_cc_platform_has(attr); default: return false; } @@ -103,11 +135,14 @@ u64 cc_mkenc(u64 val) * encryption status of the page. * * - for AMD, bit *set* means the page is encrypted - * - for Intel *clear* means encrypted. 
+ * - for AMD with vTOM and for Intel, *clear* means encrypted */ - switch (vendor) { + switch (cc_vendor) { case CC_VENDOR_AMD: - return val | cc_mask; + if (sev_status & MSR_AMD64_SNP_VTOM) + return val & ~cc_mask; + else + return val | cc_mask; case CC_VENDOR_INTEL: return val & ~cc_mask; default: @@ -118,9 +153,12 @@ u64 cc_mkenc(u64 val) u64 cc_mkdec(u64 val) { /* See comment in cc_mkenc() */ - switch (vendor) { + switch (cc_vendor) { case CC_VENDOR_AMD: - return val & ~cc_mask; + if (sev_status & MSR_AMD64_SNP_VTOM) + return val | cc_mask; + else + return val & ~cc_mask; case CC_VENDOR_INTEL: return val | cc_mask; default: @@ -129,12 +167,83 @@ u64 cc_mkdec(u64 val) } EXPORT_SYMBOL_GPL(cc_mkdec); -__init void cc_set_vendor(enum cc_vendor v) +static void amd_cc_platform_clear(enum cc_attr attr) { - vendor = v; + switch (attr) { + case CC_ATTR_HOST_SEV_SNP: + cc_flags.host_sev_snp = 0; + break; + default: + break; + } } -__init void cc_set_mask(u64 mask) +void cc_platform_clear(enum cc_attr attr) { - cc_mask = mask; + switch (cc_vendor) { + case CC_VENDOR_AMD: + amd_cc_platform_clear(attr); + break; + default: + break; + } +} + +static void amd_cc_platform_set(enum cc_attr attr) +{ + switch (attr) { + case CC_ATTR_HOST_SEV_SNP: + cc_flags.host_sev_snp = 1; + break; + default: + break; + } +} + +void cc_platform_set(enum cc_attr attr) +{ + switch (cc_vendor) { + case CC_VENDOR_AMD: + amd_cc_platform_set(attr); + break; + default: + break; + } +} + +__init void cc_random_init(void) +{ + /* + * The seed is 32 bytes (in units of longs), which is 256 bits, which + * is the security level that the RNG is targeting. + */ + unsigned long rng_seed[32 / sizeof(long)]; + size_t i, longs; + + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + return; + + /* + * Since the CoCo threat model includes the host, the only reliable + * source of entropy that can be neither observed nor manipulated is + * RDRAND. Usually, RDRAND failure is considered tolerable, but since + * CoCo guests have no other unobservable source of entropy, it's + * important to at least ensure the RNG gets some initial random seeds. + */ + for (i = 0; i < ARRAY_SIZE(rng_seed); i += longs) { + longs = arch_get_random_longs(&rng_seed[i], ARRAY_SIZE(rng_seed) - i); + + /* + * A zero return value means that the guest doesn't have RDRAND + * or the CPU is physically broken, and in both cases that + * means most crypto inside of the CoCo instance will be + * broken, defeating the purpose of CoCo in the first place. So + * just panic here because it's absolutely unsafe to continue + * executing. 
+ */ + if (longs == 0) + panic("RDRAND is defective."); + } + add_device_randomness(rng_seed, sizeof(rng_seed)); + memzero_explicit(rng_seed, sizeof(rng_seed)); } diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile new file mode 100644 index 000000000000..3b8ae214a6a6 --- /dev/null +++ b/arch/x86/coco/sev/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-y += core.o noinstr.o vc-handle.o + +# Clang 14 and older may fail to respect __no_sanitize_undefined when inlining +UBSAN_SANITIZE_noinstr.o := n + +# GCC may fail to respect __no_sanitize_address or __no_kcsan when inlining +KASAN_SANITIZE_noinstr.o := n +KCSAN_SANITIZE_noinstr.o := n diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c new file mode 100644 index 000000000000..9ae3b11754e6 --- /dev/null +++ b/arch/x86/coco/sev/core.c @@ -0,0 +1,2431 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Memory Encryption Support + * + * Copyright (C) 2019 SUSE + * + * Author: Joerg Roedel <jroedel@suse.de> + */ + +#define pr_fmt(fmt) "SEV: " fmt + +#include <linux/sched/debug.h> /* For show_regs() */ +#include <linux/percpu-defs.h> +#include <linux/cc_platform.h> +#include <linux/printk.h> +#include <linux/mm_types.h> +#include <linux/set_memory.h> +#include <linux/memblock.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/cpumask.h> +#include <linux/efi.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/psp-sev.h> +#include <linux/dmi.h> +#include <uapi/linux/sev-guest.h> +#include <crypto/gcm.h> + +#include <asm/init.h> +#include <asm/cpu_entry_area.h> +#include <asm/stacktrace.h> +#include <asm/sev.h> +#include <asm/sev-internal.h> +#include <asm/insn-eval.h> +#include <asm/fpu/xcr.h> +#include <asm/processor.h> +#include <asm/realmode.h> +#include <asm/setup.h> +#include <asm/traps.h> +#include <asm/svm.h> +#include <asm/smp.h> +#include <asm/cpu.h> +#include <asm/apic.h> +#include <asm/cpuid/api.h> +#include <asm/cmdline.h> +#include <asm/msr.h> + +/* Bitmap of SEV features supported by the hypervisor */ +u64 sev_hv_features __ro_after_init; +SYM_PIC_ALIAS(sev_hv_features); + +/* Secrets page physical address from the CC blob */ +u64 sev_secrets_pa __ro_after_init; +SYM_PIC_ALIAS(sev_secrets_pa); + +/* For early boot SVSM communication */ +struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE); +SYM_PIC_ALIAS(boot_svsm_ca_page); + +/* + * SVSM related information: + * During boot, the page tables are set up as identity mapped and later + * changed to use kernel virtual addresses. Maintain separate virtual and + * physical addresses for the CAA to allow SVSM functions to be used during + * early boot, both with identity mapped virtual addresses and proper kernel + * virtual addresses. 
+ */ +u64 boot_svsm_caa_pa __ro_after_init; +SYM_PIC_ALIAS(boot_svsm_caa_pa); + +DEFINE_PER_CPU(struct svsm_ca *, svsm_caa); +DEFINE_PER_CPU(u64, svsm_caa_pa); + +static inline struct svsm_ca *svsm_get_caa(void) +{ + if (sev_cfg.use_cas) + return this_cpu_read(svsm_caa); + else + return rip_rel_ptr(&boot_svsm_ca_page); +} + +static inline u64 svsm_get_caa_pa(void) +{ + if (sev_cfg.use_cas) + return this_cpu_read(svsm_caa_pa); + else + return boot_svsm_caa_pa; +} + +/* AP INIT values as documented in the APM2 section "Processor Initialization State" */ +#define AP_INIT_CS_LIMIT 0xffff +#define AP_INIT_DS_LIMIT 0xffff +#define AP_INIT_LDTR_LIMIT 0xffff +#define AP_INIT_GDTR_LIMIT 0xffff +#define AP_INIT_IDTR_LIMIT 0xffff +#define AP_INIT_TR_LIMIT 0xffff +#define AP_INIT_RFLAGS_DEFAULT 0x2 +#define AP_INIT_DR6_DEFAULT 0xffff0ff0 +#define AP_INIT_GPAT_DEFAULT 0x0007040600070406ULL +#define AP_INIT_XCR0_DEFAULT 0x1 +#define AP_INIT_X87_FTW_DEFAULT 0x5555 +#define AP_INIT_X87_FCW_DEFAULT 0x0040 +#define AP_INIT_CR0_DEFAULT 0x60000010 +#define AP_INIT_MXCSR_DEFAULT 0x1f80 + +static const char * const sev_status_feat_names[] = { + [MSR_AMD64_SEV_ENABLED_BIT] = "SEV", + [MSR_AMD64_SEV_ES_ENABLED_BIT] = "SEV-ES", + [MSR_AMD64_SEV_SNP_ENABLED_BIT] = "SEV-SNP", + [MSR_AMD64_SNP_VTOM_BIT] = "vTom", + [MSR_AMD64_SNP_REFLECT_VC_BIT] = "ReflectVC", + [MSR_AMD64_SNP_RESTRICTED_INJ_BIT] = "RI", + [MSR_AMD64_SNP_ALT_INJ_BIT] = "AI", + [MSR_AMD64_SNP_DEBUG_SWAP_BIT] = "DebugSwap", + [MSR_AMD64_SNP_PREVENT_HOST_IBS_BIT] = "NoHostIBS", + [MSR_AMD64_SNP_BTB_ISOLATION_BIT] = "BTBIsol", + [MSR_AMD64_SNP_VMPL_SSS_BIT] = "VmplSSS", + [MSR_AMD64_SNP_SECURE_TSC_BIT] = "SecureTSC", + [MSR_AMD64_SNP_VMGEXIT_PARAM_BIT] = "VMGExitParam", + [MSR_AMD64_SNP_IBS_VIRT_BIT] = "IBSVirt", + [MSR_AMD64_SNP_VMSA_REG_PROT_BIT] = "VMSARegProt", + [MSR_AMD64_SNP_SMT_PROT_BIT] = "SMTProt", + [MSR_AMD64_SNP_SECURE_AVIC_BIT] = "SecureAVIC", +}; + +/* + * For Secure TSC guests, the BSP fetches TSC_INFO using SNP guest messaging and + * initializes snp_tsc_scale and snp_tsc_offset. These values are replicated + * across the APs VMSA fields (TSC_SCALE and TSC_OFFSET). + */ +static u64 snp_tsc_scale __ro_after_init; +static u64 snp_tsc_offset __ro_after_init; +static unsigned long snp_tsc_freq_khz __ro_after_init; + +DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data); +DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa); + +/* + * SVSM related information: + * When running under an SVSM, the VMPL that Linux is executing at must be + * non-zero. The VMPL is therefore used to indicate the presence of an SVSM. + */ +u8 snp_vmpl __ro_after_init; +EXPORT_SYMBOL_GPL(snp_vmpl); +SYM_PIC_ALIAS(snp_vmpl); + +/* + * Since feature negotiation related variables are set early in the boot + * process they must reside in the .data section so as not to be zeroed + * out when the .bss section is later cleared. + * + * GHCB protocol version negotiated with the hypervisor. 
+ */ +u16 ghcb_version __ro_after_init; +SYM_PIC_ALIAS(ghcb_version); + +/* For early boot hypervisor communication in SEV-ES enabled guests */ +static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE); + +/* + * Needs to be in the .data section because we need it NULL before bss is + * cleared + */ +struct ghcb *boot_ghcb __section(".data"); + +static u64 __init get_snp_jump_table_addr(void) +{ + struct snp_secrets_page *secrets; + void __iomem *mem; + u64 addr; + + mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE); + if (!mem) { + pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n"); + return 0; + } + + secrets = (__force struct snp_secrets_page *)mem; + + addr = secrets->os_area.ap_jump_table_pa; + iounmap(mem); + + return addr; +} + +static u64 __init get_jump_table_addr(void) +{ + struct ghcb_state state; + unsigned long flags; + struct ghcb *ghcb; + u64 ret = 0; + + if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + return get_snp_jump_table_addr(); + + local_irq_save(flags); + + ghcb = __sev_get_ghcb(&state); + + vc_ghcb_invalidate(ghcb); + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE); + ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE); + ghcb_set_sw_exit_info_2(ghcb, 0); + + sev_es_wr_ghcb_msr(__pa(ghcb)); + VMGEXIT(); + + if (ghcb_sw_exit_info_1_is_valid(ghcb) && + ghcb_sw_exit_info_2_is_valid(ghcb)) + ret = ghcb->save.sw_exit_info_2; + + __sev_put_ghcb(&state); + + local_irq_restore(flags); + + return ret; +} + +static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call) +{ + struct es_em_ctxt ctxt; + u8 pending = 0; + + vc_ghcb_invalidate(ghcb); + + /* + * Fill in protocol and format specifiers. This can be called very early + * in the boot, so use rip-relative references as needed. + */ + ghcb->protocol_version = ghcb_version; + ghcb->ghcb_usage = GHCB_DEFAULT_USAGE; + + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL); + ghcb_set_sw_exit_info_1(ghcb, 0); + ghcb_set_sw_exit_info_2(ghcb, 0); + + sev_es_wr_ghcb_msr(__pa(ghcb)); + + svsm_issue_call(call, &pending); + + if (pending) + return -EINVAL; + + switch (verify_exception_info(ghcb, &ctxt)) { + case ES_OK: + break; + case ES_EXCEPTION: + vc_forward_exception(&ctxt); + fallthrough; + default: + return -EINVAL; + } + + return svsm_process_result_codes(call); +} + +static int svsm_perform_call_protocol(struct svsm_call *call) +{ + struct ghcb_state state; + unsigned long flags; + struct ghcb *ghcb; + int ret; + + flags = native_local_irq_save(); + + if (sev_cfg.ghcbs_initialized) + ghcb = __sev_get_ghcb(&state); + else if (boot_ghcb) + ghcb = boot_ghcb; + else + ghcb = NULL; + + do { + ret = ghcb ? 
svsm_perform_ghcb_protocol(ghcb, call) + : __pi_svsm_perform_msr_protocol(call); + } while (ret == -EAGAIN); + + if (sev_cfg.ghcbs_initialized) + __sev_put_ghcb(&state); + + native_local_irq_restore(flags); + + return ret; +} + +static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size, + int ret, u64 svsm_ret) +{ + WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n", + pfn, action, page_size, ret, svsm_ret); + + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE); +} + +static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret) +{ + unsigned int page_size; + bool action; + u64 pfn; + + pfn = pc->entry[pc->cur_index].pfn; + action = pc->entry[pc->cur_index].action; + page_size = pc->entry[pc->cur_index].page_size; + + __pval_terminate(pfn, action, page_size, ret, svsm_ret); +} + +static void pval_pages(struct snp_psc_desc *desc) +{ + struct psc_entry *e; + unsigned long vaddr; + unsigned int size; + unsigned int i; + bool validate; + u64 pfn; + int rc; + + for (i = 0; i <= desc->hdr.end_entry; i++) { + e = &desc->entries[i]; + + pfn = e->gfn; + vaddr = (unsigned long)pfn_to_kaddr(pfn); + size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; + validate = e->operation == SNP_PAGE_STATE_PRIVATE; + + rc = pvalidate(vaddr, size, validate); + if (!rc) + continue; + + if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) { + unsigned long vaddr_end = vaddr + PMD_SIZE; + + for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) { + rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate); + if (rc) + __pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0); + } + } else { + __pval_terminate(pfn, validate, size, rc, 0); + } + } +} + +static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action, + struct svsm_pvalidate_call *pc) +{ + struct svsm_pvalidate_entry *pe; + + /* Nothing in the CA yet */ + pc->num_entries = 0; + pc->cur_index = 0; + + pe = &pc->entry[0]; + + while (pfn < pfn_end) { + pe->page_size = RMP_PG_SIZE_4K; + pe->action = action; + pe->ignore_cf = 0; + pe->rsvd = 0; + pe->pfn = pfn; + + pe++; + pfn++; + + pc->num_entries++; + if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) + break; + } + + return pfn; +} + +static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry, + struct svsm_pvalidate_call *pc) +{ + struct svsm_pvalidate_entry *pe; + struct psc_entry *e; + + /* Nothing in the CA yet */ + pc->num_entries = 0; + pc->cur_index = 0; + + pe = &pc->entry[0]; + e = &desc->entries[desc_entry]; + + while (desc_entry <= desc->hdr.end_entry) { + pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; + pe->action = e->operation == SNP_PAGE_STATE_PRIVATE; + pe->ignore_cf = 0; + pe->rsvd = 0; + pe->pfn = e->gfn; + + pe++; + e++; + + desc_entry++; + pc->num_entries++; + if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) + break; + } + + return desc_entry; +} + +static void svsm_pval_pages(struct snp_psc_desc *desc) +{ + struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY]; + unsigned int i, pv_4k_count = 0; + struct svsm_pvalidate_call *pc; + struct svsm_call call = {}; + unsigned long flags; + bool action; + u64 pc_pa; + int ret; + + /* + * This can be called very early in the boot, use native functions in + * order to avoid paravirt issues. + */ + flags = native_local_irq_save(); + + /* + * The SVSM calling area (CA) can support processing 510 entries at a + * time. 
Loop through the Page State Change descriptor until the CA is + * full or the last entry in the descriptor is reached, at which time + * the SVSM is invoked. This repeats until all entries in the descriptor + * are processed. + */ + call.caa = svsm_get_caa(); + + pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer; + pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer); + + /* Protocol 0, Call ID 1 */ + call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE); + call.rcx = pc_pa; + + for (i = 0; i <= desc->hdr.end_entry;) { + i = svsm_build_ca_from_psc_desc(desc, i, pc); + + do { + ret = svsm_perform_call_protocol(&call); + if (!ret) + continue; + + /* + * Check if the entry failed because of an RMP mismatch (a + * PVALIDATE at 2M was requested, but the page is mapped in + * the RMP as 4K). + */ + + if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH && + pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) { + /* Save this entry for post-processing at 4K */ + pv_4k[pv_4k_count++] = pc->entry[pc->cur_index]; + + /* Skip to the next one unless at the end of the list */ + pc->cur_index++; + if (pc->cur_index < pc->num_entries) + ret = -EAGAIN; + else + ret = 0; + } + } while (ret == -EAGAIN); + + if (ret) + svsm_pval_terminate(pc, ret, call.rax_out); + } + + /* Process any entries that failed to be validated at 2M and validate them at 4K */ + for (i = 0; i < pv_4k_count; i++) { + u64 pfn, pfn_end; + + action = pv_4k[i].action; + pfn = pv_4k[i].pfn; + pfn_end = pfn + 512; + + while (pfn < pfn_end) { + pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc); + + ret = svsm_perform_call_protocol(&call); + if (ret) + svsm_pval_terminate(pc, ret, call.rax_out); + } + } + + native_local_irq_restore(flags); +} + +static void pvalidate_pages(struct snp_psc_desc *desc) +{ + struct psc_entry *e; + unsigned int i; + + if (snp_vmpl) + svsm_pval_pages(desc); + else + pval_pages(desc); + + /* + * If not affected by the cache-coherency vulnerability there is no need + * to perform the cache eviction mitigation. + */ + if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO)) + return; + + for (i = 0; i <= desc->hdr.end_entry; i++) { + e = &desc->entries[i]; + + /* + * If validating memory (making it private) perform the cache + * eviction mitigation. + */ + if (e->operation == SNP_PAGE_STATE_PRIVATE) + sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1); + } +} + +static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc) +{ + int cur_entry, end_entry, ret = 0; + struct snp_psc_desc *data; + struct es_em_ctxt ctxt; + + vc_ghcb_invalidate(ghcb); + + /* Copy the input desc into GHCB shared buffer */ + data = (struct snp_psc_desc *)ghcb->shared_buffer; + memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc))); + + /* + * As per the GHCB specification, the hypervisor can resume the guest + * before processing all the entries. Check whether all the entries + * are processed. If not, then keep retrying. Note, the hypervisor + * will update the data memory directly to indicate the status, so + * reference the data->hdr everywhere. + * + * The strategy here is to wait for the hypervisor to change the page + * state in the RMP table before guest accesses the memory pages. If the + * page state change was not successful, then later memory access will + * result in a crash. 
+ */ + cur_entry = data->hdr.cur_entry; + end_entry = data->hdr.end_entry; + + while (data->hdr.cur_entry <= data->hdr.end_entry) { + ghcb_set_sw_scratch(ghcb, (u64)__pa(data)); + + /* This will advance the shared buffer data points to. */ + ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0); + + /* + * Page State Change VMGEXIT can pass error code through + * exit_info_2. + */ + if (WARN(ret || ghcb->save.sw_exit_info_2, + "SNP: PSC failed ret=%d exit_info_2=%llx\n", + ret, ghcb->save.sw_exit_info_2)) { + ret = 1; + goto out; + } + + /* Verify that reserved bit is not set */ + if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) { + ret = 1; + goto out; + } + + /* + * Sanity check that entry processing is not going backwards. + * This will happen only if hypervisor is tricking us. + */ + if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry, +"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n", + end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) { + ret = 1; + goto out; + } + } + +out: + return ret; +} + +static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr, + unsigned long vaddr_end, int op) +{ + struct ghcb_state state; + bool use_large_entry; + struct psc_hdr *hdr; + struct psc_entry *e; + unsigned long flags; + unsigned long pfn; + struct ghcb *ghcb; + int i; + + hdr = &data->hdr; + e = data->entries; + + memset(data, 0, sizeof(*data)); + i = 0; + + while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) { + hdr->end_entry = i; + + if (is_vmalloc_addr((void *)vaddr)) { + pfn = vmalloc_to_pfn((void *)vaddr); + use_large_entry = false; + } else { + pfn = __pa(vaddr) >> PAGE_SHIFT; + use_large_entry = true; + } + + e->gfn = pfn; + e->operation = op; + + if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) && + (vaddr_end - vaddr) >= PMD_SIZE) { + e->pagesize = RMP_PG_SIZE_2M; + vaddr += PMD_SIZE; + } else { + e->pagesize = RMP_PG_SIZE_4K; + vaddr += PAGE_SIZE; + } + + e++; + i++; + } + + /* Page validation must be rescinded before changing to shared */ + if (op == SNP_PAGE_STATE_SHARED) + pvalidate_pages(data); + + local_irq_save(flags); + + if (sev_cfg.ghcbs_initialized) + ghcb = __sev_get_ghcb(&state); + else + ghcb = boot_ghcb; + + /* Invoke the hypervisor to perform the page state changes */ + if (!ghcb || vmgexit_psc(ghcb, data)) + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC); + + if (sev_cfg.ghcbs_initialized) + __sev_put_ghcb(&state); + + local_irq_restore(flags); + + /* Page validation must be performed after changing to private */ + if (op == SNP_PAGE_STATE_PRIVATE) + pvalidate_pages(data); + + return vaddr; +} + +static void set_pages_state(unsigned long vaddr, unsigned long npages, int op) +{ + struct snp_psc_desc desc; + unsigned long vaddr_end; + + /* Use the MSR protocol when a GHCB is not available. 
*/ + if (!boot_ghcb) { + struct psc_desc d = { op, svsm_get_caa(), svsm_get_caa_pa() }; + + return early_set_pages_state(vaddr, __pa(vaddr), npages, &d); + } + + vaddr = vaddr & PAGE_MASK; + vaddr_end = vaddr + (npages << PAGE_SHIFT); + + while (vaddr < vaddr_end) + vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op); +} + +void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) +{ + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + return; + + set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED); +} + +void snp_set_memory_private(unsigned long vaddr, unsigned long npages) +{ + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + return; + + set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE); +} + +void snp_accept_memory(phys_addr_t start, phys_addr_t end) +{ + unsigned long vaddr, npages; + + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + return; + + vaddr = (unsigned long)__va(start); + npages = (end - start) >> PAGE_SHIFT; + + set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE); +} + +static int vmgexit_ap_control(u64 event, struct sev_es_save_area *vmsa, u32 apic_id) +{ + bool create = event != SVM_VMGEXIT_AP_DESTROY; + struct ghcb_state state; + unsigned long flags; + struct ghcb *ghcb; + int ret = 0; + + local_irq_save(flags); + + ghcb = __sev_get_ghcb(&state); + + vc_ghcb_invalidate(ghcb); + + if (create) + ghcb_set_rax(ghcb, vmsa->sev_features); + + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION); + ghcb_set_sw_exit_info_1(ghcb, + ((u64)apic_id << 32) | + ((u64)snp_vmpl << 16) | + event); + ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa)); + + sev_es_wr_ghcb_msr(__pa(ghcb)); + VMGEXIT(); + + if (!ghcb_sw_exit_info_1_is_valid(ghcb) || + lower_32_bits(ghcb->save.sw_exit_info_1)) { + pr_err("SNP AP %s error\n", (create ? "CREATE" : "DESTROY")); + ret = -EINVAL; + } + + __sev_put_ghcb(&state); + + local_irq_restore(flags); + + return ret; +} + +static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa) +{ + int ret; + + if (snp_vmpl) { + struct svsm_call call = {}; + unsigned long flags; + + local_irq_save(flags); + + call.caa = this_cpu_read(svsm_caa); + call.rcx = __pa(va); + + if (make_vmsa) { + /* Protocol 0, Call ID 2 */ + call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU); + call.rdx = __pa(caa); + call.r8 = apic_id; + } else { + /* Protocol 0, Call ID 3 */ + call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU); + } + + ret = svsm_perform_call_protocol(&call); + + local_irq_restore(flags); + } else { + /* + * If the kernel runs at VMPL0, it can change the VMSA + * bit for a page using the RMPADJUST instruction. + * However, for the instruction to succeed it must + * target the permissions of a lesser privileged (higher + * numbered) VMPL level, so use VMPL1. 
+ */ + u64 attrs = 1; + + if (make_vmsa) + attrs |= RMPADJUST_VMSA_PAGE_BIT; + + ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs); + } + + return ret; +} + +static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id) +{ + int err; + + err = snp_set_vmsa(vmsa, NULL, apic_id, false); + if (err) + pr_err("clear VMSA page failed (%u), leaking page\n", err); + else + free_page((unsigned long)vmsa); +} + +static void set_pte_enc(pte_t *kpte, int level, void *va) +{ + struct pte_enc_desc d = { + .kpte = kpte, + .pte_level = level, + .va = va, + .encrypt = true + }; + + prepare_pte_enc(&d); + set_pte_enc_mask(kpte, d.pfn, d.new_pgprot); +} + +static void unshare_all_memory(void) +{ + unsigned long addr, end, size, ghcb; + struct sev_es_runtime_data *data; + unsigned int npages, level; + bool skipped_addr; + pte_t *pte; + int cpu; + + /* Unshare the direct mapping. */ + addr = PAGE_OFFSET; + end = PAGE_OFFSET + get_max_mapped(); + + while (addr < end) { + pte = lookup_address(addr, &level); + size = page_level_size(level); + npages = size / PAGE_SIZE; + skipped_addr = false; + + if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) { + addr += size; + continue; + } + + /* + * Ensure that all the per-CPU GHCBs are made private at the + * end of the unsharing loop so that the switch to the slower + * MSR protocol happens last. + */ + for_each_possible_cpu(cpu) { + data = per_cpu(runtime_data, cpu); + ghcb = (unsigned long)&data->ghcb_page; + + /* Handle the case of a huge page containing the GHCB page */ + if (addr <= ghcb && ghcb < addr + size) { + skipped_addr = true; + break; + } + } + + if (!skipped_addr) { + set_pte_enc(pte, level, (void *)addr); + snp_set_memory_private(addr, npages); + } + addr += size; + } + + /* Unshare all bss decrypted memory. */ + addr = (unsigned long)__start_bss_decrypted; + end = (unsigned long)__start_bss_decrypted_unused; + npages = (end - addr) >> PAGE_SHIFT; + + for (; addr < end; addr += PAGE_SIZE) { + pte = lookup_address(addr, &level); + if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) + continue; + + set_pte_enc(pte, level, (void *)addr); + } + addr = (unsigned long)__start_bss_decrypted; + snp_set_memory_private(addr, npages); + + __flush_tlb_all(); +} + +/* Stop new private<->shared conversions */ +void snp_kexec_begin(void) +{ + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + return; + + if (!IS_ENABLED(CONFIG_KEXEC_CORE)) + return; + + /* + * Crash kernel ends up here with interrupts disabled: can't wait for + * conversions to finish. + * + * If race happened, just report and proceed. + */ + if (!set_memory_enc_stop_conversion()) + pr_warn("Failed to stop shared<->private conversions\n"); +} + +/* + * Shutdown all APs except the one handling kexec/kdump and clearing + * the VMSA tag on AP's VMSA pages as they are not being used as + * VMSA page anymore. + */ +static void shutdown_all_aps(void) +{ + struct sev_es_save_area *vmsa; + int apic_id, this_cpu, cpu; + + this_cpu = get_cpu(); + + /* + * APs are already in HLT loop when enc_kexec_finish() callback + * is invoked. + */ + for_each_present_cpu(cpu) { + vmsa = per_cpu(sev_vmsa, cpu); + + /* + * The BSP or offlined APs do not have guest allocated VMSA + * and there is no need to clear the VMSA tag for this page. + */ + if (!vmsa) + continue; + + /* + * Cannot clear the VMSA tag for the currently running vCPU. 
+ */ + if (this_cpu == cpu) { + unsigned long pa; + struct page *p; + + pa = __pa(vmsa); + /* + * Mark the VMSA page of the running vCPU as offline + * so that is excluded and not touched by makedumpfile + * while generating vmcore during kdump. + */ + p = pfn_to_online_page(pa >> PAGE_SHIFT); + if (p) + __SetPageOffline(p); + continue; + } + + apic_id = cpuid_to_apicid[cpu]; + + /* + * Issue AP destroy to ensure AP gets kicked out of guest mode + * to allow using RMPADJUST to remove the VMSA tag on it's + * VMSA page. + */ + vmgexit_ap_control(SVM_VMGEXIT_AP_DESTROY, vmsa, apic_id); + snp_cleanup_vmsa(vmsa, apic_id); + } + + put_cpu(); +} + +void snp_kexec_finish(void) +{ + struct sev_es_runtime_data *data; + unsigned long size, addr; + unsigned int level, cpu; + struct ghcb *ghcb; + pte_t *pte; + + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + return; + + if (!IS_ENABLED(CONFIG_KEXEC_CORE)) + return; + + shutdown_all_aps(); + + unshare_all_memory(); + + /* + * Switch to using the MSR protocol to change per-CPU GHCBs to + * private. All the per-CPU GHCBs have been switched back to private, + * so can't do any more GHCB calls to the hypervisor beyond this point + * until the kexec'ed kernel starts running. + */ + boot_ghcb = NULL; + sev_cfg.ghcbs_initialized = false; + + for_each_possible_cpu(cpu) { + data = per_cpu(runtime_data, cpu); + ghcb = &data->ghcb_page; + pte = lookup_address((unsigned long)ghcb, &level); + size = page_level_size(level); + /* Handle the case of a huge page containing the GHCB page */ + addr = (unsigned long)ghcb & page_level_mask(level); + set_pte_enc(pte, level, (void *)addr); + snp_set_memory_private(addr, (size / PAGE_SIZE)); + } +} + +#define __ATTR_BASE (SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK) +#define INIT_CS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK) +#define INIT_DS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_WRITE_MASK) + +#define INIT_LDTR_ATTRIBS (SVM_SELECTOR_P_MASK | 2) +#define INIT_TR_ATTRIBS (SVM_SELECTOR_P_MASK | 3) + +static void *snp_alloc_vmsa_page(int cpu) +{ + struct page *p; + + /* + * Allocate VMSA page to work around the SNP erratum where the CPU will + * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB) + * collides with the RMP entry of VMSA page. The recommended workaround + * is to not use a large page. + * + * Allocate an 8k page which is also 8k-aligned. + */ + p = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1); + if (!p) + return NULL; + + split_page(p, 1); + + /* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */ + __free_page(p); + + return page_address(p + 1); +} + +static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip, unsigned int cpu) +{ + struct sev_es_save_area *cur_vmsa, *vmsa; + struct svsm_ca *caa; + u8 sipi_vector; + int ret; + u64 cr4; + + /* + * The hypervisor SNP feature support check has happened earlier, just check + * the AP_CREATION one here. + */ + if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION)) + return -EOPNOTSUPP; + + /* + * Verify the desired start IP against the known trampoline start IP + * to catch any future new trampolines that may be introduced that + * would require a new protected guest entry point. 
+ */ + if (WARN_ONCE(start_ip != real_mode_header->trampoline_start, + "Unsupported SNP start_ip: %lx\n", start_ip)) + return -EINVAL; + + /* Override start_ip with known protected guest start IP */ + start_ip = real_mode_header->sev_es_trampoline_start; + cur_vmsa = per_cpu(sev_vmsa, cpu); + + /* + * A new VMSA is created each time because there is no guarantee that + * the current VMSA is the kernels or that the vCPU is not running. If + * an attempt was done to use the current VMSA with a running vCPU, a + * #VMEXIT of that vCPU would wipe out all of the settings being done + * here. + */ + vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page(cpu); + if (!vmsa) + return -ENOMEM; + + /* If an SVSM is present, the SVSM per-CPU CAA will be !NULL */ + caa = per_cpu(svsm_caa, cpu); + + /* CR4 should maintain the MCE value */ + cr4 = native_read_cr4() & X86_CR4_MCE; + + /* Set the CS value based on the start_ip converted to a SIPI vector */ + sipi_vector = (start_ip >> 12); + vmsa->cs.base = sipi_vector << 12; + vmsa->cs.limit = AP_INIT_CS_LIMIT; + vmsa->cs.attrib = INIT_CS_ATTRIBS; + vmsa->cs.selector = sipi_vector << 8; + + /* Set the RIP value based on start_ip */ + vmsa->rip = start_ip & 0xfff; + + /* Set AP INIT defaults as documented in the APM */ + vmsa->ds.limit = AP_INIT_DS_LIMIT; + vmsa->ds.attrib = INIT_DS_ATTRIBS; + vmsa->es = vmsa->ds; + vmsa->fs = vmsa->ds; + vmsa->gs = vmsa->ds; + vmsa->ss = vmsa->ds; + + vmsa->gdtr.limit = AP_INIT_GDTR_LIMIT; + vmsa->ldtr.limit = AP_INIT_LDTR_LIMIT; + vmsa->ldtr.attrib = INIT_LDTR_ATTRIBS; + vmsa->idtr.limit = AP_INIT_IDTR_LIMIT; + vmsa->tr.limit = AP_INIT_TR_LIMIT; + vmsa->tr.attrib = INIT_TR_ATTRIBS; + + vmsa->cr4 = cr4; + vmsa->cr0 = AP_INIT_CR0_DEFAULT; + vmsa->dr7 = DR7_RESET_VALUE; + vmsa->dr6 = AP_INIT_DR6_DEFAULT; + vmsa->rflags = AP_INIT_RFLAGS_DEFAULT; + vmsa->g_pat = AP_INIT_GPAT_DEFAULT; + vmsa->xcr0 = AP_INIT_XCR0_DEFAULT; + vmsa->mxcsr = AP_INIT_MXCSR_DEFAULT; + vmsa->x87_ftw = AP_INIT_X87_FTW_DEFAULT; + vmsa->x87_fcw = AP_INIT_X87_FCW_DEFAULT; + + if (cc_platform_has(CC_ATTR_SNP_SECURE_AVIC)) + vmsa->vintr_ctrl |= V_GIF_MASK | V_NMI_ENABLE_MASK; + + /* SVME must be set. */ + vmsa->efer = EFER_SVME; + + /* + * Set the SNP-specific fields for this VMSA: + * VMPL level + * SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits) + */ + vmsa->vmpl = snp_vmpl; + vmsa->sev_features = sev_status >> 2; + + /* Populate AP's TSC scale/offset to get accurate TSC values. */ + if (cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC)) { + vmsa->tsc_scale = snp_tsc_scale; + vmsa->tsc_offset = snp_tsc_offset; + } + + /* Switch the page over to a VMSA page now that it is initialized */ + ret = snp_set_vmsa(vmsa, caa, apic_id, true); + if (ret) { + pr_err("set VMSA page failed (%u)\n", ret); + free_page((unsigned long)vmsa); + + return -EINVAL; + } + + /* Issue VMGEXIT AP Creation NAE event */ + ret = vmgexit_ap_control(SVM_VMGEXIT_AP_CREATE, vmsa, apic_id); + if (ret) { + snp_cleanup_vmsa(vmsa, apic_id); + vmsa = NULL; + } + + /* Free up any previous VMSA page */ + if (cur_vmsa) + snp_cleanup_vmsa(cur_vmsa, apic_id); + + /* Record the current VMSA page */ + per_cpu(sev_vmsa, cpu) = vmsa; + + return ret; +} + +void __init snp_set_wakeup_secondary_cpu(void) +{ + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + return; + + /* + * Always set this override if SNP is enabled. This makes it the + * required method to start APs under SNP. If the hypervisor does + * not support AP creation, then no APs will be started. 
+ */ + apic_update_callback(wakeup_secondary_cpu, wakeup_cpu_via_vmgexit); +} + +int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh) +{ + u16 startup_cs, startup_ip; + phys_addr_t jump_table_pa; + u64 jump_table_addr; + u16 __iomem *jump_table; + + jump_table_addr = get_jump_table_addr(); + + /* On UP guests there is no jump table so this is not a failure */ + if (!jump_table_addr) + return 0; + + /* Check if AP Jump Table is page-aligned */ + if (jump_table_addr & ~PAGE_MASK) + return -EINVAL; + + jump_table_pa = jump_table_addr & PAGE_MASK; + + startup_cs = (u16)(rmh->trampoline_start >> 4); + startup_ip = (u16)(rmh->sev_es_trampoline_start - + rmh->trampoline_start); + + jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE); + if (!jump_table) + return -EIO; + + writew(startup_ip, &jump_table[0]); + writew(startup_cs, &jump_table[1]); + + iounmap(jump_table); + + return 0; +} + +/* + * This is needed by the OVMF UEFI firmware which will use whatever it finds in + * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu + * runtime GHCBs used by the kernel are also mapped in the EFI page-table. + * + * When running under SVSM the CA page is needed too, so map it as well. + */ +int __init sev_es_efi_map_ghcbs_cas(pgd_t *pgd) +{ + unsigned long address, pflags, pflags_enc; + struct sev_es_runtime_data *data; + int cpu; + u64 pfn; + + if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) + return 0; + + pflags = _PAGE_NX | _PAGE_RW; + pflags_enc = cc_mkenc(pflags); + + for_each_possible_cpu(cpu) { + data = per_cpu(runtime_data, cpu); + + address = __pa(&data->ghcb_page); + pfn = address >> PAGE_SHIFT; + + if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags)) + return 1; + + if (snp_vmpl) { + address = per_cpu(svsm_caa_pa, cpu); + if (!address) + return 1; + + pfn = address >> PAGE_SHIFT; + if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags_enc)) + return 1; + } + } + + return 0; +} + +u64 savic_ghcb_msr_read(u32 reg) +{ + u64 msr = APIC_BASE_MSR + (reg >> 4); + struct pt_regs regs = { .cx = msr }; + struct es_em_ctxt ctxt = { .regs = ®s }; + struct ghcb_state state; + enum es_result res; + struct ghcb *ghcb; + + guard(irqsave)(); + + ghcb = __sev_get_ghcb(&state); + vc_ghcb_invalidate(ghcb); + + res = sev_es_ghcb_handle_msr(ghcb, &ctxt, false); + if (res != ES_OK) { + pr_err("Secure AVIC MSR (0x%llx) read returned error (%d)\n", msr, res); + /* MSR read failures are treated as fatal errors */ + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SAVIC_FAIL); + } + + __sev_put_ghcb(&state); + + return regs.ax | regs.dx << 32; +} + +void savic_ghcb_msr_write(u32 reg, u64 value) +{ + u64 msr = APIC_BASE_MSR + (reg >> 4); + struct pt_regs regs = { + .cx = msr, + .ax = lower_32_bits(value), + .dx = upper_32_bits(value) + }; + struct es_em_ctxt ctxt = { .regs = ®s }; + struct ghcb_state state; + enum es_result res; + struct ghcb *ghcb; + + guard(irqsave)(); + + ghcb = __sev_get_ghcb(&state); + vc_ghcb_invalidate(ghcb); + + res = sev_es_ghcb_handle_msr(ghcb, &ctxt, true); + if (res != ES_OK) { + pr_err("Secure AVIC MSR (0x%llx) write returned error (%d)\n", msr, res); + /* MSR writes should never fail. 
Any failure is fatal error for SNP guest */ + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SAVIC_FAIL); + } + + __sev_put_ghcb(&state); +} + +enum es_result savic_register_gpa(u64 gpa) +{ + struct ghcb_state state; + struct es_em_ctxt ctxt; + enum es_result res; + struct ghcb *ghcb; + + guard(irqsave)(); + + ghcb = __sev_get_ghcb(&state); + vc_ghcb_invalidate(ghcb); + + ghcb_set_rax(ghcb, SVM_VMGEXIT_SAVIC_SELF_GPA); + ghcb_set_rbx(ghcb, gpa); + res = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_SAVIC, + SVM_VMGEXIT_SAVIC_REGISTER_GPA, 0); + + __sev_put_ghcb(&state); + + return res; +} + +enum es_result savic_unregister_gpa(u64 *gpa) +{ + struct ghcb_state state; + struct es_em_ctxt ctxt; + enum es_result res; + struct ghcb *ghcb; + + guard(irqsave)(); + + ghcb = __sev_get_ghcb(&state); + vc_ghcb_invalidate(ghcb); + + ghcb_set_rax(ghcb, SVM_VMGEXIT_SAVIC_SELF_GPA); + res = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_SAVIC, + SVM_VMGEXIT_SAVIC_UNREGISTER_GPA, 0); + if (gpa && res == ES_OK) + *gpa = ghcb->save.rbx; + + __sev_put_ghcb(&state); + + return res; +} + +static void snp_register_per_cpu_ghcb(void) +{ + struct sev_es_runtime_data *data; + struct ghcb *ghcb; + + data = this_cpu_read(runtime_data); + ghcb = &data->ghcb_page; + + snp_register_ghcb_early(__pa(ghcb)); +} + +void setup_ghcb(void) +{ + if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) + return; + + /* + * Check whether the runtime #VC exception handler is active. It uses + * the per-CPU GHCB page which is set up by sev_es_init_vc_handling(). + * + * If SNP is active, register the per-CPU GHCB page so that the runtime + * exception handler can use it. + */ + if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) { + if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + snp_register_per_cpu_ghcb(); + + sev_cfg.ghcbs_initialized = true; + + return; + } + + /* + * Make sure the hypervisor talks a supported protocol. + * This gets called only in the BSP boot phase. + */ + if (!sev_es_negotiate_protocol()) + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); + + /* + * Clear the boot_ghcb. The first exception comes in before the bss + * section is cleared. + */ + memset(&boot_ghcb_page, 0, PAGE_SIZE); + + /* Alright - Make the boot-ghcb public */ + boot_ghcb = &boot_ghcb_page; + + /* SNP guest requires that GHCB GPA must be registered. */ + if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + snp_register_ghcb_early(__pa(&boot_ghcb_page)); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void sev_es_ap_hlt_loop(void) +{ + struct ghcb_state state; + struct ghcb *ghcb; + + ghcb = __sev_get_ghcb(&state); + + while (true) { + vc_ghcb_invalidate(ghcb); + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP); + ghcb_set_sw_exit_info_1(ghcb, 0); + ghcb_set_sw_exit_info_2(ghcb, 0); + + sev_es_wr_ghcb_msr(__pa(ghcb)); + VMGEXIT(); + + /* Wakeup signal? */ + if (ghcb_sw_exit_info_2_is_valid(ghcb) && + ghcb->save.sw_exit_info_2) + break; + } + + __sev_put_ghcb(&state); +} + +/* + * Play_dead handler when running under SEV-ES. This is needed because + * the hypervisor can't deliver an SIPI request to restart the AP. + * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the + * hypervisor wakes it up again. + */ +static void sev_es_play_dead(void) +{ + play_dead_common(); + + /* IRQs now disabled */ + + sev_es_ap_hlt_loop(); + + /* + * If we get here, the VCPU was woken up again. Jump to CPU + * startup code to get it back online. 
+ */ + soft_restart_cpu(); +} +#else /* CONFIG_HOTPLUG_CPU */ +#define sev_es_play_dead native_play_dead +#endif /* CONFIG_HOTPLUG_CPU */ + +#ifdef CONFIG_SMP +static void __init sev_es_setup_play_dead(void) +{ + smp_ops.play_dead = sev_es_play_dead; +} +#else +static inline void sev_es_setup_play_dead(void) { } +#endif + +static void __init alloc_runtime_data(int cpu) +{ + struct sev_es_runtime_data *data; + + data = memblock_alloc_node(sizeof(*data), PAGE_SIZE, cpu_to_node(cpu)); + if (!data) + panic("Can't allocate SEV-ES runtime data"); + + per_cpu(runtime_data, cpu) = data; + + if (snp_vmpl) { + struct svsm_ca *caa; + + /* Allocate the SVSM CA page if an SVSM is present */ + caa = cpu ? memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE) + : &boot_svsm_ca_page; + + per_cpu(svsm_caa, cpu) = caa; + per_cpu(svsm_caa_pa, cpu) = __pa(caa); + } +} + +static void __init init_ghcb(int cpu) +{ + struct sev_es_runtime_data *data; + int err; + + data = per_cpu(runtime_data, cpu); + + err = early_set_memory_decrypted((unsigned long)&data->ghcb_page, + sizeof(data->ghcb_page)); + if (err) + panic("Can't map GHCBs unencrypted"); + + memset(&data->ghcb_page, 0, sizeof(data->ghcb_page)); + + data->ghcb_active = false; + data->backup_ghcb_active = false; +} + +void __init sev_es_init_vc_handling(void) +{ + int cpu; + + BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE); + + if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) + return; + + if (!sev_es_check_cpu_features()) + panic("SEV-ES CPU Features missing"); + + /* + * SNP is supported in v2 of the GHCB spec which mandates support for HV + * features. + */ + if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) { + sev_hv_features = get_hv_features(); + + if (!(sev_hv_features & GHCB_HV_FT_SNP)) + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); + } + + /* Initialize per-cpu GHCB pages */ + for_each_possible_cpu(cpu) { + alloc_runtime_data(cpu); + init_ghcb(cpu); + } + + if (snp_vmpl) + sev_cfg.use_cas = true; + + sev_es_setup_play_dead(); + + /* Secondary CPUs use the runtime #VC handler */ + initial_vc_handler = (unsigned long)kernel_exc_vmm_communication; +} + +/* + * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are + * enabled, as the alternative (fallback) logic for DMI probing in the legacy + * ROM region can cause a crash since this region is not pre-validated. + */ +void __init snp_dmi_setup(void) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + dmi_setup(); +} + +static void dump_cpuid_table(void) +{ + const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); + int i = 0; + + pr_info("count=%d reserved=0x%x reserved2=0x%llx\n", + cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2); + + for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) { + const struct snp_cpuid_fn *fn = &cpuid_table->fn[i]; + + pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n", + i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx, + fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved); + } +} + +/* + * It is useful from an auditing/testing perspective to provide an easy way + * for the guest owner to know that the CPUID table has been initialized as + * expected, but that initialization happens too early in boot to print any + * sort of indicator, and there's not really any other good place to do it, + * so do it here. + * + * If running as an SNP guest, report the current VM privilege level (VMPL). 
+ */ +static int __init report_snp_info(void) +{ + const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); + + if (cpuid_table->count) { + pr_info("Using SNP CPUID table, %d entries present.\n", + cpuid_table->count); + + if (sev_cfg.debug) + dump_cpuid_table(); + } + + if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + pr_info("SNP running at VMPL%u.\n", snp_vmpl); + + return 0; +} +arch_initcall(report_snp_info); + +static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input) +{ + /* If (new) lengths have been returned, propagate them up */ + if (call->rcx_out != call->rcx) + input->manifest_buf.len = call->rcx_out; + + if (call->rdx_out != call->rdx) + input->certificates_buf.len = call->rdx_out; + + if (call->r8_out != call->r8) + input->report_buf.len = call->r8_out; +} + +int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, + struct svsm_attest_call *input) +{ + struct svsm_attest_call *ac; + unsigned long flags; + u64 attest_call_pa; + int ret; + + if (!snp_vmpl) + return -EINVAL; + + local_irq_save(flags); + + call->caa = svsm_get_caa(); + + ac = (struct svsm_attest_call *)call->caa->svsm_buffer; + attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer); + + *ac = *input; + + /* + * Set input registers for the request and set RDX and R8 to known + * values in order to detect length values being returned in them. + */ + call->rax = call_id; + call->rcx = attest_call_pa; + call->rdx = -1; + call->r8 = -1; + ret = svsm_perform_call_protocol(call); + update_attest_input(call, input); + + local_irq_restore(flags); + + return ret; +} +EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req); + +static int snp_issue_guest_request(struct snp_guest_req *req) +{ + struct snp_req_data *input = &req->input; + struct ghcb_state state; + struct es_em_ctxt ctxt; + unsigned long flags; + struct ghcb *ghcb; + int ret; + + req->exitinfo2 = SEV_RET_NO_FW_CALL; + + /* + * __sev_get_ghcb() needs to run with IRQs disabled because it is using + * a per-CPU GHCB. + */ + local_irq_save(flags); + + ghcb = __sev_get_ghcb(&state); + if (!ghcb) { + ret = -EIO; + goto e_restore_irq; + } + + vc_ghcb_invalidate(ghcb); + + if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) { + ghcb_set_rax(ghcb, input->data_gpa); + ghcb_set_rbx(ghcb, input->data_npages); + } + + ret = sev_es_ghcb_hv_call(ghcb, &ctxt, req->exit_code, input->req_gpa, input->resp_gpa); + if (ret) + goto e_put; + + req->exitinfo2 = ghcb->save.sw_exit_info_2; + switch (req->exitinfo2) { + case 0: + break; + + case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY): + ret = -EAGAIN; + break; + + case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN): + /* Number of expected pages are returned in RBX */ + if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) { + input->data_npages = ghcb_get_rbx(ghcb); + ret = -ENOSPC; + break; + } + fallthrough; + default: + ret = -EIO; + break; + } + +e_put: + __sev_put_ghcb(&state); +e_restore_irq: + local_irq_restore(flags); + + return ret; +} + +/** + * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device + * + * Check that there is SVSM and that it supports at least TPM_SEND_COMMAND + * which is the only request used so far. + * + * Return: true if the platform provides a vTPM SVSM device, false otherwise. 
+ */ +static bool snp_svsm_vtpm_probe(void) +{ + struct svsm_call call = {}; + + /* The vTPM device is available only if a SVSM is present */ + if (!snp_vmpl) + return false; + + call.caa = svsm_get_caa(); + call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY); + + if (svsm_perform_call_protocol(&call)) + return false; + + /* Check platform commands contains TPM_SEND_COMMAND - platform command 8 */ + return call.rcx_out & BIT_ULL(8); +} + +/** + * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM + * @buffer: A buffer used to both send the command and receive the response. + * + * Execute a SVSM_VTPM_CMD call as defined by + * "Secure VM Service Module for SEV-SNP Guests" Publication # 58019 Revision: 1.00 + * + * All command request/response buffers have a common structure as specified by + * the following table: + * Byte Size    In/Out    Description + * Offset    (Bytes) + * 0x000     4          In        Platform command + *                        Out       Platform command response size + * + * Each command can build upon this common request/response structure to create + * a structure specific to the command. See include/linux/tpm_svsm.h for more + * details. + * + * Return: 0 on success, -errno on failure + */ +int snp_svsm_vtpm_send_command(u8 *buffer) +{ + struct svsm_call call = {}; + + call.caa = svsm_get_caa(); + call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD); + call.rcx = __pa(buffer); + + return svsm_perform_call_protocol(&call); +} +EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command); + +static struct platform_device sev_guest_device = { + .name = "sev-guest", + .id = -1, +}; + +static struct platform_device tpm_svsm_device = { + .name = "tpm-svsm", + .id = -1, +}; + +static int __init snp_init_platform_device(void) +{ + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + return -ENODEV; + + if (platform_device_register(&sev_guest_device)) + return -ENODEV; + + if (snp_svsm_vtpm_probe() && + platform_device_register(&tpm_svsm_device)) + return -ENODEV; + + pr_info("SNP guest platform devices initialized.\n"); + return 0; +} +device_initcall(snp_init_platform_device); + +void sev_show_status(void) +{ + int i; + + pr_info("Status: "); + for (i = 0; i < MSR_AMD64_SNP_RESV_BIT; i++) { + if (sev_status & BIT_ULL(i)) { + if (!sev_status_feat_names[i]) + continue; + + pr_cont("%s ", sev_status_feat_names[i]); + } + } + pr_cont("\n"); +} + +#ifdef CONFIG_SYSFS +static ssize_t vmpl_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%d\n", snp_vmpl); +} + +static struct kobj_attribute vmpl_attr = __ATTR_RO(vmpl); + +static struct attribute *vmpl_attrs[] = { + &vmpl_attr.attr, + NULL +}; + +static struct attribute_group sev_attr_group = { + .attrs = vmpl_attrs, +}; + +static int __init sev_sysfs_init(void) +{ + struct kobject *sev_kobj; + struct device *dev_root; + int ret; + + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + return -ENODEV; + + dev_root = bus_get_dev_root(&cpu_subsys); + if (!dev_root) + return -ENODEV; + + sev_kobj = kobject_create_and_add("sev", &dev_root->kobj); + put_device(dev_root); + + if (!sev_kobj) + return -ENOMEM; + + ret = sysfs_create_group(sev_kobj, &sev_attr_group); + if (ret) + kobject_put(sev_kobj); + + return ret; +} +arch_initcall(sev_sysfs_init); +#endif // CONFIG_SYSFS + +static void free_shared_pages(void *buf, size_t sz) +{ + unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; + int ret; + + if (!buf) + return; + + ret = set_memory_encrypted((unsigned long)buf, npages); + if (ret) { + WARN_ONCE(ret, "failed to 
restore encryption mask (leak it)\n"); + return; + } + + __free_pages(virt_to_page(buf), get_order(sz)); +} + +static void *alloc_shared_pages(size_t sz) +{ + unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; + struct page *page; + int ret; + + page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz)); + if (!page) + return NULL; + + ret = set_memory_decrypted((unsigned long)page_address(page), npages); + if (ret) { + pr_err("failed to mark page shared, ret=%d\n", ret); + __free_pages(page, get_order(sz)); + return NULL; + } + + return page_address(page); +} + +static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno) +{ + u8 *key = NULL; + + switch (id) { + case 0: + *seqno = &secrets->os_area.msg_seqno_0; + key = secrets->vmpck0; + break; + case 1: + *seqno = &secrets->os_area.msg_seqno_1; + key = secrets->vmpck1; + break; + case 2: + *seqno = &secrets->os_area.msg_seqno_2; + key = secrets->vmpck2; + break; + case 3: + *seqno = &secrets->os_area.msg_seqno_3; + key = secrets->vmpck3; + break; + default: + break; + } + + return key; +} + +static struct aesgcm_ctx *snp_init_crypto(u8 *key, size_t keylen) +{ + struct aesgcm_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + if (aesgcm_expandkey(ctx, key, keylen, AUTHTAG_LEN)) { + pr_err("Crypto context initialization failed\n"); + kfree(ctx); + return NULL; + } + + return ctx; +} + +int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id) +{ + /* Adjust the default VMPCK key based on the executing VMPL level */ + if (vmpck_id == -1) + vmpck_id = snp_vmpl; + + mdesc->vmpck = get_vmpck(vmpck_id, mdesc->secrets, &mdesc->os_area_msg_seqno); + if (!mdesc->vmpck) { + pr_err("Invalid VMPCK%d communication key\n", vmpck_id); + return -EINVAL; + } + + /* Verify that VMPCK is not zero. */ + if (!memchr_inv(mdesc->vmpck, 0, VMPCK_KEY_LEN)) { + pr_err("Empty VMPCK%d communication key\n", vmpck_id); + return -EINVAL; + } + + mdesc->vmpck_id = vmpck_id; + + mdesc->ctx = snp_init_crypto(mdesc->vmpck, VMPCK_KEY_LEN); + if (!mdesc->ctx) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(snp_msg_init); + +struct snp_msg_desc *snp_msg_alloc(void) +{ + struct snp_msg_desc *mdesc; + void __iomem *mem; + + BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE); + + mdesc = kzalloc(sizeof(struct snp_msg_desc), GFP_KERNEL); + if (!mdesc) + return ERR_PTR(-ENOMEM); + + mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE); + if (!mem) + goto e_free_mdesc; + + mdesc->secrets = (__force struct snp_secrets_page *)mem; + + /* Allocate the shared page used for the request and response message. */ + mdesc->request = alloc_shared_pages(sizeof(struct snp_guest_msg)); + if (!mdesc->request) + goto e_unmap; + + mdesc->response = alloc_shared_pages(sizeof(struct snp_guest_msg)); + if (!mdesc->response) + goto e_free_request; + + return mdesc; + +e_free_request: + free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); +e_unmap: + iounmap(mem); +e_free_mdesc: + kfree(mdesc); + + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL_GPL(snp_msg_alloc); + +void snp_msg_free(struct snp_msg_desc *mdesc) +{ + if (!mdesc) + return; + + kfree(mdesc->ctx); + free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg)); + free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); + iounmap((__force void __iomem *)mdesc->secrets); + + memset(mdesc, 0, sizeof(*mdesc)); + kfree(mdesc); +} +EXPORT_SYMBOL_GPL(snp_msg_free); + +/* Mutex to serialize the shared buffer access and command handling. 
*/
+static DEFINE_MUTEX(snp_cmd_mutex);
+
+/*
+ * If an error is received from the host or AMD Secure Processor (ASP) there
+ * are two options. Either retry the exact same encrypted request or discontinue
+ * using the VMPCK.
+ *
+ * This is because, in the current encryption scheme, GHCB v2 uses AES-GCM to
+ * encrypt the requests. The IV for this scheme is the sequence number. GCM
+ * cannot tolerate IV reuse.
+ *
+ * The ASP FW v1.51 only increments the sequence numbers on a successful
+ * guest<->ASP back and forth and only accepts messages at its exact sequence
+ * number.
+ *
+ * So if the sequence number were to be reused, the encryption scheme is
+ * vulnerable. If the sequence number were incremented for a fresh IV, the ASP
+ * will reject the request.
+ */
+static void snp_disable_vmpck(struct snp_msg_desc *mdesc)
+{
+	pr_alert("Disabling VMPCK%d communication key to prevent IV reuse.\n",
+		 mdesc->vmpck_id);
+	memzero_explicit(mdesc->vmpck, VMPCK_KEY_LEN);
+	mdesc->vmpck = NULL;
+}
+
+static inline u64 __snp_get_msg_seqno(struct snp_msg_desc *mdesc)
+{
+	u64 count;
+
+	lockdep_assert_held(&snp_cmd_mutex);
+
+	/* Read the current message sequence counter from the secrets page */
+	count = *mdesc->os_area_msg_seqno;
+
+	return count + 1;
+}
+
+/* Return a non-zero sequence number on success */
+static u64 snp_get_msg_seqno(struct snp_msg_desc *mdesc)
+{
+	u64 count = __snp_get_msg_seqno(mdesc);
+
+	/*
+	 * The message sequence counter for the SNP guest request is a 64-bit
+	 * value, but version 2 of the GHCB specification defines a 32-bit storage
+	 * for it. If the counter exceeds the 32-bit value, return zero.
+	 * The caller should check the return value; if the caller happens to
+	 * not check the value and uses it anyway, the firmware treats zero as an
+	 * invalid sequence number and will fail the message request.
+	 */
+	if (count >= UINT_MAX) {
+		pr_err("request message sequence counter overflow\n");
+		return 0;
+	}
+
+	return count;
+}
+
+static void snp_inc_msg_seqno(struct snp_msg_desc *mdesc)
+{
+	/*
+	 * The counter is also incremented by the PSP, so increment it by 2
+	 * and save it in the secrets page.
+	 */
+	*mdesc->os_area_msg_seqno += 2;
+}
+
+static int verify_and_dec_payload(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
+{
+	struct snp_guest_msg *resp_msg = &mdesc->secret_response;
+	struct snp_guest_msg *req_msg = &mdesc->secret_request;
+	struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr;
+	struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr;
+	struct aesgcm_ctx *ctx = mdesc->ctx;
+	u8 iv[GCM_AES_IV_SIZE] = {};
+
+	pr_debug("response [seqno %lld type %d version %d sz %d]\n",
+		 resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version,
+		 resp_msg_hdr->msg_sz);
+
+	/* Copy response from shared memory to encrypted memory. */
+	memcpy(resp_msg, mdesc->response, sizeof(*resp_msg));
+
+	/* Verify that the sequence counter is incremented by 1 */
+	if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1)))
+		return -EBADMSG;
+
+	/* Verify response message type and version number. */
+	if (resp_msg_hdr->msg_type != (req_msg_hdr->msg_type + 1) ||
+	    resp_msg_hdr->msg_version != req_msg_hdr->msg_version)
+		return -EBADMSG;
+
+	/*
+	 * If the message size is greater than our buffer length then return
+	 * an error.
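+	 *
+	 * In other words, the caller must size its response buffer for both
+	 * the plaintext payload and the GCM authtag:
+	 *
+	 *	req->resp_sz >= resp_msg_hdr->msg_sz + ctx->authsize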
+ */ + if (unlikely((resp_msg_hdr->msg_sz + ctx->authsize) > req->resp_sz)) + return -EBADMSG; + + /* Decrypt the payload */ + memcpy(iv, &resp_msg_hdr->msg_seqno, min(sizeof(iv), sizeof(resp_msg_hdr->msg_seqno))); + if (!aesgcm_decrypt(ctx, req->resp_buf, resp_msg->payload, resp_msg_hdr->msg_sz, + &resp_msg_hdr->algo, AAD_LEN, iv, resp_msg_hdr->authtag)) + return -EBADMSG; + + return 0; +} + +static int enc_payload(struct snp_msg_desc *mdesc, u64 seqno, struct snp_guest_req *req) +{ + struct snp_guest_msg *msg = &mdesc->secret_request; + struct snp_guest_msg_hdr *hdr = &msg->hdr; + struct aesgcm_ctx *ctx = mdesc->ctx; + u8 iv[GCM_AES_IV_SIZE] = {}; + + memset(msg, 0, sizeof(*msg)); + + hdr->algo = SNP_AEAD_AES_256_GCM; + hdr->hdr_version = MSG_HDR_VER; + hdr->hdr_sz = sizeof(*hdr); + hdr->msg_type = req->msg_type; + hdr->msg_version = req->msg_version; + hdr->msg_seqno = seqno; + hdr->msg_vmpck = req->vmpck_id; + hdr->msg_sz = req->req_sz; + + /* Verify the sequence number is non-zero */ + if (!hdr->msg_seqno) + return -ENOSR; + + pr_debug("request [seqno %lld type %d version %d sz %d]\n", + hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz); + + if (WARN_ON((req->req_sz + ctx->authsize) > sizeof(msg->payload))) + return -EBADMSG; + + memcpy(iv, &hdr->msg_seqno, min(sizeof(iv), sizeof(hdr->msg_seqno))); + aesgcm_encrypt(ctx, msg->payload, req->req_buf, req->req_sz, &hdr->algo, + AAD_LEN, iv, hdr->authtag); + + return 0; +} + +static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req) +{ + unsigned long req_start = jiffies; + unsigned int override_npages = 0; + u64 override_err = 0; + int rc; + +retry_request: + /* + * Call firmware to process the request. In this function the encrypted + * message enters shared memory with the host. So after this call the + * sequence number must be incremented or the VMPCK must be deleted to + * prevent reuse of the IV. + */ + rc = snp_issue_guest_request(req); + switch (rc) { + case -ENOSPC: + /* + * If the extended guest request fails due to having too + * small of a certificate data buffer, retry the same + * guest request without the extended data request in + * order to increment the sequence number and thus avoid + * IV reuse. + */ + override_npages = req->input.data_npages; + req->exit_code = SVM_VMGEXIT_GUEST_REQUEST; + + /* + * Override the error to inform callers the given extended + * request buffer size was too small and give the caller the + * required buffer size. + */ + override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN); + + /* + * If this call to the firmware succeeds, the sequence number can + * be incremented allowing for continued use of the VMPCK. If + * there is an error reflected in the return value, this value + * is checked further down and the result will be the deletion + * of the VMPCK and the error code being propagated back to the + * user as an ioctl() return code. + */ + goto retry_request; + + /* + * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been + * throttled. Retry in the driver to avoid returning and reusing the + * message sequence number on a different message. + */ + case -EAGAIN: + if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) { + rc = -ETIMEDOUT; + break; + } + schedule_timeout_killable(SNP_REQ_RETRY_DELAY); + goto retry_request; + } + + /* + * Increment the message sequence number. 
There is no harm in doing
+	 * this now because decryption uses the value stored in the response
+	 * structure and any failure will wipe the VMPCK, preventing further
+	 * use anyway.
+	 */
+	snp_inc_msg_seqno(mdesc);
+
+	if (override_err) {
+		req->exitinfo2 = override_err;
+
+		/*
+		 * If an extended guest request was issued and the supplied certificate
+		 * buffer was not large enough, a standard guest request was issued to
+		 * prevent IV reuse. If the standard request was successful, return -EIO
+		 * back to the caller as would have originally been returned.
+		 */
+		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
+			rc = -EIO;
+	}
+
+	if (override_npages)
+		req->input.data_npages = override_npages;
+
+	return rc;
+}
+
+int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
+{
+	u64 seqno;
+	int rc;
+
+	/*
+	 * enc_payload() calls aesgcm_encrypt(), which can potentially offload to HW.
+	 * The offload's DMA SG list of data to encrypt has to be in the linear mapping.
+	 */
+	if (!virt_addr_valid(req->req_buf) || !virt_addr_valid(req->resp_buf)) {
+		pr_warn("AES-GCM buffers must be in linear mapping\n");
+		return -EINVAL;
+	}
+
+	guard(mutex)(&snp_cmd_mutex);
+
+	/* Check if the VMPCK is not empty */
+	if (!mdesc->vmpck || !memchr_inv(mdesc->vmpck, 0, VMPCK_KEY_LEN)) {
+		pr_err_ratelimited("VMPCK is disabled\n");
+		return -ENOTTY;
+	}
+
+	/* Get the message sequence number and verify that it is non-zero */
+	seqno = snp_get_msg_seqno(mdesc);
+	if (!seqno)
+		return -EIO;
+
+	/* Clear shared memory's response for the host to populate. */
+	memset(mdesc->response, 0, sizeof(struct snp_guest_msg));
+
+	/* Encrypt the userspace provided payload in mdesc->secret_request. */
+	rc = enc_payload(mdesc, seqno, req);
+	if (rc)
+		return rc;
+
+	/*
+	 * Write the fully encrypted request to the shared unencrypted
+	 * request page.
+	 */
+	memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request));
+
+	/* Initialize the input address for guest request */
+	req->input.req_gpa = __pa(mdesc->request);
+	req->input.resp_gpa = __pa(mdesc->response);
+	req->input.data_gpa = req->certs_data ? __pa(req->certs_data) : 0;
+
+	rc = __handle_guest_request(mdesc, req);
+	if (rc) {
+		if (rc == -EIO &&
+		    req->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
+			return rc;
+
+		pr_alert("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
+			 rc, req->exitinfo2);
+
+		snp_disable_vmpck(mdesc);
+		return rc;
+	}
+
+	rc = verify_and_dec_payload(mdesc, req);
+	if (rc) {
+		pr_alert("Detected unexpected decode failure from ASP. rc: %d\n", rc);
+		snp_disable_vmpck(mdesc);
+		return rc;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snp_send_guest_request);
+
+static int __init snp_get_tsc_info(void)
+{
+	struct snp_tsc_info_resp *tsc_resp;
+	struct snp_tsc_info_req *tsc_req;
+	struct snp_msg_desc *mdesc;
+	struct snp_guest_req req = {};
+	int rc = -ENOMEM;
+
+	tsc_req = kzalloc(sizeof(*tsc_req), GFP_KERNEL);
+	if (!tsc_req)
+		return rc;
+
+	/*
+	 * The intermediate response buffer is used while decrypting the
+	 * response payload. Make sure that it has enough space to cover
+	 * the authtag.
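+	 * That is why the allocation below is sizeof(*tsc_resp) plus
+	 * AUTHTAG_LEN rather than just the structure size, matching the
+	 * resp_sz bound enforced in verify_and_dec_payload().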
+ */ + tsc_resp = kzalloc(sizeof(*tsc_resp) + AUTHTAG_LEN, GFP_KERNEL); + if (!tsc_resp) + goto e_free_tsc_req; + + mdesc = snp_msg_alloc(); + if (IS_ERR_OR_NULL(mdesc)) + goto e_free_tsc_resp; + + rc = snp_msg_init(mdesc, snp_vmpl); + if (rc) + goto e_free_mdesc; + + req.msg_version = MSG_HDR_VER; + req.msg_type = SNP_MSG_TSC_INFO_REQ; + req.vmpck_id = snp_vmpl; + req.req_buf = tsc_req; + req.req_sz = sizeof(*tsc_req); + req.resp_buf = (void *)tsc_resp; + req.resp_sz = sizeof(*tsc_resp) + AUTHTAG_LEN; + req.exit_code = SVM_VMGEXIT_GUEST_REQUEST; + + rc = snp_send_guest_request(mdesc, &req); + if (rc) + goto e_request; + + pr_debug("%s: response status 0x%x scale 0x%llx offset 0x%llx factor 0x%x\n", + __func__, tsc_resp->status, tsc_resp->tsc_scale, tsc_resp->tsc_offset, + tsc_resp->tsc_factor); + + if (!tsc_resp->status) { + snp_tsc_scale = tsc_resp->tsc_scale; + snp_tsc_offset = tsc_resp->tsc_offset; + } else { + pr_err("Failed to get TSC info, response status 0x%x\n", tsc_resp->status); + rc = -EIO; + } + +e_request: + /* The response buffer contains sensitive data, explicitly clear it. */ + memzero_explicit(tsc_resp, sizeof(*tsc_resp) + AUTHTAG_LEN); +e_free_mdesc: + snp_msg_free(mdesc); +e_free_tsc_resp: + kfree(tsc_resp); +e_free_tsc_req: + kfree(tsc_req); + + return rc; +} + +void __init snp_secure_tsc_prepare(void) +{ + if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC)) + return; + + if (snp_get_tsc_info()) { + pr_alert("Unable to retrieve Secure TSC info from ASP\n"); + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC); + } + + pr_debug("SecureTSC enabled"); +} + +static unsigned long securetsc_get_tsc_khz(void) +{ + return snp_tsc_freq_khz; +} + +void __init snp_secure_tsc_init(void) +{ + struct snp_secrets_page *secrets; + unsigned long tsc_freq_mhz; + void *mem; + + if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC)) + return; + + mem = early_memremap_encrypted(sev_secrets_pa, PAGE_SIZE); + if (!mem) { + pr_err("Unable to get TSC_FACTOR: failed to map the SNP secrets page.\n"); + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC); + } + + secrets = (__force struct snp_secrets_page *)mem; + + setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); + rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz); + + /* Extract the GUEST TSC MHZ from BIT[17:0], rest is reserved space */ + tsc_freq_mhz &= GENMASK_ULL(17, 0); + + snp_tsc_freq_khz = SNP_SCALE_TSC_FREQ(tsc_freq_mhz * 1000, secrets->tsc_factor); + + x86_platform.calibrate_cpu = securetsc_get_tsc_khz; + x86_platform.calibrate_tsc = securetsc_get_tsc_khz; + + early_memunmap(mem, PAGE_SIZE); +} diff --git a/arch/x86/coco/sev/noinstr.c b/arch/x86/coco/sev/noinstr.c new file mode 100644 index 000000000000..b527eafb6312 --- /dev/null +++ b/arch/x86/coco/sev/noinstr.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Memory Encryption Support + * + * Copyright (C) 2019 SUSE + * + * Author: Joerg Roedel <jroedel@suse.de> + */ + +#define pr_fmt(fmt) "SEV: " fmt + +#include <linux/bug.h> +#include <linux/kernel.h> + +#include <asm/cpu_entry_area.h> +#include <asm/msr.h> +#include <asm/ptrace.h> +#include <asm/sev.h> +#include <asm/sev-internal.h> + +static __always_inline bool on_vc_stack(struct pt_regs *regs) +{ + unsigned long sp = regs->sp; + + /* User-mode RSP is not trusted */ + if (user_mode(regs)) + return false; + + /* SYSCALL gap still has user-mode RSP */ + if (ip_within_syscall_gap(regs)) + return false; + + return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC))); +} + +/* + 
* This function handles the case when an NMI is raised in the #VC
+ * exception handler entry code, before the #VC handler has switched off
+ * its IST stack. In this case, the IST entry for #VC must be adjusted,
+ * so that any nested #VC exception will not overwrite the stack
+ * contents of the interrupted #VC handler.
+ *
+ * The IST entry is adjusted unconditionally so that it can also be
+ * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
+ * nested sev_es_ist_exit() call may adjust back the IST entry too
+ * early.
+ *
+ * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
+ * on the NMI IST stack, as they are only called from NMI handling code
+ * right now.
+ */
+void noinstr __sev_es_ist_enter(struct pt_regs *regs)
+{
+	unsigned long old_ist, new_ist;
+
+	/* Read old IST entry */
+	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+	/*
+	 * If NMI happened while on the #VC IST stack, set the new IST
+	 * value below regs->sp, so that the interrupted stack frame is
+	 * not overwritten by subsequent #VC exceptions.
+	 */
+	if (on_vc_stack(regs))
+		new_ist = regs->sp;
+
+	/*
+	 * Reserve additional 8 bytes and store old IST value so this
+	 * adjustment can be unrolled in __sev_es_ist_exit().
+	 */
+	new_ist -= sizeof(old_ist);
+	*(unsigned long *)new_ist = old_ist;
+
+	/* Set new IST entry */
+	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
+}
+
+void noinstr __sev_es_ist_exit(void)
+{
+	unsigned long ist;
+
+	/* Read IST entry */
+	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
+		return;
+
+	/* Read back old IST entry and write it to the TSS */
+	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
+}
+
+void noinstr __sev_es_nmi_complete(void)
+{
+	struct ghcb_state state;
+	struct ghcb *ghcb;
+
+	ghcb = __sev_get_ghcb(&state);
+
+	vc_ghcb_invalidate(ghcb);
+	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
+	ghcb_set_sw_exit_info_1(ghcb, 0);
+	ghcb_set_sw_exit_info_2(ghcb, 0);
+
+	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
+	VMGEXIT();
+
+	__sev_put_ghcb(&state);
+}
+
+/*
+ * Nothing shall interrupt this code path while holding the per-CPU
+ * GHCB. The backup GHCB is only for NMIs interrupting this path.
+ *
+ * Callers must disable local interrupts around it.
+ */
+noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
+{
+	struct sev_es_runtime_data *data;
+	struct ghcb *ghcb;
+
+	WARN_ON(!irqs_disabled());
+
+	data = this_cpu_read(runtime_data);
+	ghcb = &data->ghcb_page;
+
+	if (unlikely(data->ghcb_active)) {
+		/* GHCB is already in use - save its contents */
+
+		if (unlikely(data->backup_ghcb_active)) {
+			/*
+			 * Backup-GHCB is also already in use. There is no way
+			 * to continue here so just kill the machine. To make
+			 * panic() work, mark GHCBs inactive so that messages
+			 * can be printed out.
+			 */
+			data->ghcb_active = false;
+			data->backup_ghcb_active = false;
+
+			instrumentation_begin();
+			panic("Unable to handle #VC exception! 
GHCB and Backup GHCB are already in use"); + instrumentation_end(); + } + + /* Mark backup_ghcb active before writing to it */ + data->backup_ghcb_active = true; + + state->ghcb = &data->backup_ghcb; + + /* Backup GHCB content */ + *state->ghcb = *ghcb; + } else { + state->ghcb = NULL; + data->ghcb_active = true; + } + + return ghcb; +} + +noinstr void __sev_put_ghcb(struct ghcb_state *state) +{ + struct sev_es_runtime_data *data; + struct ghcb *ghcb; + + WARN_ON(!irqs_disabled()); + + data = this_cpu_read(runtime_data); + ghcb = &data->ghcb_page; + + if (state->ghcb) { + /* Restore GHCB from Backup */ + *ghcb = *state->ghcb; + data->backup_ghcb_active = false; + state->ghcb = NULL; + } else { + /* + * Invalidate the GHCB so a VMGEXIT instruction issued + * from userspace won't appear to be valid. + */ + vc_ghcb_invalidate(ghcb); + data->ghcb_active = false; + } +} diff --git a/arch/x86/coco/sev/vc-handle.c b/arch/x86/coco/sev/vc-handle.c new file mode 100644 index 000000000000..f08c7505ed82 --- /dev/null +++ b/arch/x86/coco/sev/vc-handle.c @@ -0,0 +1,1080 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Memory Encryption Support + * + * Copyright (C) 2019 SUSE + * + * Author: Joerg Roedel <jroedel@suse.de> + */ + +#define pr_fmt(fmt) "SEV: " fmt + +#include <linux/sched/debug.h> /* For show_regs() */ +#include <linux/cc_platform.h> +#include <linux/printk.h> +#include <linux/mm_types.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/io.h> +#include <linux/psp-sev.h> +#include <linux/efi.h> +#include <uapi/linux/sev-guest.h> + +#include <asm/init.h> +#include <asm/stacktrace.h> +#include <asm/sev.h> +#include <asm/sev-internal.h> +#include <asm/insn-eval.h> +#include <asm/fpu/xcr.h> +#include <asm/processor.h> +#include <asm/setup.h> +#include <asm/traps.h> +#include <asm/svm.h> +#include <asm/smp.h> +#include <asm/cpu.h> +#include <asm/apic.h> +#include <asm/cpuid/api.h> + +static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, + unsigned long vaddr, phys_addr_t *paddr) +{ + unsigned long va = (unsigned long)vaddr; + unsigned int level; + phys_addr_t pa; + pgd_t *pgd; + pte_t *pte; + + pgd = __va(read_cr3_pa()); + pgd = &pgd[pgd_index(va)]; + pte = lookup_address_in_pgd(pgd, va, &level); + if (!pte) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.cr2 = vaddr; + ctxt->fi.error_code = 0; + + if (user_mode(ctxt->regs)) + ctxt->fi.error_code |= X86_PF_USER; + + return ES_EXCEPTION; + } + + if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC)) + /* Emulated MMIO to/from encrypted memory not supported */ + return ES_UNSUPPORTED; + + pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; + pa |= va & ~page_level_mask(level); + + *paddr = pa; + + return ES_OK; +} + +static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size) +{ + BUG_ON(size > 4); + + if (user_mode(ctxt->regs)) { + struct thread_struct *t = ¤t->thread; + struct io_bitmap *iobm = t->io_bitmap; + size_t idx; + + if (!iobm) + goto fault; + + for (idx = port; idx < port + size; ++idx) { + if (test_bit(idx, iobm->bitmap)) + goto fault; + } + } + + return ES_OK; + +fault: + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + + return ES_EXCEPTION; +} + +void vc_forward_exception(struct es_em_ctxt *ctxt) +{ + long error_code = ctxt->fi.error_code; + int trapnr = ctxt->fi.vector; + + ctxt->regs->orig_ax = ctxt->fi.error_code; + + switch (trapnr) { + case X86_TRAP_GP: + exc_general_protection(ctxt->regs, error_code); + break; + case X86_TRAP_UD: + 
exc_invalid_op(ctxt->regs); + break; + case X86_TRAP_PF: + write_cr2(ctxt->fi.cr2); + exc_page_fault(ctxt->regs, error_code); + break; + case X86_TRAP_AC: + exc_alignment_check(ctxt->regs, error_code); + break; + default: + pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n"); + BUG(); + } +} + +static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt, + unsigned char *buffer) +{ + return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); +} + +static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt) +{ + char buffer[MAX_INSN_SIZE]; + int insn_bytes; + + insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer); + if (insn_bytes == 0) { + /* Nothing could be copied */ + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER; + ctxt->fi.cr2 = ctxt->regs->ip; + return ES_EXCEPTION; + } else if (insn_bytes == -EINVAL) { + /* Effective RIP could not be calculated */ + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + ctxt->fi.cr2 = 0; + return ES_EXCEPTION; + } + + if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes)) + return ES_DECODE_FAILED; + + if (ctxt->insn.immediate.got) + return ES_OK; + else + return ES_DECODE_FAILED; +} + +static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt) +{ + char buffer[MAX_INSN_SIZE]; + int res, ret; + + res = vc_fetch_insn_kernel(ctxt, buffer); + if (res) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = X86_PF_INSTR; + ctxt->fi.cr2 = ctxt->regs->ip; + return ES_EXCEPTION; + } + + ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64); + if (ret < 0) + return ES_DECODE_FAILED; + else + return ES_OK; +} + +/* + * User instruction decoding is also required for the EFI runtime. Even though + * the EFI runtime is running in kernel mode, it uses special EFI virtual + * address mappings that require the use of efi_mm to properly address and + * decode. + */ +static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) +{ + if (user_mode(ctxt->regs) || mm_is_efi(current->active_mm)) + return __vc_decode_user_insn(ctxt); + else + return __vc_decode_kern_insn(ctxt); +} + +static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, + char *dst, char *buf, size_t size) +{ + unsigned long error_code = X86_PF_PROT | X86_PF_WRITE; + + /* + * This function uses __put_user() independent of whether kernel or user + * memory is accessed. This works fine because __put_user() does no + * sanity checks of the pointer being accessed. All that it does is + * to report when the access failed. + * + * Also, this function runs in atomic context, so __put_user() is not + * allowed to sleep. The page-fault handler detects that it is running + * in atomic context and will not try to take mmap_sem and handle the + * fault, so additional pagefault_enable()/disable() calls are not + * needed. + * + * The access can't be done via copy_to_user() here because + * vc_write_mem() must not use string instructions to access unsafe + * memory. The reason is that MOVS is emulated by the #VC handler by + * splitting the move up into a read and a write and taking a nested #VC + * exception on whatever of them is the MMIO access. Using string + * instructions here would cause infinite nesting. 
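+	 *
+	 * For example, a single emulated MOVS access becomes one
+	 * vc_read_mem() plus one vc_write_mem(); if either helper itself
+	 * used a string instruction on the MMIO side, the resulting #VC
+	 * would nest without bound.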
+ */ + switch (size) { + case 1: { + u8 d1; + u8 __user *target = (u8 __user *)dst; + + memcpy(&d1, buf, 1); + if (__put_user(d1, target)) + goto fault; + break; + } + case 2: { + u16 d2; + u16 __user *target = (u16 __user *)dst; + + memcpy(&d2, buf, 2); + if (__put_user(d2, target)) + goto fault; + break; + } + case 4: { + u32 d4; + u32 __user *target = (u32 __user *)dst; + + memcpy(&d4, buf, 4); + if (__put_user(d4, target)) + goto fault; + break; + } + case 8: { + u64 d8; + u64 __user *target = (u64 __user *)dst; + + memcpy(&d8, buf, 8); + if (__put_user(d8, target)) + goto fault; + break; + } + default: + WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); + return ES_UNSUPPORTED; + } + + return ES_OK; + +fault: + if (user_mode(ctxt->regs)) + error_code |= X86_PF_USER; + + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = error_code; + ctxt->fi.cr2 = (unsigned long)dst; + + return ES_EXCEPTION; +} + +static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, + char *src, char *buf, size_t size) +{ + unsigned long error_code = X86_PF_PROT; + + /* + * This function uses __get_user() independent of whether kernel or user + * memory is accessed. This works fine because __get_user() does no + * sanity checks of the pointer being accessed. All that it does is + * to report when the access failed. + * + * Also, this function runs in atomic context, so __get_user() is not + * allowed to sleep. The page-fault handler detects that it is running + * in atomic context and will not try to take mmap_sem and handle the + * fault, so additional pagefault_enable()/disable() calls are not + * needed. + * + * The access can't be done via copy_from_user() here because + * vc_read_mem() must not use string instructions to access unsafe + * memory. The reason is that MOVS is emulated by the #VC handler by + * splitting the move up into a read and a write and taking a nested #VC + * exception on whatever of them is the MMIO access. Using string + * instructions here would cause infinite nesting. + */ + switch (size) { + case 1: { + u8 d1; + u8 __user *s = (u8 __user *)src; + + if (__get_user(d1, s)) + goto fault; + memcpy(buf, &d1, 1); + break; + } + case 2: { + u16 d2; + u16 __user *s = (u16 __user *)src; + + if (__get_user(d2, s)) + goto fault; + memcpy(buf, &d2, 2); + break; + } + case 4: { + u32 d4; + u32 __user *s = (u32 __user *)src; + + if (__get_user(d4, s)) + goto fault; + memcpy(buf, &d4, 4); + break; + } + case 8: { + u64 d8; + u64 __user *s = (u64 __user *)src; + if (__get_user(d8, s)) + goto fault; + memcpy(buf, &d8, 8); + break; + } + default: + WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); + return ES_UNSUPPORTED; + } + + return ES_OK; + +fault: + if (user_mode(ctxt->regs)) + error_code |= X86_PF_USER; + + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = error_code; + ctxt->fi.cr2 = (unsigned long)src; + + return ES_EXCEPTION; +} + +#define sev_printk(fmt, ...) printk(fmt, ##__VA_ARGS__) +#define error(v) + +#include "vc-shared.c" + +/* Writes to the SVSM CAA MSR are ignored */ +static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write) +{ + if (write) + return ES_OK; + + regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa)); + regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa)); + + return ES_OK; +} + +/* + * TSC related accesses should not exit to the hypervisor when a guest is + * executing with Secure TSC enabled, so special handling is required for + * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ. 
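+ *
+ * Reads of MSR_IA32_TSC are satisfied locally via RDTSC, writes to either
+ * MSR raise #GP, and an intercepted read of MSR_AMD64_GUEST_TSC_FREQ
+ * terminates the guest, as implemented below.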
+ */ +static enum es_result __vc_handle_secure_tsc_msrs(struct es_em_ctxt *ctxt, bool write) +{ + struct pt_regs *regs = ctxt->regs; + u64 tsc; + + /* + * Writing to MSR_IA32_TSC can cause subsequent reads of the TSC to + * return undefined values, and GUEST_TSC_FREQ is read-only. Generate + * a #GP on all writes. + */ + if (write) { + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + return ES_EXCEPTION; + } + + /* + * GUEST_TSC_FREQ read should not be intercepted when Secure TSC is + * enabled. Terminate the guest if a read is attempted. + */ + if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ) + return ES_VMM_ERROR; + + /* Reads of MSR_IA32_TSC should return the current TSC value. */ + tsc = rdtsc_ordered(); + regs->ax = lower_32_bits(tsc); + regs->dx = upper_32_bits(tsc); + + return ES_OK; +} + +enum es_result sev_es_ghcb_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write) +{ + struct pt_regs *regs = ctxt->regs; + enum es_result ret; + + switch (regs->cx) { + case MSR_SVSM_CAA: + return __vc_handle_msr_caa(regs, write); + case MSR_IA32_TSC: + case MSR_AMD64_GUEST_TSC_FREQ: + if (sev_status & MSR_AMD64_SNP_SECURE_TSC) + return __vc_handle_secure_tsc_msrs(ctxt, write); + break; + case MSR_AMD64_SAVIC_CONTROL: + /* + * AMD64_SAVIC_CONTROL should not be intercepted when + * Secure AVIC is enabled. Terminate the Secure AVIC guest + * if the interception is enabled. + */ + if (cc_platform_has(CC_ATTR_SNP_SECURE_AVIC)) + return ES_VMM_ERROR; + break; + default: + break; + } + + ghcb_set_rcx(ghcb, regs->cx); + if (write) { + ghcb_set_rax(ghcb, regs->ax); + ghcb_set_rdx(ghcb, regs->dx); + } + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0); + + if ((ret == ES_OK) && !write) { + regs->ax = ghcb->save.rax; + regs->dx = ghcb->save.rdx; + } + + return ret; +} + +static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + return sev_es_ghcb_handle_msr(ghcb, ctxt, ctxt->insn.opcode.bytes[1] == 0x30); +} + +static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt) +{ + int trapnr = ctxt->fi.vector; + + if (trapnr == X86_TRAP_PF) + native_write_cr2(ctxt->fi.cr2); + + ctxt->regs->orig_ax = ctxt->fi.error_code; + do_early_exception(ctxt->regs, trapnr); +} + +static long *vc_insn_get_rm(struct es_em_ctxt *ctxt) +{ + long *reg_array; + int offset; + + reg_array = (long *)ctxt->regs; + offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs); + + if (offset < 0) + return NULL; + + offset /= sizeof(long); + + return reg_array + offset; +} +static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt, + unsigned int bytes, bool read) +{ + u64 exit_code, exit_info_1, exit_info_2; + unsigned long ghcb_pa = __pa(ghcb); + enum es_result res; + phys_addr_t paddr; + void __user *ref; + + ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs); + if (ref == (void __user *)-1L) + return ES_UNSUPPORTED; + + exit_code = read ? 
SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE; + + res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr); + if (res != ES_OK) { + if (res == ES_EXCEPTION && !read) + ctxt->fi.error_code |= X86_PF_WRITE; + + return res; + } + + exit_info_1 = paddr; + /* Can never be greater than 8 */ + exit_info_2 = bytes; + + ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer)); + + return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2); +} + +/* + * The MOVS instruction has two memory operands, which raises the + * problem that it is not known whether the access to the source or the + * destination caused the #VC exception (and hence whether an MMIO read + * or write operation needs to be emulated). + * + * Instead of playing games with walking page-tables and trying to guess + * whether the source or destination is an MMIO range, split the move + * into two operations, a read and a write with only one memory operand. + * This will cause a nested #VC exception on the MMIO address which can + * then be handled. + * + * This implementation has the benefit that it also supports MOVS where + * source _and_ destination are MMIO regions. + * + * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a + * rare operation. If it turns out to be a performance problem the split + * operations can be moved to memcpy_fromio() and memcpy_toio(). + */ +static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt, + unsigned int bytes) +{ + unsigned long ds_base, es_base; + unsigned char *src, *dst; + unsigned char buffer[8]; + enum es_result ret; + bool rep; + int off; + + ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS); + es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); + + if (ds_base == -1L || es_base == -1L) { + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + return ES_EXCEPTION; + } + + src = ds_base + (unsigned char *)ctxt->regs->si; + dst = es_base + (unsigned char *)ctxt->regs->di; + + ret = vc_read_mem(ctxt, src, buffer, bytes); + if (ret != ES_OK) + return ret; + + ret = vc_write_mem(ctxt, dst, buffer, bytes); + if (ret != ES_OK) + return ret; + + if (ctxt->regs->flags & X86_EFLAGS_DF) + off = -bytes; + else + off = bytes; + + ctxt->regs->si += off; + ctxt->regs->di += off; + + rep = insn_has_rep_prefix(&ctxt->insn); + if (rep) + ctxt->regs->cx -= 1; + + if (!rep || ctxt->regs->cx == 0) + return ES_OK; + else + return ES_RETRY; +} + +static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + struct insn *insn = &ctxt->insn; + enum insn_mmio_type mmio; + unsigned int bytes = 0; + enum es_result ret; + u8 sign_byte; + long *reg_data; + + mmio = insn_decode_mmio(insn, &bytes); + if (mmio == INSN_MMIO_DECODE_FAILED) + return ES_DECODE_FAILED; + + if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) { + reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs); + if (!reg_data) + return ES_DECODE_FAILED; + } + + if (user_mode(ctxt->regs)) + return ES_UNSUPPORTED; + + switch (mmio) { + case INSN_MMIO_WRITE: + memcpy(ghcb->shared_buffer, reg_data, bytes); + ret = vc_do_mmio(ghcb, ctxt, bytes, false); + break; + case INSN_MMIO_WRITE_IMM: + memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes); + ret = vc_do_mmio(ghcb, ctxt, bytes, false); + break; + case INSN_MMIO_READ: + ret = vc_do_mmio(ghcb, ctxt, bytes, true); + if (ret) + break; + + /* Zero-extend for 32-bit operation */ + if (bytes == 4) + *reg_data = 0; + + memcpy(reg_data, ghcb->shared_buffer, bytes); + break; + case 
INSN_MMIO_READ_ZERO_EXTEND: + ret = vc_do_mmio(ghcb, ctxt, bytes, true); + if (ret) + break; + + /* Zero extend based on operand size */ + memset(reg_data, 0, insn->opnd_bytes); + memcpy(reg_data, ghcb->shared_buffer, bytes); + break; + case INSN_MMIO_READ_SIGN_EXTEND: + ret = vc_do_mmio(ghcb, ctxt, bytes, true); + if (ret) + break; + + if (bytes == 1) { + u8 *val = (u8 *)ghcb->shared_buffer; + + sign_byte = (*val & 0x80) ? 0xff : 0x00; + } else { + u16 *val = (u16 *)ghcb->shared_buffer; + + sign_byte = (*val & 0x8000) ? 0xff : 0x00; + } + + /* Sign extend based on operand size */ + memset(reg_data, sign_byte, insn->opnd_bytes); + memcpy(reg_data, ghcb->shared_buffer, bytes); + break; + case INSN_MMIO_MOVS: + ret = vc_handle_mmio_movs(ctxt, bytes); + break; + default: + ret = ES_UNSUPPORTED; + break; + } + + return ret; +} + +static enum es_result vc_handle_dr7_write(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct sev_es_runtime_data *data = this_cpu_read(runtime_data); + long val, *reg = vc_insn_get_rm(ctxt); + enum es_result ret; + + if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP) + return ES_VMM_ERROR; + + if (!reg) + return ES_DECODE_FAILED; + + val = *reg; + + /* Upper 32 bits must be written as zeroes */ + if (val >> 32) { + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + return ES_EXCEPTION; + } + + /* Clear out other reserved bits and set bit 10 */ + val = (val & 0xffff23ffL) | BIT(10); + + /* Early non-zero writes to DR7 are not supported */ + if (!data && (val & ~DR7_RESET_VALUE)) + return ES_UNSUPPORTED; + + /* Using a value of 0 for ExitInfo1 means RAX holds the value */ + ghcb_set_rax(ghcb, val); + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0); + if (ret != ES_OK) + return ret; + + if (data) + data->dr7 = val; + + return ES_OK; +} + +static enum es_result vc_handle_dr7_read(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct sev_es_runtime_data *data = this_cpu_read(runtime_data); + long *reg = vc_insn_get_rm(ctxt); + + if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP) + return ES_VMM_ERROR; + + if (!reg) + return ES_DECODE_FAILED; + + if (data) + *reg = data->dr7; + else + *reg = DR7_RESET_VALUE; + + return ES_OK; +} + +static enum es_result vc_handle_wbinvd(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0); +} + +static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + enum es_result ret; + + ghcb_set_rcx(ghcb, ctxt->regs->cx); + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0); + if (ret != ES_OK) + return ret; + + if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb))) + return ES_VMM_ERROR; + + ctxt->regs->ax = ghcb->save.rax; + ctxt->regs->dx = ghcb->save.rdx; + + return ES_OK; +} + +static enum es_result vc_handle_monitor(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + /* + * Treat it as a NOP and do not leak a physical address to the + * hypervisor. + */ + return ES_OK; +} + +static enum es_result vc_handle_mwait(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + /* Treat the same as MONITOR/MONITORX */ + return ES_OK; +} + +static enum es_result vc_handle_vmmcall(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + enum es_result ret; + + ghcb_set_rax(ghcb, ctxt->regs->ax); + ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 
3 : 0);
+
+	if (x86_platform.hyper.sev_es_hcall_prepare)
+		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
+
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
+	if (ret != ES_OK)
+		return ret;
+
+	if (!ghcb_rax_is_valid(ghcb))
+		return ES_VMM_ERROR;
+
+	ctxt->regs->ax = ghcb->save.rax;
+
+	/*
+	 * Call sev_es_hcall_finish() after regs->ax is already set.
+	 * This allows the hypervisor handler to overwrite it again if
+	 * necessary.
+	 */
+	if (x86_platform.hyper.sev_es_hcall_finish &&
+	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
+		return ES_VMM_ERROR;
+
+	return ES_OK;
+}
+
+static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
+					struct es_em_ctxt *ctxt)
+{
+	/*
+	 * Calling exc_alignment_check() directly does not work, because it
+	 * enables IRQs and the GHCB is active. Forward the exception and call
+	 * it later from vc_forward_exception().
+	 */
+	ctxt->fi.vector = X86_TRAP_AC;
+	ctxt->fi.error_code = 0;
+	return ES_EXCEPTION;
+}
+
+static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
+					 struct ghcb *ghcb,
+					 unsigned long exit_code)
+{
+	enum es_result result = vc_check_opcode_bytes(ctxt, exit_code);
+
+	if (result != ES_OK)
+		return result;
+
+	switch (exit_code) {
+	case SVM_EXIT_READ_DR7:
+		result = vc_handle_dr7_read(ghcb, ctxt);
+		break;
+	case SVM_EXIT_WRITE_DR7:
+		result = vc_handle_dr7_write(ghcb, ctxt);
+		break;
+	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
+		result = vc_handle_trap_ac(ghcb, ctxt);
+		break;
+	case SVM_EXIT_RDTSC:
+	case SVM_EXIT_RDTSCP:
+		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
+		break;
+	case SVM_EXIT_RDPMC:
+		result = vc_handle_rdpmc(ghcb, ctxt);
+		break;
+	case SVM_EXIT_INVD:
+		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
+		result = ES_UNSUPPORTED;
+		break;
+	case SVM_EXIT_CPUID:
+		result = vc_handle_cpuid(ghcb, ctxt);
+		break;
+	case SVM_EXIT_IOIO:
+		result = vc_handle_ioio(ghcb, ctxt);
+		break;
+	case SVM_EXIT_MSR:
+		result = vc_handle_msr(ghcb, ctxt);
+		break;
+	case SVM_EXIT_VMMCALL:
+		result = vc_handle_vmmcall(ghcb, ctxt);
+		break;
+	case SVM_EXIT_WBINVD:
+		result = vc_handle_wbinvd(ghcb, ctxt);
+		break;
+	case SVM_EXIT_MONITOR:
+		result = vc_handle_monitor(ghcb, ctxt);
+		break;
+	case SVM_EXIT_MWAIT:
+		result = vc_handle_mwait(ghcb, ctxt);
+		break;
+	case SVM_EXIT_NPF:
+		result = vc_handle_mmio(ghcb, ctxt);
+		break;
+	default:
+		/*
+		 * Unexpected #VC exception
+		 */
+		result = ES_UNSUPPORTED;
+	}
+
+	return result;
+}
+
+static __always_inline bool is_vc2_stack(unsigned long sp)
+{
+	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
+}
+
+static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
+{
+	unsigned long sp, prev_sp;
+
+	sp = (unsigned long)regs;
+	prev_sp = regs->sp;
+
+	/*
+	 * If the code was already executing on the VC2 stack when the #VC
+	 * happened, let it proceed to the normal handling routine. This way the
+	 * code executing on the VC2 stack can cause #VC exceptions to get handled.
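+	 *
+	 * I.e. a context is only considered invalid when the handler frame
+	 * sits on the VC2 stack while the interrupted regs->sp does not,
+	 * which is exactly the check below.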
+ */ + return is_vc2_stack(sp) && !is_vc2_stack(prev_sp); +} + +static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code) +{ + struct ghcb_state state; + struct es_em_ctxt ctxt; + enum es_result result; + struct ghcb *ghcb; + bool ret = true; + + ghcb = __sev_get_ghcb(&state); + + vc_ghcb_invalidate(ghcb); + result = vc_init_em_ctxt(&ctxt, regs, error_code); + + if (result == ES_OK) + result = vc_handle_exitcode(&ctxt, ghcb, error_code); + + __sev_put_ghcb(&state); + + /* Done - now check the result */ + switch (result) { + case ES_OK: + vc_finish_insn(&ctxt); + break; + case ES_UNSUPPORTED: + pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n", + error_code, regs->ip); + ret = false; + break; + case ES_VMM_ERROR: + pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", + error_code, regs->ip); + ret = false; + break; + case ES_DECODE_FAILED: + pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", + error_code, regs->ip); + ret = false; + break; + case ES_EXCEPTION: + vc_forward_exception(&ctxt); + break; + case ES_RETRY: + /* Nothing to do */ + break; + default: + pr_emerg("Unknown result in %s():%d\n", __func__, result); + /* + * Emulating the instruction which caused the #VC exception + * failed - can't continue so print debug information + */ + BUG(); + } + + return ret; +} + +static __always_inline bool vc_is_db(unsigned long error_code) +{ + return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB; +} + +/* + * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode + * and will panic when an error happens. + */ +DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication) +{ + irqentry_state_t irq_state; + + /* + * With the current implementation it is always possible to switch to a + * safe stack because #VC exceptions only happen at known places, like + * intercepted instructions or accesses to MMIO areas/IO ports. They can + * also happen with code instrumentation when the hypervisor intercepts + * #DB, but the critical paths are forbidden to be instrumented, so #DB + * exceptions currently also only happen in safe places. + * + * But keep this here in case the noinstr annotations are violated due + * to bug elsewhere. + */ + if (unlikely(vc_from_invalid_context(regs))) { + instrumentation_begin(); + panic("Can't handle #VC exception from unsupported context\n"); + instrumentation_end(); + } + + /* + * Handle #DB before calling into !noinstr code to avoid recursive #DB. + */ + if (vc_is_db(error_code)) { + exc_debug(regs); + return; + } + + irq_state = irqentry_nmi_enter(regs); + + instrumentation_begin(); + + if (!vc_raw_handle_exception(regs, error_code)) { + /* Show some debug info */ + show_regs(regs); + + /* Ask hypervisor to sev_es_terminate */ + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); + + /* If that fails and we get here - just panic */ + panic("Returned from Terminate-Request to Hypervisor\n"); + } + + instrumentation_end(); + irqentry_nmi_exit(regs, irq_state); +} + +/* + * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode + * and will kill the current task with SIGBUS when an error happens. + */ +DEFINE_IDTENTRY_VC_USER(exc_vmm_communication) +{ + /* + * Handle #DB before calling into !noinstr code to avoid recursive #DB. 
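+	 *
+	 * An intercepted #DB arrives as a #VC with exit code
+	 * SVM_EXIT_EXCP_BASE + X86_TRAP_DB (see vc_is_db()) and is forwarded
+	 * to the non-IST #DB handler below.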
+ */ + if (vc_is_db(error_code)) { + noist_exc_debug(regs); + return; + } + + irqentry_enter_from_user_mode(regs); + instrumentation_begin(); + + if (!vc_raw_handle_exception(regs, error_code)) { + /* + * Do not kill the machine if user-space triggered the + * exception. Send SIGBUS instead and let user-space deal with + * it. + */ + force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0); + } + + instrumentation_end(); + irqentry_exit_to_user_mode(regs); +} + +bool __init handle_vc_boot_ghcb(struct pt_regs *regs) +{ + unsigned long exit_code = regs->orig_ax; + struct es_em_ctxt ctxt; + enum es_result result; + + vc_ghcb_invalidate(boot_ghcb); + + result = vc_init_em_ctxt(&ctxt, regs, exit_code); + if (result == ES_OK) + result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code); + + /* Done - now check the result */ + switch (result) { + case ES_OK: + vc_finish_insn(&ctxt); + break; + case ES_UNSUPPORTED: + early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n", + exit_code, regs->ip); + goto fail; + case ES_VMM_ERROR: + early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", + exit_code, regs->ip); + goto fail; + case ES_DECODE_FAILED: + early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", + exit_code, regs->ip); + goto fail; + case ES_EXCEPTION: + vc_early_forward_exception(&ctxt); + break; + case ES_RETRY: + /* Nothing to do */ + break; + default: + BUG(); + } + + return true; + +fail: + show_regs(regs); + + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); +} + diff --git a/arch/x86/coco/sev/vc-shared.c b/arch/x86/coco/sev/vc-shared.c new file mode 100644 index 000000000000..58b2f985d546 --- /dev/null +++ b/arch/x86/coco/sev/vc-shared.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef __BOOT_COMPRESSED +#define has_cpuflag(f) cpu_feature_enabled(f) +#endif + +static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt, + unsigned long exit_code) +{ + unsigned int opcode = (unsigned int)ctxt->insn.opcode.value; + u8 modrm = ctxt->insn.modrm.value; + + switch (exit_code) { + + case SVM_EXIT_IOIO: + case SVM_EXIT_NPF: + /* handled separately */ + return ES_OK; + + case SVM_EXIT_CPUID: + if (opcode == 0xa20f) + return ES_OK; + break; + + case SVM_EXIT_INVD: + if (opcode == 0x080f) + return ES_OK; + break; + + case SVM_EXIT_MONITOR: + /* MONITOR and MONITORX instructions generate the same error code */ + if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa)) + return ES_OK; + break; + + case SVM_EXIT_MWAIT: + /* MWAIT and MWAITX instructions generate the same error code */ + if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb)) + return ES_OK; + break; + + case SVM_EXIT_MSR: + /* RDMSR */ + if (opcode == 0x320f || + /* WRMSR */ + opcode == 0x300f) + return ES_OK; + break; + + case SVM_EXIT_RDPMC: + if (opcode == 0x330f) + return ES_OK; + break; + + case SVM_EXIT_RDTSC: + if (opcode == 0x310f) + return ES_OK; + break; + + case SVM_EXIT_RDTSCP: + if (opcode == 0x010f && modrm == 0xf9) + return ES_OK; + break; + + case SVM_EXIT_READ_DR7: + if (opcode == 0x210f && + X86_MODRM_REG(ctxt->insn.modrm.value) == 7) + return ES_OK; + break; + + case SVM_EXIT_VMMCALL: + if (opcode == 0x010f && modrm == 0xd9) + return ES_OK; + + break; + + case SVM_EXIT_WRITE_DR7: + if (opcode == 0x230f && + X86_MODRM_REG(ctxt->insn.modrm.value) == 7) + return ES_OK; + break; + + case SVM_EXIT_WBINVD: + if (opcode == 0x90f) + return ES_OK; + break; + + default: + break; + } + + 
sev_printk(KERN_ERR "Wrong/unhandled opcode bytes: 0x%x, exit_code: 0x%lx, rIP: 0x%lx\n", + opcode, exit_code, ctxt->regs->ip); + + return ES_UNSUPPORTED; +} + +static bool vc_decoding_needed(unsigned long exit_code) +{ + /* Exceptions don't require to decode the instruction */ + return !(exit_code >= SVM_EXIT_EXCP_BASE && + exit_code <= SVM_EXIT_LAST_EXCP); +} + +static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt, + struct pt_regs *regs, + unsigned long exit_code) +{ + enum es_result ret = ES_OK; + + memset(ctxt, 0, sizeof(*ctxt)); + ctxt->regs = regs; + + if (vc_decoding_needed(exit_code)) + ret = vc_decode_insn(ctxt); + + return ret; +} + +static void vc_finish_insn(struct es_em_ctxt *ctxt) +{ + ctxt->regs->ip += ctxt->insn.length; +} + +static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt, + unsigned long address, + bool write) +{ + if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = X86_PF_USER; + ctxt->fi.cr2 = address; + if (write) + ctxt->fi.error_code |= X86_PF_WRITE; + + return ES_EXCEPTION; + } + + return ES_OK; +} + +static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt, + void *src, char *buf, + unsigned int data_size, + unsigned int count, + bool backwards) +{ + int i, b = backwards ? -1 : 1; + unsigned long address = (unsigned long)src; + enum es_result ret; + + ret = vc_insn_string_check(ctxt, address, false); + if (ret != ES_OK) + return ret; + + for (i = 0; i < count; i++) { + void *s = src + (i * data_size * b); + char *d = buf + (i * data_size); + + ret = vc_read_mem(ctxt, s, d, data_size); + if (ret != ES_OK) + break; + } + + return ret; +} + +static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt, + void *dst, char *buf, + unsigned int data_size, + unsigned int count, + bool backwards) +{ + int i, s = backwards ? 
-1 : 1; + unsigned long address = (unsigned long)dst; + enum es_result ret; + + ret = vc_insn_string_check(ctxt, address, true); + if (ret != ES_OK) + return ret; + + for (i = 0; i < count; i++) { + void *d = dst + (i * data_size * s); + char *b = buf + (i * data_size); + + ret = vc_write_mem(ctxt, d, b, data_size); + if (ret != ES_OK) + break; + } + + return ret; +} + +#define IOIO_TYPE_STR BIT(2) +#define IOIO_TYPE_IN 1 +#define IOIO_TYPE_INS (IOIO_TYPE_IN | IOIO_TYPE_STR) +#define IOIO_TYPE_OUT 0 +#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR) + +#define IOIO_REP BIT(3) + +#define IOIO_ADDR_64 BIT(9) +#define IOIO_ADDR_32 BIT(8) +#define IOIO_ADDR_16 BIT(7) + +#define IOIO_DATA_32 BIT(6) +#define IOIO_DATA_16 BIT(5) +#define IOIO_DATA_8 BIT(4) + +#define IOIO_SEG_ES (0 << 10) +#define IOIO_SEG_DS (3 << 10) + +static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo) +{ + struct insn *insn = &ctxt->insn; + size_t size; + u64 port; + + *exitinfo = 0; + + switch (insn->opcode.bytes[0]) { + /* INS opcodes */ + case 0x6c: + case 0x6d: + *exitinfo |= IOIO_TYPE_INS; + *exitinfo |= IOIO_SEG_ES; + port = ctxt->regs->dx & 0xffff; + break; + + /* OUTS opcodes */ + case 0x6e: + case 0x6f: + *exitinfo |= IOIO_TYPE_OUTS; + *exitinfo |= IOIO_SEG_DS; + port = ctxt->regs->dx & 0xffff; + break; + + /* IN immediate opcodes */ + case 0xe4: + case 0xe5: + *exitinfo |= IOIO_TYPE_IN; + port = (u8)insn->immediate.value & 0xffff; + break; + + /* OUT immediate opcodes */ + case 0xe6: + case 0xe7: + *exitinfo |= IOIO_TYPE_OUT; + port = (u8)insn->immediate.value & 0xffff; + break; + + /* IN register opcodes */ + case 0xec: + case 0xed: + *exitinfo |= IOIO_TYPE_IN; + port = ctxt->regs->dx & 0xffff; + break; + + /* OUT register opcodes */ + case 0xee: + case 0xef: + *exitinfo |= IOIO_TYPE_OUT; + port = ctxt->regs->dx & 0xffff; + break; + + default: + return ES_DECODE_FAILED; + } + + *exitinfo |= port << 16; + + switch (insn->opcode.bytes[0]) { + case 0x6c: + case 0x6e: + case 0xe4: + case 0xe6: + case 0xec: + case 0xee: + /* Single byte opcodes */ + *exitinfo |= IOIO_DATA_8; + size = 1; + break; + default: + /* Length determined by instruction parsing */ + *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16 + : IOIO_DATA_32; + size = (insn->opnd_bytes == 2) ? 2 : 4; + } + + switch (insn->addr_bytes) { + case 2: + *exitinfo |= IOIO_ADDR_16; + break; + case 4: + *exitinfo |= IOIO_ADDR_32; + break; + case 8: + *exitinfo |= IOIO_ADDR_64; + break; + } + + if (insn_has_rep_prefix(insn)) + *exitinfo |= IOIO_REP; + + return vc_ioio_check(ctxt, (u16)port, size); +} + +static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + struct pt_regs *regs = ctxt->regs; + u64 exit_info_1, exit_info_2; + enum es_result ret; + + ret = vc_ioio_exitinfo(ctxt, &exit_info_1); + if (ret != ES_OK) + return ret; + + if (exit_info_1 & IOIO_TYPE_STR) { + + /* (REP) INS/OUTS */ + + bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF); + unsigned int io_bytes, exit_bytes; + unsigned int ghcb_count, op_count; + unsigned long es_base; + u64 sw_scratch; + + /* + * For the string variants with rep prefix the amount of in/out + * operations per #VC exception is limited so that the kernel + * has a chance to take interrupts and re-schedule while the + * instruction is emulated. + */ + io_bytes = (exit_info_1 >> 4) & 0x7; + ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes; + + op_count = (exit_info_1 & IOIO_REP) ? 
regs->cx : 1;
+		exit_info_2 = min(op_count, ghcb_count);
+		exit_bytes  = exit_info_2 * io_bytes;
+
+		es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
+
+		/* Read bytes of OUTS into the shared buffer */
+		if (!(exit_info_1 & IOIO_TYPE_IN)) {
+			ret = vc_insn_string_read(ctxt,
+					       (void *)(es_base + regs->si),
+					       ghcb->shared_buffer, io_bytes,
+					       exit_info_2, df);
+			if (ret)
+				return ret;
+		}
+
+		/*
+		 * Issue a VMGEXIT to the HV to consume the bytes from the
+		 * shared buffer or to have it write them into the shared buffer
+		 * depending on the instruction: OUTS or INS.
+		 */
+		sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
+		ghcb_set_sw_scratch(ghcb, sw_scratch);
+		ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
+					  exit_info_1, exit_info_2);
+		if (ret != ES_OK)
+			return ret;
+
+		/* Read bytes from shared buffer into the guest's destination. */
+		if (exit_info_1 & IOIO_TYPE_IN) {
+			ret = vc_insn_string_write(ctxt,
+						   (void *)(es_base + regs->di),
+						   ghcb->shared_buffer, io_bytes,
+						   exit_info_2, df);
+			if (ret)
+				return ret;
+
+			if (df)
+				regs->di -= exit_bytes;
+			else
+				regs->di += exit_bytes;
+		} else {
+			if (df)
+				regs->si -= exit_bytes;
+			else
+				regs->si += exit_bytes;
+		}
+
+		if (exit_info_1 & IOIO_REP)
+			regs->cx -= exit_info_2;
+
+		ret = regs->cx ? ES_RETRY : ES_OK;
+
+	} else {
+
+		/* IN/OUT into/from rAX */
+
+		int bits = (exit_info_1 & 0x70) >> 1;
+		u64 rax = 0;
+
+		if (!(exit_info_1 & IOIO_TYPE_IN))
+			rax = lower_bits(regs->ax, bits);
+
+		ghcb_set_rax(ghcb, rax);
+
+		ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
+		if (ret != ES_OK)
+			return ret;
+
+		if (exit_info_1 & IOIO_TYPE_IN) {
+			if (!ghcb_rax_is_valid(ghcb))
+				return ES_VMM_ERROR;
+			regs->ax = lower_bits(ghcb->save.rax, bits);
+		}
+	}
+
+	return ret;
+}
+
+enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+	u32 ret;
+
+	ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0);
+	if (!ret)
+		return ES_OK;
+
+	if (ret == 1) {
+		u64 info = ghcb->save.sw_exit_info_2;
+		unsigned long v = info & SVM_EVTINJ_VEC_MASK;
+
+		/* Check if exception information from hypervisor is sane. 
*/ + if ((info & SVM_EVTINJ_VALID) && + ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) && + ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) { + ctxt->fi.vector = v; + + if (info & SVM_EVTINJ_VALID_ERR) + ctxt->fi.error_code = info >> 32; + + return ES_EXCEPTION; + } + } + + return ES_VMM_ERROR; +} + +enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, + struct es_em_ctxt *ctxt, + u64 exit_code, u64 exit_info_1, + u64 exit_info_2) +{ + /* Fill in protocol and format specifiers */ + ghcb->protocol_version = ghcb_version; + ghcb->ghcb_usage = GHCB_DEFAULT_USAGE; + + ghcb_set_sw_exit_code(ghcb, exit_code); + ghcb_set_sw_exit_info_1(ghcb, exit_info_1); + ghcb_set_sw_exit_info_2(ghcb, exit_info_2); + + sev_es_wr_ghcb_msr(__pa(ghcb)); + VMGEXIT(); + + return verify_exception_info(ghcb, ctxt); +} + +static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf) +{ + u32 cr4 = native_read_cr4(); + int ret; + + ghcb_set_rax(ghcb, leaf->fn); + ghcb_set_rcx(ghcb, leaf->subfn); + + if (cr4 & X86_CR4_OSXSAVE) + /* Safe to read xcr0 */ + ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK)); + else + /* xgetbv will cause #UD - use reset value for xcr0 */ + ghcb_set_xcr0(ghcb, 1); + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0); + if (ret != ES_OK) + return ret; + + if (!(ghcb_rax_is_valid(ghcb) && + ghcb_rbx_is_valid(ghcb) && + ghcb_rcx_is_valid(ghcb) && + ghcb_rdx_is_valid(ghcb))) + return ES_VMM_ERROR; + + leaf->eax = ghcb->save.rax; + leaf->ebx = ghcb->save.rbx; + leaf->ecx = ghcb->save.rcx; + leaf->edx = ghcb->save.rdx; + + return ES_OK; +} + +struct cpuid_ctx { + struct ghcb *ghcb; + struct es_em_ctxt *ctxt; +}; + +static void snp_cpuid_hv_ghcb(void *p, struct cpuid_leaf *leaf) +{ + struct cpuid_ctx *ctx = p; + + if (__sev_cpuid_hv_ghcb(ctx->ghcb, ctx->ctxt, leaf)) + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV); +} + +static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + struct cpuid_ctx ctx = { ghcb, ctxt }; + struct pt_regs *regs = ctxt->regs; + struct cpuid_leaf leaf; + int ret; + + leaf.fn = regs->ax; + leaf.subfn = regs->cx; + ret = snp_cpuid(snp_cpuid_hv_ghcb, &ctx, &leaf); + if (!ret) { + regs->ax = leaf.eax; + regs->bx = leaf.ebx; + regs->cx = leaf.ecx; + regs->dx = leaf.edx; + } + + return ret; +} + +static enum es_result vc_handle_cpuid(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct pt_regs *regs = ctxt->regs; + u32 cr4 = native_read_cr4(); + enum es_result ret; + int snp_cpuid_ret; + + snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt); + if (!snp_cpuid_ret) + return ES_OK; + if (snp_cpuid_ret != -EOPNOTSUPP) + return ES_VMM_ERROR; + + ghcb_set_rax(ghcb, regs->ax); + ghcb_set_rcx(ghcb, regs->cx); + + if (cr4 & X86_CR4_OSXSAVE) + /* Safe to read xcr0 */ + ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK)); + else + /* xgetbv will cause #GP - use reset value for xcr0 */ + ghcb_set_xcr0(ghcb, 1); + + if (has_cpuflag(X86_FEATURE_SHSTK) && regs->ax == 0xd && regs->cx == 1) { + struct msr m; + + raw_rdmsr(MSR_IA32_XSS, &m); + ghcb_set_xss(ghcb, m.q); + } + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0); + if (ret != ES_OK) + return ret; + + if (!(ghcb_rax_is_valid(ghcb) && + ghcb_rbx_is_valid(ghcb) && + ghcb_rcx_is_valid(ghcb) && + ghcb_rdx_is_valid(ghcb))) + return ES_VMM_ERROR; + + regs->ax = ghcb->save.rax; + regs->bx = ghcb->save.rbx; + regs->cx = ghcb->save.rcx; + regs->dx = ghcb->save.rdx; + + return ES_OK; +} + +static enum es_result vc_handle_rdtsc(struct ghcb 
*ghcb, + struct es_em_ctxt *ctxt, + unsigned long exit_code) +{ + bool rdtscp = (exit_code == SVM_EXIT_RDTSCP); + enum es_result ret; + + /* + * The hypervisor should not be intercepting RDTSC/RDTSCP when Secure + * TSC is enabled. A #VC exception will be generated if the RDTSC/RDTSCP + * instructions are being intercepted. If this should occur and Secure + * TSC is enabled, guest execution should be terminated as the guest + * cannot rely on the TSC value provided by the hypervisor. + */ + if (sev_status & MSR_AMD64_SNP_SECURE_TSC) + return ES_VMM_ERROR; + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0); + if (ret != ES_OK) + return ret; + + if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) && + (!rdtscp || ghcb_rcx_is_valid(ghcb)))) + return ES_VMM_ERROR; + + ctxt->regs->ax = ghcb->save.rax; + ctxt->regs->dx = ghcb->save.rdx; + if (rdtscp) + ctxt->regs->cx = ghcb->save.rcx; + + return ES_OK; +} + +void snp_register_ghcb_early(unsigned long paddr) +{ + unsigned long pfn = paddr >> PAGE_SHIFT; + u64 val; + + sev_es_wr_ghcb_msr(GHCB_MSR_REG_GPA_REQ_VAL(pfn)); + VMGEXIT(); + + val = sev_es_rd_ghcb_msr(); + + /* If the response GPA is not ours then abort the guest */ + if ((GHCB_RESP_CODE(val) != GHCB_MSR_REG_GPA_RESP) || + (GHCB_MSR_REG_GPA_RESP_VAL(val) != pfn)) + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_REGISTER); +} + +bool __init sev_es_check_cpu_features(void) +{ + if (!has_cpuflag(X86_FEATURE_RDRAND)) { + error("RDRAND instruction not supported - no trusted source of randomness available\n"); + return false; + } + + return true; +} + +bool sev_es_negotiate_protocol(void) +{ + u64 val; + + /* Do the GHCB protocol version negotiation */ + sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ); + VMGEXIT(); + val = sev_es_rd_ghcb_msr(); + + if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP) + return false; + + if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN || + GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX) + return false; + + ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val), GHCB_PROTOCOL_MAX); + + return true; +} diff --git a/arch/x86/coco/tdx/Makefile b/arch/x86/coco/tdx/Makefile index 46c55998557d..b3c47d3700e2 100644 --- a/arch/x86/coco/tdx/Makefile +++ b/arch/x86/coco/tdx/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += tdx.o tdcall.o +obj-y += debug.o tdcall.o tdx.o tdx-shared.o diff --git a/arch/x86/coco/tdx/debug.c b/arch/x86/coco/tdx/debug.c new file mode 100644 index 000000000000..cef847c8bb67 --- /dev/null +++ b/arch/x86/coco/tdx/debug.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 + +#undef pr_fmt +#define pr_fmt(fmt) "tdx: " fmt + +#include <linux/array_size.h> +#include <linux/printk.h> +#include <asm/tdx.h> + +#define DEF_TDX_ATTR_NAME(_name) [TDX_ATTR_##_name##_BIT] = __stringify(_name) + +static __initdata const char *tdx_attributes[] = { + DEF_TDX_ATTR_NAME(DEBUG), + DEF_TDX_ATTR_NAME(HGS_PLUS_PROF), + DEF_TDX_ATTR_NAME(PERF_PROF), + DEF_TDX_ATTR_NAME(PMT_PROF), + DEF_TDX_ATTR_NAME(ICSSD), + DEF_TDX_ATTR_NAME(LASS), + DEF_TDX_ATTR_NAME(SEPT_VE_DISABLE), + DEF_TDX_ATTR_NAME(MIGRTABLE), + DEF_TDX_ATTR_NAME(PKS), + DEF_TDX_ATTR_NAME(KL), + DEF_TDX_ATTR_NAME(TPA), + DEF_TDX_ATTR_NAME(PERFMON), +}; + +#define DEF_TD_CTLS_NAME(_name) [TD_CTLS_##_name##_BIT] = __stringify(_name) + +static __initdata const char *tdcs_td_ctls[] = { + DEF_TD_CTLS_NAME(PENDING_VE_DISABLE), + DEF_TD_CTLS_NAME(ENUM_TOPOLOGY), + DEF_TD_CTLS_NAME(VIRT_CPUID2), + DEF_TD_CTLS_NAME(REDUCE_VE), + DEF_TD_CTLS_NAME(LOCK), +}; + +void __init tdx_dump_attributes(u64 
td_attr) +{ + pr_info("Attributes:"); + + for (int i = 0; i < ARRAY_SIZE(tdx_attributes); i++) { + if (!tdx_attributes[i]) + continue; + if (td_attr & BIT(i)) + pr_cont(" %s", tdx_attributes[i]); + td_attr &= ~BIT(i); + } + + if (td_attr) + pr_cont(" unknown:%#llx", td_attr); + pr_cont("\n"); + +} + +void __init tdx_dump_td_ctls(u64 td_ctls) +{ + pr_info("TD_CTLS:"); + + for (int i = 0; i < ARRAY_SIZE(tdcs_td_ctls); i++) { + if (!tdcs_td_ctls[i]) + continue; + if (td_ctls & BIT(i)) + pr_cont(" %s", tdcs_td_ctls[i]); + td_ctls &= ~BIT(i); + } + if (td_ctls) + pr_cont(" unknown:%#llx", td_ctls); + pr_cont("\n"); +} diff --git a/arch/x86/coco/tdx/tdcall.S b/arch/x86/coco/tdx/tdcall.S index f9eb1134f22d..52d9786da308 100644 --- a/arch/x86/coco/tdx/tdcall.S +++ b/arch/x86/coco/tdx/tdcall.S @@ -1,205 +1,63 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <asm/asm-offsets.h> #include <asm/asm.h> -#include <asm/frame.h> -#include <asm/unwind_hints.h> #include <linux/linkage.h> -#include <linux/bits.h> #include <linux/errno.h> #include "../../virt/vmx/tdx/tdxcall.S" -/* - * Bitmasks of exposed registers (with VMM). - */ -#define TDX_R10 BIT(10) -#define TDX_R11 BIT(11) -#define TDX_R12 BIT(12) -#define TDX_R13 BIT(13) -#define TDX_R14 BIT(14) -#define TDX_R15 BIT(15) - -/* - * These registers are clobbered to hold arguments for each - * TDVMCALL. They are safe to expose to the VMM. - * Each bit in this mask represents a register ID. Bit field - * details can be found in TDX GHCI specification, section - * titled "TDCALL [TDG.VP.VMCALL] leaf". - */ -#define TDVMCALL_EXPOSE_REGS_MASK ( TDX_R10 | TDX_R11 | \ - TDX_R12 | TDX_R13 | \ - TDX_R14 | TDX_R15 ) +.section .noinstr.text, "ax" /* - * __tdx_module_call() - Used by TDX guests to request services from - * the TDX module (does not include VMM services) using TDCALL instruction. - * - * Transforms function call register arguments into the TDCALL register ABI. - * After TDCALL operation, TDX module output is saved in @out (if it is - * provided by the user). - * - *------------------------------------------------------------------------- - * TDCALL ABI: - *------------------------------------------------------------------------- - * Input Registers: - * - * RAX - TDCALL Leaf number. - * RCX,RDX,R8-R9 - TDCALL Leaf specific input registers. + * __tdcall() - Used by TDX guests to request services from the TDX + * module (does not include VMM services) using TDCALL instruction. * - * Output Registers: + * __tdcall() function ABI: * - * RAX - TDCALL instruction error code. - * RCX,RDX,R8-R11 - TDCALL Leaf specific output registers. + * @fn (RDI) - TDCALL Leaf ID, moved to RAX + * @args (RSI) - struct tdx_module_args for input * - *------------------------------------------------------------------------- - * - * __tdx_module_call() function ABI: - * - * @fn (RDI) - TDCALL Leaf ID, moved to RAX - * @rcx (RSI) - Input parameter 1, moved to RCX - * @rdx (RDX) - Input parameter 2, moved to RDX - * @r8 (RCX) - Input parameter 3, moved to R8 - * @r9 (R8) - Input parameter 4, moved to R9 - * - * @out (R9) - struct tdx_module_output pointer - * stored temporarily in R12 (not - * shared with the TDX module). It - * can be NULL. + * Only RCX/RDX/R8-R11 are used as input registers. * * Return status of TDCALL via RAX. 
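To make the __tdcall()/__tdcall_ret() ABI above concrete: a C caller fills a struct tdx_module_args, passes the leaf ID as the first argument, and reads outputs back from the same struct. A hedged sketch, assuming kernel context (the function name is illustrative; real users in this diff go through the tdcall() and tdg_vm_rd() wrappers instead of calling __tdcall_ret() directly):

/* Sketch: fetch the GPA width via TDG.VP.INFO using __tdcall_ret(). */
static u64 example_read_gpa_width(unsigned int *gpa_width)
{
	struct tdx_module_args args = {};       /* RCX/RDX/R8-R11, zeroed  */
	u64 err;

	err = __tdcall_ret(TDG_VP_INFO, &args); /* leaf in RAX, args in/out */
	if (err)
		return err;

	*gpa_width = args.rcx & GENMASK(5, 0);  /* bits 5:0 of RCX */
	return 0;
}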
*/ -SYM_FUNC_START(__tdx_module_call) - FRAME_BEGIN +SYM_FUNC_START(__tdcall) TDX_MODULE_CALL host=0 - FRAME_END - RET -SYM_FUNC_END(__tdx_module_call) +SYM_FUNC_END(__tdcall) /* - * __tdx_hypercall() - Make hypercalls to a TDX VMM using TDVMCALL leaf - * of TDCALL instruction - * - * Transforms values in function call argument struct tdx_hypercall_args @args - * into the TDCALL register ABI. After TDCALL operation, VMM output is saved - * back in @args. + * __tdcall_ret() - Used by TDX guests to request services from the TDX + * module (does not include VMM services) using TDCALL instruction, with + * saving output registers to the 'struct tdx_module_args' used as input. * - *------------------------------------------------------------------------- - * TD VMCALL ABI: - *------------------------------------------------------------------------- + * __tdcall_ret() function ABI: * - * Input Registers: + * @fn (RDI) - TDCALL Leaf ID, moved to RAX + * @args (RSI) - struct tdx_module_args for input and output * - * RAX - TDCALL instruction leaf number (0 - TDG.VP.VMCALL) - * RCX - BITMAP which controls which part of TD Guest GPR - * is passed as-is to the VMM and back. - * R10 - Set 0 to indicate TDCALL follows standard TDX ABI - * specification. Non zero value indicates vendor - * specific ABI. - * R11 - VMCALL sub function number - * RBX, RBP, RDI, RSI - Used to pass VMCALL sub function specific arguments. - * R8-R9, R12-R15 - Same as above. + * Only RCX/RDX/R8-R11 are used as input/output registers. * - * Output Registers: - * - * RAX - TDCALL instruction status (Not related to hypercall - * output). - * R10 - Hypercall output error code. - * R11-R15 - Hypercall sub function specific output values. + * Return status of TDCALL via RAX. + */ +SYM_FUNC_START(__tdcall_ret) + TDX_MODULE_CALL host=0 ret=1 +SYM_FUNC_END(__tdcall_ret) + +/* + * __tdcall_saved_ret() - Used by TDX guests to request services from the + * TDX module (including VMM services) using TDCALL instruction, with + * saving output registers to the 'struct tdx_module_args' used as input. * - *------------------------------------------------------------------------- + * __tdcall_saved_ret() function ABI: * - * __tdx_hypercall() function ABI: + * @fn (RDI) - TDCALL leaf ID, moved to RAX + * @args (RSI) - struct tdx_module_args for input/output * - * @args (RDI) - struct tdx_hypercall_args for input and output - * @flags (RSI) - TDX_HCALL_* flags + * All registers in @args are used as input/output registers. * * On successful completion, return the hypercall error code. */ -SYM_FUNC_START(__tdx_hypercall) - FRAME_BEGIN - - /* Save callee-saved GPRs as mandated by the x86_64 ABI */ - push %r15 - push %r14 - push %r13 - push %r12 - - /* Mangle function call ABI into TDCALL ABI: */ - /* Set TDCALL leaf ID (TDVMCALL (0)) in RAX */ - xor %eax, %eax - - /* Copy hypercall registers from arg struct: */ - movq TDX_HYPERCALL_r10(%rdi), %r10 - movq TDX_HYPERCALL_r11(%rdi), %r11 - movq TDX_HYPERCALL_r12(%rdi), %r12 - movq TDX_HYPERCALL_r13(%rdi), %r13 - movq TDX_HYPERCALL_r14(%rdi), %r14 - movq TDX_HYPERCALL_r15(%rdi), %r15 - - movl $TDVMCALL_EXPOSE_REGS_MASK, %ecx - - /* - * For the idle loop STI needs to be called directly before the TDCALL - * that enters idle (EXIT_REASON_HLT case). STI instruction enables - * interrupts only one instruction later. 
If there is a window between - * STI and the instruction that emulates the HALT state, there is a - * chance for interrupts to happen in this window, which can delay the - * HLT operation indefinitely. Since this is the not the desired - * result, conditionally call STI before TDCALL. - */ - testq $TDX_HCALL_ISSUE_STI, %rsi - jz .Lskip_sti - sti -.Lskip_sti: - tdcall - - /* - * RAX==0 indicates a failure of the TDVMCALL mechanism itself and that - * something has gone horribly wrong with the TDX module. - * - * The return status of the hypercall operation is in a separate - * register (in R10). Hypercall errors are a part of normal operation - * and are handled by callers. - */ - testq %rax, %rax - jne .Lpanic - - /* TDVMCALL leaf return code is in R10 */ - movq %r10, %rax - - /* Copy hypercall result registers to arg struct if needed */ - testq $TDX_HCALL_HAS_OUTPUT, %rsi - jz .Lout - - movq %r10, TDX_HYPERCALL_r10(%rdi) - movq %r11, TDX_HYPERCALL_r11(%rdi) - movq %r12, TDX_HYPERCALL_r12(%rdi) - movq %r13, TDX_HYPERCALL_r13(%rdi) - movq %r14, TDX_HYPERCALL_r14(%rdi) - movq %r15, TDX_HYPERCALL_r15(%rdi) -.Lout: - /* - * Zero out registers exposed to the VMM to avoid speculative execution - * with VMM-controlled values. This needs to include all registers - * present in TDVMCALL_EXPOSE_REGS_MASK (except R12-R15). R12-R15 - * context will be restored. - */ - xor %r10d, %r10d - xor %r11d, %r11d - - /* Restore callee-saved GPRs as mandated by the x86_64 ABI */ - pop %r12 - pop %r13 - pop %r14 - pop %r15 - - FRAME_END - - RET -.Lpanic: - call __tdx_hypercall_failed - /* __tdx_hypercall_failed never returns */ - REACHABLE - jmp .Lpanic -SYM_FUNC_END(__tdx_hypercall) +SYM_FUNC_START(__tdcall_saved_ret) + TDX_MODULE_CALL host=0 ret=1 saved=1 +SYM_FUNC_END(__tdcall_saved_ret) diff --git a/arch/x86/coco/tdx/tdx-shared.c b/arch/x86/coco/tdx/tdx-shared.c new file mode 100644 index 000000000000..1655aa56a0a5 --- /dev/null +++ b/arch/x86/coco/tdx/tdx-shared.c @@ -0,0 +1,91 @@ +#include <asm/tdx.h> +#include <asm/pgtable.h> + +static unsigned long try_accept_one(phys_addr_t start, unsigned long len, + enum pg_level pg_level) +{ + unsigned long accept_size = page_level_size(pg_level); + struct tdx_module_args args = {}; + u8 page_size; + + if (!IS_ALIGNED(start, accept_size)) + return 0; + + if (len < accept_size) + return 0; + + /* + * Pass the page physical address to the TDX module to accept the + * pending, private page. + * + * Bits 2:0 of RCX encode page size: 0 - 4K, 1 - 2M, 2 - 1G. + */ + switch (pg_level) { + case PG_LEVEL_4K: + page_size = TDX_PS_4K; + break; + case PG_LEVEL_2M: + page_size = TDX_PS_2M; + break; + case PG_LEVEL_1G: + page_size = TDX_PS_1G; + break; + default: + return 0; + } + + args.rcx = start | page_size; + if (__tdcall(TDG_MEM_PAGE_ACCEPT, &args)) + return 0; + + return accept_size; +} + +bool tdx_accept_memory(phys_addr_t start, phys_addr_t end) +{ + /* + * For shared->private conversion, accept the page using + * TDG_MEM_PAGE_ACCEPT TDX module call. + */ + while (start < end) { + unsigned long len = end - start; + unsigned long accept_size; + + /* + * Try larger accepts first. It gives chance to VMM to keep + * 1G/2M Secure EPT entries where possible and speeds up + * process by cutting number of hypercalls (if successful). 
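The cascade below only succeeds at a large size once the cursor is suitably aligned, so a range typically steps in 4K pages until it reaches a 2M boundary, then in 2M chunks, and so on. A runnable illustration of just that gating arithmetic (plain userspace C; pick_accept_size() is a hypothetical mirror of the alignment and length checks in try_accept_one()):

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000ULL
#define SZ_2M 0x200000ULL
#define SZ_1G 0x40000000ULL

/* Largest size with an aligned start and enough remaining length. */
static uint64_t pick_accept_size(uint64_t start, uint64_t len)
{
	static const uint64_t sizes[] = { SZ_1G, SZ_2M, SZ_4K };

	for (int i = 0; i < 3; i++) {
		if (!(start & (sizes[i] - 1)) && len >= sizes[i])
			return sizes[i];
	}
	return 0;
}

int main(void)
{
	uint64_t start = 0x1ff000, end = 0x400000;

	while (start < end) {
		uint64_t sz = pick_accept_size(start, end - start);

		if (!sz)
			return 1;
		/* Prints one 4K accept, then one 2M accept. */
		printf("accept %#llx size %#llx\n",
		       (unsigned long long)start, (unsigned long long)sz);
		start += sz;
	}
	return 0;
}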
+ */ + + accept_size = try_accept_one(start, len, PG_LEVEL_1G); + if (!accept_size) + accept_size = try_accept_one(start, len, PG_LEVEL_2M); + if (!accept_size) + accept_size = try_accept_one(start, len, PG_LEVEL_4K); + if (!accept_size) + return false; + start += accept_size; + } + + return true; +} + +noinstr u64 __tdx_hypercall(struct tdx_module_args *args) +{ + /* + * For TDVMCALL explicitly set RCX to the bitmap of shared registers. + * The caller isn't expected to set @args->rcx anyway. + */ + args->rcx = TDVMCALL_EXPOSE_REGS_MASK; + + /* + * Failure of __tdcall_saved_ret() indicates a failure of the TDVMCALL + * mechanism itself and that something has gone horribly wrong with + * the TDX module. __tdx_hypercall_failed() never returns. + */ + if (__tdcall_saved_ret(TDG_VP_VMCALL, args)) + __tdx_hypercall_failed(); + + /* TDVMCALL leaf return code is in R10 */ + return args->r10; +} diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c index 669d9e4f2901..7b2833705d47 100644 --- a/arch/x86/coco/tdx/tdx.c +++ b/arch/x86/coco/tdx/tdx.c @@ -7,21 +7,17 @@ #include <linux/cpufeature.h> #include <linux/export.h> #include <linux/io.h> +#include <linux/kexec.h> #include <asm/coco.h> #include <asm/tdx.h> #include <asm/vmx.h> +#include <asm/ia32.h> #include <asm/insn.h> #include <asm/insn-eval.h> +#include <asm/paravirt_types.h> #include <asm/pgtable.h> - -/* TDX module Call Leaf IDs */ -#define TDX_GET_INFO 1 -#define TDX_GET_VEINFO 3 -#define TDX_GET_REPORT 4 -#define TDX_ACCEPT_PAGE 6 - -/* TDX hypercall Leaf IDs */ -#define TDVMCALL_MAP_GPA 0x10001 +#include <asm/set_memory.h> +#include <asm/traps.h> /* MMIO direction */ #define EPT_READ 0 @@ -37,54 +33,27 @@ #define VE_GET_PORT_NUM(e) ((e) >> 16) #define VE_IS_IO_STRING(e) ((e) & BIT(4)) -#define ATTR_SEPT_VE_DISABLE BIT(28) - /* TDX Module call error codes */ #define TDCALL_RETURN_CODE(a) ((a) >> 32) #define TDCALL_INVALID_OPERAND 0xc0000100 +#define TDCALL_OPERAND_BUSY 0x80000200 #define TDREPORT_SUBTYPE_0 0 -/* - * Wrapper for standard use of __tdx_hypercall with no output aside from - * return code. - */ -static inline u64 _tdx_hypercall(u64 fn, u64 r12, u64 r13, u64 r14, u64 r15) -{ - struct tdx_hypercall_args args = { - .r10 = TDX_HYPERCALL_STANDARD, - .r11 = fn, - .r12 = r12, - .r13 = r13, - .r14 = r14, - .r15 = r15, - }; - - return __tdx_hypercall(&args, 0); -} +static atomic_long_t nr_shared; /* Called from __tdx_hypercall() for unrecoverable failure */ -void __tdx_hypercall_failed(void) +noinstr void __noreturn __tdx_hypercall_failed(void) { + instrumentation_begin(); panic("TDVMCALL failed. TDX module bug?"); } -/* - * The TDG.VP.VMCALL-Instruction-execution sub-functions are defined - * independently from but are currently matched 1:1 with VMX EXIT_REASONs. - * Reusing the KVM EXIT_REASON macros makes it easier to connect the host and - * guest sides of these calls. 
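With the new calling convention, every TDVMCALL is just a filled-in struct tdx_module_args handed to __tdx_hypercall(), which supplies RCX itself. A hedged sketch of a standard GHCI hypercall in this style (kernel context assumed; the leaf value 0x10000 is TDG.VP.VMCALL<GetTdVmCallInfo> per the GHCI spec, and the function name is illustrative):

/* Sketch: issue a standard GHCI hypercall through __tdx_hypercall(). */
static u64 example_get_vmcall_info(void)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,  /* standard, not vendor ABI */
		.r11 = 0x10000,                 /* GetTdVmCallInfo leaf     */
		.r12 = 0,                       /* leaf-specific argument   */
	};

	/* R10 carries the hypercall status on return; 0 means success. */
	return __tdx_hypercall(&args);
}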
- */ -static u64 hcall_func(u64 exit_reason) -{ - return exit_reason; -} - #ifdef CONFIG_KVM_GUEST long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2, unsigned long p3, unsigned long p4) { - struct tdx_hypercall_args args = { + struct tdx_module_args args = { .r10 = nr, .r11 = p1, .r12 = p2, @@ -92,7 +61,7 @@ long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2, .r14 = p4, }; - return __tdx_hypercall(&args, 0); + return __tdx_hypercall(&args); } EXPORT_SYMBOL_GPL(tdx_kvm_hypercall); #endif @@ -102,13 +71,38 @@ EXPORT_SYMBOL_GPL(tdx_kvm_hypercall); * should only be used for calls that have no legitimate reason to fail * or where the kernel can not survive the call failing. */ -static inline void tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9, - struct tdx_module_output *out) +static inline void tdcall(u64 fn, struct tdx_module_args *args) { - if (__tdx_module_call(fn, rcx, rdx, r8, r9, out)) + if (__tdcall_ret(fn, args)) panic("TDCALL %lld failed (Buggy TDX module!)\n", fn); } +/* Read TD-scoped metadata */ +static inline u64 tdg_vm_rd(u64 field, u64 *value) +{ + struct tdx_module_args args = { + .rdx = field, + }; + u64 ret; + + ret = __tdcall_ret(TDG_VM_RD, &args); + *value = args.r8; + + return ret; +} + +/* Write TD-scoped metadata */ +static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask) +{ + struct tdx_module_args args = { + .rdx = field, + .r8 = value, + .r9 = mask, + }; + + return __tdcall(TDG_VM_WR, &args); +} + /** * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT * subtype 0) using TDG.MR.REPORT TDCALL. @@ -116,23 +110,29 @@ static inline void tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9, * REPORTDATA to be included into TDREPORT. * @tdreport: Address of the output buffer to store TDREPORT. * - * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module - * v1.0 specification for more information on TDG.MR.REPORT TDCALL. + * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module v1.0 + * specification for more information on TDG.MR.REPORT TDCALL. + * * It is used in the TDX guest driver module to get the TDREPORT0. * - * Return 0 on success, -EINVAL for invalid operands, or -EIO on - * other TDCALL failures. + * Return 0 on success, -ENXIO for invalid operands, -EBUSY for busy operation, + * or -EIO on other TDCALL failures. */ int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport) { + struct tdx_module_args args = { + .rcx = virt_to_phys(tdreport), + .rdx = virt_to_phys(reportdata), + .r8 = TDREPORT_SUBTYPE_0, + }; u64 ret; - ret = __tdx_module_call(TDX_GET_REPORT, virt_to_phys(tdreport), - virt_to_phys(reportdata), TDREPORT_SUBTYPE_0, - 0, NULL); + ret = __tdcall(TDG_MR_REPORT, &args); if (ret) { if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND) - return -EINVAL; + return -ENXIO; + else if (TDCALL_RETURN_CODE(ret) == TDCALL_OPERAND_BUSY) + return -EBUSY; return -EIO; } @@ -140,9 +140,195 @@ int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport) } EXPORT_SYMBOL_GPL(tdx_mcall_get_report0); -static void tdx_parse_tdinfo(u64 *cc_mask) +/** + * tdx_mcall_extend_rtmr() - Wrapper to extend RTMR registers using + * TDG.MR.RTMR.EXTEND TDCALL. + * @index: Index of RTMR register to be extended. + * @data: Address of the input buffer with RTMR register extend data. + * + * Refer to section titled "TDG.MR.RTMR.EXTEND leaf" in the TDX Module v1.0 + * specification for more information on TDG.MR.RTMR.EXTEND TDCALL. 
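tdx_mcall_get_report0() above and tdx_mcall_extend_rtmr() below map TDX module status words to errno identically, keying on the class code in the upper 32 bits. The shared pattern could be factored like this (a hypothetical helper, not part of this diff; it reuses the TDCALL_* macros defined earlier in tdx.c):

/* Hypothetical consolidation of the status-to-errno mapping. */
static int tdcall_status_to_errno(u64 ret)
{
	if (!ret)
		return 0;

	switch (TDCALL_RETURN_CODE(ret)) {      /* class in bits 63:32 */
	case TDCALL_INVALID_OPERAND:
		return -ENXIO;
	case TDCALL_OPERAND_BUSY:
		return -EBUSY;
	default:
		return -EIO;
	}
}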
+ * + * It is used in the TDX guest driver module to allow user to extend the RTMR + * registers. + * + * Return 0 on success, -ENXIO for invalid operands, -EBUSY for busy operation, + * or -EIO on other TDCALL failures. + */ +int tdx_mcall_extend_rtmr(u8 index, u8 *data) +{ + struct tdx_module_args args = { + .rcx = virt_to_phys(data), + .rdx = index, + }; + u64 ret; + + ret = __tdcall(TDG_MR_RTMR_EXTEND, &args); + if (ret) { + if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND) + return -ENXIO; + if (TDCALL_RETURN_CODE(ret) == TDCALL_OPERAND_BUSY) + return -EBUSY; + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(tdx_mcall_extend_rtmr); + +/** + * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote + * hypercall. + * @buf: Address of the directly mapped shared kernel buffer which + * contains TDREPORT. The same buffer will be used by VMM to + * store the generated TD Quote output. + * @size: size of the tdquote buffer (4KB-aligned). + * + * Refer to section titled "TDG.VP.VMCALL<GetQuote>" in the TDX GHCI + * v1.0 specification for more information on GetQuote hypercall. + * It is used in the TDX guest driver module to get the TD Quote. + * + * Return 0 on success or error code on failure. + */ +u64 tdx_hcall_get_quote(u8 *buf, size_t size) +{ + /* Since buf is a shared memory, set the shared (decrypted) bits */ + return _tdx_hypercall(TDVMCALL_GET_QUOTE, cc_mkdec(virt_to_phys(buf)), size, 0, 0); +} +EXPORT_SYMBOL_GPL(tdx_hcall_get_quote); + +static void __noreturn tdx_panic(const char *msg) { - struct tdx_module_output out; + struct tdx_module_args args = { + .r10 = TDX_HYPERCALL_STANDARD, + .r11 = TDVMCALL_REPORT_FATAL_ERROR, + .r12 = 0, /* Error code: 0 is Panic */ + }; + union { + /* Define register order according to the GHCI */ + struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; }; + + char bytes[64] __nonstring; + } message; + + /* VMM assumes '\0' in byte 65, if the message took all 64 bytes */ + strtomem_pad(message.bytes, msg, '\0'); + + args.r8 = message.r8; + args.r9 = message.r9; + args.r14 = message.r14; + args.r15 = message.r15; + args.rdi = message.rdi; + args.rsi = message.rsi; + args.rbx = message.rbx; + args.rdx = message.rdx; + + /* + * This hypercall should never return and it is not safe + * to keep the guest running. Call it forever if it + * happens to return. + */ + while (1) + __tdx_hypercall(&args); +} + +/* + * The kernel cannot handle #VEs when accessing normal kernel memory. Ensure + * that no #VE will be delivered for accesses to TD-private memory. + * + * TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM + * controls if the guest will receive such #VE with TD attribute + * TDX_ATTR_SEPT_VE_DISABLE. + * + * Newer TDX modules allow the guest to control if it wants to receive SEPT + * violation #VEs. + * + * Check if the feature is available and disable SEPT #VE if possible. + * + * If the TD is allowed to disable/enable SEPT #VEs, the TDX_ATTR_SEPT_VE_DISABLE + * attribute is no longer reliable. It reflects the initial state of the + * control for the TD, but it will not be updated if someone (e.g. bootloader) + * changes it before the kernel starts. Kernel must check TDCS_TD_CTLS bit to + * determine if SEPT #VEs are enabled or disabled. 
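In other words, the authoritative check is a runtime metadata read rather than the attribute bit. A sketch using the tdg_vm_rd() helper defined earlier in this diff (the function name is illustrative):

/* Sketch: is SEPT #VE delivery currently disabled for this TD? */
static bool example_sept_ve_disabled(void)
{
	u64 controls = 0;

	if (tdg_vm_rd(TDCS_TD_CTLS, &controls))
		return false;   /* read failed; assume #VEs still enabled */

	return controls & TD_CTLS_PENDING_VE_DISABLE;
}

The matching write side is the masked read-modify-write visible below: tdg_vm_wr(TDCS_TD_CTLS, bit, bit) flips only the bits selected by the mask in R9 and leaves the rest of the field untouched.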
+ */ +static void disable_sept_ve(u64 td_attr) +{ + const char *msg = "TD misconfiguration: SEPT #VE has to be disabled"; + bool debug = td_attr & TDX_ATTR_DEBUG; + u64 config, controls; + + /* Is this TD allowed to disable SEPT #VE */ + tdg_vm_rd(TDCS_CONFIG_FLAGS, &config); + if (!(config & TDCS_CONFIG_FLEXIBLE_PENDING_VE)) { + /* No SEPT #VE controls for the guest: check the attribute */ + if (td_attr & TDX_ATTR_SEPT_VE_DISABLE) + return; + + /* Relax SEPT_VE_DISABLE check for debug TD for backtraces */ + if (debug) + pr_warn("%s\n", msg); + else + tdx_panic(msg); + return; + } + + /* Check if SEPT #VE has been disabled before us */ + tdg_vm_rd(TDCS_TD_CTLS, &controls); + if (controls & TD_CTLS_PENDING_VE_DISABLE) + return; + + /* Keep #VEs enabled for splats in debugging environments */ + if (debug) + return; + + /* Disable SEPT #VEs */ + tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE, + TD_CTLS_PENDING_VE_DISABLE); +} + +/* + * TDX 1.0 generates a #VE when accessing topology-related CPUID leafs (0xB and + * 0x1F) and the X2APIC_APICID MSR. The kernel returns all zeros on CPUID #VEs. + * In practice, this means that the kernel can only boot with a plain topology. + * Any complications will cause problems. + * + * The ENUM_TOPOLOGY feature allows the VMM to provide topology information. + * Enabling the feature eliminates topology-related #VEs: the TDX module + * virtualizes accesses to the CPUID leafs and the MSR. + * + * Enable ENUM_TOPOLOGY if it is available. + */ +static void enable_cpu_topology_enumeration(void) +{ + u64 configured; + + /* Has the VMM provided a valid topology configuration? */ + tdg_vm_rd(TDCS_TOPOLOGY_ENUM_CONFIGURED, &configured); + if (!configured) { + pr_err("VMM did not configure X2APIC_IDs properly\n"); + return; + } + + tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_ENUM_TOPOLOGY, TD_CTLS_ENUM_TOPOLOGY); +} + +static void reduce_unnecessary_ve(void) +{ + u64 err = tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_REDUCE_VE, TD_CTLS_REDUCE_VE); + + if (err == TDX_SUCCESS) + return; + + /* + * Enabling REDUCE_VE includes ENUM_TOPOLOGY. Only try to + * enable ENUM_TOPOLOGY if REDUCE_VE was not successful. + */ + enable_cpu_topology_enumeration(); +} + +static void tdx_setup(u64 *cc_mask) +{ + struct tdx_module_args args = {}; unsigned int gpa_width; u64 td_attr; @@ -153,7 +339,7 @@ static void tdx_parse_tdinfo(u64 *cc_mask) * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL * [TDG.VP.INFO]. */ - tdx_module_call(TDX_GET_INFO, 0, 0, 0, 0, &out); + tdcall(TDG_VP_INFO, &args); /* * The highest bit of a guest physical address is the "sharing" bit. @@ -162,17 +348,17 @@ static void tdx_parse_tdinfo(u64 *cc_mask) * The GPA width that comes out of this call is critical. TDX guests * can not meaningfully run without it. */ - gpa_width = out.rcx & GENMASK(5, 0); + gpa_width = args.rcx & GENMASK(5, 0); *cc_mask = BIT_ULL(gpa_width - 1); - /* - * The kernel can not handle #VE's when accessing normal kernel - * memory. Ensure that no #VE will be delivered for accesses to - * TD-private memory. Only VMM-shared memory (MMIO) will #VE. 
- */ - td_attr = out.rdx; - if (!(td_attr & ATTR_SEPT_VE_DISABLE)) - panic("TD misconfiguration: SEPT_VE_DISABLE attibute must be set.\n"); + td_attr = args.rdx; + + /* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */ + tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL); + + disable_sept_ve(td_attr); + + reduce_unnecessary_ve(); } /* @@ -220,9 +406,9 @@ static int ve_instr_len(struct ve_info *ve) } } -static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti) +static u64 __cpuidle __halt(const bool irq_disabled) { - struct tdx_hypercall_args args = { + struct tdx_module_args args = { .r10 = TDX_HYPERCALL_STANDARD, .r11 = hcall_func(EXIT_REASON_HLT), .r12 = irq_disabled, @@ -240,45 +426,51 @@ static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti) * can keep the vCPU in virtual HLT, even if an IRQ is * pending, without hanging/breaking the guest. */ - return __tdx_hypercall(&args, do_sti ? TDX_HCALL_ISSUE_STI : 0); + return __tdx_hypercall(&args); } static int handle_halt(struct ve_info *ve) { + const bool irq_disabled = irqs_disabled(); + /* - * Since non safe halt is mainly used in CPU offlining - * and the guest will always stay in the halt state, don't - * call the STI instruction (set do_sti as false). + * HLT with IRQs enabled is unsafe, as an IRQ that is intended to be a + * wake event may be consumed before requesting HLT emulation, leaving + * the vCPU blocking indefinitely. */ - const bool irq_disabled = irqs_disabled(); - const bool do_sti = false; + if (WARN_ONCE(!irq_disabled, "HLT emulation with IRQs enabled")) + return -EIO; - if (__halt(irq_disabled, do_sti)) + if (__halt(irq_disabled)) return -EIO; return ve_instr_len(ve); } -void __cpuidle tdx_safe_halt(void) +void __cpuidle tdx_halt(void) { - /* - * For do_sti=true case, __tdx_hypercall() function enables - * interrupts using the STI instruction before the TDCALL. So - * set irq_disabled as false. - */ const bool irq_disabled = false; - const bool do_sti = true; /* * Use WARN_ONCE() to report the failure. */ - if (__halt(irq_disabled, do_sti)) + if (__halt(irq_disabled)) WARN_ONCE(1, "HLT instruction emulation failed\n"); } +static void __cpuidle tdx_safe_halt(void) +{ + tdx_halt(); + /* + * "__cpuidle" section doesn't support instrumentation, so stick + * with raw_* variant that avoids tracing hooks. + */ + raw_local_irq_enable(); +} + static int read_msr(struct pt_regs *regs, struct ve_info *ve) { - struct tdx_hypercall_args args = { + struct tdx_module_args args = { .r10 = TDX_HYPERCALL_STANDARD, .r11 = hcall_func(EXIT_REASON_MSR_READ), .r12 = regs->cx, @@ -289,7 +481,7 @@ static int read_msr(struct pt_regs *regs, struct ve_info *ve) * can be found in TDX Guest-Host-Communication Interface * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>". */ - if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT)) + if (__tdx_hypercall(&args)) return -EIO; regs->ax = lower_32_bits(args.r11); @@ -299,7 +491,7 @@ static int read_msr(struct pt_regs *regs, struct ve_info *ve) static int write_msr(struct pt_regs *regs, struct ve_info *ve) { - struct tdx_hypercall_args args = { + struct tdx_module_args args = { .r10 = TDX_HYPERCALL_STANDARD, .r11 = hcall_func(EXIT_REASON_MSR_WRITE), .r12 = regs->cx, @@ -311,7 +503,7 @@ static int write_msr(struct pt_regs *regs, struct ve_info *ve) * can be found in TDX Guest-Host-Communication Interface * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>". 
*/ - if (__tdx_hypercall(&args, 0)) + if (__tdx_hypercall(&args)) return -EIO; return ve_instr_len(ve); @@ -319,7 +511,7 @@ static int write_msr(struct pt_regs *regs, struct ve_info *ve) static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve) { - struct tdx_hypercall_args args = { + struct tdx_module_args args = { .r10 = TDX_HYPERCALL_STANDARD, .r11 = hcall_func(EXIT_REASON_CPUID), .r12 = regs->ax, @@ -343,7 +535,7 @@ static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve) * ABI can be found in TDX Guest-Host-Communication Interface * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>". */ - if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT)) + if (__tdx_hypercall(&args)) return -EIO; /* @@ -361,17 +553,17 @@ static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve) static bool mmio_read(int size, unsigned long addr, unsigned long *val) { - struct tdx_hypercall_args args = { + struct tdx_module_args args = { .r10 = TDX_HYPERCALL_STANDARD, .r11 = hcall_func(EXIT_REASON_EPT_VIOLATION), .r12 = size, .r13 = EPT_READ, .r14 = addr, - .r15 = *val, }; - if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT)) + if (__tdx_hypercall(&args)) return false; + *val = args.r11; return true; } @@ -411,6 +603,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve) return -EINVAL; } + if (!fault_in_kernel_space(ve->gla)) { + WARN_ONCE(1, "Access to userspace address is not supported"); + return -EINVAL; + } + /* * Reject EPT violation #VEs that split pages. * @@ -489,7 +686,7 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve) static bool handle_in(struct pt_regs *regs, int size, int port) { - struct tdx_hypercall_args args = { + struct tdx_module_args args = { .r10 = TDX_HYPERCALL_STANDARD, .r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION), .r12 = size, @@ -504,7 +701,7 @@ static bool handle_in(struct pt_regs *regs, int size, int port) * in TDX Guest-Host-Communication Interface (GHCI) section titled * "TDG.VP.VMCALL<Instruction.IO>". */ - success = !__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT); + success = !__tdx_hypercall(&args); /* Update part of the register affected by the emulated instruction */ regs->ax &= ~mask; @@ -583,7 +780,7 @@ __init bool tdx_early_handle_ve(struct pt_regs *regs) void tdx_get_ve_info(struct ve_info *ve) { - struct tdx_module_output out; + struct tdx_module_args args = {}; /* * Called during #VE handling to retrieve the #VE info from the @@ -600,15 +797,15 @@ void tdx_get_ve_info(struct ve_info *ve) * Note, the TDX module treats virtual NMIs as inhibited if the #VE * valid flag is set. It means that NMI=>#VE will not result in a #DF. */ - tdx_module_call(TDX_GET_VEINFO, 0, 0, 0, 0, &out); + tdcall(TDG_VP_VEINFO_GET, &args); /* Transfer the output parameters */ - ve->exit_reason = out.rcx; - ve->exit_qual = out.rdx; - ve->gla = out.r8; - ve->gpa = out.r9; - ve->instr_len = lower_32_bits(out.r10); - ve->instr_info = upper_32_bits(out.r10); + ve->exit_reason = args.rcx; + ve->exit_qual = args.rdx; + ve->gla = args.r8; + ve->gpa = args.r9; + ve->instr_len = lower_32_bits(args.r10); + ve->instr_info = upper_32_bits(args.r10); } /* @@ -628,6 +825,11 @@ static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve) } } +static inline bool is_private_gpa(u64 gpa) +{ + return gpa == cc_mkenc(gpa); +} + /* * Handle the kernel #VE. 
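The is_private_gpa() helper above leans on the Intel convention from core.c: "encrypted" (private) means the shared bit is clear, and cc_mkenc() clears it, so a GPA equals its encrypted form exactly when it is already private. A runnable toy version (userspace C; mkenc() stands in for cc_mkenc() and assumes a 52-bit GPA width, putting the shared bit at bit 51):

#include <stdint.h>
#include <stdio.h>

#define CC_MASK (1ULL << 51)   /* shared bit for gpa_width == 52 */

/* Intel convention: clearing the shared bit yields the private form. */
static uint64_t mkenc(uint64_t gpa) { return gpa & ~CC_MASK; }

int main(void)
{
	uint64_t priv   = 0x12345000ULL;    /* shared bit clear: private */
	uint64_t shared = priv | CC_MASK;   /* shared bit set:   shared  */

	printf("priv   is private? %d\n", priv   == mkenc(priv));    /* 1 */
	printf("shared is private? %d\n", shared == mkenc(shared));  /* 0 */
	return 0;
}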
* @@ -646,6 +848,8 @@ static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve) case EXIT_REASON_CPUID: return handle_cpuid(regs, ve); case EXIT_REASON_EPT_VIOLATION: + if (is_private_gpa(ve->gpa)) + panic("Unexpected EPT-violation on private memory."); return handle_mmio(regs, ve); case EXIT_REASON_IO_INSTRUCTION: return handle_io(regs, ve); @@ -701,45 +905,55 @@ static bool tdx_cache_flush_required(void) return true; } -static bool try_accept_one(phys_addr_t *start, unsigned long len, - enum pg_level pg_level) +/* + * Notify the VMM about page mapping conversion. More info about ABI + * can be found in TDX Guest-Host-Communication Interface (GHCI), + * section "TDG.VP.VMCALL<MapGPA>". + */ +static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc) { - unsigned long accept_size = page_level_size(pg_level); - u64 tdcall_rcx; - u8 page_size; + /* Retrying the hypercall a second time should succeed; use 3 just in case */ + const int max_retries_per_page = 3; + int retry_count = 0; - if (!IS_ALIGNED(*start, accept_size)) - return false; + if (!enc) { + /* Set the shared (decrypted) bits: */ + start |= cc_mkdec(0); + end |= cc_mkdec(0); + } - if (len < accept_size) - return false; + while (retry_count < max_retries_per_page) { + struct tdx_module_args args = { + .r10 = TDX_HYPERCALL_STANDARD, + .r11 = TDVMCALL_MAP_GPA, + .r12 = start, + .r13 = end - start }; - /* - * Pass the page physical address to the TDX module to accept the - * pending, private page. - * - * Bits 2:0 of RCX encode page size: 0 - 4K, 1 - 2M, 2 - 1G. - */ - switch (pg_level) { - case PG_LEVEL_4K: - page_size = 0; - break; - case PG_LEVEL_2M: - page_size = 1; - break; - case PG_LEVEL_1G: - page_size = 2; - break; - default: - return false; - } + u64 map_fail_paddr; + u64 ret = __tdx_hypercall(&args); - tdcall_rcx = *start | page_size; - if (__tdx_module_call(TDX_ACCEPT_PAGE, tdcall_rcx, 0, 0, 0, NULL)) - return false; + if (ret != TDVMCALL_STATUS_RETRY) + return !ret; + /* + * The guest must retry the operation for the pages in the + * region starting at the GPA specified in R11. R11 comes + * from the untrusted VMM. Sanity check it. + */ + map_fail_paddr = args.r11; + if (map_fail_paddr < start || map_fail_paddr >= end) + return false; - *start += accept_size; - return true; + /* "Consume" a retry without forward progress */ + if (map_fail_paddr == start) { + retry_count++; + continue; + } + + start = map_fail_paddr; + retry_count = 0; + } + + return false; } /* @@ -752,48 +966,148 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc) phys_addr_t start = __pa(vaddr); phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE); - if (!enc) { - /* Set the shared (decrypted) bits: */ - start |= cc_mkdec(0); - end |= cc_mkdec(0); - } + if (!tdx_map_gpa(start, end, enc)) + return false; + + /* shared->private conversion requires memory to be accepted before use */ + if (enc) + return tdx_accept_memory(start, end); + return true; +} + +static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages, + bool enc) +{ /* - * Notify the VMM about page mapping conversion. More info about ABI - * can be found in TDX Guest-Host-Communication Interface (GHCI), - * section "TDG.VP.VMCALL<MapGPA>" + * Only handle shared->private conversion here. + * See the comment in tdx_early_init(). 
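The retry policy in tdx_map_gpa() above is worth restating: a TDVMCALL_STATUS_RETRY result only consumes one of the three retries when the VMM reports no forward progress, and any progress resets the budget. A standalone trace of that policy with the hypercall stubbed out (userspace C; all names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_RETRIES 3
#define RETRY_BELOW 0x3000ULL   /* stub VMM succeeds from here upward */

/* Stub for the MapGPA hypercall: 1 means "retry from *fail_paddr". */
static int stub_map_gpa(uint64_t start, uint64_t *fail_paddr)
{
	if (start >= RETRY_BELOW)
		return 0;
	*fail_paddr = start + 0x1000;   /* one page of forward progress */
	return 1;
}

int main(void)
{
	uint64_t start = 0x1000, end = 0x5000, fail;
	int retries = 0;

	while (retries < MAX_RETRIES) {
		if (!stub_map_gpa(start, &fail)) {
			puts("mapped");
			return 0;
		}
		if (fail < start || fail >= end)   /* sanity-check VMM value */
			break;
		if (fail == start) {    /* no progress: burn one retry */
			retries++;
			continue;
		}
		start = fail;           /* progress: reset the retry budget */
		retries = 0;
	}
	puts("failed");
	return 1;
}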
*/ - if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0)) - return false; + if (enc && !tdx_enc_status_changed(vaddr, numpages, enc)) + return -EIO; - /* private->shared conversion requires only MapGPA call */ - if (!enc) - return true; + return 0; +} +static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages, + bool enc) +{ /* - * For shared->private conversion, accept the page using - * TDX_ACCEPT_PAGE TDX module call. + * Only handle private->shared conversion here. + * See the comment in tdx_early_init(). */ - while (start < end) { - unsigned long len = end - start; + if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc)) + return -EIO; - /* - * Try larger accepts first. It gives chance to VMM to keep - * 1G/2M SEPT entries where possible and speeds up process by - * cutting number of hypercalls (if successful). - */ + if (enc) + atomic_long_sub(numpages, &nr_shared); + else + atomic_long_add(numpages, &nr_shared); - if (try_accept_one(&start, len, PG_LEVEL_1G)) - continue; + return 0; +} - if (try_accept_one(&start, len, PG_LEVEL_2M)) - continue; +/* Stop new private<->shared conversions */ +static void tdx_kexec_begin(void) +{ + if (!IS_ENABLED(CONFIG_KEXEC_CORE)) + return; - if (!try_accept_one(&start, len, PG_LEVEL_4K)) - return false; + /* + * Crash kernel reaches here with interrupts disabled: can't wait for + * conversions to finish. + * + * If race happened, just report and proceed. + */ + if (!set_memory_enc_stop_conversion()) + pr_warn("Failed to stop shared<->private conversions\n"); +} + +/* Walk direct mapping and convert all shared memory back to private */ +static void tdx_kexec_finish(void) +{ + unsigned long addr, end; + long found = 0, shared; + + if (!IS_ENABLED(CONFIG_KEXEC_CORE)) + return; + + lockdep_assert_irqs_disabled(); + + addr = PAGE_OFFSET; + end = PAGE_OFFSET + get_max_mapped(); + + while (addr < end) { + unsigned long size; + unsigned int level; + pte_t *pte; + + pte = lookup_address(addr, &level); + size = page_level_size(level); + + if (pte && pte_decrypted(*pte)) { + int pages = size / PAGE_SIZE; + + /* + * Touching memory with shared bit set triggers implicit + * conversion to shared. + * + * Make sure nobody touches the shared range from + * now on. + */ + set_pte(pte, __pte(0)); + + /* + * Memory encryption state persists across kexec. + * If tdx_enc_status_changed() fails in the first + * kernel, it leaves memory in an unknown state. + * + * If that memory remains shared, accessing it in the + * *next* kernel through a private mapping will result + * in an unrecoverable guest shutdown. + * + * The kdump kernel boot is not impacted as it uses + * a pre-reserved memory range that is always private. + * However, gathering crash information could lead to + * a crash if it accesses unconverted memory through + * a private mapping which is possible when accessing + * that memory through /proc/vmcore, for example. + * + * In all cases, print error info in order to leave + * enough bread crumbs for debugging. 
+ */ + if (!tdx_enc_status_changed(addr, pages, true)) { + pr_err("Failed to unshare range %#lx-%#lx\n", + addr, addr + size); + } + + found += pages; + } + + addr += size; } - return true; + __flush_tlb_all(); + + shared = atomic_long_read(&nr_shared); + if (shared != found) { + pr_err("shared page accounting is off\n"); + pr_err("nr_shared = %ld, nr_found = %ld\n", shared, found); + } +} + +static __init void tdx_announce(void) +{ + struct tdx_module_args args = {}; + u64 controls; + + pr_info("Guest detected\n"); + + tdcall(TDG_VP_INFO, &args); + tdx_dump_attributes(args.rdx); + + tdg_vm_rd(TDCS_TD_CTLS, &controls); + tdx_dump_td_ctls(controls); } void __init tdx_early_init(void) @@ -808,8 +1122,14 @@ void __init tdx_early_init(void) setup_force_cpu_cap(X86_FEATURE_TDX_GUEST); - cc_set_vendor(CC_VENDOR_INTEL); - tdx_parse_tdinfo(&cc_mask); + /* TSC is the only reliable clock in TDX guest */ + setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); + + cc_vendor = CC_VENDOR_INTEL; + + /* Configure the TD */ + tdx_setup(&cc_mask); + cc_set_mask(cc_mask); /* @@ -820,9 +1140,57 @@ void __init tdx_early_init(void) */ physical_mask &= cc_mask - 1; - x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required; - x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required; - x86_platform.guest.enc_status_change_finish = tdx_enc_status_changed; + /* + * The kernel mapping should match the TDX metadata for the page. + * load_unaligned_zeropad() can touch memory *adjacent* to that which is + * owned by the caller and can catch even _momentary_ mismatches. Bad + * things happen on mismatch: + * + * - Private mapping => Shared Page == Guest shutdown + * - Shared mapping => Private Page == Recoverable #VE + * + * guest.enc_status_change_prepare() converts the page from + * shared=>private before the mapping becomes private. + * + * guest.enc_status_change_finish() converts the page from + * private=>shared after the mapping becomes private. + * + * In both cases there is a temporary shared mapping to a private page, + * which can result in a #VE. But, there is never a private mapping to + * a shared page. + */ + x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare; + x86_platform.guest.enc_status_change_finish = tdx_enc_status_change_finish; + + x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required; + x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required; - pr_info("Guest detected\n"); + x86_platform.guest.enc_kexec_begin = tdx_kexec_begin; + x86_platform.guest.enc_kexec_finish = tdx_kexec_finish; + + /* + * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that + * will enable interrupts before HLT TDCALL invocation if executed + * in STI-shadow, possibly resulting in missed wakeup events. + * + * Modify all possible HLT execution paths to use TDX specific routines + * that directly execute TDCALL and toggle the interrupt state as + * needed after TDCALL completion. This also reduces HLT related #VEs + * in addition to having a reliable halt logic execution. + */ + pv_ops.irq.safe_halt = tdx_safe_halt; + pv_ops.irq.halt = tdx_halt; + + /* + * TDX intercepts the RDMSR to read the X2APIC ID in the parallel + * bringup low level code. That raises #VE which cannot be handled + * there. + * + * Intel-TDX has a secure RDMSR hypercall, but that needs to be + * implemented separately in the low level startup ASM code. + * Until that is in place, disable parallel bringup for TDX. 
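One line in tdx_early_init() below deserves unpacking: physical_mask &= cc_mask - 1. With the shared bit at position gpa_width - 1, cc_mask - 1 is a mask of all lower bits, so the AND strips the shared bit (and anything above it) from the kernel's notion of a physical address. A runnable check of the arithmetic, assuming gpa_width == 52:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cc_mask       = 1ULL << 51;       /* shared bit          */
	uint64_t physical_mask = (1ULL << 52) - 1; /* 52 addressable bits */

	physical_mask &= cc_mask - 1;              /* keep bits 50:0 only */
	printf("physical_mask = %#llx\n",          /* 0x7ffffffffffff     */
	       (unsigned long long)physical_mask);
	return 0;
}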
+ */ + x86_cpuinit.parallel_bringup = false; + + tdx_announce(); } |
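Stepping back to the SEV side of this diff, sev_es_negotiate_protocol() implements a classic version-window intersection: refuse if the hypervisor's [min, max] window is disjoint from the guest's, otherwise run the highest mutually supported version. A standalone rendering (userspace C; the guest window constants stand in for GHCB_PROTOCOL_MIN/GHCB_PROTOCOL_MAX):

#include <stdbool.h>
#include <stdio.h>

#define GUEST_PROTO_MIN 1   /* stand-in for GHCB_PROTOCOL_MIN */
#define GUEST_PROTO_MAX 2   /* stand-in for GHCB_PROTOCOL_MAX */

/* Windows must overlap; pick the highest common version. */
static bool negotiate(unsigned int hv_min, unsigned int hv_max,
                      unsigned int *ver)
{
	if (hv_max < GUEST_PROTO_MIN || hv_min > GUEST_PROTO_MAX)
		return false;   /* disjoint windows */

	*ver = hv_max < GUEST_PROTO_MAX ? hv_max : GUEST_PROTO_MAX;
	return true;
}

int main(void)
{
	unsigned int ver;

	if (negotiate(1, 5, &ver))               /* hypervisor offers 1..5 */
		printf("GHCB protocol %u\n", ver);    /* prints 2 */
	return 0;
}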
