From 765a0542fdc7aad7cbc1da3bd19bed6297b54e2c Mon Sep 17 00:00:00 2001
From: Kai Huang
Date: Fri, 8 Dec 2023 09:07:21 -0800
Subject: x86/virt/tdx: Detect TDX during kernel boot

Intel Trust Domain Extensions (TDX) protects guest VMs from malicious host and certain physical attacks. A CPU-attested software module called 'the TDX module' runs inside a new isolated memory range as a trusted hypervisor to manage and run protected VMs.

Pre-TDX Intel hardware has support for a memory encryption architecture called MKTME. The memory encryption hardware underpinning MKTME is also used for Intel TDX. TDX ends up "stealing" some of the physical address space from the MKTME architecture for crypto-protection of VMs. The BIOS is responsible for partitioning the "KeyID" space between legacy MKTME and TDX. The KeyIDs reserved for TDX are called 'TDX private KeyIDs' or 'TDX KeyIDs' for short.

During machine boot, TDX microcode verifies that the BIOS programmed TDX private KeyIDs consistently and correctly across all CPU packages. The MSRs are locked in this state after verification. This is why MSR_IA32_MKTME_KEYID_PARTITIONING gets used for TDX enumeration: it indicates not just that the hardware supports TDX, but that all the boot-time security checks passed.

The TDX module is expected to be loaded by the BIOS when it enables TDX, but the kernel needs to properly initialize it before it can be used to create and run any TDX guests. The TDX module will be initialized by the KVM subsystem when KVM wants to use TDX.

Detect platform TDX support by detecting TDX private KeyIDs.

The TDX module itself requires one TDX KeyID as the 'TDX global KeyID' to protect its metadata. Each TDX guest also needs a TDX KeyID for its own protection. Just use the first TDX KeyID as the global KeyID and leave the rest for TDX guests. If no TDX KeyID is left for TDX guests, disable TDX as initializing the TDX module alone is useless.

[ dhansen: add X86_FEATURE, replace helper function ]

Signed-off-by: Kai Huang
Signed-off-by: Dave Hansen
Reviewed-by: Kirill A.
Shutemov Reviewed-by: Isaku Yamahata Reviewed-by: David Hildenbrand Reviewed-by: Dave Hansen Reviewed-by: Kuppuswamy Sathyanarayanan Link: https://lore.kernel.org/all/20231208170740.53979-1-dave.hansen%40intel.com --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/msr-index.h | 3 ++ arch/x86/include/asm/tdx.h | 3 ++ arch/x86/kernel/cpu/common.c | 2 + arch/x86/virt/vmx/tdx/Makefile | 2 +- arch/x86/virt/vmx/tdx/tdx.c | 81 ++++++++++++++++++++++++++++++++++++++ 6 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 arch/x86/virt/vmx/tdx/tdx.c (limited to 'arch') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 4af140cf5719..45ddc6b6baaa 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -198,6 +198,7 @@ #define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ +#define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* Platform supports being a TDX host */ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 1d51e1850ed0..66c12d4efa31 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -536,6 +536,9 @@ #define MSR_RELOAD_PMC0 0x000014c1 #define MSR_RELOAD_FIXED_CTR0 0x00001309 +/* KeyID partitioning between MKTME and TDX */ +#define MSR_IA32_MKTME_KEYID_PARTITIONING 0x00000087 + /* * AMD64 MSRs. Not complete. See the architecture manual for a more * complete list. diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h index f3d5305a60fc..e5dd1cb0e1a1 100644 --- a/arch/x86/include/asm/tdx.h +++ b/arch/x86/include/asm/tdx.h @@ -83,6 +83,9 @@ static inline long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, u64 __seamcall(u64 fn, struct tdx_module_args *args); u64 __seamcall_ret(u64 fn, struct tdx_module_args *args); u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args); +void tdx_init(void); +#else +static inline void tdx_init(void) { } #endif /* CONFIG_INTEL_TDX_HOST */ #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b14fc8c1c953..b968a2496e80 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -66,6 +66,7 @@ #include #include #include +#include #include "cpu.h" @@ -1987,6 +1988,7 @@ static __init void identify_boot_cpu(void) setup_cr_pinning(); tsx_init(); + tdx_init(); lkgs_init(); } diff --git a/arch/x86/virt/vmx/tdx/Makefile b/arch/x86/virt/vmx/tdx/Makefile index 46ef8f73aebb..90da47eb85ee 100644 --- a/arch/x86/virt/vmx/tdx/Makefile +++ b/arch/x86/virt/vmx/tdx/Makefile @@ -1,2 +1,2 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-y += seamcall.o +obj-y += seamcall.o tdx.o diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c new file mode 100644 index 000000000000..94689aef44a6 --- /dev/null +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(c) 2023 Intel Corporation. 
+ * + * Intel Trusted Domain Extensions (TDX) support + */ + +#define pr_fmt(fmt) "virt/tdx: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static u32 tdx_global_keyid __ro_after_init; +static u32 tdx_guest_keyid_start __ro_after_init; +static u32 tdx_nr_guest_keyids __ro_after_init; + +static __init int record_keyid_partitioning(u32 *tdx_keyid_start, + u32 *nr_tdx_keyids) +{ + u32 _nr_mktme_keyids, _tdx_keyid_start, _nr_tdx_keyids; + int ret; + + /* + * IA32_MKTME_KEYID_PARTIONING: + * Bit [31:0]: Number of MKTME KeyIDs. + * Bit [63:32]: Number of TDX private KeyIDs. + */ + ret = rdmsr_safe(MSR_IA32_MKTME_KEYID_PARTITIONING, &_nr_mktme_keyids, + &_nr_tdx_keyids); + if (ret || !_nr_tdx_keyids) + return -EINVAL; + + /* TDX KeyIDs start after the last MKTME KeyID. */ + _tdx_keyid_start = _nr_mktme_keyids + 1; + + *tdx_keyid_start = _tdx_keyid_start; + *nr_tdx_keyids = _nr_tdx_keyids; + + return 0; +} + +void __init tdx_init(void) +{ + u32 tdx_keyid_start, nr_tdx_keyids; + int err; + + err = record_keyid_partitioning(&tdx_keyid_start, &nr_tdx_keyids); + if (err) + return; + + pr_info("BIOS enabled: private KeyID range [%u, %u)\n", + tdx_keyid_start, tdx_keyid_start + nr_tdx_keyids); + + /* + * The TDX module itself requires one 'global KeyID' to protect + * its metadata. If there's only one TDX KeyID, there won't be + * any left for TDX guests thus there's no point to enable TDX + * at all. + */ + if (nr_tdx_keyids < 2) { + pr_err("initialization failed: too few private KeyIDs available.\n"); + return; + } + + /* + * Just use the first TDX KeyID as the 'global KeyID' and + * leave the rest for TDX guests. + */ + tdx_global_keyid = tdx_keyid_start; + tdx_guest_keyid_start = tdx_keyid_start + 1; + tdx_nr_guest_keyids = nr_tdx_keyids - 1; + + setup_force_cpu_cap(X86_FEATURE_TDX_HOST_PLATFORM); +} -- cgit From d623704bb23901a25bf6d6a40aa16b43a17622eb Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:22 -0800 Subject: x86/virt/tdx: Define TDX supported page sizes as macros TDX supports 4K, 2M and 1G page sizes. The corresponding values are defined by the TDX module spec and used as TDX module ABI. Currently, they are used in try_accept_one() when the TDX guest tries to accept a page. However currently try_accept_one() uses hard-coded magic values. Define TDX supported page sizes as macros and get rid of the hard-coded values in try_accept_one(). TDX host support will need to use them too. Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Kirill A. 
Shutemov Reviewed-by: Dave Hansen Reviewed-by: David Hildenbrand Link: https://lore.kernel.org/all/20231208170740.53979-2-dave.hansen%40intel.com --- arch/x86/coco/tdx/tdx-shared.c | 6 +++--- arch/x86/include/asm/shared/tdx.h | 5 +++++ 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/coco/tdx/tdx-shared.c b/arch/x86/coco/tdx/tdx-shared.c index 78e413269791..1655aa56a0a5 100644 --- a/arch/x86/coco/tdx/tdx-shared.c +++ b/arch/x86/coco/tdx/tdx-shared.c @@ -22,13 +22,13 @@ static unsigned long try_accept_one(phys_addr_t start, unsigned long len, */ switch (pg_level) { case PG_LEVEL_4K: - page_size = 0; + page_size = TDX_PS_4K; break; case PG_LEVEL_2M: - page_size = 1; + page_size = TDX_PS_2M; break; case PG_LEVEL_1G: - page_size = 2; + page_size = TDX_PS_1G; break; default: return 0; diff --git a/arch/x86/include/asm/shared/tdx.h b/arch/x86/include/asm/shared/tdx.h index ccce7ebd8677..a4036149c484 100644 --- a/arch/x86/include/asm/shared/tdx.h +++ b/arch/x86/include/asm/shared/tdx.h @@ -55,6 +55,11 @@ (TDX_RDX | TDX_RBX | TDX_RSI | TDX_RDI | TDX_R8 | TDX_R9 | \ TDX_R10 | TDX_R11 | TDX_R12 | TDX_R13 | TDX_R14 | TDX_R15) +/* TDX supported page sizes from the TDX module ABI. */ +#define TDX_PS_4K 0 +#define TDX_PS_2M 1 +#define TDX_PS_1G 2 + #ifndef __ASSEMBLY__ #include -- cgit From 3115cabd935ade76fbe61154d1e405158e548272 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:23 -0800 Subject: x86/virt/tdx: Make INTEL_TDX_HOST depend on X86_X2APIC TDX capable platforms are locked to X2APIC mode and cannot fall back to the legacy xAPIC mode when TDX is enabled by the BIOS. TDX host support requires x2APIC. Make INTEL_TDX_HOST depend on X86_X2APIC. Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Dave Hansen Reviewed-by: David Hildenbrand Reviewed-by: Kirill A. Shutemov Link: https://lore.kernel.org/lkml/ba80b303-31bf-d44a-b05d-5c0f83038798@intel.com/ Link: https://lore.kernel.org/all/20231208170740.53979-3-dave.hansen%40intel.com --- arch/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 3762f41bb092..eb6e63956d51 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1970,6 +1970,7 @@ config INTEL_TDX_HOST depends on CPU_SUP_INTEL depends on X86_64 depends on KVM_INTEL + depends on X86_X2APIC help Intel Trust Domain Extensions (TDX) protects guest VMs from malicious host and certain physical attacks. This option enables necessary TDX -- cgit From 1e66a7e275393055d98d2306771fe1feadeb1cd6 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:24 -0800 Subject: x86/virt/tdx: Handle SEAMCALL no entropy error in common code Some SEAMCALLs use the RDRAND hardware and can fail for the same reasons as RDRAND. Use the kernel RDRAND retry logic for them. There are three __seamcall*() variants. Do the SEAMCALL retry in common code and add a wrapper for each of them. Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Kirill A. 
Shutemov Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-4-dave.hansen%40intel.com --- arch/x86/include/asm/tdx.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h index e5dd1cb0e1a1..24c0357081bd 100644 --- a/arch/x86/include/asm/tdx.h +++ b/arch/x86/include/asm/tdx.h @@ -24,6 +24,11 @@ #define TDX_SEAMCALL_GP (TDX_SW_ERROR | X86_TRAP_GP) #define TDX_SEAMCALL_UD (TDX_SW_ERROR | X86_TRAP_UD) +/* + * TDX module SEAMCALL leaf function error codes + */ +#define TDX_RND_NO_ENTROPY 0x8000020300000000ULL + #ifndef __ASSEMBLY__ /* @@ -84,6 +89,27 @@ u64 __seamcall(u64 fn, struct tdx_module_args *args); u64 __seamcall_ret(u64 fn, struct tdx_module_args *args); u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args); void tdx_init(void); + +#include + +typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args); + +static inline u64 sc_retry(sc_func_t func, u64 fn, + struct tdx_module_args *args) +{ + int retry = RDRAND_RETRY_LOOPS; + u64 ret; + + do { + ret = func(fn, args); + } while (ret == TDX_RND_NO_ENTROPY && --retry); + + return ret; +} + +#define seamcall(_fn, _args) sc_retry(__seamcall, (_fn), (_args)) +#define seamcall_ret(_fn, _args) sc_retry(__seamcall_ret, (_fn), (_args)) +#define seamcall_saved_ret(_fn, _args) sc_retry(__seamcall_saved_ret, (_fn), (_args)) #else static inline void tdx_init(void) { } #endif /* CONFIG_INTEL_TDX_HOST */ -- cgit From df01f5ae07dd9ca0c755943a5cfcc4e78e480f99 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:25 -0800 Subject: x86/virt/tdx: Add SEAMCALL error printing for module initialization The SEAMCALLs involved during the TDX module initialization are not expected to fail. In fact, they are not expected to return any non-zero code (except the "running out of entropy error", which can be handled internally already). Add yet another set of SEAMCALL wrappers, which treats all non-zero return code as error, to support printing SEAMCALL error upon failure for module initialization. Note the TDX module initialization doesn't use the _saved_ret() variant thus no wrapper is added for it. SEAMCALL assembly can also return kernel-defined error codes for three special cases: 1) TDX isn't enabled by the BIOS; 2) TDX module isn't loaded; 3) CPU isn't in VMX operation. Whether they can legally happen depends on the caller, so leave to the caller to print error message when desired. Also convert the SEAMCALL error codes to the kernel error codes in the new wrappers so that each SEAMCALL caller doesn't have to repeat the conversion. [ dhansen: Align the register dump with show_regs(). Zero-pad the contents, split on two lines and use consistent spacing. ] Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Kirill A. 
Shutemov Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-5-dave.hansen%40intel.com --- arch/x86/include/asm/tdx.h | 1 + arch/x86/virt/vmx/tdx/tdx.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h index 24c0357081bd..2c0f4162fe99 100644 --- a/arch/x86/include/asm/tdx.h +++ b/arch/x86/include/asm/tdx.h @@ -27,6 +27,7 @@ /* * TDX module SEAMCALL leaf function error codes */ +#define TDX_SUCCESS 0ULL #define TDX_RND_NO_ENTROPY 0x8000020300000000ULL #ifndef __ASSEMBLY__ diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index 94689aef44a6..262b9b64a50b 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -21,6 +21,50 @@ static u32 tdx_global_keyid __ro_after_init; static u32 tdx_guest_keyid_start __ro_after_init; static u32 tdx_nr_guest_keyids __ro_after_init; +typedef void (*sc_err_func_t)(u64 fn, u64 err, struct tdx_module_args *args); + +static inline void seamcall_err(u64 fn, u64 err, struct tdx_module_args *args) +{ + pr_err("SEAMCALL (0x%016llx) failed: 0x%016llx\n", fn, err); +} + +static inline void seamcall_err_ret(u64 fn, u64 err, + struct tdx_module_args *args) +{ + seamcall_err(fn, err, args); + pr_err("RCX 0x%016llx RDX 0x%016llx R08 0x%016llx\n", + args->rcx, args->rdx, args->r8); + pr_err("R09 0x%016llx R10 0x%016llx R11 0x%016llx\n", + args->r9, args->r10, args->r11); +} + +static inline int sc_retry_prerr(sc_func_t func, sc_err_func_t err_func, + u64 fn, struct tdx_module_args *args) +{ + u64 sret = sc_retry(func, fn, args); + + if (sret == TDX_SUCCESS) + return 0; + + if (sret == TDX_SEAMCALL_VMFAILINVALID) + return -ENODEV; + + if (sret == TDX_SEAMCALL_GP) + return -EOPNOTSUPP; + + if (sret == TDX_SEAMCALL_UD) + return -EACCES; + + err_func(fn, sret, args); + return -EIO; +} + +#define seamcall_prerr(__fn, __args) \ + sc_retry_prerr(__seamcall, seamcall_err, (__fn), (__args)) + +#define seamcall_prerr_ret(__fn, __args) \ + sc_retry_prerr(__seamcall_ret, seamcall_err_ret, (__fn), (__args)) + static __init int record_keyid_partitioning(u32 *tdx_keyid_start, u32 *nr_tdx_keyids) { -- cgit From 6162b310bc219d18bac970dbd441d7743097d1b9 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:26 -0800 Subject: x86/virt/tdx: Add skeleton to enable TDX on demand There are essentially two steps to get the TDX module ready: 1) Get each CPU ready to run TDX 2) Set up the shared TDX module data structures Introduce and export (to KVM) the infrastructure to do both of these pieces at runtime. == Per-CPU TDX Initialization == Track the initialization status of each CPU with a per-cpu variable. This avoids failures in the case of KVM module reloads and handles cases where CPUs come online later. Generally, the per-cpu SEAMCALLs happen first. But there's actually one global call that has to happen before _any_ others (TDH_SYS_INIT). It's analogous to the boot CPU having to do a bit of extra work just because it happens to be the first one. Track if _any_ CPU has done this call and then only actually do it during the first per-cpu init. == Shared TDX Initialization == Create the global state function (tdx_enable()) as a simple placeholder. The TODO list will be pared down as functionality is added. Use a state machine protected by mutex to make sure the work in tdx_enable() will only be done once. 
This avoids failures if the KVM module is reloaded.

A CPU must be made ready to run TDX before it can participate in initializing the shared parts of the module. Any caller of tdx_enable() needs to ensure that it can never run on a CPU which is not ready to run TDX. It needs to be wary of CPU hotplug, preemption and the VMX enabling state of any CPU on which it might run.

== Why runtime instead of boot time? ==

The TDX module can be initialized only once in its lifetime. Instead of always initializing it at boot time, this implementation chooses an "on demand" approach: do not initialize TDX until there is a real need (e.g. when requested by KVM). This approach has the following advantages:

1) It avoids consuming the memory that must be allocated by the kernel and given to the TDX module as metadata (~1/256th of the TDX-usable memory), and also saves the CPU cycles of initializing the TDX module (and the metadata) when TDX is not used at all.

2) The TDX module design allows it to be updated while the system is running. The update procedure shares quite a few steps with this "on demand" initialization mechanism. The hope is that much of the "on demand" mechanism can be shared with a future "update" mechanism. A boot-time TDX module implementation would not be able to share much code with the update mechanism.

3) Making SEAMCALLs requires VMX to be enabled. Currently, only the KVM code mucks with VMX enabling. If the TDX module were to be initialized separately from KVM (like at boot), the boot code would need to be taught how to muck with VMX enabling and KVM would need to be taught how to cope with that. Making KVM itself responsible for TDX initialization lets the rest of the kernel stay blissfully unaware of VMX.

[ dhansen: completely reorder/rewrite changelog ]

Signed-off-by: Kai Huang
Signed-off-by: Dave Hansen
Reviewed-by: Kirill A.
Shutemov Reviewed-by: Nikolay Borisov Reviewed-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-6-dave.hansen%40intel.com --- arch/x86/include/asm/tdx.h | 4 ++ arch/x86/virt/vmx/tdx/tdx.c | 167 ++++++++++++++++++++++++++++++++++++++++++++ arch/x86/virt/vmx/tdx/tdx.h | 30 ++++++++ 3 files changed, 201 insertions(+) create mode 100644 arch/x86/virt/vmx/tdx/tdx.h (limited to 'arch') diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h index 2c0f4162fe99..c54948e1999c 100644 --- a/arch/x86/include/asm/tdx.h +++ b/arch/x86/include/asm/tdx.h @@ -111,8 +111,12 @@ static inline u64 sc_retry(sc_func_t func, u64 fn, #define seamcall(_fn, _args) sc_retry(__seamcall, (_fn), (_args)) #define seamcall_ret(_fn, _args) sc_retry(__seamcall_ret, (_fn), (_args)) #define seamcall_saved_ret(_fn, _args) sc_retry(__seamcall_saved_ret, (_fn), (_args)) +int tdx_cpu_enable(void); +int tdx_enable(void); #else static inline void tdx_init(void) { } +static inline int tdx_cpu_enable(void) { return -ENODEV; } +static inline int tdx_enable(void) { return -ENODEV; } #endif /* CONFIG_INTEL_TDX_HOST */ #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index 262b9b64a50b..ecb0df8ff8b8 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -12,15 +12,25 @@ #include #include #include +#include +#include +#include +#include #include #include #include #include +#include "tdx.h" static u32 tdx_global_keyid __ro_after_init; static u32 tdx_guest_keyid_start __ro_after_init; static u32 tdx_nr_guest_keyids __ro_after_init; +static DEFINE_PER_CPU(bool, tdx_lp_initialized); + +static enum tdx_module_status_t tdx_module_status; +static DEFINE_MUTEX(tdx_module_lock); + typedef void (*sc_err_func_t)(u64 fn, u64 err, struct tdx_module_args *args); static inline void seamcall_err(u64 fn, u64 err, struct tdx_module_args *args) @@ -65,6 +75,163 @@ static inline int sc_retry_prerr(sc_func_t func, sc_err_func_t err_func, #define seamcall_prerr_ret(__fn, __args) \ sc_retry_prerr(__seamcall_ret, seamcall_err_ret, (__fn), (__args)) +/* + * Do the module global initialization once and return its result. + * It can be done on any cpu. It's always called with interrupts + * disabled. + */ +static int try_init_module_global(void) +{ + struct tdx_module_args args = {}; + static DEFINE_RAW_SPINLOCK(sysinit_lock); + static bool sysinit_done; + static int sysinit_ret; + + lockdep_assert_irqs_disabled(); + + raw_spin_lock(&sysinit_lock); + + if (sysinit_done) + goto out; + + /* RCX is module attributes and all bits are reserved */ + args.rcx = 0; + sysinit_ret = seamcall_prerr(TDH_SYS_INIT, &args); + + /* + * The first SEAMCALL also detects the TDX module, thus + * it can fail due to the TDX module is not loaded. + * Dump message to let the user know. + */ + if (sysinit_ret == -ENODEV) + pr_err("module not loaded\n"); + + sysinit_done = true; +out: + raw_spin_unlock(&sysinit_lock); + return sysinit_ret; +} + +/** + * tdx_cpu_enable - Enable TDX on local cpu + * + * Do one-time TDX module per-cpu initialization SEAMCALL (and TDX module + * global initialization SEAMCALL if not done) on local cpu to make this + * cpu be ready to run any other SEAMCALLs. + * + * Always call this function via IPI function calls. + * + * Return 0 on success, otherwise errors. 
+ */ +int tdx_cpu_enable(void) +{ + struct tdx_module_args args = {}; + int ret; + + if (!boot_cpu_has(X86_FEATURE_TDX_HOST_PLATFORM)) + return -ENODEV; + + lockdep_assert_irqs_disabled(); + + if (__this_cpu_read(tdx_lp_initialized)) + return 0; + + /* + * The TDX module global initialization is the very first step + * to enable TDX. Need to do it first (if hasn't been done) + * before the per-cpu initialization. + */ + ret = try_init_module_global(); + if (ret) + return ret; + + ret = seamcall_prerr(TDH_SYS_LP_INIT, &args); + if (ret) + return ret; + + __this_cpu_write(tdx_lp_initialized, true); + + return 0; +} +EXPORT_SYMBOL_GPL(tdx_cpu_enable); + +static int init_tdx_module(void) +{ + /* + * TODO: + * + * - Build the list of TDX-usable memory regions. + * - Get TDX module "TD Memory Region" (TDMR) global metadata. + * - Construct a list of TDMRs to cover all TDX-usable memory + * regions. + * - Configure the TDMRs and the global KeyID to the TDX module. + * - Configure the global KeyID on all packages. + * - Initialize all TDMRs. + * + * Return error before all steps are done. + */ + return -EINVAL; +} + +static int __tdx_enable(void) +{ + int ret; + + ret = init_tdx_module(); + if (ret) { + pr_err("module initialization failed (%d)\n", ret); + tdx_module_status = TDX_MODULE_ERROR; + return ret; + } + + pr_info("module initialized\n"); + tdx_module_status = TDX_MODULE_INITIALIZED; + + return 0; +} + +/** + * tdx_enable - Enable TDX module to make it ready to run TDX guests + * + * This function assumes the caller has: 1) held read lock of CPU hotplug + * lock to prevent any new cpu from becoming online; 2) done both VMXON + * and tdx_cpu_enable() on all online cpus. + * + * This function can be called in parallel by multiple callers. + * + * Return 0 if TDX is enabled successfully, otherwise error. + */ +int tdx_enable(void) +{ + int ret; + + if (!boot_cpu_has(X86_FEATURE_TDX_HOST_PLATFORM)) + return -ENODEV; + + lockdep_assert_cpus_held(); + + mutex_lock(&tdx_module_lock); + + switch (tdx_module_status) { + case TDX_MODULE_UNINITIALIZED: + ret = __tdx_enable(); + break; + case TDX_MODULE_INITIALIZED: + /* Already initialized, great, tell the caller. */ + ret = 0; + break; + default: + /* Failed to initialize in the previous attempts */ + ret = -EINVAL; + break; + } + + mutex_unlock(&tdx_module_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(tdx_enable); + static __init int record_keyid_partitioning(u32 *tdx_keyid_start, u32 *nr_tdx_keyids) { diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h new file mode 100644 index 000000000000..a3c52270df5b --- /dev/null +++ b/arch/x86/virt/vmx/tdx/tdx.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _X86_VIRT_TDX_H +#define _X86_VIRT_TDX_H + +/* + * This file contains both macros and data structures defined by the TDX + * architecture and Linux defined software data structures and functions. + * The two should not be mixed together for better readability. The + * architectural definitions come first. + */ + +/* + * TDX module SEAMCALL leaf functions + */ +#define TDH_SYS_INIT 33 +#define TDH_SYS_LP_INIT 35 + +/* + * Do not put any hardware-defined TDX structure representations below + * this comment! + */ + +/* Kernel defined TDX module status during module initialization. 
*/ +enum tdx_module_status_t { + TDX_MODULE_UNINITIALIZED, + TDX_MODULE_INITIALIZED, + TDX_MODULE_ERROR +}; + +#endif -- cgit From abe8dbab8f9f8370c26e7b79b49ed795c1b6b70f Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:27 -0800 Subject: x86/virt/tdx: Use all system memory when initializing TDX module as TDX memory Start to transit out the "multi-steps" to initialize the TDX module. TDX provides increased levels of memory confidentiality and integrity. This requires special hardware support for features like memory encryption and storage of memory integrity checksums. Not all memory satisfies these requirements. As a result, TDX introduced the concept of a "Convertible Memory Region" (CMR). During boot, the firmware builds a list of all of the memory ranges which can provide the TDX security guarantees. The list of these ranges is available to the kernel by querying the TDX module. CMRs tell the kernel which memory is TDX compatible. The kernel needs to build a list of memory regions (out of CMRs) as "TDX-usable" memory and pass them to the TDX module. Once this is done, those "TDX-usable" memory regions are fixed during module's lifetime. To keep things simple, assume that all TDX-protected memory will come from the page allocator. Make sure all pages in the page allocator *are* TDX-usable memory. As TDX-usable memory is a fixed configuration, take a snapshot of the memory configuration from memblocks at the time of module initialization (memblocks are modified on memory hotplug). This snapshot is used to enable TDX support for *this* memory configuration only. Use a memory hotplug notifier to ensure that no other RAM can be added outside of this configuration. This approach requires all memblock memory regions at the time of module initialization to be TDX convertible memory to work, otherwise module initialization will fail in a later SEAMCALL when passing those regions to the module. This approach works when all boot-time "system RAM" is TDX convertible memory and no non-TDX-convertible memory is hot-added to the core-mm before module initialization. For instance, on the first generation of TDX machines, both CXL memory and NVDIMM are not TDX convertible memory. Using kmem driver to hot-add any CXL memory or NVDIMM to the core-mm before module initialization will result in failure to initialize the module. The SEAMCALL error code will be available in the dmesg to help user to understand the failure. Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: "Huang, Ying" Reviewed-by: Isaku Yamahata Reviewed-by: Dave Hansen Reviewed-by: Kirill A. Shutemov Link: https://lore.kernel.org/all/20231208170740.53979-7-dave.hansen%40intel.com --- arch/x86/Kconfig | 1 + arch/x86/kernel/setup.c | 2 + arch/x86/virt/vmx/tdx/tdx.c | 167 +++++++++++++++++++++++++++++++++++++++++++- arch/x86/virt/vmx/tdx/tdx.h | 6 ++ 4 files changed, 174 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index eb6e63956d51..2c69ef844805 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1971,6 +1971,7 @@ config INTEL_TDX_HOST depends on X86_64 depends on KVM_INTEL depends on X86_X2APIC + select ARCH_KEEP_MEMBLOCK help Intel Trust Domain Extensions (TDX) protects guest VMs from malicious host and certain physical attacks. 
This option enables necessary TDX diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 1526747bedf2..9597c002b3c4 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1033,6 +1033,8 @@ void __init setup_arch(char **cmdline_p) * * Moreover, on machines with SandyBridge graphics or in setups that use * crashkernel the entire 1M is reserved anyway. + * + * Note the host kernel TDX also requires the first 1MB being reserved. */ x86_platform.realmode_reserve(); diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index ecb0df8ff8b8..6a3585bc8aa4 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -16,6 +16,12 @@ #include #include #include +#include +#include +#include +#include +#include +#include #include #include #include @@ -31,6 +37,9 @@ static DEFINE_PER_CPU(bool, tdx_lp_initialized); static enum tdx_module_status_t tdx_module_status; static DEFINE_MUTEX(tdx_module_lock); +/* All TDX-usable memory regions. Protected by mem_hotplug_lock. */ +static LIST_HEAD(tdx_memlist); + typedef void (*sc_err_func_t)(u64 fn, u64 err, struct tdx_module_args *args); static inline void seamcall_err(u64 fn, u64 err, struct tdx_module_args *args) @@ -155,12 +164,102 @@ int tdx_cpu_enable(void) } EXPORT_SYMBOL_GPL(tdx_cpu_enable); +/* + * Add a memory region as a TDX memory block. The caller must make sure + * all memory regions are added in address ascending order and don't + * overlap. + */ +static int add_tdx_memblock(struct list_head *tmb_list, unsigned long start_pfn, + unsigned long end_pfn) +{ + struct tdx_memblock *tmb; + + tmb = kmalloc(sizeof(*tmb), GFP_KERNEL); + if (!tmb) + return -ENOMEM; + + INIT_LIST_HEAD(&tmb->list); + tmb->start_pfn = start_pfn; + tmb->end_pfn = end_pfn; + + /* @tmb_list is protected by mem_hotplug_lock */ + list_add_tail(&tmb->list, tmb_list); + return 0; +} + +static void free_tdx_memlist(struct list_head *tmb_list) +{ + /* @tmb_list is protected by mem_hotplug_lock */ + while (!list_empty(tmb_list)) { + struct tdx_memblock *tmb = list_first_entry(tmb_list, + struct tdx_memblock, list); + + list_del(&tmb->list); + kfree(tmb); + } +} + +/* + * Ensure that all memblock memory regions are convertible to TDX + * memory. Once this has been established, stash the memblock + * ranges off in a secondary structure because memblock is modified + * in memory hotplug while TDX memory regions are fixed. + */ +static int build_tdx_memlist(struct list_head *tmb_list) +{ + unsigned long start_pfn, end_pfn; + int i, ret; + + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { + /* + * The first 1MB is not reported as TDX convertible memory. + * Although the first 1MB is always reserved and won't end up + * to the page allocator, it is still in memblock's memory + * regions. Skip them manually to exclude them as TDX memory. + */ + start_pfn = max(start_pfn, PHYS_PFN(SZ_1M)); + if (start_pfn >= end_pfn) + continue; + + /* + * Add the memory regions as TDX memory. The regions in + * memblock has already guaranteed they are in address + * ascending order and don't overlap. + */ + ret = add_tdx_memblock(tmb_list, start_pfn, end_pfn); + if (ret) + goto err; + } + + return 0; +err: + free_tdx_memlist(tmb_list); + return ret; +} + static int init_tdx_module(void) { + int ret; + + /* + * To keep things simple, assume that all TDX-protected memory + * will come from the page allocator. Make sure all pages in the + * page allocator are TDX-usable memory. 
+ * + * Build the list of "TDX-usable" memory regions which cover all + * pages in the page allocator to guarantee that. Do it while + * holding mem_hotplug_lock read-lock as the memory hotplug code + * path reads the @tdx_memlist to reject any new memory. + */ + get_online_mems(); + + ret = build_tdx_memlist(&tdx_memlist); + if (ret) + goto out_put_tdxmem; + /* * TODO: * - * - Build the list of TDX-usable memory regions. * - Get TDX module "TD Memory Region" (TDMR) global metadata. * - Construct a list of TDMRs to cover all TDX-usable memory * regions. @@ -170,7 +269,14 @@ static int init_tdx_module(void) * * Return error before all steps are done. */ - return -EINVAL; + ret = -EINVAL; +out_put_tdxmem: + /* + * @tdx_memlist is written here and read at memory hotplug time. + * Lock out memory hotplug code while building it. + */ + put_online_mems(); + return ret; } static int __tdx_enable(void) @@ -257,6 +363,56 @@ static __init int record_keyid_partitioning(u32 *tdx_keyid_start, return 0; } +static bool is_tdx_memory(unsigned long start_pfn, unsigned long end_pfn) +{ + struct tdx_memblock *tmb; + + /* + * This check assumes that the start_pfn<->end_pfn range does not + * cross multiple @tdx_memlist entries. A single memory online + * event across multiple memblocks (from which @tdx_memlist + * entries are derived at the time of module initialization) is + * not possible. This is because memory offline/online is done + * on granularity of 'struct memory_block', and the hotpluggable + * memory region (one memblock) must be multiple of memory_block. + */ + list_for_each_entry(tmb, &tdx_memlist, list) { + if (start_pfn >= tmb->start_pfn && end_pfn <= tmb->end_pfn) + return true; + } + return false; +} + +static int tdx_memory_notifier(struct notifier_block *nb, unsigned long action, + void *v) +{ + struct memory_notify *mn = v; + + if (action != MEM_GOING_ONLINE) + return NOTIFY_OK; + + /* + * Empty list means TDX isn't enabled. Allow any memory + * to go online. + */ + if (list_empty(&tdx_memlist)) + return NOTIFY_OK; + + /* + * The TDX memory configuration is static and can not be + * changed. Reject onlining any memory which is outside of + * the static configuration whether it supports TDX or not. + */ + if (is_tdx_memory(mn->start_pfn, mn->start_pfn + mn->nr_pages)) + return NOTIFY_OK; + + return NOTIFY_BAD; +} + +static struct notifier_block tdx_memory_nb = { + .notifier_call = tdx_memory_notifier, +}; + void __init tdx_init(void) { u32 tdx_keyid_start, nr_tdx_keyids; @@ -280,6 +436,13 @@ void __init tdx_init(void) return; } + err = register_memory_notifier(&tdx_memory_nb); + if (err) { + pr_err("initialization failed: register_memory_notifier() failed (%d)\n", + err); + return; + } + /* * Just use the first TDX KeyID as the 'global KeyID' and * leave the rest for TDX guests. diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h index a3c52270df5b..c11e0a7ca664 100644 --- a/arch/x86/virt/vmx/tdx/tdx.h +++ b/arch/x86/virt/vmx/tdx/tdx.h @@ -27,4 +27,10 @@ enum tdx_module_status_t { TDX_MODULE_ERROR }; +struct tdx_memblock { + struct list_head list; + unsigned long start_pfn; + unsigned long end_pfn; +}; + #endif -- cgit From cf72bc481634b7c4cd780b6338f222e2892b0232 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:28 -0800 Subject: x86/virt/tdx: Get module global metadata for module initialization The TDX module global metadata provides system-wide information about the module. TL;DR: Use the TDH.SYS.RD SEAMCALL to tell if the module is good or not. 
Long Version: 1) Only initialize TDX module with version 1.5 and later TDX module 1.0 has some compatibility issues with the later versions of module, as documented in the "Intel TDX module ABI incompatibilities between TDX1.0 and TDX1.5" spec. Don't bother with module versions that do not have a stable ABI. 2) Get the essential global metadata for module initialization TDX reports a list of "Convertible Memory Region" (CMR) to tell the kernel which memory is TDX compatible. The kernel needs to build a list of memory regions (out of CMRs) as "TDX-usable" memory and pass them to the TDX module. The kernel does this by constructing a list of "TD Memory Regions" (TDMRs) to cover all these memory regions and passing them to the TDX module. Each TDMR is a TDX architectural data structure containing the memory region that the TDMR covers, plus the information to track (within this TDMR): a) the "Physical Address Metadata Table" (PAMT) to track each TDX memory page's status (such as which TDX guest "owns" a given page, and b) the "reserved areas" to tell memory holes that cannot be used as TDX memory. The kernel needs to get below metadata from the TDX module to build the list of TDMRs: a) the maximum number of supported TDMRs b) the maximum number of supported reserved areas per TDMR and, c) the PAMT entry size for each TDX-supported page size. == Implementation == The TDX module has two modes of fetching the metadata: a one field at a time, or all in one blob. Use the field at a time for now. It is slower, but there just are not enough fields now to justify the complexity of extra unpacking. The err_free_tdxmem=>out_put_tdxmem goto looks wonky by itself. But it is the first of a bunch of error handling that will get stuck at its site. [ dhansen: clean up changelog and add a struct to map between the TDX module fields and 'struct tdx_tdmr_sysinfo' ] Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-8-dave.hansen%40intel.com --- arch/x86/include/asm/shared/tdx.h | 1 + arch/x86/virt/vmx/tdx/tdx.c | 88 ++++++++++++++++++++++++++++++++++++++- arch/x86/virt/vmx/tdx/tdx.h | 39 +++++++++++++++++ 3 files changed, 127 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/shared/tdx.h b/arch/x86/include/asm/shared/tdx.h index a4036149c484..fdfd41511b02 100644 --- a/arch/x86/include/asm/shared/tdx.h +++ b/arch/x86/include/asm/shared/tdx.h @@ -59,6 +59,7 @@ #define TDX_PS_4K 0 #define TDX_PS_2M 1 #define TDX_PS_1G 2 +#define TDX_PS_NR (TDX_PS_1G + 1) #ifndef __ASSEMBLY__ diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index 6a3585bc8aa4..e76ad7c2e53b 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -237,8 +237,85 @@ err: return ret; } +static int read_sys_metadata_field(u64 field_id, u64 *data) +{ + struct tdx_module_args args = {}; + int ret; + + /* + * TDH.SYS.RD -- reads one global metadata field + * - RDX (in): the field to read + * - R8 (out): the field data + */ + args.rdx = field_id; + ret = seamcall_prerr_ret(TDH_SYS_RD, &args); + if (ret) + return ret; + + *data = args.r8; + + return 0; +} + +static int read_sys_metadata_field16(u64 field_id, + int offset, + struct tdx_tdmr_sysinfo *ts) +{ + u16 *ts_member = ((void *)ts) + offset; + u64 tmp; + int ret; + + if (WARN_ON_ONCE(MD_FIELD_ID_ELE_SIZE_CODE(field_id) != + MD_FIELD_ID_ELE_SIZE_16BIT)) + return -EINVAL; + + ret = read_sys_metadata_field(field_id, &tmp); + if (ret) + return ret; + + *ts_member = tmp; + + 
return 0; +} + +struct field_mapping { + u64 field_id; + int offset; +}; + +#define TD_SYSINFO_MAP(_field_id, _offset) \ + { .field_id = MD_FIELD_ID_##_field_id, \ + .offset = offsetof(struct tdx_tdmr_sysinfo, _offset) } + +/* Map TD_SYSINFO fields into 'struct tdx_tdmr_sysinfo': */ +static const struct field_mapping fields[] = { + TD_SYSINFO_MAP(MAX_TDMRS, max_tdmrs), + TD_SYSINFO_MAP(MAX_RESERVED_PER_TDMR, max_reserved_per_tdmr), + TD_SYSINFO_MAP(PAMT_4K_ENTRY_SIZE, pamt_entry_size[TDX_PS_4K]), + TD_SYSINFO_MAP(PAMT_2M_ENTRY_SIZE, pamt_entry_size[TDX_PS_2M]), + TD_SYSINFO_MAP(PAMT_1G_ENTRY_SIZE, pamt_entry_size[TDX_PS_1G]), +}; + +static int get_tdx_tdmr_sysinfo(struct tdx_tdmr_sysinfo *tdmr_sysinfo) +{ + int ret; + int i; + + /* Populate 'tdmr_sysinfo' fields using the mapping structure above: */ + for (i = 0; i < ARRAY_SIZE(fields); i++) { + ret = read_sys_metadata_field16(fields[i].field_id, + fields[i].offset, + tdmr_sysinfo); + if (ret) + return ret; + } + + return 0; +} + static int init_tdx_module(void) { + struct tdx_tdmr_sysinfo tdmr_sysinfo; int ret; /* @@ -257,10 +334,13 @@ static int init_tdx_module(void) if (ret) goto out_put_tdxmem; + ret = get_tdx_tdmr_sysinfo(&tdmr_sysinfo); + if (ret) + goto err_free_tdxmem; + /* * TODO: * - * - Get TDX module "TD Memory Region" (TDMR) global metadata. * - Construct a list of TDMRs to cover all TDX-usable memory * regions. * - Configure the TDMRs and the global KeyID to the TDX module. @@ -270,6 +350,8 @@ static int init_tdx_module(void) * Return error before all steps are done. */ ret = -EINVAL; + if (ret) + goto err_free_tdxmem; out_put_tdxmem: /* * @tdx_memlist is written here and read at memory hotplug time. @@ -277,6 +359,10 @@ out_put_tdxmem: */ put_online_mems(); return ret; + +err_free_tdxmem: + free_tdx_memlist(&tdx_memlist); + goto out_put_tdxmem; } static int __tdx_enable(void) diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h index c11e0a7ca664..29cdf5ea5544 100644 --- a/arch/x86/virt/vmx/tdx/tdx.h +++ b/arch/x86/virt/vmx/tdx/tdx.h @@ -2,6 +2,8 @@ #ifndef _X86_VIRT_TDX_H #define _X86_VIRT_TDX_H +#include + /* * This file contains both macros and data structures defined by the TDX * architecture and Linux defined software data structures and functions. @@ -13,8 +15,38 @@ * TDX module SEAMCALL leaf functions */ #define TDH_SYS_INIT 33 +#define TDH_SYS_RD 34 #define TDH_SYS_LP_INIT 35 +/* + * Global scope metadata field ID. + * + * See Table "Global Scope Metadata", TDX module 1.5 ABI spec. + */ +#define MD_FIELD_ID_MAX_TDMRS 0x9100000100000008ULL +#define MD_FIELD_ID_MAX_RESERVED_PER_TDMR 0x9100000100000009ULL +#define MD_FIELD_ID_PAMT_4K_ENTRY_SIZE 0x9100000100000010ULL +#define MD_FIELD_ID_PAMT_2M_ENTRY_SIZE 0x9100000100000011ULL +#define MD_FIELD_ID_PAMT_1G_ENTRY_SIZE 0x9100000100000012ULL + +/* + * Sub-field definition of metadata field ID. + * + * See Table "MD_FIELD_ID (Metadata Field Identifier / Sequence Header) + * Definition", TDX module 1.5 ABI spec. + * + * - Bit 33:32: ELEMENT_SIZE_CODE -- size of a single element of metadata + * + * 0: 8 bits + * 1: 16 bits + * 2: 32 bits + * 3: 64 bits + */ +#define MD_FIELD_ID_ELE_SIZE_CODE(_field_id) \ + (((_field_id) & GENMASK_ULL(33, 32)) >> 32) + +#define MD_FIELD_ID_ELE_SIZE_16BIT 1 + /* * Do not put any hardware-defined TDX structure representations below * this comment! 
@@ -33,4 +65,11 @@ struct tdx_memblock { unsigned long end_pfn; }; +/* "TDMR info" part of "Global Scope Metadata" for constructing TDMRs */ +struct tdx_tdmr_sysinfo { + u16 max_tdmrs; + u16 max_reserved_per_tdmr; + u16 pamt_entry_size[TDX_PS_NR]; +}; + #endif -- cgit From 5173d3c5d018161aca17d4ac95367cf832c7fff1 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:29 -0800 Subject: x86/virt/tdx: Add placeholder to construct TDMRs to cover all TDX memory regions After the kernel selects all TDX-usable memory regions, the kernel needs to pass those regions to the TDX module via data structure "TD Memory Region" (TDMR). Add a placeholder to construct a list of TDMRs (in multiple steps) to cover all TDX-usable memory regions. === Long Version === TDX provides increased levels of memory confidentiality and integrity. This requires special hardware support for features like memory encryption and storage of memory integrity checksums. Not all memory satisfies these requirements. As a result, TDX introduced the concept of a "Convertible Memory Region" (CMR). During boot, the firmware builds a list of all of the memory ranges which can provide the TDX security guarantees. The list of these ranges is available to the kernel by querying the TDX module. The TDX architecture needs additional metadata to record things like which TD guest "owns" a given page of memory. This metadata essentially serves as the 'struct page' for the TDX module. The space for this metadata is not reserved by the hardware up front and must be allocated by the kernel and given to the TDX module. Since this metadata consumes space, the VMM can choose whether or not to allocate it for a given area of convertible memory. If it chooses not to, the memory cannot receive TDX protections and can not be used by TDX guests as private memory. For every memory region that the VMM wants to use as TDX memory, it sets up a "TD Memory Region" (TDMR). Each TDMR represents a physically contiguous convertible range and must also have its own physically contiguous metadata table, referred to as a Physical Address Metadata Table (PAMT), to track status for each page in the TDMR range. Unlike a CMR, each TDMR requires 1G granularity and alignment. To support physical RAM areas that don't meet those strict requirements, each TDMR permits a number of internal "reserved areas" which can be placed over memory holes. If PAMT metadata is placed within a TDMR it must be covered by one of these reserved areas. Let's summarize the concepts: CMR - Firmware-enumerated physical ranges that support TDX. CMRs are 4K aligned. TDMR - Physical address range which is chosen by the kernel to support TDX. 1G granularity and alignment required. Each TDMR has reserved areas where TDX memory holes and overlapping PAMTs can be represented. PAMT - Physically contiguous TDX metadata. One table for each page size per TDMR. Roughly 1/256th of TDMR in size. 256G TDMR = ~1G PAMT. As one step of initializing the TDX module, the kernel configures TDX-usable memory regions by passing a list of TDMRs to the TDX module. Constructing the list of TDMRs consists below steps: 1) Fill out TDMRs to cover all memory regions that the TDX module will use for TD memory. 2) Allocate and set up PAMT for each TDMR. 3) Designate reserved areas for each TDMR. Add a placeholder to construct TDMRs to do the above steps. To keep things simple, just allocate enough space to hold maximum number of TDMRs up front. 
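For a rough sense of the sizes involved, here is a small stand-alone sketch (plain userspace C, not kernel code) that mirrors the sizing logic of tdmr_size_single() and alloc_tdmr_list() added below. The max_tdmrs and max_reserved_per_tdmr values are illustrative assumptions only; the real values are read from the TDX module's global metadata.

    #include <stdio.h>
    #include <stdint.h>

    /* Sizes taken from the structures added in this patch series. */
    #define TDMR_INFO_ALIGNMENT    512ULL                  /* TDMR_INFO_ALIGNMENT */
    #define TDMR_INFO_FIXED_SZ     (8 * sizeof(uint64_t))  /* fixed part of struct tdmr_info */
    #define TDMR_RESERVED_AREA_SZ  (2 * sizeof(uint64_t))  /* struct tdmr_reserved_area */

    static uint64_t align_up(uint64_t v, uint64_t a)
    {
        return (v + a - 1) & ~(a - 1);
    }

    int main(void)
    {
        /* Illustrative assumptions; real values come from TDX module metadata. */
        uint64_t max_tdmrs = 64;
        uint64_t max_reserved_per_tdmr = 16;

        uint64_t tdmr_sz = align_up(TDMR_INFO_FIXED_SZ +
                                    TDMR_RESERVED_AREA_SZ * max_reserved_per_tdmr,
                                    TDMR_INFO_ALIGNMENT);

        printf("one TDMR entry: %llu bytes, whole list: %llu bytes\n",
               (unsigned long long)tdmr_sz,
               (unsigned long long)(tdmr_sz * max_tdmrs));
        return 0;
    }

With these assumed values each TDMR entry rounds up to 512 bytes and the whole preallocated list is only 32KB, which is why allocating space for the maximum number of TDMRs up front is cheap.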
Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Isaku Yamahata Reviewed-by: Dave Hansen Reviewed-by: Kirill A. Shutemov Link: https://lore.kernel.org/all/20231208170740.53979-9-dave.hansen%40intel.com --- arch/x86/virt/vmx/tdx/tdx.c | 93 +++++++++++++++++++++++++++++++++++++++++++-- arch/x86/virt/vmx/tdx/tdx.h | 33 ++++++++++++++++ 2 files changed, 123 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index e76ad7c2e53b..eeb7bf8c3742 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,8 @@ static u32 tdx_nr_guest_keyids __ro_after_init; static DEFINE_PER_CPU(bool, tdx_lp_initialized); +static struct tdmr_info_list tdx_tdmr_list; + static enum tdx_module_status_t tdx_module_status; static DEFINE_MUTEX(tdx_module_lock); @@ -313,6 +316,80 @@ static int get_tdx_tdmr_sysinfo(struct tdx_tdmr_sysinfo *tdmr_sysinfo) return 0; } +/* Calculate the actual TDMR size */ +static int tdmr_size_single(u16 max_reserved_per_tdmr) +{ + int tdmr_sz; + + /* + * The actual size of TDMR depends on the maximum + * number of reserved areas. + */ + tdmr_sz = sizeof(struct tdmr_info); + tdmr_sz += sizeof(struct tdmr_reserved_area) * max_reserved_per_tdmr; + + return ALIGN(tdmr_sz, TDMR_INFO_ALIGNMENT); +} + +static int alloc_tdmr_list(struct tdmr_info_list *tdmr_list, + struct tdx_tdmr_sysinfo *tdmr_sysinfo) +{ + size_t tdmr_sz, tdmr_array_sz; + void *tdmr_array; + + tdmr_sz = tdmr_size_single(tdmr_sysinfo->max_reserved_per_tdmr); + tdmr_array_sz = tdmr_sz * tdmr_sysinfo->max_tdmrs; + + /* + * To keep things simple, allocate all TDMRs together. + * The buffer needs to be physically contiguous to make + * sure each TDMR is physically contiguous. + */ + tdmr_array = alloc_pages_exact(tdmr_array_sz, + GFP_KERNEL | __GFP_ZERO); + if (!tdmr_array) + return -ENOMEM; + + tdmr_list->tdmrs = tdmr_array; + + /* + * Keep the size of TDMR to find the target TDMR + * at a given index in the TDMR list. + */ + tdmr_list->tdmr_sz = tdmr_sz; + tdmr_list->max_tdmrs = tdmr_sysinfo->max_tdmrs; + tdmr_list->nr_consumed_tdmrs = 0; + + return 0; +} + +static void free_tdmr_list(struct tdmr_info_list *tdmr_list) +{ + free_pages_exact(tdmr_list->tdmrs, + tdmr_list->max_tdmrs * tdmr_list->tdmr_sz); +} + +/* + * Construct a list of TDMRs on the preallocated space in @tdmr_list + * to cover all TDX memory regions in @tmb_list based on the TDX module + * TDMR global information in @tdmr_sysinfo. + */ +static int construct_tdmrs(struct list_head *tmb_list, + struct tdmr_info_list *tdmr_list, + struct tdx_tdmr_sysinfo *tdmr_sysinfo) +{ + /* + * TODO: + * + * - Fill out TDMRs to cover all TDX memory regions. + * - Allocate and set up PAMTs for each TDMR. + * - Designate reserved areas for each TDMR. + * + * Return -EINVAL until constructing TDMRs is done + */ + return -EINVAL; +} + static int init_tdx_module(void) { struct tdx_tdmr_sysinfo tdmr_sysinfo; @@ -338,11 +415,19 @@ static int init_tdx_module(void) if (ret) goto err_free_tdxmem; + /* Allocate enough space for constructing TDMRs */ + ret = alloc_tdmr_list(&tdx_tdmr_list, &tdmr_sysinfo); + if (ret) + goto err_free_tdxmem; + + /* Cover all TDX-usable memory regions in TDMRs */ + ret = construct_tdmrs(&tdx_memlist, &tdx_tdmr_list, &tdmr_sysinfo); + if (ret) + goto err_free_tdmrs; + /* * TODO: * - * - Construct a list of TDMRs to cover all TDX-usable memory - * regions. 
* - Configure the TDMRs and the global KeyID to the TDX module. * - Configure the global KeyID on all packages. * - Initialize all TDMRs. @@ -351,7 +436,7 @@ static int init_tdx_module(void) */ ret = -EINVAL; if (ret) - goto err_free_tdxmem; + goto err_free_tdmrs; out_put_tdxmem: /* * @tdx_memlist is written here and read at memory hotplug time. @@ -360,6 +445,8 @@ out_put_tdxmem: put_online_mems(); return ret; +err_free_tdmrs: + free_tdmr_list(&tdx_tdmr_list); err_free_tdxmem: free_tdx_memlist(&tdx_memlist); goto out_put_tdxmem; diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h index 29cdf5ea5544..9b6b5d70804f 100644 --- a/arch/x86/virt/vmx/tdx/tdx.h +++ b/arch/x86/virt/vmx/tdx/tdx.h @@ -47,6 +47,30 @@ #define MD_FIELD_ID_ELE_SIZE_16BIT 1 +struct tdmr_reserved_area { + u64 offset; + u64 size; +} __packed; + +#define TDMR_INFO_ALIGNMENT 512 + +struct tdmr_info { + u64 base; + u64 size; + u64 pamt_1g_base; + u64 pamt_1g_size; + u64 pamt_2m_base; + u64 pamt_2m_size; + u64 pamt_4k_base; + u64 pamt_4k_size; + /* + * The actual number of reserved areas depends on the value of + * field MD_FIELD_ID_MAX_RESERVED_PER_TDMR in the TDX module + * global metadata. + */ + DECLARE_FLEX_ARRAY(struct tdmr_reserved_area, reserved_areas); +} __packed __aligned(TDMR_INFO_ALIGNMENT); + /* * Do not put any hardware-defined TDX structure representations below * this comment! @@ -72,4 +96,13 @@ struct tdx_tdmr_sysinfo { u16 pamt_entry_size[TDX_PS_NR]; }; +struct tdmr_info_list { + void *tdmrs; /* Flexible array to hold 'tdmr_info's */ + int nr_consumed_tdmrs; /* How many 'tdmr_info's are in use */ + + /* Metadata for finding target 'tdmr_info' and freeing @tdmrs */ + int tdmr_sz; /* Size of one 'tdmr_info' */ + int max_tdmrs; /* How many 'tdmr_info's are allocated */ +}; + #endif -- cgit From f3338ac15931075ef8b81145f5de128158a8dc8e Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:30 -0800 Subject: x86/virt/tdx: Fill out TDMRs to cover all TDX memory regions Start to transit out the "multi-steps" to construct a list of "TD Memory Regions" (TDMRs) to cover all TDX-usable memory regions. The kernel configures TDX-usable memory regions by passing a list of TDMRs "TD Memory Regions" (TDMRs) to the TDX module. Each TDMR contains the information of the base/size of a memory region, the base/size of the associated Physical Address Metadata Table (PAMT) and a list of reserved areas in the region. Do the first step to fill out a number of TDMRs to cover all TDX memory regions. To keep it simple, always try to use one TDMR for each memory region. As the first step only set up the base/size for each TDMR. Each TDMR must be 1G aligned and the size must be in 1G granularity. This implies that one TDMR could cover multiple memory regions. If a memory region spans the 1GB boundary and the former part is already covered by the previous TDMR, just use a new TDMR for the remaining part. TDX only supports a limited number of TDMRs. Disable TDX if all TDMRs are consumed but there is more memory region to cover. There are fancier things that could be done like trying to merge adjacent TDMRs. This would allow more pathological memory layouts to be supported. But, current systems are not even close to exhausting the existing TDMR resources in practice. For now, keep it simple. Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Kirill A. 
Shutemov Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Yuan Yao Reviewed-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-10-dave.hansen%40intel.com --- arch/x86/virt/vmx/tdx/tdx.c | 103 +++++++++++++++++++++++++++++++++++++++++++- arch/x86/virt/vmx/tdx/tdx.h | 3 ++ 2 files changed, 105 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index eeb7bf8c3742..d7f928713b6c 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -369,6 +369,102 @@ static void free_tdmr_list(struct tdmr_info_list *tdmr_list) tdmr_list->max_tdmrs * tdmr_list->tdmr_sz); } +/* Get the TDMR from the list at the given index. */ +static struct tdmr_info *tdmr_entry(struct tdmr_info_list *tdmr_list, + int idx) +{ + int tdmr_info_offset = tdmr_list->tdmr_sz * idx; + + return (void *)tdmr_list->tdmrs + tdmr_info_offset; +} + +#define TDMR_ALIGNMENT SZ_1G +#define TDMR_ALIGN_DOWN(_addr) ALIGN_DOWN((_addr), TDMR_ALIGNMENT) +#define TDMR_ALIGN_UP(_addr) ALIGN((_addr), TDMR_ALIGNMENT) + +static inline u64 tdmr_end(struct tdmr_info *tdmr) +{ + return tdmr->base + tdmr->size; +} + +/* + * Take the memory referenced in @tmb_list and populate the + * preallocated @tdmr_list, following all the special alignment + * and size rules for TDMR. + */ +static int fill_out_tdmrs(struct list_head *tmb_list, + struct tdmr_info_list *tdmr_list) +{ + struct tdx_memblock *tmb; + int tdmr_idx = 0; + + /* + * Loop over TDX memory regions and fill out TDMRs to cover them. + * To keep it simple, always try to use one TDMR to cover one + * memory region. + * + * In practice TDX supports at least 64 TDMRs. A 2-socket system + * typically only consumes less than 10 of those. This code is + * dumb and simple and may use more TMDRs than is strictly + * required. + */ + list_for_each_entry(tmb, tmb_list, list) { + struct tdmr_info *tdmr = tdmr_entry(tdmr_list, tdmr_idx); + u64 start, end; + + start = TDMR_ALIGN_DOWN(PFN_PHYS(tmb->start_pfn)); + end = TDMR_ALIGN_UP(PFN_PHYS(tmb->end_pfn)); + + /* + * A valid size indicates the current TDMR has already + * been filled out to cover the previous memory region(s). + */ + if (tdmr->size) { + /* + * Loop to the next if the current memory region + * has already been fully covered. + */ + if (end <= tdmr_end(tdmr)) + continue; + + /* Otherwise, skip the already covered part. */ + if (start < tdmr_end(tdmr)) + start = tdmr_end(tdmr); + + /* + * Create a new TDMR to cover the current memory + * region, or the remaining part of it. + */ + tdmr_idx++; + if (tdmr_idx >= tdmr_list->max_tdmrs) { + pr_warn("initialization failed: TDMRs exhausted.\n"); + return -ENOSPC; + } + + tdmr = tdmr_entry(tdmr_list, tdmr_idx); + } + + tdmr->base = start; + tdmr->size = end - start; + } + + /* @tdmr_idx is always the index of the last valid TDMR. */ + tdmr_list->nr_consumed_tdmrs = tdmr_idx + 1; + + /* + * Warn early that kernel is about to run out of TDMRs. + * + * This is an indication that TDMR allocation has to be + * reworked to be smarter to not run into an issue. 
+ */ + if (tdmr_list->max_tdmrs - tdmr_list->nr_consumed_tdmrs < TDMR_NR_WARN) + pr_warn("consumed TDMRs reaching limit: %d used out of %d\n", + tdmr_list->nr_consumed_tdmrs, + tdmr_list->max_tdmrs); + + return 0; +} + /* * Construct a list of TDMRs on the preallocated space in @tdmr_list * to cover all TDX memory regions in @tmb_list based on the TDX module @@ -378,10 +474,15 @@ static int construct_tdmrs(struct list_head *tmb_list, struct tdmr_info_list *tdmr_list, struct tdx_tdmr_sysinfo *tdmr_sysinfo) { + int ret; + + ret = fill_out_tdmrs(tmb_list, tdmr_list); + if (ret) + return ret; + /* * TODO: * - * - Fill out TDMRs to cover all TDX memory regions. * - Allocate and set up PAMTs for each TDMR. * - Designate reserved areas for each TDMR. * diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h index 9b6b5d70804f..f18ce1b88b0a 100644 --- a/arch/x86/virt/vmx/tdx/tdx.h +++ b/arch/x86/virt/vmx/tdx/tdx.h @@ -96,6 +96,9 @@ struct tdx_tdmr_sysinfo { u16 pamt_entry_size[TDX_PS_NR]; }; +/* Warn if kernel has less than TDMR_NR_WARN TDMRs after allocation */ +#define TDMR_NR_WARN 4 + struct tdmr_info_list { void *tdmrs; /* Flexible array to hold 'tdmr_info's */ int nr_consumed_tdmrs; /* How many 'tdmr_info's are in use */ -- cgit From ac3a22088434e31dddda82aa473bee008df99b97 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:31 -0800 Subject: x86/virt/tdx: Allocate and set up PAMTs for TDMRs The TDX module uses additional metadata to record things like which guest "owns" a given page of memory. This metadata, referred as Physical Address Metadata Table (PAMT), essentially serves as the 'struct page' for the TDX module. PAMTs are not reserved by hardware up front. They must be allocated by the kernel and then given to the TDX module during module initialization. TDX supports 3 page sizes: 4K, 2M, and 1G. Each "TD Memory Region" (TDMR) has 3 PAMTs to track the 3 supported page sizes. Each PAMT must be a physically contiguous area from a Convertible Memory Region (CMR). However, the PAMTs which track pages in one TDMR do not need to reside within that TDMR but can be anywhere in CMRs. If one PAMT overlaps with any TDMR, the overlapping part must be reported as a reserved area in that particular TDMR. Use alloc_contig_pages() since PAMT must be a physically contiguous area and it may be potentially large (~1/256th of the size of the given TDMR). The downside is alloc_contig_pages() may fail at runtime. One (bad) mitigation is to launch a TDX guest early during system boot to get those PAMTs allocated at early time, but the only way to fix is to add a boot option to allocate or reserve PAMTs during kernel boot. It is imperfect but will be improved on later. TDX only supports a limited number of reserved areas per TDMR to cover both PAMTs and memory holes within the given TDMR. If many PAMTs are allocated within a single TDMR, the reserved areas may not be sufficient to cover all of them. Adopt the following policies when allocating PAMTs for a given TDMR: - Allocate three PAMTs of the TDMR in one contiguous chunk to minimize the total number of reserved areas consumed for PAMTs. - Try to first allocate PAMT from the local node of the TDMR for better NUMA locality. Also dump out how many pages are allocated for PAMTs when the TDX module is initialized successfully. This helps answer the eternal "where did all my memory go?" questions. 
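To put the "~1/256th" estimate in concrete numbers, here is a hedged, stand-alone sketch (plain userspace C) of the same per-page-size summation that tdmr_set_up_pamt() below performs. The 16-byte PAMT entry size is an assumed example value only; the real per-page-size entry sizes come from the TDX module's global metadata.

    #include <stdio.h>
    #include <stdint.h>

    #define SZ_4K (1ULL << 12)
    #define SZ_2M (1ULL << 21)
    #define SZ_1G (1ULL << 30)

    int main(void)
    {
        uint64_t tdmr_size  = 256 * SZ_1G;  /* example: one 256GB TDMR */
        uint64_t entry_size = 16;           /* assumed bytes per PAMT entry */

        /* One PAMT per supported page size (4K, 2M, 1G), summed as in tdmr_set_up_pamt(). */
        uint64_t pamt_total = (tdmr_size / SZ_4K) * entry_size +
                              (tdmr_size / SZ_2M) * entry_size +
                              (tdmr_size / SZ_1G) * entry_size;

        printf("total PAMT: %llu MB for a %llu GB TDMR (~1/%llu of its size)\n",
               (unsigned long long)(pamt_total >> 20),
               (unsigned long long)(tdmr_size >> 30),
               (unsigned long long)(tdmr_size / pamt_total));
        return 0;
    }

Under these assumptions a single 256GB TDMR needs roughly 1GB of physically contiguous PAMT, dominated by the 4K table; that is the kind of allocation alloc_contig_pages() must satisfy at runtime, and why the changelog above notes it may fail.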
[ dhansen: merge in error handling cleanup ] Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Isaku Yamahata Reviewed-by: Dave Hansen Reviewed-by: Kirill A. Shutemov Reviewed-by: Yuan Yao Link: https://lore.kernel.org/all/20231208170740.53979-11-dave.hansen%40intel.com --- arch/x86/Kconfig | 1 + arch/x86/virt/vmx/tdx/tdx.c | 217 ++++++++++++++++++++++++++++++++++++++++++-- arch/x86/virt/vmx/tdx/tdx.h | 1 + 3 files changed, 213 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2c69ef844805..e255d8ae5e77 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1972,6 +1972,7 @@ config INTEL_TDX_HOST depends on KVM_INTEL depends on X86_X2APIC select ARCH_KEEP_MEMBLOCK + depends on CONTIG_ALLOC help Intel Trust Domain Extensions (TDX) protects guest VMs from malicious host and certain physical attacks. This option enables necessary TDX diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index d7f928713b6c..c2bff8124598 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -173,7 +173,7 @@ EXPORT_SYMBOL_GPL(tdx_cpu_enable); * overlap. */ static int add_tdx_memblock(struct list_head *tmb_list, unsigned long start_pfn, - unsigned long end_pfn) + unsigned long end_pfn, int nid) { struct tdx_memblock *tmb; @@ -184,6 +184,7 @@ static int add_tdx_memblock(struct list_head *tmb_list, unsigned long start_pfn, INIT_LIST_HEAD(&tmb->list); tmb->start_pfn = start_pfn; tmb->end_pfn = end_pfn; + tmb->nid = nid; /* @tmb_list is protected by mem_hotplug_lock */ list_add_tail(&tmb->list, tmb_list); @@ -211,9 +212,9 @@ static void free_tdx_memlist(struct list_head *tmb_list) static int build_tdx_memlist(struct list_head *tmb_list) { unsigned long start_pfn, end_pfn; - int i, ret; + int i, nid, ret; - for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { /* * The first 1MB is not reported as TDX convertible memory. * Although the first 1MB is always reserved and won't end up @@ -229,7 +230,7 @@ static int build_tdx_memlist(struct list_head *tmb_list) * memblock has already guaranteed they are in address * ascending order and don't overlap. */ - ret = add_tdx_memblock(tmb_list, start_pfn, end_pfn); + ret = add_tdx_memblock(tmb_list, start_pfn, end_pfn, nid); if (ret) goto err; } @@ -465,6 +466,202 @@ static int fill_out_tdmrs(struct list_head *tmb_list, return 0; } +/* + * Calculate PAMT size given a TDMR and a page size. The returned + * PAMT size is always aligned up to 4K page boundary. + */ +static unsigned long tdmr_get_pamt_sz(struct tdmr_info *tdmr, int pgsz, + u16 pamt_entry_size) +{ + unsigned long pamt_sz, nr_pamt_entries; + + switch (pgsz) { + case TDX_PS_4K: + nr_pamt_entries = tdmr->size >> PAGE_SHIFT; + break; + case TDX_PS_2M: + nr_pamt_entries = tdmr->size >> PMD_SHIFT; + break; + case TDX_PS_1G: + nr_pamt_entries = tdmr->size >> PUD_SHIFT; + break; + default: + WARN_ON_ONCE(1); + return 0; + } + + pamt_sz = nr_pamt_entries * pamt_entry_size; + /* TDX requires PAMT size must be 4K aligned */ + pamt_sz = ALIGN(pamt_sz, PAGE_SIZE); + + return pamt_sz; +} + +/* + * Locate a NUMA node which should hold the allocation of the @tdmr + * PAMT. This node will have some memory covered by the TDMR. The + * relative amount of memory covered is not considered. 
+ */ +static int tdmr_get_nid(struct tdmr_info *tdmr, struct list_head *tmb_list) +{ + struct tdx_memblock *tmb; + + /* + * A TDMR must cover at least part of one TMB. That TMB will end + * after the TDMR begins. But, that TMB may have started before + * the TDMR. Find the next 'tmb' that _ends_ after this TDMR + * begins. Ignore 'tmb' start addresses. They are irrelevant. + */ + list_for_each_entry(tmb, tmb_list, list) { + if (tmb->end_pfn > PHYS_PFN(tdmr->base)) + return tmb->nid; + } + + /* + * Fall back to allocating the TDMR's metadata from node 0 when + * no TDX memory block can be found. This should never happen + * since TDMRs originate from TDX memory blocks. + */ + pr_warn("TDMR [0x%llx, 0x%llx): unable to find local NUMA node for PAMT allocation, fallback to use node 0.\n", + tdmr->base, tdmr_end(tdmr)); + return 0; +} + +/* + * Allocate PAMTs from the local NUMA node of some memory in @tmb_list + * within @tdmr, and set up PAMTs for @tdmr. + */ +static int tdmr_set_up_pamt(struct tdmr_info *tdmr, + struct list_head *tmb_list, + u16 pamt_entry_size[]) +{ + unsigned long pamt_base[TDX_PS_NR]; + unsigned long pamt_size[TDX_PS_NR]; + unsigned long tdmr_pamt_base; + unsigned long tdmr_pamt_size; + struct page *pamt; + int pgsz, nid; + + nid = tdmr_get_nid(tdmr, tmb_list); + + /* + * Calculate the PAMT size for each TDX supported page size + * and the total PAMT size. + */ + tdmr_pamt_size = 0; + for (pgsz = TDX_PS_4K; pgsz < TDX_PS_NR; pgsz++) { + pamt_size[pgsz] = tdmr_get_pamt_sz(tdmr, pgsz, + pamt_entry_size[pgsz]); + tdmr_pamt_size += pamt_size[pgsz]; + } + + /* + * Allocate one chunk of physically contiguous memory for all + * PAMTs. This helps minimize the PAMT's use of reserved areas + * in overlapped TDMRs. + */ + pamt = alloc_contig_pages(tdmr_pamt_size >> PAGE_SHIFT, GFP_KERNEL, + nid, &node_online_map); + if (!pamt) + return -ENOMEM; + + /* + * Break the contiguous allocation back up into the + * individual PAMTs for each page size. + */ + tdmr_pamt_base = page_to_pfn(pamt) << PAGE_SHIFT; + for (pgsz = TDX_PS_4K; pgsz < TDX_PS_NR; pgsz++) { + pamt_base[pgsz] = tdmr_pamt_base; + tdmr_pamt_base += pamt_size[pgsz]; + } + + tdmr->pamt_4k_base = pamt_base[TDX_PS_4K]; + tdmr->pamt_4k_size = pamt_size[TDX_PS_4K]; + tdmr->pamt_2m_base = pamt_base[TDX_PS_2M]; + tdmr->pamt_2m_size = pamt_size[TDX_PS_2M]; + tdmr->pamt_1g_base = pamt_base[TDX_PS_1G]; + tdmr->pamt_1g_size = pamt_size[TDX_PS_1G]; + + return 0; +} + +static void tdmr_get_pamt(struct tdmr_info *tdmr, unsigned long *pamt_base, + unsigned long *pamt_size) +{ + unsigned long pamt_bs, pamt_sz; + + /* + * The PAMT was allocated in one contiguous unit. The 4K PAMT + * should always point to the beginning of that allocation. 
+ */ + pamt_bs = tdmr->pamt_4k_base; + pamt_sz = tdmr->pamt_4k_size + tdmr->pamt_2m_size + tdmr->pamt_1g_size; + + WARN_ON_ONCE((pamt_bs & ~PAGE_MASK) || (pamt_sz & ~PAGE_MASK)); + + *pamt_base = pamt_bs; + *pamt_size = pamt_sz; +} + +static void tdmr_free_pamt(struct tdmr_info *tdmr) +{ + unsigned long pamt_base, pamt_size; + + tdmr_get_pamt(tdmr, &pamt_base, &pamt_size); + + /* Do nothing if PAMT hasn't been allocated for this TDMR */ + if (!pamt_size) + return; + + if (WARN_ON_ONCE(!pamt_base)) + return; + + free_contig_range(pamt_base >> PAGE_SHIFT, pamt_size >> PAGE_SHIFT); +} + +static void tdmrs_free_pamt_all(struct tdmr_info_list *tdmr_list) +{ + int i; + + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) + tdmr_free_pamt(tdmr_entry(tdmr_list, i)); +} + +/* Allocate and set up PAMTs for all TDMRs */ +static int tdmrs_set_up_pamt_all(struct tdmr_info_list *tdmr_list, + struct list_head *tmb_list, + u16 pamt_entry_size[]) +{ + int i, ret = 0; + + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) { + ret = tdmr_set_up_pamt(tdmr_entry(tdmr_list, i), tmb_list, + pamt_entry_size); + if (ret) + goto err; + } + + return 0; +err: + tdmrs_free_pamt_all(tdmr_list); + return ret; +} + +static unsigned long tdmrs_count_pamt_kb(struct tdmr_info_list *tdmr_list) +{ + unsigned long pamt_size = 0; + int i; + + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) { + unsigned long base, size; + + tdmr_get_pamt(tdmr_entry(tdmr_list, i), &base, &size); + pamt_size += size; + } + + return pamt_size / 1024; +} + /* * Construct a list of TDMRs on the preallocated space in @tdmr_list * to cover all TDX memory regions in @tmb_list based on the TDX module @@ -480,10 +677,13 @@ static int construct_tdmrs(struct list_head *tmb_list, if (ret) return ret; + ret = tdmrs_set_up_pamt_all(tdmr_list, tmb_list, + tdmr_sysinfo->pamt_entry_size); + if (ret) + return ret; /* * TODO: * - * - Allocate and set up PAMTs for each TDMR. * - Designate reserved areas for each TDMR. * * Return -EINVAL until constructing TDMRs is done @@ -537,7 +737,10 @@ static int init_tdx_module(void) */ ret = -EINVAL; if (ret) - goto err_free_tdmrs; + goto err_free_pamts; + + pr_info("%lu KB allocated for PAMT\n", tdmrs_count_pamt_kb(&tdx_tdmr_list)); + out_put_tdxmem: /* * @tdx_memlist is written here and read at memory hotplug time. @@ -546,6 +749,8 @@ out_put_tdxmem: put_online_mems(); return ret; +err_free_pamts: + tdmrs_free_pamt_all(&tdx_tdmr_list); err_free_tdmrs: free_tdmr_list(&tdx_tdmr_list); err_free_tdxmem: diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h index f18ce1b88b0a..1b04efece9db 100644 --- a/arch/x86/virt/vmx/tdx/tdx.h +++ b/arch/x86/virt/vmx/tdx/tdx.h @@ -87,6 +87,7 @@ struct tdx_memblock { struct list_head list; unsigned long start_pfn; unsigned long end_pfn; + int nid; }; /* "TDMR info" part of "Global Scope Metadata" for constructing TDMRs */ -- cgit From dde3b60d572c941e2c72d22c18a08586fb67bb50 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:32 -0800 Subject: x86/virt/tdx: Designate reserved areas for all TDMRs As the last step of constructing TDMRs, populate reserved areas for all TDMRs. Cover all memory holes and PAMTs with a TMDR reserved area. [ dhansen: trim down chagnelog ] Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Isaku Yamahata Reviewed-by: Kirill A. 
Shutemov Reviewed-by: Yuan Yao Reviewed-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-12-dave.hansen%40intel.com --- arch/x86/virt/vmx/tdx/tdx.c | 217 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 209 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index c2bff8124598..98283f728a81 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -662,6 +663,207 @@ static unsigned long tdmrs_count_pamt_kb(struct tdmr_info_list *tdmr_list) return pamt_size / 1024; } +static int tdmr_add_rsvd_area(struct tdmr_info *tdmr, int *p_idx, u64 addr, + u64 size, u16 max_reserved_per_tdmr) +{ + struct tdmr_reserved_area *rsvd_areas = tdmr->reserved_areas; + int idx = *p_idx; + + /* Reserved area must be 4K aligned in offset and size */ + if (WARN_ON(addr & ~PAGE_MASK || size & ~PAGE_MASK)) + return -EINVAL; + + if (idx >= max_reserved_per_tdmr) { + pr_warn("initialization failed: TDMR [0x%llx, 0x%llx): reserved areas exhausted.\n", + tdmr->base, tdmr_end(tdmr)); + return -ENOSPC; + } + + /* + * Consume one reserved area per call. Make no effort to + * optimize or reduce the number of reserved areas which are + * consumed by contiguous reserved areas, for instance. + */ + rsvd_areas[idx].offset = addr - tdmr->base; + rsvd_areas[idx].size = size; + + *p_idx = idx + 1; + + return 0; +} + +/* + * Go through @tmb_list to find holes between memory areas. If any of + * those holes fall within @tdmr, set up a TDMR reserved area to cover + * the hole. + */ +static int tdmr_populate_rsvd_holes(struct list_head *tmb_list, + struct tdmr_info *tdmr, + int *rsvd_idx, + u16 max_reserved_per_tdmr) +{ + struct tdx_memblock *tmb; + u64 prev_end; + int ret; + + /* + * Start looking for reserved blocks at the + * beginning of the TDMR. + */ + prev_end = tdmr->base; + list_for_each_entry(tmb, tmb_list, list) { + u64 start, end; + + start = PFN_PHYS(tmb->start_pfn); + end = PFN_PHYS(tmb->end_pfn); + + /* Break if this region is after the TDMR */ + if (start >= tdmr_end(tdmr)) + break; + + /* Exclude regions before this TDMR */ + if (end < tdmr->base) + continue; + + /* + * Skip over memory areas that + * have already been dealt with. + */ + if (start <= prev_end) { + prev_end = end; + continue; + } + + /* Add the hole before this region */ + ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, prev_end, + start - prev_end, + max_reserved_per_tdmr); + if (ret) + return ret; + + prev_end = end; + } + + /* Add the hole after the last region if it exists. */ + if (prev_end < tdmr_end(tdmr)) { + ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, prev_end, + tdmr_end(tdmr) - prev_end, + max_reserved_per_tdmr); + if (ret) + return ret; + } + + return 0; +} + +/* + * Go through @tdmr_list to find all PAMTs. If any of those PAMTs + * overlaps with @tdmr, set up a TDMR reserved area to cover the + * overlapping part. 
+ */ +static int tdmr_populate_rsvd_pamts(struct tdmr_info_list *tdmr_list, + struct tdmr_info *tdmr, + int *rsvd_idx, + u16 max_reserved_per_tdmr) +{ + int i, ret; + + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) { + struct tdmr_info *tmp = tdmr_entry(tdmr_list, i); + unsigned long pamt_base, pamt_size, pamt_end; + + tdmr_get_pamt(tmp, &pamt_base, &pamt_size); + /* Each TDMR must already have PAMT allocated */ + WARN_ON_ONCE(!pamt_size || !pamt_base); + + pamt_end = pamt_base + pamt_size; + /* Skip PAMTs outside of the given TDMR */ + if ((pamt_end <= tdmr->base) || + (pamt_base >= tdmr_end(tdmr))) + continue; + + /* Only mark the part within the TDMR as reserved */ + if (pamt_base < tdmr->base) + pamt_base = tdmr->base; + if (pamt_end > tdmr_end(tdmr)) + pamt_end = tdmr_end(tdmr); + + ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, pamt_base, + pamt_end - pamt_base, + max_reserved_per_tdmr); + if (ret) + return ret; + } + + return 0; +} + +/* Compare function called by sort() for TDMR reserved areas */ +static int rsvd_area_cmp_func(const void *a, const void *b) +{ + struct tdmr_reserved_area *r1 = (struct tdmr_reserved_area *)a; + struct tdmr_reserved_area *r2 = (struct tdmr_reserved_area *)b; + + if (r1->offset + r1->size <= r2->offset) + return -1; + if (r1->offset >= r2->offset + r2->size) + return 1; + + /* Reserved areas cannot overlap. The caller must guarantee. */ + WARN_ON_ONCE(1); + return -1; +} + +/* + * Populate reserved areas for the given @tdmr, including memory holes + * (via @tmb_list) and PAMTs (via @tdmr_list). + */ +static int tdmr_populate_rsvd_areas(struct tdmr_info *tdmr, + struct list_head *tmb_list, + struct tdmr_info_list *tdmr_list, + u16 max_reserved_per_tdmr) +{ + int ret, rsvd_idx = 0; + + ret = tdmr_populate_rsvd_holes(tmb_list, tdmr, &rsvd_idx, + max_reserved_per_tdmr); + if (ret) + return ret; + + ret = tdmr_populate_rsvd_pamts(tdmr_list, tdmr, &rsvd_idx, + max_reserved_per_tdmr); + if (ret) + return ret; + + /* TDX requires reserved areas listed in address ascending order */ + sort(tdmr->reserved_areas, rsvd_idx, sizeof(struct tdmr_reserved_area), + rsvd_area_cmp_func, NULL); + + return 0; +} + +/* + * Populate reserved areas for all TDMRs in @tdmr_list, including memory + * holes (via @tmb_list) and PAMTs. + */ +static int tdmrs_populate_rsvd_areas_all(struct tdmr_info_list *tdmr_list, + struct list_head *tmb_list, + u16 max_reserved_per_tdmr) +{ + int i; + + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) { + int ret; + + ret = tdmr_populate_rsvd_areas(tdmr_entry(tdmr_list, i), + tmb_list, tdmr_list, max_reserved_per_tdmr); + if (ret) + return ret; + } + + return 0; +} + /* * Construct a list of TDMRs on the preallocated space in @tdmr_list * to cover all TDX memory regions in @tmb_list based on the TDX module @@ -681,14 +883,13 @@ static int construct_tdmrs(struct list_head *tmb_list, tdmr_sysinfo->pamt_entry_size); if (ret) return ret; - /* - * TODO: - * - * - Designate reserved areas for each TDMR. 
- * - * Return -EINVAL until constructing TDMRs is done - */ - return -EINVAL; + + ret = tdmrs_populate_rsvd_areas_all(tdmr_list, tmb_list, + tdmr_sysinfo->max_reserved_per_tdmr); + if (ret) + tdmrs_free_pamt_all(tdmr_list); + + return ret; } static int init_tdx_module(void) -- cgit From 554ce1c36d1bbeaec42ba870bad75e200af204be Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:33 -0800 Subject: x86/virt/tdx: Configure TDX module with the TDMRs and global KeyID The TDX module uses a private KeyID as the "global KeyID" for mapping things like the PAMT and other TDX metadata. This KeyID has already been reserved when detecting TDX during the kernel early boot. Now that the "TD Memory Regions" (TDMRs) are fully built, pass them to the TDX module together with the global KeyID. Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Isaku Yamahata Reviewed-by: Kirill A. Shutemov Reviewed-by: Yuan Yao Reviewed-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-13-dave.hansen%40intel.com --- arch/x86/virt/vmx/tdx/tdx.c | 43 ++++++++++++++++++++++++++++++++++++++++++- arch/x86/virt/vmx/tdx/tdx.h | 2 ++ 2 files changed, 44 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index 98283f728a81..5146503233c0 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include #include @@ -892,6 +894,41 @@ static int construct_tdmrs(struct list_head *tmb_list, return ret; } +static int config_tdx_module(struct tdmr_info_list *tdmr_list, u64 global_keyid) +{ + struct tdx_module_args args = {}; + u64 *tdmr_pa_array; + size_t array_sz; + int i, ret; + + /* + * TDMRs are passed to the TDX module via an array of physical + * addresses of each TDMR. The array itself also has certain + * alignment requirement. + */ + array_sz = tdmr_list->nr_consumed_tdmrs * sizeof(u64); + array_sz = roundup_pow_of_two(array_sz); + if (array_sz < TDMR_INFO_PA_ARRAY_ALIGNMENT) + array_sz = TDMR_INFO_PA_ARRAY_ALIGNMENT; + + tdmr_pa_array = kzalloc(array_sz, GFP_KERNEL); + if (!tdmr_pa_array) + return -ENOMEM; + + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) + tdmr_pa_array[i] = __pa(tdmr_entry(tdmr_list, i)); + + args.rcx = __pa(tdmr_pa_array); + args.rdx = tdmr_list->nr_consumed_tdmrs; + args.r8 = global_keyid; + ret = seamcall_prerr(TDH_SYS_CONFIG, &args); + + /* Free the array as it is not required anymore. */ + kfree(tdmr_pa_array); + + return ret; +} + static int init_tdx_module(void) { struct tdx_tdmr_sysinfo tdmr_sysinfo; @@ -927,10 +964,14 @@ static int init_tdx_module(void) if (ret) goto err_free_tdmrs; + /* Pass the TDMRs and the global KeyID to the TDX module */ + ret = config_tdx_module(&tdx_tdmr_list, tdx_global_keyid); + if (ret) + goto err_free_pamts; + /* * TODO: * - * - Configure the TDMRs and the global KeyID to the TDX module. * - Configure the global KeyID on all packages. * - Initialize all TDMRs. * diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h index 1b04efece9db..fa5bcf8b5a9c 100644 --- a/arch/x86/virt/vmx/tdx/tdx.h +++ b/arch/x86/virt/vmx/tdx/tdx.h @@ -17,6 +17,7 @@ #define TDH_SYS_INIT 33 #define TDH_SYS_RD 34 #define TDH_SYS_LP_INIT 35 +#define TDH_SYS_CONFIG 45 /* * Global scope metadata field ID. 
@@ -53,6 +54,7 @@ struct tdmr_reserved_area { } __packed; #define TDMR_INFO_ALIGNMENT 512 +#define TDMR_INFO_PA_ARRAY_ALIGNMENT 512 struct tdmr_info { u64 base; -- cgit From e56d28df2f666af9ce0c27d8d204ad813d3766f4 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:34 -0800 Subject: x86/virt/tdx: Configure global KeyID on all packages After the list of TDMRs and the global KeyID are configured to the TDX module, the kernel needs to configure the key of the global KeyID on all packages using TDH.SYS.KEY.CONFIG. This SEAMCALL cannot run parallel on different cpus. Loop all online cpus and use smp_call_on_cpu() to call this SEAMCALL on the first cpu of each package. To keep things simple, this implementation takes no affirmative steps to online cpus to make sure there's at least one cpu for each package. The callers (aka. KVM) can ensure success by ensuring sufficient CPUs are online for this to succeed. Intel hardware doesn't guarantee cache coherency across different KeyIDs. The PAMTs are transitioning from being used by the kernel mapping (KeyId 0) to the TDX module's "global KeyID" mapping. This means that the kernel must flush any dirty KeyID-0 PAMT cachelines before the TDX module uses the global KeyID to access the PAMTs. Otherwise, if those dirty cachelines were written back, they would corrupt the TDX module's metadata. Aside: This corruption would be detected by the memory integrity hardware on the next read of the memory with the global KeyID. The result would likely be fatal to the system but would not impact TDX security. Following the TDX module specification, flush cache before configuring the global KeyID on all packages. Given the PAMT size can be large (~1/256th of system RAM), just use WBINVD on all CPUs to flush. If TDH.SYS.KEY.CONFIG fails, the TDX module may already have "converted" some memory for TDX module use. Convert the memory back so that it can be safely used by the kernel again. Note that this is slower than it should be because of the "partial write machine check" erratum which affects TDX-capable hardware. Also refactor and introduce a new helper: tdmr_do_pamt_func(). This takes a TDMR and runs a function on its PAMT. It looks a _bit_ odd to pass a function pointer around like this, but its use is pretty narrow and it does eliminate what would otherwise be some copying and pasting. [ dhansen: * munge changelog as usual * remove weird (*pamd_func)() syntax ] Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Isaku Yamahata Reviewed-by: Kirill A. 
Shutemov Reviewed-by: Yuan Yao Reviewed-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-14-dave.hansen%40intel.com --- arch/x86/virt/vmx/tdx/tdx.c | 133 +++++++++++++++++++++++++++++++++++++++++++- arch/x86/virt/vmx/tdx/tdx.h | 1 + 2 files changed, 132 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index 5146503233c0..d2e4180b8bd0 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -606,7 +607,8 @@ static void tdmr_get_pamt(struct tdmr_info *tdmr, unsigned long *pamt_base, *pamt_size = pamt_sz; } -static void tdmr_free_pamt(struct tdmr_info *tdmr) +static void tdmr_do_pamt_func(struct tdmr_info *tdmr, + void (*pamt_func)(unsigned long base, unsigned long size)) { unsigned long pamt_base, pamt_size; @@ -619,9 +621,19 @@ static void tdmr_free_pamt(struct tdmr_info *tdmr) if (WARN_ON_ONCE(!pamt_base)) return; + pamt_func(pamt_base, pamt_size); +} + +static void free_pamt(unsigned long pamt_base, unsigned long pamt_size) +{ free_contig_range(pamt_base >> PAGE_SHIFT, pamt_size >> PAGE_SHIFT); } +static void tdmr_free_pamt(struct tdmr_info *tdmr) +{ + tdmr_do_pamt_func(tdmr, free_pamt); +} + static void tdmrs_free_pamt_all(struct tdmr_info_list *tdmr_list) { int i; @@ -650,6 +662,41 @@ err: return ret; } +/* + * Convert TDX private pages back to normal by using MOVDIR64B to + * clear these pages. Note this function doesn't flush cache of + * these TDX private pages. The caller should make sure of that. + */ +static void reset_tdx_pages(unsigned long base, unsigned long size) +{ + const void *zero_page = (const void *)page_address(ZERO_PAGE(0)); + unsigned long phys, end; + + end = base + size; + for (phys = base; phys < end; phys += 64) + movdir64b(__va(phys), zero_page); + + /* + * MOVDIR64B uses WC protocol. Use memory barrier to + * make sure any later user of these pages sees the + * updated data. + */ + mb(); +} + +static void tdmr_reset_pamt(struct tdmr_info *tdmr) +{ + tdmr_do_pamt_func(tdmr, reset_tdx_pages); +} + +static void tdmrs_reset_pamt_all(struct tdmr_info_list *tdmr_list) +{ + int i; + + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) + tdmr_reset_pamt(tdmr_entry(tdmr_list, i)); +} + static unsigned long tdmrs_count_pamt_kb(struct tdmr_info_list *tdmr_list) { unsigned long pamt_size = 0; @@ -929,6 +976,64 @@ static int config_tdx_module(struct tdmr_info_list *tdmr_list, u64 global_keyid) return ret; } +static int do_global_key_config(void *unused) +{ + struct tdx_module_args args = {}; + + return seamcall_prerr(TDH_SYS_KEY_CONFIG, &args); +} + +/* + * Attempt to configure the global KeyID on all physical packages. + * + * This requires running code on at least one CPU in each package. + * TDMR initialization) will fail will fail if any package in the + * system has no online CPUs. + * + * This code takes no affirmative steps to online CPUs. Callers (aka. + * KVM) can ensure success by ensuring sufficient CPUs are online and + * can run SEAMCALLs. + */ +static int config_global_keyid(void) +{ + cpumask_var_t packages; + int cpu, ret = -EINVAL; + + if (!zalloc_cpumask_var(&packages, GFP_KERNEL)) + return -ENOMEM; + + /* + * Hardware doesn't guarantee cache coherency across different + * KeyIDs. The kernel needs to flush PAMT's dirty cachelines + * (associated with KeyID 0) before the TDX module can use the + * global KeyID to access the PAMT. 
Given PAMTs are potentially + * large (~1/256th of system RAM), just use WBINVD. + */ + wbinvd_on_all_cpus(); + + for_each_online_cpu(cpu) { + /* + * The key configuration only needs to be done once per + * package and will return an error if configured more + * than once. Avoid doing it multiple times per package. + */ + if (cpumask_test_and_set_cpu(topology_physical_package_id(cpu), + packages)) + continue; + + /* + * TDH.SYS.KEY.CONFIG cannot run concurrently on + * different cpus. Do it one by one. + */ + ret = smp_call_on_cpu(cpu, do_global_key_config, NULL, true); + if (ret) + break; + } + + free_cpumask_var(packages); + return ret; +} + static int init_tdx_module(void) { struct tdx_tdmr_sysinfo tdmr_sysinfo; @@ -969,6 +1074,11 @@ static int init_tdx_module(void) if (ret) goto err_free_pamts; + /* Config the key of global KeyID on all packages */ + ret = config_global_keyid(); + if (ret) + goto err_reset_pamts; + /* * TODO: * @@ -979,7 +1089,7 @@ static int init_tdx_module(void) */ ret = -EINVAL; if (ret) - goto err_free_pamts; + goto err_reset_pamts; pr_info("%lu KB allocated for PAMT\n", tdmrs_count_pamt_kb(&tdx_tdmr_list)); @@ -991,6 +1101,22 @@ out_put_tdxmem: put_online_mems(); return ret; +err_reset_pamts: + /* + * Part of PAMTs may already have been initialized by the + * TDX module. Flush cache before returning PAMTs back + * to the kernel. + */ + wbinvd_on_all_cpus(); + /* + * According to the TDX hardware spec, if the platform + * doesn't have the "partial write machine check" + * erratum, any kernel read/write will never cause #MC + * in kernel space, thus it's OK to not convert PAMTs + * back to normal. But do the conversion anyway here + * as suggested by the TDX spec. + */ + tdmrs_reset_pamt_all(&tdx_tdmr_list); err_free_pamts: tdmrs_free_pamt_all(&tdx_tdmr_list); err_free_tdmrs: @@ -1024,6 +1150,9 @@ static int __tdx_enable(void) * lock to prevent any new cpu from becoming online; 2) done both VMXON * and tdx_cpu_enable() on all online cpus. * + * This function requires there's at least one online cpu for each CPU + * package to succeed. + * * This function can be called in parallel by multiple callers. * * Return 0 if TDX is enabled successfully, otherwise error. diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h index fa5bcf8b5a9c..dd35baf756b8 100644 --- a/arch/x86/virt/vmx/tdx/tdx.h +++ b/arch/x86/virt/vmx/tdx/tdx.h @@ -14,6 +14,7 @@ /* * TDX module SEAMCALL leaf functions */ +#define TDH_SYS_KEY_CONFIG 31 #define TDH_SYS_INIT 33 #define TDH_SYS_RD 34 #define TDH_SYS_LP_INIT 35 -- cgit From 0b2bc38131f02d6fd38695f191bbd8c6109ecffc Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:35 -0800 Subject: x86/virt/tdx: Initialize all TDMRs After the global KeyID has been configured on all packages, initialize all TDMRs to make all TDX-usable memory regions that are passed to the TDX module become usable. This is the last step of initializing the TDX module. Initializing TDMRs can be time consuming on large memory systems as it involves initializing all metadata entries for all pages that can be used by TDX guests. Initializing different TDMRs can be parallelized. For now to keep it simple, just initialize all TDMRs one by one. It can be enhanced in the future. Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Isaku Yamahata Reviewed-by: Kirill A. 
Shutemov Reviewed-by: Yuan Yao Reviewed-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-15-dave.hansen%40intel.com --- arch/x86/virt/vmx/tdx/tdx.c | 61 ++++++++++++++++++++++++++++++++++++++------- arch/x86/virt/vmx/tdx/tdx.h | 1 + 2 files changed, 53 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index d2e4180b8bd0..48fb1b3101c0 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -1034,6 +1034,56 @@ static int config_global_keyid(void) return ret; } +static int init_tdmr(struct tdmr_info *tdmr) +{ + u64 next; + + /* + * Initializing a TDMR can be time consuming. To avoid long + * SEAMCALLs, the TDX module may only initialize a part of the + * TDMR in each call. + */ + do { + struct tdx_module_args args = { + .rcx = tdmr->base, + }; + int ret; + + ret = seamcall_prerr_ret(TDH_SYS_TDMR_INIT, &args); + if (ret) + return ret; + /* + * RDX contains 'next-to-initialize' address if + * TDH.SYS.TDMR.INIT did not fully complete and + * should be retried. + */ + next = args.rdx; + cond_resched(); + /* Keep making SEAMCALLs until the TDMR is done */ + } while (next < tdmr->base + tdmr->size); + + return 0; +} + +static int init_tdmrs(struct tdmr_info_list *tdmr_list) +{ + int i; + + /* + * This operation is costly. It can be parallelized, + * but keep it simple for now. + */ + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) { + int ret; + + ret = init_tdmr(tdmr_entry(tdmr_list, i)); + if (ret) + return ret; + } + + return 0; +} + static int init_tdx_module(void) { struct tdx_tdmr_sysinfo tdmr_sysinfo; @@ -1079,15 +1129,8 @@ static int init_tdx_module(void) if (ret) goto err_reset_pamts; - /* - * TODO: - * - * - Configure the global KeyID on all packages. - * - Initialize all TDMRs. - * - * Return error before all steps are done. - */ - ret = -EINVAL; + /* Initialize TDMRs to complete the TDX module initialization */ + ret = init_tdmrs(&tdx_tdmr_list); if (ret) goto err_reset_pamts; diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h index dd35baf756b8..c0610f0bb88c 100644 --- a/arch/x86/virt/vmx/tdx/tdx.h +++ b/arch/x86/virt/vmx/tdx/tdx.h @@ -18,6 +18,7 @@ #define TDH_SYS_INIT 33 #define TDH_SYS_RD 34 #define TDH_SYS_LP_INIT 35 +#define TDH_SYS_TDMR_INIT 36 #define TDH_SYS_CONFIG 45 /* -- cgit From f3f6aa68640298fb966811b991c7b8efee67e181 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:36 -0800 Subject: x86/virt/tdx: Handle TDX interaction with sleep and hibernation TDX is incompatible with hibernation and some ACPI sleep states. Users must disable hibernation to use TDX. Users must also disable TDX if they want to use ACPI S3 sleep. This feels a bit wonky and asymmetric, but it avoids adding any new command-line parameters for now. It can be improved if users hate it too much. Long version: TDX cannot survive from S3 and deeper states. The hardware resets and disables TDX completely when platform goes to S3 and deeper. Both TDX guests and the TDX module get destroyed permanently. The kernel uses S3 to support suspend-to-ram, and S4 or deeper states to support hibernation. The kernel also maintains TDX states to track whether it has been initialized and its metadata resource, etc. After resuming from S3 or hibernation, these TDX states won't be correct anymore. 
Theoretically, the kernel can do more complicated things like resetting TDX internal states and TDX module metadata before going to S3 or deeper, and re-initialize TDX module after resuming, etc, but there is no way to save/restore TDX guests for now. Until TDX supports full save and restore of TDX guests, there is no big value to handle TDX module in suspend and hibernation alone. To make things simple, just choose to make TDX mutually exclusive with S3 and hibernation. Note the TDX module is initialized at runtime. To avoid having to deal with the fuss of determining TDX state at runtime, just choose TDX vs S3 and hibernation at kernel early boot. It's a bad user experience if the choice of TDX and S3/hibernation is done at runtime anyway, i.e., the user can experience being able to do S3/hibernation but later becoming unable to due to TDX being enabled. Disable TDX in kernel early boot when hibernation support is available. Currently there's no mechanism exposed by the hibernation code to allow other kernel code to disable hibernation once for all. Users that want TDX must disable hibernation, like using hibername=no on the command line. Disable ACPI S3 when TDX is enabled by the BIOS. For now the user needs to disable TDX in the BIOS to use ACPI S3. A new kernel command line can be added in the future if there's a need to let user disable TDX host via kernel command line. Alternatively, the kernel could disable TDX when ACPI S3 is supported and request the user to disable S3 to use TDX. But there's no existing kernel command line to do that, and BIOS doesn't always have an option to disable S3. [ dhansen: subject / changelog tweaks ] Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Kirill A. Shutemov Reviewed-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-16-dave.hansen%40intel.com --- arch/x86/virt/vmx/tdx/tdx.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'arch') diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index 48fb1b3101c0..6d030f64720b 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include #include #include @@ -1329,6 +1331,15 @@ void __init tdx_init(void) return; } + /* + * At this point, hibernation_available() indicates whether or + * not hibernation support has been permanently disabled. + */ + if (hibernation_available()) { + pr_err("initialization failed: Hibernation support is enabled\n"); + return; + } + err = register_memory_notifier(&tdx_memory_nb); if (err) { pr_err("initialization failed: register_memory_notifier() failed (%d)\n", @@ -1336,6 +1347,11 @@ void __init tdx_init(void) return; } +#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) + pr_info("Disable ACPI S3. Turn off TDX in the BIOS to use ACPI S3.\n"); + acpi_suspend_lowlevel = NULL; +#endif + /* * Just use the first TDX KeyID as the 'global KeyID' and * leave the rest for TDX guests. -- cgit From 1e536e10689700e006989dea33918cce348e04b6 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:37 -0800 Subject: x86/cpu: Detect TDX partial write machine check erratum TDX memory has integrity and confidentiality protections. Violations of this integrity protection are supposed to only affect TDX operations and are never supposed to affect the host kernel itself. In other words, the host kernel should never, itself, see machine checks induced by the TDX integrity hardware. 
Alas, the first few generations of TDX hardware have an erratum. A partial write to a TDX private memory cacheline will silently "poison" the line. Subsequent reads will consume the poison and generate a machine check. According to the TDX hardware spec, neither of these things should have happened. Virtually all kernel memory accesses operations happen in full cachelines. In practice, writing a "byte" of memory usually reads a 64 byte cacheline of memory, modifies it, then writes the whole line back. Those operations do not trigger this problem. This problem is triggered by "partial" writes where a write transaction of less than cacheline lands at the memory controller. The CPU does these via non-temporal write instructions (like MOVNTI), or through UC/WC memory mappings. The issue can also be triggered away from the CPU by devices doing partial writes via DMA. With this erratum, there are additional things need to be done. To prepare for those changes, add a CPU bug bit to indicate this erratum. Note this bug reflects the hardware thus it is detected regardless of whether the kernel is built with TDX support or not. Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Kirill A. Shutemov Reviewed-by: David Hildenbrand Reviewed-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-17-dave.hansen%40intel.com --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/virt/vmx/tdx/tdx.c | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 45ddc6b6baaa..dbec4a0cf36a 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -496,6 +496,7 @@ #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ #define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ #define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ +#define X86_BUG_TDX_PW_MCE X86_BUG(31) /* CPU may incur #MC if non-TD software does partial write to TDX private memory */ /* BUG word 2 */ #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index 6d030f64720b..06b04b9c3236 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -33,6 +33,8 @@ #include #include #include +#include +#include #include "tdx.h" static u32 tdx_global_keyid __ro_after_init; @@ -1308,6 +1310,21 @@ static struct notifier_block tdx_memory_nb = { .notifier_call = tdx_memory_notifier, }; +static void __init check_tdx_erratum(void) +{ + /* + * These CPUs have an erratum. A partial write from non-TD + * software (e.g. via MOVNTI variants or UC/WC mapping) to TDX + * private memory poisons that memory, and a subsequent read of + * that memory triggers #MC. 
+ */ + switch (boot_cpu_data.x86_model) { + case INTEL_FAM6_SAPPHIRERAPIDS_X: + case INTEL_FAM6_EMERALDRAPIDS_X: + setup_force_cpu_bug(X86_BUG_TDX_PW_MCE); + } +} + void __init tdx_init(void) { u32 tdx_keyid_start, nr_tdx_keyids; @@ -1361,4 +1378,6 @@ void __init tdx_init(void) tdx_nr_guest_keyids = nr_tdx_keyids - 1; setup_force_cpu_cap(X86_FEATURE_TDX_HOST_PLATFORM); + + check_tdx_erratum(); } -- cgit From 70060463cb2ba72bbb77c7ced503f53e09380f17 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Fri, 8 Dec 2023 09:07:38 -0800 Subject: x86/mce: Differentiate real hardware #MCs from TDX erratum ones The first few generations of TDX hardware have an erratum. Triggering it in Linux requires some kind of kernel bug involving relatively exotic memory writes to TDX private memory and will manifest via spurious-looking machine checks when reading the affected memory. Make an effort to detect these TDX-induced machine checks and spit out a new blurb to dmesg so folks do not think their hardware is failing. == Background == Virtually all kernel memory accesses operations happen in full cachelines. In practice, writing a "byte" of memory usually reads a 64 byte cacheline of memory, modifies it, then writes the whole line back. Those operations do not trigger this problem. This problem is triggered by "partial" writes where a write transaction of less than cacheline lands at the memory controller. The CPU does these via non-temporal write instructions (like MOVNTI), or through UC/WC memory mappings. The issue can also be triggered away from the CPU by devices doing partial writes via DMA. == Problem == A partial write to a TDX private memory cacheline will silently "poison" the line. Subsequent reads will consume the poison and generate a machine check. According to the TDX hardware spec, neither of these things should have happened. To add insult to injury, the Linux machine code will present these as a literal "Hardware error" when they were, in fact, a software-triggered issue. == Solution == In the end, this issue is hard to trigger. Rather than do something rash (and incomplete) like unmap TDX private memory from the direct map, improve the machine check handler. Currently, the #MC handler doesn't distinguish whether the memory is TDX private memory or not but just dump, for instance, below message: [...] mce: [Hardware Error]: CPU 147: Machine Check Exception: f Bank 1: bd80000000100134 [...] mce: [Hardware Error]: RIP 10: {__tlb_remove_page_size+0x10/0xa0} ... [...] mce: [Hardware Error]: Run the above through 'mcelog --ascii' [...] mce: [Hardware Error]: Machine check: Data load in unrecoverable area of kernel [...] Kernel panic - not syncing: Fatal local machine check Which says "Hardware Error" and "Data load in unrecoverable area of kernel". Ideally, it's better for the log to say "software bug around TDX private memory" instead of "Hardware Error". But in reality the real hardware memory error can happen, and sadly such software-triggered #MC cannot be distinguished from the real hardware error. Also, the error message is used by userspace tool 'mcelog' to parse, so changing the output may break userspace. So keep the "Hardware Error". The "Data load in unrecoverable area of kernel" is also helpful, so keep it too. Instead of modifying above error log, improve the error log by printing additional TDX related message to make the log like: ... [...] mce: [Hardware Error]: Machine check: Data load in unrecoverable area of kernel [...] 
mce: [Hardware Error]: Machine Check: TDX private memory error. Possible kernel bug. Adding this additional message requires determination of whether the memory page is TDX private memory. There is no existing infrastructure to do that. Add an interface to query the TDX module to fill this gap. == Impact == This issue requires some kind of kernel bug to trigger. TDX private memory should never be mapped UC/WC. A partial write originating from these mappings would require *two* bugs, first mapping the wrong page, then writing the wrong memory. It would also be detectable using traditional memory corruption techniques like DEBUG_PAGEALLOC. MOVNTI (and friends) could cause this issue with something like a simple buffer overrun or use-after-free on the direct map. It should also be detectable with normal debug techniques. The one place where this might get nasty would be if the CPU read data then wrote back the same data. That would trigger this problem but would not, for instance, set off mechanisms like slab redzoning because it doesn't actually corrupt data. With an IOMMU at least, the DMA exposure is similar to the UC/WC issue. TDX private memory would first need to be incorrectly mapped into the I/O space and then a later DMA to that mapping would actually cause the poisoning event. [ dhansen: changelog tweaks ] Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Kirill A. Shutemov Reviewed-by: Yuan Yao Reviewed-by: Dave Hansen Reviewed-by: Tony Luck Link: https://lore.kernel.org/all/20231208170740.53979-18-dave.hansen%40intel.com --- arch/x86/include/asm/tdx.h | 4 ++ arch/x86/kernel/cpu/mce/core.c | 15 ++++++ arch/x86/virt/vmx/tdx/tdx.c | 109 +++++++++++++++++++++++++++++++++++++++++ arch/x86/virt/vmx/tdx/tdx.h | 5 ++ 4 files changed, 133 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h index c54948e1999c..eba178996d84 100644 --- a/arch/x86/include/asm/tdx.h +++ b/arch/x86/include/asm/tdx.h @@ -32,6 +32,8 @@ #ifndef __ASSEMBLY__ +#include + /* * Used by the #VE exception handler to gather the #VE exception * info from the TDX module. This is a software only structure @@ -113,10 +115,12 @@ static inline u64 sc_retry(sc_func_t func, u64 fn, #define seamcall_saved_ret(_fn, _args) sc_retry(__seamcall_saved_ret, (_fn), (_args)) int tdx_cpu_enable(void); int tdx_enable(void); +const char *tdx_dump_mce_info(struct mce *m); #else static inline void tdx_init(void) { } static inline int tdx_cpu_enable(void) { return -ENODEV; } static inline int tdx_enable(void) { return -ENODEV; } +static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; } #endif /* CONFIG_INTEL_TDX_HOST */ #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 7b397370b4d6..00d2b6b1dc24 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -52,6 +52,7 @@ #include #include #include +#include #include "internal.h" @@ -228,11 +229,20 @@ static void wait_for_panic(void) panic("Panicing machine check CPU died"); } +static const char *mce_dump_aux_info(struct mce *m) +{ + if (boot_cpu_has_bug(X86_BUG_TDX_PW_MCE)) + return tdx_dump_mce_info(m); + + return NULL; +} + static noinstr void mce_panic(const char *msg, struct mce *final, char *exp) { struct llist_node *pending; struct mce_evt_llist *l; int apei_err = 0; + const char *memmsg; /* * Allow instrumentation around external facilities usage. 
Not that it @@ -283,6 +293,11 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp) } if (exp) pr_emerg(HW_ERR "Machine check: %s\n", exp); + + memmsg = mce_dump_aux_info(final); + if (memmsg) + pr_emerg(HW_ERR "Machine check: %s\n", memmsg); + if (!fake_panic) { if (panic_timeout == 0) panic_timeout = mca_cfg.panic_timeout; diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index 06b04b9c3236..4d6826a76f78 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -35,6 +36,7 @@ #include #include #include +#include #include "tdx.h" static u32 tdx_global_keyid __ro_after_init; @@ -942,6 +944,13 @@ static int construct_tdmrs(struct list_head *tmb_list, if (ret) tdmrs_free_pamt_all(tdmr_list); + /* + * The tdmr_info_list is read-only from here on out. + * Ensure that these writes are seen by other CPUs. + * Pairs with a smp_rmb() in is_pamt_page(). + */ + smp_wmb(); + return ret; } @@ -1235,6 +1244,106 @@ int tdx_enable(void) } EXPORT_SYMBOL_GPL(tdx_enable); +static bool is_pamt_page(unsigned long phys) +{ + struct tdmr_info_list *tdmr_list = &tdx_tdmr_list; + int i; + + /* Ensure that all remote 'tdmr_list' writes are visible: */ + smp_rmb(); + + /* + * The TDX module is no longer returning TDX_SYS_NOT_READY and + * is initialized. The 'tdmr_list' was initialized long ago + * and is now read-only. + */ + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) { + unsigned long base, size; + + tdmr_get_pamt(tdmr_entry(tdmr_list, i), &base, &size); + + if (phys >= base && phys < (base + size)) + return true; + } + + return false; +} + +/* + * Return whether the memory page at the given physical address is TDX + * private memory or not. + * + * This can be imprecise for two known reasons: + * 1. PAMTs are private memory and exist before the TDX module is + * ready and TDH_PHYMEM_PAGE_RDMD works. This is a relatively + * short window that occurs once per boot. + * 2. TDH_PHYMEM_PAGE_RDMD reflects the TDX module's knowledge of the + * page. However, the page can still cause #MC until it has been + * fully converted to shared using 64-byte writes like MOVDIR64B. + * Buggy hosts might still leave #MC-causing memory in place which + * this function can not detect. + */ +static bool paddr_is_tdx_private(unsigned long phys) +{ + struct tdx_module_args args = { + .rcx = phys & PAGE_MASK, + }; + u64 sret; + + if (!boot_cpu_has(X86_FEATURE_TDX_HOST_PLATFORM)) + return false; + + /* Get page type from the TDX module */ + sret = __seamcall_ret(TDH_PHYMEM_PAGE_RDMD, &args); + + /* + * The SEAMCALL will not return success unless there is a + * working, "ready" TDX module. Assume an absence of TDX + * private pages until SEAMCALL is working. + */ + if (sret) + return false; + + /* + * SEAMCALL was successful -- read page type (via RCX): + * + * - PT_NDA: Page is not used by the TDX module + * - PT_RSVD: Reserved for Non-TDX use + * - Others: Page is used by the TDX module + * + * Note PAMT pages are marked as PT_RSVD but they are also TDX + * private memory. + */ + switch (args.rcx) { + case PT_NDA: + return false; + case PT_RSVD: + return is_pamt_page(phys); + default: + return true; + } +} + +/* + * Some TDX-capable CPUs have an erratum. A write to TDX private + * memory poisons that memory, and a subsequent read of that memory + * triggers #MC. + * + * Help distinguish erratum-triggered #MCs from a normal hardware one. 
+ * Just print additional message to show such #MC may be result of the + * erratum. + */ +const char *tdx_dump_mce_info(struct mce *m) +{ + if (!m || !mce_is_memory_error(m) || !mce_usable_address(m)) + return NULL; + + if (!paddr_is_tdx_private(m->addr)) + return NULL; + + return "TDX private memory error. Possible kernel bug."; +} + static __init int record_keyid_partitioning(u32 *tdx_keyid_start, u32 *nr_tdx_keyids) { diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h index c0610f0bb88c..b701f69485d3 100644 --- a/arch/x86/virt/vmx/tdx/tdx.h +++ b/arch/x86/virt/vmx/tdx/tdx.h @@ -14,6 +14,7 @@ /* * TDX module SEAMCALL leaf functions */ +#define TDH_PHYMEM_PAGE_RDMD 24 #define TDH_SYS_KEY_CONFIG 31 #define TDH_SYS_INIT 33 #define TDH_SYS_RD 34 @@ -21,6 +22,10 @@ #define TDH_SYS_TDMR_INIT 36 #define TDH_SYS_CONFIG 45 +/* TDX page types */ +#define PT_NDA 0x0 +#define PT_RSVD 0x1 + /* * Global scope metadata field ID. * -- cgit From cb8eb06d50fcf4a478813a612f68c38cca45c742 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 8 Dec 2023 09:07:40 -0800 Subject: x86/virt/tdx: Disable TDX host support when kexec is enabled TDX host support currently lacks the ability to handle kexec. Disable TDX when kexec is enabled. Signed-off-by: Dave Hansen Link: https://lore.kernel.org/all/20231208170740.53979-20-dave.hansen%40intel.com --- arch/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e255d8ae5e77..01cdb16acff6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1973,6 +1973,7 @@ config INTEL_TDX_HOST depends on X86_X2APIC select ARCH_KEEP_MEMBLOCK depends on CONTIG_ALLOC + depends on !KEXEC_CORE help Intel Trust Domain Extensions (TDX) protects guest VMs from malicious host and certain physical attacks. This option enables necessary TDX -- cgit From 83e1bdc94f32dcf52dfcd2025acc7a2b9376b1e8 Mon Sep 17 00:00:00 2001 From: Kai Huang Date: Thu, 14 Dec 2023 11:28:25 +1300 Subject: x86/virt/tdx: Make TDX host depend on X86_MCE A build failure was reported that when INTEL_TDX_HOST is enabled but X86_MCE is not, the tdx_dump_mce_info() function fails to link: ld: vmlinux.o: in function `tdx_dump_mce_info': ...: undefined reference to `mce_is_memory_error' ...: undefined reference to `mce_usable_address' The reason is in such configuration, despite there's no caller of tdx_dump_mce_info() it is still built and there's no implementation for the two "mce_*()" functions. Make INTEL_TDX_HOST depend on X86_MCE to fix. It makes sense to enable MCE support for the TDX host anyway. Because the only way that TDX has to report integrity errors is an MCE, and it is not good to silently ignore such MCE. The TDX spec also suggests the host VMM is expected to implement the MCE handler. Note it also makes sense to make INTEL_TDX_HOST select X86_MCE but this generates "recursive dependency detected!" error in the Kconfig. Closes: https://lore.kernel.org/all/20231212214612.GHZXjUpBFa1IwVMTI7@fat_crate.local/T/ Fixes: 70060463cb2b ("x86/mce: Differentiate real hardware #MCs from TDX erratum ones") Reported-by: Arnd Bergmann Signed-off-by: Kai Huang Signed-off-by: Dave Hansen Reviewed-by: Kirill A. 
Shutemov Link: https://lore.kernel.org/all/20231212214612.GHZXjUpBFa1IwVMTI7@fat_crate.local/T/#m1a109c29324b2bbd0b3b1d45c218012cd3a13be6 Link: https://lore.kernel.org/all/20231213222825.286809-1-kai.huang%40intel.com --- arch/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 01cdb16acff6..92c03cb99b3e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1974,6 +1974,7 @@ config INTEL_TDX_HOST select ARCH_KEEP_MEMBLOCK depends on CONTIG_ALLOC depends on !KEXEC_CORE + depends on X86_MCE help Intel Trust Domain Extensions (TDX) protects guest VMs from malicious host and certain physical attacks. This option enables necessary TDX -- cgit
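[ Editorial aside: taken together, the series above leaves tdx_cpu_enable() and tdx_enable() as the interface for users such as KVM. The sketch below is a hypothetical caller, not code from the series; it only restates the documented contract that the caller holds the CPU hotplug read lock and has already done VMXON plus tdx_cpu_enable() on every online CPU before calling tdx_enable(). ]

	/* Hypothetical caller sketch; VMXON handling is the caller's business. */
	#include <linux/cpu.h>
	#include <asm/tdx.h>

	static int example_enable_tdx_host(void)
	{
		int ret;

		/* Keep the set of online CPUs stable while the module initializes. */
		cpus_read_lock();

		/*
		 * Precondition (enforced by the caller, e.g. from its CPU
		 * startup path): VMXON done and tdx_cpu_enable() returned 0
		 * on every online CPU, with at least one online CPU per
		 * package so the global KeyID configuration can succeed.
		 */
		ret = tdx_enable();

		cpus_read_unlock();
		return ret;
	}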