author     Linus Torvalds <torvalds@linux-foundation.org>   2025-01-21 09:00:31 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2025-01-21 09:00:31 -0800
commit     13b6931c44969ff057ad2a3aa1c20a1b28b79b6f (patch)
tree       1d8bca26b2eaf896bbdc31bba7de0c3a0e12e6dc /arch/x86/include/asm
parent     254d7633103cfe2773fa001ca3ab9e5c1c7fa59a (diff)
parent     0563ee35ae2c9cfb0c6a7b2c0ddf7d9372bb8a98 (diff)
Merge tag 'x86_sev_for_v6.14_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 SEV updates from Borislav Petkov:
 - A segmented Reverse Map table (RMP) is a distributed table, split
   across nodes, in which each node holds descriptors for its node-local
   4K pages, recording each page's owner (hypervisor, guest, etc.) in the
   realm of confidential computing. Add support for such a table in order
   to improve locality of reference when accessing or modifying RMP table
   entries (a small decoding sketch of the new RMP_CFG MSR follows this
   list)

 - Add support for reading the TSC in SNP guests without any interference
   or influence from the hypervisor, with the goal of making a
   confidential guest even more independent of the hypervisor (a layout
   sketch of the new TSC_INFO guest message follows the diff at the end)
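To make the segmented-RMP item concrete: the series adds an RMP_CFG MSR whose
bit 0 reports whether the segmented RMP is enabled and whose bits 13:8 give,
as a power of two, the span of address space covered by each RMP segment
(that is my reading of the MSR_AMD64_RMP_SEGMENT_SHIFT() macro added to
msr-index.h below). A minimal, standalone decoding sketch, not kernel code,
assuming the raw MSR value has already been read; decode_rmp_cfg() and the
sample value are illustrative only:

/*
 * Standalone sketch: decode the segmented-RMP configuration from a raw
 * MSR_AMD64_RMP_CFG value. The macros mirror the ones added in msr-index.h;
 * the "bytes per segment" interpretation is an assumption based on the
 * series, not a quote from the APM.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)			(1ULL << (n))
#define GENMASK_ULL(h, l)		((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define MSR_AMD64_SEG_RMP_ENABLED_BIT	0
#define MSR_AMD64_SEG_RMP_ENABLED	BIT_ULL(MSR_AMD64_SEG_RMP_ENABLED_BIT)
#define MSR_AMD64_RMP_SEGMENT_SHIFT(x)	(((x) & GENMASK_ULL(13, 8)) >> 8)

static bool decode_rmp_cfg(uint64_t rmp_cfg)
{
	if (!(rmp_cfg & MSR_AMD64_SEG_RMP_ENABLED)) {
		printf("contiguous RMP table (single segment)\n");
		return false;
	}

	/* Bits 13:8 encode log2 of the address range each segment covers. */
	unsigned int shift = MSR_AMD64_RMP_SEGMENT_SHIFT(rmp_cfg);

	printf("segmented RMP: %llu bytes covered per segment\n",
	       (unsigned long long)(1ULL << shift));
	return true;
}

int main(void)
{
	/* Purely illustrative value: enabled, segment shift of 34 (16 GB). */
	decode_rmp_cfg(MSR_AMD64_SEG_RMP_ENABLED | (34ULL << 8));
	return 0;
}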
* tag 'x86_sev_for_v6.14_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/sev: Add the Secure TSC feature for SNP guests
x86/tsc: Init the TSC for Secure TSC guests
x86/sev: Mark the TSC in a secure TSC guest as reliable
x86/sev: Prevent RDTSC/RDTSCP interception for Secure TSC enabled guests
x86/sev: Prevent GUEST_TSC_FREQ MSR interception for Secure TSC enabled guests
x86/sev: Change TSC MSR behavior for Secure TSC enabled guests
x86/sev: Add Secure TSC support for SNP guests
x86/sev: Relocate SNP guest messaging routines to common code
x86/sev: Carve out and export SNP guest messaging init routines
virt: sev-guest: Replace GFP_KERNEL_ACCOUNT with GFP_KERNEL
virt: sev-guest: Remove is_vmpck_empty() helper
x86/sev/docs: Document the SNP Reverse Map Table (RMP)
x86/sev: Add full support for a segmented RMP table
x86/sev: Treat the contiguous RMP table as a single RMP segment
x86/sev: Map only the RMP table entries instead of the full RMP range
x86/sev: Move the SNP probe routine out of the way
x86/sev: Require the RMPREAD instruction after Zen4
x86/sev: Add support for the RMPREAD instruction
x86/sev: Prepare for using the RMPREAD instruction to access the RMP
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--   arch/x86/include/asm/cpufeatures.h |  2
-rw-r--r--   arch/x86/include/asm/msr-index.h   |  9
-rw-r--r--   arch/x86/include/asm/sev-common.h  |  1
-rw-r--r--   arch/x86/include/asm/sev.h         | 48
-rw-r--r--   arch/x86/include/asm/svm.h         |  6
5 files changed, 51 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 0e2d81763615..2cac4e18cf39 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -451,6 +451,8 @@
 #define X86_FEATURE_V_TSC_AUX		(19*32+ 9) /* Virtual TSC_AUX */
 #define X86_FEATURE_SME_COHERENT	(19*32+10) /* AMD hardware-enforced cache coherency */
 #define X86_FEATURE_DEBUG_SWAP		(19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
+#define X86_FEATURE_RMPREAD		(19*32+21) /* RMPREAD instruction */
+#define X86_FEATURE_SEGMENTED_RMP	(19*32+23) /* Segmented RMP support */
 #define X86_FEATURE_SVSM		(19*32+28) /* "svsm" SVSM present */
 #define X86_FEATURE_HV_INUSE_WR_ALLOWED	(19*32+30) /* Allow Write to in-use hypervisor-owned pages */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 3ae84c3b8e6d..9a71880eec07 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -608,6 +608,7 @@
 #define MSR_AMD_PERF_CTL		0xc0010062
 #define MSR_AMD_PERF_STATUS		0xc0010063
 #define MSR_AMD_PSTATE_DEF_BASE		0xc0010064
+#define MSR_AMD64_GUEST_TSC_FREQ	0xc0010134
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
 #define MSR_AMD_PPIN_CTL		0xc00102f0
@@ -644,6 +645,7 @@
 #define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
 #define MSR_AMD64_SVM_AVIC_DOORBELL	0xc001011b
 #define MSR_AMD64_VM_PAGE_FLUSH		0xc001011e
+#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
 #define MSR_AMD64_SEV_ES_GHCB		0xc0010130
 #define MSR_AMD64_SEV			0xc0010131
 #define MSR_AMD64_SEV_ENABLED_BIT	0
@@ -682,11 +684,12 @@
 #define MSR_AMD64_SNP_SMT_PROT		BIT_ULL(MSR_AMD64_SNP_SMT_PROT_BIT)
 #define MSR_AMD64_SNP_RESV_BIT		18
 #define MSR_AMD64_SNP_RESERVED_MASK	GENMASK_ULL(63, MSR_AMD64_SNP_RESV_BIT)
-
-#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
-
 #define MSR_AMD64_RMP_BASE		0xc0010132
 #define MSR_AMD64_RMP_END		0xc0010133
+#define MSR_AMD64_RMP_CFG		0xc0010136
+#define MSR_AMD64_SEG_RMP_ENABLED_BIT	0
+#define MSR_AMD64_SEG_RMP_ENABLED	BIT_ULL(MSR_AMD64_SEG_RMP_ENABLED_BIT)
+#define MSR_AMD64_RMP_SEGMENT_SHIFT(x)	(((x) & GENMASK_ULL(13, 8)) >> 8)
 
 #define MSR_SVSM_CAA			0xc001f000
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index 50f5666938c0..6ef92432a5ce 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -206,6 +206,7 @@ struct snp_psc_desc {
 #define GHCB_TERM_NO_SVSM		7	/* SVSM is not advertised in the secrets page */
 #define GHCB_TERM_SVSM_VMPL0		8	/* SVSM is present but has set VMPL to 0 */
 #define GHCB_TERM_SVSM_CAA		9	/* SVSM is present but CAA is not page aligned */
+#define GHCB_TERM_SECURE_TSC		10	/* Secure TSC initialization failed */
 
 #define GHCB_RESP_CODE(v)		((v) & GHCB_MSR_INFO_MASK)
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 91f08af31078..5d9685f92e5c 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -14,6 +14,7 @@
 #include <asm/insn.h>
 #include <asm/sev-common.h>
 #include <asm/coco.h>
+#include <asm/set_memory.h>
 
 #define GHCB_PROTOCOL_MIN	1ULL
 #define GHCB_PROTOCOL_MAX	2ULL
@@ -124,6 +125,9 @@ struct snp_req_data {
 #define AAD_LEN		48
 #define MSG_HDR_VER	1
 
+#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
+#define SNP_REQ_RETRY_DELAY		(2*HZ)
+
 /* See SNP spec SNP_GUEST_REQUEST section for the structure */
 enum msg_type {
 	SNP_MSG_TYPE_INVALID = 0,
@@ -142,6 +146,9 @@ enum msg_type {
 	SNP_MSG_VMRK_REQ,
 	SNP_MSG_VMRK_RSP,
 
+	SNP_MSG_TSC_INFO_REQ = 17,
+	SNP_MSG_TSC_INFO_RSP,
+
 	SNP_MSG_TYPE_MAX
 };
 
@@ -170,9 +177,20 @@ struct snp_guest_msg {
 	u8 payload[PAGE_SIZE - sizeof(struct snp_guest_msg_hdr)];
 } __packed;
 
-struct sev_guest_platform_data {
-	u64 secrets_gpa;
-};
+#define SNP_TSC_INFO_REQ_SZ	128
+
+struct snp_tsc_info_req {
+	u8 rsvd[SNP_TSC_INFO_REQ_SZ];
+} __packed;
+
+struct snp_tsc_info_resp {
+	u32 status;
+	u32 rsvd1;
+	u64 tsc_scale;
+	u64 tsc_offset;
+	u32 tsc_factor;
+	u8 rsvd2[100];
+} __packed;
 
 struct snp_guest_req {
 	void *req_buf;
@@ -253,6 +271,7 @@ struct snp_msg_desc {
 	u32 *os_area_msg_seqno;
 
 	u8 *vmpck;
+	int vmpck_id;
 };
 
 /*
@@ -445,8 +464,6 @@ void snp_set_wakeup_secondary_cpu(void);
 bool snp_init(struct boot_params *bp);
 void __noreturn snp_abort(void);
 void snp_dmi_setup(void);
-int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input,
-			    struct snp_guest_request_ioctl *rio);
 int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, struct svsm_attest_call *input);
 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 snp_get_unsupported_features(u64 status);
@@ -458,6 +475,15 @@ void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot);
 void snp_kexec_finish(void);
 void snp_kexec_begin(void);
 
+int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id);
+struct snp_msg_desc *snp_msg_alloc(void);
+void snp_msg_free(struct snp_msg_desc *mdesc);
+int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
+			   struct snp_guest_request_ioctl *rio);
+
+void __init snp_secure_tsc_prepare(void);
+void __init snp_secure_tsc_init(void);
+
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
 
 #define snp_vmpl 0
@@ -480,11 +506,6 @@ static inline void snp_set_wakeup_secondary_cpu(void) { }
 static inline bool snp_init(struct boot_params *bp) { return false; }
 static inline void snp_abort(void) { }
 static inline void snp_dmi_setup(void) { }
-static inline int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input,
-					  struct snp_guest_request_ioctl *rio)
-{
-	return -ENOTTY;
-}
 static inline int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
 					    struct svsm_attest_call *input)
 {
 	return -ENOTTY;
@@ -498,6 +519,13 @@ static inline int prepare_pte_enc(struct pte_enc_desc *d) { return 0; }
 static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { }
 static inline void snp_kexec_finish(void) { }
 static inline void snp_kexec_begin(void) { }
+static inline int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id) { return -1; }
+static inline struct snp_msg_desc *snp_msg_alloc(void) { return NULL; }
+static inline void snp_msg_free(struct snp_msg_desc *mdesc) { }
+static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
+					 struct snp_guest_request_ioctl *rio) { return -ENODEV; }
+static inline void __init snp_secure_tsc_prepare(void) { }
+static inline void __init snp_secure_tsc_init(void) { }
 
 #endif	/* CONFIG_AMD_MEM_ENCRYPT */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 2b59b9951c90..92e18798f197 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -417,7 +417,9 @@ struct sev_es_save_area {
 	u8 reserved_0x298[80];
 	u32 pkru;
 	u32 tsc_aux;
-	u8 reserved_0x2f0[24];
+	u64 tsc_scale;
+	u64 tsc_offset;
+	u8 reserved_0x300[8];
 	u64 rcx;
 	u64 rdx;
 	u64 rbx;
@@ -564,7 +566,7 @@ static inline void __unused_size_checks(void)
 	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x1c0);
 	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x248);
 	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x298);
-	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x2f0);
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x300);
 	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x320);
 	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x380);
 	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x3f0);
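As for the Secure TSC side of the series: the sev.h hunk above declares the
TSC_INFO guest-message bodies (message types SNP_MSG_TSC_INFO_REQ = 17 and
SNP_MSG_TSC_INFO_RSP), with the response carrying the tsc_scale, tsc_offset
and tsc_factor values the guest uses when setting up Secure TSC. The
standalone sketch below simply mirrors those declarations with fixed-width
types to make the 128-byte wire layout explicit; the offsets are read off the
struct definitions above, not quoted from the SNP spec:

/*
 * Standalone layout sketch of the TSC_INFO request/response bodies declared
 * in arch/x86/include/asm/sev.h above. Compile-time checks only; not the
 * kernel's definitions themselves.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SNP_TSC_INFO_REQ_SZ	128

struct snp_tsc_info_req {
	uint8_t rsvd[SNP_TSC_INFO_REQ_SZ];	/* request body is all reserved */
} __attribute__((packed));

struct snp_tsc_info_resp {
	uint32_t status;		/* offset  0 */
	uint32_t rsvd1;			/* offset  4 */
	uint64_t tsc_scale;		/* offset  8 */
	uint64_t tsc_offset;		/* offset 16 */
	uint32_t tsc_factor;		/* offset 24 */
	uint8_t  rsvd2[100];		/* offset 28, pads the body to 128 bytes */
} __attribute__((packed));

_Static_assert(sizeof(struct snp_tsc_info_req) == 128, "request body is 128 bytes");
_Static_assert(sizeof(struct snp_tsc_info_resp) == 128, "response body is 128 bytes");
_Static_assert(offsetof(struct snp_tsc_info_resp, tsc_offset) == 16, "tsc_offset at byte 16");

int main(void)
{
	printf("TSC_INFO req %zu bytes, resp %zu bytes\n",
	       sizeof(struct snp_tsc_info_req), sizeof(struct snp_tsc_info_resp));
	return 0;
}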