author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-08-17 06:53:15 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-08-17 06:53:15 -0700
commit | 8d561baae505bab6b3f133e10dc48e27e4505cbe (patch)
tree | 445f78fea719ab249e48870e0b8dfb4b0cb86576 /arch/x86/kernel/cpu
parent | 0a9ee9ce49a66bfdf12e34130b45fafe170dfc84 (diff)
parent | ed6c4b657bca3b39f7b11cba1405931aeb490f3d (diff)
Merge tag 'x86_urgent_for_v6.17_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:
- Remove a transitional asm/cpuid.h header which was added only as a
fallback during cpuid helpers reorg
- Initialize reserved fields in the SVSM page validation calls structure
  to zero in order to allow for future structure extensions (a zero-init
  sketch follows this list)
- Have the sev-guest driver's buffers used in encryption operations be
  in linear mapping space, as the encryption operation can be offloaded
  to an accelerator (a buffer-allocation sketch follows this list)
- Have writes to read-only MSRs in an AMD SNP guest trap to the
  hypervisor, as is usually done. This improves the guest user
  experience: the offending write simply raises a #GP instead of
  terminating the guest
- Do not output AVX512 elapsed time for kernel threads because the data
  is wrong, and fix a NULL pointer dereference in the process (a
  kernel-thread check is sketched after this list)
- Adjust the SRSO mitigation selection to the new attack vectors
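
The SVSM fix above follows a common forward-compatibility pattern: zero the whole call entry before filling in the defined fields, so reserved bits are guaranteed to be 0 even if a later protocol revision gives them meaning. A minimal sketch of the idea; the structure below is hypothetical, not the real SVSM page-validation layout:

```c
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical call entry with reserved bits; not the real SVSM layout. */
struct pvalidate_entry {
	u64 page_size : 2;	/* 0 = 4K, 1 = 2M */
	u64 action    : 1;	/* 1 = validate */
	u64 reserved  : 9;	/* must be zero today, may gain meaning later */
	u64 pfn       : 52;
};

static void fill_entry(struct pvalidate_entry *e, u64 pfn)
{
	/* Clear the whole entry first so the reserved bits stay zero even
	 * if a future protocol revision defines new fields there. */
	memset(e, 0, sizeof(*e));
	e->page_size = 0;
	e->action    = 1;
	e->pfn       = pfn;
}
```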
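
The linear-mapping requirement in the sev-guest fix comes from how the kernel crypto API addresses data: buffers are passed as scatterlists (page + offset), and with CONFIG_VMAP_STACK a stack buffer does not live in the linear map, so it cannot be described that way or handed to a DMA-capable offload engine. A minimal sketch of the pattern, using a hypothetical helper rather than the driver's actual code:

```c
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate a buffer that is safe to hand to the
 * crypto API (and thus, potentially, to a hardware offload engine). */
static void *alloc_crypto_buf(struct scatterlist *sg, size_t len)
{
	/* A stack array here would break with CONFIG_VMAP_STACK: the
	 * vmalloc'ed stack is not part of the linear map, so
	 * virt_to_page() on it is invalid.  kmalloc() memory always is. */
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;

	sg_init_one(sg, buf, len);	/* valid page/offset description */
	return buf;			/* caller kfree()s when done */
}
```

kmalloc()-backed memory is always linear-mapped, which is why moving such buffers off the stack satisfies the requirement regardless of whether the cipher runs in software or on an accelerator.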
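
The AVX512 reporting fix follows the usual guard for state that only user tasks carry: bail out early for kernel threads instead of dereferencing FPU context they do not have. The function below is an illustrative sketch, not the upstream change itself:

```c
#include <linux/sched.h>
#include <linux/seq_file.h>

/* Illustrative sketch: skip AVX-512 usage reporting for kernel threads,
 * which have no user FPU context to inspect (and whose "elapsed time
 * since last use" would be meaningless anyway). */
static void report_avx512_usage(struct seq_file *m, struct task_struct *task)
{
	if (task->flags & PF_KTHREAD)
		return;		/* no user xstate; avoid a NULL dereference */

	/* ... safe to read the task's extended FPU state here ... */
}
```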
* tag 'x86_urgent_for_v6.17_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/cpuid: Remove transitional <asm/cpuid.h> header
x86/sev: Ensure SVSM reserved fields in a page validation entry are initialized to zero
virt: sev-guest: Satisfy linear mapping requirement in get_derived_key()
x86/sev: Improve handling of writes to intercepted TSC MSRs
x86/fpu: Fix NULL dereference in avx512_status()
x86/bugs: Select best SRSO mitigation
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r-- | arch/x86/kernel/cpu/bugs.c | 13
1 file changed, 11 insertions(+), 2 deletions(-)
```diff
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index b74bf937cd9f..2186a771b9fc 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -386,7 +386,6 @@ static bool __init should_mitigate_vuln(unsigned int bug)
 
 	case X86_BUG_SPECTRE_V2:
 	case X86_BUG_RETBLEED:
-	case X86_BUG_SRSO:
 	case X86_BUG_L1TF:
 	case X86_BUG_ITS:
 		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
@@ -3184,8 +3183,18 @@ static void __init srso_select_mitigation(void)
 	}
 
 	if (srso_mitigation == SRSO_MITIGATION_AUTO) {
-		if (should_mitigate_vuln(X86_BUG_SRSO)) {
+		/*
+		 * Use safe-RET if user->kernel or guest->host protection is
+		 * required. Otherwise the 'microcode' mitigation is sufficient
+		 * to protect the user->user and guest->guest vectors.
+		 */
+		if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
+		    (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
+		     !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {
 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+		} else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
+			   cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
+			srso_mitigation = SRSO_MITIGATION_MICROCODE;
 		} else {
 			srso_mitigation = SRSO_MITIGATION_NONE;
 			return;
```