Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/amd.c            |  14
-rw-r--r--  arch/x86/kernel/cpu/bugs.c           | 465
-rw-r--r--  arch/x86/kernel/cpu/common.c         |   7
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c |  14
4 files changed, 350 insertions, 150 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index b750ac008b78..a5ece6ebe8a7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -530,9 +530,11 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 	}
 
 	bsp_determine_snp(c);
-	tsa_init(c);
+	if (cpu_has(c, X86_FEATURE_GP_ON_USER_CPUID))
+		setup_force_cpu_cap(X86_FEATURE_CPUID_FAULT);
+
 	return;
 
 warn:
@@ -974,6 +976,16 @@ static void init_amd_zen2(struct cpuinfo_x86 *c)
 	init_spectral_chicken(c);
 	fix_erratum_1386(c);
 	zen2_zenbleed_check(c);
+
+	/* Disable RDSEED on AMD Cyan Skillfish because of a hardware erratum. */
+	if (c->x86_model == 0x47 && c->x86_stepping == 0x0) {
+		clear_cpu_cap(c, X86_FEATURE_RDSEED);
+		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
+		pr_emerg("RDSEED is not reliable on this platform; disabling.\n");
+	}
+
+	/* Some client parts erroneously advertise INVLPGB in CPUID; clear it. */
+	clear_cpu_cap(c, X86_FEATURE_INVLPGB);
 }
 
 static void init_amd_zen3(struct cpuinfo_x86 *c)
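The Cyan Skillfish quirk hides RDSEED twice: clear_cpu_cap() removes it from the kernel's view, and clearing bit 18 of MSR_AMD64_CPUID_FN_7 masks the matching RDSEED bit (CPUID leaf 7, EBX bit 18) in CPUID itself. As a standalone illustration (not part of the patch), a userspace probe of that CPUID bit, using the compiler-provided __get_cpuid_count() helper, could look like this:

	/* Sketch: read CPUID.(EAX=7,ECX=0):EBX and test the RDSEED bit. */
	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
			return 1;	/* leaf 7 not supported */

		printf("RDSEED %s\n", (ebx & (1u << 18)) ? "enumerated" : "hidden");
		return 0;
	}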
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index f4d3abb12317..b74bf937cd9f 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -115,10 +115,9 @@ void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
 
 static void __init set_return_thunk(void *thunk)
 {
-	if (x86_return_thunk != __x86_return_thunk)
-		pr_warn("x86/bugs: return thunk changed\n");
-
 	x86_return_thunk = thunk;
+
+	pr_info("active return thunk: %ps\n", thunk);
 }
 
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
@@ -190,6 +189,39 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
 EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
 
+#undef pr_fmt
+#define pr_fmt(fmt) "mitigations: " fmt
+
+static void __init cpu_print_attack_vectors(void)
+{
+	pr_info("Enabled attack vectors: ");
+
+	if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
+		pr_cont("user_kernel, ");
+
+	if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
+		pr_cont("user_user, ");
+
+	if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
+		pr_cont("guest_host, ");
+
+	if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
+		pr_cont("guest_guest, ");
+
+	pr_cont("SMT mitigations: ");
+
+	switch (smt_mitigations) {
+	case SMT_MITIGATIONS_OFF:
+		pr_cont("off\n");
+		break;
+	case SMT_MITIGATIONS_AUTO:
+		pr_cont("auto\n");
+		break;
+	case SMT_MITIGATIONS_ON:
+		pr_cont("on\n");
+	}
+}
+
 void __init cpu_select_mitigations(void)
 {
 	/*
@@ -210,6 +242,8 @@ void __init cpu_select_mitigations(void)
 
 	x86_arch_cap_msr = x86_read_arch_cap_msr();
 
+	cpu_print_attack_vectors();
+
 	/* Select the proper CPU mitigations before patching alternatives: */
 	spectre_v1_select_mitigation();
 	spectre_v2_select_mitigation();
@@ -333,6 +367,62 @@ static void x86_amd_ssb_disable(void)
 #undef pr_fmt
 #define pr_fmt(fmt) "MDS: " fmt
 
+/*
+ * Returns true if the vulnerability should be mitigated based on the
+ * selected attack vector controls.
+ *
+ * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
+ */
+static bool __init should_mitigate_vuln(unsigned int bug)
+{
+	switch (bug) {
+	/*
+	 * The only runtime-selected spectre_v1 mitigations in the kernel are
+	 * related to SWAPGS protection on kernel entry. Therefore, protection
+	 * is only required for the user->kernel attack vector.
+	 */
+	case X86_BUG_SPECTRE_V1:
+		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL);
+
+	case X86_BUG_SPECTRE_V2:
+	case X86_BUG_RETBLEED:
+	case X86_BUG_SRSO:
+	case X86_BUG_L1TF:
+	case X86_BUG_ITS:
+		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
+		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
+
+	case X86_BUG_SPECTRE_V2_USER:
+		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
+		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
+
+	/*
+	 * All the vulnerabilities below allow potentially leaking data
+	 * across address spaces. Therefore, mitigation is required if
+	 * any of these four attack vectors is enabled.
+	 */
+	case X86_BUG_MDS:
+	case X86_BUG_TAA:
+	case X86_BUG_MMIO_STALE_DATA:
+	case X86_BUG_RFDS:
+	case X86_BUG_SRBDS:
+		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
+		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
+		       cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
+		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
+
+	case X86_BUG_GDS:
+		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
+		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
+		       cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
+		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
+		       (smt_mitigations != SMT_MITIGATIONS_OFF);
+	default:
+		WARN(1, "Unknown bug %x\n", bug);
+		return false;
+	}
+}
+
 /* Default mitigation for MDS-affected CPUs */
 static enum mds_mitigations mds_mitigation __ro_after_init =
 	IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
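The switch in should_mitigate_vuln() above is effectively a table mapping each vulnerability to the set of attack vectors that warrant mitigating it. The same mapping restated as data, in a standalone sketch with hypothetical names (bugs.c keeps the open-coded switch, partly because GDS also folds in the SMT state):

	/* Sketch: the bug -> attack-vector mapping as data. */
	#include <stdbool.h>

	enum vector {
		USER_KERNEL = 1 << 0,
		USER_USER   = 1 << 1,
		GUEST_HOST  = 1 << 2,
		GUEST_GUEST = 1 << 3,
	};

	struct vuln_map {
		const char   *name;
		unsigned int  vectors;	/* mitigate if any of these is enabled */
	};

	static const struct vuln_map table[] = {
		{ "spectre_v1",      USER_KERNEL },
		{ "retbleed",        USER_KERNEL | GUEST_HOST },
		{ "spectre_v2_user", USER_USER | GUEST_GUEST },
		{ "mds",             USER_KERNEL | USER_USER | GUEST_HOST | GUEST_GUEST },
	};

	static bool should_mitigate(const struct vuln_map *v, unsigned int enabled)
	{
		return (v->vectors & enabled) != 0;
	}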
@@ -386,13 +476,17 @@ static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
 
 static void __init mds_select_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
+	if (!boot_cpu_has_bug(X86_BUG_MDS)) {
 		mds_mitigation = MDS_MITIGATION_OFF;
 		return;
 	}
 
-	if (mds_mitigation == MDS_MITIGATION_AUTO)
-		mds_mitigation = MDS_MITIGATION_FULL;
+	if (mds_mitigation == MDS_MITIGATION_AUTO) {
+		if (should_mitigate_vuln(X86_BUG_MDS))
+			mds_mitigation = MDS_MITIGATION_FULL;
+		else
+			mds_mitigation = MDS_MITIGATION_OFF;
+	}
 
 	if (mds_mitigation == MDS_MITIGATION_OFF)
 		return;
@@ -402,7 +496,7 @@ static void __init mds_select_mitigation(void)
 
 static void __init mds_update_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
+	if (!boot_cpu_has_bug(X86_BUG_MDS))
 		return;
 
 	/* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
@@ -423,7 +517,7 @@ static void __init mds_apply_mitigation(void)
 	    mds_mitigation == MDS_MITIGATION_VMWERV) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
-		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
+		    (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
 			cpu_smt_disable(false);
 	}
 }
@@ -479,12 +573,13 @@ static void __init taa_select_mitigation(void)
 		return;
 	}
 
-	if (cpu_mitigations_off())
-		taa_mitigation = TAA_MITIGATION_OFF;
-
 	/* Microcode will be checked in taa_update_mitigation(). */
-	if (taa_mitigation == TAA_MITIGATION_AUTO)
-		taa_mitigation = TAA_MITIGATION_VERW;
+	if (taa_mitigation == TAA_MITIGATION_AUTO) {
+		if (should_mitigate_vuln(X86_BUG_TAA))
+			taa_mitigation = TAA_MITIGATION_VERW;
+		else
+			taa_mitigation = TAA_MITIGATION_OFF;
+	}
 
 	if (taa_mitigation != TAA_MITIGATION_OFF)
 		verw_clear_cpu_buf_mitigation_selected = true;
@@ -492,7 +587,7 @@ static void __init taa_update_mitigation(void)
 {
-	if (!taa_vulnerable() || cpu_mitigations_off())
+	if (!taa_vulnerable())
 		return;
 
 	if (verw_clear_cpu_buf_mitigation_selected)
@@ -533,7 +628,7 @@ static void __init taa_apply_mitigation(void)
 		 */
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 
-		if (taa_nosmt || cpu_mitigations_auto_nosmt())
+		if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
 			cpu_smt_disable(false);
 	}
 }
@@ -579,8 +674,12 @@ static void __init mmio_select_mitigation(void)
 	}
 
 	/* Microcode will be checked in mmio_update_mitigation(). */
-	if (mmio_mitigation == MMIO_MITIGATION_AUTO)
-		mmio_mitigation = MMIO_MITIGATION_VERW;
+	if (mmio_mitigation == MMIO_MITIGATION_AUTO) {
+		if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA))
+			mmio_mitigation = MMIO_MITIGATION_VERW;
+		else
+			mmio_mitigation = MMIO_MITIGATION_OFF;
+	}
 
 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
 		return;
@@ -595,7 +694,7 @@ static void __init mmio_update_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || cpu_mitigations_off())
+	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
 		return;
 
 	if (verw_clear_cpu_buf_mitigation_selected)
@@ -643,7 +742,7 @@ static void __init mmio_apply_mitigation(void)
 	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
 		static_branch_enable(&cpu_buf_idle_clear);
 
-	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
+	if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
 		cpu_smt_disable(false);
 }
@@ -684,13 +783,17 @@ static inline bool __init verw_clears_cpu_reg_file(void)
 
 static void __init rfds_select_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
+	if (!boot_cpu_has_bug(X86_BUG_RFDS)) {
 		rfds_mitigation = RFDS_MITIGATION_OFF;
 		return;
 	}
 
-	if (rfds_mitigation == RFDS_MITIGATION_AUTO)
-		rfds_mitigation = RFDS_MITIGATION_VERW;
+	if (rfds_mitigation == RFDS_MITIGATION_AUTO) {
+		if (should_mitigate_vuln(X86_BUG_RFDS))
+			rfds_mitigation = RFDS_MITIGATION_VERW;
+		else
+			rfds_mitigation = RFDS_MITIGATION_OFF;
+	}
 
 	if (rfds_mitigation == RFDS_MITIGATION_OFF)
 		return;
@@ -701,7 +804,7 @@ static void __init rfds_update_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off())
+	if (!boot_cpu_has_bug(X86_BUG_RFDS))
 		return;
 
 	if (verw_clear_cpu_buf_mitigation_selected)
@@ -802,13 +905,19 @@ void update_srbds_msr(void)
 
 static void __init srbds_select_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_SRBDS) || cpu_mitigations_off()) {
+	if (!boot_cpu_has_bug(X86_BUG_SRBDS)) {
 		srbds_mitigation = SRBDS_MITIGATION_OFF;
 		return;
 	}
 
-	if (srbds_mitigation == SRBDS_MITIGATION_AUTO)
-		srbds_mitigation = SRBDS_MITIGATION_FULL;
+	if (srbds_mitigation == SRBDS_MITIGATION_AUTO) {
+		if (should_mitigate_vuln(X86_BUG_SRBDS))
+			srbds_mitigation = SRBDS_MITIGATION_FULL;
+		else {
+			srbds_mitigation = SRBDS_MITIGATION_OFF;
+			return;
+		}
+	}
 
 	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
@@ -956,12 +1065,15 @@ static void __init gds_select_mitigation(void)
 		return;
 	}
 
-	if (cpu_mitigations_off())
-		gds_mitigation = GDS_MITIGATION_OFF; /* Will verify below that mitigation _can_ be disabled */
-
-	if (gds_mitigation == GDS_MITIGATION_AUTO)
-		gds_mitigation = GDS_MITIGATION_FULL;
+	if (gds_mitigation == GDS_MITIGATION_AUTO) {
+		if (should_mitigate_vuln(X86_BUG_GDS))
+			gds_mitigation = GDS_MITIGATION_FULL;
+		else {
+			gds_mitigation = GDS_MITIGATION_OFF;
+			return;
+		}
+	}
 
 	/* No microcode */
 	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
@@ -1067,13 +1179,16 @@ static bool smap_works_speculatively(void)
 
 static void __init spectre_v1_select_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+
+	if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1))
 		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
 }
 
 static void __init spectre_v1_apply_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
 		return;
 
 	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
@@ -1124,6 +1239,20 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
 
 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
 
+/* Depends on spectre_v2 mitigation selected already */
+static inline bool cdt_possible(enum spectre_v2_mitigation mode)
+{
+	if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) ||
+	    !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
+		return false;
+
+	if (mode == SPECTRE_V2_RETPOLINE ||
+	    mode == SPECTRE_V2_EIBRS_RETPOLINE)
+		return true;
+
+	return false;
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt) "RETBleed: " fmt
 
@@ -1162,6 +1291,21 @@ static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
 
 static int __ro_after_init retbleed_nosmt = false;
 
+enum srso_mitigation {
+	SRSO_MITIGATION_NONE,
+	SRSO_MITIGATION_AUTO,
+	SRSO_MITIGATION_UCODE_NEEDED,
+	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
+	SRSO_MITIGATION_MICROCODE,
+	SRSO_MITIGATION_NOSMT,
+	SRSO_MITIGATION_SAFE_RET,
+	SRSO_MITIGATION_IBPB,
+	SRSO_MITIGATION_IBPB_ON_VMEXIT,
+	SRSO_MITIGATION_BP_SPEC_REDUCE,
+};
+
+static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
+
 static int __init retbleed_parse_cmdline(char *str)
 {
 	if (!str)
@@ -1204,7 +1348,7 @@ early_param("retbleed", retbleed_parse_cmdline);
 
 static void __init retbleed_select_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) {
+	if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) {
 		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
 		return;
 	}
@@ -1241,6 +1385,11 @@ static void __init retbleed_select_mitigation(void)
 	if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
 		return;
 
+	if (!should_mitigate_vuln(X86_BUG_RETBLEED)) {
+		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
+		return;
+	}
+
 	/* Intel mitigation selected in retbleed_update_mitigation() */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
 	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
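cdt_possible() gives the retbleed and ITS stuffing paths a single answer to the question "can call-depth tracking be used with the selected spectre_v2 mode?". A standalone restatement with the two Kconfig checks lifted into parameters (hypothetical names, illustration only):

	/* Sketch: call-depth stuffing needs a retpoline-style spectre_v2 mode. */
	#include <stdbool.h>

	enum v2 { V2_NONE, V2_RETPOLINE, V2_EIBRS, V2_EIBRS_RETPOLINE };

	static bool cdt_ok(enum v2 mode, bool cfg_cdt, bool cfg_retpoline)
	{
		if (!cfg_cdt || !cfg_retpoline)
			return false;	/* both options must be compiled in */

		return mode == V2_RETPOLINE || mode == V2_EIBRS_RETPOLINE;
	}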
@@ -1251,35 +1400,36 @@ static void __init retbleed_select_mitigation(void)
 			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
 		else
 			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
+	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+		/* Final mitigation depends on spectre-v2 selection */
+		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+		else if (boot_cpu_has(X86_FEATURE_IBRS))
+			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+		else
+			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
 	}
 }
 
 static void __init retbleed_update_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+	if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
 		return;
 
-	if (retbleed_mitigation == RETBLEED_MITIGATION_NONE)
-		goto out;
+	/* ITS can also enable stuffing */
+	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
+		retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
 
-	/*
-	 * retbleed=stuff is only allowed on Intel. If stuffing can't be used
-	 * then a different mitigation will be selected below.
-	 *
-	 * its=stuff will also attempt to enable stuffing.
-	 */
-	if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF ||
-	    its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) {
-		if (spectre_v2_enabled != SPECTRE_V2_RETPOLINE) {
-			pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
-			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
-		} else {
-			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
-				pr_info("Retbleed mitigation updated to stuffing\n");
+	/* If SRSO is using IBPB, that works for retbleed too */
+	if (srso_mitigation == SRSO_MITIGATION_IBPB)
+		retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
 
-			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
-		}
+	if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
+	    !cdt_possible(spectre_v2_enabled)) {
+		pr_err("WARNING: retbleed=stuff depends on retpoline\n");
+		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
 	}
+
 	/*
 	 * Let IBRS trump all on Intel without affecting the effects of the
 	 * retbleed= cmdline option except for call depth based stuffing
@@ -1298,15 +1448,11 @@ static void __init retbleed_update_mitigation(void)
 			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
 				pr_err(RETBLEED_INTEL_MSG);
 		}
-		/* If nothing has set the mitigation yet, default to NONE. */
-		if (retbleed_mitigation == RETBLEED_MITIGATION_AUTO)
-			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
 	}
-out:
+
 	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
 }
-
 static void __init retbleed_apply_mitigation(void)
 {
 	bool mitigate_smt = false;
@@ -1362,7 +1508,7 @@ static void __init retbleed_apply_mitigation(void)
 	}
 
 	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
-	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
+	    (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
 		cpu_smt_disable(false);
 }
@@ -1407,13 +1553,17 @@ early_param("indirect_target_selection", its_parse_cmdline);
 
 static void __init its_select_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
+	if (!boot_cpu_has_bug(X86_BUG_ITS)) {
 		its_mitigation = ITS_MITIGATION_OFF;
 		return;
 	}
 
-	if (its_mitigation == ITS_MITIGATION_AUTO)
-		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+	if (its_mitigation == ITS_MITIGATION_AUTO) {
+		if (should_mitigate_vuln(X86_BUG_ITS))
+			its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+		else
+			its_mitigation = ITS_MITIGATION_OFF;
+	}
 
 	if (its_mitigation == ITS_MITIGATION_OFF)
 		return;
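The update phase lets retbleed and ITS settle on stuffing together: ITS selecting RETPOLINE_STUFF pulls retbleed to STUFF, and both back off when cdt_possible() fails. That cross-dependency, modeled as a standalone sketch with hypothetical names (illustration only):

	/* Sketch: how the update phase reconciles retbleed and ITS. */
	#include <stdbool.h>

	enum rb  { RB_NONE, RB_STUFF, RB_IBPB };
	enum its { ITS_OFF, ITS_ALIGNED_THUNKS, ITS_RETPOLINE_STUFF };

	static void reconcile(enum rb *rb, enum its *its, bool cdt_ok)
	{
		/* ITS choosing stuffing implies retbleed stuffing. */
		if (*its == ITS_RETPOLINE_STUFF)
			*rb = RB_STUFF;

		/* No retpoline-based spectre_v2 mode: both must fall back. */
		if (!cdt_ok) {
			if (*rb == RB_STUFF)
				*rb = RB_NONE;
			if (*its == ITS_RETPOLINE_STUFF)
				*its = ITS_ALIGNED_THUNKS;
		}
	}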
@@ -1444,15 +1594,17 @@ static void __init its_select_mitigation(void)
 
 static void __init its_update_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off())
+	if (!boot_cpu_has_bug(X86_BUG_ITS))
 		return;
 
 	switch (spectre_v2_enabled) {
 	case SPECTRE_V2_NONE:
-		pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
+		if (its_mitigation != ITS_MITIGATION_OFF)
+			pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
 		its_mitigation = ITS_MITIGATION_OFF;
 		break;
 	case SPECTRE_V2_RETPOLINE:
+	case SPECTRE_V2_EIBRS_RETPOLINE:
 		/* Retpoline+CDT mitigates ITS */
 		if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
 			its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
@@ -1466,13 +1618,8 @@ static void __init its_update_mitigation(void)
 		break;
 	}
 
-	/*
-	 * retbleed_update_mitigation() will try to do stuffing if its=stuff.
-	 * If it can't, such as if spectre_v2!=retpoline, then fall back to
-	 * aligned thunks.
-	 */
 	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
-	    retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
+	    !cdt_possible(spectre_v2_enabled))
 		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
 
 	pr_info("%s\n", its_strings[its_mitigation]);
@@ -1480,15 +1627,24 @@ static void __init its_update_mitigation(void)
 
 static void __init its_apply_mitigation(void)
 {
-	/* its=stuff forces retbleed stuffing and is enabled there. */
-	if (its_mitigation != ITS_MITIGATION_ALIGNED_THUNKS)
-		return;
-
-	if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
-		setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
+	switch (its_mitigation) {
+	case ITS_MITIGATION_OFF:
+	case ITS_MITIGATION_AUTO:
+	case ITS_MITIGATION_VMEXIT_ONLY:
+		break;
+	case ITS_MITIGATION_ALIGNED_THUNKS:
+		if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
+			setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
 
-	setup_force_cpu_cap(X86_FEATURE_RETHUNK);
-	set_return_thunk(its_return_thunk);
+		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		set_return_thunk(its_return_thunk);
+		break;
+	case ITS_MITIGATION_RETPOLINE_STUFF:
+		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
+		set_return_thunk(call_depth_return_thunk);
+		break;
+	}
 }
 
 #undef pr_fmt
@@ -1536,28 +1692,43 @@ early_param("tsa", tsa_parse_cmdline);
 
 static void __init tsa_select_mitigation(void)
 {
-	if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
+	if (!boot_cpu_has_bug(X86_BUG_TSA)) {
 		tsa_mitigation = TSA_MITIGATION_NONE;
 		return;
 	}
 
+	if (tsa_mitigation == TSA_MITIGATION_AUTO) {
+		bool vm = false, uk = false;
+
+		tsa_mitigation = TSA_MITIGATION_NONE;
+
+		if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
+		    cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) {
+			tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
+			uk = true;
+		}
+
+		if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
+		    cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
+			tsa_mitigation = TSA_MITIGATION_VM;
+			vm = true;
+		}
+
+		if (uk && vm)
+			tsa_mitigation = TSA_MITIGATION_FULL;
+	}
+
 	if (tsa_mitigation == TSA_MITIGATION_NONE)
 		return;
 
-	if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) {
+	if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
 		tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
-		goto out;
-	}
-
-	if (tsa_mitigation == TSA_MITIGATION_AUTO)
-		tsa_mitigation = TSA_MITIGATION_FULL;
 
 	/*
	 * No need to set verw_clear_cpu_buf_mitigation_selected - it
	 * doesn't fit all the cases here, and it is not needed because
	 * this is the only VERW-based mitigation on AMD.
	 */
-out:
 	pr_info("%s\n", tsa_strings[tsa_mitigation]);
 }
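tsa_select_mitigation() folds the four attack vectors into two flags, user-space exposure (uk) and guest exposure (vm), then picks the narrowest TSA mode covering both. The same decision as a standalone sketch (hypothetical names, illustration only):

	/* Sketch: collapsing attack vectors into a TSA mode. */
	#include <stdbool.h>

	enum tsa { TSA_NONE, TSA_USER_KERNEL, TSA_VM, TSA_FULL };

	static enum tsa pick_tsa(bool user_kernel, bool user_user,
				 bool guest_host, bool guest_guest)
	{
		bool uk = user_kernel || user_user;
		bool vm = guest_host || guest_guest;

		if (uk && vm)
			return TSA_FULL;
		if (vm)
			return TSA_VM;
		if (uk)
			return TSA_USER_KERNEL;
		return TSA_NONE;
	}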
@@ -1701,7 +1872,7 @@ static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void)
 	char arg[20];
 	int ret, i;
 
-	if (cpu_mitigations_off() || !IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2))
+	if (!IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2))
 		return SPECTRE_V2_USER_CMD_NONE;
 
 	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
@@ -1739,6 +1910,13 @@ static void __init spectre_v2_user_select_mitigation(void)
 		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
 		break;
 	case SPECTRE_V2_USER_CMD_AUTO:
+		if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER))
+			break;
+		spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
+		if (smt_mitigations == SMT_MITIGATIONS_OFF)
+			break;
+		spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
+		break;
 	case SPECTRE_V2_USER_CMD_PRCTL:
 		spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
 		spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
@@ -1890,8 +2068,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	int ret, i;
 
 	cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
-	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
-	    cpu_mitigations_off())
+	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
 		return SPECTRE_V2_CMD_NONE;
 
 	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
@@ -2094,11 +2271,20 @@ early_param("spectre_bhi", spectre_bhi_parse_cmdline);
 
 static void __init bhi_select_mitigation(void)
 {
-	if (!boot_cpu_has(X86_BUG_BHI) || cpu_mitigations_off())
+	if (!boot_cpu_has(X86_BUG_BHI))
 		bhi_mitigation = BHI_MITIGATION_OFF;
 
-	if (bhi_mitigation == BHI_MITIGATION_AUTO)
-		bhi_mitigation = BHI_MITIGATION_ON;
+	if (bhi_mitigation != BHI_MITIGATION_AUTO)
+		return;
+
+	if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) {
+		if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
+			bhi_mitigation = BHI_MITIGATION_ON;
+		else
+			bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
+	} else {
+		bhi_mitigation = BHI_MITIGATION_OFF;
+	}
 }
 
 static void __init bhi_update_mitigation(void)
@@ -2154,8 +2340,11 @@ static void __init spectre_v2_select_mitigation(void)
 	case SPECTRE_V2_CMD_NONE:
 		return;
 
-	case SPECTRE_V2_CMD_FORCE:
 	case SPECTRE_V2_CMD_AUTO:
+		if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2))
+			break;
+		fallthrough;
+	case SPECTRE_V2_CMD_FORCE:
 		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
 			spectre_v2_enabled = SPECTRE_V2_EIBRS;
 			break;
@@ -2209,7 +2398,7 @@ static void __init spectre_v2_update_mitigation(void)
 		}
 	}
 
-	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && !cpu_mitigations_off())
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
 		pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
 }
@@ -2861,17 +3050,23 @@ static void override_cache_bits(struct cpuinfo_x86 *c)
 
 static void __init l1tf_select_mitigation(void)
 {
-	if (!boot_cpu_has_bug(X86_BUG_L1TF) || cpu_mitigations_off()) {
+	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
 		l1tf_mitigation = L1TF_MITIGATION_OFF;
 		return;
 	}
 
-	if (l1tf_mitigation == L1TF_MITIGATION_AUTO) {
-		if (cpu_mitigations_auto_nosmt())
-			l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
-		else
-			l1tf_mitigation = L1TF_MITIGATION_FLUSH;
+	if (l1tf_mitigation != L1TF_MITIGATION_AUTO)
+		return;
+
+	if (!should_mitigate_vuln(X86_BUG_L1TF)) {
+		l1tf_mitigation = L1TF_MITIGATION_OFF;
+		return;
 	}
+
+	if (smt_mitigations == SMT_MITIGATIONS_ON)
+		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+	else
+		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
 }
 
 static void __init l1tf_apply_mitigation(void)
@@ -2945,31 +3140,18 @@ early_param("l1tf", l1tf_cmdline);
 #undef pr_fmt
 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt
 
-enum srso_mitigation {
-	SRSO_MITIGATION_NONE,
-	SRSO_MITIGATION_AUTO,
-	SRSO_MITIGATION_UCODE_NEEDED,
-	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
-	SRSO_MITIGATION_MICROCODE,
-	SRSO_MITIGATION_SAFE_RET,
-	SRSO_MITIGATION_IBPB,
-	SRSO_MITIGATION_IBPB_ON_VMEXIT,
-	SRSO_MITIGATION_BP_SPEC_REDUCE,
-};
-
 static const char * const srso_strings[] = {
 	[SRSO_MITIGATION_NONE]			= "Vulnerable",
 	[SRSO_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
 	[SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED]	= "Vulnerable: Safe RET, no microcode",
 	[SRSO_MITIGATION_MICROCODE]		= "Vulnerable: Microcode, no safe RET",
+	[SRSO_MITIGATION_NOSMT]			= "Mitigation: SMT disabled",
 	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: Safe RET",
 	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
 	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only",
 	[SRSO_MITIGATION_BP_SPEC_REDUCE]	= "Mitigation: Reduced Speculation"
 };
 
-static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
-
 static int __init srso_parse_cmdline(char *str)
 {
 	if (!str)
@@ -2996,35 +3178,44 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);
 
 static void __init srso_select_mitigation(void)
 {
-	bool has_microcode;
-
-	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+	if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
 		srso_mitigation = SRSO_MITIGATION_NONE;
-
-	if (srso_mitigation == SRSO_MITIGATION_NONE)
 		return;
+	}
 
-	if (srso_mitigation == SRSO_MITIGATION_AUTO)
-		srso_mitigation = SRSO_MITIGATION_SAFE_RET;
-
-	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
-	if (has_microcode) {
-		/*
-		 * Zen1/2 with SMT off aren't vulnerable after the right
-		 * IBPB microcode has been applied.
-		 */
-		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
-			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+	if (srso_mitigation == SRSO_MITIGATION_AUTO) {
+		if (should_mitigate_vuln(X86_BUG_SRSO)) {
+			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+		} else {
 			srso_mitigation = SRSO_MITIGATION_NONE;
 			return;
 		}
-	} else {
+	}
+
+	/* Zen1/2 with SMT off aren't vulnerable to SRSO. */
+	if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
+		srso_mitigation = SRSO_MITIGATION_NOSMT;
+		return;
+	}
+
+	if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) {
		pr_warn("IBPB-extending microcode not applied!\n");
		pr_warn(SRSO_NOTICE);
+
+		/*
+		 * Safe-RET provides partial mitigation even without microcode,
+		 * but the other mitigation options provide no protection
+		 * without it.
+		 */
+		if (srso_mitigation == SRSO_MITIGATION_SAFE_RET)
+			srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
+		else
+			srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
 	}
 
 	switch (srso_mitigation) {
 	case SRSO_MITIGATION_SAFE_RET:
+	case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
 		if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
 			srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
 			goto ibpb_on_vmexit;
 		}
@@ -3034,9 +3225,6 @@ static void __init srso_select_mitigation(void)
 			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
 			srso_mitigation = SRSO_MITIGATION_NONE;
 		}
-
-		if (!has_microcode)
-			srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
 		break;
 ibpb_on_vmexit:
 	case SRSO_MITIGATION_IBPB_ON_VMEXIT:
@@ -3051,9 +3239,6 @@ ibpb_on_vmexit:
 			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
 			srso_mitigation = SRSO_MITIGATION_NONE;
 		}
-
-		if (!has_microcode)
-			srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
 		break;
 	default:
 		break;
 	}
@@ -3068,8 +3253,7 @@ static void __init srso_update_mitigation(void)
 		srso_mitigation = SRSO_MITIGATION_IBPB;
 
 	if (boot_cpu_has_bug(X86_BUG_SRSO) &&
-	    !cpu_mitigations_off() &&
-	    !boot_cpu_has(X86_FEATURE_SRSO_NO))
+	    !cpu_mitigations_off())
 		pr_info("%s\n", srso_strings[srso_mitigation]);
 }
@@ -3365,9 +3549,6 @@ static ssize_t retbleed_show_state(char *buf)
 
 static ssize_t srso_show_state(char *buf)
 {
-	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
-		return sysfs_emit(buf, "Mitigation: SMT disabled\n");
-
 	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
 }
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index fb50c1dd53ef..34a054181c4d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -26,6 +26,7 @@
 #include <linux/pgtable.h>
 #include <linux/stackprotector.h>
 #include <linux/utsname.h>
+#include <linux/efi.h>
 
 #include <asm/alternative.h>
 #include <asm/cmdline.h>
@@ -2538,6 +2539,12 @@ void __init arch_cpu_finalize_init(void)
 	fpu__init_cpu();
 
 	/*
+	 * This needs to follow the FPU initialization, since EFI depends on it.
+	 */
+	if (efi_enabled(EFI_RUNTIME_SERVICES))
+		efi_enter_virtual_mode();
+
+	/*
 	 * Ensure that access to the per CPU representation has the initial
 	 * boot CPU configuration.
 	 */
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index fe50eb5b7c4a..b92e09a87c69 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -17,8 +17,8 @@
 
 #define pr_fmt(fmt) "microcode: " fmt
 
-#include <linux/platform_device.h>
 #include <linux/stop_machine.h>
+#include <linux/device/faux.h>
 #include <linux/syscore_ops.h>
 #include <linux/miscdevice.h>
 #include <linux/capability.h>
@@ -249,7 +249,7 @@ static void reload_early_microcode(unsigned int cpu)
 }
 
 /* fake device for request_firmware */
-static struct platform_device *microcode_pdev;
+static struct faux_device *microcode_fdev;
 
 #ifdef CONFIG_MICROCODE_LATE_LOADING
 /*
@@ -690,7 +690,7 @@ static int load_late_locked(void)
 	if (!setup_cpus())
 		return -EBUSY;
 
-	switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
+	switch (microcode_ops->request_microcode_fw(0, &microcode_fdev->dev)) {
 	case UCODE_NEW:
 		return load_late_stop_cpus(false);
 	case UCODE_NEW_SAFE:
@@ -841,9 +841,9 @@ static int __init microcode_init(void)
 	if (early_data.new_rev)
 		pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev);
 
-	microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
-	if (IS_ERR(microcode_pdev))
-		return PTR_ERR(microcode_pdev);
+	microcode_fdev = faux_device_create("microcode", NULL, NULL);
+	if (!microcode_fdev)
+		return -ENODEV;
 
 	dev_root = bus_get_dev_root(&cpu_subsys);
 	if (dev_root) {
@@ -862,7 +862,7 @@ static int __init microcode_init(void)
 	return 0;
 
 out_pdev:
-	platform_device_unregister(microcode_pdev);
+	faux_device_destroy(microcode_fdev);
 	return error;
 }
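Taken together, the bugs.c changes follow a three-phase structure: each vulnerability first selects a mode (now consulting the attack vector controls), the update phase reconciles cross-dependencies such as retbleed/ITS/SRSO, and only the apply phase commits CPU capabilities, return thunks, and SMT policy. cpu_select_mitigations() drives this with direct per-bug calls; the sketch below (hypothetical names, illustration only) just makes the staging explicit:

	/* Sketch: the select -> update -> apply staging used by bugs.c. */
	struct mitigation_phases {
		void (*select)(void);	/* pick a mode from cmdline + attack vectors */
		void (*update)(void);	/* reconcile against other mitigations */
		void (*apply)(void);	/* commit cpu caps, thunks, SMT policy */
	};

	static void run_mitigations(const struct mitigation_phases *m, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			m[i].select();
		for (i = 0; i < n; i++)
			m[i].update();
		for (i = 0; i < n; i++)
			m[i].apply();
	}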