author     Linus Torvalds <torvalds@linux-foundation.org>  2022-07-17 08:27:30 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-07-17 08:27:30 -0700
commit     59c80f053d50467758c8284348b463fa820b1b1f (patch)
tree       350d6f5e2e02595c0ca5449a7050d73bf5602272
parent     2eccaca7b62b2836260c6fb22156a44e3d99a74a (diff)
parent     bcf163150cd37348a0cb59e95c916a83a9344b0e (diff)
Merge tag 'x86_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - Improve the check whether the kernel supports WP mappings so that it
   can accommodate a XenPV guest due to how the latter is setting up
   the PAT machinery

 - Now that the retbleed nightmare is public, here's the first round of
   fallout fixes:

     * Fix a build failure on 32-bit due to missing include

     * Remove an untraining point in espfix64 return path

     * other small cleanups

* tag 'x86_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/bugs: Remove apostrophe typo
  um: Add missing apply_returns()
  x86/entry: Remove UNTRAIN_RET from native_irq_return_ldt
  x86/bugs: Mark retbleed_strings static
  x86/pat: Fix x86_has_pat_wp()
  x86/asm/32: Fix ANNOTATE_UNRET_SAFE use on 32-bit
-rw-r--r--  arch/um/kernel/um_arch.c    |  4
-rw-r--r--  arch/x86/entry/entry_64.S   |  1
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  |  4
-rw-r--r--  arch/x86/kernel/head_32.S   |  1
-rw-r--r--  arch/x86/mm/init.c          | 14
5 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 0760e24f2eba..9838967d0b2f 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -432,6 +432,10 @@ void apply_retpolines(s32 *start, s32 *end)
{
}
+void apply_returns(s32 *start, s32 *end)
+{
+}
+
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
}
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 285e043a3e40..9953d966d124 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -727,7 +727,6 @@ native_irq_return_ldt:
pushq %rdi /* Stash user RDI */
swapgs /* to kernel GS */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
- UNTRAIN_RET
movq PER_CPU_VAR(espfix_waddr), %rdi
movq %rax, (0*8)(%rdi) /* user RAX */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 0dd04713434b..aa34f908c39f 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -793,7 +793,7 @@ enum retbleed_mitigation_cmd {
RETBLEED_CMD_IBPB,
};
-const char * const retbleed_strings[] = {
+static const char * const retbleed_strings[] = {
[RETBLEED_MITIGATION_NONE] = "Vulnerable",
[RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
[RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
@@ -1181,7 +1181,7 @@ spectre_v2_user_select_mitigation(void)
if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
if (mode != SPECTRE_V2_USER_STRICT &&
mode != SPECTRE_V2_USER_STRICT_PREFERRED)
- pr_info("Selecting STIBP always-on mode to complement retbleed mitigation'\n");
+ pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
mode = SPECTRE_V2_USER_STRICT_PREFERRED;
}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index eb8656bac99b..9b7acc9c7874 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -23,6 +23,7 @@
#include <asm/cpufeatures.h>
#include <asm/percpu.h>
#include <asm/nops.h>
+#include <asm/nospec-branch.h>
#include <asm/bootparam.h>
#include <asm/export.h>
#include <asm/pgtable_32.h>
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index d8cfce221275..57ba5502aecf 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -77,10 +77,20 @@ static uint8_t __pte2cachemode_tbl[8] = {
[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
-/* Check that the write-protect PAT entry is set for write-protect */
+/*
+ * Check that the write-protect PAT entry is set for write-protect.
+ * To do this without making assumptions how PAT has been set up (Xen has
+ * another layout than the kernel), translate the _PAGE_CACHE_MODE_WP cache
+ * mode via the __cachemode2pte_tbl[] into protection bits (those protection
+ * bits will select a cache mode of WP or better), and then translate the
+ * protection bits back into the cache mode using __pte2cm_idx() and the
+ * __pte2cachemode_tbl[] array. This will return the really used cache mode.
+ */
bool x86_has_pat_wp(void)
{
- return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP;
+ uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];
+
+ return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
}
enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
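
The following is a small standalone sketch (compilable with a plain C compiler, not kernel code) of the round-trip idea behind the x86_has_pat_wp() change above. The bit values, the two tables, and the pte2cm_idx() helper here are simplified, hypothetical stand-ins rather than the kernel's real __cachemode2pte_tbl[] / __pte2cachemode_tbl[] contents; they only illustrate why indexing the pte-to-cachemode table directly with a cache-mode constant can miss a usable WP entry once the PAT layout differs from the kernel default, as it does under Xen PV.

/* Standalone illustration of the cache-mode round trip; values are made up. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum cache_mode { CM_WB, CM_WC, CM_UC_MINUS, CM_UC, CM_WT, CM_WP, CM_NUM };

/* Stand-in PTE cache bits (positions chosen arbitrarily for the demo). */
#define PWT 0x1
#define PCD 0x2
#define PAT 0x4

/* Collapse the cache bits into a 0..7 index (simplified stand-in for __pte2cm_idx()). */
static unsigned int pte2cm_idx(uint16_t prot)
{
	return prot & (PWT | PCD | PAT);
}

/*
 * Hypothetical "Xen-like" layout: WP is reachable, but not at the slot the
 * kernel-default layout would use, so the two tables are no longer an
 * identity-style mapping for the WP cache mode.
 */
static const uint16_t cachemode2pte_tbl[CM_NUM] = {
	[CM_WB]       = 0,
	[CM_WC]       = PWT | PAT,
	[CM_UC_MINUS] = PCD,
	[CM_UC]       = PWT | PCD,
	[CM_WT]       = PCD | PAT,
	[CM_WP]       = PAT,
};

static const enum cache_mode pte2cachemode_tbl[8] = {
	[0]               = CM_WB,
	[PWT]             = CM_UC_MINUS,
	[PCD]             = CM_UC_MINUS,
	[PWT | PCD]       = CM_UC,
	[PAT]             = CM_WP,
	[PWT | PAT]       = CM_WC,
	[PCD | PAT]       = CM_WT,
	[PWT | PCD | PAT] = CM_UC,
};

/* Old check: treats a cache-mode constant as if it were a pte2cm index. */
static bool has_pat_wp_old(void)
{
	return pte2cachemode_tbl[CM_WP] == CM_WP;
}

/* New check: cache mode -> protection bits -> cache mode round trip. */
static bool has_pat_wp_new(void)
{
	uint16_t prot = cachemode2pte_tbl[CM_WP];

	return pte2cachemode_tbl[pte2cm_idx(prot)] == CM_WP;
}

int main(void)
{
	/* With this layout WP is usable, but only the round trip detects it. */
	printf("old check: %d, new check: %d\n", has_pat_wp_old(), has_pat_wp_new());
	return 0;
}

With these stand-in tables the old check reports false (index CM_WP happens to hold WC) while the round-trip check reports true, which is the behavioural difference the x86/pat fix addresses.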