-rw-r--r--   arch/arm64/include/asm/mmu.h            |  87
-rw-r--r--   arch/arm64/include/asm/pgtable-prot.h   |   4
-rw-r--r--   arch/arm64/kernel/cpufeature.c          |  55
-rw-r--r--   arch/arm64/kernel/setup.c               |   7
4 files changed, 65 insertions(+), 88 deletions(-)
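
In short, the old inline helpers in mmu.h (arm64_kernel_use_ng_mappings() and kaslr_requires_kpti()) give way to a single boolean, arm64_use_ng_mappings, which is decided during early boot and then simply read by arm64_kernel_unmapped_at_el0() and the PTE/PMD protection macros. The standalone sketch below is plain userspace C, not kernel code: kaslr_requires_kpti_stub() is an illustrative stand-in for the real detection logic, and the PTE_NG value mirrors the arm64 bit-11 definition purely for demonstration.

/*
 * Sketch of the pattern introduced by this patch: one flag decided in
 * early setup, then read by the mapping helper and protection macros.
 */
#include <stdbool.h>
#include <stdio.h>

static bool arm64_use_ng_mappings;              /* set once in early setup */

static bool kaslr_requires_kpti_stub(void)      /* stand-in for the real check */
{
        return true;                            /* pretend KASLR forces KPTI */
}

static bool arm64_kernel_unmapped_at_el0(void)  /* mirrors the new inline helper */
{
        return arm64_use_ng_mappings;
}

#define PTE_NG        (1UL << 11)
#define PTE_MAYBE_NG  (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0UL)

int main(void)
{
        /* setup_arch() equivalent: decide before creating the first mappings */
        arm64_use_ng_mappings = kaslr_requires_kpti_stub();

        printf("PTE_MAYBE_NG = %#lx\n", PTE_MAYBE_NG);
        return 0;
}
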
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 1eec3971f0a9..e4d862420bb4 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -29,91 +29,11 @@ typedef struct {
*/
#define ASID(mm) ((mm)->context.id.counter & 0xffff)
-static inline bool arm64_kernel_unmapped_at_el0(void)
-{
- return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
- cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
-}
+extern bool arm64_use_ng_mappings;
-/*
- * This check is triggered during the early boot before the cpufeature
- * is initialised. Checking the status on the local CPU allows the boot
- * CPU to detect the need for non-global mappings and thus avoiding a
- * pagetable re-write after all the CPUs are booted. This check will be
- * anyway run on individual CPUs, allowing us to get the consistent
- * state once the SMP CPUs are up and thus make the switch to non-global
- * mappings if required.
- */
-static inline bool kaslr_requires_kpti(void)
-{
- bool tx1_bug;
- u64 ftr;
-
- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return false;
-
- /*
- * E0PD does a similar job to KPTI so can be used instead
- * where available.
- */
- if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
- ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
- if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf)
- return false;
- }
-
- /*
- * Systems affected by Cavium erratum 24756 are incompatible
- * with KPTI.
- */
- if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
- tx1_bug = false;
-#ifndef MODULE
- } else if (!static_branch_likely(&arm64_const_caps_ready)) {
- extern const struct midr_range cavium_erratum_27456_cpus[];
-
- tx1_bug = is_midr_in_range_list(read_cpuid_id(),
- cavium_erratum_27456_cpus);
-#endif
- } else {
- tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
- }
- if (tx1_bug)
- return false;
-
- return kaslr_offset() > 0;
-}
-
-static inline bool arm64_kernel_use_ng_mappings(void)
+static inline bool arm64_kernel_unmapped_at_el0(void)
{
- /* What's a kpti? Use global mappings if we don't know. */
- if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
- return false;
-
- /*
- * Note: this function is called before the CPU capabilities have
- * been configured, so our early mappings will be global. If we
- * later determine that kpti is required, then
- * kpti_install_ng_mappings() will make them non-global.
- */
- if (arm64_kernel_unmapped_at_el0())
- return true;
-
- /*
- * Once we are far enough into boot for capabilities to be
- * ready we will have confirmed if we are using non-global
- * mappings so don't need to consider anything else here.
- */
- if (static_branch_likely(&arm64_const_caps_ready))
- return false;
-
- /*
- * KASLR is enabled so we're going to be enabling kpti on non-broken
- * CPUs regardless of their susceptibility to Meltdown. Rather
- * than force everybody to go through the G -> nG dance later on,
- * just put down non-global mappings from the beginning
- */
- return kaslr_requires_kpti();
+ return arm64_use_ng_mappings;
}
typedef void (*bp_hardening_cb_t)(void);
@@ -167,6 +87,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
+extern bool kaslr_requires_kpti(void);
#define INIT_MM_CONTEXT(name) \
.pgd = init_pg_dir,
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 8dc6c5cdabe6..0a1fd95a8972 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -26,8 +26,8 @@
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
-#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
+#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 88aa5ab02926..4a031111ceb5 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -47,6 +47,9 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM6
/* Need also bit for ARM64_CB_PATCH */
DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+bool arm64_use_ng_mappings = false;
+EXPORT_SYMBOL(arm64_use_ng_mappings);
+
/*
* Flag to indicate if we have computed the system wide
* capabilities based on the boot time active CPUs. This
@@ -966,6 +969,53 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
return has_cpuid_feature(entry, scope);
}
+/*
+ * This check is triggered during early boot, before the cpufeature
+ * framework is initialised. Checking the status on the local CPU allows
+ * the boot CPU to detect the need for non-global mappings and thus avoid
+ * a pagetable re-write after all the CPUs are booted. This check will be
+ * run on the individual CPUs anyway, allowing us to get a consistent
+ * state once the SMP CPUs are up and thus make the switch to non-global
+ * mappings if required.
+ */
+bool kaslr_requires_kpti(void)
+{
+ bool tx1_bug;
+ u64 ftr;
+
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return false;
+
+ /*
+ * E0PD does a similar job to KPTI so can be used instead
+ * where available.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+ ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+ if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf)
+ return false;
+ }
+
+ /*
+ * Systems affected by Cavium erratum 27456 are incompatible
+ * with KPTI.
+ */
+ if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+ tx1_bug = false;
+ } else if (!static_branch_likely(&arm64_const_caps_ready)) {
+ extern const struct midr_range cavium_erratum_27456_cpus[];
+
+ tx1_bug = is_midr_in_range_list(read_cpuid_id(),
+ cavium_erratum_27456_cpus);
+ } else {
+ tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
+ }
+ if (tx1_bug)
+ return false;
+
+ return kaslr_offset() > 0;
+}
+
static bool __meltdown_safe = true;
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
@@ -1044,7 +1094,6 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
extern kpti_remap_fn idmap_kpti_install_ng_mappings;
kpti_remap_fn *remap_fn;
- static bool kpti_applied = false;
int cpu = smp_processor_id();
/*
@@ -1052,7 +1101,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
* it already or we have KASLR enabled and therefore have not
* created any global mappings at all.
*/
- if (kpti_applied || kaslr_offset() > 0)
+ if (arm64_use_ng_mappings)
return;
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
@@ -1062,7 +1111,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
cpu_uninstall_idmap();
if (!cpu)
- kpti_applied = true;
+ arm64_use_ng_mappings = true;
return;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 56f664561754..b6f9455d7ca3 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -285,6 +285,13 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = boot_command_line;
+ /*
+ * If we know now that we are going to need KPTI then use non-global
+ * mappings from the start, avoiding the cost of rewriting
+ * everything later.
+ */
+ arm64_use_ng_mappings = kaslr_requires_kpti();
+
early_fixmap_init();
early_ioremap_init();
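
On the cpufeature.c side, the same flag absorbs the old function-local kpti_applied static: the per-CPU callback returns early when the flag is already set (either because setup_arch() chose non-global mappings up front, or because the boot CPU has already performed the G -> nG rewrite), and the boot CPU sets it once the rewrite has run. A simplified sketch of that guard, again plain userspace C rather than kernel code, with remap_all_mappings_stub() standing in for idmap_kpti_install_ng_mappings:

/*
 * Sketch of how the flag doubles as the "already non-global" marker in
 * the per-CPU KPTI callback after this patch.
 */
#include <stdbool.h>
#include <stdio.h>

static bool arm64_use_ng_mappings;

static void remap_all_mappings_stub(void)
{
        printf("rewriting swapper page tables G -> nG\n");
}

static void kpti_install_ng_mappings(int cpu)
{
        /*
         * Nothing to do if setup_arch() already chose non-global
         * mappings, or if the boot CPU has already done the rewrite.
         */
        if (arm64_use_ng_mappings)
                return;

        remap_all_mappings_stub();

        if (cpu == 0)                   /* boot CPU records that it's done */
                arm64_use_ng_mappings = true;
}

int main(void)
{
        kpti_install_ng_mappings(0);    /* boot CPU: performs the rewrite */
        kpti_install_ng_mappings(1);    /* later CPU: finds the flag set, skips */
        return 0;
}
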