Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/acpi.c           |   4
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c     |   4
-rw-r--r--  arch/arm64/kernel/bpi.S            |  44
-rw-r--r--  arch/arm64/kernel/cpu-reset.S      |   2
-rw-r--r--  arch/arm64/kernel/cpu_errata.c     |  77
-rw-r--r--  arch/arm64/kernel/cpufeature.c     |  42
-rw-r--r--  arch/arm64/kernel/entry.S          |  29
-rw-r--r--  arch/arm64/kernel/head.S           |  30
-rw-r--r--  arch/arm64/kernel/hibernate-asm.S  |   4
-rw-r--r--  arch/arm64/kernel/sleep.S          |   2
10 files changed, 150 insertions, 88 deletions
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 252396a96c78..7b09487ff8fb 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -230,10 +230,10 @@ void __init acpi_boot_table_init(void)
done:
if (acpi_disabled) {
- if (earlycon_init_is_deferred)
+ if (earlycon_acpi_spcr_enable)
early_init_dt_scan_chosen_stdout();
} else {
- parse_spcr(earlycon_init_is_deferred);
+ acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
if (IS_ENABLED(CONFIG_ACPI_BGRT))
acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
}
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 67368c7329c0..66be504edb6c 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page);
/* user mem (segment) */
EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(raw_copy_in_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
index 76225c2611ea..e5de33513b5d 100644
--- a/arch/arm64/kernel/bpi.S
+++ b/arch/arm64/kernel/bpi.S
@@ -17,6 +17,7 @@
*/
#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
.macro ventry target
.rept 31
@@ -53,30 +54,6 @@ ENTRY(__bp_harden_hyp_vecs_start)
vectors __kvm_hyp_vector
.endr
ENTRY(__bp_harden_hyp_vecs_end)
-ENTRY(__psci_hyp_bp_inval_start)
- sub sp, sp, #(8 * 18)
- stp x16, x17, [sp, #(16 * 0)]
- stp x14, x15, [sp, #(16 * 1)]
- stp x12, x13, [sp, #(16 * 2)]
- stp x10, x11, [sp, #(16 * 3)]
- stp x8, x9, [sp, #(16 * 4)]
- stp x6, x7, [sp, #(16 * 5)]
- stp x4, x5, [sp, #(16 * 6)]
- stp x2, x3, [sp, #(16 * 7)]
- stp x0, x1, [sp, #(16 * 8)]
- mov x0, #0x84000000
- smc #0
- ldp x16, x17, [sp, #(16 * 0)]
- ldp x14, x15, [sp, #(16 * 1)]
- ldp x12, x13, [sp, #(16 * 2)]
- ldp x10, x11, [sp, #(16 * 3)]
- ldp x8, x9, [sp, #(16 * 4)]
- ldp x6, x7, [sp, #(16 * 5)]
- ldp x4, x5, [sp, #(16 * 6)]
- ldp x2, x3, [sp, #(16 * 7)]
- ldp x0, x1, [sp, #(16 * 8)]
- add sp, sp, #(8 * 18)
-ENTRY(__psci_hyp_bp_inval_end)
ENTRY(__qcom_hyp_sanitize_link_stack_start)
stp x29, x30, [sp, #-16]!
@@ -85,3 +62,22 @@ ENTRY(__qcom_hyp_sanitize_link_stack_start)
.endr
ldp x29, x30, [sp], #16
ENTRY(__qcom_hyp_sanitize_link_stack_end)
+
+.macro smccc_workaround_1 inst
+ sub sp, sp, #(8 * 4)
+ stp x2, x3, [sp, #(8 * 0)]
+ stp x0, x1, [sp, #(8 * 2)]
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+ \inst #0
+ ldp x2, x3, [sp, #(8 * 0)]
+ ldp x0, x1, [sp, #(8 * 2)]
+ add sp, sp, #(8 * 4)
+.endm
+
+ENTRY(__smccc_workaround_1_smc_start)
+ smccc_workaround_1 smc
+ENTRY(__smccc_workaround_1_smc_end)
+
+ENTRY(__smccc_workaround_1_hvc_start)
+ smccc_workaround_1 hvc
+ENTRY(__smccc_workaround_1_hvc_end)
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 2a752cb2a0f3..8021b46c9743 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -16,7 +16,7 @@
#include <asm/virt.h>
.text
-.pushsection .idmap.text, "ax"
+.pushsection .idmap.text, "awx"
/*
* __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ed6881882231..07823595b7f0 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -67,9 +67,12 @@ static int cpu_enable_trap_ctr_access(void *__unused)
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM
-extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
extern char __qcom_hyp_sanitize_link_stack_start[];
extern char __qcom_hyp_sanitize_link_stack_end[];
+extern char __smccc_workaround_1_smc_start[];
+extern char __smccc_workaround_1_smc_end[];
+extern char __smccc_workaround_1_hvc_start[];
+extern char __smccc_workaround_1_hvc_end[];
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
const char *hyp_vecs_end)
@@ -112,10 +115,12 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
spin_unlock(&bp_lock);
}
#else
-#define __psci_hyp_bp_inval_start NULL
-#define __psci_hyp_bp_inval_end NULL
#define __qcom_hyp_sanitize_link_stack_start NULL
#define __qcom_hyp_sanitize_link_stack_end NULL
+#define __smccc_workaround_1_smc_start NULL
+#define __smccc_workaround_1_smc_end NULL
+#define __smccc_workaround_1_hvc_start NULL
+#define __smccc_workaround_1_hvc_end NULL
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
@@ -142,17 +147,59 @@ static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}
+#include <uapi/linux/psci.h>
+#include <linux/arm-smccc.h>
#include <linux/psci.h>
-static int enable_psci_bp_hardening(void *data)
+static void call_smc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void call_hvc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static int enable_smccc_arch_workaround_1(void *data)
{
const struct arm64_cpu_capabilities *entry = data;
+ bp_hardening_cb_t cb;
+ void *smccc_start, *smccc_end;
+ struct arm_smccc_res res;
+
+ if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+ return 0;
+
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+ return 0;
+
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if (res.a0)
+ return 0;
+ cb = call_hvc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_hvc_start;
+ smccc_end = __smccc_workaround_1_hvc_end;
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if (res.a0)
+ return 0;
+ cb = call_smc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_smc_start;
+ smccc_end = __smccc_workaround_1_smc_end;
+ break;
+
+ default:
+ return 0;
+ }
- if (psci_ops.get_version)
- install_bp_hardening_cb(entry,
- (bp_hardening_cb_t)psci_ops.get_version,
- __psci_hyp_bp_inval_start,
- __psci_hyp_bp_inval_end);
+ install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
return 0;
}
@@ -333,22 +380,22 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
@@ -362,12 +409,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
#endif
{
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0fb6a3151443..29b1f873e337 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -856,12 +856,23 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
int __unused)
{
+ char const *str = "command line option";
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
- /* Forced on command line? */
+ /*
+ * For reasons that aren't entirely clear, enabling KPTI on Cavium
+ * ThunderX leads to apparent I-cache corruption of kernel text, which
+ * ends as well as you might imagine. Don't even try.
+ */
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ str = "ARM64_WORKAROUND_CAVIUM_27456";
+ __kpti_forced = -1;
+ }
+
+ /* Forced? */
if (__kpti_forced) {
- pr_info_once("kernel page table isolation forced %s by command line option\n",
- __kpti_forced > 0 ? "ON" : "OFF");
+ pr_info_once("kernel page table isolation forced %s by %s\n",
+ __kpti_forced > 0 ? "ON" : "OFF", str);
return __kpti_forced > 0;
}
@@ -881,6 +892,30 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
ID_AA64PFR0_CSV3_SHIFT);
}
+static int kpti_install_ng_mappings(void *__unused)
+{
+ typedef void (kpti_remap_fn)(int, int, phys_addr_t);
+ extern kpti_remap_fn idmap_kpti_install_ng_mappings;
+ kpti_remap_fn *remap_fn;
+
+ static bool kpti_applied = false;
+ int cpu = smp_processor_id();
+
+ if (kpti_applied)
+ return 0;
+
+ remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+
+ cpu_install_idmap();
+ remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
+ cpu_uninstall_idmap();
+
+ if (!cpu)
+ kpti_applied = true;
+
+ return 0;
+}
+
static int __init parse_kpti(char *str)
{
bool enabled;
@@ -1004,6 +1039,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
.def_scope = SCOPE_SYSTEM,
.matches = unmap_kernel_at_el0,
+ .enable = kpti_install_ng_mappings,
},
#endif
{
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index cccd2788e631..ec2ee720e33e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -167,10 +167,10 @@ alternative_else_nop_endif
.else
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
- /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ /* Save the task's original addr_limit and set USER_DS */
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
- mov x20, #TASK_SIZE_64
+ mov x20, #USER_DS
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
@@ -382,6 +382,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
* x7 is reserved for the system call number in 32-bit mode.
*/
wsc_nr .req w25 // number of system calls
+xsc_nr .req x25 // number of system calls (zero-extended)
wscno .req w26 // syscall number
xscno .req x26 // syscall number (zero-extended)
stbl .req x27 // syscall table pointer
@@ -770,7 +771,10 @@ el0_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x26, far_el1
- enable_daif
+ enable_da_f
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
ct_user_exit
mov x0, x26
mov x1, x25
@@ -828,6 +832,11 @@ el0_irq_naked:
#endif
ct_user_exit
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ tbz x22, #55, 1f
+ bl do_el0_irq_bp_hardening
+1:
+#endif
irq_handler
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -939,6 +948,7 @@ el0_svc_naked: // compat entry point
b.ne __sys_trace
cmp wscno, wsc_nr // check upper syscall limit
b.hs ni_sys
+ mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number
ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
blr x16 // call sys_* routine
b ret_fast_syscall
@@ -1017,16 +1027,9 @@ alternative_else_nop_endif
orr \tmp, \tmp, #USER_ASID_FLAG
msr ttbr1_el1, \tmp
/*
- * We avoid running the post_ttbr_update_workaround here because the
- * user and kernel ASIDs don't have conflicting mappings, so any
- * "blessing" as described in:
- *
- * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
- *
- * will not hurt correctness. Whilst this may partially defeat the
- * point of using split ASIDs in the first place, it avoids
- * the hit of invalidating the entire I-cache on every return to
- * userspace.
+ * We avoid running the post_ttbr_update_workaround here because
+ * it's only needed by Cavium ThunderX, which requires KPTI to be
+ * disabled.
*/
.endm
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ba3ab04788dc..2b6b8b24e5ab 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -148,26 +148,6 @@ preserve_boot_args:
ENDPROC(preserve_boot_args)
/*
- * Macro to arrange a physical address in a page table entry, taking care of
- * 52-bit addresses.
- *
- * Preserves: phys
- * Returns: pte
- */
- .macro phys_to_pte, phys, pte
-#ifdef CONFIG_ARM64_PA_BITS_52
- /*
- * We assume \phys is 64K aligned and this is guaranteed by only
- * supporting this configuration with 64K pages.
- */
- orr \pte, \phys, \phys, lsr #36
- and \pte, \pte, #PTE_ADDR_MASK
-#else
- mov \pte, \phys
-#endif
- .endm
-
-/*
* Macro to create a table entry to the next page.
*
* tbl: page table address
@@ -181,7 +161,7 @@ ENDPROC(preserve_boot_args)
*/
.macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
add \tmp1, \tbl, #PAGE_SIZE
- phys_to_pte \tmp1, \tmp2
+ phys_to_pte \tmp2, \tmp1
orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
lsr \tmp1, \virt, #\shift
sub \ptrs, \ptrs, #1
@@ -207,7 +187,7 @@ ENDPROC(preserve_boot_args)
* Returns: rtbl
*/
.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
-.Lpe\@: phys_to_pte \rtbl, \tmp1
+.Lpe\@: phys_to_pte \tmp1, \rtbl
orr \tmp1, \tmp1, \flags // tmp1 = table entry
str \tmp1, [\tbl, \index, lsl #3]
add \rtbl, \rtbl, \inc // rtbl = pa next level
@@ -475,7 +455,7 @@ ENDPROC(__primary_switched)
* end early head section, begin head code that is also used for
* hotplug and needs to have the same protections as the text region
*/
- .section ".idmap.text","ax"
+ .section ".idmap.text","awx"
ENTRY(kimage_vaddr)
.quad _text - TEXT_OFFSET
@@ -776,8 +756,8 @@ ENTRY(__enable_mmu)
update_early_cpu_boot_status 0, x1, x2
adrp x1, idmap_pg_dir
adrp x2, swapper_pg_dir
- phys_to_ttbr x1, x3
- phys_to_ttbr x2, x4
+ phys_to_ttbr x3, x1
+ phys_to_ttbr x4, x2
msr ttbr0_el1, x3 // load TTBR0
msr ttbr1_el1, x4 // load TTBR1
isb
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index 84f5d52fddda..dd14ab8c9f72 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -34,12 +34,12 @@
* each stage of the walk.
*/
.macro break_before_make_ttbr_switch zero_page, page_table, tmp
- phys_to_ttbr \zero_page, \tmp
+ phys_to_ttbr \tmp, \zero_page
msr ttbr1_el1, \tmp
isb
tlbi vmalle1
dsb nsh
- phys_to_ttbr \page_table, \tmp
+ phys_to_ttbr \tmp, \page_table
msr ttbr1_el1, \tmp
isb
.endm
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 10dd16d7902d..bebec8ef9372 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter)
ret
ENDPROC(__cpu_suspend_enter)
- .pushsection ".idmap.text", "ax"
+ .pushsection ".idmap.text", "awx"
ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
bl __cpu_setup