Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile          2
-rw-r--r--  arch/arm/kernel/entry-armv.S     79
-rw-r--r--  arch/arm/kernel/entry-common.S   24
-rw-r--r--  arch/arm/kernel/kgdb.c           36
-rw-r--r--  arch/arm/kernel/spectre.c        71
-rw-r--r--  arch/arm/kernel/traps.c          65
6 files changed, 257 insertions, 20 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ae295a3bcfef..6ef3b535b7bf 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -106,4 +106,6 @@ endif
obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o
+obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
+
extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 5cd057859fe9..ee3f7a599181 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -1002,12 +1002,11 @@ vector_\name:
sub lr, lr, #\correction
.endif
- @
- @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
- @ (parent CPSR)
- @
+ @ Save r0, lr_<exception> (parent PC)
stmia sp, {r0, lr} @ save r0, lr
- mrs lr, spsr
+
+ @ Save spsr_<exception> (parent CPSR)
+2: mrs lr, spsr
str lr, [sp, #8] @ save spsr
@
@@ -1028,6 +1027,44 @@ vector_\name:
movs pc, lr @ branch to handler in SVC mode
ENDPROC(vector_\name)
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ .subsection 1
+ .align 5
+vector_bhb_loop8_\name:
+ .if \correction
+ sub lr, lr, #\correction
+ .endif
+
+ @ Save r0, lr_<exception> (parent PC)
+ stmia sp, {r0, lr}
+
+ @ bhb workaround
+ mov r0, #8
+3: b . + 4
+ subs r0, r0, #1
+ bne 3b
+ dsb
+ isb
+ b 2b
+ENDPROC(vector_bhb_loop8_\name)
+
+vector_bhb_bpiall_\name:
+ .if \correction
+ sub lr, lr, #\correction
+ .endif
+
+ @ Save r0, lr_<exception> (parent PC)
+ stmia sp, {r0, lr}
+
+ @ bhb workaround
+ mcr p15, 0, r0, c7, c5, 6 @ BPIALL
+ @ isb not needed due to "movs pc, lr" in the vector stub
+ @ which gives a "context synchronisation".
+ b 2b
+ENDPROC(vector_bhb_bpiall_\name)
+ .previous
+#endif
+
.align 2
@ handler addresses follow this label
1:
@@ -1036,6 +1073,10 @@ ENDPROC(vector_\name)
.section .stubs, "ax", %progbits
@ This must be the first word
.word vector_swi
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ .word vector_bhb_loop8_swi
+ .word vector_bhb_bpiall_swi
+#endif
vector_rst:
ARM( swi SYS_ERROR0 )
@@ -1150,8 +1191,10 @@ vector_addrexcptn:
* FIQ "NMI" handler
*-----------------------------------------------------------------------------
* Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
- * systems.
+ * systems. This must be the last vector stub, so lets place it in its own
+ * subsection.
*/
+ .subsection 2
vector_stub fiq, FIQ_MODE, 4
.long __fiq_usr @ 0 (USR_26 / USR_32)
@@ -1184,6 +1227,30 @@ vector_addrexcptn:
W(b) vector_irq
W(b) vector_fiq
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ .section .vectors.bhb.loop8, "ax", %progbits
+.L__vectors_bhb_loop8_start:
+ W(b) vector_rst
+ W(b) vector_bhb_loop8_und
+ W(ldr) pc, .L__vectors_bhb_loop8_start + 0x1004
+ W(b) vector_bhb_loop8_pabt
+ W(b) vector_bhb_loop8_dabt
+ W(b) vector_addrexcptn
+ W(b) vector_bhb_loop8_irq
+ W(b) vector_bhb_loop8_fiq
+
+ .section .vectors.bhb.bpiall, "ax", %progbits
+.L__vectors_bhb_bpiall_start:
+ W(b) vector_rst
+ W(b) vector_bhb_bpiall_und
+ W(ldr) pc, .L__vectors_bhb_bpiall_start + 0x1008
+ W(b) vector_bhb_bpiall_pabt
+ W(b) vector_bhb_bpiall_dabt
+ W(b) vector_addrexcptn
+ W(b) vector_bhb_bpiall_irq
+ W(b) vector_bhb_bpiall_fiq
+#endif
+
.data
.align 2
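
Note on the two alternative vector tables above: their SWI slot is an ldr that loads pc from the second (+0x1004) or third (+0x1008) word of the stubs page instead of branching to the first word, so the syscall entry runs the matching BHB workaround before joining the common path. A minimal C sketch of that address arithmetic, assuming the chosen BHB table is installed at the base of the vectors page (as spectre_bhb_update_vectors() in traps.c below does) and the stubs are copied to +0x1000 (as early_trap_init() does); every name in the sketch is illustrative only:

/* Illustrative address map only; macro and helper names are hypothetical. */
#define VECTORS_BASE	0xffff0000UL	/* vectors page, see early_trap_init() */
#define STUBS_OFFSET	0x1000UL	/* .stubs copied to vectors + 0x1000 */

/* First three words of the .stubs section, in declaration order. */
enum stub_word {
	STUB_VECTOR_SWI            = 0,	/* +0x1000: vector_swi */
	STUB_VECTOR_BHB_LOOP8_SWI  = 1,	/* +0x1004: vector_bhb_loop8_swi */
	STUB_VECTOR_BHB_BPIALL_SWI = 2,	/* +0x1008: vector_bhb_bpiall_swi */
};

static inline unsigned long stub_word_address(enum stub_word word)
{
	return VECTORS_BASE + STUBS_OFFSET + 4 * word;
}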
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index ac86c34682bb..dbc1913ee30b 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -154,12 +154,36 @@ ENDPROC(ret_from_fork)
*/
.align 5
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ENTRY(vector_bhb_loop8_swi)
+ sub sp, sp, #PT_REGS_SIZE
+ stmia sp, {r0 - r12}
+ mov r8, #8
+1: b 2f
+2: subs r8, r8, #1
+ bne 1b
+ dsb
+ isb
+ b 3f
+ENDPROC(vector_bhb_loop8_swi)
+
+ .align 5
+ENTRY(vector_bhb_bpiall_swi)
+ sub sp, sp, #PT_REGS_SIZE
+ stmia sp, {r0 - r12}
+ mcr p15, 0, r8, c7, c5, 6 @ BPIALL
+ isb
+ b 3f
+ENDPROC(vector_bhb_bpiall_swi)
+#endif
+ .align 5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
v7m_exception_entry
#else
sub sp, sp, #PT_REGS_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
+3:
ARM( add r8, sp, #S_PC )
ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
THUMB( mov r8, sp )
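
Both hardened SWI entries save r0-r12 first, run their workaround, and then branch to local label 3 inside vector_swi so the rest of the syscall entry stays shared. For reference, a C-with-inline-assembly sketch of what the loop8 sequence amounts to (the helper name is hypothetical; the real sequence must stay in the entry stub itself, before the kernel executes branches that user-trained history could steer):

static inline void spectre_bhb_loop8(void)
{
	unsigned long cnt = 8;

	/* Eight architecturally harmless taken branches overwrite the
	 * branch history; dsb+isb then synchronise before continuing. */
	asm volatile(
	"1:	b	. + 4\n\t"
	"	subs	%0, %0, #1\n\t"
	"	bne	1b\n\t"
	"	dsb\n\t"
	"	isb\n\t"
	: "+r" (cnt) : : "cc", "memory");
}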
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 7bd30c0a4280..22f937e6f3ff 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -154,22 +154,38 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
return 0;
}
-static struct undef_hook kgdb_brkpt_hook = {
+static struct undef_hook kgdb_brkpt_arm_hook = {
.instr_mask = 0xffffffff,
.instr_val = KGDB_BREAKINST,
- .cpsr_mask = MODE_MASK,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
.cpsr_val = SVC_MODE,
.fn = kgdb_brk_fn
};
-static struct undef_hook kgdb_compiled_brkpt_hook = {
+static struct undef_hook kgdb_brkpt_thumb_hook = {
+ .instr_mask = 0xffff,
+ .instr_val = KGDB_BREAKINST & 0xffff,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
+ .cpsr_val = PSR_T_BIT | SVC_MODE,
+ .fn = kgdb_brk_fn
+};
+
+static struct undef_hook kgdb_compiled_brkpt_arm_hook = {
.instr_mask = 0xffffffff,
.instr_val = KGDB_COMPILED_BREAK,
- .cpsr_mask = MODE_MASK,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
.cpsr_val = SVC_MODE,
.fn = kgdb_compiled_brk_fn
};
+static struct undef_hook kgdb_compiled_brkpt_thumb_hook = {
+ .instr_mask = 0xffff,
+ .instr_val = KGDB_COMPILED_BREAK & 0xffff,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
+ .cpsr_val = PSR_T_BIT | SVC_MODE,
+ .fn = kgdb_compiled_brk_fn
+};
+
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;
@@ -210,8 +226,10 @@ int kgdb_arch_init(void)
if (ret != 0)
return ret;
- register_undef_hook(&kgdb_brkpt_hook);
- register_undef_hook(&kgdb_compiled_brkpt_hook);
+ register_undef_hook(&kgdb_brkpt_arm_hook);
+ register_undef_hook(&kgdb_brkpt_thumb_hook);
+ register_undef_hook(&kgdb_compiled_brkpt_arm_hook);
+ register_undef_hook(&kgdb_compiled_brkpt_thumb_hook);
return 0;
}
@@ -224,8 +242,10 @@ int kgdb_arch_init(void)
*/
void kgdb_arch_exit(void)
{
- unregister_undef_hook(&kgdb_brkpt_hook);
- unregister_undef_hook(&kgdb_compiled_brkpt_hook);
+ unregister_undef_hook(&kgdb_brkpt_arm_hook);
+ unregister_undef_hook(&kgdb_brkpt_thumb_hook);
+ unregister_undef_hook(&kgdb_compiled_brkpt_arm_hook);
+ unregister_undef_hook(&kgdb_compiled_brkpt_thumb_hook);
unregister_die_notifier(&kgdb_notifier);
}
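
The split into *_arm_hook/*_thumb_hook pairs is needed because the hooks now match on PSR_T_BIT as well: a Thumb-state kernel breakpoint traps with the T bit set and is a 16-bit instruction, so only 16 instruction bits are compared. Roughly, the undefined-instruction core (call_undef_hook() in traps.c) selects a hook with a test like the simplified sketch below:

#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

/* Simplified sketch of the hook-matching test applied to a trapped
 * instruction: both the instruction bits and the saved CPSR (mode and,
 * now, the Thumb bit) must match before the hook's fn is called. */
static bool undef_hook_matches(const struct undef_hook *hook,
			       unsigned int instr, struct pt_regs *regs)
{
	return (instr & hook->instr_mask) == hook->instr_val &&
	       (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val;
}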
diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
new file mode 100644
index 000000000000..0dcefc36fb7a
--- /dev/null
+++ b/arch/arm/kernel/spectre.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bpf.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+
+#include <asm/spectre.h>
+
+static bool _unprivileged_ebpf_enabled(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ return !sysctl_unprivileged_bpf_disabled;
+#else
+ return false;
+#endif
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+static unsigned int spectre_v2_state;
+static unsigned int spectre_v2_methods;
+
+void spectre_v2_update_state(unsigned int state, unsigned int method)
+{
+ if (state > spectre_v2_state)
+ spectre_v2_state = state;
+ spectre_v2_methods |= method;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const char *method;
+
+ if (spectre_v2_state == SPECTRE_UNAFFECTED)
+ return sprintf(buf, "%s\n", "Not affected");
+
+ if (spectre_v2_state != SPECTRE_MITIGATED)
+ return sprintf(buf, "%s\n", "Vulnerable");
+
+ if (_unprivileged_ebpf_enabled())
+ return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
+ switch (spectre_v2_methods) {
+ case SPECTRE_V2_METHOD_BPIALL:
+ method = "Branch predictor hardening";
+ break;
+
+ case SPECTRE_V2_METHOD_ICIALLU:
+ method = "I-cache invalidation";
+ break;
+
+ case SPECTRE_V2_METHOD_SMC:
+ case SPECTRE_V2_METHOD_HVC:
+ method = "Firmware call";
+ break;
+
+ case SPECTRE_V2_METHOD_LOOP8:
+ method = "History overwrite";
+ break;
+
+ default:
+ method = "Multiple mitigations";
+ break;
+ }
+
+ return sprintf(buf, "Mitigation: %s\n", method);
+}
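
spectre_v2_update_state() is only an accumulator: the CPU-specific mitigation code is expected to report what it applied, and the sysfs handler above decodes the combined state and methods. A hypothetical caller, purely for illustration (the function and constants exist in this series; the caller itself does not):

#include <asm/spectre.h>

/* Hypothetical: a per-CPU fixup that applied the BPIALL-based hardening
 * would record it like this, and the spectre_v2 sysfs file would then
 * report "Mitigation: Branch predictor hardening". */
static void example_report_spectre_v2(void)
{
	spectre_v2_update_state(SPECTRE_MITIGATED, SPECTRE_V2_METHOD_BPIALL);
}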
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index da04ed85855a..cae4a748811f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -30,6 +30,7 @@
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
+#include <asm/spectre.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
@@ -789,10 +790,59 @@ static inline void __init kuser_init(void *vectors)
}
#endif
+#ifndef CONFIG_CPU_V7M
+static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
+{
+ memcpy(vma, lma_start, lma_end - lma_start);
+}
+
+static void flush_vectors(void *vma, size_t offset, size_t size)
+{
+ unsigned long start = (unsigned long)vma + offset;
+ unsigned long end = start + size;
+
+ flush_icache_range(start, end);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+int spectre_bhb_update_vectors(unsigned int method)
+{
+ extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
+ extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
+ void *vec_start, *vec_end;
+
+ if (system_state >= SYSTEM_FREEING_INITMEM) {
+ pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
+ smp_processor_id());
+ return SPECTRE_VULNERABLE;
+ }
+
+ switch (method) {
+ case SPECTRE_V2_METHOD_LOOP8:
+ vec_start = __vectors_bhb_loop8_start;
+ vec_end = __vectors_bhb_loop8_end;
+ break;
+
+ case SPECTRE_V2_METHOD_BPIALL:
+ vec_start = __vectors_bhb_bpiall_start;
+ vec_end = __vectors_bhb_bpiall_end;
+ break;
+
+ default:
+ pr_err("CPU%u: unknown Spectre BHB state %d\n",
+ smp_processor_id(), method);
+ return SPECTRE_VULNERABLE;
+ }
+
+ copy_from_lma(vectors_page, vec_start, vec_end);
+ flush_vectors(vectors_page, 0, vec_end - vec_start);
+
+ return SPECTRE_MITIGATED;
+}
+#endif
+
void __init early_trap_init(void *vectors_base)
{
-#ifndef CONFIG_CPU_V7M
- unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
unsigned i;
@@ -813,17 +863,20 @@ void __init early_trap_init(void *vectors_base)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
- memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+ copy_from_lma(vectors_base, __vectors_start, __vectors_end);
+ copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
kuser_init(vectors_base);
- flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+ flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
+}
#else /* ifndef CONFIG_CPU_V7M */
+void __init early_trap_init(void *vectors_base)
+{
/*
* on V7-M there is no need to copy the vector table to a dedicated
* memory area. The address is configurable and so a table in the kernel
* image can be used.
*/
-#endif
}
+#endif
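
spectre_bhb_update_vectors() only installs the alternative vector table; choosing the method and feeding the result back into the reporting in spectre.c is left to the CPU-specific code. An illustrative glue sketch with a hypothetical caller name (the two functions it uses are the ones added in this diff and in spectre.c):

#include <asm/spectre.h>

/* Illustrative only: enable the loop8 BHB hardening for this CPU and
 * record the outcome so the sysfs reporting in spectre.c reflects it. */
static void example_enable_spectre_bhb_loop8(void)
{
	int state = spectre_bhb_update_vectors(SPECTRE_V2_METHOD_LOOP8);

	if (state == SPECTRE_MITIGATED)
		spectre_v2_update_state(state, SPECTRE_V2_METHOD_LOOP8);
}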