Diffstat (limited to 'arch/arm64/kernel/process.c')
-rw-r--r--  arch/arm64/kernel/process.c | 482
1 file changed, 374 insertions(+), 108 deletions(-)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c8989b999250..fba7ca102a8c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -6,9 +6,6 @@
* Copyright (C) 1996-2000 Russell King - Converted to ARM.
* Copyright (C) 2012 ARM Ltd.
*/
-
-#include <stdarg.h>
-
#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
@@ -43,13 +40,16 @@
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>
+#include <linux/stacktrace.h>
#include <asm/alternative.h>
+#include <asm/arch_timer.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
+#include <asm/gcs.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
@@ -60,7 +60,7 @@
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
@@ -71,7 +71,7 @@ void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
{
cpu_die();
}
@@ -113,8 +113,7 @@ void machine_power_off(void)
{
local_irq_disable();
smp_send_stop();
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
}
/*
@@ -163,7 +162,7 @@ static void print_pstate(struct pt_regs *regs)
u64 pstate = regs->pstate;
if (compat_user_mode(regs)) {
- printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
+ printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
pstate,
pstate & PSR_AA32_N_BIT ? 'N' : 'n',
pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
@@ -174,12 +173,14 @@ static void print_pstate(struct pt_regs *regs)
pstate & PSR_AA32_E_BIT ? "BE" : "LE",
pstate & PSR_AA32_A_BIT ? 'A' : 'a',
pstate & PSR_AA32_I_BIT ? 'I' : 'i',
- pstate & PSR_AA32_F_BIT ? 'F' : 'f');
+ pstate & PSR_AA32_F_BIT ? 'F' : 'f',
+ pstate & PSR_AA32_DIT_BIT ? '+' : '-',
+ pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
} else {
const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
PSR_BTYPE_SHIFT];
- printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n",
+ printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
pstate,
pstate & PSR_N_BIT ? 'N' : 'n',
pstate & PSR_Z_BIT ? 'Z' : 'z',
@@ -192,6 +193,8 @@ static void print_pstate(struct pt_regs *regs)
pstate & PSR_PAN_BIT ? '+' : '-',
pstate & PSR_UAO_BIT ? '+' : '-',
pstate & PSR_TCO_BIT ? '+' : '-',
+ pstate & PSR_DIT_BIT ? '+' : '-',
+ pstate & PSR_SSBS_BIT ? '+' : '-',
btype_str);
}
}
@@ -216,7 +219,7 @@ void __show_regs(struct pt_regs *regs)
if (!user_mode(regs)) {
printk("pc : %pS\n", (void *)regs->pc);
- printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
+ printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
} else {
printk("pc : %016llx\n", regs->pc);
printk("lr : %016llx\n", lr);
@@ -225,7 +228,7 @@ void __show_regs(struct pt_regs *regs)
printk("sp : %016llx\n", sp);
if (system_uses_irq_prio_masking())
- printk("pmr_save: %08llx\n", regs->pmr_save);
+ printk("pmr: %08x\n", regs->pmr);
i = top_reg;
@@ -248,6 +251,8 @@ void show_regs(struct pt_regs *regs)
static void tls_thread_flush(void)
{
write_sysreg(0, tpidr_el0);
+ if (system_supports_tpidr2())
+ write_sysreg_s(0, SYS_TPIDR2_EL0);
if (is_compat_task()) {
current->thread.uw.tp_value = 0;
@@ -268,16 +273,69 @@ static void flush_tagged_addr_state(void)
clear_thread_flag(TIF_TAGGED_ADDR);
}
+static void flush_poe(void)
+{
+ if (!system_supports_poe())
+ return;
+
+ write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
+}
+
+#ifdef CONFIG_ARM64_GCS
+
+static void flush_gcs(void)
+{
+ if (!system_supports_gcs())
+ return;
+
+ current->thread.gcspr_el0 = 0;
+ current->thread.gcs_base = 0;
+ current->thread.gcs_size = 0;
+ current->thread.gcs_el0_mode = 0;
+ write_sysreg_s(GCSCRE0_EL1_nTR, SYS_GCSCRE0_EL1);
+ write_sysreg_s(0, SYS_GCSPR_EL0);
+}
+
+static int copy_thread_gcs(struct task_struct *p,
+ const struct kernel_clone_args *args)
+{
+ unsigned long gcs;
+
+ if (!system_supports_gcs())
+ return 0;
+
+ p->thread.gcs_base = 0;
+ p->thread.gcs_size = 0;
+
+ p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
+ p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;
+
+ gcs = gcs_alloc_thread_stack(p, args);
+ if (IS_ERR_VALUE(gcs))
+ return PTR_ERR((void *)gcs);
+
+ return 0;
+}
+
+#else
+
+static void flush_gcs(void) { }
+static int copy_thread_gcs(struct task_struct *p,
+ const struct kernel_clone_args *args)
+{
+ return 0;
+}
+
+#endif
+
void flush_thread(void)
{
fpsimd_flush_thread();
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
flush_tagged_addr_state();
-}
-
-void release_thread(struct task_struct *dead_task)
-{
+ flush_poe();
+ flush_gcs();
}
void arch_release_task_struct(struct task_struct *tsk)
@@ -287,24 +345,34 @@ void arch_release_task_struct(struct task_struct *tsk)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- if (current->mm)
- fpsimd_preserve_current_state();
- *dst = *src;
+ /*
+ * The current/src task's FPSIMD state may or may not be live, and may
+ * have been altered by ptrace after entry to the kernel. Save the
+ * effective FPSIMD state so that this will be copied into dst.
+ */
+ fpsimd_save_and_flush_current_state();
+ fpsimd_sync_from_effective_state(src);
- /* We rely on the above assignment to initialize dst's thread_flags: */
- BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+ *dst = *src;
/*
- * Detach src's sve_state (if any) from dst so that it does not
- * get erroneously used or freed prematurely. dst's sve_state
- * will be allocated on demand later on if dst uses SVE.
- * For consistency, also clear TIF_SVE here: this could be done
- * later in copy_process(), but to avoid tripping up future
- * maintainers it is best not to leave TIF_SVE and sve_state in
- * an inconsistent state, even temporarily.
+ * Drop stale reference to src's sve_state and convert dst to
+ * non-streaming FPSIMD mode.
*/
+ dst->thread.fp_type = FP_STATE_FPSIMD;
dst->thread.sve_state = NULL;
clear_tsk_thread_flag(dst, TIF_SVE);
+ task_smstop_sm(dst);
+
+ /*
+ * Drop stale reference to src's sme_state and ensure dst has ZA
+ * disabled.
+ *
+ * When necessary, ZA will be inherited later in copy_thread_za().
+ */
+ dst->thread.sme_state = NULL;
+ clear_tsk_thread_flag(dst, TIF_SME);
+ dst->thread.svcr &= ~SVCR_ZA_MASK;
/* clear any pending asynchronous tag fault raised by the parent */
clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
@@ -312,12 +380,40 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
+static int copy_thread_za(struct task_struct *dst, struct task_struct *src)
+{
+ if (!thread_za_enabled(&src->thread))
+ return 0;
+
+ dst->thread.sve_state = kzalloc(sve_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.sve_state)
+ return -ENOMEM;
+
+ dst->thread.sme_state = kmemdup(src->thread.sme_state,
+ sme_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.sme_state) {
+ kfree(dst->thread.sve_state);
+ dst->thread.sve_state = NULL;
+ return -ENOMEM;
+ }
+
+ set_tsk_thread_flag(dst, TIF_SME);
+ dst->thread.svcr |= SVCR_ZA_MASK;
+
+ return 0;
+}
+
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stk_sz, struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ u64 clone_flags = args->flags;
+ unsigned long stack_start = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
+ int ret;
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
@@ -332,7 +428,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
ptrauth_thread_init_kernel(p);
- if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
+ if (likely(!args->fn)) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
@@ -342,6 +438,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
*/
*task_user_tls(p) = read_sysreg(tpidr_el0);
+ if (system_supports_poe())
+ p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
+
if (stack_start) {
if (is_compat_thread(task_thread_info(p)))
childregs->compat_sp = stack_start;
@@ -350,11 +449,43 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
}
/*
+ * Due to the AAPCS64 "ZA lazy saving scheme", PSTATE.ZA and
+ * TPIDR2 need to be manipulated as a pair, and either both
+ * need to be inherited or both need to be reset.
+ *
+ * Within a process, child threads must not inherit their
+ * parent's TPIDR2 value or they may clobber their parent's
+ * stack at some later point.
+ *
+ * When a process is fork()'d, the child must inherit ZA and
+ * TPIDR2 from its parent in case there was dormant ZA state.
+ *
+ * Use CLONE_VM to determine when the child will share the
+ * address space with the parent, and cannot safely inherit the
+ * state.
+ */
+ if (system_supports_sme()) {
+ if (!(clone_flags & CLONE_VM)) {
+ p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
+ ret = copy_thread_za(p, current);
+ if (ret)
+ return ret;
+ } else {
+ p->thread.tpidr2_el0 = 0;
+ WARN_ON_ONCE(p->thread.svcr & SVCR_ZA_MASK);
+ }
+ }
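
To make the inheritance rule above concrete: a minimal userspace sketch
(illustrative only, not part of the patch; assumes an SME-capable system,
with TPIDR2_EL0 accessed via its generic s3_3_c13_c0_5 encoding):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/wait.h>
#include <asm/hwcap.h>

#ifndef HWCAP2_SME
#define HWCAP2_SME	(1 << 23)
#endif

/* TPIDR2_EL0 is op0=3, op1=3, CRn=13, CRm=0, op2=5 */
static uint64_t read_tpidr2(void)
{
	uint64_t val;
	asm volatile("mrs %0, s3_3_c13_c0_5" : "=r"(val));
	return val;
}

static void write_tpidr2(uint64_t val)
{
	asm volatile("msr s3_3_c13_c0_5, %0" : : "r"(val));
}

int main(void)
{
	if (!(getauxval(AT_HWCAP2) & HWCAP2_SME))
		return 1;	/* no SME: TPIDR2 accesses would trap */

	write_tpidr2(0x1234);

	if (fork() == 0) {
		/* no CLONE_VM: the child sees the parent's value,
		 * whereas a new thread would start with it zeroed */
		printf("child TPIDR2: %#llx\n",
		       (unsigned long long)read_tpidr2());
		_exit(0);
	}
	wait(NULL);
	return 0;
}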
+
+ /*
* If a TLS pointer was passed to clone, use it for the new
* thread.
*/
if (clone_flags & CLONE_SETTLS)
p->thread.uw.tp_value = tls;
+
+ ret = copy_thread_gcs(p, args);
+ if (ret != 0)
+ return ret;
} else {
/*
* A kthread has no context to ERET to, so ensure any buggy
@@ -365,9 +496,13 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
*/
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
+ childregs->stackframe.type = FRAME_META_TYPE_FINAL;
+
+ p->thread.cpu_context.x19 = (unsigned long)args->fn;
+ p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
- p->thread.cpu_context.x19 = stack_start;
- p->thread.cpu_context.x20 = stk_sz;
+ if (system_supports_poe())
+ p->thread.por_el0 = POR_EL0_INIT;
}
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p->thread.cpu_context.sp = (unsigned long)childregs;
@@ -375,7 +510,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
* For the benefit of the unwinder, set up childregs->stackframe
* as the final frame for the new task.
*/
- p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
+ p->thread.cpu_context.fp = (unsigned long)&childregs->stackframe;
ptrace_hw_copy_thread(p);
@@ -385,6 +520,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
void tls_preserve_current_state(void)
{
*task_user_tls(current) = read_sysreg(tpidr_el0);
+ if (system_supports_tpidr2() && !is_compat_task())
+ current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
}
static void tls_thread_switch(struct task_struct *next)
@@ -393,10 +530,12 @@ static void tls_thread_switch(struct task_struct *next)
if (is_compat_thread(task_thread_info(next)))
write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
- else if (!arm64_kernel_unmapped_at_el0())
+ else
write_sysreg(0, tpidrro_el0);
write_sysreg(*task_user_tls(next), tpidr_el0);
+ if (system_supports_tpidr2())
+ write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
}
/*
@@ -416,7 +555,7 @@ static void ssbs_thread_switch(struct task_struct *next)
* If all CPUs implement the SSBS extension, then we just need to
* context-switch the PSTATE field.
*/
- if (cpus_have_const_cap(ARM64_SSBS))
+ if (alternative_has_cap_unlikely(ARM64_SSBS))
return;
spectre_v4_enable_task_mitigation(next);
@@ -436,48 +575,118 @@ static void entry_task_switch(struct task_struct *next)
__this_cpu_write(__entry_task, next);
}
-/*
- * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
- * Assuming the virtual counter is enabled at the beginning of times:
- *
- * - disable access when switching from a 64bit task to a 32bit task
- * - enable access when switching from a 32bit task to a 64bit task
- */
-static void erratum_1418040_thread_switch(struct task_struct *prev,
- struct task_struct *next)
+#ifdef CONFIG_ARM64_GCS
+
+void gcs_preserve_current_state(void)
{
- bool prev32, next32;
- u64 val;
+ current->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
+}
- if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
+static void gcs_thread_switch(struct task_struct *next)
+{
+ if (!system_supports_gcs())
return;
- prev32 = is_compat_thread(task_thread_info(prev));
- next32 = is_compat_thread(task_thread_info(next));
+ /* GCSPR_EL0 is always readable */
+ gcs_preserve_current_state();
+ write_sysreg_s(next->thread.gcspr_el0, SYS_GCSPR_EL0);
- if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
- return;
+ if (current->thread.gcs_el0_mode != next->thread.gcs_el0_mode)
+ gcs_set_el0_mode(next);
+
+ /*
+ * Ensure that GCS memory effects of the 'prev' thread are
+ * ordered before other memory accesses with release semantics
+ * (or preceded by a DMB) on the current PE. In addition, any
+ * memory accesses with acquire semantics (or succeeded by a
+ * DMB) are ordered before GCS memory effects of the 'next'
+ * thread. This will ensure that the GCS memory effects are
+ * visible to other PEs in case of migration.
+ */
+ if (task_gcs_el0_enabled(current) || task_gcs_el0_enabled(next))
+ gcsb_dsync();
+}
+
+#else
- val = read_sysreg(cntkctl_el1);
+static void gcs_thread_switch(struct task_struct *next)
+{
+}
+
+#endif
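
For context, EL0 code opts in to the GCS state switched above via the
generic shadow-stack prctls. A hedged sketch (constants per recent
<linux/prctl.h>, guarded here in case the build headers predate them):

#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>

#ifndef PR_SET_SHADOW_STACK_STATUS
#define PR_GET_SHADOW_STACK_STATUS	74
#define PR_SET_SHADOW_STACK_STATUS	75
#define PR_SHADOW_STACK_ENABLE		(1UL << 0)
#endif

int main(void)
{
	unsigned long status;

	if (prctl(PR_GET_SHADOW_STACK_STATUS, &status, 0, 0, 0)) {
		perror("no GCS support");
		return 1;
	}

	/* from here on, procedure returns are checked against the GCS */
	if (prctl(PR_SET_SHADOW_STACK_STATUS,
		  status | PR_SHADOW_STACK_ENABLE, 0, 0, 0)) {
		perror("PR_SET_SHADOW_STACK_STATUS");
		return 1;
	}

	printf("GCS enabled\n");

	/* main()'s own return address is not on the new GCS, so do not
	 * return from the enabling function; exit directly instead */
	_exit(0);
}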
+
+/*
+ * Handle sysreg updates for ARM erratum 1418040 which affects the 32bit view of
+ * CNTVCT, various other errata which require trapping all CNTVCT{,_EL0}
+ * accesses and prctl(PR_SET_TSC). Ensure access is disabled iff a workaround is
+ * required or PR_TSC_SIGSEGV is set.
+ */
+static void update_cntkctl_el1(struct task_struct *next)
+{
+ struct thread_info *ti = task_thread_info(next);
+
+ if (test_ti_thread_flag(ti, TIF_TSC_SIGSEGV) ||
+ has_erratum_handler(read_cntvct_el0) ||
+ (IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+ this_cpu_has_cap(ARM64_WORKAROUND_1418040) &&
+ is_compat_thread(ti)))
+ sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
+ else
+ sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
+}
- if (!next32)
- val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+static void cntkctl_thread_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ if ((read_ti_thread_flags(task_thread_info(prev)) &
+ (_TIF_32BIT | _TIF_TSC_SIGSEGV)) !=
+ (read_ti_thread_flags(task_thread_info(next)) &
+ (_TIF_32BIT | _TIF_TSC_SIGSEGV)))
+ update_cntkctl_el1(next);
+}
+
+static int do_set_tsc_mode(unsigned int val)
+{
+ bool tsc_sigsegv;
+
+ if (val == PR_TSC_SIGSEGV)
+ tsc_sigsegv = true;
+ else if (val == PR_TSC_ENABLE)
+ tsc_sigsegv = false;
else
- val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
+ return -EINVAL;
+
+ preempt_disable();
+ update_thread_flag(TIF_TSC_SIGSEGV, tsc_sigsegv);
+ update_cntkctl_el1(current);
+ preempt_enable();
- write_sysreg(val, cntkctl_el1);
+ return 0;
}
-static void compat_thread_switch(struct task_struct *next)
+static void permission_overlay_switch(struct task_struct *next)
{
- if (!is_compat_thread(task_thread_info(next)))
+ if (!system_supports_poe())
return;
- if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
- set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
+ current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
+ if (current->thread.por_el0 != next->thread.por_el0) {
+ write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
+ /*
+ * No ISB required as we can tolerate spurious Overlay faults -
+ * the fault handler will check again based on the new value
+ * of POR_EL0.
+ */
+ }
}
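
POR_EL0 is the register behind the generic protection-keys API on arm64,
which is why it has to be saved and restored eagerly here. A hedged
userspace sketch using the glibc pkey wrappers (assumes a POE-capable
CPU and kernel):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pkey;

	if (p == MAP_FAILED)
		return 1;

	pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
	if (pkey < 0) {
		perror("pkey_alloc");	/* kernel or CPU without POE */
		return 1;
	}

	/* tag the mapping with the key: this thread's POR_EL0 bits for
	 * the key now deny writes, so "p[0] = 1" would SIGSEGV */
	pkey_mprotect(p, page, PROT_READ | PROT_WRITE, pkey);

	printf("read ok: %d\n", p[0]);
	return 0;
}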
-static void update_sctlr_el1(u64 sctlr)
+/*
+ * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
+ * this function must be called with preemption disabled and the update to
+ * sctlr_user must be made in the same preemption disabled block so that
+ * __switch_to() does not see the variable update before the SCTLR_EL1 one.
+ */
+void update_sctlr_el1(u64 sctlr)
{
/*
* EnIA must not be cleared while in the kernel as this is necessary for
@@ -489,23 +698,11 @@ static void update_sctlr_el1(u64 sctlr)
isb();
}
-void set_task_sctlr_el1(u64 sctlr)
-{
- /*
- * __switch_to() checks current->thread.sctlr as an
- * optimisation. Disable preemption so that it does not see
- * the variable update before the SCTLR_EL1 one.
- */
- preempt_disable();
- current->thread.sctlr_user = sctlr;
- update_sctlr_el1(sctlr);
- preempt_enable();
-}
-
/*
* Thread switching.
*/
-__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+__notrace_funcgraph __sched
+struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next)
{
struct task_struct *last;
@@ -516,15 +713,17 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
contextidr_thread_switch(next);
entry_task_switch(next);
ssbs_thread_switch(next);
- erratum_1418040_thread_switch(prev, next);
+ cntkctl_thread_switch(prev, next);
ptrauth_thread_switch_user(next);
- compat_thread_switch(next);
+ permission_overlay_switch(next);
+ gcs_thread_switch(next);
/*
- * Complete any pending TLB or cache maintenance on this CPU in case
- * the thread migrates to a different CPU.
- * This full barrier is also required by the membarrier system
- * call.
+ * Complete any pending TLB or cache maintenance on this CPU in case the
+ * thread migrates to a different CPU. This full barrier is also
+ * required by the membarrier system call. Additionally it makes any
+ * in-progress pgtable writes visible to the table walker; See
+ * emit_pte_barriers().
*/
dsb(ish);
@@ -544,41 +743,68 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
return last;
}
-unsigned long get_wchan(struct task_struct *p)
+struct wchan_info {
+ unsigned long pc;
+ int count;
+};
+
+static bool get_wchan_cb(void *arg, unsigned long pc)
{
- struct stackframe frame;
- unsigned long stack_page, ret = 0;
- int count = 0;
- if (!p || p == current || task_is_running(p))
- return 0;
+ struct wchan_info *wchan_info = arg;
- stack_page = (unsigned long)try_get_task_stack(p);
- if (!stack_page)
- return 0;
+ if (!in_sched_functions(pc)) {
+ wchan_info->pc = pc;
+ return false;
+ }
+ return wchan_info->count++ < 16;
+}
- start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
+unsigned long __get_wchan(struct task_struct *p)
+{
+ struct wchan_info wchan_info = {
+ .pc = 0,
+ .count = 0,
+ };
- do {
- if (unwind_frame(p, &frame))
- goto out;
- if (!in_sched_functions(frame.pc)) {
- ret = frame.pc;
- goto out;
- }
- } while (count++ < 16);
+ if (!try_get_task_stack(p))
+ return 0;
+
+ arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);
-out:
put_task_stack(p);
- return ret;
+
+ return wchan_info.pc;
}
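
__get_wchan() is what /proc/<pid>/wchan reports, so the rework is
directly observable from userspace. A small sketch (illustrative only):

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], buf[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/wchan",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	/* a sleeping task prints the first non-scheduler symbol found by
	 * get_wchan_cb(); a running task (e.g. "self") prints 0 */
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s\n", path, buf);
	fclose(f);
	return 0;
}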
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= get_random_u32_below(PAGE_SIZE);
return sp & ~0xf;
}
+#ifdef CONFIG_COMPAT
+int compat_elf_check_arch(const struct elf32_hdr *hdr)
+{
+ if (!system_supports_32bit_el0())
+ return false;
+
+ if ((hdr)->e_machine != EM_ARM)
+ return false;
+
+ if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
+ return false;
+
+ /*
+ * Prevent execve() of a 32-bit program from a deadline task
+ * if the restricted affinity mask would be inadmissible on an
+ * asymmetric system.
+ */
+ return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+ !dl_task_check_affinity(current, system_32bit_el0_cpumask());
+}
+#endif
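
The same e_machine/EABI test can be reproduced against an arbitrary file
from userspace. A hedged sketch (EM_ARM comes from <elf.h>;
EF_ARM_EABI_MASK is the kernel-side name, defined locally in case libc
spells it differently):

#include <elf.h>
#include <stdio.h>

#ifndef EF_ARM_EABI_MASK
#define EF_ARM_EABI_MASK	0xff000000
#endif

static int is_arm_eabi(const Elf32_Ehdr *hdr)
{
	return hdr->e_machine == EM_ARM &&
	       (hdr->e_flags & EF_ARM_EABI_MASK);
}

int main(int argc, char **argv)
{
	Elf32_Ehdr hdr;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&hdr, sizeof(hdr), 1, f) == 1)
		printf("%s: %s\n", argv[1],
		       is_arm_eabi(&hdr) ? "32-bit ARM EABI"
					 : "not a compat ARM binary");
	fclose(f);
	return 0;
}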
+
/*
* Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
*/
@@ -588,13 +814,26 @@ void arch_setup_new_exec(void)
if (is_compat_task()) {
mmflags = MMCF_AARCH32;
+
+ /*
+ * Restrict the CPU affinity mask for a 32-bit task so that
+ * it contains only 32-bit-capable CPUs.
+ *
+ * From the perspective of the task, this looks similar to
+ * what would happen if the 64-bit-only CPUs were hot-unplugged
+ * at the point of execve(), although we try a bit harder to
+ * honour the cpuset hierarchy.
+ */
if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
- set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ force_compatible_cpus_allowed_ptr(current);
+ } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
+ relax_compatible_cpus_allowed_ptr(current);
}
current->mm->context.flags = mmflags;
ptrauth_thread_init_user();
mte_thread_init_user();
+ do_set_tsc_mode(PR_TSC_ENABLE);
if (task_spec_ssb_noexec(current)) {
arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
@@ -616,8 +855,13 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
if (is_compat_thread(ti))
return -EINVAL;
- if (system_supports_mte())
- valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
+ if (system_supports_mte()) {
+ valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
+ PR_MTE_TAG_MASK;
+
+ if (cpus_have_cap(ARM64_MTE_STORE_ONLY))
+ valid_mask |= PR_MTE_STORE_ONLY;
+ }
if (arg & ~valid_mask)
return -EINVAL;
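
A hedged sketch of a task opting in through the mask validated above
(constants from <linux/prctl.h>; assumes MTE-capable hardware):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* tagged pointers + synchronous tag faults, all 16 tags usable */
	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
			     (0xffffUL << PR_MTE_TAG_SHIFT);

	if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0)) {
		perror("PR_SET_TAGGED_ADDR_CTRL");	/* e.g. no MTE */
		return 1;
	}
	printf("MTE synchronous tag checking enabled\n");
	return 0;
}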
@@ -659,7 +903,7 @@ long get_tagged_addr_ctrl(struct task_struct *task)
* disable it for tasks that already opted in to the relaxed ABI.
*/
-static struct ctl_table tagged_addr_sysctl_table[] = {
+static const struct ctl_table tagged_addr_sysctl_table[] = {
{
.procname = "tagged_addr_disabled",
.mode = 0644,
@@ -669,7 +913,6 @@ static struct ctl_table tagged_addr_sysctl_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
- { }
};
static int __init tagged_addr_init(void)
@@ -703,3 +946,26 @@ int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
return prot;
}
#endif
+
+int get_tsc_mode(unsigned long adr)
+{
+ unsigned int val;
+
+ if (is_compat_task())
+ return -EINVAL;
+
+ if (test_thread_flag(TIF_TSC_SIGSEGV))
+ val = PR_TSC_SIGSEGV;
+ else
+ val = PR_TSC_ENABLE;
+
+ return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+ if (is_compat_task())
+ return -EINVAL;
+
+ return do_set_tsc_mode(val);
+}
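
To exercise the new prctl(PR_SET_TSC) plumbing end to end: on arm64 the
"TSC" being controlled is the virtual counter, so a raw CNTVCT_EL0 read
is what starts faulting. A hedged userspace sketch:

#include <stdio.h>
#include <stdint.h>
#include <sys/prctl.h>

static uint64_t read_cntvct(void)
{
	uint64_t val;
	asm volatile("mrs %0, cntvct_el0" : "=r"(val));
	return val;
}

int main(void)
{
	printf("cntvct: %llu\n", (unsigned long long)read_cntvct());

	/* update_cntkctl_el1() clears the EL0 virtual counter access
	 * enable for this task */
	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0)) {
		perror("PR_SET_TSC");
		return 1;
	}

	read_cntvct();	/* now traps: the task receives SIGSEGV */
	return 0;
}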