Diffstat (limited to 'arch/arm64/kernel/process.c')
-rw-r--r--  arch/arm64/kernel/process.c | 746
1 file changed, 607 insertions(+), 139 deletions(-)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a0f985a6ac50..fba7ca102a8c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -1,35 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/process.c
*
* Original Copyright (C) 1995 Linus Torvalds
* Copyright (C) 1996-2000 Russell King - Converted to ARM.
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-
-#include <stdarg.h>
-
#include <linux/compat.h>
#include <linux/efi.h>
+#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
+#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/nospec.h>
#include <linux/stddef.h>
+#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
@@ -49,20 +39,28 @@
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
+#include <linux/prctl.h>
+#include <linux/stacktrace.h>
#include <asm/alternative.h>
+#include <asm/arch_timer.h>
#include <asm/compat.h>
+#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
+#include <asm/gcs.h>
#include <asm/mmu_context.h>
+#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
@@ -72,25 +70,8 @@ EXPORT_SYMBOL(__stack_chk_guard);
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
-void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-
-/*
- * This is our default idle handler.
- */
-void arch_cpu_idle(void)
-{
- /*
- * This should do all the clock switching and wait for interrupt
- * tricks
- */
- trace_cpu_idle_rcuidle(1, smp_processor_id());
- cpu_do_idle();
- local_irq_enable();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-}
-
#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
{
cpu_die();
}
@@ -103,11 +84,11 @@ void arch_cpu_idle_dead(void)
* to execute e.g. a RAM-based pin loop is not sufficient. This allows the
* kexec'd kernel to use any and all RAM as it sees fit, without having to
* avoid any code or data used by any SW CPU pin loop. The CPU hotplug
- * functionality embodied in disable_nonboot_cpus() to achieve this.
+ * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve this.
*/
void machine_shutdown(void)
{
- disable_nonboot_cpus();
+ smp_shutdown_nonboot_cpus(reboot_cpu);
}
/*
@@ -132,8 +113,7 @@ void machine_power_off(void)
{
local_irq_disable();
smp_send_stop();
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
}
/*
@@ -159,10 +139,7 @@ void machine_restart(char *cmd)
efi_reboot(reboot_mode, NULL);
/* Now call the architecture specific reboot code. */
- if (arm_pm_restart)
- arm_pm_restart(reboot_mode, cmd);
- else
- do_kernel_restart(cmd);
+ do_kernel_restart(cmd);
/*
* Whoops - the architecture was unable to reboot.
@@ -171,12 +148,21 @@ void machine_restart(char *cmd)
while (1);
}
+#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
+static const char *const btypes[] = {
+ bstr(NONE, "--"),
+ bstr( JC, "jc"),
+ bstr( C, "-c"),
+ bstr( J , "j-")
+};
+#undef bstr
+
static void print_pstate(struct pt_regs *regs)
{
u64 pstate = regs->pstate;
if (compat_user_mode(regs)) {
- printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
+ printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
pstate,
pstate & PSR_AA32_N_BIT ? 'N' : 'n',
pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
@@ -187,9 +173,14 @@ static void print_pstate(struct pt_regs *regs)
pstate & PSR_AA32_E_BIT ? "BE" : "LE",
pstate & PSR_AA32_A_BIT ? 'A' : 'a',
pstate & PSR_AA32_I_BIT ? 'I' : 'i',
- pstate & PSR_AA32_F_BIT ? 'F' : 'f');
+ pstate & PSR_AA32_F_BIT ? 'F' : 'f',
+ pstate & PSR_AA32_DIT_BIT ? '+' : '-',
+ pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
} else {
- printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
+ const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
+ PSR_BTYPE_SHIFT];
+
+ printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
pstate,
pstate & PSR_N_BIT ? 'N' : 'n',
pstate & PSR_Z_BIT ? 'Z' : 'z',
@@ -200,7 +191,11 @@ static void print_pstate(struct pt_regs *regs)
pstate & PSR_I_BIT ? 'I' : 'i',
pstate & PSR_F_BIT ? 'F' : 'f',
pstate & PSR_PAN_BIT ? '+' : '-',
- pstate & PSR_UAO_BIT ? '+' : '-');
+ pstate & PSR_UAO_BIT ? '+' : '-',
+ pstate & PSR_TCO_BIT ? '+' : '-',
+ pstate & PSR_DIT_BIT ? '+' : '-',
+ pstate & PSR_SSBS_BIT ? '+' : '-',
+ btype_str);
}
}
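
The bstr macro above builds a designated-initializer table indexed by the PSTATE.BTYPE field. A standalone userspace sketch of the same lookup (not part of the patch; the shift/mask values mirror asm/ptrace.h, where PSR_BTYPE_SHIFT is 10 and PSR_BTYPE_MASK is 0x00000c00):

/*
 * Standalone sketch of the btypes lookup (values mirror asm/ptrace.h).
 */
#include <stdio.h>

#define PSR_BTYPE_SHIFT	10
#define PSR_BTYPE_MASK	(0x3UL << PSR_BTYPE_SHIFT)

#define bstr(val, str) [val] = str
static const char *const btypes[] = {
	bstr(0, "--"),	/* PSR_BTYPE_NONE */
	bstr(1, "jc"),	/* PSR_BTYPE_JC */
	bstr(2, "-c"),	/* PSR_BTYPE_C */
	bstr(3, "j-")	/* PSR_BTYPE_J */
};
#undef bstr

int main(void)
{
	unsigned long pstate = 0x800;	/* BTYPE field == 2 */

	printf("BTYPE=%s\n",
	       btypes[(pstate & PSR_BTYPE_MASK) >> PSR_BTYPE_SHIFT]);
	return 0;
}

The designated initializers keep each string tied to its field value even if the entries are reordered.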
@@ -224,7 +219,7 @@ void __show_regs(struct pt_regs *regs)
if (!user_mode(regs)) {
printk("pc : %pS\n", (void *)regs->pc);
- printk("lr : %pS\n", (void *)lr);
+ printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
} else {
printk("pc : %016llx\n", regs->pc);
printk("lr : %016llx\n", lr);
@@ -232,30 +227,32 @@ void __show_regs(struct pt_regs *regs)
printk("sp : %016llx\n", sp);
+ if (system_uses_irq_prio_masking())
+ printk("pmr: %08x\n", regs->pmr);
+
i = top_reg;
while (i >= 0) {
- printk("x%-2d: %016llx ", i, regs->regs[i]);
- i--;
+ printk("x%-2d: %016llx", i, regs->regs[i]);
- if (i % 2 == 0) {
- pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
- i--;
- }
+ while (i-- % 3)
+ pr_cont(" x%-2d: %016llx", i, regs->regs[i]);
pr_cont("\n");
}
}
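
The reworked loop prints x30 on a line of its own, then the remaining registers three per line. A minimal userspace model of the same control flow (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long long regs[31] = { 0 };
	int i = 30;	/* top_reg for a native (non-compat) task */

	while (i >= 0) {
		printf("x%-2d: %016llx", i, regs[i]);
		/*
		 * Non-zero for the next two registers on the line; zero
		 * (line break) when i was a multiple of 3 before the
		 * decrement.
		 */
		while (i-- % 3)
			printf(" x%-2d: %016llx", i, regs[i]);
		printf("\n");
	}
	return 0;
}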
-void show_regs(struct pt_regs * regs)
+void show_regs(struct pt_regs *regs)
{
__show_regs(regs);
- dump_backtrace(regs, NULL);
+ dump_backtrace(regs, NULL, KERN_DEFAULT);
}
static void tls_thread_flush(void)
{
write_sysreg(0, tpidr_el0);
+ if (system_supports_tpidr2())
+ write_sysreg_s(0, SYS_TPIDR2_EL0);
if (is_compat_task()) {
current->thread.uw.tp_value = 0;
@@ -270,15 +267,75 @@ static void tls_thread_flush(void)
}
}
+static void flush_tagged_addr_state(void)
+{
+ if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
+ clear_thread_flag(TIF_TAGGED_ADDR);
+}
+
+static void flush_poe(void)
+{
+ if (!system_supports_poe())
+ return;
+
+ write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
+}
+
+#ifdef CONFIG_ARM64_GCS
+
+static void flush_gcs(void)
+{
+ if (!system_supports_gcs())
+ return;
+
+ current->thread.gcspr_el0 = 0;
+ current->thread.gcs_base = 0;
+ current->thread.gcs_size = 0;
+ current->thread.gcs_el0_mode = 0;
+ write_sysreg_s(GCSCRE0_EL1_nTR, SYS_GCSCRE0_EL1);
+ write_sysreg_s(0, SYS_GCSPR_EL0);
+}
+
+static int copy_thread_gcs(struct task_struct *p,
+ const struct kernel_clone_args *args)
+{
+ unsigned long gcs;
+
+ if (!system_supports_gcs())
+ return 0;
+
+ p->thread.gcs_base = 0;
+ p->thread.gcs_size = 0;
+
+ p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
+ p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;
+
+ gcs = gcs_alloc_thread_stack(p, args);
+ if (IS_ERR_VALUE(gcs))
+ return PTR_ERR((void *)gcs);
+
+ return 0;
+}
+
+#else
+
+static void flush_gcs(void) { }
+static int copy_thread_gcs(struct task_struct *p,
+ const struct kernel_clone_args *args)
+{
+ return 0;
+}
+
+#endif
+
void flush_thread(void)
{
fpsimd_flush_thread();
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
-}
-
-void release_thread(struct task_struct *dead_task)
-{
+ flush_tagged_addr_state();
+ flush_poe();
+ flush_gcs();
}
void arch_release_task_struct(struct task_struct *tsk)
@@ -286,42 +343,81 @@ void arch_release_task_struct(struct task_struct *tsk)
fpsimd_release_task(tsk);
}
-/*
- * src and dst may temporarily have aliased sve_state after task_struct
- * is copied. We cannot fix this properly here, because src may have
- * live SVE state and dst's thread_info may not exist yet, so tweaking
- * either src's or dst's TIF_SVE is not safe.
- *
- * The unaliasing is done in copy_thread() instead. This works because
- * dst is not schedulable or traceable until both of these functions
- * have been called.
- */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- if (current->mm)
- fpsimd_preserve_current_state();
+ /*
+ * The current/src task's FPSIMD state may or may not be live, and may
+ * have been altered by ptrace after entry to the kernel. Save the
+ * effective FPSIMD state so that this will be copied into dst.
+ */
+ fpsimd_save_and_flush_current_state();
+ fpsimd_sync_from_effective_state(src);
+
*dst = *src;
+ /*
+ * Drop stale reference to src's sve_state and convert dst to
+ * non-streaming FPSIMD mode.
+ */
+ dst->thread.fp_type = FP_STATE_FPSIMD;
+ dst->thread.sve_state = NULL;
+ clear_tsk_thread_flag(dst, TIF_SVE);
+ task_smstop_sm(dst);
+
+ /*
+ * Drop stale reference to src's sme_state and ensure dst has ZA
+ * disabled.
+ *
+ * When necessary, ZA will be inherited later in copy_thread_za().
+ */
+ dst->thread.sme_state = NULL;
+ clear_tsk_thread_flag(dst, TIF_SME);
+ dst->thread.svcr &= ~SVCR_ZA_MASK;
+
+ /* clear any pending asynchronous tag fault raised by the parent */
+ clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
+
+ return 0;
+}
+
+static int copy_thread_za(struct task_struct *dst, struct task_struct *src)
+{
+ if (!thread_za_enabled(&src->thread))
+ return 0;
+
+ dst->thread.sve_state = kzalloc(sve_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.sve_state)
+ return -ENOMEM;
+
+ dst->thread.sme_state = kmemdup(src->thread.sme_state,
+ sme_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.sme_state) {
+ kfree(dst->thread.sve_state);
+ dst->thread.sve_state = NULL;
+ return -ENOMEM;
+ }
+
+ set_tsk_thread_flag(dst, TIF_SME);
+ dst->thread.svcr |= SVCR_ZA_MASK;
+
return 0;
}
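
copy_thread_za() uses the usual two-step allocation pattern: if the second allocation fails, the first is freed and cleared so the destination never holds a stale pointer. A userspace analogue of that unwinding (all names hypothetical; calloc()/malloc() stand in for kzalloc()/kmemdup()):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct za_copy {
	void *sve_state;
	void *sme_state;
};

static int za_copy_state(struct za_copy *dst, const void *src_sme,
			 size_t sve_size, size_t sme_size)
{
	dst->sve_state = calloc(1, sve_size);	/* kzalloc() analogue */
	if (!dst->sve_state)
		return -ENOMEM;

	dst->sme_state = malloc(sme_size);	/* kmemdup() analogue */
	if (!dst->sme_state) {
		/* Unwind the first allocation; leave no stale pointer. */
		free(dst->sve_state);
		dst->sve_state = NULL;
		return -ENOMEM;
	}
	memcpy(dst->sme_state, src_sme, sme_size);

	return 0;
}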
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stk_sz, struct task_struct *p)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ u64 clone_flags = args->flags;
+ unsigned long stack_start = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
+ int ret;
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
/*
- * Unalias p->thread.sve_state (if any) from the parent task
- * and disable discard SVE state for p:
- */
- clear_tsk_thread_flag(p, TIF_SVE);
- p->thread.sve_state = NULL;
-
- /*
* In case p was allocated the same task_struct pointer as some
* other recently-exited task, make sure p is disassociated from
* any cpu that may have run that now-exited task recently.
@@ -330,7 +426,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
*/
fpsimd_flush_task_state(p);
- if (likely(!(p->flags & PF_KTHREAD))) {
+ ptrauth_thread_init_kernel(p);
+
+ if (likely(!args->fn)) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
@@ -340,6 +438,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
*/
*task_user_tls(p) = read_sysreg(tpidr_el0);
+ if (system_supports_poe())
+ p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
+
if (stack_start) {
if (is_compat_thread(task_thread_info(p)))
childregs->compat_sp = stack_start;
@@ -348,26 +449,68 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
}
/*
- * If a TLS pointer was passed to clone (4th argument), use it
- * for the new thread.
+ * Due to the AAPCS64 "ZA lazy saving scheme", PSTATE.ZA and
+ * TPIDR2 need to be manipulated as a pair, and either both
+ * need to be inherited or both need to be reset.
+ *
+ * Within a process, child threads must not inherit their
+ * parent's TPIDR2 value or they may clobber their parent's
+ * stack at some later point.
+ *
+ * When a process is fork()'d, the child must inherit ZA and
+ * TPIDR2 from its parent in case there was dormant ZA state.
+ *
+ * Use CLONE_VM to determine when the child will share the
+ * address space with the parent, and cannot safely inherit the
+ * state.
+ */
+ if (system_supports_sme()) {
+ if (!(clone_flags & CLONE_VM)) {
+ p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
+ ret = copy_thread_za(p, current);
+ if (ret)
+ return ret;
+ } else {
+ p->thread.tpidr2_el0 = 0;
+ WARN_ON_ONCE(p->thread.svcr & SVCR_ZA_MASK);
+ }
+ }
+
+ /*
+ * If a TLS pointer was passed to clone, use it for the new
+ * thread.
*/
if (clone_flags & CLONE_SETTLS)
- p->thread.uw.tp_value = childregs->regs[3];
+ p->thread.uw.tp_value = tls;
+
+ ret = copy_thread_gcs(p, args);
+ if (ret != 0)
+ return ret;
} else {
+ /*
+ * A kthread has no context to ERET to, so ensure any buggy
+ * ERET is treated as an illegal exception return.
+ *
+ * When a user task is created from a kthread, childregs will
+ * be initialized by start_thread() or start_compat_thread().
+ */
memset(childregs, 0, sizeof(struct pt_regs));
- childregs->pstate = PSR_MODE_EL1h;
- if (IS_ENABLED(CONFIG_ARM64_UAO) &&
- cpus_have_const_cap(ARM64_HAS_UAO))
- childregs->pstate |= PSR_UAO_BIT;
+ childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
+ childregs->stackframe.type = FRAME_META_TYPE_FINAL;
- if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
- childregs->pstate |= PSR_SSBS_BIT;
+ p->thread.cpu_context.x19 = (unsigned long)args->fn;
+ p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
- p->thread.cpu_context.x19 = stack_start;
- p->thread.cpu_context.x20 = stk_sz;
+ if (system_supports_poe())
+ p->thread.por_el0 = POR_EL0_INIT;
}
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p->thread.cpu_context.sp = (unsigned long)childregs;
+ /*
+ * For the benefit of the unwinder, set up childregs->stackframe
+ * as the final frame for the new task.
+ */
+ p->thread.cpu_context.fp = (unsigned long)&childregs->stackframe;
ptrace_hw_copy_thread(p);
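
The SME block in copy_thread() above inherits or resets ZA/TPIDR2 based on CLONE_VM. An illustrative restatement of that rule as plain C (not from the patch; fork() passes no CLONE_VM, pthread_create() does):

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>

static bool inherits_za_and_tpidr2(unsigned long clone_flags)
{
	/*
	 * Separate address space (fork): inherit ZA and TPIDR2 so any
	 * dormant lazy-saved ZA state survives. Shared address space
	 * (threads): reset both, or the child could later clobber the
	 * parent's stack through the inherited TPIDR2 pointer.
	 */
	return !(clone_flags & CLONE_VM);
}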
@@ -377,6 +520,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
void tls_preserve_current_state(void)
{
*task_user_tls(current) = read_sysreg(tpidr_el0);
+ if (system_supports_tpidr2() && !is_compat_task())
+ current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
}
static void tls_thread_switch(struct task_struct *next)
@@ -385,21 +530,35 @@ static void tls_thread_switch(struct task_struct *next)
if (is_compat_thread(task_thread_info(next)))
write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
- else if (!arm64_kernel_unmapped_at_el0())
+ else
write_sysreg(0, tpidrro_el0);
write_sysreg(*task_user_tls(next), tpidr_el0);
+ if (system_supports_tpidr2())
+ write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
}
-/* Restore the UAO state depending on next's addr_limit */
-void uao_thread_switch(struct task_struct *next)
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
{
- if (IS_ENABLED(CONFIG_ARM64_UAO)) {
- if (task_thread_info(next)->addr_limit == KERNEL_DS)
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
- else
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
- }
+ /*
+ * Nothing to do for kernel threads, but 'regs' may be junk
+ * (e.g. idle task) so check the flags and bail early.
+ */
+ if (unlikely(next->flags & PF_KTHREAD))
+ return;
+
+ /*
+ * If all CPUs implement the SSBS extension, then we just need to
+ * context-switch the PSTATE field.
+ */
+ if (alternative_has_cap_unlikely(ARM64_SSBS))
+ return;
+
+ spectre_v4_enable_task_mitigation(next);
}
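
The mitigation state that ssbs_thread_switch() maintains is visible to userspace through the documented speculation-control prctl. A hedged sketch (PR_* constants come from <linux/prctl.h>; guarded here in case of older headers):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL	52
#define PR_SPEC_STORE_BYPASS	0
#endif

int main(void)
{
	/* Returns a bitmask of PR_SPEC_* flags, or -1 with errno set. */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  0, 0, 0);

	printf("speculative store bypass state: %#x\n", state);
	return 0;
}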
/*
@@ -416,10 +575,134 @@ static void entry_task_switch(struct task_struct *next)
__this_cpu_write(__entry_task, next);
}
+#ifdef CONFIG_ARM64_GCS
+
+void gcs_preserve_current_state(void)
+{
+ current->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
+}
+
+static void gcs_thread_switch(struct task_struct *next)
+{
+ if (!system_supports_gcs())
+ return;
+
+ /* GCSPR_EL0 is always readable */
+ gcs_preserve_current_state();
+ write_sysreg_s(next->thread.gcspr_el0, SYS_GCSPR_EL0);
+
+ if (current->thread.gcs_el0_mode != next->thread.gcs_el0_mode)
+ gcs_set_el0_mode(next);
+
+ /*
+ * Ensure that GCS memory effects of the 'prev' thread are
+ * ordered before other memory accesses with release semantics
+ * (or preceded by a DMB) on the current PE. In addition, any
+ * memory accesses with acquire semantics (or succeeded by a
+ * DMB) are ordered before GCS memory effects of the 'next'
+ * thread. This will ensure that the GCS memory effects are
+ * visible to other PEs in case of migration.
+ */
+ if (task_gcs_el0_enabled(current) || task_gcs_el0_enabled(next))
+ gcsb_dsync();
+}
+
+#else
+
+static void gcs_thread_switch(struct task_struct *next)
+{
+}
+
+#endif
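
Userspace opts in to GCS through the shadow-stack prctls (PR_SET_SHADOW_STACK_STATUS with PR_SHADOW_STACK_ENABLE), normally very early, e.g. from the dynamic linker, since returning through frames that predate enabling will fault. A hedged sketch that only queries the status (constants from <linux/prctl.h>, guarded for older headers):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_SHADOW_STACK_STATUS
#define PR_GET_SHADOW_STACK_STATUS	74
#define PR_SHADOW_STACK_ENABLE		(1UL << 0)
#endif

int main(void)
{
	unsigned long status;

	if (prctl(PR_GET_SHADOW_STACK_STATUS, &status, 0, 0, 0)) {
		perror("GCS not supported");
		return 1;
	}
	printf("GCS %s\n",
	       (status & PR_SHADOW_STACK_ENABLE) ? "enabled" : "disabled");
	return 0;
}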
+
+/*
+ * Handle sysreg updates for ARM erratum 1418040 which affects the 32bit view of
+ * CNTVCT, various other errata which require trapping all CNTVCT{,_EL0}
+ * accesses and prctl(PR_SET_TSC). Ensure access is disabled iff a workaround is
+ * required or PR_TSC_SIGSEGV is set.
+ */
+static void update_cntkctl_el1(struct task_struct *next)
+{
+ struct thread_info *ti = task_thread_info(next);
+
+ if (test_ti_thread_flag(ti, TIF_TSC_SIGSEGV) ||
+ has_erratum_handler(read_cntvct_el0) ||
+ (IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+ this_cpu_has_cap(ARM64_WORKAROUND_1418040) &&
+ is_compat_thread(ti)))
+ sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
+ else
+ sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
+}
+
+static void cntkctl_thread_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ if ((read_ti_thread_flags(task_thread_info(prev)) &
+ (_TIF_32BIT | _TIF_TSC_SIGSEGV)) !=
+ (read_ti_thread_flags(task_thread_info(next)) &
+ (_TIF_32BIT | _TIF_TSC_SIGSEGV)))
+ update_cntkctl_el1(next);
+}
+
+static int do_set_tsc_mode(unsigned int val)
+{
+ bool tsc_sigsegv;
+
+ if (val == PR_TSC_SIGSEGV)
+ tsc_sigsegv = true;
+ else if (val == PR_TSC_ENABLE)
+ tsc_sigsegv = false;
+ else
+ return -EINVAL;
+
+ preempt_disable();
+ update_thread_flag(TIF_TSC_SIGSEGV, tsc_sigsegv);
+ update_cntkctl_el1(current);
+ preempt_enable();
+
+ return 0;
+}
+
+static void permission_overlay_switch(struct task_struct *next)
+{
+ if (!system_supports_poe())
+ return;
+
+ current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
+ if (current->thread.por_el0 != next->thread.por_el0) {
+ write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
+ /*
+ * No ISB required as we can tolerate spurious Overlay faults -
+ * the fault handler will check again based on the new value
+ * of POR_EL0.
+ */
+ }
+}
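
POR_EL0 backs the generic memory-protection-keys API on arm64, so permission_overlay_switch() is what keeps per-thread pkey permissions intact across a context switch. A hedged userspace sketch using the pkeys wrappers (glibc 2.27+; pkey_alloc() fails cleanly where POE is absent):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);

	if (pkey < 0) {
		perror("pkey_alloc (no POE on this CPU/kernel?)");
		return 1;
	}

	void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* Tag the mapping with the key; writes through it will fault. */
	if (p == MAP_FAILED ||
	    pkey_mprotect(p, page, PROT_READ | PROT_WRITE, pkey))
		perror("map/pkey_mprotect");
	else
		puts("mapping tagged with a write-disabled pkey");
	return 0;
}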
+
+/*
+ * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
+ * this function must be called with preemption disabled and the update to
+ * sctlr_user must be made in the same preemption disabled block so that
+ * __switch_to() does not see the variable update before the SCTLR_EL1 one.
+ */
+void update_sctlr_el1(u64 sctlr)
+{
+ /*
+ * EnIA must not be cleared while in the kernel as this is necessary for
+ * in-kernel PAC. It will be cleared on kernel exit if needed.
+ */
+ sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);
+
+ /* ISB required for the kernel uaccess routines when setting TCF0. */
+ isb();
+}
+
/*
* Thread switching.
*/
-__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+__notrace_funcgraph __sched
+struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next)
{
struct task_struct *last;
@@ -429,75 +712,260 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
entry_task_switch(next);
- uao_thread_switch(next);
- ptrauth_thread_switch(next);
+ ssbs_thread_switch(next);
+ cntkctl_thread_switch(prev, next);
+ ptrauth_thread_switch_user(next);
+ permission_overlay_switch(next);
+ gcs_thread_switch(next);
/*
- * Complete any pending TLB or cache maintenance on this CPU in case
- * the thread migrates to a different CPU.
- * This full barrier is also required by the membarrier system
- * call.
+ * Complete any pending TLB or cache maintenance on this CPU in case the
+ * thread migrates to a different CPU. This full barrier is also
+ * required by the membarrier system call. Additionally it makes any
+ * in-progress pgtable writes visible to the table walker; see
+ * emit_pte_barriers().
*/
dsb(ish);
+ /*
+ * MTE thread switching must happen after the DSB above to ensure that
+ * any asynchronous tag check faults have been logged in the TFSR*_EL1
+ * registers.
+ */
+ mte_thread_switch(next);
+ /* avoid expensive SCTLR_EL1 accesses if no change */
+ if (prev->thread.sctlr_user != next->thread.sctlr_user)
+ update_sctlr_el1(next->thread.sctlr_user);
+
/* the actual thread switch */
last = cpu_switch_to(prev, next);
return last;
}
-unsigned long get_wchan(struct task_struct *p)
+struct wchan_info {
+ unsigned long pc;
+ int count;
+};
+
+static bool get_wchan_cb(void *arg, unsigned long pc)
{
- struct stackframe frame;
- unsigned long stack_page, ret = 0;
- int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
+ struct wchan_info *wchan_info = arg;
+
+ if (!in_sched_functions(pc)) {
+ wchan_info->pc = pc;
+ return false;
+ }
+ return wchan_info->count++ < 16;
+}
- stack_page = (unsigned long)try_get_task_stack(p);
- if (!stack_page)
+unsigned long __get_wchan(struct task_struct *p)
+{
+ struct wchan_info wchan_info = {
+ .pc = 0,
+ .count = 0,
+ };
+
+ if (!try_get_task_stack(p))
return 0;
- frame.fp = thread_saved_fp(p);
- frame.pc = thread_saved_pc(p);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
- do {
- if (unwind_frame(p, &frame))
- goto out;
- if (!in_sched_functions(frame.pc)) {
- ret = frame.pc;
- goto out;
- }
- } while (count ++ < 16);
+ arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);
-out:
put_task_stack(p);
- return ret;
+
+ return wchan_info.pc;
}
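
__get_wchan() now delegates to arch_stack_walk(), whose callback returns false to stop the walk: get_wchan_cb() records the first PC outside the scheduler and otherwise gives up after 16 frames. A standalone model of that callback protocol (the fake walker and address check are illustrative stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct wchan_info {
	unsigned long pc;
	int count;
};

static bool in_sched_functions(unsigned long pc)
{
	return pc < 0x100;	/* stand-in for the real range check */
}

static bool get_wchan_cb(void *arg, unsigned long pc)
{
	struct wchan_info *wi = arg;

	if (!in_sched_functions(pc)) {
		wi->pc = pc;		/* first PC outside the scheduler */
		return false;
	}
	return wi->count++ < 16;	/* give up after 16 sched frames */
}

static void walk(bool (*cb)(void *, unsigned long), void *arg,
		 const unsigned long *pcs, int n)
{
	for (int i = 0; i < n && cb(arg, pcs[i]); i++)
		;
}

int main(void)
{
	const unsigned long pcs[] = { 0x10, 0x20, 0x4000, 0x5000 };
	struct wchan_info wi = { 0, 0 };

	walk(get_wchan_cb, &wi, pcs, 4);
	printf("wchan: %#lx\n", wi.pc);	/* prints 0x4000 */
	return 0;
}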
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= get_random_u32_below(PAGE_SIZE);
return sp & ~0xf;
}
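
The arithmetic here: subtract a random sub-page offset, then round down to 16 bytes as AAPCS64 requires for SP. A trivial userspace restatement (rand() standing in for get_random_u32_below()):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static unsigned long align_stack(unsigned long sp)
{
	sp -= (unsigned long)rand() % PAGE_SIZE;
	return sp & ~0xfUL;	/* keep SP 16-byte aligned */
}

int main(void)
{
	printf("sp=%#lx\n", align_stack(0x7ffffffff000UL));
	return 0;
}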
-unsigned long arch_randomize_brk(struct mm_struct *mm)
+#ifdef CONFIG_COMPAT
+int compat_elf_check_arch(const struct elf32_hdr *hdr)
{
- if (is_compat_task())
- return randomize_page(mm->brk, SZ_32M);
- else
- return randomize_page(mm->brk, SZ_1G);
+ if (!system_supports_32bit_el0())
+ return false;
+
+ if ((hdr)->e_machine != EM_ARM)
+ return false;
+
+ if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
+ return false;
+
+ /*
+ * Prevent execve() of a 32-bit program from a deadline task
+ * if the restricted affinity mask would be inadmissible on an
+ * asymmetric system.
+ */
+ return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+ !dl_task_check_affinity(current, system_32bit_el0_cpumask());
}
+#endif
/*
* Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
*/
void arch_setup_new_exec(void)
{
- current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+ unsigned long mmflags = 0;
+
+ if (is_compat_task()) {
+ mmflags = MMCF_AARCH32;
+
+ /*
+ * Restrict the CPU affinity mask for a 32-bit task so that
+ * it contains only 32-bit-capable CPUs.
+ *
+ * From the perspective of the task, this looks similar to
+ * what would happen if the 64-bit-only CPUs were hot-unplugged
+ * at the point of execve(), although we try a bit harder to
+ * honour the cpuset hierarchy.
+ */
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ force_compatible_cpus_allowed_ptr(current);
+ } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
+ relax_compatible_cpus_allowed_ptr(current);
+ }
+
+ current->mm->context.flags = mmflags;
+ ptrauth_thread_init_user();
+ mte_thread_init_user();
+ do_set_tsc_mode(PR_TSC_ENABLE);
+
+ if (task_spec_ssb_noexec(current)) {
+ arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+ PR_SPEC_ENABLE);
+ }
+}
+
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+/*
+ * Control the relaxed ABI allowing tagged user addresses into the kernel.
+ */
+static unsigned int tagged_addr_disabled;
+
+long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
+{
+ unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
+ struct thread_info *ti = task_thread_info(task);
+
+ if (is_compat_thread(ti))
+ return -EINVAL;
+
+ if (system_supports_mte()) {
+ valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \
+ | PR_MTE_TAG_MASK;
+
+ if (cpus_have_cap(ARM64_MTE_STORE_ONLY))
+ valid_mask |= PR_MTE_STORE_ONLY;
+ }
+
+ if (arg & ~valid_mask)
+ return -EINVAL;
+
+ /*
+ * Do not allow the enabling of the tagged address ABI if globally
+ * disabled via sysctl abi.tagged_addr_disabled.
+ */
+ if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
+ return -EINVAL;
+
+ if (set_mte_ctrl(task, arg) != 0)
+ return -EINVAL;
+
+ update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
+
+ return 0;
+}
+
+long get_tagged_addr_ctrl(struct task_struct *task)
+{
+ long ret = 0;
+ struct thread_info *ti = task_thread_info(task);
+
+ if (is_compat_thread(ti))
+ return -EINVAL;
+
+ if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
+ ret = PR_TAGGED_ADDR_ENABLE;
+
+ ret |= get_mte_ctrl(task);
+
+ return ret;
+}
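
For reference, the userspace side of this interface, as documented in Documentation/arch/arm64/tagged-address-abi.rst: once PR_TAGGED_ADDR_ENABLE is set, syscalls accept tagged user pointers. A hedged sketch (prctl constants guarded in case of older headers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) {
		perror("tagged addr ABI unavailable");
		return 1;
	}

	char *buf = strdup("hello\n");
	/* Insert an arbitrary tag into the top byte (ignored by TBI). */
	char *tagged = (char *)((unsigned long)buf | (0x2aUL << 56));

	write(STDOUT_FILENO, tagged, 6);	/* tagged pointer accepted */
	free(buf);
	return 0;
}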
+
+/*
+ * Global sysctl to disable the tagged user addresses support. This control
+ * only prevents the tagged address ABI enabling via prctl() and does not
+ * disable it for tasks that already opted in to the relaxed ABI.
+ */
+
+static const struct ctl_table tagged_addr_sysctl_table[] = {
+ {
+ .procname = "tagged_addr_disabled",
+ .mode = 0644,
+ .data = &tagged_addr_disabled,
+ .maxlen = sizeof(int),
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+};
+
+static int __init tagged_addr_init(void)
+{
+ if (!register_sysctl("abi", tagged_addr_sysctl_table))
+ return -EINVAL;
+ return 0;
+}
+
+core_initcall(tagged_addr_init);
+#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+#ifdef CONFIG_BINFMT_ELF
+int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
+ bool has_interp, bool is_interp)
+{
+ /*
+ * For dynamically linked executables the interpreter is
+ * responsible for setting PROT_BTI on everything except
+ * itself.
+ */
+ if (is_interp != has_interp)
+ return prot;
+
+ if (!(state->flags & ARM64_ELF_BTI))
+ return prot;
+
+ if (prot & PROT_EXEC)
+ prot |= PROT_BTI;
+
+ return prot;
+}
+#endif
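
The PROT_BTI flag this hook injects is an arm64-specific mmap/mprotect protection bit. A hedged sketch of what the interpreter effectively arranges for each executable segment of a BTI-annotated ELF (PROT_BTI guarded since only the arm64 UAPI headers define it):

#include <stdio.h>
#include <sys/mman.h>

#ifndef PROT_BTI
#define PROT_BTI	0x10	/* arch/arm64/include/uapi/asm/mman.h */
#endif

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_EXEC | PROT_BTI,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		perror("mmap");
	else
		puts("BTI-guarded executable mapping created");
	return 0;
}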
+
+int get_tsc_mode(unsigned long adr)
+{
+ unsigned int val;
+
+ if (is_compat_task())
+ return -EINVAL;
+
+ if (test_thread_flag(TIF_TSC_SIGSEGV))
+ val = PR_TSC_SIGSEGV;
+ else
+ val = PR_TSC_ENABLE;
+
+ return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+ if (is_compat_task())
+ return -EINVAL;
- ptrauth_thread_init_user(current);
+ return do_set_tsc_mode(val);
}
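
The get/set_tsc_mode() pair wires arm64 into the generic prctl(PR_GET_TSC/PR_SET_TSC) interface; the counter in question is the generic timer read via CNTVCT_EL0, with access trapped per update_cntkctl_el1() above. A hedged userspace sketch (arm64-only inline asm; prctl constants guarded for older headers):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TSC
#define PR_SET_TSC	26
#define PR_TSC_ENABLE	1
#define PR_TSC_SIGSEGV	2
#endif

static unsigned long read_cntvct(void)
{
	unsigned long val;

	asm volatile("mrs %0, cntvct_el0" : "=r" (val));
	return val;
}

int main(void)
{
	printf("counter: %lu\n", read_cntvct());

	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0))
		perror("PR_SET_TSC");
	/* A read_cntvct() call here would now raise SIGSEGV. */
	return 0;
}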