Diffstat (limited to 'arch/arm64/kernel/entry-common.c')
-rw-r--r-- | arch/arm64/kernel/entry-common.c | 204
1 file changed, 179 insertions, 25 deletions
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index b260ddc4d3e9..2b0c5925502e 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -8,6 +8,7 @@
 #include <linux/context_tracking.h>
 #include <linux/kasan.h>
 #include <linux/linkage.h>
+#include <linux/livepatch.h>
 #include <linux/lockdep.h>
 #include <linux/ptrace.h>
 #include <linux/resume_user_mode.h>
@@ -132,7 +133,7 @@ static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 	do {
 		local_irq_enable();
 
-		if (thread_flags & _TIF_NEED_RESCHED)
+		if (thread_flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
 			schedule();
 
 		if (thread_flags & _TIF_UPROBE)
@@ -144,6 +145,9 @@ static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 					       (void __user *)NULL, current);
 		}
 
+		if (thread_flags & _TIF_PATCH_PENDING)
+			klp_update_patch_state(current);
+
 		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 			do_signal(regs);
 
@@ -344,7 +348,7 @@ static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
 
 static void cortex_a76_erratum_1463225_svc_handler(void)
 {
-	u32 reg, val;
+	u64 reg, val;
 
 	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
 		return;
@@ -354,7 +358,7 @@ static void cortex_a76_erratum_1463225_svc_handler(void)
 
 	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
 	reg = read_sysreg(mdscr_el1);
-	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+	val = reg | MDSCR_EL1_SS | MDSCR_EL1_KDE;
 	write_sysreg(val, mdscr_el1);
 	asm volatile("msr daifclr, #8");
 	isb();
@@ -393,20 +397,16 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
  * As per the ABI exit SME streaming mode and clear the SVE state not
  * shared with FPSIMD on syscall entry.
  */
-static inline void fp_user_discard(void)
+static inline void fpsimd_syscall_enter(void)
 {
-	/*
-	 * If SME is active then exit streaming mode. If ZA is active
-	 * then flush the SVE registers but leave userspace access to
-	 * both SVE and SME enabled, otherwise disable SME for the
-	 * task and fall through to disabling SVE too. This means
-	 * that after a syscall we never have any streaming mode
-	 * register state to track, if this changes the KVM code will
-	 * need updating.
-	 */
+	/* Ensure PSTATE.SM is clear, but leave PSTATE.ZA as-is. */
 	if (system_supports_sme())
 		sme_smstop_sm();
 
+	/*
+	 * The CPU is not in streaming mode. If non-streaming SVE is not
+	 * supported, there is no SVE state that needs to be discarded.
+	 */
 	if (!system_supports_sve())
 		return;
 
@@ -416,7 +416,56 @@
 		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
 		sve_flush_live(true, sve_vq_minus_one);
 	}
+
+	/*
+	 * Any live non-FPSIMD SVE state has been zeroed. Allow
+	 * fpsimd_save_user_state() to lazily discard SVE state until either
+	 * the live state is unbound or fpsimd_syscall_exit() is called.
+	 */
+	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_FPSIMD);
+}
+
+static __always_inline void fpsimd_syscall_exit(void)
+{
+	if (!system_supports_sve())
+		return;
+
+	/*
+	 * The current task's user FPSIMD/SVE/SME state is now bound to this
+	 * CPU. The fpsimd_last_state.to_save value is either:
+	 *
+	 * - FP_STATE_FPSIMD, if the state has not been reloaded on this CPU
+	 *   since fpsimd_syscall_enter().
+	 *
+	 * - FP_STATE_CURRENT, if the state has been reloaded on this CPU at
+	 *   any point.
+	 *
+	 * Reset this to FP_STATE_CURRENT to stop lazy discarding.
+	 */
+	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
+}
+
+/*
+ * In debug exception context, we explicitly disable preemption despite
+ * having interrupts disabled.
+ * This serves two purposes: it makes it much less likely that we would
+ * accidentally schedule in exception context and it will force a warning
+ * if we somehow manage to schedule by accident.
+ */
+static void debug_exception_enter(struct pt_regs *regs)
+{
+	preempt_disable();
+
+	/* This code is a bit fragile. Test it. */
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
+}
+NOKPROBE_SYMBOL(debug_exception_enter);
+
+static void debug_exception_exit(struct pt_regs *regs)
+{
+	preempt_enable_no_resched();
+}
+NOKPROBE_SYMBOL(debug_exception_exit);
 
 UNHANDLED(el1t, 64, sync)
 UNHANDLED(el1t, 64, irq)
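The to_save hint written above is what makes the discard lazy: fpsimd_syscall_enter() only zeroes the SVE state not shared with FPSIMD, and the eventual save path decides whether the full SVE view can be skipped. As a rough sketch of how a save path can honour that hint (illustrative only; the real fpsimd_save_user_state() lives in arch/arm64/kernel/fpsimd.c and handles more cases):

	/* Illustrative sketch only, not the kernel's fpsimd.c implementation. */
	static void fpsimd_save_user_state_sketch(void)
	{
		struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);

		if (last->to_save == FP_STATE_FPSIMD) {
			/*
			 * fpsimd_syscall_enter() zeroed the SVE state not
			 * shared with FPSIMD, so saving only the 128-bit
			 * FPSIMD view loses nothing and skips the costly
			 * SVE save.
			 */
			fpsimd_save_state(last->st);
			return;
		}

		/* FP_STATE_CURRENT: save whatever format the task is using. */
	}
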
@@ -481,13 +530,51 @@ static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
 	exit_to_kernel_mode(regs);
 }
 
-static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
 {
+	arm64_enter_el1_dbg(regs);
+	debug_exception_enter(regs);
+	do_breakpoint(esr, regs);
+	debug_exception_exit(regs);
+	arm64_exit_el1_dbg(regs);
+}
+
+static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
+{
+	arm64_enter_el1_dbg(regs);
+	if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
+		debug_exception_enter(regs);
+		/*
+		 * After handling a breakpoint, we suspend the breakpoint
+		 * and use single-step to move to the next instruction.
+		 * If we are stepping a suspended breakpoint there's nothing
+		 * more to do: the single-step is complete.
+		 */
+		if (!try_step_suspended_breakpoints(regs))
+			do_el1_softstep(esr, regs);
+		debug_exception_exit(regs);
+	}
+	arm64_exit_el1_dbg(regs);
+}
+
+static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr)
+{
+	/* Watchpoints are the only debug exception to write FAR_EL1 */
 	unsigned long far = read_sysreg(far_el1);
 
 	arm64_enter_el1_dbg(regs);
-	if (!cortex_a76_erratum_1463225_debug_handler(regs))
-		do_debug_exception(far, esr, regs);
+	debug_exception_enter(regs);
+	do_watchpoint(far, esr, regs);
+	debug_exception_exit(regs);
+	arm64_exit_el1_dbg(regs);
+}
+
+static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr)
+{
+	arm64_enter_el1_dbg(regs);
+	debug_exception_enter(regs);
+	do_el1_brk64(esr, regs);
+	debug_exception_exit(regs);
 	arm64_exit_el1_dbg(regs);
 }
@@ -530,10 +617,16 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
 		el1_mops(regs, esr);
 		break;
 	case ESR_ELx_EC_BREAKPT_CUR:
+		el1_breakpt(regs, esr);
+		break;
 	case ESR_ELx_EC_SOFTSTP_CUR:
+		el1_softstp(regs, esr);
+		break;
 	case ESR_ELx_EC_WATCHPT_CUR:
+		el1_watchpt(regs, esr);
+		break;
 	case ESR_ELx_EC_BRK64:
-		el1_dbg(regs, esr);
+		el1_brk64(regs, esr);
 		break;
 	case ESR_ELx_EC_FPAC:
 		el1_fpac(regs, esr);
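The new EL1 handlers share one bracketing discipline, written out abstractly below as a hypothetical helper (not part of the patch). el1_softstp() deviates because the Cortex-A76 erratum check and the suspended-breakpoint step must run first, and el1_watchpt() additionally snapshots FAR_EL1 before entry accounting:

	/* Hypothetical helper showing the shared shape; not in the patch. */
	static void noinstr el1_debug_common(struct pt_regs *regs, unsigned long esr,
					     void (*handler)(unsigned long, struct pt_regs *))
	{
		arm64_enter_el1_dbg(regs);	/* entry accounting (lockdep/RCU) */
		debug_exception_enter(regs);	/* preemption off; warn on schedule */
		handler(esr, regs);		/* e.g. do_breakpoint() or do_el1_brk64() */
		debug_exception_exit(regs);	/* preempt_enable_no_resched() */
		arm64_exit_el1_dbg(regs);
	}
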
@@ -724,14 +817,56 @@ static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
 	exit_to_user_mode(regs);
 }
 
-static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
 {
-	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
+	if (!is_ttbr0_addr(regs->pc))
+		arm64_apply_bp_hardening();
+
+	enter_from_user_mode(regs);
+	debug_exception_enter(regs);
+	do_breakpoint(esr, regs);
+	debug_exception_exit(regs);
+	local_daif_restore(DAIF_PROCCTX);
+	exit_to_user_mode(regs);
+}
+
+static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
+{
+	if (!is_ttbr0_addr(regs->pc))
+		arm64_apply_bp_hardening();
+
+	enter_from_user_mode(regs);
+	/*
+	 * After handling a breakpoint, we suspend the breakpoint
+	 * and use single-step to move to the next instruction.
+	 * If we are stepping a suspended breakpoint there's nothing
+	 * more to do: the single-step is complete.
+	 */
+	if (!try_step_suspended_breakpoints(regs)) {
+		local_daif_restore(DAIF_PROCCTX);
+		do_el0_softstep(esr, regs);
+	}
+	exit_to_user_mode(regs);
+}
+
+static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr)
+{
+	/* Watchpoints are the only debug exception to write FAR_EL1 */
 	unsigned long far = read_sysreg(far_el1);
 
 	enter_from_user_mode(regs);
-	do_debug_exception(far, esr, regs);
+	debug_exception_enter(regs);
+	do_watchpoint(far, esr, regs);
+	debug_exception_exit(regs);
+	local_daif_restore(DAIF_PROCCTX);
+	exit_to_user_mode(regs);
+}
+
+static void noinstr el0_brk64(struct pt_regs *regs, unsigned long esr)
+{
+	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_PROCCTX);
+	do_el0_brk64(esr, regs);
 	exit_to_user_mode(regs);
 }
@@ -739,10 +874,11 @@ static void noinstr el0_svc(struct pt_regs *regs)
 {
 	enter_from_user_mode(regs);
 	cortex_a76_erratum_1463225_svc_handler();
-	fp_user_discard();
+	fpsimd_syscall_enter();
 	local_daif_restore(DAIF_PROCCTX);
 	do_el0_svc(regs);
 	exit_to_user_mode(regs);
+	fpsimd_syscall_exit();
 }
 
 static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
@@ -802,10 +938,16 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
 		el0_gcs(regs, esr);
 		break;
 	case ESR_ELx_EC_BREAKPT_LOW:
+		el0_breakpt(regs, esr);
+		break;
 	case ESR_ELx_EC_SOFTSTP_LOW:
+		el0_softstp(regs, esr);
+		break;
 	case ESR_ELx_EC_WATCHPT_LOW:
+		el0_watchpt(regs, esr);
+		break;
 	case ESR_ELx_EC_BRK64:
-		el0_dbg(regs, esr);
+		el0_brk64(regs, esr);
 		break;
 	case ESR_ELx_EC_FPAC:
 		el0_fpac(regs, esr);
@@ -888,6 +1030,14 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
 	exit_to_user_mode(regs);
 }
 
+static void noinstr el0_bkpt32(struct pt_regs *regs, unsigned long esr)
+{
+	enter_from_user_mode(regs);
+	local_daif_restore(DAIF_PROCCTX);
+	do_bkpt32(esr, regs);
+	exit_to_user_mode(regs);
+}
+
 asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
@@ -922,10 +1072,16 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
 		el0_cp15(regs, esr);
 		break;
 	case ESR_ELx_EC_BREAKPT_LOW:
+		el0_breakpt(regs, esr);
+		break;
 	case ESR_ELx_EC_SOFTSTP_LOW:
+		el0_softstp(regs, esr);
+		break;
 	case ESR_ELx_EC_WATCHPT_LOW:
+		el0_watchpt(regs, esr);
+		break;
 	case ESR_ELx_EC_BKPT32:
-		el0_dbg(regs, esr);
+		el0_bkpt32(regs, esr);
 		break;
 	default:
 		el0_inv(regs, esr);
@@ -953,7 +1109,6 @@ UNHANDLED(el0t, 32, fiq)
 UNHANDLED(el0t, 32, error)
 #endif /* CONFIG_COMPAT */
 
-#ifdef CONFIG_VMAP_STACK
 asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
@@ -962,7 +1117,6 @@ asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
 	arm64_enter_nmi(regs);
 	panic_bad_stack(regs, esr, far);
 }
-#endif /* CONFIG_VMAP_STACK */
 
 #ifdef CONFIG_ARM_SDE_INTERFACE
 asmlinkage noinstr unsigned long
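Finally, the fpsimd_syscall_enter()/fpsimd_syscall_exit() pair brackets the entire syscall, including the return path. The new el0_svc() from the hunk above, re-annotated (the comments are added for this note; the code itself is unchanged):

	static void noinstr el0_svc(struct pt_regs *regs)
	{
		enter_from_user_mode(regs);
		cortex_a76_erratum_1463225_svc_handler();
		fpsimd_syscall_enter();		/* to_save := FP_STATE_FPSIMD */
		local_daif_restore(DAIF_PROCCTX);
		do_el0_svc(regs);		/* a context switch in here may save
						 * only the FPSIMD view, lazily
						 * discarding the zeroed SVE state */
		exit_to_user_mode(regs);
		fpsimd_syscall_exit();		/* to_save := FP_STATE_CURRENT, whether
						 * or not the state was reloaded */
	}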