Diffstat (limited to 'arch/arm64/include/asm')
 arch/arm64/include/asm/acpi.h           |   2
 arch/arm64/include/asm/assembler.h      |   9
 arch/arm64/include/asm/cache.h          |  17
 arch/arm64/include/asm/cpufeature.h     |  28
 arch/arm64/include/asm/debug-monitors.h |  40
 arch/arm64/include/asm/el2_setup.h      |  90
 arch/arm64/include/asm/exception.h      |  14
 arch/arm64/include/asm/gcs.h            |   2
 arch/arm64/include/asm/hwcap.h          |   2
 arch/arm64/include/asm/kgdb.h           |  12
 arch/arm64/include/asm/kprobes.h        |   8
 arch/arm64/include/asm/kvm_emulate.h    |  62
 arch/arm64/include/asm/kvm_host.h       |   9
 arch/arm64/include/asm/memory.h         |   6
 arch/arm64/include/asm/processor.h      |   2
 arch/arm64/include/asm/stacktrace.h     |   6
 arch/arm64/include/asm/sysreg.h         |  16
 arch/arm64/include/asm/system_misc.h    |   4
 arch/arm64/include/asm/thread_info.h    |   5
 arch/arm64/include/asm/traps.h          |   6
 arch/arm64/include/asm/uprobes.h        |  11
 arch/arm64/include/asm/vdso/vsyscall.h  |   7
 22 files changed, 206 insertions(+), 152 deletions(-)
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index a407f9cd549e..c07a58b96329 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -150,7 +150,7 @@ acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
 {}
 #endif
 
-static inline const char *acpi_get_enable_method(int cpu)
+static __always_inline const char *acpi_get_enable_method(int cpu)
 {
 	if (acpi_psci_present())
 		return "psci";
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ad63457a05c5..23be85d93348 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -41,6 +41,11 @@
 /*
  * Save/restore interrupts.
  */
+	.macro	save_and_disable_daif, flags
+	mrs	\flags, daif
+	msr	daifset, #0xf
+	.endm
+
 	.macro	save_and_disable_irq, flags
 	mrs	\flags, daif
 	msr	daifset, #3
@@ -53,7 +58,7 @@
 	.macro	disable_step_tsk, flgs, tmp
 	tbz	\flgs, #TIF_SINGLESTEP, 9990f
 	mrs	\tmp, mdscr_el1
-	bic	\tmp, \tmp, #DBG_MDSCR_SS
+	bic	\tmp, \tmp, #MDSCR_EL1_SS
 	msr	mdscr_el1, \tmp
 	isb	// Take effect before a subsequent clear of DAIF.D
 9990:
@@ -63,7 +68,7 @@
 	.macro	enable_step_tsk, flgs, tmp
 	tbz	\flgs, #TIF_SINGLESTEP, 9990f
 	mrs	\tmp, mdscr_el1
-	orr	\tmp, \tmp, #DBG_MDSCR_SS
+	orr	\tmp, \tmp, #MDSCR_EL1_SS
 	msr	mdscr_el1, \tmp
 9990:
 	.endm
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 99cd6546e72e..09963004ceea 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -87,6 +87,23 @@ int cache_line_size(void);
 
 #define dma_get_cache_alignment	cache_line_size
 
+/* Compress a u64 MPIDR value into 32 bits. */
+static inline u64 arch_compact_of_hwid(u64 id)
+{
+	u64 aff3 = MPIDR_AFFINITY_LEVEL(id, 3);
+
+	/*
+	 * These bits are expected to be RES0. If not, return a value with
+	 * the upper 32 bits set to force the caller to give up on 32 bit
+	 * cache ids.
+	 */
+	if (FIELD_GET(GENMASK_ULL(63, 40), id))
+		return id;
+
+	return (aff3 << 24) | FIELD_GET(GENMASK_ULL(23, 0), id);
+}
+#define arch_compact_of_hwid arch_compact_of_hwid
+
 /*
  * Read the effective value of CTR_EL0.
  *
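Aside: the packing done by arch_compact_of_hwid() above (Aff3 drops from bits 39:32 to bits 31:24, Aff2..Aff0 stay in bits 23:0) can be sanity-checked outside the kernel. A minimal standalone model, with FIELD_GET/GENMASK expanded by hand and a made-up sample MPIDR:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the kernel helper: anything set in bits 63:40 makes
 * the id unusable as a 32-bit cache id, so it is returned unmodified. */
static uint64_t compact_of_hwid(uint64_t id)
{
	uint64_t aff3 = (id >> 32) & 0xff;	/* MPIDR_AFFINITY_LEVEL(id, 3) */

	if (id & 0xffffff0000000000ULL)		/* GENMASK_ULL(63, 40) */
		return id;

	return (aff3 << 24) | (id & 0xffffff);	/* GENMASK_ULL(23, 0) */
}

int main(void)
{
	uint64_t mpidr = 0x0000000100020304ULL;	/* Aff3=1 Aff2=2 Aff1=3 Aff0=4 */

	/* Prints 0x1020304: Aff3 now sits in bits 31:24. */
	printf("compacted: 0x%llx\n", (unsigned long long)compact_of_hwid(mpidr));
	return 0;
}

Note that bits 31:24 of the original MPIDR (the MT and U bits) are deliberately discarded; only the four affinity levels survive the compaction.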
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c4326f1cb917..bf13d676aae2 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -275,6 +275,14 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
 /* Panic when a conflict is detected */
 #define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))
+/*
+ * When paired with SCOPE_LOCAL_CPU, all early CPUs must satisfy the
+ * condition. This is different from SCOPE_SYSTEM where the check is performed
+ * only once at the end of the SMP boot on the sanitised ID registers.
+ * SCOPE_SYSTEM is not suitable for cases where the capability depends on
+ * properties local to a CPU like MIDR_EL1.
+ */
+#define ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS	((u16)BIT(7))
 
 /*
  * CPU errata workarounds that need to be enabled at boot time if one or
@@ -304,6 +312,16 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
 	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	|	\
 	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
+/*
+ * CPU feature detected at boot time and present on all early CPUs. Late CPUs
+ * are permitted to have the feature even if it hasn't been enabled, although
+ * the feature will not be used by Linux in this case. If all early CPUs have
+ * the feature, then every late CPU must have it.
+ */
+#define ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE		\
+	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
+	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	|	\
+	 ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS)
 
 /*
  * CPU feature detected at boot time, on one or more CPUs. A late CPU
@@ -391,6 +409,11 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
 	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
 }
 
+static inline bool cpucap_match_all_early_cpus(const struct arm64_cpu_capabilities *cap)
+{
+	return cap->type & ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS;
+}
+
 /*
  * Generic helper for handling capabilities with multiple (match,enable) pairs
  * of call backs, sharing the same capability bit.
@@ -848,6 +871,11 @@ static inline bool system_supports_pmuv3(void)
 	return cpus_have_final_cap(ARM64_HAS_PMUV3);
 }
 
+static inline bool system_supports_bbml2_noabort(void)
+{
+	return alternative_has_cap_unlikely(ARM64_HAS_BBML2_NOABORT);
+}
+
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
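For context, a capability of the new type would be declared in arm64's capability tables roughly as sketched below. This is a hypothetical illustration, not part of the patch: the entry, the ARM64_HAS_EXAMPLE_FEATURE enum value and the has_example_feature() matches callback are all invented.

/* Hypothetical sketch of a cpufeature.c table entry: the matches callback
 * runs on each early CPU, and with MATCH_ALL_EARLY_CPUS the capability is
 * only enabled if every early CPU matched. */
static const struct arm64_cpu_capabilities example_cap = {
	.desc		= "Example MIDR-dependent feature",
	.capability	= ARM64_HAS_EXAMPLE_FEATURE,	/* invented cap id */
	.type		= ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE,
	.matches	= has_example_feature,		/* invented callback */
};

This is the combination SCOPE_SYSTEM cannot express: a per-CPU property such as MIDR_EL1 is never sanitised into the system-wide ID registers, so the only way to require it everywhere is to check it on each early CPU.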
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 8f6ba31b8658..f5e3ed2420ce 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -13,14 +13,8 @@
 #include <asm/ptrace.h>
 
 /* Low-level stepping controls. */
-#define DBG_MDSCR_SS		(1 << 0)
 #define DBG_SPSR_SS		(1 << 21)
 
-/* MDSCR_EL1 enabling bits */
-#define DBG_MDSCR_KDE		(1 << 13)
-#define DBG_MDSCR_MDE		(1 << 15)
-#define DBG_MDSCR_MASK		~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
-
 #define	DBG_ESR_EVT(x)		(((x) >> 27) & 0x7)
 
 /* AArch64 */
@@ -62,30 +56,6 @@ struct task_struct;
 #define DBG_HOOK_HANDLED	0
 #define DBG_HOOK_ERROR		1
 
-struct step_hook {
-	struct list_head node;
-	int (*fn)(struct pt_regs *regs, unsigned long esr);
-};
-
-void register_user_step_hook(struct step_hook *hook);
-void unregister_user_step_hook(struct step_hook *hook);
-
-void register_kernel_step_hook(struct step_hook *hook);
-void unregister_kernel_step_hook(struct step_hook *hook);
-
-struct break_hook {
-	struct list_head node;
-	int (*fn)(struct pt_regs *regs, unsigned long esr);
-	u16 imm;
-	u16 mask; /* These bits are ignored when comparing with imm */
-};
-
-void register_user_break_hook(struct break_hook *hook);
-void unregister_user_break_hook(struct break_hook *hook);
-
-void register_kernel_break_hook(struct break_hook *hook);
-void unregister_kernel_break_hook(struct break_hook *hook);
-
 u8 debug_monitors_arch(void);
 
 enum dbg_active_el {
@@ -108,17 +78,15 @@ void kernel_rewind_single_step(struct pt_regs *regs);
 void kernel_fastforward_single_step(struct pt_regs *regs);
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-int reinstall_suspended_bps(struct pt_regs *regs);
+bool try_step_suspended_breakpoints(struct pt_regs *regs);
 #else
-static inline int reinstall_suspended_bps(struct pt_regs *regs)
+static inline bool try_step_suspended_breakpoints(struct pt_regs *regs)
 {
-	return -ENODEV;
+	return false;
 }
 #endif
 
-int aarch32_break_handler(struct pt_regs *regs);
-
-void debug_traps_init(void);
+bool try_handle_aarch32_break(struct pt_regs *regs);
 
 #endif	/* __ASSEMBLY */
 #endif	/* __ASM_DEBUG_MONITORS_H */
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index ba5df0df02a4..b959115e9962 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -189,6 +189,28 @@
 .Lskip_set_cptr_\@:
 .endm
 
+/*
+ * Configure BRBE to permit recording cycle counts and branch mispredicts.
+ *
+ * At any EL, to record cycle counts BRBE requires that both BRBCR_EL2.CC=1 and
+ * BRBCR_EL1.CC=1.
+ *
+ * At any EL, to record branch mispredicts BRBE requires that both
+ * BRBCR_EL2.MPRED=1 and BRBCR_EL1.MPRED=1.
+ *
+ * Set {CC,MPRED} in BRBCR_EL2 in case nVHE mode is used and we are
+ * executing in EL1.
+ */
+.macro __init_el2_brbe
+	mrs	x1, id_aa64dfr0_el1
+	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
+	cbz	x1, .Lskip_brbe_\@
+
+	mov_q	x0, BRBCR_ELx_CC | BRBCR_ELx_MPRED
+	msr_s	SYS_BRBCR_EL2, x0
+.Lskip_brbe_\@:
+.endm
+
 /* Disable any fine grained traps */
 .macro __init_el2_fgt
 	mrs	x1, id_aa64mmfr0_el1
@@ -196,20 +218,62 @@
 	cbz	x1, .Lskip_fgt_\@
 
 	mov	x0, xzr
+	mov	x2, xzr
 	mrs	x1, id_aa64dfr0_el1
 	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
 	cmp	x1, #3
 	b.lt	.Lskip_spe_fgt_\@
 	/* Disable PMSNEVFR_EL1 read and write traps */
-	orr	x0, x0, #(1 << 62)
+	orr	x0, x0, #HDFGRTR_EL2_nPMSNEVFR_EL1_MASK
+	orr	x2, x2, #HDFGWTR_EL2_nPMSNEVFR_EL1_MASK
 
 .Lskip_spe_fgt_\@:
+	mrs	x1, id_aa64dfr0_el1
+	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
+	cbz	x1, .Lskip_brbe_fgt_\@
+
+	/*
+	 * Disable read traps for the following registers
+	 *
+	 * [BRBSRC|BRBTGT|BRBINF]_EL1
+	 * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
+	 */
+	orr	x0, x0, #HDFGRTR_EL2_nBRBDATA_MASK
+
+	/*
+	 * Disable write traps for the following registers
+	 *
+	 * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
+	 */
+	orr	x2, x2, #HDFGWTR_EL2_nBRBDATA_MASK
+
+	/* Disable read and write traps for [BRBCR|BRBFCR]_EL1 */
+	orr	x0, x0, #HDFGRTR_EL2_nBRBCTL_MASK
+	orr	x2, x2, #HDFGWTR_EL2_nBRBCTL_MASK
+
+	/* Disable read traps for BRBIDR0_EL1 */
+	orr	x0, x0, #HDFGRTR_EL2_nBRBIDR_MASK
+
+.Lskip_brbe_fgt_\@:
 .Lset_debug_fgt_\@:
 	msr_s	SYS_HDFGRTR_EL2, x0
-	msr_s	SYS_HDFGWTR_EL2, x0
+	msr_s	SYS_HDFGWTR_EL2, x2
 
 	mov	x0, xzr
+	mov	x2, xzr
+
+	mrs	x1, id_aa64dfr0_el1
+	ubfx	x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
+	cbz	x1, .Lskip_brbe_insn_fgt_\@
+
+	/* Disable traps for BRBIALL instruction */
+	orr	x2, x2, #HFGITR_EL2_nBRBIALL_MASK
+
+	/* Disable traps for BRBINJ instruction */
+	orr	x2, x2, #HFGITR_EL2_nBRBINJ_MASK
+
+.Lskip_brbe_insn_fgt_\@:
 	mrs	x1, id_aa64pfr1_el1
 	ubfx	x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
 	cbz	x1, .Lskip_sme_fgt_\@
@@ -250,7 +314,7 @@
 .Lset_fgt_\@:
 	msr_s	SYS_HFGRTR_EL2, x0
 	msr_s	SYS_HFGWTR_EL2, x0
-	msr_s	SYS_HFGITR_EL2, xzr
+	msr_s	SYS_HFGITR_EL2, x2
 
 	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
 	ubfx	x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
@@ -287,17 +351,6 @@
 .Lskip_fgt2_\@:
 .endm
 
-.macro __init_el2_gcs
-	mrs_s	x1, SYS_ID_AA64PFR1_EL1
-	ubfx	x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
-	cbz	x1, .Lskip_gcs_\@
-
-	/* Ensure GCS is not enabled when we start trying to do BLs */
-	msr_s	SYS_GCSCR_EL1, xzr
-	msr_s	SYS_GCSCRE0_EL1, xzr
-.Lskip_gcs_\@:
-.endm
-
 /**
  * Initialize EL2 registers to sane values. This should be called early on all
  * cores that were booted in EL2. Note that everything gets initialised as
@@ -311,6 +364,7 @@
 	__init_el2_hcrx
 	__init_el2_timers
 	__init_el2_debug
+	__init_el2_brbe
 	__init_el2_lor
 	__init_el2_stage2
 	__init_el2_gicv3
@@ -319,7 +373,6 @@
 	__init_el2_cptr
 	__init_el2_fgt
 	__init_el2_fgt2
-	__init_el2_gcs
 .endm
 
 #ifndef __KVM_NVHE_HYPERVISOR__
@@ -371,6 +424,13 @@
 	msr_s	SYS_MPAMHCR_EL2, xzr		// clear TRAP_MPAMIDR_EL1 -> EL2
 
 .Lskip_mpam_\@:
+	check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2
+
+.Linit_gcs_\@:
+	msr_s	SYS_GCSCR_EL1, xzr
+	msr_s	SYS_GCSCRE0_EL1, xzr
+
+.Lskip_gcs_\@:
 	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2
 
 .Linit_sve_\@:	/* SVE register access */
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index d48fc16584cd..e3874c4fc399 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -59,8 +59,20 @@ void do_el0_bti(struct pt_regs *regs);
 void do_el1_bti(struct pt_regs *regs, unsigned long esr);
 void do_el0_gcs(struct pt_regs *regs, unsigned long esr);
 void do_el1_gcs(struct pt_regs *regs, unsigned long esr);
-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
-			struct pt_regs *regs);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+void do_breakpoint(unsigned long esr, struct pt_regs *regs);
+void do_watchpoint(unsigned long addr, unsigned long esr,
+		   struct pt_regs *regs);
+#else
+static inline void do_breakpoint(unsigned long esr, struct pt_regs *regs) {}
+static inline void do_watchpoint(unsigned long addr, unsigned long esr,
+				 struct pt_regs *regs) {}
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+void do_el0_softstep(unsigned long esr, struct pt_regs *regs);
+void do_el1_softstep(unsigned long esr, struct pt_regs *regs);
+void do_el0_brk64(unsigned long esr, struct pt_regs *regs);
+void do_el1_brk64(unsigned long esr, struct pt_regs *regs);
+void do_bkpt32(unsigned long esr, struct pt_regs *regs);
 void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
 void do_sve_acc(unsigned long esr, struct pt_regs *regs);
 void do_sme_acc(unsigned long esr, struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/gcs.h b/arch/arm64/include/asm/gcs.h
index f50660603ecf..5bc432234d3a 100644
--- a/arch/arm64/include/asm/gcs.h
+++ b/arch/arm64/include/asm/gcs.h
@@ -58,7 +58,7 @@ static inline u64 gcsss2(void)
 
 static inline bool task_gcs_el0_enabled(struct task_struct *task)
 {
-	return current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
+	return task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
 }
 
 void gcs_set_el0_mode(struct task_struct *task);
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 1c3f9617d54f..13f94c8ddfc0 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -176,6 +176,8 @@
 #define KERNEL_HWCAP_POE		__khwcap2_feature(POE)
 
 #define __khwcap3_feature(x)		(const_ilog2(HWCAP3_ ## x) + 128)
+#define KERNEL_HWCAP_MTE_FAR		__khwcap3_feature(MTE_FAR)
+#define KERNEL_HWCAP_MTE_STORE_ONLY	__khwcap3_feature(MTE_STORE_ONLY)
 
 /*
  * This yields a mask that user programs can use to figure out what
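On the userspace side these hwcaps land in the third hwcap word, exported through the auxiliary vector. A minimal detection sketch, assuming libc and uapi headers recent enough to provide AT_HWCAP3 and the HWCAP3_* bits (guarded so it still builds against older headers):

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval() */
#include <linux/auxvec.h>	/* AT_HWCAP3, if available */
#include <asm/hwcap.h>		/* HWCAP3_* bits (arm64 uapi), if available */

int main(void)
{
#if defined(AT_HWCAP3) && defined(HWCAP3_MTE_FAR) && defined(HWCAP3_MTE_STORE_ONLY)
	unsigned long hwcap3 = getauxval(AT_HWCAP3);

	printf("MTE_FAR:        %s\n", (hwcap3 & HWCAP3_MTE_FAR) ? "yes" : "no");
	printf("MTE_STORE_ONLY: %s\n", (hwcap3 & HWCAP3_MTE_STORE_ONLY) ? "yes" : "no");
#else
	printf("headers too old for AT_HWCAP3/HWCAP3_*\n");
#endif
	return 0;
}

The "+ 128" in __khwcap3_feature() is what maps these kernel-side cap numbers past the first two 64-bit hwcap words.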
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index 21fc85e9d2be..3184f5d1e3ae 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -24,6 +24,18 @@ static inline void arch_kgdb_breakpoint(void)
 
 extern void kgdb_handle_bus_error(void);
 extern int kgdb_fault_expected;
+int kgdb_brk_handler(struct pt_regs *regs, unsigned long esr);
+int kgdb_compiled_brk_handler(struct pt_regs *regs, unsigned long esr);
+#ifdef CONFIG_KGDB
+int kgdb_single_step_handler(struct pt_regs *regs, unsigned long esr);
+#else
+static inline int kgdb_single_step_handler(struct pt_regs *regs,
+					   unsigned long esr)
+{
+	return DBG_HOOK_ERROR;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 /*
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index be7a3680dadf..f2782560647b 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -41,4 +41,12 @@ void __kretprobe_trampoline(void);
 void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
 
 #endif /* CONFIG_KPROBES */
+
+int __kprobes kprobe_brk_handler(struct pt_regs *regs,
+				 unsigned long esr);
+int __kprobes kprobe_ss_brk_handler(struct pt_regs *regs,
+				    unsigned long esr);
+int __kprobes kretprobe_brk_handler(struct pt_regs *regs,
+				    unsigned long esr);
+
 #endif /* _ARM_KPROBES_H */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index bd020fc28aa9..0720898f563e 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -561,68 +561,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
 		vcpu_set_flag((v), e);					\
 	} while (0)
 
-#define __build_check_all_or_none(r, bits)				\
-	BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))
-
-#define __cpacr_to_cptr_clr(clr, set)					\
-	({								\
-		u64 cptr = 0;						\
-									\
-		if ((set) & CPACR_EL1_FPEN)				\
-			cptr |= CPTR_EL2_TFP;				\
-		if ((set) & CPACR_EL1_ZEN)				\
-			cptr |= CPTR_EL2_TZ;				\
-		if ((set) & CPACR_EL1_SMEN)				\
-			cptr |= CPTR_EL2_TSM;				\
-		if ((clr) & CPACR_EL1_TTA)				\
-			cptr |= CPTR_EL2_TTA;				\
-		if ((clr) & CPTR_EL2_TAM)				\
-			cptr |= CPTR_EL2_TAM;				\
-		if ((clr) & CPTR_EL2_TCPAC)				\
-			cptr |= CPTR_EL2_TCPAC;				\
-									\
-		cptr;							\
-	})
-
-#define __cpacr_to_cptr_set(clr, set)					\
-	({								\
-		u64 cptr = 0;						\
-									\
-		if ((clr) & CPACR_EL1_FPEN)				\
-			cptr |= CPTR_EL2_TFP;				\
-		if ((clr) & CPACR_EL1_ZEN)				\
-			cptr |= CPTR_EL2_TZ;				\
-		if ((clr) & CPACR_EL1_SMEN)				\
-			cptr |= CPTR_EL2_TSM;				\
-		if ((set) & CPACR_EL1_TTA)				\
-			cptr |= CPTR_EL2_TTA;				\
-		if ((set) & CPTR_EL2_TAM)				\
-			cptr |= CPTR_EL2_TAM;				\
-		if ((set) & CPTR_EL2_TCPAC)				\
-			cptr |= CPTR_EL2_TCPAC;				\
-									\
-		cptr;							\
-	})
-
-#define cpacr_clear_set(clr, set)					\
-	do {								\
-		BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0);		\
-		BUILD_BUG_ON((clr) & CPACR_EL1_E0POE);			\
-		__build_check_all_or_none((clr), CPACR_EL1_FPEN);	\
-		__build_check_all_or_none((set), CPACR_EL1_FPEN);	\
-		__build_check_all_or_none((clr), CPACR_EL1_ZEN);	\
-		__build_check_all_or_none((set), CPACR_EL1_ZEN);	\
-		__build_check_all_or_none((clr), CPACR_EL1_SMEN);	\
-		__build_check_all_or_none((set), CPACR_EL1_SMEN);	\
-									\
-		if (has_vhe() || has_hvhe())				\
-			sysreg_clear_set(cpacr_el1, clr, set);		\
-		else							\
-			sysreg_clear_set(cptr_el2,			\
-					 __cpacr_to_cptr_clr(clr, set),	\
-					 __cpacr_to_cptr_set(clr, set));\
-	} while (0)
-
 /*
  * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
  * format if E2H isn't set.
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5ccca509dff1..673925f17cdd 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -704,6 +704,7 @@ struct kvm_host_data {
 #define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED	5
 #define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT		6
 #define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED		7
+#define KVM_HOST_DATA_FLAG_HAS_BRBE			8
 	unsigned long flags;
 
 	struct kvm_cpu_context host_ctxt;
@@ -737,6 +738,7 @@ struct kvm_host_data {
 		u64 trfcr_el1;
 		/* Values of trap registers for the host before guest entry. */
 		u64 mdcr_el2;
+		u64 brbcr_el1;
 	} host_debug_state;
 
 	/* Guest trace filter value */
@@ -1289,9 +1291,8 @@ void kvm_arm_resume_guest(struct kvm *kvm);
 	})
 
 /*
- * The couple of isb() below are there to guarantee the same behaviour
- * on VHE as on !VHE, where the eret to EL1 acts as a context
- * synchronization event.
+ * The isb() below is there to guarantee the same behaviour on VHE as on !VHE,
+ * where the eret to EL1 acts as a context synchronization event.
 */
 #define kvm_call_hyp(f, ...)						\
 	do {								\
@@ -1309,7 +1310,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
 									\
 		if (has_vhe()) {					\
 			ret = f(__VA_ARGS__);				\
-			isb();						\
 		} else {						\
 			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
 		}							\
@@ -1482,7 +1482,6 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
 					struct reg_mask_range *range);
 
 /* Guest/host FPSIMD coordination helpers */
-int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 717829df294e..5213248e081b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -118,7 +118,7 @@
  * VMAP'd stacks are allocated at page granularity, so we must ensure that such
  * stacks are a multiple of page size.
  */
-#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
+#if (MIN_THREAD_SHIFT < PAGE_SHIFT)
 #define THREAD_SHIFT		PAGE_SHIFT
 #else
 #define THREAD_SHIFT		MIN_THREAD_SHIFT
@@ -135,11 +135,7 @@
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
-#ifdef CONFIG_VMAP_STACK
 #define THREAD_ALIGN		(2 * THREAD_SIZE)
-#else
-#define THREAD_ALIGN		THREAD_SIZE
-#endif
 
 #define IRQ_STACK_SIZE		THREAD_SIZE
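The 2 * THREAD_SIZE alignment is what makes the cheap entry-code overflow check possible: with the stack base aligned to twice its size, bit THREAD_SHIFT of any SP inside the stack is always clear, while an SP that has run off the bottom has it set. A standalone model of that bit test (the values below are illustrative, not the kernel's configuration):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SHIFT	14			/* illustrative: 16K stacks */
#define THREAD_SIZE	(1UL << THREAD_SHIFT)
#define THREAD_ALIGN	(2 * THREAD_SIZE)

/* With base aligned to THREAD_ALIGN, every SP in [base, base + THREAD_SIZE)
 * has bit THREAD_SHIFT clear; an overflowed SP below base has it set, so a
 * single bit test suffices. */
static int sp_overflowed(uint64_t sp)
{
	return (sp & (1UL << THREAD_SHIFT)) != 0;
}

int main(void)
{
	uint64_t base = 0x100000000UL;		/* multiple of THREAD_ALIGN */

	printf("in stack:   %d\n", sp_overflowed(base + THREAD_SIZE - 16)); /* 0 */
	printf("overflowed: %d\n", sp_overflowed(base - 16));		    /* 1 */
	return 0;
}

This hunk makes the 2 * THREAD_SIZE alignment unconditional because VMAP_STACK is now always enabled on arm64, so the !CONFIG_VMAP_STACK fallbacks can go.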
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 1bf1a3b16e88..61d62bfd5a7b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -23,6 +23,8 @@
 #define MTE_CTRL_TCF_ASYNC		(1UL << 17)
 #define MTE_CTRL_TCF_ASYMM		(1UL << 18)
 
+#define MTE_CTRL_STORE_ONLY		(1UL << 19)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/build_bug.h>
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 66ec8caa6ac0..6d3280932bf5 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -59,7 +59,6 @@ static inline bool on_task_stack(const struct task_struct *tsk,
 
 #define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1))
 
-#ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 
 static inline struct stack_info stackinfo_get_overflow(void)
@@ -72,11 +71,8 @@ static inline struct stack_info stackinfo_get_overflow(void)
 		.high = high,
 	};
 }
-#else
-#define stackinfo_get_overflow()	stackinfo_get_unknown()
-#endif
 
-#if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK)
+#if defined(CONFIG_ARM_SDE_INTERFACE)
 DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
 DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
 
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index f1bb0d10c39a..ef76a4b9e240 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -202,16 +202,8 @@
 #define SYS_DBGVCR32_EL2		sys_reg(2, 4, 0, 7, 0)
 
 #define SYS_BRBINF_EL1(n)		sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0))
-#define SYS_BRBINFINJ_EL1		sys_reg(2, 1, 9, 1, 0)
 #define SYS_BRBSRC_EL1(n)		sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1))
-#define SYS_BRBSRCINJ_EL1		sys_reg(2, 1, 9, 1, 1)
 #define SYS_BRBTGT_EL1(n)		sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2))
-#define SYS_BRBTGTINJ_EL1		sys_reg(2, 1, 9, 1, 2)
-#define SYS_BRBTS_EL1			sys_reg(2, 1, 9, 0, 2)
-
-#define SYS_BRBCR_EL1			sys_reg(2, 1, 9, 0, 0)
-#define SYS_BRBFCR_EL1			sys_reg(2, 1, 9, 0, 1)
-#define SYS_BRBIDR0_EL1			sys_reg(2, 1, 9, 2, 0)
 
 #define SYS_TRCITECR_EL1		sys_reg(3, 0, 1, 2, 3)
 #define SYS_TRCACATR(m)			sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3)))
@@ -277,8 +269,6 @@
 /* ETM */
 #define SYS_TRCOSLAR			sys_reg(2, 1, 1, 0, 4)
 
-#define SYS_BRBCR_EL2			sys_reg(2, 4, 9, 0, 0)
-
 #define SYS_MIDR_EL1			sys_reg(3, 0, 0, 0, 0)
 #define SYS_MPIDR_EL1			sys_reg(3, 0, 0, 0, 5)
 #define SYS_REVIDR_EL1			sys_reg(3, 0, 0, 0, 6)
@@ -821,6 +811,12 @@
 #define OP_COSP_RCTX			sys_insn(1, 3, 7, 3, 6)
 #define OP_CPP_RCTX			sys_insn(1, 3, 7, 3, 7)
 
+/*
+ * BRBE Instructions
+ */
+#define BRB_IALL_INSN	__emit_inst(0xd5000000 | OP_BRB_IALL | (0x1f))
+#define BRB_INJ_INSN	__emit_inst(0xd5000000 | OP_BRB_INJ | (0x1f))
+
 /* Common SCTLR_ELx flags. */
 #define SCTLR_ELx_ENTP2	(BIT(60))
 #define SCTLR_ELx_DSSBS	(BIT(44))
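BRB_IALL_INSN and BRB_INJ_INSN hand-encode SYS instructions so old assemblers need no BRBE support: 0xd5000000 is the SYS-class base, the OP_BRB_* constant contributes the op0/op1/CRn/CRm/op2 fields, and 0x1f is Rt = XZR. A standalone model of that packing follows; the op1=1, CRn=7, CRm=2, op2=4/5 operands are my reading of the Arm ARM's BRB IALL/INJ definitions (an assumption for illustration — the kernel takes them from the generated OP_BRB_* constants):

#include <stdint.h>
#include <stdio.h>

/* sys_insn()-style packing: op0, op1, CRn, CRm and op2 land in bits
 * 20:19, 18:16, 15:12, 11:8 and 7:5 of the instruction word. */
static uint32_t sys_insn(uint32_t op0, uint32_t op1, uint32_t crn,
			 uint32_t crm, uint32_t op2)
{
	return (op0 << 19) | (op1 << 16) | (crn << 12) | (crm << 8) | (op2 << 5);
}

int main(void)
{
	/* Assumed encodings: SYS #1, C7, C2, #4 (BRB IALL) and #5 (BRB INJ),
	 * with Rt = XZR (0x1f). */
	uint32_t brb_iall = 0xd5000000 | sys_insn(1, 1, 7, 2, 4) | 0x1f;
	uint32_t brb_inj  = 0xd5000000 | sys_insn(1, 1, 7, 2, 5) | 0x1f;

	printf("BRB IALL: 0x%08x\n", brb_iall);	/* 0xd509729f */
	printf("BRB INJ:  0x%08x\n", brb_inj);	/* 0xd50972bf */
	return 0;
}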
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index c34344256762..344b1c1a4bbb 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -25,10 +25,6 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
 		      int signo, int sicode, unsigned long far,
 		      unsigned long err);
 
-void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned long,
-					     struct pt_regs *),
-			   int sig, int code, const char *name);
-
 struct mm_struct;
 extern void __show_regs(struct pt_regs *);
 
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 1269c2487574..f241b8601ebd 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -70,6 +70,7 @@ void arch_setup_new_exec(void);
 #define TIF_SYSCALL_TRACEPOINT	10	/* syscall tracepoint for ftrace */
 #define TIF_SECCOMP		11	/* syscall secure computing */
 #define TIF_SYSCALL_EMU		12	/* syscall emulation active */
+#define TIF_PATCH_PENDING	13	/* pending live patching update */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
@@ -96,6 +97,7 @@ void arch_setup_new_exec(void);
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
+#define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_32BIT		(1 << TIF_32BIT)
@@ -107,7 +109,8 @@ void arch_setup_new_exec(void);
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
 				 _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
-				 _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING)
+				 _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING | \
+				 _TIF_PATCH_PENDING)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 82cf1f879c61..e3e8944a71c3 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -29,6 +29,12 @@ void arm64_force_sig_fault_pkey(unsigned long far, const char *str, int pkey);
 void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str);
 void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str);
 
+int bug_brk_handler(struct pt_regs *regs, unsigned long esr);
+int cfi_brk_handler(struct pt_regs *regs, unsigned long esr);
+int reserved_fault_brk_handler(struct pt_regs *regs, unsigned long esr);
+int kasan_brk_handler(struct pt_regs *regs, unsigned long esr);
+int ubsan_brk_handler(struct pt_regs *regs, unsigned long esr);
+
 int early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs);
 
 /*
diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h
index 014b02897f8e..89bfb0213a50 100644
--- a/arch/arm64/include/asm/uprobes.h
+++ b/arch/arm64/include/asm/uprobes.h
@@ -28,4 +28,15 @@ struct arch_uprobe {
 	bool simulate;
 };
 
+int uprobe_brk_handler(struct pt_regs *regs, unsigned long esr);
+#ifdef CONFIG_UPROBES
+int uprobe_single_step_handler(struct pt_regs *regs, unsigned long esr);
+#else
+static inline int uprobe_single_step_handler(struct pt_regs *regs,
+					     unsigned long esr)
+{
+	return DBG_HOOK_ERROR;
+}
+#endif
+
 #endif
diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h
index de58951b8df6..417aae5763a8 100644
--- a/arch/arm64/include/asm/vdso/vsyscall.h
+++ b/arch/arm64/include/asm/vdso/vsyscall.h
@@ -13,12 +13,11 @@
  * Update the vDSO data page to keep in sync with kernel timekeeping.
  */
 static __always_inline
-void __arm64_update_vsyscall(struct vdso_time_data *vdata)
+void __arch_update_vdso_clock(struct vdso_clock *vc)
 {
-	vdata->clock_data[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
-	vdata->clock_data[CS_RAW].mask = VDSO_PRECISION_MASK;
+	vc->mask = VDSO_PRECISION_MASK;
 }
-#define __arch_update_vsyscall __arm64_update_vsyscall
+#define __arch_update_vdso_clock __arch_update_vdso_clock
 
 /* The asm-generic header needs to be included after the definitions above */
 #include <asm-generic/vdso/vsyscall.h>