Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/arm_pmuv3.h        155
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h        17
-rw-r--r--  arch/arm64/include/asm/barrier.h           10
-rw-r--r--  arch/arm64/include/asm/compat.h             4
-rw-r--r--  arch/arm64/include/asm/compiler.h          36
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h     1
-rw-r--r--  arch/arm64/include/asm/exception.h          4
-rw-r--r--  arch/arm64/include/asm/fixmap.h            22
-rw-r--r--  arch/arm64/include/asm/ftrace.h            22
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h     5
-rw-r--r--  arch/arm64/include/asm/kexec.h              6
-rw-r--r--  arch/arm64/include/asm/kfence.h            10
-rw-r--r--  arch/arm64/include/asm/kvm_host.h          51
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h            4
-rw-r--r--  arch/arm64/include/asm/memory.h            13
-rw-r--r--  arch/arm64/include/asm/mmu.h                2
-rw-r--r--  arch/arm64/include/asm/mmu_context.h        6
-rw-r--r--  arch/arm64/include/asm/mte-kasan.h         81
-rw-r--r--  arch/arm64/include/asm/mte.h               12
-rw-r--r--  arch/arm64/include/asm/perf_event.h       249
-rw-r--r--  arch/arm64/include/asm/pgtable.h            2
-rw-r--r--  arch/arm64/include/asm/pointer_auth.h      13
-rw-r--r--  arch/arm64/include/asm/smp.h                9
-rw-r--r--  arch/arm64/include/asm/sparsemem.h          2
-rw-r--r--  arch/arm64/include/asm/sysreg.h            12
-rw-r--r--  arch/arm64/include/asm/uaccess.h           72
-rw-r--r--  arch/arm64/include/asm/word-at-a-time.h     4
27 files changed, 415 insertions, 409 deletions
diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
new file mode 100644
index 000000000000..d6b51deb7bf0
--- /dev/null
+++ b/arch/arm64/include/asm/arm_pmuv3.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ASM_PMUV3_H
+#define __ASM_PMUV3_H
+
+#include <linux/kvm_host.h>
+
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(pmevcntr##n##_el0)
+static unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, pmevcntr##n##_el0)
+static void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, pmevtyper##n##_el0)
+static void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
+static inline unsigned long read_pmmir(void)
+{
+ return read_cpuid(PMMIR_EL1);
+}
+
+static inline u32 read_pmuver(void)
+{
+ u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
+
+ return cpuid_feature_extract_unsigned_field(dfr0,
+ ID_AA64DFR0_EL1_PMUVer_SHIFT);
+}
+
+static inline void write_pmcr(u32 val)
+{
+ write_sysreg(val, pmcr_el0);
+}
+
+static inline u32 read_pmcr(void)
+{
+ return read_sysreg(pmcr_el0);
+}
+
+static inline void write_pmselr(u32 val)
+{
+ write_sysreg(val, pmselr_el0);
+}
+
+static inline void write_pmccntr(u64 val)
+{
+ write_sysreg(val, pmccntr_el0);
+}
+
+static inline u64 read_pmccntr(void)
+{
+ return read_sysreg(pmccntr_el0);
+}
+
+static inline void write_pmxevcntr(u32 val)
+{
+ write_sysreg(val, pmxevcntr_el0);
+}
+
+static inline u32 read_pmxevcntr(void)
+{
+ return read_sysreg(pmxevcntr_el0);
+}
+
+static inline void write_pmxevtyper(u32 val)
+{
+ write_sysreg(val, pmxevtyper_el0);
+}
+
+static inline void write_pmcntenset(u32 val)
+{
+ write_sysreg(val, pmcntenset_el0);
+}
+
+static inline void write_pmcntenclr(u32 val)
+{
+ write_sysreg(val, pmcntenclr_el0);
+}
+
+static inline void write_pmintenset(u32 val)
+{
+ write_sysreg(val, pmintenset_el1);
+}
+
+static inline void write_pmintenclr(u32 val)
+{
+ write_sysreg(val, pmintenclr_el1);
+}
+
+static inline void write_pmccfiltr(u32 val)
+{
+ write_sysreg(val, pmccfiltr_el0);
+}
+
+static inline void write_pmovsclr(u32 val)
+{
+ write_sysreg(val, pmovsclr_el0);
+}
+
+static inline u32 read_pmovsclr(void)
+{
+ return read_sysreg(pmovsclr_el0);
+}
+
+static inline void write_pmuserenr(u32 val)
+{
+ write_sysreg(val, pmuserenr_el0);
+}
+
+static inline u32 read_pmceid0(void)
+{
+ return read_sysreg(pmceid0_el0);
+}
+
+static inline u32 read_pmceid1(void)
+{
+ return read_sysreg(pmceid1_el0);
+}
+
+static inline bool pmuv3_implemented(int pmuver)
+{
+ return !(pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
+ pmuver == ID_AA64DFR0_EL1_PMUVer_NI);
+}
+
+static inline bool is_pmuv3p4(int pmuver)
+{
+ return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4;
+}
+
+static inline bool is_pmuv3p5(int pmuver)
+{
+ return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
+}
+
+#endif
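
Not part of the patch: the per-counter accessors above rely on a PMEVN_SWITCH(n, CASE) helper, assumed to be provided by a shared PMUv3 header, which expands to a switch over the run-time counter index so that every case names a compile-time-constant pmevcntr<n>_el0 register. A standalone C sketch of that dispatch pattern, with an array standing in for the system registers:

/*
 * Illustrative sketch only: PMEVN_CASE()/PMEVN_SWITCH() names are assumed,
 * and fake_pmevcntr[] stands in for the pmevcntr<n>_el0 registers.
 */
#include <stdio.h>

static unsigned long fake_pmevcntr[31];

#define PMEVN_CASE(n, case_macro)	case n: case_macro(n); break
#define RETURN_READ_PMEVCNTRN(n)	return fake_pmevcntr[n]

static unsigned long read_counter(int n)
{
	switch (n) {
	PMEVN_CASE(0, RETURN_READ_PMEVCNTRN);
	PMEVN_CASE(1, RETURN_READ_PMEVCNTRN);
	PMEVN_CASE(2, RETURN_READ_PMEVCNTRN);
	/* ...a full implementation provides one case per counter (0..30)... */
	}
	return 0;
}

int main(void)
{
	fake_pmevcntr[2] = 1234;
	printf("%lu\n", read_counter(2));	/* 1234 */
	return 0;
}
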
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index a94d6dacc029..319958b95cfd 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -251,22 +251,15 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
u##sz old, \
u##sz new) \
{ \
- register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
- register u##sz x1 asm ("x1") = old; \
- register u##sz x2 asm ("x2") = new; \
- unsigned long tmp; \
- \
asm volatile( \
__LSE_PREAMBLE \
- " mov %" #w "[tmp], %" #w "[old]\n" \
- " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
- " mov %" #w "[ret], %" #w "[tmp]" \
- : [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr), \
- [tmp] "=&r" (tmp) \
- : [old] "r" (x1), [new] "r" (x2) \
+ " cas" #mb #sfx " %" #w "[old], %" #w "[new], %[v]\n" \
+ : [v] "+Q" (*(u##sz *)ptr), \
+ [old] "+r" (old) \
+ : [new] "rZ" (new) \
: cl); \
\
- return x0; \
+ return old; \
}
__CMPXCHG_CASE(w, b, , 8, )
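
Not part of the patch: the rewritten cmpxchg drops the fixed x0/x1/x2 register bindings and the extra mov instructions because CAS leaves the value actually observed in memory in the 'old' register, which can be returned directly. A userspace sketch of the same semantics using the GCC/Clang __atomic builtin:

/*
 * Behavioural sketch only: models "return the previous value, store only
 * if it matched 'old'", not the LSE asm itself.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t cmpxchg64(volatile uint64_t *ptr, uint64_t old, uint64_t new)
{
	uint64_t expected = old;

	/* on failure 'expected' is updated to the current memory value */
	__atomic_compare_exchange_n(ptr, &expected, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;
}

int main(void)
{
	volatile uint64_t v = 1;

	printf("%lu\n", (unsigned long)cmpxchg64(&v, 1, 2));	/* 1: swap done   */
	printf("%lu\n", (unsigned long)cmpxchg64(&v, 1, 3));	/* 2: swap failed */
	return 0;
}
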
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 3dd8982a9ce3..cf2987464c18 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -131,25 +131,25 @@ do { \
case 1: \
asm volatile ("stlrb %w1, %0" \
: "=Q" (*__p) \
- : "r" (*(__u8 *)__u.__c) \
+ : "rZ" (*(__u8 *)__u.__c) \
: "memory"); \
break; \
case 2: \
asm volatile ("stlrh %w1, %0" \
: "=Q" (*__p) \
- : "r" (*(__u16 *)__u.__c) \
+ : "rZ" (*(__u16 *)__u.__c) \
: "memory"); \
break; \
case 4: \
asm volatile ("stlr %w1, %0" \
: "=Q" (*__p) \
- : "r" (*(__u32 *)__u.__c) \
+ : "rZ" (*(__u32 *)__u.__c) \
: "memory"); \
break; \
case 8: \
- asm volatile ("stlr %1, %0" \
+ asm volatile ("stlr %x1, %0" \
: "=Q" (*__p) \
- : "r" (*(__u64 *)__u.__c) \
+ : "rZ" (*(__u64 *)__u.__c) \
: "memory"); \
break; \
} \
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 9f362274a4f7..74575c3d6987 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -83,10 +83,6 @@ struct compat_statfs {
int f_spare[4];
};
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
#define COMPAT_MINSIGSTKSZ 2048
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 6fb2e6bcc392..9bbd7b7097ff 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -8,19 +8,33 @@
#define ARM64_ASM_PREAMBLE
#endif
-/*
- * The EL0/EL1 pointer bits used by a pointer authentication code.
- * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
- */
-#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual)
-#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual)
+#define xpaclri(ptr) \
+({ \
+ register unsigned long __xpaclri_ptr asm("x30") = (ptr); \
+ \
+ asm( \
+ ARM64_ASM_PREAMBLE \
+ " hint #7\n" \
+ : "+r" (__xpaclri_ptr)); \
+ \
+ __xpaclri_ptr; \
+})
-/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
-#define ptrauth_clear_pac(ptr) \
- ((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) : \
- (ptr & ~ptrauth_user_pac_mask()))
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+#define ptrauth_strip_kernel_insn_pac(ptr) xpaclri(ptr)
+#else
+#define ptrauth_strip_kernel_insn_pac(ptr) (ptr)
+#endif
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+#define ptrauth_strip_user_insn_pac(ptr) xpaclri(ptr)
+#else
+#define ptrauth_strip_user_insn_pac(ptr) (ptr)
+#endif
+#if !defined(CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
#define __builtin_return_address(val) \
- (void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+ (void *)(ptrauth_strip_kernel_insn_pac((unsigned long)__builtin_return_address(val)))
+#endif
#endif /* __ASM_COMPILER_H */
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7b7e05c02691..13d437bcbf58 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -104,6 +104,7 @@ void user_regs_reset_single_step(struct user_pt_regs *regs,
void kernel_enable_single_step(struct pt_regs *regs);
void kernel_disable_single_step(void);
int kernel_active_single_step(void);
+void kernel_rewind_single_step(struct pt_regs *regs);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int reinstall_suspended_bps(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 92963f98afec..e73af709cb7a 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -31,7 +31,7 @@ static inline unsigned long disr_to_esr(u64 disr)
return esr;
}
-asmlinkage void handle_bad_stack(struct pt_regs *regs);
+asmlinkage void __noreturn handle_bad_stack(struct pt_regs *regs);
asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
@@ -80,5 +80,5 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr);
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
-void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
+void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 71ed5fdf718b..58c294a96676 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -17,6 +17,7 @@
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/sizes.h>
#include <asm/boot.h>
#include <asm/page.h>
@@ -36,17 +37,13 @@ enum fixed_addresses {
FIX_HOLE,
/*
- * Reserve a virtual window for the FDT that is 2 MB larger than the
- * maximum supported size, and put it at the top of the fixmap region.
- * The additional space ensures that any FDT that does not exceed
- * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
- * 2 MB alignment boundaries.
- *
- * Keep this at the top so it remains 2 MB aligned.
+ * Reserve a virtual window for the FDT that is a page bigger than the
+ * maximum supported size. The additional space ensures that any FDT
+ * that does not exceed MAX_FDT_SIZE can be mapped regardless of
+ * whether it crosses any page boundary.
*/
-#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
FIX_FDT_END,
- FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+ FIX_FDT = FIX_FDT_END + DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1,
FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
@@ -95,12 +92,15 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
-#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_TOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_TOT_START (FIXADDR_TOP - FIXADDR_TOT_SIZE)
#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
void __init early_fixmap_init(void);
+void __init fixmap_copy(pgd_t *pgdir);
#define __early_set_fixmap __set_fixmap
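
Not part of the patch: a quick check of the new FDT window sizing. One page of slack beyond MAX_FDT_SIZE is enough for any sub-page placement of the FDT; the 2MiB FDT limit and 4KiB page size below are illustrative assumptions.

/*
 * Arithmetic sketch only: compares the reserved window against the
 * worst-case number of pages an FDT of MAX_FDT_SIZE bytes can touch.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long max_fdt_size = 2UL << 20;		/* assumed MAX_FDT_SIZE */
	unsigned long page_size    = 4096;
	unsigned long offset       = page_size - 1;	/* worst-case placement */

	unsigned long window_pages = DIV_ROUND_UP(max_fdt_size, page_size) + 1;
	unsigned long needed_pages =
		(offset + max_fdt_size - 1) / page_size + 1;	/* pages touched */

	printf("window %lu pages, worst case needs %lu\n",
	       window_pages, needed_pages);		/* 513, 513 */
	return 0;
}
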
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 1c2672bbbf37..b87d70b693c6 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -70,10 +70,19 @@ struct ftrace_ops;
#define arch_ftrace_get_regs(regs) NULL
+/*
+ * Note: sizeof(struct ftrace_regs) must be a multiple of 16 to ensure correct
+ * stack alignment
+ */
struct ftrace_regs {
/* x0 - x8 */
unsigned long regs[9];
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ unsigned long direct_tramp;
+#else
unsigned long __unused;
+#endif
unsigned long fp;
unsigned long lr;
@@ -136,6 +145,19 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
+ unsigned long addr)
+{
+ /*
+ * The ftrace trampoline will return to this address instead of the
+ * instrumented function.
+ */
+ fregs->direct_tramp = addr;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
#endif
#define ftrace_return_address(n) return_address(n)
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index fcd14197756f..186dd7f85b14 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -59,8 +59,11 @@
#define EARLY_KASLR (0)
#endif
+#define SPAN_NR_ENTRIES(vstart, vend, shift) \
+ ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
+
#define EARLY_ENTRIES(vstart, vend, shift, add) \
- ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
+ (SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
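
Not part of the patch: SPAN_NR_ENTRIES() counts how many translation-table entries of a given shift a [vstart, vend) range touches, including partially covered entries at both ends. A standalone example with made-up addresses:

#include <stdio.h>

#define SPAN_NR_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)

int main(void)
{
	unsigned long vstart = 0x40201000UL;	/* not 2MiB aligned   */
	unsigned long vend   = 0x40601000UL;	/* 4MiB further along */

	/* touches the 2MiB entries at 0x40200000, 0x40400000 and 0x40600000 */
	printf("%lu\n", SPAN_NR_ENTRIES(vstart, vend, 21));	/* 3 */
	return 0;
}
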
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 559bfae26715..9ac9572a3bbe 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -102,12 +102,6 @@ void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
int machine_kexec_post_load(struct kimage *image);
#define machine_kexec_post_load machine_kexec_post_load
-
-void arch_kexec_protect_crashkres(void);
-#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
-
-void arch_kexec_unprotect_crashkres(void);
-#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
#endif
#define ARCH_HAS_KIMAGE_ARCH
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index aa855c6a0ae6..a81937fae9f6 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -19,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
return true;
}
+#ifdef CONFIG_KFENCE
+extern bool kfence_early_init;
+static inline bool arm64_kfence_can_set_direct_map(void)
+{
+ return !kfence_early_init;
+}
+#else /* CONFIG_KFENCE */
+static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
+#endif /* CONFIG_KFENCE */
+
#endif /* __ASM_KFENCE_H */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a1892a8f6032..7e7e19ef6993 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
+#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
@@ -193,9 +194,15 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
+ /* Timers */
+ struct arch_timer_vm_data timer_data;
+
/* Mandated version of PSCI */
u32 psci_version;
+ /* Protects VM-scoped configuration data */
+ struct mutex config_lock;
+
/*
* If we encounter a data abort without valid instruction syndrome
* information, report this to user space. User space can (and
@@ -218,7 +225,12 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_EL1_32BIT 4
/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 5
-
+ /* VM counter offset */
+#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 6
+ /* Timer PPIs made immutable */
+#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 7
+ /* SMCCC filter initialized for the VM */
+#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED 8
unsigned long flags;
/*
@@ -239,6 +251,7 @@ struct kvm_arch {
/* Hypercall features firmware registers' descriptor */
struct kvm_smccc_features smccc_feat;
+ struct maple_tree smccc_filter;
/*
* For an untrusted host VM, 'pkvm.handle' is used to lookup
@@ -362,6 +375,10 @@ enum vcpu_sysreg {
TPIDR_EL2, /* EL2 Software Thread ID Register */
CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
SP_EL2, /* EL2 Stack Pointer */
+ CNTHP_CTL_EL2,
+ CNTHP_CVAL_EL2,
+ CNTHV_CTL_EL2,
+ CNTHV_CVAL_EL2,
NR_SYS_REGS /* Nothing after this line! */
};
@@ -519,6 +536,7 @@ struct kvm_vcpu_arch {
/* vcpu power state */
struct kvm_mp_state mp_state;
+ spinlock_t mp_state_lock;
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
@@ -573,9 +591,22 @@ struct kvm_vcpu_arch {
({ \
__build_check_flag(v, flagset, f, m); \
\
- v->arch.flagset & (m); \
+ READ_ONCE(v->arch.flagset) & (m); \
})
+/*
+ * Note that the set/clear accessors must be preempt-safe in order to
+ * avoid nesting them with load/put which also manipulate flags...
+ */
+#ifdef __KVM_NVHE_HYPERVISOR__
+/* the nVHE hypervisor is always non-preemptible */
+#define __vcpu_flags_preempt_disable()
+#define __vcpu_flags_preempt_enable()
+#else
+#define __vcpu_flags_preempt_disable() preempt_disable()
+#define __vcpu_flags_preempt_enable() preempt_enable()
+#endif
+
#define __vcpu_set_flag(v, flagset, f, m) \
do { \
typeof(v->arch.flagset) *fset; \
@@ -583,9 +614,11 @@ struct kvm_vcpu_arch {
__build_check_flag(v, flagset, f, m); \
\
fset = &v->arch.flagset; \
+ __vcpu_flags_preempt_disable(); \
if (HWEIGHT(m) > 1) \
*fset &= ~(m); \
*fset |= (f); \
+ __vcpu_flags_preempt_enable(); \
} while (0)
#define __vcpu_clear_flag(v, flagset, f, m) \
@@ -595,7 +628,9 @@ struct kvm_vcpu_arch {
__build_check_flag(v, flagset, f, m); \
\
fset = &v->arch.flagset; \
+ __vcpu_flags_preempt_disable(); \
*fset &= ~(m); \
+ __vcpu_flags_preempt_enable(); \
} while (0)
#define vcpu_get_flag(v, ...) __vcpu_get_flag((v), __VA_ARGS__)
@@ -919,6 +954,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
int __init kvm_sys_reg_table_init(void);
+bool lock_all_vcpus(struct kvm *kvm);
+void unlock_all_vcpus(struct kvm *kvm);
+
/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
@@ -1002,8 +1040,10 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
-long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
- struct kvm_arm_copy_mte_tags *copy_tags);
+int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+ struct kvm_arm_copy_mte_tags *copy_tags);
+int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
+ struct kvm_arm_counter_offset *offset);
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
@@ -1058,6 +1098,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
(system_supports_32bit_el0() && \
!static_branch_unlikely(&arm64_mismatched_32bit_el0))
+#define kvm_vm_has_ran_once(kvm) \
+ (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))
+
int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
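
Not part of the patch: a standalone sketch of the vcpu flag-update logic made preempt-safe above. A multi-bit field (HWEIGHT(m) > 1) is cleared before the new value is OR-ed in, and the preempt_disable()/enable() pair (elided here) keeps that read-modify-write from interleaving with vcpu load/put on the same CPU.

#include <stdio.h>

#define HWEIGHT(x)	__builtin_popcountl(x)

static void set_flag(unsigned long *fset, unsigned long f, unsigned long m)
{
	if (HWEIGHT(m) > 1)		/* multi-bit field: clear old value */
		*fset &= ~m;
	*fset |= f;
}

int main(void)
{
	unsigned long flags = 0x3;	/* two-bit field currently 0b11 */

	set_flag(&flags, 0x1, 0x3);	/* write 0b01 into the same field */
	printf("%#lx\n", flags);	/* 0x1 */
	return 0;
}
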
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 083cc47dca08..27e63c111f78 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -63,6 +63,7 @@
* specific registers encoded in the instructions).
*/
.macro kern_hyp_va reg
+#ifndef __KVM_VHE_HYPERVISOR__
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
and \reg, \reg, #1 /* mask with va_mask */
ror \reg, \reg, #1 /* rotate to the first tag bit */
@@ -70,6 +71,7 @@ alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
add \reg, \reg, #0, lsl 12 /* insert the top 12 bits of the tag */
ror \reg, \reg, #63 /* rotate back */
alternative_cb_end
+#endif
.endm
/*
@@ -127,6 +129,7 @@ void kvm_apply_hyp_relocations(void);
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
+#ifndef __KVM_VHE_HYPERVISOR__
asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
"ror %0, %0, #1\n"
"add %0, %0, #0\n"
@@ -135,6 +138,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
ARM64_ALWAYS_SYSTEM,
kvm_update_va_mask)
: "+r" (v));
+#endif
return v;
}
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 78e5163836a0..c735afdf639b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -261,9 +261,11 @@ static inline const void *__tag_set(const void *addr, u8 tag)
}
#ifdef CONFIG_KASAN_HW_TAGS
-#define arch_enable_tagging_sync() mte_enable_kernel_sync()
-#define arch_enable_tagging_async() mte_enable_kernel_async()
-#define arch_enable_tagging_asymm() mte_enable_kernel_asymm()
+#define arch_enable_tag_checks_sync() mte_enable_kernel_sync()
+#define arch_enable_tag_checks_async() mte_enable_kernel_async()
+#define arch_enable_tag_checks_asymm() mte_enable_kernel_asymm()
+#define arch_suppress_tag_checks_start() mte_enable_tco()
+#define arch_suppress_tag_checks_stop() mte_disable_tco()
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
@@ -374,11 +376,6 @@ static inline void *phys_to_virt(phys_addr_t x)
})
void dump_mem_limit(void);
-
-static inline bool defer_reserve_crashkernel(void)
-{
- return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
-}
#endif /* !ASSEMBLY */
/*
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 48f8466a4be9..4384eaa0aeb7 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -65,6 +65,8 @@ extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
+extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot, bool page_mappings_only);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 72dbd6400549..56911691bef0 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -288,6 +288,12 @@ void post_ttbr_update_workaround(void);
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+ return -1UL >> 8;
+}
+
#include <asm-generic/mmu_context.h>
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 9f79425fc65a..2e98028c1965 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -13,9 +13,74 @@
#include <linux/types.h>
+#ifdef CONFIG_KASAN_HW_TAGS
+
+/* Whether the MTE asynchronous mode is enabled. */
+DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
+
+static inline bool system_uses_mte_async_or_asymm_mode(void)
+{
+ return static_branch_unlikely(&mte_async_or_asymm_mode);
+}
+
+#else /* CONFIG_KASAN_HW_TAGS */
+
+static inline bool system_uses_mte_async_or_asymm_mode(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
#ifdef CONFIG_ARM64_MTE
/*
+ * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
+ * affects EL0 and TCF affects EL1 irrespective of which TTBR is
+ * used.
+ * The kernel accesses TTBR0 usually with LDTR/STTR instructions
+ * when UAO is available, so these would act as EL0 accesses using
+ * TCF0.
+ * However futex.h code uses exclusives which would be executed as
+ * EL1, this can potentially cause a tag check fault even if the
+ * user disables TCF0.
+ *
+ * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
+ * and reset it in uaccess_disable().
+ *
+ * The Tag check override (TCO) bit disables temporarily the tag checking
+ * preventing the issue.
+ */
+static inline void mte_disable_tco(void)
+{
+ asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
+ ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void mte_enable_tco(void)
+{
+ asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+ ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+/*
+ * These functions disable tag checking only if in MTE async mode
+ * since the sync mode generates exceptions synchronously and the
+ * nofault or load_unaligned_zeropad can handle them.
+ */
+static inline void __mte_disable_tco_async(void)
+{
+ if (system_uses_mte_async_or_asymm_mode())
+ mte_disable_tco();
+}
+
+static inline void __mte_enable_tco_async(void)
+{
+ if (system_uses_mte_async_or_asymm_mode())
+ mte_enable_tco();
+}
+
+/*
* These functions are meant to be only used from KASAN runtime through
* the arch_*() interface defined in asm/memory.h.
* These functions don't include system_supports_mte() checks,
@@ -138,6 +203,22 @@ void mte_enable_kernel_asymm(void);
#else /* CONFIG_ARM64_MTE */
+static inline void mte_disable_tco(void)
+{
+}
+
+static inline void mte_enable_tco(void)
+{
+}
+
+static inline void __mte_disable_tco_async(void)
+{
+}
+
+static inline void __mte_enable_tco_async(void)
+{
+}
+
static inline u8 mte_get_ptr_tag(void *ptr)
{
return 0xFF;
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 20dd06d70af5..c028afb1cd0b 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -178,14 +178,6 @@ static inline void mte_disable_tco_entry(struct task_struct *task)
}
#ifdef CONFIG_KASAN_HW_TAGS
-/* Whether the MTE asynchronous mode is enabled. */
-DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
-
-static inline bool system_uses_mte_async_or_asymm_mode(void)
-{
- return static_branch_unlikely(&mte_async_or_asymm_mode);
-}
-
void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
@@ -212,10 +204,6 @@ static inline void mte_check_tfsr_exit(void)
mte_check_tfsr_el1();
}
#else
-static inline bool system_uses_mte_async_or_asymm_mode(void)
-{
- return false;
-}
static inline void mte_check_tfsr_el1(void)
{
}
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 3eaf462f5752..eb7071c9eb34 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -9,255 +9,6 @@
#include <asm/stack_pointer.h>
#include <asm/ptrace.h>
-#define ARMV8_PMU_MAX_COUNTERS 32
-#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
-
-/*
- * Common architectural and microarchitectural event numbers.
- */
-#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x0000
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x0001
-#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x0002
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x0003
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x0004
-#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x0005
-#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x0006
-#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x0007
-#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x0008
-#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x0009
-#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x000A
-#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x000B
-#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x000C
-#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x000D
-#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x000E
-#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x000F
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x0010
-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x0011
-#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x0012
-#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x0013
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x0014
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x0015
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x0016
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x0017
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x0018
-#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x0019
-#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x001A
-#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x001B
-#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x001C
-#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x001D
-#define ARMV8_PMUV3_PERFCTR_CHAIN 0x001E
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x001F
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x0020
-#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x0021
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x0022
-#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x0023
-#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x0024
-#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x0025
-#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x0026
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x0027
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x0028
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x0029
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x002A
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x002B
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x002C
-#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x002D
-#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x002E
-#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x002F
-#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x0030
-#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x0031
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x0032
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x0033
-#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x0034
-#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x0035
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x0036
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x0037
-#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x0038
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x0039
-#define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x003A
-#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x003B
-#define ARMV8_PMUV3_PERFCTR_STALL 0x003C
-#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x003D
-#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x003E
-#define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x003F
-
-/* Statistical profiling extension microarchitectural events */
-#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000
-#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001
-#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002
-#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003
-
-/* AMUv1 architecture events */
-#define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004
-#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005
-
-/* long-latency read miss events */
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B
-
-/* Trace buffer events */
-#define ARMV8_PMUV3_PERFCTR_TRB_WRAP 0x400C
-#define ARMV8_PMUV3_PERFCTR_TRB_TRIG 0x400E
-
-/* Trace unit events */
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT0 0x4010
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT1 0x4011
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT2 0x4012
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT3 0x4013
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4 0x4018
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5 0x4019
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6 0x401A
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7 0x401B
-
-/* additional latency from alignment events */
-#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020
-#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021
-#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022
-
-/* Armv8.5 Memory Tagging Extension events */
-#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024
-#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025
-#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026
-
-/* ARMv8 recommended implementation defined event types */
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x0040
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x0041
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x0042
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x0043
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x0044
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x0045
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x0046
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x0047
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x0048
-
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x004C
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x004D
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x004E
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x004F
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x0050
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x0051
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x0052
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x0053
-
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x0056
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x0057
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x0058
-
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x005C
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x005D
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x005E
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x005F
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x0060
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x0061
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x0062
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x0063
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x0064
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x0065
-#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x0066
-#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x0067
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x0068
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x0069
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x006A
-
-#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x006C
-#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x006D
-#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x006E
-#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x006F
-#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x0070
-#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x0071
-#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x0072
-#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x0073
-#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x0074
-#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x0075
-#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x0076
-#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x0077
-#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x0078
-#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x0079
-#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x007A
-
-#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x007C
-#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x007D
-#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x007E
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x0081
-#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x0082
-#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x0083
-#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x0084
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x0086
-#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x0087
-#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x0088
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x008A
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x008B
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x008C
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x008D
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x008E
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x008F
-#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x0090
-#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x0091
-
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0x00A0
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0x00A1
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0x00A2
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0x00A3
-
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0x00A6
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0x00A7
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0x00A8
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
-#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
-#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
-#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
-#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
-#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
-#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMU_PMCR_N_MASK 0x1f
-#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
-#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv3
- */
-#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
-#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
-#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
-
-/*
- * PMUSERENR: user enable reg
- */
-#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
-#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
-#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
-#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
-#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
-
-/* PMMIR_EL1.SLOTS mask */
-#define ARMV8_PMU_SLOTS_MASK 0xff
-
-#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
-#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
-#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
-#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
-
#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b6ba466e2e8a..0bd18de9fd97 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -57,7 +57,7 @@ static inline bool arch_thp_swp_supported(void)
* fault on one CPU which has been handled concurrently by another CPU
* does not need to perform additional invalidation.
*/
-#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
+#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
/*
* ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index efb098de3a84..d2e0306e65d3 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -10,6 +10,13 @@
#include <asm/memory.h>
#include <asm/sysreg.h>
+/*
+ * The EL0/EL1 pointer bits used by a pointer authentication code.
+ * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
+ */
+#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual)
+#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual)
+
#define PR_PAC_ENABLED_KEYS_MASK \
(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
@@ -97,11 +104,6 @@ extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
unsigned long enabled);
extern int ptrauth_get_enabled_keys(struct task_struct *tsk);
-static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
-{
- return ptrauth_clear_pac(ptr);
-}
-
static __always_inline void ptrauth_enable(void)
{
if (!system_supports_address_auth())
@@ -133,7 +135,6 @@ static __always_inline void ptrauth_enable(void)
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
#define ptrauth_set_enabled_keys(tsk, keys, enabled) (-EINVAL)
#define ptrauth_get_enabled_keys(tsk) (-EINVAL)
-#define ptrauth_strip_insn_pac(lr) (lr)
#define ptrauth_suspend_exit()
#define ptrauth_thread_init_user()
#define ptrauth_thread_switch_user(tsk)
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index fc55f5a57a06..f2d26235bfb4 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -100,10 +100,10 @@ static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
-extern void cpu_die_early(void);
+extern void __noreturn cpu_die(void);
+extern void __noreturn cpu_die_early(void);
-static inline void cpu_park_loop(void)
+static inline void __noreturn cpu_park_loop(void)
{
for (;;) {
wfe();
@@ -123,7 +123,7 @@ static inline void update_cpu_boot_status(int val)
* which calls for a kernel panic. Update the boot status and park the calling
* CPU.
*/
-static inline void cpu_panic_kernel(void)
+static inline void __noreturn cpu_panic_kernel(void)
{
update_cpu_boot_status(CPU_PANIC_KERNEL);
cpu_park_loop();
@@ -143,7 +143,6 @@ bool cpus_are_stuck_in_kernel(void);
extern void crash_smp_send_stop(void);
extern bool smp_crash_stop_failed(void);
-extern void panic_smp_self_stop(void);
#endif /* ifndef __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 4b73463423c3..5f5437621029 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -10,7 +10,7 @@
/*
* Section size must be at least 512MB for 64K base
* page size config. Otherwise it will be less than
- * (MAX_ORDER - 1) and the build process will fail.
+ * MAX_ORDER and the build process will fail.
*/
#ifdef CONFIG_ARM64_64K_PAGES
#define SECTION_SIZE_BITS 29
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 9e3ecba3c4e6..e72d9aaab6b1 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -388,6 +388,7 @@
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
+#define SYS_CNTPCT_EL0 sys_reg(3, 3, 14, 0, 1)
#define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5)
#define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6)
@@ -400,7 +401,9 @@
#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
+#define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0)
#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
+#define SYS_AARCH32_CNTPCTSS sys_reg(0, 8, 0, 14, 0)
#define __PMEV_op2(n) ((n) & 0x7)
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
@@ -419,9 +422,6 @@
#define SYS_MDCR_EL2 sys_reg(3, 4, 1, 1, 1)
#define SYS_CPTR_EL2 sys_reg(3, 4, 1, 1, 2)
#define SYS_HSTR_EL2 sys_reg(3, 4, 1, 1, 3)
-#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4)
-#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
-#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
#define SYS_HACR_EL2 sys_reg(3, 4, 1, 1, 7)
#define SYS_TTBR0_EL2 sys_reg(3, 4, 2, 0, 0)
@@ -758,12 +758,6 @@
#define ICH_VTR_TDS_SHIFT 19
#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT)
-/* HFG[WR]TR_EL2 bit definitions */
-#define HFGxTR_EL2_nTPIDR2_EL0_SHIFT 55
-#define HFGxTR_EL2_nTPIDR2_EL0_MASK BIT_MASK(HFGxTR_EL2_nTPIDR2_EL0_SHIFT)
-#define HFGxTR_EL2_nSMPRI_EL1_SHIFT 54
-#define HFGxTR_EL2_nSMPRI_EL1_MASK BIT_MASK(HFGxTR_EL2_nSMPRI_EL1_SHIFT)
-
#define ARM64_FEATURE_FIELD_BITS 4
/* Defined for compatibility only, do not add new users. */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5c7b2f9d5913..05f4fc265428 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -136,55 +136,9 @@ static inline void __uaccess_enable_hw_pan(void)
CONFIG_ARM64_PAN));
}
-/*
- * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
- * affects EL0 and TCF affects EL1 irrespective of which TTBR is
- * used.
- * The kernel accesses TTBR0 usually with LDTR/STTR instructions
- * when UAO is available, so these would act as EL0 accesses using
- * TCF0.
- * However futex.h code uses exclusives which would be executed as
- * EL1, this can potentially cause a tag check fault even if the
- * user disables TCF0.
- *
- * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
- * and reset it in uaccess_disable().
- *
- * The Tag check override (TCO) bit disables temporarily the tag checking
- * preventing the issue.
- */
-static inline void __uaccess_disable_tco(void)
-{
- asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
- ARM64_MTE, CONFIG_KASAN_HW_TAGS));
-}
-
-static inline void __uaccess_enable_tco(void)
-{
- asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
- ARM64_MTE, CONFIG_KASAN_HW_TAGS));
-}
-
-/*
- * These functions disable tag checking only if in MTE async mode
- * since the sync mode generates exceptions synchronously and the
- * nofault or load_unaligned_zeropad can handle them.
- */
-static inline void __uaccess_disable_tco_async(void)
-{
- if (system_uses_mte_async_or_asymm_mode())
- __uaccess_disable_tco();
-}
-
-static inline void __uaccess_enable_tco_async(void)
-{
- if (system_uses_mte_async_or_asymm_mode())
- __uaccess_enable_tco();
-}
-
static inline void uaccess_disable_privileged(void)
{
- __uaccess_disable_tco();
+ mte_disable_tco();
if (uaccess_ttbr0_disable())
return;
@@ -194,7 +148,7 @@ static inline void uaccess_disable_privileged(void)
static inline void uaccess_enable_privileged(void)
{
- __uaccess_enable_tco();
+ mte_enable_tco();
if (uaccess_ttbr0_enable())
return;
@@ -237,7 +191,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
"1: " load " " reg "1, [%2]\n" \
"2:\n" \
_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
- : "+r" (err), "=&r" (x) \
+ : "+r" (err), "=r" (x) \
: "r" (addr))
#define __raw_get_mem(ldr, x, ptr, err, type) \
@@ -302,8 +256,8 @@ do { \
#define get_user __get_user
/*
- * We must not call into the scheduler between __uaccess_enable_tco_async() and
- * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * We must not call into the scheduler between __mte_enable_tco_async() and
+ * __mte_disable_tco_async(). As `dst` and `src` may contain blocking
* functions, we must evaluate these outside of the critical section.
*/
#define __get_kernel_nofault(dst, src, type, err_label) \
@@ -312,10 +266,10 @@ do { \
__typeof__(src) __gkn_src = (src); \
int __gkn_err = 0; \
\
- __uaccess_enable_tco_async(); \
+ __mte_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
(__force type *)(__gkn_src), __gkn_err, K); \
- __uaccess_disable_tco_async(); \
+ __mte_disable_tco_async(); \
\
if (unlikely(__gkn_err)) \
goto err_label; \
@@ -327,7 +281,7 @@ do { \
"2:\n" \
_ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0) \
: "+r" (err) \
- : "r" (x), "r" (addr))
+ : "rZ" (x), "r" (addr))
#define __raw_put_mem(str, x, ptr, err, type) \
do { \
@@ -388,8 +342,8 @@ do { \
#define put_user __put_user
/*
- * We must not call into the scheduler between __uaccess_enable_tco_async() and
- * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * We must not call into the scheduler between __mte_enable_tco_async() and
+ * __mte_disable_tco_async(). As `dst` and `src` may contain blocking
* functions, we must evaluate these outside of the critical section.
*/
#define __put_kernel_nofault(dst, src, type, err_label) \
@@ -398,10 +352,10 @@ do { \
__typeof__(src) __pkn_src = (src); \
int __pkn_err = 0; \
\
- __uaccess_enable_tco_async(); \
+ __mte_enable_tco_async(); \
__raw_put_mem("str", *((type *)(__pkn_src)), \
(__force type *)(__pkn_dst), __pkn_err, K); \
- __uaccess_disable_tco_async(); \
+ __mte_disable_tco_async(); \
\
if (unlikely(__pkn_err)) \
goto err_label; \
@@ -449,8 +403,6 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-struct page;
-void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index 1c8e4f2490bf..f3b151ed0d7a 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -55,7 +55,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret;
- __uaccess_enable_tco_async();
+ __mte_enable_tco_async();
/* Load word from unaligned pointer addr */
asm(
@@ -65,7 +65,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
: "=&r" (ret)
: "r" (addr), "Q" (*(unsigned long *)addr));
- __uaccess_disable_tco_async();
+ __mte_disable_tco_async();
return ret;
}
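
Not part of the patch: load_unaligned_zeropad() returns the word at 'addr' with any bytes that could not be read coming back as zero, while the renamed __mte_*_tco_async() helpers suppress asynchronous MTE tag-check faults around the access. A userspace model of the zero-padding semantics, using an explicit valid length in place of the exception fixup:

/*
 * Semantics sketch only: the real helper relies on an extable fixup, not
 * on knowing the readable length up front.
 */
#include <stdio.h>
#include <string.h>

static unsigned long load_zeropad(const char *addr, size_t valid)
{
	unsigned long ret = 0;

	memcpy(&ret, addr, valid < sizeof(ret) ? valid : sizeof(ret));
	return ret;		/* unreadable tail bytes appear as zero */
}

int main(void)
{
	const char buf[] = "abc";	/* only 4 readable bytes (incl. NUL) */

	printf("%#lx\n", load_zeropad(buf, sizeof(buf)));
	return 0;
}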