Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 48
-rw-r--r--  arch/arm64/Makefile | 17
-rw-r--r--  arch/arm64/include/asm/alternative-macros.h | 4
-rw-r--r--  arch/arm64/include/asm/assembler.h | 8
-rw-r--r--  arch/arm64/include/asm/cputype.h | 2
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 17
-rw-r--r--  arch/arm64/include/asm/ftrace.h | 72
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 9
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 3
-rw-r--r--  arch/arm64/include/asm/insn.h | 156
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 8
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 12
-rw-r--r--  arch/arm64/include/asm/module.lds.h | 8
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 13
-rw-r--r--  arch/arm64/include/asm/processor.h | 20
-rw-r--r--  arch/arm64/include/asm/scs.h | 49
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 2
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 150
-rw-r--r--  arch/arm64/include/asm/uprobes.h | 2
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 3
-rw-r--r--  arch/arm64/kernel/Makefile | 2
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 13
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 7
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 230
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 3
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 156
-rw-r--r--  arch/arm64/kernel/entry.S | 3
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 169
-rw-r--r--  arch/arm64/kernel/ftrace.c | 87
-rw-r--r--  arch/arm64/kernel/head.S | 3
-rw-r--r--  arch/arm64/kernel/irq.c | 11
-rw-r--r--  arch/arm64/kernel/module.c | 11
-rw-r--r--  arch/arm64/kernel/patch-scs.c | 257
-rw-r--r--  arch/arm64/kernel/perf_event.c | 3
-rw-r--r--  arch/arm64/kernel/pi/Makefile | 1
-rw-r--r--  arch/arm64/kernel/probes/decode-insn.c | 2
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 86
-rw-r--r--  arch/arm64/kernel/process.c | 2
-rw-r--r--  arch/arm64/kernel/ptrace.c | 5
-rw-r--r--  arch/arm64/kernel/sdei.c | 2
-rw-r--r--  arch/arm64/kernel/setup.c | 4
-rw-r--r--  arch/arm64/kernel/signal.c | 7
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 10
-rw-r--r--  arch/arm64/kernel/suspend.c | 2
-rw-r--r--  arch/arm64/kernel/syscall.c | 19
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 13
-rw-r--r--  arch/arm64/kvm/fpsimd.c | 26
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/Makefile | 1
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 4
-rw-r--r--  arch/arm64/lib/insn.c | 165
-rw-r--r--  arch/arm64/lib/mte.S | 2
-rw-r--r--  arch/arm64/mm/fault.c | 8
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 21
-rw-r--r--  arch/arm64/mm/init.c | 25
-rw-r--r--  arch/arm64/mm/mmu.c | 21
-rw-r--r--  arch/arm64/tools/cpucaps | 2
-rwxr-xr-x  arch/arm64/tools/gen-sysreg.awk | 2
-rw-r--r--  arch/arm64/tools/sysreg | 766
59 files changed, 1947 insertions, 808 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6a4704d4e36b..c6d15dd6cae3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config ARM64
def_bool y
+ select ACPI_APMT if ACPI
select ACPI_CCA_REQUIRED if ACPI
select ACPI_GENERIC_GSI if ACPI
select ACPI_GTDT if ACPI
@@ -117,6 +118,7 @@ config ARM64
select CPU_PM if (SUSPEND || CPU_IDLE)
select CRC32
select DCACHE_WORD_ACCESS
+ select DYNAMIC_FTRACE if FUNCTION_TRACER
select DMA_DIRECT_REMAP
select EDAC_SUPPORT
select FRAME_POINTER
@@ -181,8 +183,10 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
+ if $(cc-option,-fpatchable-function-entry=2)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
- if DYNAMIC_FTRACE_WITH_REGS
+ if DYNAMIC_FTRACE_WITH_ARGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
@@ -233,16 +237,16 @@ config ARM64
help
ARM 64-bit (AArch64) Linux support.
-config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
def_bool CC_IS_CLANG
# https://github.com/ClangBuiltLinux/linux/issues/1507
depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
- select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS
-config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
def_bool CC_IS_GCC
depends on $(cc-option,-fpatchable-function-entry=2)
- select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS
config 64BIT
def_bool y
@@ -370,6 +374,9 @@ config KASAN_SHADOW_OFFSET
default 0xeffffff800000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
default 0xffffffffffffffff
+config UNWIND_TABLES
+ bool
+
source "arch/arm64/Kconfig.platforms"
menu "Kernel Features"
@@ -964,6 +971,22 @@ config ARM64_ERRATUM_2457168
If unsure, say Y.
+config ARM64_ERRATUM_2645198
+ bool "Cortex-A715: 2645198: Workaround possible [ESR|FAR]_ELx corruption"
+ default y
+ help
+ This option adds the workaround for ARM Cortex-A715 erratum 2645198.
+
+ If a Cortex-A715 cpu sees a page mapping permissions change from executable
+ to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers on the
+ next instruction abort caused by permission fault.
+
+ Only user-space does executable to non-executable permission transition via
+ mprotect() system call. Workaround the problem by doing a break-before-make
+ TLB invalidation, for all changes to executable user space mappings.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -1815,7 +1838,7 @@ config ARM64_PTR_AUTH_KERNEL
# which is only understood by binutils starting with version 2.33.1.
depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
- depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_ARGS)
help
If the compiler supports the -mbranch-protection or
-msign-return-address flag (e.g. GCC 7 or later), then this option
@@ -1825,7 +1848,7 @@ config ARM64_PTR_AUTH_KERNEL
disabled with minimal loss of protection.
This feature works with FUNCTION_GRAPH_TRACER option only if
- DYNAMIC_FTRACE_WITH_REGS is enabled.
+ DYNAMIC_FTRACE_WITH_ARGS is enabled.
config CC_HAS_BRANCH_PROT_PAC_RET
# GCC 9 or later, clang 8 or later
@@ -1923,7 +1946,7 @@ config ARM64_BTI_KERNEL
depends on !CC_IS_GCC
# https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
- depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_ARGS)
help
Build the kernel with Branch Target Identification annotations
and enable enforcement of this for kernel code. When this option
@@ -2156,6 +2179,15 @@ config ARCH_NR_GPIO
If unsure, leave the default value.
+config UNWIND_PATCH_PAC_INTO_SCS
+ bool "Enable shadow call stack dynamically using code patching"
+ # needs Clang with https://reviews.llvm.org/D111780 incorporated
+ depends on CC_IS_CLANG && CLANG_VERSION >= 150000
+ depends on ARM64_PTR_AUTH_KERNEL && CC_HAS_BRANCH_PROT_PAC_RET
+ depends on SHADOW_CALL_STACK
+ select UNWIND_TABLES
+ select DYNAMIC_SCS
+
endmenu # "Kernel Features"
menu "Boot options"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 5e56d26a2239..d62bd221828f 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -45,8 +45,13 @@ KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
# Avoid generating .eh_frame* sections.
+ifneq ($(CONFIG_UNWIND_TABLES),y)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+else
+KBUILD_CFLAGS += -fasynchronous-unwind-tables
+KBUILD_AFLAGS += -fasynchronous-unwind-tables
+endif
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
@@ -72,10 +77,16 @@ branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=
# We enable additional protection for leaf functions as there is some
# narrow potential for ROP protection benefits and no substantial
# performance impact has been observed.
+PACRET-y := pac-ret+leaf
+
+# Using a shadow call stack in leaf functions is too costly, so avoid PAC there
+# as well when we may be patching PAC into SCS
+PACRET-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) := pac-ret
+
ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
-branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=pac-ret+leaf+bti
+branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=$(PACRET-y)+bti
else
-branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
+branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=$(PACRET-y)
endif
# -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the
# compiler to generate them and consequently to break the single image contract
@@ -128,7 +139,7 @@ endif
CHECKFLAGS += -D__aarch64__
-ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
+ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 3622e9f4fb44..bdf1f6bcd010 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -224,7 +224,7 @@ alternative_endif
#include <linux/types.h>
static __always_inline bool
-alternative_has_feature_likely(unsigned long feature)
+alternative_has_feature_likely(const unsigned long feature)
{
compiletime_assert(feature < ARM64_NCAPS,
"feature must be < ARM64_NCAPS");
@@ -242,7 +242,7 @@ l_no:
}
static __always_inline bool
-alternative_has_feature_unlikely(unsigned long feature)
+alternative_has_feature_unlikely(const unsigned long feature)
{
compiletime_assert(feature < ARM64_NCAPS,
"feature must be < ARM64_NCAPS");
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index d8d6627be0f6..376a980f2bad 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -635,12 +635,10 @@ alternative_endif
.endm
.macro pte_to_phys, phys, pte
-#ifdef CONFIG_ARM64_PA_BITS_52
- ubfiz \phys, \pte, #(48 - 16 - 12), #16
- bfxil \phys, \pte, #16, #32
- lsl \phys, \phys, #16
-#else
and \phys, \pte, #PTE_ADDR_MASK
+#ifdef CONFIG_ARM64_PA_BITS_52
+ orr \phys, \phys, \phys, lsl #PTE_ADDR_HIGH_SHIFT
+ and \phys, \phys, GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT)
#endif
.endm
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index abc418650fec..4b1ad810436f 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -80,6 +80,7 @@
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_A710 0xD47
+#define ARM_CPU_PART_CORTEX_A715 0xD4D
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
@@ -142,6 +143,7 @@
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 6f86b7ab6c28..e6fa1e2982c8 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -56,11 +56,20 @@ extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
+extern void fpsimd_kvm_prepare(void);
+
+struct cpu_fp_state {
+ struct user_fpsimd_state *st;
+ void *sve_state;
+ void *za_state;
+ u64 *svcr;
+ unsigned int sve_vl;
+ unsigned int sme_vl;
+ enum fp_type *fp_type;
+ enum fp_type to_save;
+};
-extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
- void *sve_state, unsigned int sve_vl,
- void *za_state, unsigned int sme_vl,
- u64 *svcr);
+extern void fpsimd_bind_state_to_cpu(struct cpu_fp_state *fp_state);
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);
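To illustrate the new interface (this sketch is not part of the patch), a caller such as the KVM FP load path might populate the consolidated cpu_fp_state rather than passing each pointer as a separate argument; the vcpu field names below are assumptions based on the kvm_host.h hunk further down:

	static void bind_guest_fp(struct kvm_vcpu *vcpu)
	{
		struct cpu_fp_state fp_state = {
			.st        = &vcpu->arch.ctxt.fp_regs,
			.sve_state = vcpu->arch.sve_state,
			.za_state  = NULL,			/* no SME storage in this sketch */
			.svcr      = &vcpu->arch.svcr,
			.sve_vl    = vcpu->arch.sve_max_vl,
			.sme_vl    = 0,
			.fp_type   = &vcpu->arch.fp_type,
			.to_save   = FP_STATE_CURRENT,		/* let the FP core pick the view to save */
		};

		fpsimd_bind_state_to_cpu(&fp_state);
	}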
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 329dbbd4d50b..5664729800ae 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -23,7 +23,7 @@
*/
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
#define MCOUNT_ADDR ((unsigned long)_mcount)
@@ -33,8 +33,7 @@
#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
#define FTRACE_PLT_IDX 0
-#define FTRACE_REGS_PLT_IDX 1
-#define NR_FTRACE_PLTS 2
+#define NR_FTRACE_PLTS 1
/*
* Currently, gcc tends to save the link register after the local variables
@@ -69,7 +68,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
* Adjust addr to point at the BL in the callsite.
* See ftrace_init_nop() for the callsite sequence.
*/
- if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
return addr + AARCH64_INSN_SIZE;
/*
* addr is the address of the mcount call instruction.
@@ -78,10 +77,71 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
return addr;
}
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct dyn_ftrace;
struct ftrace_ops;
-struct ftrace_regs;
+
+#define arch_ftrace_get_regs(regs) NULL
+
+struct ftrace_regs {
+ /* x0 - x8 */
+ unsigned long regs[9];
+ unsigned long __unused;
+
+ unsigned long fp;
+ unsigned long lr;
+
+ unsigned long sp;
+ unsigned long pc;
+};
+
+static __always_inline unsigned long
+ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
+{
+ return fregs->pc;
+}
+
+static __always_inline void
+ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
+ unsigned long pc)
+{
+ fregs->pc = pc;
+}
+
+static __always_inline unsigned long
+ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
+{
+ return fregs->sp;
+}
+
+static __always_inline unsigned long
+ftrace_regs_get_argument(struct ftrace_regs *fregs, unsigned int n)
+{
+ if (n < 8)
+ return fregs->regs[n];
+ return 0;
+}
+
+static __always_inline unsigned long
+ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
+{
+ return fregs->regs[0];
+}
+
+static __always_inline void
+ftrace_regs_set_return_value(struct ftrace_regs *fregs,
+ unsigned long ret)
+{
+ fregs->regs[0] = ret;
+}
+
+static __always_inline void
+ftrace_override_function_with_return(struct ftrace_regs *fregs)
+{
+ fregs->pc = fregs->lr;
+}
+
+int ftrace_regs_query_register_offset(const char *name);
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
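As a rough illustration of how these accessors are consumed (not taken from this patch), a ftrace_ops callback under DYNAMIC_FTRACE_WITH_ARGS could read the traced function's PC and first argument along these lines, assuming the generic ftrace_ops registration API:

	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct ftrace_regs *fregs)
	{
		unsigned long pc   = ftrace_regs_get_instruction_pointer(fregs);
		unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);

		pr_debug("hit %pS (x0=%#lx), called from %pS\n",
			 (void *)pc, arg0, (void *)parent_ip);
	}

	static struct ftrace_ops my_ops = {
		.func = my_trace_func,
	};

	/* e.g.	ftrace_set_filter(&my_ops, "do_sys_open", strlen("do_sys_open"), 0);
	 *	register_ftrace_function(&my_ops);
	 */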
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index d20f5da2d76f..6a4a1ab8eb23 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -49,6 +49,15 @@ extern pte_t huge_ptep_get(pte_t *ptep);
void __init arm64_hugetlb_cma_reserve(void);
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t new_pte);
+
#include <asm-generic/hugetlb.h>
#endif /* __ASM_HUGETLB_H */
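The bodies of these hooks live under arch/arm64/mm/ and are not shown in this hunk; as a hedged sketch of the break-before-make idea described in the ARM64_ERRATUM_2645198 help text above, the non-huge variant might look roughly like this:

	pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
				     pte_t *ptep)
	{
		/*
		 * Sketch only: when the erratum workaround is active and the old
		 * mapping is user-executable, fully invalidate it (break) before
		 * the caller installs the new permissions (make).
		 */
		if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
		    cpus_have_const_cap(ARM64_WORKAROUND_2645198) &&
		    pte_user_exec(READ_ONCE(*ptep)))
			return ptep_clear_flush(vma, addr, ptep);

		return ptep_get_and_clear(vma->vm_mm, addr, ptep);
	}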
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 298b386d3ebe..06dd12c514e6 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -120,6 +120,9 @@
#define KERNEL_HWCAP_WFXT __khwcap2_feature(WFXT)
#define KERNEL_HWCAP_EBF16 __khwcap2_feature(EBF16)
#define KERNEL_HWCAP_SVE_EBF16 __khwcap2_feature(SVE_EBF16)
+#define KERNEL_HWCAP_CSSC __khwcap2_feature(CSSC)
+#define KERNEL_HWCAP_RPRFM __khwcap2_feature(RPRFM)
+#define KERNEL_HWCAP_SVE2P1 __khwcap2_feature(SVE2P1)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 834bff720582..aaf1f52fbf3e 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -13,31 +13,6 @@
#include <asm/insn-def.h>
#ifndef __ASSEMBLY__
-/*
- * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
- * Section C3.1 "A64 instruction index by encoding":
- * AArch64 main encoding table
- * Bit position
- * 28 27 26 25 Encoding Group
- * 0 0 - - Unallocated
- * 1 0 0 - Data processing, immediate
- * 1 0 1 - Branch, exception generation and system instructions
- * - 1 - 0 Loads and stores
- * - 1 0 1 Data processing - register
- * 0 1 1 1 Data processing - SIMD and floating point
- * 1 1 1 1 Data processing - SIMD and floating point
- * "-" means "don't care"
- */
-enum aarch64_insn_encoding_class {
- AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */
- AARCH64_INSN_CLS_SVE, /* SVE instructions */
- AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */
- AARCH64_INSN_CLS_DP_REG, /* Data processing - register */
- AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */
- AARCH64_INSN_CLS_LDST, /* Loads and stores */
- AARCH64_INSN_CLS_BR_SYS, /* Branch, exception generation and
- * system instructions */
-};
enum aarch64_insn_hint_cr_op {
AARCH64_INSN_HINT_NOP = 0x0 << 5,
@@ -326,6 +301,23 @@ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
return (val); \
}
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section C3.1 "A64 instruction index by encoding":
+ * AArch64 main encoding table
+ * Bit position
+ * 28 27 26 25 Encoding Group
+ * 0 0 - - Unallocated
+ * 1 0 0 - Data processing, immediate
+ * 1 0 1 - Branch, exception generation and system instructions
+ * - 1 - 0 Loads and stores
+ * - 1 0 1 Data processing - register
+ * 0 1 1 1 Data processing - SIMD and floating point
+ * 1 1 1 1 Data processing - SIMD and floating point
+ * "-" means "don't care"
+ */
+__AARCH64_INSN_FUNCS(class_branch_sys, 0x1c000000, 0x14000000)
+
__AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000)
__AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
@@ -431,58 +423,122 @@ __AARCH64_INSN_FUNCS(pssbb, 0xFFFFFFFF, 0xD503349F)
#undef __AARCH64_INSN_FUNCS
-bool aarch64_insn_is_steppable_hint(u32 insn);
-bool aarch64_insn_is_branch_imm(u32 insn);
+static __always_inline bool aarch64_insn_is_steppable_hint(u32 insn)
+{
+ if (!aarch64_insn_is_hint(insn))
+ return false;
+
+ switch (insn & 0xFE0) {
+ case AARCH64_INSN_HINT_XPACLRI:
+ case AARCH64_INSN_HINT_PACIA_1716:
+ case AARCH64_INSN_HINT_PACIB_1716:
+ case AARCH64_INSN_HINT_PACIAZ:
+ case AARCH64_INSN_HINT_PACIASP:
+ case AARCH64_INSN_HINT_PACIBZ:
+ case AARCH64_INSN_HINT_PACIBSP:
+ case AARCH64_INSN_HINT_BTI:
+ case AARCH64_INSN_HINT_BTIC:
+ case AARCH64_INSN_HINT_BTIJ:
+ case AARCH64_INSN_HINT_BTIJC:
+ case AARCH64_INSN_HINT_NOP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static __always_inline bool aarch64_insn_is_branch(u32 insn)
+{
+ /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
+
+ return aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_cbz(insn) ||
+ aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_tbz(insn) ||
+ aarch64_insn_is_tbnz(insn) ||
+ aarch64_insn_is_ret(insn) ||
+ aarch64_insn_is_ret_auth(insn) ||
+ aarch64_insn_is_br(insn) ||
+ aarch64_insn_is_br_auth(insn) ||
+ aarch64_insn_is_blr(insn) ||
+ aarch64_insn_is_blr_auth(insn) ||
+ aarch64_insn_is_bcond(insn);
+}
-static inline bool aarch64_insn_is_adr_adrp(u32 insn)
+static __always_inline bool aarch64_insn_is_branch_imm(u32 insn)
{
- return aarch64_insn_is_adr(insn) || aarch64_insn_is_adrp(insn);
+ return aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_tbz(insn) ||
+ aarch64_insn_is_tbnz(insn) ||
+ aarch64_insn_is_cbz(insn) ||
+ aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn);
}
-static inline bool aarch64_insn_is_dsb(u32 insn)
+static __always_inline bool aarch64_insn_is_adr_adrp(u32 insn)
{
- return aarch64_insn_is_dsb_base(insn) || aarch64_insn_is_dsb_nxs(insn);
+ return aarch64_insn_is_adr(insn) ||
+ aarch64_insn_is_adrp(insn);
}
-static inline bool aarch64_insn_is_barrier(u32 insn)
+static __always_inline bool aarch64_insn_is_dsb(u32 insn)
{
- return aarch64_insn_is_dmb(insn) || aarch64_insn_is_dsb(insn) ||
- aarch64_insn_is_isb(insn) || aarch64_insn_is_sb(insn) ||
- aarch64_insn_is_clrex(insn) || aarch64_insn_is_ssbb(insn) ||
+ return aarch64_insn_is_dsb_base(insn) ||
+ aarch64_insn_is_dsb_nxs(insn);
+}
+
+static __always_inline bool aarch64_insn_is_barrier(u32 insn)
+{
+ return aarch64_insn_is_dmb(insn) ||
+ aarch64_insn_is_dsb(insn) ||
+ aarch64_insn_is_isb(insn) ||
+ aarch64_insn_is_sb(insn) ||
+ aarch64_insn_is_clrex(insn) ||
+ aarch64_insn_is_ssbb(insn) ||
aarch64_insn_is_pssbb(insn);
}
-static inline bool aarch64_insn_is_store_single(u32 insn)
+static __always_inline bool aarch64_insn_is_store_single(u32 insn)
{
return aarch64_insn_is_store_imm(insn) ||
aarch64_insn_is_store_pre(insn) ||
aarch64_insn_is_store_post(insn);
}
-static inline bool aarch64_insn_is_store_pair(u32 insn)
+static __always_inline bool aarch64_insn_is_store_pair(u32 insn)
{
return aarch64_insn_is_stp(insn) ||
aarch64_insn_is_stp_pre(insn) ||
aarch64_insn_is_stp_post(insn);
}
-static inline bool aarch64_insn_is_load_single(u32 insn)
+static __always_inline bool aarch64_insn_is_load_single(u32 insn)
{
return aarch64_insn_is_load_imm(insn) ||
aarch64_insn_is_load_pre(insn) ||
aarch64_insn_is_load_post(insn);
}
-static inline bool aarch64_insn_is_load_pair(u32 insn)
+static __always_inline bool aarch64_insn_is_load_pair(u32 insn)
{
return aarch64_insn_is_ldp(insn) ||
aarch64_insn_is_ldp_pre(insn) ||
aarch64_insn_is_ldp_post(insn);
}
+static __always_inline bool aarch64_insn_uses_literal(u32 insn)
+{
+ /* ldr/ldrsw (literal), prfm */
+
+ return aarch64_insn_is_ldr_lit(insn) ||
+ aarch64_insn_is_ldrsw_lit(insn) ||
+ aarch64_insn_is_adr_adrp(insn) ||
+ aarch64_insn_is_prfm_lit(insn);
+}
+
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
-bool aarch64_insn_uses_literal(u32 insn);
-bool aarch64_insn_is_branch(u32 insn);
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm);
@@ -496,8 +552,18 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_branch_type type);
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_condition cond);
-u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op);
-u32 aarch64_insn_gen_nop(void);
+
+static __always_inline u32
+aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
+{
+ return aarch64_insn_get_hint_value() | op;
+}
+
+static __always_inline u32 aarch64_insn_gen_nop(void)
+{
+ return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
+}
+
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
enum aarch64_insn_branch_type type);
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
@@ -580,10 +646,6 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
enum aarch64_insn_register Rn,
enum aarch64_insn_register Rd,
u8 lsb);
-u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
- enum aarch64_insn_prfm_type type,
- enum aarch64_insn_prfm_target target,
- enum aarch64_insn_prfm_policy policy);
#ifdef CONFIG_ARM64_LSE_ATOMICS
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
enum aarch64_insn_register address,
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index cea441b6aa5d..48ddc0f45d22 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -15,8 +15,8 @@
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
-static __always_inline bool arch_static_branch(struct static_key *key,
- bool branch)
+static __always_inline bool arch_static_branch(struct static_key * const key,
+ const bool branch)
{
asm_volatile_goto(
"1: nop \n\t"
@@ -32,8 +32,8 @@ l_yes:
return true;
}
-static __always_inline bool arch_static_branch_jump(struct static_key *key,
- bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key * const key,
+ const bool branch)
{
asm_volatile_goto(
"1: b %l[l_yes] \n\t"
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 45e2136322ba..fd34ab155d0b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -306,8 +306,18 @@ struct vcpu_reset_state {
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
- /* Guest floating point state */
+ /*
+ * Guest floating point state
+ *
+ * The architecture has two main floating point extensions,
+ * the original FPSIMD and SVE. These have overlapping
+ * register views, with the FPSIMD V registers occupying the
+ * low 128 bits of the SVE Z registers. When the core
+ * floating point code saves the register state of a task it
+ * records which view it saved in fp_type.
+ */
void *sve_state;
+ enum fp_type fp_type;
unsigned int sve_max_vl;
u64 svcr;
diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
index 094701ec5500..dbba4b7559aa 100644
--- a/arch/arm64/include/asm/module.lds.h
+++ b/arch/arm64/include/asm/module.lds.h
@@ -17,4 +17,12 @@ SECTIONS {
*/
.text.hot : { *(.text.hot) }
#endif
+
+#ifdef CONFIG_UNWIND_TABLES
+ /*
+ * Currently, we only use unwind info at module load time, so we can
+ * put it into the .init allocation.
+ */
+ .init.eh_frame : { *(.eh_frame) }
+#endif
}
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5ab8d163198f..f658aafc47df 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -159,6 +159,7 @@
#ifdef CONFIG_ARM64_PA_BITS_52
#define PTE_ADDR_HIGH (_AT(pteval_t, 0xf) << 12)
#define PTE_ADDR_MASK (PTE_ADDR_LOW | PTE_ADDR_HIGH)
+#define PTE_ADDR_HIGH_SHIFT 36
#else
#define PTE_ADDR_MASK PTE_ADDR_LOW
#endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 3efedacc014f..0dd3cd38f54d 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -77,11 +77,11 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
return (pte_val(pte) & PTE_ADDR_LOW) |
- ((pte_val(pte) & PTE_ADDR_HIGH) << 36);
+ ((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
- return (phys | (phys >> 36)) & PTE_ADDR_MASK;
+ return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_MASK)
@@ -1095,6 +1095,15 @@ static inline bool pud_sect_supported(void)
}
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+#define ptep_modify_prot_start ptep_modify_prot_start
+extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
+#define ptep_modify_prot_commit ptep_modify_prot_commit
+extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t new_pte);
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
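Editorial worked example (restating the masks above, nothing new): with 52-bit physical addresses the high PA bits [51:48] are stored in PTE bits [15:12], so the distance between the two fields is 48 - 12 = 36, which is exactly PTE_ADDR_HIGH_SHIFT. For a 64K-aligned PA whose bits [51:48] are 0xa:

	phys                                  = 0x000a_0000_0001_0000
	phys >> 36                            = 0x0000_0000_0000_a000
	(phys | phys >> 36) & PTE_ADDR_MASK   = 0x0000_0000_0001_a000   /* PTE address bits */

	__pte_to_phys() reverses the packing:
	(pte & PTE_ADDR_LOW)                  = 0x0000_0000_0001_0000
	(pte & PTE_ADDR_HIGH) << 36           = 0x000a_0000_0000_0000
	sum                                   = 0x000a_0000_0001_0000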
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 400f8956328b..d51b32a69309 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -122,6 +122,12 @@ enum vec_type {
ARM64_VEC_MAX,
};
+enum fp_type {
+ FP_STATE_CURRENT, /* Save based on current task state. */
+ FP_STATE_FPSIMD,
+ FP_STATE_SVE,
+};
+
struct cpu_context {
unsigned long x19;
unsigned long x20;
@@ -152,6 +158,7 @@ struct thread_struct {
struct user_fpsimd_state fpsimd_state;
} uw;
+ enum fp_type fp_type; /* registers FPSIMD or SVE? */
unsigned int fpsimd_cpu;
void *sve_state; /* SVE registers, if any */
void *za_state; /* ZA register, if any */
@@ -396,18 +403,5 @@ long get_tagged_addr_ctrl(struct task_struct *task);
#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current)
#endif
-/*
- * For CONFIG_GCC_PLUGIN_STACKLEAK
- *
- * These need to be macros because otherwise we get stuck in a nightmare
- * of header definitions for the use of task_stack_page.
- */
-
-/*
- * The top of the current task's task stack
- */
-#define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE)
-#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1))
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index 8297bccf0784..ff7da1268a52 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -5,6 +5,7 @@
#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
+#include <asm/sysreg.h>
#ifdef CONFIG_SHADOW_CALL_STACK
scs_sp .req x18
@@ -24,6 +25,54 @@
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
+
+#else
+
+#include <linux/scs.h>
+#include <asm/cpufeature.h>
+
+#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS
+static inline bool should_patch_pac_into_scs(void)
+{
+ u64 reg;
+
+ /*
+ * We only enable the shadow call stack dynamically if we are running
+ * on a system that does not implement PAC or BTI. PAC and SCS provide
+ * roughly the same level of protection, and BTI relies on the PACIASP
+ * instructions serving as landing pads, preventing us from patching
+ * those instructions into something else.
+ */
+ reg = read_sysreg_s(SYS_ID_AA64ISAR1_EL1);
+ if (SYS_FIELD_GET(ID_AA64ISAR1_EL1, APA, reg) |
+ SYS_FIELD_GET(ID_AA64ISAR1_EL1, API, reg))
+ return false;
+
+ reg = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
+ if (SYS_FIELD_GET(ID_AA64ISAR2_EL1, APA3, reg))
+ return false;
+
+ if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) {
+ reg = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
+ if (reg & (0xf << ID_AA64PFR1_EL1_BT_SHIFT))
+ return false;
+ }
+ return true;
+}
+
+static inline void dynamic_scs_init(void)
+{
+ if (should_patch_pac_into_scs()) {
+ pr_info("Enabling dynamic shadow call stack\n");
+ static_branch_enable(&dynamic_scs_enabled);
+ }
+}
+#else
+static inline void dynamic_scs_init(void) {}
+#endif
+
+int scs_patch(const u8 eh_frame[], int size);
+
#endif /* __ASSEMBLY __ */
#endif /* _ASM_SCS_H */
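Tying this to the module.lds.h change above (which keeps a module's .eh_frame in the .init allocation): a hedged sketch of how module loading might hand that section to scs_patch(), assuming a hypothetical helper that has already located the section header and using the dynamic_scs_enabled static key declared elsewhere:

	static int module_patch_scs(const Elf_Shdr *init_eh_frame)
	{
		/* Sketch: do nothing unless dynamic SCS was enabled at boot. */
		if (!static_branch_likely(&dynamic_scs_enabled))
			return 0;

		/* Rewrite PACIASP/AUTIASP in the module text into SCS push/pop. */
		return scs_patch((const u8 *)init_eh_frame->sh_addr,
				 init_eh_frame->sh_size);
	}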
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 5a0edb064ea4..4e5354beafb0 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -57,6 +57,8 @@ static inline bool on_task_stack(const struct task_struct *tsk,
return stackinfo_on_stack(&info, sp, size);
}
+#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1))
+
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 7d301700d1a9..1312fb48f18b 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -90,20 +90,24 @@
*/
#define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift)
#define PSTATE_Imm_shift CRm_shift
+#define SET_PSTATE(x, r) __emit_inst(0xd500401f | PSTATE_ ## r | ((!!x) << PSTATE_Imm_shift))
#define PSTATE_PAN pstate_field(0, 4)
#define PSTATE_UAO pstate_field(0, 3)
#define PSTATE_SSBS pstate_field(3, 1)
+#define PSTATE_DIT pstate_field(3, 2)
#define PSTATE_TCO pstate_field(3, 4)
-#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift))
-#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
-#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
-#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_PAN(x) SET_PSTATE((x), PAN)
+#define SET_PSTATE_UAO(x) SET_PSTATE((x), UAO)
+#define SET_PSTATE_SSBS(x) SET_PSTATE((x), SSBS)
+#define SET_PSTATE_DIT(x) SET_PSTATE((x), DIT)
+#define SET_PSTATE_TCO(x) SET_PSTATE((x), TCO)
#define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x))
#define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x))
#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x))
+#define set_pstate_dit(x) asm volatile(SET_PSTATE_DIT(x))
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
@@ -165,31 +169,6 @@
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
-#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0)
-#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1)
-#define SYS_ID_PFR2_EL1 sys_reg(3, 0, 0, 3, 4)
-#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2)
-#define SYS_ID_DFR1_EL1 sys_reg(3, 0, 0, 3, 5)
-#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3)
-#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4)
-#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5)
-#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6)
-#define SYS_ID_MMFR3_EL1 sys_reg(3, 0, 0, 1, 7)
-#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6)
-#define SYS_ID_MMFR5_EL1 sys_reg(3, 0, 0, 3, 6)
-
-#define SYS_ID_ISAR0_EL1 sys_reg(3, 0, 0, 2, 0)
-#define SYS_ID_ISAR1_EL1 sys_reg(3, 0, 0, 2, 1)
-#define SYS_ID_ISAR2_EL1 sys_reg(3, 0, 0, 2, 2)
-#define SYS_ID_ISAR3_EL1 sys_reg(3, 0, 0, 2, 3)
-#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4)
-#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5)
-#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7)
-
-#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0)
-#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
-#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2)
-
#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
@@ -692,112 +671,6 @@
#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
#endif
-#define ID_DFR0_PERFMON_SHIFT 24
-
-#define ID_DFR0_PERFMON_8_0 0x3
-#define ID_DFR0_PERFMON_8_1 0x4
-#define ID_DFR0_PERFMON_8_4 0x5
-#define ID_DFR0_PERFMON_8_5 0x6
-
-#define ID_ISAR4_SWP_FRAC_SHIFT 28
-#define ID_ISAR4_PSR_M_SHIFT 24
-#define ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT 20
-#define ID_ISAR4_BARRIER_SHIFT 16
-#define ID_ISAR4_SMC_SHIFT 12
-#define ID_ISAR4_WRITEBACK_SHIFT 8
-#define ID_ISAR4_WITHSHIFTS_SHIFT 4
-#define ID_ISAR4_UNPRIV_SHIFT 0
-
-#define ID_DFR1_MTPMU_SHIFT 0
-
-#define ID_ISAR0_DIVIDE_SHIFT 24
-#define ID_ISAR0_DEBUG_SHIFT 20
-#define ID_ISAR0_COPROC_SHIFT 16
-#define ID_ISAR0_CMPBRANCH_SHIFT 12
-#define ID_ISAR0_BITFIELD_SHIFT 8
-#define ID_ISAR0_BITCOUNT_SHIFT 4
-#define ID_ISAR0_SWAP_SHIFT 0
-
-#define ID_ISAR5_RDM_SHIFT 24
-#define ID_ISAR5_CRC32_SHIFT 16
-#define ID_ISAR5_SHA2_SHIFT 12
-#define ID_ISAR5_SHA1_SHIFT 8
-#define ID_ISAR5_AES_SHIFT 4
-#define ID_ISAR5_SEVL_SHIFT 0
-
-#define ID_ISAR6_I8MM_SHIFT 24
-#define ID_ISAR6_BF16_SHIFT 20
-#define ID_ISAR6_SPECRES_SHIFT 16
-#define ID_ISAR6_SB_SHIFT 12
-#define ID_ISAR6_FHM_SHIFT 8
-#define ID_ISAR6_DP_SHIFT 4
-#define ID_ISAR6_JSCVT_SHIFT 0
-
-#define ID_MMFR0_INNERSHR_SHIFT 28
-#define ID_MMFR0_FCSE_SHIFT 24
-#define ID_MMFR0_AUXREG_SHIFT 20
-#define ID_MMFR0_TCM_SHIFT 16
-#define ID_MMFR0_SHARELVL_SHIFT 12
-#define ID_MMFR0_OUTERSHR_SHIFT 8
-#define ID_MMFR0_PMSA_SHIFT 4
-#define ID_MMFR0_VMSA_SHIFT 0
-
-#define ID_MMFR4_EVT_SHIFT 28
-#define ID_MMFR4_CCIDX_SHIFT 24
-#define ID_MMFR4_LSM_SHIFT 20
-#define ID_MMFR4_HPDS_SHIFT 16
-#define ID_MMFR4_CNP_SHIFT 12
-#define ID_MMFR4_XNX_SHIFT 8
-#define ID_MMFR4_AC2_SHIFT 4
-#define ID_MMFR4_SPECSEI_SHIFT 0
-
-#define ID_MMFR5_ETS_SHIFT 0
-
-#define ID_PFR0_DIT_SHIFT 24
-#define ID_PFR0_CSV2_SHIFT 16
-#define ID_PFR0_STATE3_SHIFT 12
-#define ID_PFR0_STATE2_SHIFT 8
-#define ID_PFR0_STATE1_SHIFT 4
-#define ID_PFR0_STATE0_SHIFT 0
-
-#define ID_DFR0_PERFMON_SHIFT 24
-#define ID_DFR0_MPROFDBG_SHIFT 20
-#define ID_DFR0_MMAPTRC_SHIFT 16
-#define ID_DFR0_COPTRC_SHIFT 12
-#define ID_DFR0_MMAPDBG_SHIFT 8
-#define ID_DFR0_COPSDBG_SHIFT 4
-#define ID_DFR0_COPDBG_SHIFT 0
-
-#define ID_PFR2_SSBS_SHIFT 4
-#define ID_PFR2_CSV3_SHIFT 0
-
-#define MVFR0_FPROUND_SHIFT 28
-#define MVFR0_FPSHVEC_SHIFT 24
-#define MVFR0_FPSQRT_SHIFT 20
-#define MVFR0_FPDIVIDE_SHIFT 16
-#define MVFR0_FPTRAP_SHIFT 12
-#define MVFR0_FPDP_SHIFT 8
-#define MVFR0_FPSP_SHIFT 4
-#define MVFR0_SIMD_SHIFT 0
-
-#define MVFR1_SIMDFMAC_SHIFT 28
-#define MVFR1_FPHP_SHIFT 24
-#define MVFR1_SIMDHP_SHIFT 20
-#define MVFR1_SIMDSP_SHIFT 16
-#define MVFR1_SIMDINT_SHIFT 12
-#define MVFR1_SIMDLS_SHIFT 8
-#define MVFR1_FPDNAN_SHIFT 4
-#define MVFR1_FPFTZ_SHIFT 0
-
-#define ID_PFR1_GIC_SHIFT 28
-#define ID_PFR1_VIRT_FRAC_SHIFT 24
-#define ID_PFR1_SEC_FRAC_SHIFT 20
-#define ID_PFR1_GENTIMER_SHIFT 16
-#define ID_PFR1_VIRTUALIZATION_SHIFT 12
-#define ID_PFR1_MPROGMOD_SHIFT 8
-#define ID_PFR1_SECURITY_SHIFT 4
-#define ID_PFR1_PROGMOD_SHIFT 0
-
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
@@ -815,9 +688,6 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT
#endif
-#define MVFR2_FPMISC_SHIFT 4
-#define MVFR2_SIMDMISC_SHIFT 0
-
#define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */
#define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */
@@ -851,10 +721,6 @@
#define SYS_RGSR_EL1_SEED_SHIFT 8
#define SYS_RGSR_EL1_SEED_MASK 0xffffUL
-/* GMID_EL1 field definitions */
-#define GMID_EL1_BS_SHIFT 0
-#define GMID_EL1_BS_SIZE 4
-
/* TFSR{,E0}_EL1 bit definitions */
#define SYS_TFSR_EL1_TF0_SHIFT 0
#define SYS_TFSR_EL1_TF1_SHIFT 1
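For reference, the generic SET_PSTATE() helper means the new DIT accessor expands the same way as the existing PSTATE ones, without each field carrying its own copy of the encoding:

	/*
	 * set_pstate_dit(1)
	 *   -> asm volatile(SET_PSTATE_DIT(1))
	 *   -> asm volatile(SET_PSTATE(1, DIT))
	 *   -> asm volatile(__emit_inst(0xd500401f | PSTATE_DIT | (1 << PSTATE_Imm_shift)))
	 *
	 * i.e. an MSR-immediate "MSR DIT, #1" (Op1 = 3, Op2 = 2), matching
	 * pstate_field(3, 2) above.
	 */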
diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h
index 315eef654e39..ba4bff5ca674 100644
--- a/arch/arm64/include/asm/uprobes.h
+++ b/arch/arm64/include/asm/uprobes.h
@@ -12,7 +12,7 @@
#define MAX_UINSN_BYTES AARCH64_INSN_SIZE
-#define UPROBE_SWBP_INSN BRK64_OPCODE_UPROBES
+#define UPROBE_SWBP_INSN cpu_to_le32(BRK64_OPCODE_UPROBES)
#define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 9b245da6f507..b713d30544f1 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -93,5 +93,8 @@
#define HWCAP2_WFXT (1UL << 31)
#define HWCAP2_EBF16 (1UL << 32)
#define HWCAP2_SVE_EBF16 (1UL << 33)
+#define HWCAP2_CSSC (1UL << 34)
+#define HWCAP2_RPRFM (1UL << 35)
+#define HWCAP2_SVE2P1 (1UL << 36)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 2f361a883d8c..8dd925f4a4c6 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -80,6 +80,8 @@ obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_ARM64_MTE) += mte.o
obj-y += vdso-wrap.o
obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
+obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o
+CFLAGS_patch-scs.o += -mbranch-protection=none
# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 1197e7679882..2234624536d9 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -82,6 +82,19 @@ int main(void)
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
BLANK();
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+ DEFINE(FREGS_X0, offsetof(struct ftrace_regs, regs[0]));
+ DEFINE(FREGS_X2, offsetof(struct ftrace_regs, regs[2]));
+ DEFINE(FREGS_X4, offsetof(struct ftrace_regs, regs[4]));
+ DEFINE(FREGS_X6, offsetof(struct ftrace_regs, regs[6]));
+ DEFINE(FREGS_X8, offsetof(struct ftrace_regs, regs[8]));
+ DEFINE(FREGS_FP, offsetof(struct ftrace_regs, fp));
+ DEFINE(FREGS_LR, offsetof(struct ftrace_regs, lr));
+ DEFINE(FREGS_SP, offsetof(struct ftrace_regs, sp));
+ DEFINE(FREGS_PC, offsetof(struct ftrace_regs, pc));
+ DEFINE(FREGS_SIZE, sizeof(struct ftrace_regs));
+ BLANK();
+#endif
#ifdef CONFIG_COMPAT
DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
DEFINE(COMPAT_RT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_rt_sigframe, sig.uc.uc_mcontext.arm_r0));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 89ac00084f38..307faa2b4395 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -661,6 +661,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2645198
+ {
+ .desc = "ARM erratum 2645198",
+ .capability = ARM64_WORKAROUND_2645198,
+ ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
+ },
+#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
{
.desc = "ARM erratum 2077057",
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b3f37e2209ad..ff74d78145bd 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -212,6 +212,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
@@ -402,14 +404,14 @@ struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
};
static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_INNERSHR_SHIFT, 4, 0xf),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_FCSE_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_AUXREG_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_TCM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_SHARELVL_SHIFT, 4, 0),
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_OUTERSHR_SHIFT, 4, 0xf),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_PMSA_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_VMSA_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_InnerShr_SHIFT, 4, 0xf),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_FCSE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_AuxReg_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_TCM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_ShareLvl_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_OuterShr_SHIFT, 4, 0xf),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_PMSA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_VMSA_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -429,32 +431,32 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
};
static const struct arm64_ftr_bits ftr_mvfr0[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPROUND_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSHVEC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSQRT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDIVIDE_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPTRAP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_SIMD_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPRound_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPShVec_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSqrt_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDivide_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPTrap_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_SIMDReg_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mvfr1[] = {
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDFMAC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPHP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDHP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDSP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDINT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDLS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPDNAN_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPFTZ_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDFMAC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDSP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDInt_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDLS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPDNaN_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPFtZ_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mvfr2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_FPMISC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_SIMDMISC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_FPMisc_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_SIMDMisc_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -470,34 +472,34 @@ static const struct arm64_ftr_bits ftr_gmid[] = {
};
static const struct arm64_ftr_bits ftr_id_isar0[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_COPROC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_CMPBRANCH_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITFIELD_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITCOUNT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_SWAP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Divide_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Debug_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Coproc_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_CmpBranch_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitField_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitCount_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Swap_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_isar5[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_RDM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_CRC32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SEVL_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EVT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CCIDX_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_LSM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_HPDS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CNP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_XNX_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_AC2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_EVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CCIDX_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_LSM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_HPDS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CnP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_XNX_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_AC2_SHIFT, 4, 0),
/*
* SpecSEI = 1 indicates that the PE might generate an SError on an
@@ -505,80 +507,80 @@ static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
* SError might be generated than it will not be. Hence it has been
* classified as FTR_HIGHER_SAFE.
*/
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_SPECSEI_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_EL1_SpecSEI_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_isar4[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SWP_FRAC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_PSR_M_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_BARRIER_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SMC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WRITEBACK_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WITHSHIFTS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_UNPRIV_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SWP_frac_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_PSR_M_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SynchPrim_frac_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Barrier_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SMC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Writeback_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_WithShifts_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Unpriv_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_ETS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_EL1_ETS_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_isar6[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_pfr0[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_DIT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_CSV2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE1_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE0_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_DIT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_CSV2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State0_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_pfr1[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GIC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRT_FRAC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SEC_FRAC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GENTIMER_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRTUALIZATION_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_MPROGMOD_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SECURITY_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_PROGMOD_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GIC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virt_frac_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Sec_frac_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GenTimer_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virtualization_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_MProgMod_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Security_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_ProgMod_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_pfr2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_CSV3_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_dfr0[] = {
/* [31:28] TraceFilt */
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_PERFMON_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPDBG_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPSDBG_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPDBG_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_EL1_PerfMon_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MProfDbg_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapTrc_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopTrc_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapDbg_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopSDbg_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopDbg_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_dfr1[] = {
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_MTPMU_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_EL1_MTPMU_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -1119,12 +1121,12 @@ static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
* EL1-dependent register fields to avoid spurious sanity check fails.
*/
if (!id_aa64pfr0_32bit_el1(pfr0)) {
- relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_SMC_SHIFT);
- relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRT_FRAC_SHIFT);
- relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SEC_FRAC_SHIFT);
- relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRTUALIZATION_SHIFT);
- relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SECURITY_SHIFT);
- relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_PROGMOD_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_EL1_SMC_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Virt_frac_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Sec_frac_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Virtualization_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Security_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_ProgMod_SHIFT);
}
taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
@@ -2101,6 +2103,11 @@ static void cpu_trap_el0_impdef(const struct arm64_cpu_capabilities *__unused)
sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_TIDCP);
}
+static void cpu_enable_dit(const struct arm64_cpu_capabilities *__unused)
+{
+ set_pstate_dit(1);
+}
+
/* Internal helper functions to match cpu capability type */
static bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
@@ -2664,6 +2671,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
.cpu_enable = cpu_trap_el0_impdef,
},
+ {
+ .desc = "Data independent timing control (DIT)",
+ .capability = ARM64_HAS_DIT,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_EL1_DIT_SHIFT,
+ .field_width = 4,
+ .min_field_value = ID_AA64PFR0_EL1_DIT_IMP,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_dit,
+ },
{},
};
@@ -2772,6 +2791,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE_IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
@@ -2798,6 +2818,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
#endif /* CONFIG_ARM64_MTE */
HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
+ HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_CSSC_IMP, CAP_HWCAP, KERNEL_HWCAP_CSSC),
+ HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_RPRFM_IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
#ifdef CONFIG_ARM64_SME
@@ -2829,24 +2851,24 @@ static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
else
mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
- return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
- cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
- cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
+ return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDSP_SHIFT) &&
+ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDInt_SHIFT) &&
+ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDLS_SHIFT);
}
#endif
static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
- HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
+ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDFMAC_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
- HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
- HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
+ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
+ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
{},
};
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 28d4f442b0bc..379695262b77 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -116,6 +116,9 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_WFXT] = "wfxt",
[KERNEL_HWCAP_EBF16] = "ebf16",
[KERNEL_HWCAP_SVE_EBF16] = "sveebf16",
+ [KERNEL_HWCAP_CSSC] = "cssc",
+ [KERNEL_HWCAP_RPRFM] = "rprfm",
+ [KERNEL_HWCAP_SVE2P1] = "sve2p1",
};
#ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 795344ab4ec4..30cc2a9d1757 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -13,83 +13,58 @@
#include <asm/ftrace.h>
#include <asm/insn.h>
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
* Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
* the regular function prologue. For an enabled callsite, ftrace_init_nop() and
* ftrace_make_call() have patched those NOPs to:
*
* MOV X9, LR
- * BL <entry>
- *
- * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
+ * BL ftrace_caller
*
* Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
* live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
* clobber.
*
- * We save the callsite's context into a pt_regs before invoking any ftrace
- * callbacks. So that we can get a sensible backtrace, we create a stack record
- * for the callsite and the ftrace entry assembly. This is not sufficient for
- * reliable stacktrace: until we create the callsite stack record, its caller
- * is missing from the LR and existing chain of frame records.
+ * We save the callsite's context into a struct ftrace_regs before invoking any
+ * ftrace callbacks. So that we can get a sensible backtrace, we create frame
+ * records for the callsite and the ftrace entry assembly. This is not
+ * sufficient for reliable stacktrace: until we create the callsite stack
+ * record, its caller is missing from the LR and existing chain of frame
+ * records.
*/
- .macro ftrace_regs_entry, allregs=0
- /* Make room for pt_regs, plus a callee frame */
- sub sp, sp, #(PT_REGS_SIZE + 16)
-
- /* Save function arguments (and x9 for simplicity) */
- stp x0, x1, [sp, #S_X0]
- stp x2, x3, [sp, #S_X2]
- stp x4, x5, [sp, #S_X4]
- stp x6, x7, [sp, #S_X6]
- stp x8, x9, [sp, #S_X8]
-
- /* Optionally save the callee-saved registers, always save the FP */
- .if \allregs == 1
- stp x10, x11, [sp, #S_X10]
- stp x12, x13, [sp, #S_X12]
- stp x14, x15, [sp, #S_X14]
- stp x16, x17, [sp, #S_X16]
- stp x18, x19, [sp, #S_X18]
- stp x20, x21, [sp, #S_X20]
- stp x22, x23, [sp, #S_X22]
- stp x24, x25, [sp, #S_X24]
- stp x26, x27, [sp, #S_X26]
- stp x28, x29, [sp, #S_X28]
- .else
- str x29, [sp, #S_FP]
- .endif
-
- /* Save the callsite's SP and LR */
- add x10, sp, #(PT_REGS_SIZE + 16)
- stp x9, x10, [sp, #S_LR]
+SYM_CODE_START(ftrace_caller)
+ bti c
- /* Save the PC after the ftrace callsite */
- str x30, [sp, #S_PC]
+ /* Save original SP */
+ mov x10, sp
- /* Create a frame record for the callsite above pt_regs */
- stp x29, x9, [sp, #PT_REGS_SIZE]
- add x29, sp, #PT_REGS_SIZE
+ /* Make room for ftrace regs, plus two frame records */
+ sub sp, sp, #(FREGS_SIZE + 32)
- /* Create our frame record within pt_regs. */
- stp x29, x30, [sp, #S_STACKFRAME]
- add x29, sp, #S_STACKFRAME
- .endm
+ /* Save function arguments */
+ stp x0, x1, [sp, #FREGS_X0]
+ stp x2, x3, [sp, #FREGS_X2]
+ stp x4, x5, [sp, #FREGS_X4]
+ stp x6, x7, [sp, #FREGS_X6]
+ str x8, [sp, #FREGS_X8]
-SYM_CODE_START(ftrace_regs_caller)
- bti c
- ftrace_regs_entry 1
- b ftrace_common
-SYM_CODE_END(ftrace_regs_caller)
+ /* Save the callsite's FP, LR, SP */
+ str x29, [sp, #FREGS_FP]
+ str x9, [sp, #FREGS_LR]
+ str x10, [sp, #FREGS_SP]
-SYM_CODE_START(ftrace_caller)
- bti c
- ftrace_regs_entry 0
- b ftrace_common
-SYM_CODE_END(ftrace_caller)
+ /* Save the PC after the ftrace callsite */
+ str x30, [sp, #FREGS_PC]
+
+ /* Create a frame record for the callsite above the ftrace regs */
+ stp x29, x9, [sp, #FREGS_SIZE + 16]
+ add x29, sp, #FREGS_SIZE + 16
+
+ /* Create our frame record above the ftrace regs */
+ stp x29, x30, [sp, #FREGS_SIZE]
+ add x29, sp, #FREGS_SIZE
-SYM_CODE_START(ftrace_common)
sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
mov x1, x9 // parent_ip (callsite's LR)
ldr_l x2, function_trace_op // op
@@ -104,24 +79,24 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
* to restore x0-x8, x29, and x30.
*/
/* Restore function arguments */
- ldp x0, x1, [sp]
- ldp x2, x3, [sp, #S_X2]
- ldp x4, x5, [sp, #S_X4]
- ldp x6, x7, [sp, #S_X6]
- ldr x8, [sp, #S_X8]
+ ldp x0, x1, [sp, #FREGS_X0]
+ ldp x2, x3, [sp, #FREGS_X2]
+ ldp x4, x5, [sp, #FREGS_X4]
+ ldp x6, x7, [sp, #FREGS_X6]
+ ldr x8, [sp, #FREGS_X8]
/* Restore the callsite's FP, LR, PC */
- ldr x29, [sp, #S_FP]
- ldr x30, [sp, #S_LR]
- ldr x9, [sp, #S_PC]
+ ldr x29, [sp, #FREGS_FP]
+ ldr x30, [sp, #FREGS_LR]
+ ldr x9, [sp, #FREGS_PC]
/* Restore the callsite's SP */
- add sp, sp, #PT_REGS_SIZE + 16
+ add sp, sp, #FREGS_SIZE + 32
ret x9
-SYM_CODE_END(ftrace_common)
+SYM_CODE_END(ftrace_caller)
-#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
/*
* Gcc with -pg will put the following code in the beginning of each function:
@@ -195,44 +170,6 @@ SYM_CODE_END(ftrace_common)
add \reg, \reg, #8
.endm
-#ifndef CONFIG_DYNAMIC_FTRACE
-/*
- * void _mcount(unsigned long return_address)
- * @return_address: return address to instrumented function
- *
- * This function makes calls, if enabled, to:
- * - tracer function to probe instrumented function's entry,
- * - ftrace_graph_caller to set up an exit hook
- */
-SYM_FUNC_START(_mcount)
- mcount_enter
-
- ldr_l x2, ftrace_trace_function
- adr x0, ftrace_stub
- cmp x0, x2 // if (ftrace_trace_function
- b.eq skip_ftrace_call // != ftrace_stub) {
-
- mcount_get_pc x0 // function's pc
- mcount_get_lr x1 // function's lr (= parent's pc)
- blr x2 // (*ftrace_trace_function)(pc, lr);
-
-skip_ftrace_call: // }
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- ldr_l x2, ftrace_graph_return
- cmp x0, x2 // if ((ftrace_graph_return
- b.ne ftrace_graph_caller // != ftrace_stub)
-
- ldr_l x2, ftrace_graph_entry // || (ftrace_graph_entry
- adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
- cmp x0, x2
- b.ne ftrace_graph_caller // ftrace_graph_caller();
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
- mcount_exit
-SYM_FUNC_END(_mcount)
-EXPORT_SYMBOL(_mcount)
-NOKPROBE(_mcount)
-
-#else /* CONFIG_DYNAMIC_FTRACE */
/*
* _mcount() is used to build the kernel with -pg option, but all the branch
* instructions to _mcount() are replaced to NOP initially at kernel start up,
@@ -272,7 +209,6 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
mcount_exit
SYM_FUNC_END(ftrace_caller)
-#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
@@ -293,7 +229,7 @@ SYM_FUNC_START(ftrace_graph_caller)
mcount_exit
SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
SYM_TYPED_FUNC_START(ftrace_stub)
ret
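The comment above describes the patched callsite layout; as a rough sketch (not part of the patch, my_func is a placeholder name), an instrumented function under CONFIG_DYNAMIC_FTRACE_WITH_ARGS changes as follows when tracing is enabled:

/*
 * my_func:                          my_func:
 *     nop              ------>          mov  x9, x30       // preserve LR
 *     nop                               bl   ftrace_caller
 *     <prologue>                        <prologue>
 *
 * ftrace_caller then spills x0-x8, the callsite's FP/LR/SP and PC into a
 * struct ftrace_regs on the stack before invoking the ftrace ops, and
 * restores them on return.
 */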
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e28137d64b76..11cb99c4d298 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -197,6 +197,9 @@ alternative_cb_end
.endm
.macro kernel_entry, el, regsize = 64
+ .if \el == 0
+ alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT
+ .endif
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
.endif
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 23834d96d1e7..dcc81e7200d4 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -118,16 +118,8 @@
* returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
* whatever is in the FPSIMD registers is not saved to memory, but discarded.
*/
-struct fpsimd_last_state_struct {
- struct user_fpsimd_state *st;
- void *sve_state;
- void *za_state;
- u64 *svcr;
- unsigned int sve_vl;
- unsigned int sme_vl;
-};
-static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
+static DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);
__ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = {
#ifdef CONFIG_ARM64_SVE
@@ -330,15 +322,6 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
* The task can execute SVE instructions while in userspace without
* trapping to the kernel.
*
- * When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
- * corresponding Zn), P0-P15 and FFR are encoded in
- * task->thread.sve_state, formatted appropriately for vector
- * length task->thread.sve_vl or, if SVCR.SM is set,
- * task->thread.sme_vl.
- *
- * task->thread.sve_state must point to a valid buffer at least
- * sve_state_size(task) bytes in size.
- *
* During any syscall, the kernel may optionally clear TIF_SVE and
* discard the vector state except for the FPSIMD subset.
*
@@ -348,7 +331,15 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
* do_sve_acc() to be called, which does some preparation and then
* sets TIF_SVE.
*
- * When stored, FPSIMD registers V0-V31 are encoded in
+ * During any syscall, the kernel may optionally clear TIF_SVE and
+ * discard the vector state except for the FPSIMD subset.
+ *
+ * The data will be stored in one of two formats:
+ *
+ * * FPSIMD only - FP_STATE_FPSIMD:
+ *
+ * When only the FPSIMD state is stored, task->thread.fp_type is set
+ * to FP_STATE_FPSIMD and the FPSIMD registers V0-V31 are encoded in
* task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
* logically zero but not stored anywhere; P0-P15 and FFR are not
* stored and have unspecified values from userspace's point of
@@ -356,7 +347,23 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
* but userspace is discouraged from relying on this.
*
* task->thread.sve_state does not need to be non-NULL, valid or any
- * particular size: it must not be dereferenced.
+ * particular size: it must not be dereferenced and any data stored
+ * there should be considered stale and not referenced.
+ *
+ * * SVE state - FP_STATE_SVE:
+ *
+ * When the full SVE state is stored, task->thread.fp_type is set to
+ * FP_STATE_SVE and Z0-Z31 (incorporating Vn in bits[127:0] or the
+ * corresponding Zn), P0-P15 and FFR are encoded in
+ * task->thread.sve_state, formatted appropriately for vector
+ * length task->thread.sve_vl or, if SVCR.SM is set,
+ * task->thread.sme_vl. The storage for the vector registers in
+ * task->thread.uw.fpsimd_state should be ignored.
+ *
+ * task->thread.sve_state must point to a valid buffer at least
+ * sve_state_size(task) bytes in size. The data stored in
+ * task->thread.uw.fpsimd_state.vregs should be considered stale
+ * and not referenced.
*
* * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
* irrespective of whether TIF_SVE is clear or set, since these are
@@ -378,11 +385,37 @@ static void task_fpsimd_load(void)
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
- /* Check if we should restore SVE first */
- if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) {
- sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);
- restore_sve_regs = true;
- restore_ffr = true;
+ if (system_supports_sve()) {
+ switch (current->thread.fp_type) {
+ case FP_STATE_FPSIMD:
+ /* Stop tracking SVE for this task until next use. */
+ if (test_and_clear_thread_flag(TIF_SVE))
+ sve_user_disable();
+ break;
+ case FP_STATE_SVE:
+ if (!thread_sm_enabled(&current->thread) &&
+ !WARN_ON_ONCE(!test_and_set_thread_flag(TIF_SVE)))
+ sve_user_enable();
+
+ if (test_thread_flag(TIF_SVE))
+ sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);
+
+ restore_sve_regs = true;
+ restore_ffr = true;
+ break;
+ default:
+ /*
+ * This indicates either a bug in
+ * fpsimd_save() or memory corruption; we
+ * should always record an explicit format
+ * when we save. We always at least have the
+ * memory allocated for the FPSIMD registers so
+ * try that and hope for the best.
+ */
+ WARN_ON_ONCE(1);
+ clear_thread_flag(TIF_SVE);
+ break;
+ }
}
/* Restore SME, override SVE register configuration if needed */
@@ -398,18 +431,19 @@ static void task_fpsimd_load(void)
if (thread_za_enabled(&current->thread))
za_load_state(current->thread.za_state);
- if (thread_sm_enabled(&current->thread)) {
- restore_sve_regs = true;
+ if (thread_sm_enabled(&current->thread))
restore_ffr = system_supports_fa64();
- }
}
- if (restore_sve_regs)
+ if (restore_sve_regs) {
+ WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE);
sve_load_state(sve_pffr(&current->thread),
&current->thread.uw.fpsimd_state.fpsr,
restore_ffr);
- else
+ } else {
+ WARN_ON_ONCE(current->thread.fp_type != FP_STATE_FPSIMD);
fpsimd_load_state(&current->thread.uw.fpsimd_state);
+ }
}
/*
@@ -419,12 +453,12 @@ static void task_fpsimd_load(void)
* last, if KVM is involved this may be the guest VM context rather
* than the host thread for the VM pointed to by current. This means
* that we must always reference the state storage via last rather
- * than via current, other than the TIF_ flags which KVM will
- * carefully maintain for us.
+ * than via current. If we are saving KVM state then it will have
+ * ensured that the type of registers to save is set in last->to_save.
*/
static void fpsimd_save(void)
{
- struct fpsimd_last_state_struct const *last =
+ struct cpu_fp_state const *last =
this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
bool save_sve_regs = false;
@@ -437,7 +471,14 @@ static void fpsimd_save(void)
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
return;
- if (test_thread_flag(TIF_SVE)) {
+ /*
+ * If a task is in a syscall, the ABI allows us to only
+ * preserve the state shared with FPSIMD, so don't bother
+ * saving the full SVE state in that case.
+ */
+ if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE) &&
+ !in_syscall(current_pt_regs())) ||
+ last->to_save == FP_STATE_SVE) {
save_sve_regs = true;
save_ffr = true;
vl = last->sve_vl;
@@ -474,8 +515,10 @@ static void fpsimd_save(void)
sve_save_state((char *)last->sve_state +
sve_ffr_offset(vl),
&last->st->fpsr, save_ffr);
+ *last->fp_type = FP_STATE_SVE;
} else {
fpsimd_save_state(last->st);
+ *last->fp_type = FP_STATE_FPSIMD;
}
}
@@ -768,8 +811,7 @@ void fpsimd_sync_to_sve(struct task_struct *task)
*/
void sve_sync_to_fpsimd(struct task_struct *task)
{
- if (test_tsk_thread_flag(task, TIF_SVE) ||
- thread_sm_enabled(&task->thread))
+ if (task->thread.fp_type == FP_STATE_SVE)
sve_to_fpsimd(task);
}
@@ -848,8 +890,10 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
fpsimd_flush_task_state(task);
if (test_and_clear_tsk_thread_flag(task, TIF_SVE) ||
- thread_sm_enabled(&task->thread))
+ thread_sm_enabled(&task->thread)) {
sve_to_fpsimd(task);
+ task->thread.fp_type = FP_STATE_FPSIMD;
+ }
if (system_supports_sme() && type == ARM64_VEC_SME) {
task->thread.svcr &= ~(SVCR_SM_MASK |
@@ -1368,6 +1412,7 @@ static void sve_init_regs(void)
fpsimd_bind_task_to_cpu();
} else {
fpsimd_to_sve(current);
+ current->thread.fp_type = FP_STATE_SVE;
}
}
@@ -1596,6 +1641,8 @@ void fpsimd_flush_thread(void)
current->thread.svcr = 0;
}
+ current->thread.fp_type = FP_STATE_FPSIMD;
+
put_cpu_fpsimd_context();
kfree(sve_state);
kfree(za_state);
@@ -1628,14 +1675,38 @@ void fpsimd_signal_preserve_current_state(void)
}
/*
+ * Called by KVM when entering the guest.
+ */
+void fpsimd_kvm_prepare(void)
+{
+ if (!system_supports_sve())
+ return;
+
+ /*
+ * KVM does not save host SVE state since we can only enter
+ * the guest from a syscall, so the ABI means that only the
+ * FPSIMD subset of the state needs to be saved. If we have left
+ * SVE enabled for performance reasons then update the task
+ * state to be FPSIMD only.
+ */
+ get_cpu_fpsimd_context();
+
+ if (test_and_clear_thread_flag(TIF_SVE)) {
+ sve_to_fpsimd(current);
+ current->thread.fp_type = FP_STATE_FPSIMD;
+ }
+
+ put_cpu_fpsimd_context();
+}
+
+/*
* Associate current's FPSIMD context with this cpu
* The caller must have ownership of the cpu FPSIMD context before calling
* this function.
*/
static void fpsimd_bind_task_to_cpu(void)
{
- struct fpsimd_last_state_struct *last =
- this_cpu_ptr(&fpsimd_last_state);
+ struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);
WARN_ON(!system_supports_fpsimd());
last->st = &current->thread.uw.fpsimd_state;
@@ -1644,6 +1715,8 @@ static void fpsimd_bind_task_to_cpu(void)
last->sve_vl = task_get_sve_vl(current);
last->sme_vl = task_get_sme_vl(current);
last->svcr = &current->thread.svcr;
+ last->fp_type = &current->thread.fp_type;
+ last->to_save = FP_STATE_CURRENT;
current->thread.fpsimd_cpu = smp_processor_id();
/*
@@ -1665,22 +1738,14 @@ static void fpsimd_bind_task_to_cpu(void)
}
}
-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
- unsigned int sve_vl, void *za_state,
- unsigned int sme_vl, u64 *svcr)
+void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state)
{
- struct fpsimd_last_state_struct *last =
- this_cpu_ptr(&fpsimd_last_state);
+ struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);
WARN_ON(!system_supports_fpsimd());
WARN_ON(!in_softirq() && !irqs_disabled());
- last->st = st;
- last->svcr = svcr;
- last->sve_state = sve_state;
- last->za_state = za_state;
- last->sve_vl = sve_vl;
- last->sme_vl = sme_vl;
+ *last = *state;
}
/*
@@ -1838,7 +1903,7 @@ void kernel_neon_begin(void)
/* Invalidate any task state remaining in the fpsimd regs: */
fpsimd_flush_cpu_state();
}
-EXPORT_SYMBOL(kernel_neon_begin);
+EXPORT_SYMBOL_GPL(kernel_neon_begin);
/*
* kernel_neon_end(): give the CPU FPSIMD registers back to the current task
@@ -1856,7 +1921,7 @@ void kernel_neon_end(void)
put_cpu_fpsimd_context();
}
-EXPORT_SYMBOL(kernel_neon_end);
+EXPORT_SYMBOL_GPL(kernel_neon_end);
#ifdef CONFIG_EFI
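The storage-format comment added above boils down to a simple dispatch on fp_type. The sketch below is illustrative only, assuming the usual <linux/sched.h> and <asm/fpsimd.h> includes; use_fpsimd() and use_sve() are hypothetical helpers, not part of the patch:

/* Minimal sketch: picking the saved-state buffer from fp_type. */
static void read_saved_state(struct task_struct *task)
{
	switch (task->thread.fp_type) {
	case FP_STATE_FPSIMD:
		/* V0-V31/FPSR/FPCR live in uw.fpsimd_state; sve_state is stale. */
		use_fpsimd(&task->thread.uw.fpsimd_state);
		break;
	case FP_STATE_SVE:
		/* Z/P/FFR live in sve_state for the task's vector length. */
		use_sve(task->thread.sve_state, task_get_sve_vl(task));
		break;
	default:
		/* fpsimd_save() always records an explicit format. */
		WARN_ON_ONCE(1);
	}
}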
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 8745175f4a75..b30b955a8921 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -17,7 +17,49 @@
#include <asm/insn.h>
#include <asm/patching.h>
-#ifdef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+struct fregs_offset {
+ const char *name;
+ int offset;
+};
+
+#define FREGS_OFFSET(n, field) \
+{ \
+ .name = n, \
+ .offset = offsetof(struct ftrace_regs, field), \
+}
+
+static const struct fregs_offset fregs_offsets[] = {
+ FREGS_OFFSET("x0", regs[0]),
+ FREGS_OFFSET("x1", regs[1]),
+ FREGS_OFFSET("x2", regs[2]),
+ FREGS_OFFSET("x3", regs[3]),
+ FREGS_OFFSET("x4", regs[4]),
+ FREGS_OFFSET("x5", regs[5]),
+ FREGS_OFFSET("x6", regs[6]),
+ FREGS_OFFSET("x7", regs[7]),
+ FREGS_OFFSET("x8", regs[8]),
+
+ FREGS_OFFSET("x29", fp),
+ FREGS_OFFSET("x30", lr),
+ FREGS_OFFSET("lr", lr),
+
+ FREGS_OFFSET("sp", sp),
+ FREGS_OFFSET("pc", pc),
+};
+
+int ftrace_regs_query_register_offset(const char *name)
+{
+ for (int i = 0; i < ARRAY_SIZE(fregs_offsets); i++) {
+ const struct fregs_offset *roff = &fregs_offsets[i];
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ }
+
+ return -EINVAL;
+}
+#endif
+
/*
* Replace a single instruction, which may be a branch or NOP.
* If @validate == true, a replaced instruction is checked against 'old'.
@@ -70,9 +112,6 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
if (addr == FTRACE_ADDR)
return &plt[FTRACE_PLT_IDX];
- if (addr == FTRACE_REGS_ADDR &&
- IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
- return &plt[FTRACE_REGS_PLT_IDX];
#endif
return NULL;
}
@@ -154,25 +193,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return ftrace_modify_code(pc, old, new, true);
}
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
- unsigned long addr)
-{
- unsigned long pc = rec->ip;
- u32 old, new;
-
- if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
- return -EINVAL;
- if (!ftrace_find_callable_addr(rec, NULL, &addr))
- return -EINVAL;
-
- old = aarch64_insn_gen_branch_imm(pc, old_addr,
- AARCH64_INSN_BRANCH_LINK);
- new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
-
- return ftrace_modify_code(pc, old, new, true);
-}
-
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
* The compiler has inserted two NOPs before the regular function prologue.
* All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
@@ -228,7 +249,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
*
* Note: 'mod' is only set at module load time.
*/
- if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) &&
IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
return aarch64_insn_patch_text_nosync((void *)pc, new);
}
@@ -246,7 +267,6 @@ void arch_ftrace_update_code(int command)
command |= FTRACE_MAY_SLEEP;
ftrace_modify_all_code(command);
}
-#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
@@ -277,21 +297,11 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
}
}
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- /*
- * When DYNAMIC_FTRACE_WITH_REGS is selected, `fregs` can never be NULL
- * and arch_ftrace_get_regs(fregs) will always give a non-NULL pt_regs
- * in which we can safely modify the LR.
- */
- struct pt_regs *regs = arch_ftrace_get_regs(fregs);
- unsigned long *parent = (unsigned long *)&procedure_link_pointer(regs);
-
- prepare_ftrace_return(ip, parent, frame_pointer(regs));
+ prepare_ftrace_return(ip, &fregs->lr, fregs->fp);
}
#else
/*
@@ -323,6 +333,5 @@ int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(false);
}
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
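ftrace_regs_query_register_offset() gives generic code a way to locate a saved register by name within struct ftrace_regs. A hedged usage sketch follows, assuming <linux/ftrace.h>; example_callback and the "x2" lookup are illustrative and not taken from the patch:

static void example_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int off = ftrace_regs_query_register_offset("x2");

	/* The offset indexes into struct ftrace_regs as laid out above. */
	if (off >= 0)
		pr_debug("%pS: x2 = %lx\n", (void *)ip,
			 *(unsigned long *)((void *)fregs + off));
}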
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 2196aad7b55b..952e17bd1c0b 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -462,6 +462,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
bl early_fdt_map // Try mapping the FDT early
mov x0, x20 // pass the full boot status
bl init_feature_override // Parse cpu feature overrides
+#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS
+ bl scs_patch_vmlinux
+#endif
mov x0, x20
bl finalise_el2 // Prefer VHE if possible
ldp x29, x30, [sp], #16
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 38dbd3828f13..6ad5c6ef5329 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -10,20 +10,21 @@
* Copyright (C) 2012 ARM Ltd.
*/
-#include <linux/irq.h>
-#include <linux/memory.h>
-#include <linux/smp.h>
#include <linux/hardirq.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/kprobes.h>
+#include <linux/memory.h>
#include <linux/scs.h>
#include <linux/seq_file.h>
+#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <asm/daifflags.h>
#include <asm/exception.h>
-#include <asm/vmap_stack.h>
#include <asm/softirq_stack.h>
+#include <asm/stacktrace.h>
+#include <asm/vmap_stack.h>
/* Only access this in an NMI enter/exit */
DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
@@ -41,7 +42,7 @@ static void init_irq_scs(void)
{
int cpu;
- if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
+ if (!scs_is_enabled())
return;
for_each_possible_cpu(cpu)
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 76b41e4ca9fa..5af4975caeb5 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -15,9 +15,11 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
+#include <linux/scs.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
+#include <asm/scs.h>
#include <asm/sections.h>
void *module_alloc(unsigned long size)
@@ -497,9 +499,6 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
- if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
- __init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);
-
mod->arch.ftrace_trampolines = plts;
#endif
return 0;
@@ -514,5 +513,11 @@ int module_finalize(const Elf_Ehdr *hdr,
if (s)
apply_alternatives_module((void *)s->sh_addr, s->sh_size);
+ if (scs_is_dynamic()) {
+ s = find_section(hdr, sechdrs, ".init.eh_frame");
+ if (s)
+ scs_patch((void *)s->sh_addr, s->sh_size);
+ }
+
return module_init_ftrace_plt(hdr, sechdrs, me);
}
diff --git a/arch/arm64/kernel/patch-scs.c b/arch/arm64/kernel/patch-scs.c
new file mode 100644
index 000000000000..1b3da02d5b74
--- /dev/null
+++ b/arch/arm64/kernel/patch-scs.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 - Google LLC
+ * Author: Ard Biesheuvel <ardb@google.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/scs.h>
+
+//
+// This minimal DWARF CFI parser is partially based on the code in
+// arch/arc/kernel/unwind.c, and on the document below:
+// https://refspecs.linuxbase.org/LSB_4.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html
+//
+
+#define DW_CFA_nop 0x00
+#define DW_CFA_set_loc 0x01
+#define DW_CFA_advance_loc1 0x02
+#define DW_CFA_advance_loc2 0x03
+#define DW_CFA_advance_loc4 0x04
+#define DW_CFA_offset_extended 0x05
+#define DW_CFA_restore_extended 0x06
+#define DW_CFA_undefined 0x07
+#define DW_CFA_same_value 0x08
+#define DW_CFA_register 0x09
+#define DW_CFA_remember_state 0x0a
+#define DW_CFA_restore_state 0x0b
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_register 0x0d
+#define DW_CFA_def_cfa_offset 0x0e
+#define DW_CFA_def_cfa_expression 0x0f
+#define DW_CFA_expression 0x10
+#define DW_CFA_offset_extended_sf 0x11
+#define DW_CFA_def_cfa_sf 0x12
+#define DW_CFA_def_cfa_offset_sf 0x13
+#define DW_CFA_val_offset 0x14
+#define DW_CFA_val_offset_sf 0x15
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_lo_user 0x1c
+#define DW_CFA_negate_ra_state 0x2d
+#define DW_CFA_GNU_args_size 0x2e
+#define DW_CFA_GNU_negative_offset_extended 0x2f
+#define DW_CFA_hi_user 0x3f
+
+extern const u8 __eh_frame_start[], __eh_frame_end[];
+
+enum {
+ PACIASP = 0xd503233f,
+ AUTIASP = 0xd50323bf,
+ SCS_PUSH = 0xf800865e,
+ SCS_POP = 0xf85f8e5e,
+};
+
+static void __always_inline scs_patch_loc(u64 loc)
+{
+ u32 insn = le32_to_cpup((void *)loc);
+
+ switch (insn) {
+ case PACIASP:
+ *(u32 *)loc = cpu_to_le32(SCS_PUSH);
+ break;
+ case AUTIASP:
+ *(u32 *)loc = cpu_to_le32(SCS_POP);
+ break;
+ default:
+ /*
+ * While the DW_CFA_negate_ra_state directive is guaranteed to
+ * appear right after a PACIASP/AUTIASP instruction, it may
+ * also appear after a DW_CFA_restore_state directive that
+ * restores a state that is only partially accurate, and is
+ * followed by a DW_CFA_negate_ra_state directive to toggle the
+ * PAC bit again. So we permit other instructions here, and ignore
+ * them.
+ */
+ return;
+ }
+ dcache_clean_pou(loc, loc + sizeof(u32));
+}
+
+/*
+ * Skip one uleb128/sleb128 encoded quantity from the opcode stream. All bytes
+ * except the last one have bit #7 set.
+ */
+static int __always_inline skip_xleb128(const u8 **opcode, int size)
+{
+ u8 c;
+
+ do {
+ c = *(*opcode)++;
+ size--;
+ } while (c & BIT(7));
+
+ return size;
+}
+
+struct eh_frame {
+ /*
+ * The size of this frame if 0 < size < U32_MAX; a size of 0 terminates the list.
+ */
+ u32 size;
+
+ /*
+ * The first frame is a Common Information Entry (CIE) frame, followed
+ * by one or more Frame Description Entry (FDE) frames. In the former
+ * case, this field is 0; otherwise it is the negated offset relative
+ * to the associated CIE frame.
+ */
+ u32 cie_id_or_pointer;
+
+ union {
+ struct { // CIE
+ u8 version;
+ u8 augmentation_string[];
+ };
+
+ struct { // FDE
+ s32 initial_loc;
+ s32 range;
+ u8 opcodes[];
+ };
+ };
+};
+
+static int noinstr scs_handle_fde_frame(const struct eh_frame *frame,
+ bool fde_has_augmentation_data,
+ int code_alignment_factor)
+{
+ int size = frame->size - offsetof(struct eh_frame, opcodes) + 4;
+ u64 loc = (u64)offset_to_ptr(&frame->initial_loc);
+ const u8 *opcode = frame->opcodes;
+
+ if (fde_has_augmentation_data) {
+ int l;
+
+ // assume single byte uleb128_t
+ if (WARN_ON(*opcode & BIT(7)))
+ return -ENOEXEC;
+
+ l = *opcode++;
+ opcode += l;
+ size -= l + 1;
+ }
+
+ /*
+ * Starting from 'loc', apply the CFA opcodes that advance the location
+ * pointer, and identify the locations of the PAC instructions.
+ */
+ while (size-- > 0) {
+ switch (*opcode++) {
+ case DW_CFA_nop:
+ case DW_CFA_remember_state:
+ case DW_CFA_restore_state:
+ break;
+
+ case DW_CFA_advance_loc1:
+ loc += *opcode++ * code_alignment_factor;
+ size--;
+ break;
+
+ case DW_CFA_advance_loc2:
+ loc += *opcode++ * code_alignment_factor;
+ loc += (*opcode++ << 8) * code_alignment_factor;
+ size -= 2;
+ break;
+
+ case DW_CFA_def_cfa:
+ case DW_CFA_offset_extended:
+ size = skip_xleb128(&opcode, size);
+ fallthrough;
+ case DW_CFA_def_cfa_offset:
+ case DW_CFA_def_cfa_offset_sf:
+ case DW_CFA_def_cfa_register:
+ case DW_CFA_same_value:
+ case DW_CFA_restore_extended:
+ case 0x80 ... 0xbf:
+ size = skip_xleb128(&opcode, size);
+ break;
+
+ case DW_CFA_negate_ra_state:
+ scs_patch_loc(loc - 4);
+ break;
+
+ case 0x40 ... 0x7f:
+ // advance loc
+ loc += (opcode[-1] & 0x3f) * code_alignment_factor;
+ break;
+
+ case 0xc0 ... 0xff:
+ break;
+
+ default:
+ pr_err("unhandled opcode: %02x in FDE frame %lx\n", opcode[-1], (uintptr_t)frame);
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
+int noinstr scs_patch(const u8 eh_frame[], int size)
+{
+ const u8 *p = eh_frame;
+
+ while (size > 4) {
+ const struct eh_frame *frame = (const void *)p;
+ bool fde_has_augmentation_data = true;
+ int code_alignment_factor = 1;
+ int ret;
+
+ if (frame->size == 0 ||
+ frame->size == U32_MAX ||
+ frame->size > size)
+ break;
+
+ if (frame->cie_id_or_pointer == 0) {
+ const u8 *p = frame->augmentation_string;
+
+ /* a 'z' in the augmentation string must come first */
+ fde_has_augmentation_data = *p == 'z';
+
+ /*
+ * The code alignment factor is a uleb128 encoded field
+ * but given that the only sensible values are 1 or 4,
+ * there is no point in decoding the whole thing.
+ */
+ p += strlen(p) + 1;
+ if (!WARN_ON(*p & BIT(7)))
+ code_alignment_factor = *p;
+ } else {
+ ret = scs_handle_fde_frame(frame,
+ fde_has_augmentation_data,
+ code_alignment_factor);
+ if (ret)
+ return ret;
+ }
+
+ p += sizeof(frame->size) + frame->size;
+ size -= sizeof(frame->size) + frame->size;
+ }
+ return 0;
+}
+
+asmlinkage void __init scs_patch_vmlinux(void)
+{
+ if (!should_patch_pac_into_scs())
+ return;
+
+ WARN_ON(scs_patch(__eh_frame_start, __eh_frame_end - __eh_frame_start));
+ icache_inval_all_pou();
+ isb();
+}
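For context on the opcode constants used by scs_patch_loc(): the replacement encodings decode to shadow call stack push/pop operations on x18 (the SCS pointer register). This decoding is informational only and not part of the patch:

/*
 * SCS_PUSH (0xf800865e):  str x30, [x18], #8     // push LR onto the SCS
 * SCS_POP  (0xf85f8e5e):  ldr x30, [x18, #-8]!   // pop LR back off the SCS
 *
 * so each PACIASP/AUTIASP located via DW_CFA_negate_ra_state in the
 * unwind tables is rewritten into an SCS push/pop when dynamic SCS is
 * enabled.
 */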
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 7b0643fe2f13..a15b3c1d15d9 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1146,7 +1146,8 @@ static void __armv8pmu_probe_pmu(void *info)
dfr0 = read_sysreg(id_aa64dfr0_el1);
pmuver = cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_EL1_PMUVer_SHIFT);
- if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || pmuver == 0)
+ if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
+ pmuver == ID_AA64DFR0_EL1_PMUVer_NI)
return;
cpu_pmu->pmuver = pmuver;
diff --git a/arch/arm64/kernel/pi/Makefile b/arch/arm64/kernel/pi/Makefile
index 839291430cb3..4c0ea3cd4ea4 100644
--- a/arch/arm64/kernel/pi/Makefile
+++ b/arch/arm64/kernel/pi/Makefile
@@ -7,6 +7,7 @@ KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
-include $(srctree)/include/linux/hidden.h \
-D__DISABLE_EXPORTS -ffreestanding -D__NO_FORTIFY \
+ -fno-asynchronous-unwind-tables -fno-unwind-tables \
$(call cc-option,-fno-addrsig)
# remove SCS flags from all objects in this directory
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index 104101f633b1..968d5fffe233 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -24,7 +24,7 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
* currently safe. Lastly, MSR instructions can do any number of nasty
* things we can't handle during single-stepping.
*/
- if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
+ if (aarch64_insn_is_class_branch_sys(insn)) {
if (aarch64_insn_is_branch(insn) ||
aarch64_insn_is_msr_imm(insn) ||
aarch64_insn_is_msr_reg(insn) ||
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index c9e4d0720285..f35d059a9a36 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -294,19 +294,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
}
break;
- case KPROBE_HIT_ACTIVE:
- case KPROBE_HIT_SSDONE:
- /*
- * In case the user-specified fault handler returned
- * zero, try to fix up.
- */
- if (fixup_exception(regs))
- return 1;
}
return 0;
}
-static void __kprobes kprobe_handler(struct pt_regs *regs)
+static int __kprobes
+kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
{
struct kprobe *p, *cur_kprobe;
struct kprobe_ctlblk *kcb;
@@ -316,39 +309,44 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
cur_kprobe = kprobe_running();
p = get_kprobe((kprobe_opcode_t *) addr);
+ if (WARN_ON_ONCE(!p)) {
+ /*
+ * Something went wrong. This BRK used an immediate reserved
+ * for kprobes, but we couldn't find any corresponding probe.
+ */
+ return DBG_HOOK_ERROR;
+ }
- if (p) {
- if (cur_kprobe) {
- if (reenter_kprobe(p, regs, kcb))
- return;
- } else {
- /* Probe hit */
- set_current_kprobe(p);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-
- /*
- * If we have no pre-handler or it returned 0, we
- * continue with normal processing. If we have a
- * pre-handler and it returned non-zero, it will
- * modify the execution path and no need to single
- * stepping. Let's just reset current kprobe and exit.
- */
- if (!p->pre_handler || !p->pre_handler(p, regs)) {
- setup_singlestep(p, regs, kcb, 0);
- } else
- reset_current_kprobe();
- }
+ if (cur_kprobe) {
+ /* Hit a kprobe inside another kprobe */
+ if (!reenter_kprobe(p, regs, kcb))
+ return DBG_HOOK_ERROR;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /*
+ * If we have no pre-handler or it returned 0, we
+ * continue with normal processing. If we have a
+ * pre-handler and it returned non-zero, it will
+ * modify the execution path and there is no need to single-step.
+ * Let's just reset current kprobe and exit.
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ setup_singlestep(p, regs, kcb, 0);
+ else
+ reset_current_kprobe();
}
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- * Return back to original instruction, and continue.
- */
+
+ return DBG_HOOK_HANDLED;
}
+static struct break_hook kprobes_break_hook = {
+ .imm = KPROBES_BRK_IMM,
+ .fn = kprobe_breakpoint_handler,
+};
+
static int __kprobes
kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr)
{
@@ -373,18 +371,6 @@ static struct break_hook kprobes_break_ss_hook = {
.fn = kprobe_breakpoint_ss_handler,
};
-static int __kprobes
-kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
-{
- kprobe_handler(regs);
- return DBG_HOOK_HANDLED;
-}
-
-static struct break_hook kprobes_break_hook = {
- .imm = KPROBES_BRK_IMM,
- .fn = kprobe_breakpoint_handler,
-};
-
/*
* Provide a blacklist of symbols identifying ranges which cannot be kprobed.
* This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 044a7d7f1f6a..19cd05eea3f0 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -331,6 +331,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
clear_tsk_thread_flag(dst, TIF_SME);
}
+ dst->thread.fp_type = FP_STATE_FPSIMD;
+
/* clear any pending asynchronous tag fault raised by the parent */
clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index c2fb5755bbec..979dbdc36d52 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -907,8 +907,7 @@ static int sve_set_common(struct task_struct *target,
ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
SVE_PT_FPSIMD_OFFSET);
clear_tsk_thread_flag(target, TIF_SVE);
- if (type == ARM64_VEC_SME)
- fpsimd_force_sync_to_sve(target);
+ target->thread.fp_type = FP_STATE_FPSIMD;
goto out;
}
@@ -931,6 +930,7 @@ static int sve_set_common(struct task_struct *target,
if (!target->thread.sve_state) {
ret = -ENOMEM;
clear_tsk_thread_flag(target, TIF_SVE);
+ target->thread.fp_type = FP_STATE_FPSIMD;
goto out;
}
@@ -942,6 +942,7 @@ static int sve_set_common(struct task_struct *target,
*/
fpsimd_sync_to_sve(target);
set_tsk_thread_flag(target, TIF_SVE);
+ target->thread.fp_type = FP_STATE_SVE;
BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
start = SVE_PT_SVE_OFFSET;
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index d56e170e1ca7..830be01af32d 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -144,7 +144,7 @@ static int init_sdei_scs(void)
int cpu;
int err = 0;
- if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
+ if (!scs_is_enabled())
return 0;
for_each_possible_cpu(cpu) {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index fea3223704b6..12cfe9d0d3fa 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -30,6 +30,7 @@
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/sched/task.h>
+#include <linux/scs.h>
#include <linux/mm.h>
#include <asm/acpi.h>
@@ -42,6 +43,7 @@
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
+#include <asm/scs.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -312,6 +314,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
jump_label_init();
parse_early_param();
+ dynamic_scs_init();
+
/*
* Unmask asynchronous aborts and fiq after bringing up possible
* earlycon. (Report possible System Errors once we can report this
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 9ad911f1647c..e0d09bf5b01b 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -207,6 +207,7 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
clear_thread_flag(TIF_SVE);
+ current->thread.fp_type = FP_STATE_FPSIMD;
/* load the hardware registers from the fpsimd_state structure */
if (!err)
@@ -292,6 +293,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
if (sve.head.size <= sizeof(*user->sve)) {
clear_thread_flag(TIF_SVE);
current->thread.svcr &= ~SVCR_SM_MASK;
+ current->thread.fp_type = FP_STATE_FPSIMD;
goto fpsimd_only;
}
@@ -327,6 +329,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
current->thread.svcr |= SVCR_SM_MASK;
else
set_thread_flag(TIF_SVE);
+ current->thread.fp_type = FP_STATE_SVE;
fpsimd_only:
/* copy the FP and status/control registers */
@@ -932,9 +935,11 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
* FPSIMD register state - flush the saved FPSIMD
* register state in case it gets loaded.
*/
- if (current->thread.svcr & SVCR_SM_MASK)
+ if (current->thread.svcr & SVCR_SM_MASK) {
memset(&current->thread.uw.fpsimd_state, 0,
sizeof(current->thread.uw.fpsimd_state));
+ current->thread.fp_type = FP_STATE_FPSIMD;
+ }
current->thread.svcr &= ~(SVCR_ZA_MASK |
SVCR_SM_MASK);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 634279b3b03d..117e2c180f3c 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -23,8 +23,8 @@
*
* The regs must be on a stack currently owned by the calling task.
*/
-static inline void unwind_init_from_regs(struct unwind_state *state,
- struct pt_regs *regs)
+static __always_inline void unwind_init_from_regs(struct unwind_state *state,
+ struct pt_regs *regs)
{
unwind_init_common(state, current);
@@ -58,8 +58,8 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state)
* duration of the unwind, or the unwind will be bogus. It is never valid to
* call this for the current task.
*/
-static inline void unwind_init_from_task(struct unwind_state *state,
- struct task_struct *task)
+static __always_inline void unwind_init_from_task(struct unwind_state *state,
+ struct task_struct *task)
{
unwind_init_common(state, task);
@@ -186,7 +186,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
: stackinfo_get_unknown(); \
})
-noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
+noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
{
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 8b02d310838f..e7163f31f716 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -60,6 +60,8 @@ void notrace __cpu_suspend_exit(void)
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
*/
+ if (cpus_have_const_cap(ARM64_HAS_DIT))
+ set_pstate_dit(1);
__uaccess_enable_hw_pan();
/*
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index d72e8f23422d..a5de47e3df2b 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -183,21 +183,12 @@ static inline void fp_user_discard(void)
if (!system_supports_sve())
return;
- /*
- * If SME is not active then disable SVE, the registers will
- * be cleared when userspace next attempts to access them and
- * we do not need to track the SVE register state until then.
- */
- clear_thread_flag(TIF_SVE);
+ if (test_thread_flag(TIF_SVE)) {
+ unsigned int sve_vq_minus_one;
- /*
- * task_fpsimd_load() won't be called to update CPACR_EL1 in
- * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
- * happens if a context switch or kernel_neon_begin() or context
- * modification (sigreturn, ptrace) intervenes.
- * So, ensure that CPACR_EL1 is already correct for the fast-path case.
- */
- sve_user_disable();
+ sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
+ sve_flush_live(true, sve_vq_minus_one);
+ }
}
void do_el0_svc(struct pt_regs *regs)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 45131e354e27..4c13dafc98b8 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -121,6 +121,17 @@ jiffies = jiffies_64;
#define TRAMP_TEXT
#endif
+#ifdef CONFIG_UNWIND_TABLES
+#define UNWIND_DATA_SECTIONS \
+ .eh_frame : { \
+ __eh_frame_start = .; \
+ *(.eh_frame) \
+ __eh_frame_end = .; \
+ }
+#else
+#define UNWIND_DATA_SECTIONS
+#endif
+
/*
* The size of the PE/COFF section that covers the kernel image, which
* runs from _stext to _edata, must be a round multiple of the PE/COFF
@@ -231,6 +242,8 @@ SECTIONS
__alt_instructions_end = .;
}
+ UNWIND_DATA_SECTIONS
+
. = ALIGN(SEGMENT_ALIGN);
__inittext_end = .;
__initdata_begin = .;
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index ec8e4494873d..02dd7e9ebd39 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -75,11 +75,12 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
BUG_ON(!current->mm);
- BUG_ON(test_thread_flag(TIF_SVE));
if (!system_supports_fpsimd())
return;
+ fpsimd_kvm_prepare();
+
vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
@@ -129,20 +130,31 @@ void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
*/
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
+ struct cpu_fp_state fp_state;
+
WARN_ON_ONCE(!irqs_disabled());
if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+
/*
* Currently we do not support SME guests so SVCR is
* always 0 and we just need a variable to point to.
*/
- fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
- vcpu->arch.sve_state,
- vcpu->arch.sve_max_vl,
- NULL, 0, &vcpu->arch.svcr);
+ fp_state.st = &vcpu->arch.ctxt.fp_regs;
+ fp_state.sve_state = vcpu->arch.sve_state;
+ fp_state.sve_vl = vcpu->arch.sve_max_vl;
+ fp_state.za_state = NULL;
+ fp_state.svcr = &vcpu->arch.svcr;
+ fp_state.fp_type = &vcpu->arch.fp_type;
+
+ if (vcpu_has_sve(vcpu))
+ fp_state.to_save = FP_STATE_SVE;
+ else
+ fp_state.to_save = FP_STATE_FPSIMD;
+
+ fpsimd_bind_state_to_cpu(&fp_state);
clear_thread_flag(TIF_FOREIGN_FPSTATE);
- update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
}
}
@@ -199,7 +211,5 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
}
- update_thread_flag(TIF_SVE, 0);
-
local_irq_restore(flags);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index be0a2bc3e20d..530347cdebe3 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -96,6 +96,7 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI)
# when profile optimization is applied. gen-hyprel does not support SHT_REL and
# causes a build failure. Remove profile optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS))
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
# KVM nVHE code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f4a7c5abcbca..608e4f25161d 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1121,8 +1121,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
case SYS_ID_DFR0_EL1:
/* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
- ID_DFR0_PERFMON_SHIFT,
- kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
+ ID_DFR0_EL1_PerfMon_SHIFT,
+ kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_EL1_PerfMon_PMUv3p4 : 0);
break;
}
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index 49e972beeac7..924934cb85ee 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -20,91 +20,6 @@
#define AARCH64_INSN_N_BIT BIT(22)
#define AARCH64_INSN_LSL_12 BIT(22)
-static const int aarch64_insn_encoding_class[] = {
- AARCH64_INSN_CLS_UNKNOWN,
- AARCH64_INSN_CLS_UNKNOWN,
- AARCH64_INSN_CLS_SVE,
- AARCH64_INSN_CLS_UNKNOWN,
- AARCH64_INSN_CLS_LDST,
- AARCH64_INSN_CLS_DP_REG,
- AARCH64_INSN_CLS_LDST,
- AARCH64_INSN_CLS_DP_FPSIMD,
- AARCH64_INSN_CLS_DP_IMM,
- AARCH64_INSN_CLS_DP_IMM,
- AARCH64_INSN_CLS_BR_SYS,
- AARCH64_INSN_CLS_BR_SYS,
- AARCH64_INSN_CLS_LDST,
- AARCH64_INSN_CLS_DP_REG,
- AARCH64_INSN_CLS_LDST,
- AARCH64_INSN_CLS_DP_FPSIMD,
-};
-
-enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
-{
- return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
-}
-
-bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
-{
- if (!aarch64_insn_is_hint(insn))
- return false;
-
- switch (insn & 0xFE0) {
- case AARCH64_INSN_HINT_XPACLRI:
- case AARCH64_INSN_HINT_PACIA_1716:
- case AARCH64_INSN_HINT_PACIB_1716:
- case AARCH64_INSN_HINT_PACIAZ:
- case AARCH64_INSN_HINT_PACIASP:
- case AARCH64_INSN_HINT_PACIBZ:
- case AARCH64_INSN_HINT_PACIBSP:
- case AARCH64_INSN_HINT_BTI:
- case AARCH64_INSN_HINT_BTIC:
- case AARCH64_INSN_HINT_BTIJ:
- case AARCH64_INSN_HINT_BTIJC:
- case AARCH64_INSN_HINT_NOP:
- return true;
- default:
- return false;
- }
-}
-
-bool aarch64_insn_is_branch_imm(u32 insn)
-{
- return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
- aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
- aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
- aarch64_insn_is_bcond(insn));
-}
-
-bool __kprobes aarch64_insn_uses_literal(u32 insn)
-{
- /* ldr/ldrsw (literal), prfm */
-
- return aarch64_insn_is_ldr_lit(insn) ||
- aarch64_insn_is_ldrsw_lit(insn) ||
- aarch64_insn_is_adr_adrp(insn) ||
- aarch64_insn_is_prfm_lit(insn);
-}
-
-bool __kprobes aarch64_insn_is_branch(u32 insn)
-{
- /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
-
- return aarch64_insn_is_b(insn) ||
- aarch64_insn_is_bl(insn) ||
- aarch64_insn_is_cbz(insn) ||
- aarch64_insn_is_cbnz(insn) ||
- aarch64_insn_is_tbz(insn) ||
- aarch64_insn_is_tbnz(insn) ||
- aarch64_insn_is_ret(insn) ||
- aarch64_insn_is_ret_auth(insn) ||
- aarch64_insn_is_br(insn) ||
- aarch64_insn_is_br_auth(insn) ||
- aarch64_insn_is_blr(insn) ||
- aarch64_insn_is_blr_auth(insn) ||
- aarch64_insn_is_bcond(insn);
-}
-
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
u32 *maskp, int *shiftp)
{
@@ -435,16 +350,6 @@ u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
offset >> 2);
}
-u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
-{
- return aarch64_insn_get_hint_value() | op;
-}
-
-u32 __kprobes aarch64_insn_gen_nop(void)
-{
- return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
-}
-
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
enum aarch64_insn_branch_type type)
{
@@ -816,76 +721,6 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
}
#endif
-static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
- enum aarch64_insn_prfm_target target,
- enum aarch64_insn_prfm_policy policy,
- u32 insn)
-{
- u32 imm_type = 0, imm_target = 0, imm_policy = 0;
-
- switch (type) {
- case AARCH64_INSN_PRFM_TYPE_PLD:
- break;
- case AARCH64_INSN_PRFM_TYPE_PLI:
- imm_type = BIT(0);
- break;
- case AARCH64_INSN_PRFM_TYPE_PST:
- imm_type = BIT(1);
- break;
- default:
- pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
- return AARCH64_BREAK_FAULT;
- }
-
- switch (target) {
- case AARCH64_INSN_PRFM_TARGET_L1:
- break;
- case AARCH64_INSN_PRFM_TARGET_L2:
- imm_target = BIT(0);
- break;
- case AARCH64_INSN_PRFM_TARGET_L3:
- imm_target = BIT(1);
- break;
- default:
- pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
- return AARCH64_BREAK_FAULT;
- }
-
- switch (policy) {
- case AARCH64_INSN_PRFM_POLICY_KEEP:
- break;
- case AARCH64_INSN_PRFM_POLICY_STRM:
- imm_policy = BIT(0);
- break;
- default:
- pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
- return AARCH64_BREAK_FAULT;
- }
-
- /* In this case, imm5 is encoded into Rt field. */
- insn &= ~GENMASK(4, 0);
- insn |= imm_policy | (imm_target << 1) | (imm_type << 3);
-
- return insn;
-}
-
-u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
- enum aarch64_insn_prfm_type type,
- enum aarch64_insn_prfm_target target,
- enum aarch64_insn_prfm_policy policy)
-{
- u32 insn = aarch64_insn_get_prfm_value();
-
- insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);
-
- insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);
-
- insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
- base);
-
- return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
-}
-
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 1b7c93ae7e63..5018ac03b6bf 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -18,7 +18,7 @@
*/
.macro multitag_transfer_size, reg, tmp
mrs_s \reg, SYS_GMID_EL1
- ubfx \reg, \reg, #GMID_EL1_BS_SHIFT, #GMID_EL1_BS_SIZE
+ ubfx \reg, \reg, #GMID_EL1_BS_SHIFT, #GMID_EL1_BS_WIDTH
mov \tmp, #4
lsl \reg, \tmp, \reg
.endm
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 3e9cf9826417..3eb2825d08cf 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -354,6 +354,11 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
return false;
}
+static bool is_translation_fault(unsigned long esr)
+{
+ return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
+}
+
static void __do_kernel_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
@@ -386,7 +391,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
} else if (addr < PAGE_SIZE) {
msg = "NULL pointer dereference";
} else {
- if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
+ if (is_translation_fault(esr) &&
+ kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
return;
msg = "paging request";
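The new is_translation_fault() check works because the ESR fault status code groups all translation faults (levels 0-3) under one class: masking with ESR_ELx_FSC_TYPE drops the level bits, so any translation fault compares equal to ESR_ELx_FSC_FAULT, while a synchronous tag check fault, for instance, does not. A worked example, assuming the usual asm/esr.h values (ESR_ELx_FSC_TYPE = 0x3c, ESR_ELx_FSC_FAULT = 0x04):

	(0x06 & 0x3c) == 0x04	/* level-2 translation fault: KFENCE may handle it */
	(0x11 & 0x3c) == 0x10	/* tag check fault: not a translation fault, skipped */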
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 35e9a468d13e..cd8d96e1fa1a 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -559,3 +559,24 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
{
return __hugetlb_valid_size(size);
}
+
+pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+ if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198) &&
+ cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+ /*
+ * Break-before-make (BBM) is required for all user space mappings
+ * when the permission changes from executable to non-executable
+ * on CPUs affected by erratum #2645198.
+ */
+ if (pte_user_exec(READ_ONCE(*ptep)))
+ return huge_ptep_clear_flush(vma, addr, ptep);
+ }
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte)
+{
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 4b4651ee47f2..58a0bb2c17f1 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -96,6 +96,8 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit
#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1)
+#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+
static int __init reserve_crashkernel_low(unsigned long long low_size)
{
unsigned long long low_base;
@@ -130,6 +132,7 @@ static void __init reserve_crashkernel(void)
unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
char *cmdline = boot_command_line;
int ret;
+ bool fixed_base = false;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
@@ -147,7 +150,9 @@ static void __init reserve_crashkernel(void)
* is not allowed.
*/
ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
- if (ret && (ret != -ENOENT))
+ if (ret == -ENOENT)
+ crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
+ else if (ret)
return;
crash_max = CRASH_ADDR_HIGH_MAX;
@@ -159,18 +164,32 @@ static void __init reserve_crashkernel(void)
crash_size = PAGE_ALIGN(crash_size);
/* User specifies base address explicitly. */
- if (crash_base)
+ if (crash_base) {
+ fixed_base = true;
crash_max = crash_base + crash_size;
+ }
+retry:
crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
crash_base, crash_max);
if (!crash_base) {
+ /*
+ * If the first attempt was for low memory, fall back to
+ * high memory; the minimum required low memory will be
+ * reserved later.
+ */
+ if (!fixed_base && (crash_max == CRASH_ADDR_LOW_MAX)) {
+ crash_max = CRASH_ADDR_HIGH_MAX;
+ crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
+ goto retry;
+ }
+
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
crash_size);
return;
}
- if ((crash_base >= CRASH_ADDR_LOW_MAX) &&
+ if ((crash_base > CRASH_ADDR_LOW_MAX - crash_low_size) &&
crash_low_size && reserve_crashkernel_low(crash_low_size)) {
memblock_phys_free(crash_base, crash_size);
return;
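With the retry above, a reservation that cannot be satisfied below CRASH_ADDR_LOW_MAX no longer fails outright: unless the user pinned a base address, the allocation is retried in high memory and a default 128 MiB low region (DEFAULT_CRASH_KERNEL_LOW_SIZE) is reserved separately. Illustrative command lines, assuming the crashkernel=...,high / ...,low forms handled earlier in this function:

	crashkernel=2G                              # falls back to high memory if low memory is too fragmented
	crashkernel=2G,high crashkernel=256M,low    # explicit low size, no fallback needed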
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d386033a074c..2368e4daa23d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1702,3 +1702,24 @@ static int __init prevent_bootmem_remove_init(void)
}
early_initcall(prevent_bootmem_remove_init);
#endif
+
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+ if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198) &&
+ cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+ /*
+ * Break-before-make (BBM) is required for all user space mappings
+ * when the permission changes from executable to non-executable
+ * on CPUs affected by erratum #2645198.
+ */
+ if (pte_user_exec(READ_ONCE(*ptep)))
+ return ptep_clear_flush(vma, addr, ptep);
+ }
+ return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte)
+{
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
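These hooks bracket a permission change in the generic mprotect path: the start hook clears (and, on affected CPUs, flushes) the old entry, the caller computes the new pte, and the commit hook installs it, which is what gives the erratum workaround its break-before-make window. A condensed view of that caller, with the function name chosen for the example rather than taken from mm/mprotect.c:

	static void change_one_pte(struct vm_area_struct *vma, unsigned long addr,
				   pte_t *ptep, pgprot_t newprot)
	{
		pte_t oldpte, newpte;

		oldpte = ptep_modify_prot_start(vma, addr, ptep);	/* clear, maybe flush (BBM) */
		newpte = pte_modify(oldpte, newprot);			/* recompute protections */
		ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
	}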
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index f1c0347ec31a..dfeb2c51e257 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -20,6 +20,7 @@ HAS_CNP
HAS_CRC32
HAS_DCPODP
HAS_DCPOP
+HAS_DIT
HAS_E0PD
HAS_ECV
HAS_EPAN
@@ -70,6 +71,7 @@ WORKAROUND_2038923
WORKAROUND_2064142
WORKAROUND_2077057
WORKAROUND_2457168
+WORKAROUND_2645198
WORKAROUND_2658417
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk
index db461921d256..c350164a3955 100755
--- a/arch/arm64/tools/gen-sysreg.awk
+++ b/arch/arm64/tools/gen-sysreg.awk
@@ -33,7 +33,7 @@ function expect_fields(nf) {
# Print a CPP macro definition, padded with spaces so that the macro bodies
# line up in a column
function define(name, val) {
- printf "%-48s%s\n", "#define " name, val
+ printf "%-56s%s\n", "#define " name, val
}
# Print standard BITMASK/SHIFT/WIDTH CPP definitions for a field
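For every field described in arch/arm64/tools/sysreg, this script emits SHIFT/MASK/WIDTH macros plus one macro per enumeration item; the padding grows from 48 to 56 columns, presumably so that the longer names introduced by the 32-bit ID registers below still line up. For the ID_DFR0_EL1.PerfMon field (bits 27:24) added later in this patch, the generated header ends up along the lines of:

	#define ID_DFR0_EL1_PerfMon_SHIFT		24
	#define ID_DFR0_EL1_PerfMon_WIDTH		4
	#define ID_DFR0_EL1_PerfMon_MASK		GENMASK(27, 24)
	#define ID_DFR0_EL1_PerfMon_PMUv3p4		UL(0b0101)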
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 384757a7eda9..184e58fd5631 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -46,6 +46,760 @@
# feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration
# item ACCDATA) though it may be more tasteful to do something else.
+Sysreg ID_PFR0_EL1 3 0 0 1 0
+Res0 63:32
+Enum 31:28 RAS
+ 0b0000 NI
+ 0b0001 RAS
+ 0b0010 RASv1p1
+EndEnum
+Enum 27:24 DIT
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 AMU
+ 0b0000 NI
+ 0b0001 AMUv1
+ 0b0010 AMUv1p1
+EndEnum
+Enum 19:16 CSV2
+ 0b0000 UNDISCLOSED
+ 0b0001 IMP
+ 0b0010 CSV2p1
+EndEnum
+Enum 15:12 State3
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 11:8 State2
+ 0b0000 NI
+ 0b0001 NO_CV
+ 0b0010 CV
+EndEnum
+Enum 7:4 State1
+ 0b0000 NI
+ 0b0001 THUMB
+ 0b0010 THUMB2
+EndEnum
+Enum 3:0 State0
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg ID_PFR1_EL1 3 0 0 1 1
+Res0 63:32
+Enum 31:28 GIC
+ 0b0000 NI
+ 0b0001 GICv3
+ 0b0010 GICv4p1
+EndEnum
+Enum 27:24 Virt_frac
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 Sec_frac
+ 0b0000 NI
+ 0b0001 WALK_DISABLE
+ 0b0010 SECURE_MEMORY
+EndEnum
+Enum 19:16 GenTimer
+ 0b0000 NI
+ 0b0001 IMP
+ 0b0010 ECV
+EndEnum
+Enum 15:12 Virtualization
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 11:8 MProgMod
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 Security
+ 0b0000 NI
+ 0b0001 EL3
+ 0b0010 NSACR_RFR
+EndEnum
+Enum 3:0 ProgMod
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg ID_DFR0_EL1 3 0 0 1 2
+Res0 63:32
+Enum 31:28 TraceFilt
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 27:24 PerfMon
+ 0b0000 NI
+ 0b0001 PMUv1
+ 0b0010 PMUv2
+ 0b0011 PMUv3
+ 0b0100 PMUv3p1
+ 0b0101 PMUv3p4
+ 0b0110 PMUv3p5
+ 0b0111 PMUv3p7
+ 0b1000 PMUv3p8
+ 0b1111 IMPDEF
+EndEnum
+Enum 23:20 MProfDbg
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 19:16 MMapTrc
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 15:12 CopTrc
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 11:8 MMapDbg
+ 0b0000 NI
+ 0b0100 Armv7
+ 0b0101 Armv7p1
+EndEnum
+Field 7:4 CopSDbg
+Enum 3:0 CopDbg
+ 0b0000 NI
+ 0b0010 Armv6
+ 0b0011 Armv6p1
+ 0b0100 Armv7
+ 0b0101 Armv7p1
+ 0b0110 Armv8
+ 0b0111 VHE
+ 0b1000 Debugv8p2
+ 0b1001 Debugv8p4
+ 0b1010 Debugv8p8
+EndEnum
+EndSysreg
+
+Sysreg ID_AFR0_EL1 3 0 0 1 3
+Res0 63:16
+Field 15:12 IMPDEF3
+Field 11:8 IMPDEF2
+Field 7:4 IMPDEF1
+Field 3:0 IMPDEF0
+EndSysreg
+
+Sysreg ID_MMFR0_EL1 3 0 0 1 4
+Res0 63:32
+Enum 31:28 InnerShr
+ 0b0000 NC
+ 0b0001 HW
+ 0b1111 IGNORED
+EndEnum
+Enum 27:24 FCSE
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 AuxReg
+ 0b0000 NI
+ 0b0001 ACTLR
+ 0b0010 AIFSR
+EndEnum
+Enum 19:16 TCM
+ 0b0000 NI
+ 0b0001 IMPDEF
+ 0b0010 TCM
+ 0b0011 TCM_DMA
+EndEnum
+Enum 15:12 ShareLvl
+ 0b0000 ONE
+ 0b0001 TWO
+EndEnum
+Enum 11:8 OuterShr
+ 0b0000 NC
+ 0b0001 HW
+ 0b1111 IGNORED
+EndEnum
+Enum 7:4 PMSA
+ 0b0000 NI
+ 0b0001 IMPDEF
+ 0b0010 PMSAv6
+ 0b0011 PMSAv7
+EndEnum
+Enum 3:0 VMSA
+ 0b0000 NI
+ 0b0001 IMPDEF
+ 0b0010 VMSAv6
+ 0b0011 VMSAv7
+ 0b0100 VMSAv7_PXN
+ 0b0101 VMSAv7_LONG
+EndEnum
+EndSysreg
+
+Sysreg ID_MMFR1_EL1 3 0 0 1 5
+Res0 63:32
+Enum 31:28 BPred
+ 0b0000 NI
+ 0b0001 BP_SW_MANGED
+ 0b0010 BP_ASID_AWARE
+ 0b0011 BP_NOSNOOP
+ 0b0100 BP_INVISIBLE
+EndEnum
+Enum 27:24 L1TstCln
+ 0b0000 NI
+ 0b0001 NOINVALIDATE
+ 0b0010 INVALIDATE
+EndEnum
+Enum 23:20 L1Uni
+ 0b0000 NI
+ 0b0001 INVALIDATE
+ 0b0010 CLEAN_AND_INVALIDATE
+EndEnum
+Enum 19:16 L1Hvd
+ 0b0000 NI
+ 0b0001 INVALIDATE_ISIDE_ONLY
+ 0b0010 INVALIDATE
+ 0b0011 CLEAN_AND_INVALIDATE
+EndEnum
+Enum 15:12 L1UniSW
+ 0b0000 NI
+ 0b0001 CLEAN
+ 0b0010 CLEAN_AND_INVALIDATE
+ 0b0011 INVALIDATE
+EndEnum
+Enum 11:8 L1HvdSW
+ 0b0000 NI
+ 0b0001 CLEAN_AND_INVALIDATE
+ 0b0010 INVALIDATE_DSIDE_ONLY
+ 0b0011 INVALIDATE
+EndEnum
+Enum 7:4 L1UniVA
+ 0b0000 NI
+ 0b0001 CLEAN_AND_INVALIDATE
+ 0b0010 INVALIDATE_BP
+EndEnum
+Enum 3:0 L1HvdVA
+ 0b0000 NI
+ 0b0001 CLEAN_AND_INVALIDATE
+ 0b0010 INVALIDATE_BP
+EndEnum
+EndSysreg
+
+Sysreg ID_MMFR2_EL1 3 0 0 1 6
+Res0 63:32
+Enum 31:28 HWAccFlg
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 27:24 WFIStall
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 MemBarr
+ 0b0000 NI
+ 0b0001 DSB_ONLY
+ 0b0010 IMP
+EndEnum
+Enum 19:16 UniTLB
+ 0b0000 NI
+ 0b0001 BY_VA
+ 0b0010 BY_MATCH_ASID
+ 0b0011 BY_ALL_ASID
+ 0b0100 OTHER_TLBS
+ 0b0101 BROADCAST
+ 0b0110 BY_IPA
+EndEnum
+Enum 15:12 HvdTLB
+ 0b0000 NI
+EndEnum
+Enum 11:8 L1HvdRng
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 L1HvdBG
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 3:0 L1HvdFG
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg ID_MMFR3_EL1 3 0 0 1 7
+Res0 63:32
+Enum 31:28 Supersec
+ 0b0000 IMP
+ 0b1111 NI
+EndEnum
+Enum 27:24 CMemSz
+ 0b0000 4GB
+ 0b0001 64GB
+ 0b0010 1TB
+EndEnum
+Enum 23:20 CohWalk
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 19:16 PAN
+ 0b0000 NI
+ 0b0001 PAN
+ 0b0010 PAN2
+EndEnum
+Enum 15:12 MaintBcst
+ 0b0000 NI
+ 0b0001 NO_TLB
+ 0b0010 ALL
+EndEnum
+Enum 11:8 BPMaint
+ 0b0000 NI
+ 0b0001 ALL
+ 0b0010 BY_VA
+EndEnum
+Enum 7:4 CMaintSW
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 3:0 CMaintVA
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg ID_ISAR0_EL1 3 0 0 2 0
+Res0 63:28
+Enum 27:24 Divide
+ 0b0000 NI
+ 0b0001 xDIV_T32
+ 0b0010 xDIV_A32
+EndEnum
+Enum 23:20 Debug
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 19:16 Coproc
+ 0b0000 NI
+ 0b0001 MRC
+ 0b0010 MRC2
+ 0b0011 MRRC
+ 0b0100 MRRC2
+EndEnum
+Enum 15:12 CmpBranch
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 11:8 BitField
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 BitCount
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 3:0 Swap
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg ID_ISAR1_EL1 3 0 0 2 1
+Res0 63:32
+Enum 31:28 Jazelle
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 27:24 Interwork
+ 0b0000 NI
+ 0b0001 BX
+ 0b0010 BLX
+ 0b0011 A32_BX
+EndEnum
+Enum 23:20 Immediate
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 19:16 IfThen
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 15:12 Extend
+ 0b0000 NI
+ 0b0001 SXTB
+ 0b0010 SXTB16
+EndEnum
+Enum 11:8 Except_AR
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 Except
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 3:0 Endian
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg ID_ISAR2_EL1 3 0 0 2 2
+Res0 63:32
+Enum 31:28 Reversal
+ 0b0000 NI
+ 0b0001 REV
+ 0b0010 RBIT
+EndEnum
+Enum 27:24 PSR_AR
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 MultU
+ 0b0000 NI
+ 0b0001 UMULL
+ 0b0010 UMAAL
+EndEnum
+Enum 19:16 MultS
+ 0b0000 NI
+ 0b0001 SMULL
+ 0b0010 SMLABB
+ 0b0011 SMLAD
+EndEnum
+Enum 15:12 Mult
+ 0b0000 NI
+ 0b0001 MLA
+ 0b0010 MLS
+EndEnum
+Enum 11:8 MultiAccessInt
+ 0b0000 NI
+ 0b0001 RESTARTABLE
+ 0b0010 CONTINUABLE
+EndEnum
+Enum 7:4 MemHint
+ 0b0000 NI
+ 0b0001 PLD
+ 0b0010 PLD2
+ 0b0011 PLI
+ 0b0100 PLDW
+EndEnum
+Enum 3:0 LoadStore
+ 0b0000 NI
+ 0b0001 DOUBLE
+ 0b0010 ACQUIRE
+EndEnum
+EndSysreg
+
+Sysreg ID_ISAR3_EL1 3 0 0 2 3
+Res0 63:32
+Enum 31:28 T32EE
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 27:24 TrueNOP
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 T32Copy
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 19:16 TabBranch
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 15:12 SynchPrim
+ 0b0000 NI
+ 0b0001 EXCLUSIVE
+ 0b0010 DOUBLE
+EndEnum
+Enum 11:8 SVC
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 SIMD
+ 0b0000 NI
+ 0b0001 SSAT
+ 0b0011 PKHBT
+EndEnum
+Enum 3:0 Saturate
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg ID_ISAR4_EL1 3 0 0 2 4
+Res0 63:32
+Enum 31:28 SWP_frac
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 27:24 PSR_M
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 SynchPrim_frac
+ 0b0000 NI
+ 0b0011 IMP
+EndEnum
+Enum 19:16 Barrier
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 15:12 SMC
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 11:8 Writeback
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 WithShifts
+ 0b0000 NI
+ 0b0001 LSL3
+ 0b0011 LS
+ 0b0100 REG
+EndEnum
+Enum 3:0 Unpriv
+ 0b0000 NI
+ 0b0001 REG_BYTE
+ 0b0010 SIGNED_HALFWORD
+EndEnum
+EndSysreg
+
+Sysreg ID_ISAR5_EL1 3 0 0 2 5
+Res0 63:32
+Enum 31:28 VCMA
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 27:24 RDM
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Res0 23:20
+Enum 19:16 CRC32
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 15:12 SHA2
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 11:8 SHA1
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 AES
+ 0b0000 NI
+ 0b0001 IMP
+ 0b0010 VMULL
+EndEnum
+Enum 3:0 SEVL
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg ID_ISAR6_EL1 3 0 0 2 7
+Res0 63:28
+Enum 27:24 I8MM
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 BF16
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 19:16 SPECRES
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 15:12 SB
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 11:8 FHM
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 DP
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 3:0 JSCVT
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg ID_MMFR4_EL1 3 0 0 2 6
+Res0 63:32
+Enum 31:28 EVT
+ 0b0000 NI
+ 0b0001 NO_TLBIS
+ 0b0010 TLBIS
+EndEnum
+Enum 27:24 CCIDX
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 LSM
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 19:16 HPDS
+ 0b0000 NI
+ 0b0001 AA32HPD
+ 0b0010 HPDS2
+EndEnum
+Enum 15:12 CnP
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 11:8 XNX
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 AC2
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 3:0 SpecSEI
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg MVFR0_EL1 3 0 0 3 0
+Res0 63:32
+Enum 31:28 FPRound
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 27:24 FPShVec
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 23:20 FPSqrt
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 19:16 FPDivide
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 15:12 FPTrap
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 11:8 FPDP
+ 0b0000 NI
+ 0b0001 VFPv2
+ 0b0010 VFPv3
+EndEnum
+Enum 7:4 FPSP
+ 0b0000 NI
+ 0b0001 VFPv2
+ 0b0010 VFPv3
+EndEnum
+Enum 3:0 SIMDReg
+ 0b0000 NI
+ 0b0001 IMP_16x64
+ 0b0010 IMP_32x64
+EndEnum
+EndSysreg
+
+Sysreg MVFR1_EL1 3 0 0 3 1
+Res0 63:32
+Enum 31:28 SIMDFMAC
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 27:24 FPHP
+ 0b0000 NI
+ 0b0001 FPHP
+ 0b0010 FPHP_CONV
+ 0b0011 FP16
+EndEnum
+Enum 23:20 SIMDHP
+ 0b0000 NI
+ 0b0001 SIMDHP
+ 0b0010 SIMDHP_FLOAT
+EndEnum
+Enum 19:16 SIMDSP
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 15:12 SIMDInt
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 11:8 SIMDLS
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 7:4 FPDNaN
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 3:0 FPFtZ
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg MVFR2_EL1 3 0 0 3 2
+Res0 63:8
+Enum 7:4 FPMisc
+ 0b0000 NI
+ 0b0001 FP
+ 0b0010 FP_DIRECTED_ROUNDING
+ 0b0011 FP_ROUNDING
+ 0b0100 FP_MAX_MIN
+EndEnum
+Enum 3:0 SIMDMisc
+ 0b0000 NI
+ 0b0001 SIMD_DIRECTED_ROUNDING
+ 0b0010 SIMD_ROUNDING
+ 0b0011 SIMD_MAX_MIN
+EndEnum
+EndSysreg
+
+Sysreg ID_PFR2_EL1 3 0 0 3 4
+Res0 63:12
+Enum 11:8 RAS_frac
+ 0b0000 NI
+ 0b0001 RASv1p1
+EndEnum
+Enum 7:4 SSBS
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 3:0 CSV3
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
+Sysreg ID_DFR1_EL1 3 0 0 3 5
+Res0 63:8
+Enum 7:4 HPMN0
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 3:0 MTPMU
+ 0b0000 IMPDEF
+ 0b0001 IMP
+ 0b1111 NI
+EndEnum
+EndSysreg
+
+Sysreg ID_MMFR5_EL1 3 0 0 3 6
+Res0 63:8
+Enum 7:4 nTLBPA
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 3:0 ETS
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
Sysreg ID_AA64PFR0_EL1 3 0 0 4 0
Enum 63:60 CSV3
0b0000 NI
@@ -210,6 +964,7 @@ EndEnum
Enum 3:0 SVEver
0b0000 IMP
0b0001 SVE2
+ 0b0010 SVE2p1
EndEnum
EndSysreg
@@ -484,7 +1239,16 @@ EndEnum
EndSysreg
Sysreg ID_AA64ISAR2_EL1 3 0 0 6 2
-Res0 63:28
+Res0 63:56
+Enum 55:52 CSSC
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 51:48 RPRFM
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Res0 47:28
Enum 27:24 PAC_frac
0b0000 NI
0b0001 IMP