author | Linus Torvalds &lt;torvalds@linux-foundation.org&gt; | 2020-03-31 10:05:01 -0700
---|---|---
committer | Linus Torvalds &lt;torvalds@linux-foundation.org&gt; | 2020-03-31 10:05:01 -0700
commit | 3cd86a58f7734bf9cef38f6f899608ebcaa3da13 (patch) |
tree | 6ae5b8109011ee40deef645a9701e2d8dc4e4fce /arch/arm64/include/asm/cpufeature.h |
parent | a8222fd5b80c7ec83f257060670becbeea9b50b9 (diff) |
parent | b2a84de2a2deb76a6a51609845341f508c518c03 (diff) |
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
"The bulk is in-kernel pointer authentication, activity monitors and
lots of asm symbol annotations. I also queued the sys_mremap() patch
commenting the asymmetry in the address untagging.
Summary:
- In-kernel Pointer Authentication support (previously only offered
to user space).
- ARM Activity Monitors (AMU) extension support allowing better CPU
utilisation numbers for the scheduler (frequency invariance).
- Memory hot-remove support for arm64.
- Lots of asm annotations (SYM_*) in preparation for the in-kernel
Branch Target Identification (BTI) support.
- arm64 perf updates: ARMv8.5-PMU 64-bit counters, refactoring the
PMU init callbacks, support for new DT compatibles.
- IPv6 header checksum optimisation.
- Fixes: SDEI (software delegated exception interface) double-lock on
hibernate with shared events.
- Minor clean-ups and refactoring: cpu_ops accessor,
cpu_do_switch_mm() converted to C, cpufeature finalisation helper.
- sys_mremap() comment explaining the asymmetric address untagging
behaviour"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (81 commits)
mm/mremap: Add comment explaining the untagging behaviour of mremap()
arm64: head: Convert install_el2_stub to SYM_INNER_LABEL
arm64: Introduce get_cpu_ops() helper function
arm64: Rename cpu_read_ops() to init_cpu_ops()
arm64: Declare ACPI parking protocol CPU operation if needed
arm64: move kimage_vaddr to .rodata
arm64: use mov_q instead of literal ldr
arm64: Kconfig: verify binutils support for ARM64_PTR_AUTH
lkdtm: arm64: test kernel pointer authentication
arm64: compile the kernel with ptrauth return address signing
kconfig: Add support for 'as-option'
arm64: suspend: restore the kernel ptrauth keys
arm64: __show_regs: strip PAC from lr in printk
arm64: unwind: strip PAC from kernel addresses
arm64: mask PAC bits of __builtin_return_address
arm64: initialize ptrauth keys for kernel booting task
arm64: initialize and switch ptrauth kernel keys
arm64: enable ptrauth earlier
arm64: cpufeature: handle conflicts based on capability
arm64: cpufeature: Move cpu capability helpers inside C file
...
Diffstat (limited to 'arch/arm64/include/asm/cpufeature.h')
-rw-r--r-- | arch/arm64/include/asm/cpufeature.h | 125 |
1 file changed, 96 insertions, 29 deletions
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 2a746b99e937..afe08251ff95 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -208,6 +208,10 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
  * In some non-typical cases either both (a) and (b), or neither,
  * should be permitted. This can be described by including neither
  * or both flags in the capability's type field.
+ *
+ * In case of a conflict, the CPU is prevented from booting. If the
+ * ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
+ * then a kernel panic is triggered.
  */
@@ -240,6 +244,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 #define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU     ((u16)BIT(4))
 /* Is it safe for a late CPU to miss this capability when system has it */
 #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU      ((u16)BIT(5))
+/* Panic when a conflict is detected */
+#define ARM64_CPUCAP_PANIC_ON_CONFLICT          ((u16)BIT(6))
 
 /*
  * CPU errata workarounds that need to be enabled at boot time if one or
@@ -279,9 +285,20 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 
 /*
  * CPU feature used early in the boot based on the boot CPU. All secondary
- * CPUs must match the state of the capability as detected by the boot CPU.
+ * CPUs must match the state of the capability as detected by the boot CPU. In
+ * case of a conflict, a kernel panic is triggered.
+ */
+#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE            \
+        (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
+
+/*
+ * CPU feature used early in the boot based on the boot CPU. It is safe for a
+ * late CPU to have this feature even though the boot CPU hasn't enabled it,
+ * although the feature will not be used by Linux in this case. If the boot CPU
+ * has enabled this feature already, then every late CPU must have it.
  */
-#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
+#define ARM64_CPUCAP_BOOT_CPU_FEATURE                   \
+        (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
 
 struct arm64_cpu_capabilities {
         const char *desc;
@@ -340,18 +357,6 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
         return cap->type & ARM64_CPUCAP_SCOPE_MASK;
 }
 
-static inline bool
-cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
-{
-        return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
-}
-
-static inline bool
-cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
-{
-        return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
-}
-
 /*
  * Generic helper for handling capabilties with multiple (match,enable) pairs
  * of call backs, sharing the same capability bit.
@@ -390,14 +395,16 @@ unsigned long cpu_get_elf_hwcap2(void);
 #define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
 #define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
 
-/* System capability check for constant caps */
-static __always_inline bool __cpus_have_const_cap(int num)
+static __always_inline bool system_capabilities_finalized(void)
 {
-        if (num >= ARM64_NCAPS)
-                return false;
-        return static_branch_unlikely(&cpu_hwcap_keys[num]);
+        return static_branch_likely(&arm64_const_caps_ready);
 }
 
+/*
+ * Test for a capability with a runtime check.
+ *
+ * Before the capability is detected, this returns false.
+ */
 static inline bool cpus_have_cap(unsigned int num)
 {
         if (num >= ARM64_NCAPS)
@@ -405,14 +412,53 @@ static inline bool cpus_have_cap(unsigned int num)
         return test_bit(num, cpu_hwcaps);
 }
 
+/*
+ * Test for a capability without a runtime check.
+ *
+ * Before capabilities are finalized, this returns false.
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
+static __always_inline bool __cpus_have_const_cap(int num)
+{
+        if (num >= ARM64_NCAPS)
+                return false;
+        return static_branch_unlikely(&cpu_hwcap_keys[num]);
+}
+
+/*
+ * Test for a capability, possibly with a runtime check.
+ *
+ * Before capabilities are finalized, this behaves as cpus_have_cap().
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
 static __always_inline bool cpus_have_const_cap(int num)
 {
-        if (static_branch_likely(&arm64_const_caps_ready))
+        if (system_capabilities_finalized())
                 return __cpus_have_const_cap(num);
         else
                 return cpus_have_cap(num);
 }
 
+/*
+ * Test for a capability without a runtime check.
+ *
+ * Before capabilities are finalized, this will BUG().
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
+static __always_inline bool cpus_have_final_cap(int num)
+{
+        if (system_capabilities_finalized())
+                return __cpus_have_const_cap(num);
+        else
+                BUG();
+}
+
 static inline void cpus_set_cap(unsigned int num)
 {
         if (num >= ARM64_NCAPS) {
@@ -447,6 +493,29 @@ cpuid_feature_extract_unsigned_field(u64 features, int field)
         return cpuid_feature_extract_unsigned_field_width(features, field, 4);
 }
 
+/*
+ * Fields that identify the version of the Performance Monitors Extension do
+ * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
+ * "Alternative ID scheme used for the Performance Monitors Extension version".
+ */
+static inline u64 __attribute_const__
+cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
+{
+        u64 val = cpuid_feature_extract_unsigned_field(features, field);
+        u64 mask = GENMASK_ULL(field + 3, field);
+
+        /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
+        if (val == 0xf)
+                val = 0;
+
+        if (val > cap) {
+                features &= ~mask;
+                features |= (cap << field) & mask;
+        }
+
+        return features;
+}
+
 static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
 {
         return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -590,15 +659,13 @@ static __always_inline bool system_supports_cnp(void)
 static inline bool system_supports_address_auth(void)
 {
         return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
-                (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
-                 cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
+                cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
 }
 
 static inline bool system_supports_generic_auth(void)
 {
         return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
-                (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
-                 cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
+                cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
 }
 
 static inline bool system_uses_irq_prio_masking(void)
@@ -613,11 +680,6 @@ static inline bool system_has_prio_mask_debugging(void)
                 system_uses_irq_prio_masking();
 }
 
-static inline bool system_capabilities_finalized(void)
-{
-        return static_branch_likely(&arm64_const_caps_ready);
-}
-
 #define ARM64_BP_HARDEN_UNKNOWN         -1
 #define ARM64_BP_HARDEN_WA_NEEDED       0
 #define ARM64_BP_HARDEN_NOT_REQUIRED    1
@@ -678,6 +740,11 @@ static inline bool cpu_has_hw_af(void)
                                                 ID_AA64MMFR1_HADBS_SHIFT);
 }
 
+#ifdef CONFIG_ARM64_AMU_EXTN
+/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
+extern bool cpu_has_amu_feat(int cpu);
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif
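The rework above leaves three capability checks with distinct contracts: cpus_have_cap() is always a runtime bitmap test, cpus_have_const_cap() falls back to that test until capabilities are finalized, and the new cpus_have_final_cap() may only be called after finalization (the kernel BUG()s otherwise). Below is a minimal user-space sketch of those contracts, with hypothetical model_* names that are not kernel code; the kernel's static-branch patching cannot be shown in user space and is approximated with a plain boolean.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NCAPS 80

static bool hwcaps[NCAPS];   /* models the cpu_hwcaps bitmap */
static bool finalized;       /* models arm64_const_caps_ready */

/* cpus_have_cap(): a plain runtime bitmap test, valid at any time */
static bool model_have_cap(unsigned int num)
{
	return num < NCAPS && hwcaps[num];
}

/*
 * cpus_have_const_cap(): before finalization this behaves exactly as
 * cpus_have_cap(); afterwards the kernel patches in a static branch,
 * which this model can only approximate by returning the same result.
 */
static bool model_have_const_cap(unsigned int num)
{
	return model_have_cap(num);
}

/*
 * cpus_have_final_cap(): only legal once capabilities are finalized;
 * the kernel BUG()s otherwise, modelled here with assert().
 */
static bool model_have_final_cap(unsigned int num)
{
	assert(finalized);
	return model_have_cap(num);
}

int main(void)
{
	hwcaps[7] = true;  /* pretend capability 7 was detected */

	printf("%d\n", model_have_const_cap(7)); /* 1: runtime fallback path */

	finalized = true;  /* the point where the kernel finalizes caps */
	printf("%d\n", model_have_final_cap(7)); /* 1: fast path now legal */
	return 0;
}
```

The design point the split captures: callers that can run before finalization keep the safe fallback of cpus_have_const_cap(), while callers known to run only afterwards can use cpus_have_final_cap() and get the BUG() as a loud guard against misuse.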
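The new cpuid_feature_cap_perfmon_field() clamps a 4-bit ID register field so that at most a capped PMU version is advertised. The arithmetic can be checked standalone; here is a user-space sketch that mirrors the mask/extract/cap logic from the hunk above. The GENMASK_ULL restatement and the example field offset (PMUVer sits in ID_AA64DFR0_EL1 bits [11:8]) are supplied here for self-containment, not taken from this header.

```c
#include <stdint.h>
#include <stdio.h>

/* User-space restatement of the kernel's GENMASK_ULL(h, l) */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Mirrors cpuid_feature_extract_unsigned_field() for 4-bit fields */
static uint64_t extract_field(uint64_t features, int field)
{
	return (features >> field) & 0xf;
}

/* Mirrors the capping logic of cpuid_feature_cap_perfmon_field() */
static uint64_t cap_perfmon_field(uint64_t features, int field, uint64_t cap)
{
	uint64_t val = extract_field(features, field);
	uint64_t mask = GENMASK_ULL(field + 3, field);

	/* 0xf encodes IMPLEMENTATION DEFINED: treat as unimplemented */
	if (val == 0xf)
		val = 0;

	/* Clamp the advertised value to the requested cap */
	if (val > cap) {
		features &= ~mask;
		features |= (cap << field) & mask;
	}

	return features;
}

int main(void)
{
	/* PMUVer lives in ID_AA64DFR0_EL1[11:8]; pretend it reads 5 */
	uint64_t dfr0 = 5ULL << 8;
	uint64_t capped = cap_perfmon_field(dfr0, 8, 4);

	printf("PMUVer: %llu -> %llu\n",
	       (unsigned long long)extract_field(dfr0, 8),
	       (unsigned long long)extract_field(capped, 8));
	return 0;
}
```

Running this prints "PMUVer: 5 -> 4": a field above the cap is rewritten in place, while a field at or below the cap passes through unchanged.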