author     Mark Brown <broonie@kernel.org>            2023-09-13 15:48:12 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>  2023-09-25 16:14:49 +0100
commit     abef0695f9665c3dbe9473f964c4da3c1f7c5d3f (patch)
tree       1999ca1aab4f24a5f9665ef442b9b9e8489c7426
parent     6465e260f48790807eef06b583b38ca9789b6072 (diff)
arm64/sve: Remove ZCR pseudo register from cpufeature code
For reasons that are not currently apparent, during cpufeature enumeration we maintain a pseudo register for ZCR which records the maximum supported vector length using the value that would be written to ZCR_EL1.LEN to configure it. This is not exposed to userspace and is not sufficient for detecting unsupportable configurations; we need the more detailed checks in vec_update_vq_map() for that, since we can't cope with missing vector lengths on late CPUs and KVM requires an exactly matching set of supported vector lengths, as EL1 can enumerate VLs directly with the hardware.

Remove the code, replacing the usage in sve_setup() with a query of the vq_map.

Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20230913-arm64-vec-len-cpufeature-v1-1-cc69b0600a8a@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
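A minimal user-space sketch (not kernel code) of the vq_map query that now replaces the pseudo-ZCR read in sve_setup(). The constants and the __vq_to_bit()/__bit_to_vq()/sve_vl_from_vq() helpers mirror the kernel's conventions but are simplified re-implementations, and the set of supported vector lengths is made up purely for illustration.

/*
 * Standalone illustration: derive the maximum SVE vector length from a
 * vq_map-style bitmap, as sve_setup() now does via find_first_bit(),
 * instead of reading it back through the removed pseudo-ZCR register.
 */
#include <stdbool.h>
#include <stdio.h>

#define SVE_VQ_MIN  1
#define SVE_VQ_MAX  512     /* assumed to match the kernel's uapi definition */

/* VL in bytes <-> VQ in 128-bit (16-byte) granules */
static unsigned int sve_vl_from_vq(unsigned int vq) { return vq * 16; }

/*
 * The vq_map sets bit (SVE_VQ_MAX - vq) for each supported VQ, so the
 * largest supported VQ corresponds to the first (lowest) set bit.
 */
static unsigned int __vq_to_bit(unsigned int vq)  { return SVE_VQ_MAX - vq; }
static unsigned int __bit_to_vq(unsigned int bit) { return SVE_VQ_MAX - bit; }

int main(void)
{
    bool vq_map[SVE_VQ_MAX] = { false };
    /* Fall back to SVE_VQ_MIN if no bit were set at all. */
    unsigned int vq, bit, max_bit = SVE_VQ_MAX - SVE_VQ_MIN;

    /* Pretend the CPUs support VQ 1, 2 and 4 (VLs of 16, 32 and 64 bytes). */
    for (vq = SVE_VQ_MIN; vq <= 4; vq++)
        if (vq != 3)
            vq_map[__vq_to_bit(vq)] = true;

    /* Open-coded equivalent of find_first_bit(info->vq_map, SVE_VQ_MAX) */
    for (bit = 0; bit < SVE_VQ_MAX; bit++) {
        if (vq_map[bit]) {
            max_bit = bit;
            break;
        }
    }

    /* In the patch: info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit)); */
    printf("max_vl = %u bytes\n", sve_vl_from_vq(__bit_to_vq(max_bit)));
    return 0;
}

With VQ 4 as the largest supported granule count, the lookup yields a maximum VL of 64 bytes, the same value the removed read_zcr_features()/pseudo-ZCR path would have produced.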
-rw-r--r--  arch/arm64/include/asm/cpu.h     |  3
-rw-r--r--  arch/arm64/include/asm/fpsimd.h  |  1
-rw-r--r--  arch/arm64/kernel/cpufeature.c   | 30
-rw-r--r--  arch/arm64/kernel/fpsimd.c       | 35
4 files changed, 9 insertions(+), 60 deletions(-)
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index e749838b9c5d..f85650dc561c 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -64,9 +64,6 @@ struct cpuinfo_arm64 {
struct cpuinfo_32bit aarch32;
- /* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
- u64 reg_zcr;
-
/* pseudo-SMCR for recording maximum SMCR_EL1 LEN value: */
u64 reg_smcr;
};
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 8df46f186c64..9e5d3a0812b6 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -128,7 +128,6 @@ extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
-extern u64 read_zcr_features(void);
extern u64 read_smcr_features(void);
/*
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 444a73c2e638..5783e9349563 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -611,12 +611,6 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = {
ARM64_FTR_END,
};
-static const struct arm64_ftr_bits ftr_zcr[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
- ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_WIDTH, 0), /* LEN */
- ARM64_FTR_END,
-};
-
static const struct arm64_ftr_bits ftr_smcr[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_WIDTH, 0), /* LEN */
@@ -736,7 +730,6 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
/* Op1 = 0, CRn = 1, CRm = 2 */
- ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr),
/* Op1 = 1, CRn = 0, CRm = 0 */
@@ -1040,8 +1033,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
- info->reg_zcr = read_zcr_features();
- init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
+ sve_kernel_enable(NULL);
vec_init_vq_map(ARM64_VEC_SVE);
}
@@ -1289,15 +1281,13 @@ void update_cpu_features(int cpu,
taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);
+ /* Probe vector lengths */
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
- info->reg_zcr = read_zcr_features();
- taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
- info->reg_zcr, boot->reg_zcr);
-
- /* Probe vector lengths */
- if (!system_capabilities_finalized())
+ if (!system_capabilities_finalized()) {
+ sve_kernel_enable(NULL);
vec_update_vq_map(ARM64_VEC_SVE);
+ }
}
if (IS_ENABLED(CONFIG_ARM64_SME) &&
@@ -3153,19 +3143,11 @@ static void verify_local_elf_hwcaps(void)
static void verify_sve_features(void)
{
- u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
- u64 zcr = read_zcr_features();
-
- unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
- unsigned int len = zcr & ZCR_ELx_LEN_MASK;
-
- if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SVE)) {
+ if (vec_verify_vq_map(ARM64_VEC_SVE)) {
pr_crit("CPU%d: SVE: vector length support mismatch\n",
smp_processor_id());
cpu_die_early();
}
-
- /* Add checks on other ZCR bits here if necessary */
}
static void verify_sme_features(void)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 91e44ac7150f..35bf40c2f972 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1170,32 +1170,12 @@ void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
isb();
}
-/*
- * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
- * vector length.
- *
- * Use only if SVE is present.
- * This function clobbers the SVE vector length.
- */
-u64 read_zcr_features(void)
-{
- /*
- * Set the maximum possible VL, and write zeroes to all other
- * bits to see if they stick.
- */
- sve_kernel_enable(NULL);
- write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
-
- /* Return LEN value that would be written to get the maximum VL */
- return sve_vq_from_vl(sve_get_vl()) - 1;
-}
-
void __init sve_setup(void)
{
struct vl_info *info = &vl_info[ARM64_VEC_SVE];
- u64 zcr;
DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
unsigned long b;
+ int max_bit;
if (!system_supports_sve())
return;
@@ -1208,17 +1188,8 @@ void __init sve_setup(void)
if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map)))
set_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map);
- zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
- info->max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
-
- /*
- * Sanity-check that the max VL we determined through CPU features
- * corresponds properly to sve_vq_map. If not, do our best:
- */
- if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SVE,
- info->max_vl)))
- info->max_vl = find_supported_vector_length(ARM64_VEC_SVE,
- info->max_vl);
+ max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
+ info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));
/*
* For the default VL, pick the maximum supported value <= 64.