-rw-r--r--  Documentation/arm64/silicon-errata.rst      3
-rw-r--r--  arch/arm64/Kconfig                          19
-rw-r--r--  arch/arm64/kernel/cpu_errata.c               7
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h     99
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c                14
-rw-r--r--  arch/arm64/tools/cpucaps                     1
6 files changed, 122 insertions, 21 deletions
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 9e311bc43e05..cd46e2b20a81 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -52,6 +52,9 @@ stable kernels.
| Allwinner      | A64/R18         | UNKNOWN1        | SUN50I_ERRATUM_UNKNOWN1     |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
+| Ampere         | AmpereOne       | AC03_CPU_38     | AMPERE_ERRATUM_AC03_CPU_38  |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2064142        | ARM64_ERRATUM_2064142       |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a6002084f09c..50f6ddc7e3a0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -407,6 +407,25 @@ menu "Kernel Features"
menu "ARM errata workarounds via the alternatives framework"
+config AMPERE_ERRATUM_AC03_CPU_38
+ bool "AmpereOne: AC03_CPU_38: Certain bits in the Virtualization Translation Control Register and Translation Control Registers do not follow RES0 semantics"
+ default y
+ help
+ This option adds an alternative code sequence to work around Ampere
+ erratum AC03_CPU_38 on AmpereOne.
+
+ The affected design reports FEAT_HAFDBS as not implemented in
+ ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0
+ as required by the architecture. The unadvertised HAFDBS
+ implementation suffers from an additional erratum where hardware
+ A/D updates can occur after a PTE has been marked invalid.
+
+ The workaround forces KVM to explicitly set VTCR_EL2.HA to 0,
+ which avoids enabling unadvertised hardware Access Flag management
+ at stage-2.
+
+ If unsure, say Y.
+
config ARM64_WORKAROUND_CLEAN_CACHE
bool
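For orientation, the control bits the help text above refers to are the stage-1 and stage-2 hardware Access/Dirty enables. A small illustrative block with their bit positions follows; the macro names here are ad hoc (the kernel's own definitions are TCR_HA/TCR_HD in pgtable-hwdef.h and VTCR_EL2_HA in kvm_arm.h):

/* Illustrative only: HAFDBS enable bits that the architecture requires
 * to be RES0 when ID_AA64MMFR1_EL1.HAFDBS reads as 0, but which remain
 * writable on the affected AmpereOne parts. */
#define EXAMPLE_TCR_EL1_HA	(1UL << 39)	/* stage-1 hardware Access flag */
#define EXAMPLE_TCR_EL1_HD	(1UL << 40)	/* stage-1 hardware Dirty state */
#define EXAMPLE_VTCR_EL2_HA	(1UL << 21)	/* stage-2 hardware Access flag */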
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 307faa2b4395..be66e94a21bd 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -730,6 +730,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		.cpu_enable = cpu_clear_bf16_from_user_emulation,
	},
#endif
+#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
+	{
+		.desc = "AmpereOne erratum AC03_CPU_38",
+		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+	},
+#endif
	{
	}
};
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index b5f53d3fc14c..dc584b0f4f12 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -70,6 +70,56 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
	}
}
+static inline bool __hfgxtr_traps_required(void)
+{
+	if (cpus_have_final_cap(ARM64_SME))
+		return true;
+
+	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+		return true;
+
+	return false;
+}
+
+static inline void __activate_traps_hfgxtr(void)
+{
+	u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
+
+	if (cpus_have_final_cap(ARM64_SME)) {
+		tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
+
+		r_clr |= tmp;
+		w_clr |= tmp;
+	}
+
+	/*
+	 * Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD.
+	 */
+	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+		w_set |= HFGxTR_EL2_TCR_EL1_MASK;
+
+	sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
+	sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
+}
+
+static inline void __deactivate_traps_hfgxtr(void)
+{
+	u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
+
+	if (cpus_have_final_cap(ARM64_SME)) {
+		tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
+
+		r_set |= tmp;
+		w_set |= tmp;
+	}
+
+	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+		w_clr |= HFGxTR_EL2_TCR_EL1_MASK;
+
+	sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
+	sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
+}
+
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
@@ -89,16 +139,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
-	if (cpus_have_final_cap(ARM64_SME)) {
-		sysreg_clear_set_s(SYS_HFGRTR_EL2,
-				   HFGxTR_EL2_nSMPRI_EL1_MASK |
-				   HFGxTR_EL2_nTPIDR2_EL0_MASK,
-				   0);
-		sysreg_clear_set_s(SYS_HFGWTR_EL2,
-				   HFGxTR_EL2_nSMPRI_EL1_MASK |
-				   HFGxTR_EL2_nTPIDR2_EL0_MASK,
-				   0);
-	}
+	if (__hfgxtr_traps_required())
+		__activate_traps_hfgxtr();
}
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@@ -109,14 +151,8 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
	if (kvm_arm_support_pmu_v3())
		write_sysreg(0, pmuserenr_el0);
-	if (cpus_have_final_cap(ARM64_SME)) {
-		sysreg_clear_set_s(SYS_HFGRTR_EL2, 0,
-				   HFGxTR_EL2_nSMPRI_EL1_MASK |
-				   HFGxTR_EL2_nTPIDR2_EL0_MASK);
-		sysreg_clear_set_s(SYS_HFGWTR_EL2, 0,
-				   HFGxTR_EL2_nSMPRI_EL1_MASK |
-				   HFGxTR_EL2_nTPIDR2_EL0_MASK);
-	}
+	if (__hfgxtr_traps_required())
+		__deactivate_traps_hfgxtr();
}
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
@@ -384,12 +420,39 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
	return true;
}
+static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
+{
+	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
+	int rt = kvm_vcpu_sys_get_rt(vcpu);
+	u64 val = vcpu_get_reg(vcpu, rt);
+
+	if (sysreg != SYS_TCR_EL1)
+		return false;
+
+	/*
+	 * Affected parts do not advertise support for hardware Access Flag /
+	 * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying
+	 * control bits are still functional. The architecture requires these be
+	 * RES0 on systems that do not implement FEAT_HAFDBS.
+	 *
+	 * Uphold the requirements of the architecture by masking guest writes
+	 * to TCR_EL1.{HA,HD} here.
+	 */
+	val &= ~(TCR_HD | TCR_HA);
+	write_sysreg_el1(val, SYS_TCR);
+	return true;
+}
+
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    handle_tx2_tvm(vcpu))
		return true;
+	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
+	    handle_ampere1_tcr(vcpu))
+		return true;
+
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;
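Taken together, the switch.h changes implement a small trap-and-sanitise path: __activate_traps_hfgxtr() sets the TCR_EL1 bit in HFGWTR_EL2 so that guest writes to TCR_EL1 trap to EL2, and kvm_hyp_handle_sysreg() routes the trapped write through handle_ampere1_tcr(), which forwards the value with the HA/HD enables cleared. A minimal sketch of that sanitisation step, reusing the kernel's existing TCR_HA/TCR_HD bit definitions; the helper name is invented for illustration and is not part of the patch:

#include <asm/pgtable-hwdef.h>	/* TCR_HA, TCR_HD */

/* Hypothetical helper mirroring what handle_ampere1_tcr() does to a
 * trapped guest write before it reaches the real TCR_EL1. */
static inline u64 ac03_cpu_38_sanitise_tcr(u64 guest_val)
{
	/* Drop the unadvertised hardware Access/Dirty enables. */
	return guest_val & ~(TCR_HD | TCR_HA);
}

Note that only HFGWTR_EL2 is modified, so reads of TCR_EL1 remain untrapped; the extra cost on affected parts is one exit per guest TCR_EL1 write.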
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index a3939a9a4f7a..f5714ba74e9e 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -628,10 +628,18 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable the Hardware Access Flag management, unconditionally
-	 * on all CPUs. The features is RES0 on CPUs without the support
-	 * and must be ignored by the CPUs.
+	 * on all CPUs. In systems that have asymmetric support for the feature
+	 * this allows KVM to leverage hardware support on the subset of cores
+	 * that implement the feature.
+	 *
+	 * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
+	 * hardware) on implementations that do not advertise support for the
+	 * feature. As such, setting HA unconditionally is safe, unless you
+	 * happen to be running on a design that has unadvertised support for
+	 * HAFDBS. Here be dragons.
	 */
-	vtcr |= VTCR_EL2_HA;
+	if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+		vtcr |= VTCR_EL2_HA;
#endif /* CONFIG_ARM64_HW_AFDBM */
	/* Set the vmid bits */
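The stage-2 half of the mitigation is just the guarded assignment above: on affected parts VTCR_EL2.HA stays clear, so KVM keeps taking stage-2 Access flag faults and resolving them in software instead of relying on the broken hardware updater. A compressed restatement as a standalone sketch (the function name is invented for illustration):

/* Illustrative restatement of the VTCR_EL2.HA decision in kvm_get_vtcr(). */
static inline u64 example_stage2_ha(u64 vtcr)
{
#ifdef CONFIG_ARM64_HW_AFDBM
	/* Only enable hardware Access flag management where HAFDBS is sane. */
	if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		vtcr |= VTCR_EL2_HA;
#endif
	return vtcr;
}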
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 99eb7e50b2f4..b21c84672bbf 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -79,6 +79,7 @@ WORKAROUND_2077057
WORKAROUND_2457168
WORKAROUND_2645198
WORKAROUND_2658417
+WORKAROUND_AMPERE_AC03_CPU_38
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE