Diffstat (limited to 'arch/arm64/kvm/sys_regs.c')

 arch/arm64/kvm/sys_regs.c | 55 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 47 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b29f72478a50..91053aa832d0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1757,7 +1757,8 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val &= ~ID_AA64ISAR2_EL1_WFxT;
break;
case SYS_ID_AA64ISAR3_EL1:
- val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;
+ val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE |
+ ID_AA64ISAR3_EL1_FAMINMAX;
break;
case SYS_ID_AA64MMFR2_EL1:
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
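Context for the hunk above: __kvm_read_sanitised_id_reg() treats ID_AA64ISAR3_EL1 as an allowlist, so only the named fields survive into the guest-visible value; adding ID_AA64ISAR3_EL1_LSFE is what lets FEAT_LSFE become visible at all. A minimal standalone sketch of that idiom follows (not part of the diff); the field placements are invented for illustration, not taken from the generated sysreg headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the generated ID_AA64ISAR3_EL1_* masks. */
#define FPRCVT_MASK	(0xfULL << 28)	/* assumed placement */
#define LSFE_MASK	(0xfULL << 16)	/* assumed placement */
#define FAMINMAX_MASK	(0xfULL << 4)	/* assumed placement */

/* Allowlist masking: every field not named reads as 0 ("absent"). */
static uint64_t sanitise_isar3(uint64_t host_val)
{
	return host_val & (FPRCVT_MASK | LSFE_MASK | FAMINMAX_MASK);
}

int main(void)
{
	uint64_t host = 0x0000000010011011ULL;	/* made-up host value */

	/* Bits outside the three fields are dropped from the guest view. */
	printf("guest view: 0x%016llx\n",
	       (unsigned long long)sanitise_isar3(host));
	return 0;
}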
@@ -1997,6 +1998,26 @@ static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
return val;
}
+/*
+ * Older versions of KVM erroneously claimed support for FEAT_DoubleLock in
+ * NV-enabled VMs on hardware that lacks the feature. Silently ignore a
+ * userspace-provided value that is consistent with that bug.
+ */
+static bool ignore_feat_doublelock(struct kvm_vcpu *vcpu, u64 val)
+{
+ u8 host, user;
+
+ if (!vcpu_has_nv(vcpu))
+ return false;
+
+ host = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock,
+ read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));
+ user = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, val);
+
+ return host == ID_AA64DFR0_EL1_DoubleLock_NI &&
+ user == ID_AA64DFR0_EL1_DoubleLock_IMP;
+}
+
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd,
u64 val)
@@ -2028,6 +2049,11 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
return -EINVAL;
+ if (ignore_feat_doublelock(vcpu, val)) {
+ val &= ~ID_AA64DFR0_EL1_DoubleLock;
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI);
+ }
+
return set_id_reg(vcpu, rd, val);
}
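The DoubleLock quirk only bites on the userspace restore path. Below is a minimal VMM-side sketch of that path (not part of the diff), assuming an arm64 host and an already-created vCPU fd; the helper name is hypothetical, while ID_AA64DFR0_EL1's encoding (op0=3, op1=0, CRn=0, CRm=5, op2=0) and the one-reg ioctls are the standard KVM userspace API:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* ID_AA64DFR0_EL1: op0=3, op1=0, CRn=0, CRm=5, op2=0. */
#define REG_ID_AA64DFR0_EL1	ARM64_SYS_REG(3, 0, 0, 5, 0)

/*
 * Write back a previously saved ID_AA64DFR0_EL1. With the quirk
 * handling above, a stale DoubleLock=IMP value saved from a buggy
 * host is quietly downgraded to NI for an NV vCPU instead of the
 * write failing with -EINVAL.
 */
static int restore_id_aa64dfr0(int vcpu_fd, uint64_t saved_val)
{
	struct kvm_one_reg reg = {
		.id   = REG_ID_AA64DFR0_EL1,
		.addr = (uint64_t)&saved_val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Without the quirk handling, the same write would fail on hosts lacking FEAT_DoubleLock, breaking migration from kernels that carried the bug.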
@@ -2148,16 +2174,29 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
return set_id_reg(vcpu, rd, user_val);
}
+/*
+ * Allow userspace to de-feature a stage-2 translation granule but prevent it
+ * from claiming the impossible.
+ */
+#define tgran2_val_allowed(tg, safe, user) \
+({ \
+ u8 __s = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, safe); \
+ u8 __u = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, user); \
+ \
+ __s == __u || __u == ID_AA64MMFR0_EL1_##tg##_NI; \
+})
+
static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, u64 user_val)
{
u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd);
- u64 tgran2_mask = ID_AA64MMFR0_EL1_TGRAN4_2_MASK |
- ID_AA64MMFR0_EL1_TGRAN16_2_MASK |
- ID_AA64MMFR0_EL1_TGRAN64_2_MASK;
- if (vcpu_has_nv(vcpu) &&
- ((sanitized_val & tgran2_mask) != (user_val & tgran2_mask)))
+ if (!vcpu_has_nv(vcpu))
+ return set_id_reg(vcpu, rd, user_val);
+
+ if (!tgran2_val_allowed(TGRAN4_2, sanitized_val, user_val) ||
+ !tgran2_val_allowed(TGRAN16_2, sanitized_val, user_val) ||
+ !tgran2_val_allowed(TGRAN64_2, sanitized_val, user_val))
return -EINVAL;
return set_id_reg(vcpu, rd, user_val);
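This replaces the old all-or-nothing comparison, under which any TGRAN*_2 mismatch was rejected, with a per-field rule: keep the host's sanitised value or downgrade the granule to "not implemented", nothing else. A standalone restatement of that rule (not part of the diff); the field shifts and the NI encoding (0b0001) are assumptions for the sketch, to be checked against the generated headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed 4-bit field helper and NI encoding; stand-ins for
 * SYS_FIELD_GET() and ID_AA64MMFR0_EL1_TGRANx_2_NI. */
#define TGRAN2_FIELD(val, shift)	((uint8_t)(((val) >> (shift)) & 0xf))
#define TGRAN2_NI			0x1

/* Mirror of tgran2_val_allowed(): accept a match or a downgrade to NI. */
static bool tgran2_ok(uint64_t safe, uint64_t user, unsigned int shift)
{
	uint8_t s = TGRAN2_FIELD(safe, shift);
	uint8_t u = TGRAN2_FIELD(user, shift);

	return s == u || u == TGRAN2_NI;
}

int main(void)
{
	uint64_t safe = 0x2ULL << 40;	/* host: granule supported */
	uint64_t user = 0x1ULL << 40;	/* user: downgraded to NI */

	printf("downgrade allowed: %d\n", tgran2_ok(safe, user, 40));
	printf("claiming support:  %d\n", tgran2_ok(0, 0x2ULL << 40, 40));
	return 0;
}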
@@ -3141,6 +3180,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64ISAR2_EL1_APA3 |
ID_AA64ISAR2_EL1_GPA3)),
ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
+ ID_AA64ISAR3_EL1_LSFE |
ID_AA64ISAR3_EL1_FAMINMAX)),
ID_UNALLOCATED(6,4),
ID_UNALLOCATED(6,5),
@@ -3152,8 +3192,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
~(ID_AA64MMFR0_EL1_RES0 |
ID_AA64MMFR0_EL1_ASIDBITS)),
ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
- ID_AA64MMFR1_EL1_HCX |
- ID_AA64MMFR1_EL1_TWED |
ID_AA64MMFR1_EL1_XNX |
ID_AA64MMFR1_EL1_VH |
ID_AA64MMFR1_EL1_VMIDBits)),
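Dropping ID_AA64MMFR1_EL1_HCX and ID_AA64MMFR1_EL1_TWED from this exclusion list is what makes the two fields userspace-writable: the entry builds the writable mask as the complement of RES0 plus the explicitly excluded fields, so removing a term widens the writable set. A toy illustration of that mask arithmetic (not part of the diff, bit placements invented for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in bit positions, not the generated masks. */
#define RES0_BITS	(1ULL << 63)
#define HCX_BIT		(0xfULL << 40)
#define TWED_BIT	(0xfULL << 32)
#define XNX_BIT		(0xfULL << 28)

int main(void)
{
	/* ID_WRITABLE() style: writable = ~(RES0 | excluded fields). */
	uint64_t old_writable = ~(RES0_BITS | HCX_BIT | TWED_BIT | XNX_BIT);
	uint64_t new_writable = ~(RES0_BITS | XNX_BIT);

	/* The difference is exactly the fields removed from the list. */
	printf("newly writable bits: 0x%016llx\n",
	       (unsigned long long)(new_writable & ~old_writable));
	return 0;
}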
@@ -3238,6 +3276,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
+ { SYS_DESC(SYS_PMSDSFR_EL1), undef_access },
/* PMBIDR_EL1 is not trapped */
{ PMU_SYS_REG(PMINTENSET_EL1),
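The new SYS_PMSDSFR_EL1 entry traps guest accesses to the SPE sampling data source filter register and routes them to undef_access(). Paraphrasing the handler defined elsewhere in this file (shape recalled from the source, not part of this diff), it simply injects an UNDEF into the guest:

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	/* The guest takes an Undefined Instruction exception. */
	kvm_inject_undefined(vcpu);
	return false;	/* not emulated; do not skip the instruction */
}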