Diffstat (limited to 'arch/arm64/kvm/sys_regs.c')
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 3393
1 file changed, 2744 insertions(+), 649 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2ca2973abe66..c8fd7c6a12a1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -12,11 +12,14 @@
#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
+#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <asm/arm_pmuv3.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
@@ -32,6 +35,7 @@
#include <trace/events/kvm.h>
#include "sys_regs.h"
+#include "vgic/vgic.h"
#include "trace.h"
@@ -45,44 +49,346 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
u64 val);
+static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ kvm_inject_undefined(vcpu);
+ return false;
+}
+
+static bool bad_trap(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *params,
+ const struct sys_reg_desc *r,
+ const char *msg)
+{
+ WARN_ONCE(1, "Unexpected %s\n", msg);
+ print_sys_reg_instr(params);
+ return undef_access(vcpu, params, r);
+}
+
static bool read_from_write_only(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
{
- WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
- print_sys_reg_instr(params);
- kvm_inject_undefined(vcpu);
- return false;
+ return bad_trap(vcpu, params, r,
+ "sys_reg read to write-only register");
}
static bool write_to_read_only(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
{
- WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
- print_sys_reg_instr(params);
- kvm_inject_undefined(vcpu);
- return false;
+ return bad_trap(vcpu, params, r,
+ "sys_reg write to read-only register");
+}
+
+enum sr_loc_attr {
+ SR_LOC_MEMORY = 0, /* Register definitely in memory */
+ SR_LOC_LOADED = BIT(0), /* Register on CPU, unless it cannot */
+ SR_LOC_MAPPED = BIT(1), /* Register in a different CPU register */
+ SR_LOC_XLATED = BIT(2), /* Register translated to fit another reg */
+ SR_LOC_SPECIAL = BIT(3), /* Demanding register, implies loaded */
+};
+
+struct sr_loc {
+ enum sr_loc_attr loc;
+ enum vcpu_sysreg map_reg;
+ u64 (*xlate)(u64);
+};
+
+static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
+ enum vcpu_sysreg reg)
+{
+ switch (reg) {
+ case SCTLR_EL1:
+ case CPACR_EL1:
+ case TTBR0_EL1:
+ case TTBR1_EL1:
+ case TCR_EL1:
+ case TCR2_EL1:
+ case PIR_EL1:
+ case PIRE0_EL1:
+ case POR_EL1:
+ case ESR_EL1:
+ case AFSR0_EL1:
+ case AFSR1_EL1:
+ case FAR_EL1:
+ case MAIR_EL1:
+ case VBAR_EL1:
+ case CONTEXTIDR_EL1:
+ case AMAIR_EL1:
+ case CNTKCTL_EL1:
+ case ELR_EL1:
+ case SPSR_EL1:
+ case ZCR_EL1:
+ case SCTLR2_EL1:
+ /*
+ * EL1 registers which have an ELx2 mapping are loaded if
+ * we're not in hypervisor context.
+ */
+ return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;
+
+ case TPIDR_EL0:
+ case TPIDRRO_EL0:
+ case TPIDR_EL1:
+ case PAR_EL1:
+ case DACR32_EL2:
+ case IFSR32_EL2:
+ case DBGVCR32_EL2:
+ /* These registers are always loaded, no matter what */
+ return SR_LOC_LOADED;
+
+ default:
+ /* Non-mapped EL2 registers are by definition in memory. */
+ return SR_LOC_MEMORY;
+ }
+}
+
+static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
+ enum vcpu_sysreg reg,
+ enum vcpu_sysreg map_reg,
+ u64 (*xlate)(u64),
+ struct sr_loc *loc)
+{
+ if (!is_hyp_ctxt(vcpu)) {
+ loc->loc = SR_LOC_MEMORY;
+ return;
+ }
+
+ loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
+ loc->map_reg = map_reg;
+
+ WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);
+
+ if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
+ loc->loc |= SR_LOC_XLATED;
+ loc->xlate = xlate;
+ }
}
-u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
+#define MAPPED_EL2_SYSREG(r, m, t) \
+ case r: { \
+ locate_mapped_el2_register(vcpu, r, m, t, loc); \
+ break; \
+ }
+
+static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
+ struct sr_loc *loc)
+{
+ if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
+ loc->loc = SR_LOC_MEMORY;
+ return;
+ }
+
+ switch (reg) {
+ MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
+ translate_sctlr_el2_to_sctlr_el1 );
+ MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
+ translate_cptr_el2_to_cpacr_el1 );
+ MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1,
+ translate_ttbr0_el2_to_ttbr0_el1 );
+ MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1, NULL );
+ MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1,
+ translate_tcr_el2_to_tcr_el1 );
+ MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1, NULL );
+ MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1, NULL );
+ MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1, NULL );
+ MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1, NULL );
+ MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1, NULL );
+ MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1, NULL );
+ MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1, NULL );
+ MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1, NULL );
+ MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1, NULL );
+ MAPPED_EL2_SYSREG(POR_EL2, POR_EL1, NULL );
+ MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
+ MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
+ MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
+ MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
+ MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
+ case CNTHCTL_EL2:
+ /* CNTHCTL_EL2 is super special, until we support NV2.1 */
+ loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
+ SR_LOC_SPECIAL : SR_LOC_MEMORY);
+ break;
+ default:
+ loc->loc = locate_direct_register(vcpu, reg);
+ }
+}
+
+static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
{
u64 val = 0x8badf00d8badf00d;
- if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
- __vcpu_read_sys_reg_from_cpu(reg, &val))
- return val;
+ switch (reg) {
+ case SCTLR_EL1: val = read_sysreg_s(SYS_SCTLR_EL12); break;
+ case CPACR_EL1: val = read_sysreg_s(SYS_CPACR_EL12); break;
+ case TTBR0_EL1: val = read_sysreg_s(SYS_TTBR0_EL12); break;
+ case TTBR1_EL1: val = read_sysreg_s(SYS_TTBR1_EL12); break;
+ case TCR_EL1: val = read_sysreg_s(SYS_TCR_EL12); break;
+ case TCR2_EL1: val = read_sysreg_s(SYS_TCR2_EL12); break;
+ case PIR_EL1: val = read_sysreg_s(SYS_PIR_EL12); break;
+ case PIRE0_EL1: val = read_sysreg_s(SYS_PIRE0_EL12); break;
+ case POR_EL1: val = read_sysreg_s(SYS_POR_EL12); break;
+ case ESR_EL1: val = read_sysreg_s(SYS_ESR_EL12); break;
+ case AFSR0_EL1: val = read_sysreg_s(SYS_AFSR0_EL12); break;
+ case AFSR1_EL1: val = read_sysreg_s(SYS_AFSR1_EL12); break;
+ case FAR_EL1: val = read_sysreg_s(SYS_FAR_EL12); break;
+ case MAIR_EL1: val = read_sysreg_s(SYS_MAIR_EL12); break;
+ case VBAR_EL1: val = read_sysreg_s(SYS_VBAR_EL12); break;
+ case CONTEXTIDR_EL1: val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
+ case AMAIR_EL1: val = read_sysreg_s(SYS_AMAIR_EL12); break;
+ case CNTKCTL_EL1: val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
+ case ELR_EL1: val = read_sysreg_s(SYS_ELR_EL12); break;
+ case SPSR_EL1: val = read_sysreg_s(SYS_SPSR_EL12); break;
+ case ZCR_EL1: val = read_sysreg_s(SYS_ZCR_EL12); break;
+ case SCTLR2_EL1: val = read_sysreg_s(SYS_SCTLR2_EL12); break;
+ case TPIDR_EL0: val = read_sysreg_s(SYS_TPIDR_EL0); break;
+ case TPIDRRO_EL0: val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
+ case TPIDR_EL1: val = read_sysreg_s(SYS_TPIDR_EL1); break;
+ case PAR_EL1: val = read_sysreg_par(); break;
+ case DACR32_EL2: val = read_sysreg_s(SYS_DACR32_EL2); break;
+ case IFSR32_EL2: val = read_sysreg_s(SYS_IFSR32_EL2); break;
+ case DBGVCR32_EL2: val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
+ default: WARN_ON_ONCE(1);
+ }
+
+ return val;
+}
+
+static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
+{
+ switch (reg) {
+ case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
+ case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
+ case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
+ case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
+ case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
+ case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
+ case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
+ case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
+ case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
+ case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
+ case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
+ case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
+ case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
+ case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
+ case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
+ case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
+ case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
+ case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
+ case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
+ case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
+ case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
+ case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
+ case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
+ case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
+ case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
+ case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
+ case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
+ case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
+ case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
+ default: WARN_ON_ONCE(1);
+ }
+}
+
+u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
+{
+ struct sr_loc loc = {};
+
+ locate_register(vcpu, reg, &loc);
+
+ WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
+
+ if (loc.loc & SR_LOC_SPECIAL) {
+ u64 val;
+
+ WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
+
+ /*
+ * CNTHCTL_EL2 requires some special treatment to account
+ * for the bits that can be set via CNTKCTL_EL1 when E2H==1.
+ */
+ switch (reg) {
+ case CNTHCTL_EL2:
+ val = read_sysreg_el1(SYS_CNTKCTL);
+ val &= CNTKCTL_VALID_BITS;
+ val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
+ return val;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ if (loc.loc & SR_LOC_LOADED) {
+ enum vcpu_sysreg map_reg = reg;
+
+ if (loc.loc & SR_LOC_MAPPED)
+ map_reg = loc.map_reg;
+
+ if (!(loc.loc & SR_LOC_XLATED)) {
+ u64 val = read_sr_from_cpu(map_reg);
+
+ if (reg >= __SANITISED_REG_START__)
+ val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
+
+ return val;
+ }
+ }
return __vcpu_sys_reg(vcpu, reg);
}
-void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
+void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
{
- if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
- __vcpu_write_sys_reg_to_cpu(val, reg))
- return;
+ struct sr_loc loc = {};
+
+ locate_register(vcpu, reg, &loc);
+
+ WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
+
+ if (loc.loc & SR_LOC_SPECIAL) {
+
+ WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
+
+ switch (reg) {
+ case CNTHCTL_EL2:
+ /*
+ * If E2H=1, some of the bits are backed by
+ * CNTKCTL_EL1, while the rest is kept in memory.
+ * Yes, this is fun stuff.
+ */
+ write_sysreg_el1(val, SYS_CNTKCTL);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ if (loc.loc & SR_LOC_LOADED) {
+ enum vcpu_sysreg map_reg = reg;
+ u64 xlated_val;
+
+ if (reg >= __SANITISED_REG_START__)
+ val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
+
+ if (loc.loc & SR_LOC_MAPPED)
+ map_reg = loc.map_reg;
+
+ if (loc.loc & SR_LOC_XLATED)
+ xlated_val = loc.xlate(val);
+ else
+ xlated_val = val;
- __vcpu_sys_reg(vcpu, reg) = val;
+ write_sr_to_cpu(map_reg, xlated_val);
+
+ /*
+ * Fall through to write the backing store anyway, which
+ * allows translated registers to be directly read without a
+ * reverse translation.
+ */
+ }
+
+ __vcpu_assign_sys_reg(vcpu, reg, val);
}
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
@@ -207,7 +513,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
* CPU left in the system, and certainly not from non-secure
* software).
*/
- if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
kvm_set_way_flush(vcpu);
return true;
@@ -217,10 +523,8 @@ static bool access_dcgsw(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- if (!kvm_has_mte(vcpu->kvm)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!kvm_has_mte(vcpu->kvm))
+ return undef_access(vcpu, p, r);
/* Treat MTE S/W ops as we treat the classic ones: with contempt */
return access_dcsw(vcpu, p, r);
@@ -301,6 +605,9 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
{
bool g1;
+ if (!kvm_has_gicv3(vcpu->kvm))
+ return undef_access(vcpu, p, r);
+
if (!p->is_write)
return read_from_write_only(vcpu, p, r);
@@ -344,10 +651,33 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
+ if (!kvm_has_gicv3(vcpu->kvm))
+ return undef_access(vcpu, p, r);
+
if (p->is_write)
return ignore_write(vcpu, p);
- p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+ if (p->Op1 == 4) { /* ICC_SRE_EL2 */
+ p->regval = KVM_ICC_SRE_EL2;
+ } else { /* ICC_SRE_EL1 */
+ p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+ }
+
+ return true;
+}
+
+static bool access_gic_dir(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (!kvm_has_gicv3(vcpu->kvm))
+ return undef_access(vcpu, p, r);
+
+ if (!p->is_write)
+ return undef_access(vcpu, p, r);
+
+ vgic_v3_deactivate(vcpu, p->regval);
+
return true;
}
@@ -361,14 +691,6 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
return read_zero(vcpu, p);
}
-static bool trap_undef(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *r)
-{
- kvm_inject_undefined(vcpu);
- return false;
-}
-
/*
* ARMv8.1 mandates at least a trivial LORegion implementation, where all the
* RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
@@ -379,13 +701,10 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
u32 sr = reg_to_encoding(r);
- if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
+ return undef_access(vcpu, p, r);
if (p->is_write && sr == SYS_LORID_EL1)
return write_to_read_only(vcpu, p, r);
@@ -397,17 +716,10 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 oslsr;
-
if (!p->is_write)
return read_from_write_only(vcpu, p, r);
- /* Forward the OSLK bit to OSLSR */
- oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
- if (p->regval & OSLAR_EL1_OSLK)
- oslsr |= OSLSR_EL1_OSLK;
-
- __vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
+ kvm_debug_handle_oslar(vcpu, p->regval);
return true;
}
@@ -432,7 +744,7 @@ static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
return -EINVAL;
- __vcpu_sys_reg(vcpu, rd->reg) = val;
+ __vcpu_assign_sys_reg(vcpu, rd->reg, val);
return 0;
}
@@ -448,43 +760,13 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
}
}
-/*
- * We want to avoid world-switching all the DBG registers all the
- * time:
- *
- * - If we've touched any debug register, it is likely that we're
- * going to touch more of them. It then makes sense to disable the
- * traps and start doing the save/restore dance
- * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
- * then mandatory to save/restore the registers, as the guest
- * depends on them.
- *
- * For this, we use a DIRTY bit, indicating the guest has modified the
- * debug registers, used as follow:
- *
- * On guest entry:
- * - If the dirty bit is set (because we're coming back from trapping),
- * disable the traps, save host registers, restore guest registers.
- * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
- * set the dirty bit, disable the traps, save host registers,
- * restore guest registers.
- * - Otherwise, enable the traps
- *
- * On guest exit:
- * - If the dirty bit is set, save guest registers, restore host
- * registers and clear the dirty bit. This ensure that the host can
- * now use the debug registers.
- */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
access_rw(vcpu, p, r);
- if (p->is_write)
- vcpu_set_flag(vcpu, DEBUG_DIRTY);
-
- trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
+ kvm_debug_set_guest_ownership(vcpu);
return true;
}
@@ -493,9 +775,6 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
*
* A 32 bit write to a debug register leaves the top bits alone
* A 32 bit read from a debug register only returns the bottom bits
- *
- * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
- * switches between host and guest values in future.
*/
static void reg_to_dbg(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
@@ -510,8 +789,6 @@ static void reg_to_dbg(struct kvm_vcpu *vcpu,
val &= ~mask;
val |= (p->regval & (mask >> shift)) << shift;
*dbg_reg = val;
-
- vcpu_set_flag(vcpu, DEBUG_DIRTY);
}
static void dbg_to_reg(struct kvm_vcpu *vcpu,
@@ -525,152 +802,79 @@ static void dbg_to_reg(struct kvm_vcpu *vcpu,
p->regval = (*dbg_reg & mask) >> shift;
}
-static bool trap_bvr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
-{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
-
- if (p->is_write)
- reg_to_dbg(vcpu, p, rd, dbg_reg);
- else
- dbg_to_reg(vcpu, p, rd, dbg_reg);
-
- trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
-
- return true;
-}
-
-static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 val)
+static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
- return 0;
-}
+ struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;
-static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 *val)
-{
- *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
- return 0;
+ switch (rd->Op2) {
+ case 0b100:
+ return &dbg->dbg_bvr[rd->CRm];
+ case 0b101:
+ return &dbg->dbg_bcr[rd->CRm];
+ case 0b110:
+ return &dbg->dbg_wvr[rd->CRm];
+ case 0b111:
+ return &dbg->dbg_wcr[rd->CRm];
+ default:
+ KVM_BUG_ON(1, vcpu->kvm);
+ return NULL;
+ }
}
-static u64 reset_bvr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
+static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
- return rd->val;
-}
+ u64 *reg = demux_wb_reg(vcpu, rd);
-static bool trap_bcr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
-{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+ if (!reg)
+ return false;
if (p->is_write)
- reg_to_dbg(vcpu, p, rd, dbg_reg);
+ reg_to_dbg(vcpu, p, rd, reg);
else
- dbg_to_reg(vcpu, p, rd, dbg_reg);
-
- trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+ dbg_to_reg(vcpu, p, rd, reg);
+ kvm_debug_set_guest_ownership(vcpu);
return true;
}
-static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 val)
+static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ u64 val)
{
- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
- return 0;
-}
+ u64 *reg = demux_wb_reg(vcpu, rd);
-static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 *val)
-{
- *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
- return 0;
-}
-
-static u64 reset_bcr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
-{
- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
- return rd->val;
-}
-
-static bool trap_wvr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
-{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
-
- if (p->is_write)
- reg_to_dbg(vcpu, p, rd, dbg_reg);
- else
- dbg_to_reg(vcpu, p, rd, dbg_reg);
-
- trace_trap_reg(__func__, rd->CRm, p->is_write,
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
-
- return true;
-}
-
-static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 val)
-{
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
- return 0;
-}
+ if (!reg)
+ return -EINVAL;
-static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 *val)
-{
- *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+ *reg = val;
return 0;
}
-static u64 reset_wvr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
-{
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
- return rd->val;
-}
-
-static bool trap_wcr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
+static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ u64 *val)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
-
- if (p->is_write)
- reg_to_dbg(vcpu, p, rd, dbg_reg);
- else
- dbg_to_reg(vcpu, p, rd, dbg_reg);
-
- trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+ u64 *reg = demux_wb_reg(vcpu, rd);
- return true;
-}
+ if (!reg)
+ return -EINVAL;
-static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 val)
-{
- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
+ *val = *reg;
return 0;
}
-static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 *val)
+static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
{
- *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
- return 0;
-}
+ u64 *reg = demux_wb_reg(vcpu, rd);
-static u64 reset_wcr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
-{
- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
+ /*
+ * Bail early if we couldn't find storage for the register; the
+ * KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
+ * being run.
+ */
+ if (!reg)
+ return 0;
+
+ *reg = rd->val;
return rd->val;
}
@@ -708,6 +912,12 @@ static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
return mpidr;
}
+static unsigned int hidden_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ return REG_HIDDEN;
+}
+
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
@@ -719,19 +929,14 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
- u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);
-
- /* No PMU available, any PMU reg may UNDEF... */
- if (!kvm_arm_support_pmu_v3())
- return 0;
+ u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
+ u8 n = vcpu->kvm->arch.nr_pmu_counters;
- n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
- n &= ARMV8_PMU_PMCR_N_MASK;
if (n)
mask |= GENMASK(n - 1, 0);
reset_unknown(vcpu, r);
- __vcpu_sys_reg(vcpu, r->reg) &= mask;
+ __vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
return __vcpu_sys_reg(vcpu, r->reg);
}
@@ -739,15 +944,19 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
reset_unknown(vcpu, r);
- __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
+ __vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
return __vcpu_sys_reg(vcpu, r->reg);
}
static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
+ /* This thing will UNDEF, who cares about the reset value? */
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return 0;
+
reset_unknown(vcpu, r);
- __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
+ __vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
return __vcpu_sys_reg(vcpu, r->reg);
}
@@ -755,25 +964,23 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
reset_unknown(vcpu, r);
- __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
+ __vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
return __vcpu_sys_reg(vcpu, r->reg);
}
static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
- u64 pmcr;
-
- /* No PMU available, PMCR_EL0 may UNDEF... */
- if (!kvm_arm_support_pmu_v3())
- return 0;
+ u64 pmcr = 0;
- /* Only preserve PMCR_EL0.N, and reset the rest to 0 */
- pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
if (!kvm_supports_32bit_el0())
pmcr |= ARMV8_PMU_PMCR_LC;
- __vcpu_sys_reg(vcpu, r->reg) = pmcr;
+ /*
+ * The value of the PMCR.N field is included when the
+ * vCPU register is read via kvm_vcpu_read_pmcr().
+ */
+ __vcpu_assign_sys_reg(vcpu, r->reg, pmcr);
return __vcpu_sys_reg(vcpu, r->reg);
}
@@ -822,7 +1029,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
* Only update writeable bits of PMCR (continuing into
* kvm_pmu_handle_pmcr() as well)
*/
- val = __vcpu_sys_reg(vcpu, PMCR_EL0);
+ val = kvm_vcpu_read_pmcr(vcpu);
val &= ~ARMV8_PMU_PMCR_MASK;
val |= p->regval & ARMV8_PMU_PMCR_MASK;
if (!kvm_supports_32bit_el0())
@@ -830,7 +1037,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
kvm_pmu_handle_pmcr(vcpu, val);
} else {
/* PMCR.P & PMCR.C are RAZ */
- val = __vcpu_sys_reg(vcpu, PMCR_EL0)
+ val = kvm_vcpu_read_pmcr(vcpu)
& ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
p->regval = val;
}
@@ -845,11 +1052,11 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return false;
if (p->is_write)
- __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
+ __vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
else
/* return PMSELR.SEL field */
p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
- & ARMV8_PMU_COUNTER_MASK;
+ & PMSELR_EL0_SEL_MASK;
return true;
}
@@ -879,8 +1086,8 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
u64 pmcr, val;
- pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
- val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+ pmcr = kvm_vcpu_read_pmcr(vcpu);
+ val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
kvm_inject_undefined(vcpu);
return false;
@@ -905,6 +1112,22 @@ static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
return 0;
}
+static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ u64 idx;
+
+ if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
+ /* PMCCNTR_EL0 */
+ idx = ARMV8_PMU_CYCLE_IDX;
+ else
+ /* PMEVCNTRn_EL0 */
+ idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+
+ kvm_pmu_set_counter_value_user(vcpu, idx, val);
+ return 0;
+}
+
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@@ -917,8 +1140,8 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
if (pmu_access_event_counter_el0_disabled(vcpu))
return false;
- idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
- & ARMV8_PMU_COUNTER_MASK;
+ idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
+ __vcpu_sys_reg(vcpu, PMSELR_EL0));
} else if (r->Op2 == 0) {
/* PMCCNTR_EL0 */
if (pmu_access_cycle_counter_el0_disabled(vcpu))
@@ -968,7 +1191,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
/* PMXEVTYPER_EL0 */
- idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
+ idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
reg = PMEVTYPER0_EL0 + idx;
} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
@@ -988,12 +1211,30 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
kvm_vcpu_pmu_restore_guest(vcpu);
} else {
- p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+ p->regval = __vcpu_sys_reg(vcpu, reg);
}
return true;
}
+static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
+{
+ u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
+
+ __vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
+ return 0;
+}
+
+static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
+{
+ u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
+
+ *val = __vcpu_sys_reg(vcpu, r->reg) & mask;
+ return 0;
+}
+
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
@@ -1002,19 +1243,17 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (pmu_access_el0_disabled(vcpu))
return false;
- mask = kvm_pmu_valid_counter_mask(vcpu);
+ mask = kvm_pmu_accessible_counter_mask(vcpu);
if (p->is_write) {
val = p->regval & mask;
- if (r->Op2 & 0x1) {
+ if (r->Op2 & 0x1)
/* accessing PMCNTENSET_EL0 */
- __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
- kvm_pmu_enable_counter_mask(vcpu, val);
- kvm_vcpu_pmu_restore_guest(vcpu);
- } else {
+ __vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
+ else
/* accessing PMCNTENCLR_EL0 */
- __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
- kvm_pmu_disable_counter_mask(vcpu, val);
- }
+ __vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
+
+ kvm_pmu_reprogram_counter_mask(vcpu, val);
} else {
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
}
@@ -1025,7 +1264,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
if (check_pmu_access_disabled(vcpu, 0))
return false;
@@ -1035,10 +1274,10 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (r->Op2 & 0x1)
/* accessing PMINTENSET_EL1 */
- __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
+ __vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
else
/* accessing PMINTENCLR_EL1 */
- __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+ __vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
} else {
p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
}
@@ -1049,7 +1288,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
if (pmu_access_el0_disabled(vcpu))
return false;
@@ -1057,10 +1296,10 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (p->is_write) {
if (r->CRm & 0x2)
/* accessing PMOVSSET_EL0 */
- __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
+ __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
else
/* accessing PMOVSCLR_EL0 */
- __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+ __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
} else {
p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
}
@@ -1079,7 +1318,7 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (pmu_write_swinc_el0_disabled(vcpu))
return false;
- mask = kvm_pmu_valid_counter_mask(vcpu);
+ mask = kvm_pmu_accessible_counter_mask(vcpu);
kvm_pmu_software_increment(vcpu, p->regval & mask);
return true;
}
@@ -1088,13 +1327,11 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
- if (!vcpu_mode_priv(vcpu)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!vcpu_mode_priv(vcpu))
+ return undef_access(vcpu, p, r);
- __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
- p->regval & ARMV8_PMU_USERENR_MASK;
+ __vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
+ (p->regval & ARMV8_PMU_USERENR_MASK));
} else {
p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
& ARMV8_PMU_USERENR_MASK;
@@ -1103,16 +1340,68 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 *val)
+{
+ *val = kvm_vcpu_read_pmcr(vcpu);
+ return 0;
+}
+
+static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ /*
+ * The vCPU can't have more counters than the PMU hardware
+ * implements. Ignore this error to maintain compatibility
+ * with the existing KVM behavior.
+ */
+ if (!kvm_vm_has_ran_once(kvm) &&
+ !vcpu_has_nv(vcpu) &&
+ new_n <= kvm_arm_pmu_get_max_counters(kvm))
+ kvm->arch.nr_pmu_counters = new_n;
+
+ mutex_unlock(&kvm->arch.config_lock);
+
+ /*
+ * Ignore writes to RES0 bits, read only bits that are cleared on
+ * vCPU reset, and writable bits that KVM doesn't support yet.
+ * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
+ * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
+ * But, we leave the bit as it is here, as the vCPU's PMUver might
+ * be changed later (NOTE: the bit will be cleared on first vCPU run
+ * if necessary).
+ */
+ val &= ARMV8_PMU_PMCR_MASK;
+
+ /* The LC bit is RES1 when AArch32 is not supported */
+ if (!kvm_supports_32bit_el0())
+ val |= ARMV8_PMU_PMCR_LC;
+
+ __vcpu_assign_sys_reg(vcpu, r->reg, val);
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
+ return 0;
+}
+
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
- trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
+ trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
+ get_dbg_wb_reg, set_dbg_wb_reg }, \
{ SYS_DESC(SYS_DBGBCRn_EL1(n)), \
- trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
+ trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
+ get_dbg_wb_reg, set_dbg_wb_reg }, \
{ SYS_DESC(SYS_DBGWVRn_EL1(n)), \
- trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
+ trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
+ get_dbg_wb_reg, set_dbg_wb_reg }, \
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
- trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
+ trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
+ get_dbg_wb_reg, set_dbg_wb_reg }
#define PMU_SYS_REG(name) \
SYS_DESC(SYS_##name), .reset = reset_pmu_reg, \
@@ -1122,6 +1411,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
#define PMU_PMEVCNTR_EL0(n) \
{ PMU_SYS_REG(PMEVCNTRn_EL0(n)), \
.reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \
+ .set_user = set_pmu_evcntr, \
.access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
@@ -1130,14 +1420,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
.reset = reset_pmevtyper, \
.access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
-static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
- const struct sys_reg_desc *r)
-{
- kvm_inject_undefined(vcpu);
-
- return false;
-}
-
/* Macro to expand the AMU counter and type registers*/
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
@@ -1174,30 +1456,149 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
switch (reg) {
case SYS_CNTP_TVAL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HPTIMER;
+ else
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+
+ case SYS_CNTV_TVAL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HVTIMER;
+ else
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+
case SYS_AARCH32_CNTP_TVAL:
+ case SYS_CNTP_TVAL_EL02:
tmr = TIMER_PTIMER;
treg = TIMER_REG_TVAL;
break;
+
+ case SYS_CNTV_TVAL_EL02:
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+
+ case SYS_CNTHP_TVAL_EL2:
+ tmr = TIMER_HPTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+
+ case SYS_CNTHV_TVAL_EL2:
+ tmr = TIMER_HVTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+
case SYS_CNTP_CTL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HPTIMER;
+ else
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+
+ case SYS_CNTV_CTL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HVTIMER;
+ else
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+
case SYS_AARCH32_CNTP_CTL:
+ case SYS_CNTP_CTL_EL02:
tmr = TIMER_PTIMER;
treg = TIMER_REG_CTL;
break;
+
+ case SYS_CNTV_CTL_EL02:
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+
+ case SYS_CNTHP_CTL_EL2:
+ tmr = TIMER_HPTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+
+ case SYS_CNTHV_CTL_EL2:
+ tmr = TIMER_HVTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+
case SYS_CNTP_CVAL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HPTIMER;
+ else
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+
+ case SYS_CNTV_CVAL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HVTIMER;
+ else
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+
case SYS_AARCH32_CNTP_CVAL:
+ case SYS_CNTP_CVAL_EL02:
tmr = TIMER_PTIMER;
treg = TIMER_REG_CVAL;
break;
+
+ case SYS_CNTV_CVAL_EL02:
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+
+ case SYS_CNTHP_CVAL_EL2:
+ tmr = TIMER_HPTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+
+ case SYS_CNTHV_CVAL_EL2:
+ tmr = TIMER_HVTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+
case SYS_CNTPCT_EL0:
case SYS_CNTPCTSS_EL0:
+ if (is_hyp_ctxt(vcpu))
+ tmr = TIMER_HPTIMER;
+ else
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_CNT;
+ break;
+
case SYS_AARCH32_CNTPCT:
+ case SYS_AARCH32_CNTPCTSS:
tmr = TIMER_PTIMER;
treg = TIMER_REG_CNT;
break;
+
+ case SYS_CNTVCT_EL0:
+ case SYS_CNTVCTSS_EL0:
+ if (is_hyp_ctxt(vcpu))
+ tmr = TIMER_HVTIMER;
+ else
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CNT;
+ break;
+
+ case SYS_AARCH32_CNTVCT:
+ case SYS_AARCH32_CNTVCTSS:
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CNT;
+ break;
+
default:
print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
- kvm_inject_undefined(vcpu);
- return false;
+ return undef_access(vcpu, p, r);
}
if (p->is_write)
@@ -1208,6 +1609,49 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
return true;
}
+static int arch_timer_set_user(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ u64 val)
+{
+ switch (reg_to_encoding(rd)) {
+ case SYS_CNTV_CTL_EL0:
+ case SYS_CNTP_CTL_EL0:
+ case SYS_CNTHV_CTL_EL2:
+ case SYS_CNTHP_CTL_EL2:
+ val &= ~ARCH_TIMER_CTRL_IT_STAT;
+ break;
+ case SYS_CNTVCT_EL0:
+ if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
+ timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read() - val);
+ return 0;
+ case SYS_CNTPCT_EL0:
+ if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
+ timer_set_offset(vcpu_ptimer(vcpu), kvm_phys_timer_read() - val);
+ return 0;
+ }
+
+ __vcpu_assign_sys_reg(vcpu, rd->reg, val);
+ return 0;
+}
+
+static int arch_timer_get_user(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ u64 *val)
+{
+ switch (reg_to_encoding(rd)) {
+ case SYS_CNTVCT_EL0:
+ *val = kvm_phys_timer_read() - timer_get_offset(vcpu_vtimer(vcpu));
+ break;
+ case SYS_CNTPCT_EL0:
+ *val = kvm_phys_timer_read() - timer_get_offset(vcpu_ptimer(vcpu));
+ break;
+ default:
+ *val = __vcpu_sys_reg(vcpu, rd->reg);
+ }
+
+ return 0;
+}
+
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
s64 new, s64 cur)
{
@@ -1216,8 +1660,14 @@ static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
/* Some features have different safe value type in KVM than host features */
switch (id) {
case SYS_ID_AA64DFR0_EL1:
- if (kvm_ftr.shift == ID_AA64DFR0_EL1_PMUVer_SHIFT)
+ switch (kvm_ftr.shift) {
+ case ID_AA64DFR0_EL1_PMUVer_SHIFT:
+ kvm_ftr.type = FTR_LOWER_SAFE;
+ break;
+ case ID_AA64DFR0_EL1_DebugVer_SHIFT:
kvm_ftr.type = FTR_LOWER_SAFE;
+ break;
+ }
break;
case SYS_ID_DFR0_EL1:
if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
@@ -1228,7 +1678,7 @@ static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}
-/**
+/*
* arm64_check_features() - Check if a feature register value constitutes
* a subset of features indicated by the idreg's KVM sanitised limit.
*
@@ -1306,6 +1756,10 @@ static u8 pmuver_to_perfmon(u8 pmuver)
}
}
+static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
+static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
+static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
+
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
@@ -1319,35 +1773,58 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val = read_sanitised_ftr_reg(id);
switch (id) {
+ case SYS_ID_AA64DFR0_EL1:
+ val = sanitise_id_aa64dfr0_el1(vcpu, val);
+ break;
+ case SYS_ID_AA64PFR0_EL1:
+ val = sanitise_id_aa64pfr0_el1(vcpu, val);
+ break;
case SYS_ID_AA64PFR1_EL1:
- if (!kvm_has_mte(vcpu->kvm))
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
+ val = sanitise_id_aa64pfr1_el1(vcpu, val);
+ break;
+ case SYS_ID_AA64PFR2_EL1:
+ val &= ID_AA64PFR2_EL1_FPMR |
+ (kvm_has_mte(vcpu->kvm) ?
+ ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY :
+ 0);
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
- val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
+ val &= ~(ID_AA64ISAR1_EL1_APA |
+ ID_AA64ISAR1_EL1_API |
+ ID_AA64ISAR1_EL1_GPA |
+ ID_AA64ISAR1_EL1_GPI);
break;
case SYS_ID_AA64ISAR2_EL1:
if (!vcpu_has_ptrauth(vcpu))
- val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
- ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
- if (!cpus_have_final_cap(ARM64_HAS_WFXT))
- val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
- val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
+ val &= ~(ID_AA64ISAR2_EL1_APA3 |
+ ID_AA64ISAR2_EL1_GPA3);
+ if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
+ has_broken_cntvoff())
+ val &= ~ID_AA64ISAR2_EL1_WFxT;
+ break;
+ case SYS_ID_AA64ISAR3_EL1:
+ val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE |
+ ID_AA64ISAR3_EL1_FAMINMAX;
break;
case SYS_ID_AA64MMFR2_EL1:
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
+ val &= ~ID_AA64MMFR2_EL1_NV;
+ break;
+ case SYS_ID_AA64MMFR3_EL1:
+ val &= ID_AA64MMFR3_EL1_TCRX |
+ ID_AA64MMFR3_EL1_SCTLRX |
+ ID_AA64MMFR3_EL1_S1POE |
+ ID_AA64MMFR3_EL1_S1PIE;
break;
case SYS_ID_MMFR4_EL1:
- val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
+ val &= ~ID_MMFR4_EL1_CCIDX;
break;
}
+ if (vcpu_has_nv(vcpu))
+ val = limit_nv_id_reg(vcpu->kvm, id, val);
+
return val;
}
@@ -1359,18 +1836,51 @@ static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
- return IDREG(vcpu->kvm, reg_to_encoding(r));
+ return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
+}
+
+static bool is_feature_id_reg(u32 encoding)
+{
+ return (sys_reg_Op0(encoding) == 3 &&
+ (sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
+ sys_reg_CRn(encoding) == 0 &&
+ sys_reg_CRm(encoding) <= 7);
}
/*
* Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
- * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
+ * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
+ * registers KVM maintains on a per-VM basis.
+ *
+ * Additionally, the implementation ID registers and CTR_EL0 are handled as
+ * per-VM registers.
*/
-static inline bool is_id_reg(u32 id)
+static inline bool is_vm_ftr_id_reg(u32 id)
+{
+ switch (id) {
+ case SYS_CTR_EL0:
+ case SYS_MIDR_EL1:
+ case SYS_REVIDR_EL1:
+ case SYS_AIDR_EL1:
+ return true;
+ default:
+ return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
+ sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
+ sys_reg_CRm(id) < 8);
+
+ }
+}
+
+static inline bool is_vcpu_ftr_id_reg(u32 id)
+{
+ return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
+}
+
+static inline bool is_aa32_id_reg(u32 id)
{
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
- sys_reg_CRm(id) < 8);
+ sys_reg_CRm(id) <= 3);
}
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
@@ -1418,8 +1928,6 @@ static bool access_id_reg(struct kvm_vcpu *vcpu,
return write_to_read_only(vcpu, p, r);
p->regval = read_id_reg(vcpu, r);
- if (vcpu_has_nv(vcpu))
- access_nested_id_reg(vcpu, p, r);
return true;
}
@@ -1434,11 +1942,26 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
-static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
+static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
{
- u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+ if (kvm_has_fpmr(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
+{
if (!vcpu_has_sve(vcpu))
val &= ~ID_AA64PFR0_EL1_SVE_MASK;
@@ -1459,24 +1982,51 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
}
- if (kvm_vgic_global_state.type == VGIC_V3) {
+ if (vgic_is_v3(vcpu->kvm)) {
val &= ~ID_AA64PFR0_EL1_GIC_MASK;
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
}
val &= ~ID_AA64PFR0_EL1_AMU_MASK;
+ /*
+ * MPAM is disabled by default as KVM also needs a set of PARTID to
+ * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
+ * older kernels let the guest see the ID bit.
+ */
+ val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
+
return val;
}
-static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
+static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
{
- u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+ u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
- /* Limit debug to ARMv8.0 */
- val &= ~ID_AA64DFR0_EL1_DebugVer_MASK;
- val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DebugVer, IMP);
+ if (!kvm_has_mte(vcpu->kvm)) {
+ val &= ~ID_AA64PFR1_EL1_MTE;
+ val &= ~ID_AA64PFR1_EL1_MTE_frac;
+ }
+
+ if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
+ SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
+ val &= ~ID_AA64PFR1_EL1_RAS_frac;
+
+ val &= ~ID_AA64PFR1_EL1_SME;
+ val &= ~ID_AA64PFR1_EL1_RNDR_trap;
+ val &= ~ID_AA64PFR1_EL1_NMI;
+ val &= ~ID_AA64PFR1_EL1_GCS;
+ val &= ~ID_AA64PFR1_EL1_THE;
+ val &= ~ID_AA64PFR1_EL1_MTEX;
+ val &= ~ID_AA64PFR1_EL1_PFAR;
+ val &= ~ID_AA64PFR1_EL1_MPAM_frac;
+
+ return val;
+}
+
+static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
+{
+ val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
/*
* Only initialize the PMU version if the vCPU was configured with one.
@@ -1489,13 +2039,37 @@ static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
/* Hide SPE from guests */
val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
+ /* Hide BRBE from guests */
+ val &= ~ID_AA64DFR0_EL1_BRBE_MASK;
+
return val;
}
+/*
+ * Older versions of KVM erroneously claim support for FEAT_DoubleLock with
+ * NV-enabled VMs on hardware that doesn't support it. Silently ignore the
+ * incorrect value if it is consistent with that bug.
+ */
+static bool ignore_feat_doublelock(struct kvm_vcpu *vcpu, u64 val)
+{
+ u8 host, user;
+
+ if (!vcpu_has_nv(vcpu))
+ return false;
+
+ host = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock,
+ read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));
+ user = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, val);
+
+ return host == ID_AA64DFR0_EL1_DoubleLock_NI &&
+ user == ID_AA64DFR0_EL1_DoubleLock_IMP;
+}
+
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd,
u64 val)
{
+ u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
/*
@@ -1515,18 +2089,34 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
+ /*
+ * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
+ * nonzero minimum safe value.
+ */
+ if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
+ return -EINVAL;
+
+ if (ignore_feat_doublelock(vcpu, val)) {
+ val &= ~ID_AA64DFR0_EL1_DoubleLock;
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI);
+ }
+
return set_id_reg(vcpu, rd, val);
}
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+ u8 perfmon;
u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
val &= ~ID_DFR0_EL1_PerfMon_MASK;
- if (kvm_vcpu_has_pmu(vcpu))
+ if (kvm_vcpu_has_pmu(vcpu)) {
+ perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
+ }
+
+ val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
return val;
}
@@ -1536,6 +2126,7 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
u64 val)
{
u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
+ u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);
if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
val &= ~ID_DFR0_EL1_PerfMon_MASK;
@@ -1551,9 +2142,157 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
return -EINVAL;
+ if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
+ return -EINVAL;
+
return set_id_reg(vcpu, rd, val);
}
+static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, u64 user_val)
+{
+ u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+ u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
+
+ /*
+ * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
+ * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
+ * guests, but didn't add trap handling. KVM doesn't support MPAM and
+ * always returns an UNDEF for these registers. The guest must see 0
+ * for this field.
+ *
+ * But KVM must also accept values from user-space that were provided
+ * by KVM. On CPUs that support MPAM, permit user-space to write
+ * the sanitized value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
+ */
+ if ((hw_val & mpam_mask) == (user_val & mpam_mask))
+ user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
+
+ /* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
+ if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
+ !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
+ (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
+ return -EINVAL;
+
+ /*
+ * If we are running on a GICv5 host and support FEAT_GCIE_LEGACY, then
+ * we support GICv3. Fail attempts to do anything but set that to IMP.
+ */
+ if (vgic_is_v3_compat(vcpu->kvm) &&
+ FIELD_GET(ID_AA64PFR0_EL1_GIC_MASK, user_val) != ID_AA64PFR0_EL1_GIC_IMP)
+ return -EINVAL;
+
+ return set_id_reg(vcpu, rd, user_val);
+}
+
+static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, u64 user_val)
+{
+ u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+ u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
+ u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
+ u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
+ u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);
+
+ /* See set_id_aa64pfr0_el1 for comment about MPAM */
+ if ((hw_val & mpam_mask) == (user_val & mpam_mask))
+ user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
+
+ /*
+ * Previously MTE_frac was hidden from the guest. However, if the
+ * hardware supports MTE2 but not MTE_ASYM_FAULT, then a value
+ * of 0 for this field indicates that the hardware supports
+ * MTE_ASYNC, whereas 0xf indicates MTE_ASYNC is not supported.
+ *
+ * As KVM must accept values from user-space that were provided by
+ * KVM, when ID_AA64PFR1_EL1.MTE is 2 allow user-space to set
+ * ID_AA64PFR1_EL1.MTE_frac to 0. However, ignore it to avoid
+ * incorrectly claiming hardware support for MTE_ASYNC in the
+ * guest.
+ */
+
+ if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
+ hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
+ user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
+ user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
+ user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
+ }
+
+ return set_id_reg(vcpu, rd, user_val);
+}
+
+/*
+ * Allow userspace to de-feature a stage-2 translation granule but prevent it
+ * from claiming the impossible.
+ */
+#define tgran2_val_allowed(tg, safe, user) \
+({ \
+ u8 __s = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, safe); \
+ u8 __u = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, user); \
+ \
+ __s == __u || __u == ID_AA64MMFR0_EL1_##tg##_NI; \
+})
+
+static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, u64 user_val)
+{
+ u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd);
+
+ if (!vcpu_has_nv(vcpu))
+ return set_id_reg(vcpu, rd, user_val);
+
+ if (!tgran2_val_allowed(TGRAN4_2, sanitized_val, user_val) ||
+ !tgran2_val_allowed(TGRAN16_2, sanitized_val, user_val) ||
+ !tgran2_val_allowed(TGRAN64_2, sanitized_val, user_val))
+ return -EINVAL;
+
+ return set_id_reg(vcpu, rd, user_val);
+}
+
+static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, u64 user_val)
+{
+ u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
+ u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK;
+
+ /*
+ * We made the mistake to expose the now deprecated NV field,
+ * so allow userspace to write it, but silently ignore it.
+ */
+ if ((hw_val & nv_mask) == (user_val & nv_mask))
+ user_val &= ~nv_mask;
+
+ return set_id_reg(vcpu, rd, user_val);
+}
+
+static int set_ctr_el0(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, u64 user_val)
+{
+ u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val);
+
+ /*
+ * Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved.
+ * Hence only allow to set VIPT(0b10) or PIPT(0b11) for L1Ip based
+ * on what hardware reports.
+ *
+ * Using a VIPT software model on PIPT will lead to over invalidation,
+ * but still correct. Hence, we can allow downgrading PIPT to VIPT,
+ * but not the other way around. This is handled via arm64_ftr_safe_value()
+ * as CTR_EL0 ftr_bits has L1Ip field with type FTR_EXACT and safe value
+ * set as VIPT.
+ */
+ switch (user_L1Ip) {
+ case CTR_EL0_L1Ip_RESERVED_VPIPT:
+ case CTR_EL0_L1Ip_RESERVED_AIVIVT:
+ return -EINVAL;
+ case CTR_EL0_L1Ip_VIPT:
+ case CTR_EL0_L1Ip_PIPT:
+ return set_id_reg(vcpu, rd, user_val);
+ default:
+ return -ENOENT;
+ }
+}
+
/*
* cpufeature ID register user accessors
*
@@ -1604,7 +2343,7 @@ static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
ret = arm64_check_features(vcpu, rd, val);
if (!ret)
- IDREG(vcpu->kvm, id) = val;
+ kvm_set_vm_id_reg(vcpu->kvm, id, val);
mutex_unlock(&vcpu->kvm->arch.config_lock);
@@ -1620,6 +2359,18 @@ static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return ret;
}
+void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
+{
+ u64 *p = __vm_id_reg(&kvm->arch, reg);
+
+ lockdep_assert_held(&kvm->arch.config_lock);
+
+ if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
+ return;
+
+ *p = val;
+}
+
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
u64 *val)
{
@@ -1639,7 +2390,7 @@ static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (p->is_write)
return write_to_read_only(vcpu, p, r);
- p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
+ p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
return true;
}
@@ -1702,9 +2453,9 @@ static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
* one cache line.
*/
if (kvm_has_mte(vcpu->kvm))
- clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);
+ clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
- __vcpu_sys_reg(vcpu, r->reg) = clidr;
+ __vcpu_assign_sys_reg(vcpu, r->reg, clidr);
return __vcpu_sys_reg(vcpu, r->reg);
}
@@ -1718,7 +2469,7 @@ static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
return -EINVAL;
- __vcpu_sys_reg(vcpu, rd->reg) = val;
+ __vcpu_assign_sys_reg(vcpu, rd->reg, val);
return 0;
}
@@ -1777,35 +2528,60 @@ static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
-#define EL2_REG(name, acc, rst, v) { \
- SYS_DESC(SYS_##name), \
- .access = acc, \
- .reset = rst, \
- .reg = name, \
- .visibility = el2_visibility, \
- .val = v, \
+static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ /*
+ * We really shouldn't be here, and this is likely the result
+ * of a misconfigured trap, as this register should target the
+ * VNCR page, and nothing else.
+ */
+ return bad_trap(vcpu, p, r,
+ "trap of VNCR-backed register");
}
-/*
- * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
- * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
- * handling traps. Given that, they are always hidden from userspace.
- */
-static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
+static bool bad_redir_trap(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
{
- return REG_HIDDEN_USER;
+ /*
+ * We really shouldn't be here, and this is likely the result
+ * of a misconfigured trap, as this register should target the
+ * corresponding EL1, and nothing else.
+ */
+ return bad_trap(vcpu, p, r,
+ "trap of EL2 register redirected to EL1");
}
-#define EL12_REG(name, acc, rst, v) { \
- SYS_DESC(SYS_##name##_EL12), \
+#define SYS_REG_USER_FILTER(name, acc, rst, v, gu, su, filter) { \
+ SYS_DESC(SYS_##name), \
.access = acc, \
.reset = rst, \
- .reg = name##_EL1, \
+ .reg = name, \
+ .get_user = gu, \
+ .set_user = su, \
+ .visibility = filter, \
.val = v, \
- .visibility = elx2_visibility, \
}
+#define EL2_REG_FILTERED(name, acc, rst, v, filter) \
+ SYS_REG_USER_FILTER(name, acc, rst, v, NULL, NULL, filter)
+
+#define EL2_REG(name, acc, rst, v) \
+ EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)
+
+#define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v)
+#define EL2_REG_VNCR_FILT(name, vis) \
+ EL2_REG_FILTERED(name, bad_vncr_trap, reset_val, 0, vis)
+#define EL2_REG_VNCR_GICv3(name) \
+ EL2_REG_VNCR_FILT(name, hidden_visibility)
+#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
+
+#define TIMER_REG(name, vis) \
+ SYS_REG_USER_FILTER(name, access_arch_timer, reset_val, 0, \
+ arch_timer_get_user, arch_timer_set_user, vis)
+
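To make the macro layering above concrete, this is roughly what one descriptor used later in the table, EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0), expands to (hand-expanded for illustration, not generated output):

/* EL2_REG -> EL2_REG_FILTERED(..., el2_visibility)                */
/*         -> SYS_REG_USER_FILTER(..., NULL, NULL, el2_visibility) */
{
	SYS_DESC(SYS_MDCR_EL2),
	.access     = access_mdcr,
	.reset      = reset_mdcr,
	.reg        = MDCR_EL2,
	.get_user   = NULL,
	.set_user   = NULL,
	.visibility = el2_visibility,
	.val        = 0,
}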
/*
* Since reset() callback and field val are not used for idregs, they will be
* used for specific purposes for idregs.
@@ -1817,26 +2593,45 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
* from userspace.
*/
-/* sys_reg_desc initialiser for known cpufeature ID registers */
-#define ID_SANITISED(name) { \
- SYS_DESC(SYS_##name), \
+#define ID_DESC_DEFAULT_CALLBACKS \
.access = access_id_reg, \
.get_user = get_id_reg, \
.set_user = set_id_reg, \
.visibility = id_visibility, \
- .reset = kvm_read_sanitised_id_reg, \
+ .reset = kvm_read_sanitised_id_reg
+
+#define ID_DESC(name) \
+ SYS_DESC(SYS_##name), \
+ ID_DESC_DEFAULT_CALLBACKS
+
+/* sys_reg_desc initialiser for known cpufeature ID registers */
+#define ID_SANITISED(name) { \
+ ID_DESC(name), \
.val = 0, \
}
-/* sys_reg_desc initialiser for known cpufeature ID registers */
-#define AA32_ID_SANITISED(name) { \
- SYS_DESC(SYS_##name), \
- .access = access_id_reg, \
- .get_user = get_id_reg, \
- .set_user = set_id_reg, \
+/* sys_reg_desc initialiser for writable ID registers */
+#define ID_WRITABLE(name, mask) { \
+ ID_DESC(name), \
+ .val = mask, \
+}
+
+/*
+ * 32bit ID regs are fully writable when the guest is 32bit
+ * capable. Nothing in the KVM code should rely on 32bit features
+ * anyway, only 64bit, so let the VMM do its worst.
+ */
+#define AA32_ID_WRITABLE(name) { \
+ ID_DESC(name), \
.visibility = aa32_id_visibility, \
- .reset = kvm_read_sanitised_id_reg, \
- .val = 0, \
+ .val = GENMASK(31, 0), \
+}
+
+/* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
+#define ID_FILTERED(sysreg, name, mask) { \
+ ID_DESC(sysreg), \
+ .set_user = set_##name, \
+ .val = (mask), \
}
/*
@@ -1845,12 +2640,10 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
* (1 <= crm < 8, 0 <= Op2 < 8).
*/
#define ID_UNALLOCATED(crm, op2) { \
+ .name = "S3_0_0_" #crm "_" #op2, \
Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
- .access = access_id_reg, \
- .get_user = get_id_reg, \
- .set_user = set_id_reg, \
+ ID_DESC_DEFAULT_CALLBACKS, \
.visibility = raz_visibility, \
- .reset = kvm_read_sanitised_id_reg, \
.val = 0, \
}
@@ -1860,12 +2653,8 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
* RAZ for the guest.
*/
#define ID_HIDDEN(name) { \
- SYS_DESC(SYS_##name), \
- .access = access_id_reg, \
- .get_user = get_id_reg, \
- .set_user = set_id_reg, \
+ ID_DESC(name), \
.visibility = raz_visibility, \
- .reset = kvm_read_sanitised_id_reg, \
.val = 0, \
}
@@ -1874,7 +2663,7 @@ static bool access_sp_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
if (p->is_write)
- __vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
+ __vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
else
p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
@@ -1898,13 +2687,407 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
if (p->is_write)
- __vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
+ __vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
else
p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
return true;
}
+static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ __vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
+ else
+ p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
+
+ return true;
+}
+
+static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ u64 val = r->val;
+
+ if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
+ val |= HCR_E2H;
+
+ __vcpu_assign_sys_reg(vcpu, r->reg, val);
+
+ return __vcpu_sys_reg(vcpu, r->reg);
+}
+
+static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ unsigned int (*fn)(const struct kvm_vcpu *,
+ const struct sys_reg_desc *))
+{
+ return el2_visibility(vcpu, rd) ?: fn(vcpu, rd);
+}
+
+static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ return __el2_visibility(vcpu, rd, sve_visibility);
+}
+
+static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (el2_visibility(vcpu, rd) == 0 &&
+ kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int sctlr2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_sctlr2(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int sctlr2_el2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ return __el2_visibility(vcpu, rd, sctlr2_visibility);
+}
+
+static bool access_zcr_el2(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ unsigned int vq;
+
+ if (guest_hyp_sve_traps_enabled(vcpu)) {
+ kvm_inject_nested_sve_trap(vcpu);
+ return false;
+ }
+
+ if (!p->is_write) {
+ p->regval = __vcpu_sys_reg(vcpu, ZCR_EL2);
+ return true;
+ }
+
+ vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
+ vq = min(vq, vcpu_sve_max_vq(vcpu));
+ __vcpu_assign_sys_reg(vcpu, ZCR_EL2, vq - 1);
+ return true;
+}
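
A worked example of the vector-length clamping above, with numbers assumed for illustration:

/*
 * guest writes ZCR_EL2 with LEN = 7  -> vq = 7 + 1 = 8  (1024-bit vectors)
 * vcpu_sve_max_vq(vcpu) returns 4    ->                 ( 512-bit vectors)
 * vq = min(8, 4) = 4                 -> stored ZCR_EL2 value = 4 - 1 = 3
 */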
+
+static bool access_gic_vtr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = kvm_get_guest_vtr_el2();
+
+ return true;
+}
+
+static bool access_gic_misr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = vgic_v3_get_misr(vcpu);
+
+ return true;
+}
+
+static bool access_gic_eisr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = vgic_v3_get_eisr(vcpu);
+
+ return true;
+}
+
+static bool access_gic_elrsr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = vgic_v3_get_elrsr(vcpu);
+
+ return true;
+}
+
+static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_s1poe(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ return __el2_visibility(vcpu, rd, s1poe_visibility);
+}
+
+static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_tcr2(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ return __el2_visibility(vcpu, rd, tcr2_visibility);
+}
+
+static unsigned int fgt2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (el2_visibility(vcpu, rd) == 0 &&
+ kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, FGT2))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int fgt_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (el2_visibility(vcpu, rd) == 0 &&
+ kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, IMP))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_s1pie(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ return __el2_visibility(vcpu, rd, s1pie_visibility);
+}
+
+static unsigned int cnthv_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (vcpu_has_nv(vcpu) &&
+ !vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2_E2H0))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static bool access_mdcr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2);
+
+ if (!p->is_write) {
+ p->regval = old;
+ return true;
+ }
+
+ val = p->regval;
+ hpmn = FIELD_GET(MDCR_EL2_HPMN, val);
+
+ /*
+ * If HPMN is out of bounds, limit it to what we actually
+ * support. This matches the UNKNOWN definition of the field
+ * in that case, and keeps the emulation simple. Sort of.
+ */
+ if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
+ hpmn = vcpu->kvm->arch.nr_pmu_counters;
+ u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN);
+ }
+
+ __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
+
+ /*
+ * Request a reload of the PMU to enable/disable the counters
+ * affected by HPME.
+ */
+ if ((old ^ val) & MDCR_EL2_HPME)
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
+ return true;
+}
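
The HPMN clamp above, isolated into a stand-alone sketch; demo_clamp_hpmn is an invented name, and FIELD_GET()/u64p_replace_bits() are the <linux/bitfield.h> helpers already used in this hunk:

static u64 demo_clamp_hpmn(u64 mdcr, u8 nr_counters)
{
	u64 hpmn = FIELD_GET(MDCR_EL2_HPMN, mdcr);

	/* e.g. a guest write with HPMN == 12 and nr_counters == 6 stores 6 */
	if (hpmn > nr_counters)
		u64p_replace_bits(&mdcr, nr_counters, MDCR_EL2_HPMN);

	return mdcr;
}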
+
+static bool access_ras(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ switch(reg_to_encoding(r)) {
+ case SYS_ERXPFGCDN_EL1:
+ case SYS_ERXPFGCTL_EL1:
+ case SYS_ERXPFGF_EL1:
+ case SYS_ERXMISC2_EL1:
+ case SYS_ERXMISC3_EL1:
+ if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
+ (kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
+ kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) {
+ kvm_inject_undefined(vcpu);
+ return false;
+ }
+ break;
+ default:
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
+ kvm_inject_undefined(vcpu);
+ return false;
+ }
+ }
+
+ return trap_raz_wi(vcpu, p, r);
+}
+
+/*
+ * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
+ * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
+ * The values made visible to userspace were the register values of the boot
+ * CPU.
+ *
+ * At the same time, reads from these registers at EL1 were previously not
+ * trapped, allowing the guest to read the actual hardware value. On big-little
+ * machines, this means the VM can see different values depending on where a
+ * given vCPU got scheduled.
+ *
+ * These registers are now trapped as collateral damage from SME, and what
+ * follows attempts to give a user / guest view consistent with the existing
+ * ABI.
+ */
+static bool access_imp_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ /*
+ * Return the VM-scoped implementation ID register values if userspace
+ * has made them writable.
+ */
+ if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags))
+ return access_id_reg(vcpu, p, r);
+
+ /*
+ * Otherwise, fall back to the old behavior of returning the value of
+ * the current CPU.
+ */
+ switch (reg_to_encoding(r)) {
+ case SYS_REVIDR_EL1:
+ p->regval = read_sysreg(revidr_el1);
+ break;
+ case SYS_AIDR_EL1:
+ p->regval = read_sysreg(aidr_el1);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return true;
+}
+
+static u64 __ro_after_init boot_cpu_midr_val;
+static u64 __ro_after_init boot_cpu_revidr_val;
+static u64 __ro_after_init boot_cpu_aidr_val;
+
+static void init_imp_id_regs(void)
+{
+ boot_cpu_midr_val = read_sysreg(midr_el1);
+ boot_cpu_revidr_val = read_sysreg(revidr_el1);
+ boot_cpu_aidr_val = read_sysreg(aidr_el1);
+}
+
+static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ switch (reg_to_encoding(r)) {
+ case SYS_MIDR_EL1:
+ return boot_cpu_midr_val;
+ case SYS_REVIDR_EL1:
+ return boot_cpu_revidr_val;
+ case SYS_AIDR_EL1:
+ return boot_cpu_aidr_val;
+ default:
+ KVM_BUG_ON(1, vcpu->kvm);
+ return 0;
+ }
+}
+
+static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 expected;
+
+ guard(mutex)(&kvm->arch.config_lock);
+
+ expected = read_id_reg(vcpu, r);
+ if (expected == val)
+ return 0;
+
+ if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags))
+ return -EINVAL;
+
+ /*
+ * Once the VM has started the ID registers are immutable. Reject the
+ * write if userspace tries to change it.
+ */
+ if (kvm_vm_has_ran_once(kvm))
+ return -EBUSY;
+
+ /*
+ * Any value is allowed for the implementation ID registers so long as
+ * it is within the writable mask.
+ */
+ if ((val & r->val) != val)
+ return -EINVAL;
+
+ kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val);
+ return 0;
+}
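
The final check above is a plain writable-mask test: any bit set outside r->val is refused. A small sketch using the MIDR_EL1 mask from the IMPLEMENTATION_ID() entry below, GENMASK_ULL(31, 0); the demo_* name and the sample values are made up:

static bool demo_within_mask(u64 val, u64 mask)
{
	return (val & mask) == val;
}

/* demo_within_mask(0x00000000410fd0c0ULL, GENMASK_ULL(31, 0)) -> true  (accepted) */
/* demo_within_mask(0x0000000100000000ULL, GENMASK_ULL(31, 0)) -> false (-EINVAL)  */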
+
+#define IMPLEMENTATION_ID(reg, mask) { \
+ SYS_DESC(SYS_##reg), \
+ .access = access_imp_id_reg, \
+ .get_user = get_id_reg, \
+ .set_user = set_imp_id_reg, \
+ .reset = reset_imp_id_reg, \
+ .val = mask, \
+ }
+
+static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ __vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
+ return vcpu->kvm->arch.nr_pmu_counters;
+}
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -1917,16 +3100,6 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
* guest...
*/
static const struct sys_reg_desc sys_reg_descs[] = {
- { SYS_DESC(SYS_DC_ISW), access_dcsw },
- { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
- { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CSW), access_dcsw },
- { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CISW), access_dcsw },
- { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
-
DBG_BCR_BVR_WCR_WVR_EL1(0),
DBG_BCR_BVR_WCR_WVR_EL1(1),
{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
@@ -1961,9 +3134,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
// DBGDTR[TR]X_EL0 share the same encoding
{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
- { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
+ { SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
+ IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)),
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
+ IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)),
/*
* ID regs: all ID_SANITISED() entries here must have corresponding
@@ -1972,64 +3147,89 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* AArch64 mappings of the AArch32 ID registers */
/* CRm=1 */
- AA32_ID_SANITISED(ID_PFR0_EL1),
- AA32_ID_SANITISED(ID_PFR1_EL1),
+ AA32_ID_WRITABLE(ID_PFR0_EL1),
+ AA32_ID_WRITABLE(ID_PFR1_EL1),
{ SYS_DESC(SYS_ID_DFR0_EL1),
.access = access_id_reg,
.get_user = get_id_reg,
.set_user = set_id_dfr0_el1,
.visibility = aa32_id_visibility,
.reset = read_sanitised_id_dfr0_el1,
- .val = ID_DFR0_EL1_PerfMon_MASK, },
+ .val = GENMASK(31, 0) },
ID_HIDDEN(ID_AFR0_EL1),
- AA32_ID_SANITISED(ID_MMFR0_EL1),
- AA32_ID_SANITISED(ID_MMFR1_EL1),
- AA32_ID_SANITISED(ID_MMFR2_EL1),
- AA32_ID_SANITISED(ID_MMFR3_EL1),
+ AA32_ID_WRITABLE(ID_MMFR0_EL1),
+ AA32_ID_WRITABLE(ID_MMFR1_EL1),
+ AA32_ID_WRITABLE(ID_MMFR2_EL1),
+ AA32_ID_WRITABLE(ID_MMFR3_EL1),
/* CRm=2 */
- AA32_ID_SANITISED(ID_ISAR0_EL1),
- AA32_ID_SANITISED(ID_ISAR1_EL1),
- AA32_ID_SANITISED(ID_ISAR2_EL1),
- AA32_ID_SANITISED(ID_ISAR3_EL1),
- AA32_ID_SANITISED(ID_ISAR4_EL1),
- AA32_ID_SANITISED(ID_ISAR5_EL1),
- AA32_ID_SANITISED(ID_MMFR4_EL1),
- AA32_ID_SANITISED(ID_ISAR6_EL1),
+ AA32_ID_WRITABLE(ID_ISAR0_EL1),
+ AA32_ID_WRITABLE(ID_ISAR1_EL1),
+ AA32_ID_WRITABLE(ID_ISAR2_EL1),
+ AA32_ID_WRITABLE(ID_ISAR3_EL1),
+ AA32_ID_WRITABLE(ID_ISAR4_EL1),
+ AA32_ID_WRITABLE(ID_ISAR5_EL1),
+ AA32_ID_WRITABLE(ID_MMFR4_EL1),
+ AA32_ID_WRITABLE(ID_ISAR6_EL1),
/* CRm=3 */
- AA32_ID_SANITISED(MVFR0_EL1),
- AA32_ID_SANITISED(MVFR1_EL1),
- AA32_ID_SANITISED(MVFR2_EL1),
+ AA32_ID_WRITABLE(MVFR0_EL1),
+ AA32_ID_WRITABLE(MVFR1_EL1),
+ AA32_ID_WRITABLE(MVFR2_EL1),
ID_UNALLOCATED(3,3),
- AA32_ID_SANITISED(ID_PFR2_EL1),
+ AA32_ID_WRITABLE(ID_PFR2_EL1),
ID_HIDDEN(ID_DFR1_EL1),
- AA32_ID_SANITISED(ID_MMFR5_EL1),
+ AA32_ID_WRITABLE(ID_MMFR5_EL1),
ID_UNALLOCATED(3,7),
/* AArch64 ID registers */
/* CRm=4 */
- { SYS_DESC(SYS_ID_AA64PFR0_EL1),
- .access = access_id_reg,
- .get_user = get_id_reg,
- .set_user = set_id_reg,
- .reset = read_sanitised_id_aa64pfr0_el1,
- .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
- ID_SANITISED(ID_AA64PFR1_EL1),
- ID_UNALLOCATED(4,2),
+ ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
+ ~(ID_AA64PFR0_EL1_AMU |
+ ID_AA64PFR0_EL1_MPAM |
+ ID_AA64PFR0_EL1_SVE |
+ ID_AA64PFR0_EL1_AdvSIMD |
+ ID_AA64PFR0_EL1_FP)),
+ ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
+ ~(ID_AA64PFR1_EL1_PFAR |
+ ID_AA64PFR1_EL1_MTEX |
+ ID_AA64PFR1_EL1_THE |
+ ID_AA64PFR1_EL1_GCS |
+ ID_AA64PFR1_EL1_MTE_frac |
+ ID_AA64PFR1_EL1_NMI |
+ ID_AA64PFR1_EL1_RNDR_trap |
+ ID_AA64PFR1_EL1_SME |
+ ID_AA64PFR1_EL1_RES0 |
+ ID_AA64PFR1_EL1_MPAM_frac |
+ ID_AA64PFR1_EL1_MTE)),
+ ID_WRITABLE(ID_AA64PFR2_EL1,
+ ID_AA64PFR2_EL1_FPMR |
+ ID_AA64PFR2_EL1_MTEFAR |
+ ID_AA64PFR2_EL1_MTESTOREONLY),
ID_UNALLOCATED(4,3),
- ID_SANITISED(ID_AA64ZFR0_EL1),
+ ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
ID_HIDDEN(ID_AA64SMFR0_EL1),
ID_UNALLOCATED(4,6),
- ID_UNALLOCATED(4,7),
+ ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
/* CRm=5 */
- { SYS_DESC(SYS_ID_AA64DFR0_EL1),
- .access = access_id_reg,
- .get_user = get_id_reg,
- .set_user = set_id_aa64dfr0_el1,
- .reset = read_sanitised_id_aa64dfr0_el1,
- .val = ID_AA64DFR0_EL1_PMUVer_MASK, },
+ /*
+ * Prior to FEAT_Debugv8.9, the architecture defines context-aware
+ * breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
+ * KVM does not trap + emulate the breakpoint registers, and as such
+ * cannot support a layout that misaligns with the underlying hardware.
+ * While it may be possible to describe a subset that aligns with
+ * hardware, just prevent changes to BRPs and CTX_CMPs altogether for
+ * simplicity.
+ *
+ * See DDI0487K.a, section D2.8.3 Breakpoint types and linking
+ * of breakpoints for more details.
+ */
+ ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
+ ID_AA64DFR0_EL1_DoubleLock_MASK |
+ ID_AA64DFR0_EL1_WRPs_MASK |
+ ID_AA64DFR0_EL1_PMUVer_MASK |
+ ID_AA64DFR0_EL1_DebugVer_MASK),
ID_SANITISED(ID_AA64DFR1_EL1),
ID_UNALLOCATED(5,2),
ID_UNALLOCATED(5,3),
@@ -2039,21 +3239,42 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_UNALLOCATED(5,7),
/* CRm=6 */
- ID_SANITISED(ID_AA64ISAR0_EL1),
- ID_SANITISED(ID_AA64ISAR1_EL1),
- ID_SANITISED(ID_AA64ISAR2_EL1),
- ID_UNALLOCATED(6,3),
+ ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
+ ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
+ ID_AA64ISAR1_EL1_GPA |
+ ID_AA64ISAR1_EL1_API |
+ ID_AA64ISAR1_EL1_APA)),
+ ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
+ ID_AA64ISAR2_EL1_APA3 |
+ ID_AA64ISAR2_EL1_GPA3)),
+ ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
+ ID_AA64ISAR3_EL1_LSFE |
+ ID_AA64ISAR3_EL1_FAMINMAX)),
ID_UNALLOCATED(6,4),
ID_UNALLOCATED(6,5),
ID_UNALLOCATED(6,6),
ID_UNALLOCATED(6,7),
/* CRm=7 */
- ID_SANITISED(ID_AA64MMFR0_EL1),
- ID_SANITISED(ID_AA64MMFR1_EL1),
- ID_SANITISED(ID_AA64MMFR2_EL1),
- ID_SANITISED(ID_AA64MMFR3_EL1),
- ID_UNALLOCATED(7,4),
+ ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1,
+ ~(ID_AA64MMFR0_EL1_RES0 |
+ ID_AA64MMFR0_EL1_ASIDBITS)),
+ ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
+ ID_AA64MMFR1_EL1_XNX |
+ ID_AA64MMFR1_EL1_VH |
+ ID_AA64MMFR1_EL1_VMIDBits)),
+ ID_FILTERED(ID_AA64MMFR2_EL1,
+ id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 |
+ ID_AA64MMFR2_EL1_EVT |
+ ID_AA64MMFR2_EL1_FWB |
+ ID_AA64MMFR2_EL1_IDS |
+ ID_AA64MMFR2_EL1_NV |
+ ID_AA64MMFR2_EL1_CCIDX)),
+ ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
+ ID_AA64MMFR3_EL1_SCTLRX |
+ ID_AA64MMFR3_EL1_S1PIE |
+ ID_AA64MMFR3_EL1_S1POE)),
+ ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac),
ID_UNALLOCATED(7,5),
ID_UNALLOCATED(7,6),
ID_UNALLOCATED(7,7),
@@ -2061,6 +3282,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
+ { SYS_DESC(SYS_SCTLR2_EL1), access_vm_reg, reset_val, SCTLR2_EL1, 0,
+ .visibility = sctlr2_visibility },
MTE_REG(RGSR_EL1),
MTE_REG(GCR_EL1),
@@ -2072,7 +3295,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
- { SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },
+ { SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0,
+ .visibility = tcr2_visibility },
PTRAUTH_KEY(APIA),
PTRAUTH_KEY(APIB),
@@ -2083,18 +3307,25 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
{ SYS_DESC(SYS_ELR_EL1), access_elr},
+ { SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
+
{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
- { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
- { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERRIDR_EL1), access_ras },
+ { SYS_DESC(SYS_ERRSELR_EL1), access_ras },
+ { SYS_DESC(SYS_ERXFR_EL1), access_ras },
+ { SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
+ { SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
+ { SYS_DESC(SYS_ERXADDR_EL1), access_ras },
+ { SYS_DESC(SYS_ERXPFGF_EL1), access_ras },
+ { SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras },
+ { SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras },
+ { SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
+ { SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
+ { SYS_DESC(SYS_ERXMISC2_EL1), access_ras },
+ { SYS_DESC(SYS_ERXMISC3_EL1), access_ras },
MTE_REG(TFSR_EL1),
MTE_REG(TFSRE0_EL1),
@@ -2113,65 +3344,100 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
+ { SYS_DESC(SYS_PMSDSFR_EL1), undef_access },
/* PMBIDR_EL1 is not trapped */
{ PMU_SYS_REG(PMINTENSET_EL1),
- .access = access_pminten, .reg = PMINTENSET_EL1 },
+ .access = access_pminten, .reg = PMINTENSET_EL1,
+ .get_user = get_pmreg, .set_user = set_pmreg },
{ PMU_SYS_REG(PMINTENCLR_EL1),
- .access = access_pminten, .reg = PMINTENSET_EL1 },
+ .access = access_pminten, .reg = PMINTENSET_EL1,
+ .get_user = get_pmreg, .set_user = set_pmreg },
{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
- { SYS_DESC(SYS_PIRE0_EL1), access_vm_reg, reset_unknown, PIRE0_EL1 },
- { SYS_DESC(SYS_PIR_EL1), access_vm_reg, reset_unknown, PIR_EL1 },
+ { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1,
+ .visibility = s1pie_visibility },
+ { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1,
+ .visibility = s1pie_visibility },
+ { SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
+ .visibility = s1poe_visibility },
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
+ { SYS_DESC(SYS_MPAMIDR_EL1), undef_access },
{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
+ { SYS_DESC(SYS_MPAM1_EL1), undef_access },
+ { SYS_DESC(SYS_MPAM0_EL1), undef_access },
{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
- { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
- { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
- { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
- { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
- { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
+ { SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir },
+ { SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
- { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
- { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
- { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
+ { SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
+ { SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
+ { SYS_DESC(SYS_ACCDATA_EL1), undef_access },
+
{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
- .set_user = set_clidr },
+ .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
+ IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)),
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
- { SYS_DESC(SYS_CTR_EL0), access_ctr },
- { SYS_DESC(SYS_SVCR), undef_access },
-
- { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
- .reset = reset_pmcr, .reg = PMCR_EL0 },
+ ID_FILTERED(CTR_EL0, ctr_el0,
+ CTR_EL0_DIC_MASK |
+ CTR_EL0_IDC_MASK |
+ CTR_EL0_DminLine_MASK |
+ CTR_EL0_L1Ip_MASK |
+ CTR_EL0_IminLine_MASK),
+ { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
+ { SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
+
+ { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
+ .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
{ PMU_SYS_REG(PMCNTENSET_EL0),
- .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0,
+ .get_user = get_pmreg, .set_user = set_pmreg },
{ PMU_SYS_REG(PMCNTENCLR_EL0),
- .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0,
+ .get_user = get_pmreg, .set_user = set_pmreg },
{ PMU_SYS_REG(PMOVSCLR_EL0),
- .access = access_pmovs, .reg = PMOVSSET_EL0 },
+ .access = access_pmovs, .reg = PMOVSSET_EL0,
+ .get_user = get_pmreg, .set_user = set_pmreg },
/*
* PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
 * previously (and pointlessly) advertised...
@@ -2187,7 +3453,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
.access = access_pmceid, .reset = NULL },
{ PMU_SYS_REG(PMCCNTR_EL0),
.access = access_pmu_evcntr, .reset = reset_unknown,
- .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
+ .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
+ .set_user = set_pmu_evcntr },
{ PMU_SYS_REG(PMXEVTYPER_EL0),
.access = access_pmu_evtyper, .reset = NULL },
{ PMU_SYS_REG(PMXEVCNTR_EL0),
@@ -2199,8 +3466,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
.reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
{ PMU_SYS_REG(PMOVSSET_EL0),
- .access = access_pmovs, .reg = PMOVSSET_EL0 },
+ .access = access_pmovs, .reg = PMOVSSET_EL0,
+ .get_user = get_pmreg, .set_user = set_pmreg },
+ { SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
+ .visibility = s1poe_visibility },
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },
@@ -2280,11 +3550,19 @@ static const struct sys_reg_desc sys_reg_descs[] = {
AMU_AMEVTYPER1_EL0(14),
AMU_AMEVTYPER1_EL0(15),
- { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
+ { SYS_DESC(SYS_CNTPCT_EL0), .access = access_arch_timer,
+ .get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
+ { SYS_DESC(SYS_CNTVCT_EL0), .access = access_arch_timer,
+ .get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
+ { SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
- { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
- { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
+ TIMER_REG(CNTP_CTL_EL0, NULL),
+ TIMER_REG(CNTP_CVAL_EL0, NULL),
+
+ { SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
+ TIMER_REG(CNTV_CTL_EL0, NULL),
+ TIMER_REG(CNTV_CVAL_EL0, NULL),
/* PMEVCNTRn_EL0 */
PMU_PMEVCNTR_EL0(0),
@@ -2357,70 +3635,648 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
.reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
- EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
- EL2_REG(VMPIDR_EL2, access_rw, reset_unknown, 0),
+ EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
+ EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
- EL2_REG(HCR_EL2, access_rw, reset_val, 0),
- EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
+ EL2_REG_FILTERED(SCTLR2_EL2, access_vm_reg, reset_val, 0,
+ sctlr2_el2_visibility),
+ EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
+ EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0),
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
- EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
- EL2_REG(HACR_EL2, access_rw, reset_val, 0),
+ EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
+ EL2_REG_VNCR_FILT(HFGRTR_EL2, fgt_visibility),
+ EL2_REG_VNCR_FILT(HFGWTR_EL2, fgt_visibility),
+ EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
+ EL2_REG_VNCR(HACR_EL2, reset_val, 0),
+
+ EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0,
+ sve_el2_visibility),
+
+ EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
- EL2_REG(VTTBR_EL2, access_rw, reset_val, 0),
- EL2_REG(VTCR_EL2, access_rw, reset_val, 0),
-
- { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
- EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
- EL2_REG(ELR_EL2, access_rw, reset_val, 0),
+ EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1,
+ tcr2_el2_visibility),
+ EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
+ EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
+ EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0,
+ vncr_el2_visibility),
+
+ { SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
+ EL2_REG_VNCR_FILT(HDFGRTR2_EL2, fgt2_visibility),
+ EL2_REG_VNCR_FILT(HDFGWTR2_EL2, fgt2_visibility),
+ EL2_REG_VNCR_FILT(HFGRTR2_EL2, fgt2_visibility),
+ EL2_REG_VNCR_FILT(HFGWTR2_EL2, fgt2_visibility),
+ EL2_REG_VNCR_FILT(HDFGRTR_EL2, fgt_visibility),
+ EL2_REG_VNCR_FILT(HDFGWTR_EL2, fgt_visibility),
+ EL2_REG_VNCR_FILT(HAFGRTR_EL2, fgt_visibility),
+ EL2_REG_VNCR_FILT(HFGITR2_EL2, fgt2_visibility),
+ EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
+ EL2_REG_REDIR(ELR_EL2, reset_val, 0),
{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
- { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
+ /* AArch32 SPSR_* are RES0 if trapped from an NV guest */
+ { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi },
+ { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi },
+ { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi },
+ { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi },
+
+ { SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 },
EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
- EL2_REG(ESR_EL2, access_rw, reset_val, 0),
- { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
+ EL2_REG_REDIR(ESR_EL2, reset_val, 0),
+ EL2_REG_VNCR(VSESR_EL2, reset_unknown, 0),
+ { SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
- EL2_REG(FAR_EL2, access_rw, reset_val, 0),
+ EL2_REG_REDIR(FAR_EL2, reset_val, 0),
EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
+ EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0,
+ s1pie_el2_visibility),
+ EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0,
+ s1pie_el2_visibility),
+ EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0,
+ s1poe_el2_visibility),
EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
+ { SYS_DESC(SYS_MPAMHCR_EL2), undef_access },
+ { SYS_DESC(SYS_MPAMVPMV_EL2), undef_access },
+ { SYS_DESC(SYS_MPAM2_EL2), undef_access },
+ { SYS_DESC(SYS_MPAMVPM0_EL2), undef_access },
+ { SYS_DESC(SYS_MPAMVPM1_EL2), undef_access },
+ { SYS_DESC(SYS_MPAMVPM2_EL2), undef_access },
+ { SYS_DESC(SYS_MPAMVPM3_EL2), undef_access },
+ { SYS_DESC(SYS_MPAMVPM4_EL2), undef_access },
+ { SYS_DESC(SYS_MPAMVPM5_EL2), undef_access },
+ { SYS_DESC(SYS_MPAMVPM6_EL2), undef_access },
+ { SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
- EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
- { SYS_DESC(SYS_RMR_EL2), trap_undef },
+ { SYS_DESC(SYS_RVBAR_EL2), undef_access },
+ { SYS_DESC(SYS_RMR_EL2), undef_access },
+ EL2_REG_VNCR(VDISR_EL2, reset_unknown, 0),
+
+ EL2_REG_VNCR_GICv3(ICH_AP0R0_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP0R1_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP0R2_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP0R3_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP1R0_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP1R1_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP1R2_EL2),
+ EL2_REG_VNCR_GICv3(ICH_AP1R3_EL2),
+
+ { SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre },
+
+ EL2_REG_VNCR_GICv3(ICH_HCR_EL2),
+ { SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr },
+ { SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr },
+ { SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr },
+ { SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr },
+ EL2_REG_VNCR_GICv3(ICH_VMCR_EL2),
+
+ EL2_REG_VNCR_GICv3(ICH_LR0_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR1_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR2_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR3_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR4_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR5_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR6_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR7_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR8_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR9_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR10_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR11_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR12_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR13_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR14_EL2),
+ EL2_REG_VNCR_GICv3(ICH_LR15_EL2),
EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
- EL2_REG(CNTVOFF_EL2, access_rw, reset_val, 0),
+ EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
+ { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
+ TIMER_REG(CNTHP_CTL_EL2, el2_visibility),
+ TIMER_REG(CNTHP_CVAL_EL2, el2_visibility),
+
+ { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer, .visibility = cnthv_visibility },
+ TIMER_REG(CNTHV_CTL_EL2, cnthv_visibility),
+ TIMER_REG(CNTHV_CVAL_EL2, cnthv_visibility),
+
+ { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
- EL12_REG(SCTLR, access_vm_reg, reset_val, 0x00C50078),
- EL12_REG(CPACR, access_rw, reset_val, 0),
- EL12_REG(TTBR0, access_vm_reg, reset_unknown, 0),
- EL12_REG(TTBR1, access_vm_reg, reset_unknown, 0),
- EL12_REG(TCR, access_vm_reg, reset_val, 0),
- { SYS_DESC(SYS_SPSR_EL12), access_spsr},
- { SYS_DESC(SYS_ELR_EL12), access_elr},
- EL12_REG(AFSR0, access_vm_reg, reset_unknown, 0),
- EL12_REG(AFSR1, access_vm_reg, reset_unknown, 0),
- EL12_REG(ESR, access_vm_reg, reset_unknown, 0),
- EL12_REG(FAR, access_vm_reg, reset_unknown, 0),
- EL12_REG(MAIR, access_vm_reg, reset_unknown, 0),
- EL12_REG(AMAIR, access_vm_reg, reset_amair_el1, 0),
- EL12_REG(VBAR, access_rw, reset_val, 0),
- EL12_REG(CONTEXTIDR, access_vm_reg, reset_val, 0),
- EL12_REG(CNTKCTL, access_rw, reset_val, 0),
+ { SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer },
+ { SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer },
+ { SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer },
+
+ { SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer },
+ { SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer },
+ { SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer },
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};
-static const struct sys_reg_desc *first_idreg;
+static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ if (__kvm_at_s1e01(vcpu, op, p->regval))
+ return false;
+
+ return true;
+}
+
+static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ /* There is no FGT associated with AT S1E2A :-( */
+ if (op == OP_AT_S1E2A &&
+ !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) {
+ kvm_inject_undefined(vcpu);
+ return false;
+ }
+
+ if (__kvm_at_s1e2(vcpu, op, p->regval))
+ return false;
+
+ return true;
+}
+
+static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ if (__kvm_at_s12(vcpu, op, p->regval))
+ return false;
+
+ return true;
+}
+
+static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u8 CRm = sys_reg_CRm(instr);
+
+ if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
+ !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
+ return false;
+
+ if (CRm == TLBI_CRm_nROS &&
+ !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+ return false;
+
+ return true;
+}
+
+static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
+
+ write_lock(&vcpu->kvm->mmu_lock);
+
+ /*
+ * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
+ * corresponding VMIDs.
+ */
+ kvm_nested_s2_unmap(vcpu->kvm, true);
+
+ write_unlock(&vcpu->kvm->mmu_lock);
+
+ return true;
+}
+
+static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u8 CRm = sys_reg_CRm(instr);
+ u8 Op2 = sys_reg_Op2(instr);
+
+ if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
+ !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
+ return false;
+
+ if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
+ !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
+ return false;
+
+ if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
+ !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+ return false;
+
+ if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
+ !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
+ return false;
+
+ return true;
+}
+
+/* Only defined here as this is an internal "abstraction" */
+union tlbi_info {
+ struct {
+ u64 start;
+ u64 size;
+ } range;
+
+ struct {
+ u64 addr;
+ } ipa;
+
+ struct {
+ u64 addr;
+ u32 encoding;
+ } va;
+};
+
+static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
+ const union tlbi_info *info)
+{
+ /*
+ * The unmap operation is allowed to drop the MMU lock and block, which
+ * means that @mmu could be used for a different context than the one
+ * currently being invalidated.
+ *
+ * This behavior is still safe, as:
+ *
+ * 1) The vCPU(s) that recycled the MMU are responsible for invalidating
+ * the entire MMU before reusing it, which still honors the intent
+ * of a TLBI.
+ *
+ * 2) Until the guest TLBI instruction is 'retired' (i.e. increment PC
+ * and ERET to the guest), other vCPUs are allowed to use stale
+ * translations.
+ *
+ * 3) Accidentally unmapping an unrelated MMU context is nonfatal, and
+ * at worst may cause more aborts for shadow stage-2 fills.
+ *
+ * Dropping the MMU lock also implies that shadow stage-2 fills could
+ * happen behind the back of the TLBI. This is still safe, though, as
+ * the L1 needs to put its stage-2 in a consistent state before doing
+ * the TLBI.
+ */
+ kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
+}
+
+static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+ u64 limit, vttbr;
+
+ if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
+
+ vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
+ limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
+
+ kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
+ &(union tlbi_info) {
+ .range = {
+ .start = 0,
+ .size = limit,
+ },
+ },
+ s2_mmu_unmap_range);
+
+ return true;
+}
+
+static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+ u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
+ u64 base, range;
+
+ if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
+
+ /*
+ * Because the shadow S2 structure doesn't necessarily reflect that
+ * of the guest's S2 (different base granule size, for example), we
+ * decide to ignore TTL and only use the described range.
+ */
+ base = decode_range_tlbi(p->regval, &range, NULL);
+
+ kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
+ &(union tlbi_info) {
+ .range = {
+ .start = base,
+ .size = range,
+ },
+ },
+ s2_mmu_unmap_range);
+
+ return true;
+}
+
+static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
+ const union tlbi_info *info)
+{
+ unsigned long max_size;
+ u64 base_addr;
+
+ /*
+ * We drop a number of things from the supplied value:
+ *
+ * - NS bit: we're non-secure only.
+ *
+ * - IPA[51:48]: We don't support 52bit IPA just yet...
+ *
+ * And of course, adjust the IPA to be on an actual address.
+ */
+ base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
+ max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
+ base_addr &= ~(max_size - 1);
+
+ /*
+ * See comment in s2_mmu_unmap_range() for why this is allowed to
+ * reschedule.
+ */
+ kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
+}
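
A worked example of the IPA reconstruction above (the trapped register value is made up; the masking follows the code):

/*
 * p->regval            = 0x000000000008f00d
 * & GENMASK_ULL(35, 0) = 0x8f00d        (keep IPA[47:12], drop NS/top bits)
 * << 12                = 0x8f00d000     (back to a byte address)
 * &= ~(max_size - 1)   rounds down to the block size returned by
 *                      compute_tlb_inval_range()
 */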
+
+static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+ u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
+
+ if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
+
+ kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
+ &(union tlbi_info) {
+ .ipa = {
+ .addr = p->regval,
+ },
+ },
+ s2_mmu_unmap_ipa);
+
+ return true;
+}
+
+static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
+ const union tlbi_info *info)
+{
+ WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
+}
+
+static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
+
+ kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
+ return true;
+}
+
+static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ /*
+ * If we're here, this is because we've trapped on an EL1 TLBI
+ * instruction that affects the EL1 translation regime while
+ * we're running in a context that doesn't allow us to let the
+ * HW do its thing (aka vEL2):
+ *
+ * - HCR_EL2.E2H == 0 : a non-VHE guest
+ * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
+ *
+ * Another possibility is that we are invalidating the EL2 context
+ * using EL1 instructions, but that we landed here because we need
+ * additional invalidation for structures that are not held in the
+ * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In
+ * that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 }
+ * as we don't allow an NV-capable L1 in a nVHE configuration.
+ *
+ * We don't expect these helpers to ever be called when running
+ * in a vEL1 context.
+ */
+
+ WARN_ON(!vcpu_is_el2(vcpu));
+
+ if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
+
+ if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) {
+ kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
+ return true;
+ }
+
+ kvm_s2_mmu_iterate_by_vmid(vcpu->kvm,
+ get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)),
+ &(union tlbi_info) {
+ .va = {
+ .addr = p->regval,
+ .encoding = sys_encoding,
+ },
+ },
+ s2_mmu_tlbi_s1e1);
+
+ return true;
+}
+
+#define SYS_INSN(insn, access_fn) \
+ { \
+ SYS_DESC(OP_##insn), \
+ .access = (access_fn), \
+ }
+
+static struct sys_reg_desc sys_insn_descs[] = {
+ { SYS_DESC(SYS_DC_ISW), access_dcsw },
+ { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
+
+ SYS_INSN(AT_S1E1R, handle_at_s1e01),
+ SYS_INSN(AT_S1E1W, handle_at_s1e01),
+ SYS_INSN(AT_S1E0R, handle_at_s1e01),
+ SYS_INSN(AT_S1E0W, handle_at_s1e01),
+ SYS_INSN(AT_S1E1RP, handle_at_s1e01),
+ SYS_INSN(AT_S1E1WP, handle_at_s1e01),
+
+ { SYS_DESC(SYS_DC_CSW), access_dcsw },
+ { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CISW), access_dcsw },
+ { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
+
+ SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
+ SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
+ SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
+ SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
+ SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),
+
+ SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
+ SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
+
+ SYS_INSN(AT_S1E2R, handle_at_s1e2),
+ SYS_INSN(AT_S1E2W, handle_at_s1e2),
+ SYS_INSN(AT_S12E1R, handle_at_s12),
+ SYS_INSN(AT_S12E1W, handle_at_s12),
+ SYS_INSN(AT_S12E0R, handle_at_s12),
+ SYS_INSN(AT_S12E0W, handle_at_s12),
+ SYS_INSN(AT_S1E2A, handle_at_s1e2),
+
+ SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
+ SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
+ SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
+ SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
+
+ SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
+ SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
+
+ SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2),
+
+ SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
+
+ SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2),
+
+ SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
+ SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
+ SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
+ SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
+ SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
+ SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
+ SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
+ SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
+ SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
+ SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVAE2, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE2, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2, handle_tlbi_el2),
+
+ SYS_INSN(TLBI_ALLE1, handle_alle1is),
+
+ SYS_INSN(TLBI_VALE2, handle_tlbi_el2),
+
+ SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
+
+ SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
+ SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
+ SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
+ SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
+
+ SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
+ SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
+
+ SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2),
+
+ SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
+ SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
+ SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
+ SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
+ SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
+ SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
+ SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
+ SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
+ SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
+ SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
+ SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
+ SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2),
+ SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
+};
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
@@ -2429,14 +4285,14 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
if (p->is_write) {
return ignore_write(vcpu, p);
} else {
- u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
- u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
- u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
-
- p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
- (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
- (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
- | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
+ u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
+ u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
+
+ p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
+ (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
+ (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
+ (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
+ (1 << 15) | (el3 << 14) | (el3 << 12));
return true;
}
}
@@ -2450,18 +4306,20 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
* None of the other registers share their location, so treat them as
* if they were 64bit.
*/
-#define DBG_BCR_BVR_WCR_WVR(n) \
- /* DBGBVRn */ \
- { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
- /* DBGBCRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
- /* DBGWVRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
- /* DBGWCRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
-
-#define DBGBXVR(n) \
- { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
+#define DBG_BCR_BVR_WCR_WVR(n) \
+ /* DBGBVRn */ \
+ { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \
+ trap_dbg_wb_reg, NULL, n }, \
+ /* DBGBCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \
+ /* DBGWVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \
+ /* DBGWCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n }
+
+#define DBGBXVR(n) \
+ { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \
+ trap_dbg_wb_reg, NULL, n }
/*
* Trapped cp14 registers. We generally ignore most of the external
@@ -2596,6 +4454,7 @@ static const struct sys_reg_desc cp15_regs[] = {
/* TTBCR2 */
{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
+ { CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
/* DFSR */
{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
@@ -2645,8 +4504,28 @@ static const struct sys_reg_desc cp15_regs[] = {
/* AMAIR1 */
{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
- /* ICC_SRE */
- { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
+ { CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir },
+ { CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
+ { CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
@@ -2737,24 +4616,28 @@ static const struct sys_reg_desc cp15_64_regs[] = {
{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
+ { SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer },
{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
+ { SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer },
};
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
- bool is_32)
+ bool reset_check)
{
unsigned int i;
for (i = 0; i < n; i++) {
- if (!is_32 && table[i].reg && !table[i].reset) {
- kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
+ if (reset_check && table[i].reg && !table[i].reset) {
+ kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
+ &table[i], i, table[i].name);
return false;
}
if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
- kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
+ kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
+ &table[i], i, table[i - 1].name, table[i].name);
return false;
}
}
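
/*
 * Illustrative sketch (not kernel code): the kind of ordering check
 * check_sysreg_table() performs. A table that is binary-searched must be
 * strictly sorted by encoding, so any adjacent pair comparing >= 0 is a
 * bug. The encoding packing below is an assumption for the example only.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_desc {
	unsigned int op0, op1, crn, crm, op2;
	const char *name;
};

static unsigned int demo_encoding(const struct demo_desc *d)
{
	return (d->op0 << 16) | (d->op1 << 12) | (d->crn << 8) |
	       (d->crm << 4) | d->op2;
}

static bool demo_table_sorted(const struct demo_desc *tbl, unsigned int n)
{
	for (unsigned int i = 1; i < n; i++) {
		if (demo_encoding(&tbl[i - 1]) >= demo_encoding(&tbl[i])) {
			fprintf(stderr, "entry %u (%s -> %s) out of order\n",
				i, tbl[i - 1].name, tbl[i].name);
			return false;
		}
	}
	return true;
}

int main(void)
{
	static const struct demo_desc tbl[] = {
		{ 3, 0, 0, 1, 0, "ID_PFR0" },
		{ 3, 0, 0, 1, 1, "ID_PFR1" },
		{ 3, 0, 0, 1, 2, "ID_DFR0" },
	};

	printf("sorted: %s\n", demo_table_sorted(tbl, 3) ? "yes" : "no");
	return 0;
}
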
@@ -2851,7 +4734,8 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
/**
* kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
* @vcpu: The VCPU pointer
- * @run: The kvm_run struct
+ * @global: &struct sys_reg_desc
+ * @nr_global: size of the @global array
*/
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *global,
@@ -2940,7 +4824,7 @@ static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
return true;
kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
- params->is_write ? "write" : "read", reg_id);
+ str_write_read(params->is_write), reg_id);
return false;
}
@@ -3018,7 +4902,9 @@ static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
/**
* kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
* @vcpu: The VCPU pointer
- * @run: The kvm_run struct
+ * @params: &struct sys_reg_params
+ * @global: &struct sys_reg_desc
+ * @nr_global: size of the @global array
*/
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
@@ -3054,9 +4940,13 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
* Certain AArch32 ID registers are handled by rerouting to the AArch64
* system register table. Registers in the ID range where CRm=0 are
* excluded from this scheme as they do not trivially map into AArch64
- * system register encodings.
+ * system register encodings, except for AIDR/REVIDR.
*/
- if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
+ if (params.Op1 == 0 && params.CRn == 0 &&
+ (params.CRm || params.Op2 == 6 /* REVIDR */))
+ return kvm_emulate_cp15_id_reg(vcpu, &params);
+ if (params.Op1 == 1 && params.CRn == 0 &&
+ params.CRm == 0 && params.Op2 == 7 /* AIDR */)
return kvm_emulate_cp15_id_reg(vcpu, &params);
return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
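
/*
 * Illustrative sketch (not kernel code) of the rerouting predicate above:
 * which 32-bit cp15 accesses get treated as ID-register accesses. The
 * struct and helper names are invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_cp15_params {
	unsigned int op1, crn, crm, op2;
};

static bool demo_is_cp15_id_access(const struct demo_cp15_params *p)
{
	/* c0 with Op1==0: the ID block, plus REVIDR at Op2==6 */
	if (p->op1 == 0 && p->crn == 0 && (p->crm || p->op2 == 6))
		return true;

	/* AIDR lives at Op1==1, c0/c0, Op2==7 */
	if (p->op1 == 1 && p->crn == 0 && p->crm == 0 && p->op2 == 7)
		return true;

	return false;
}

int main(void)
{
	struct demo_cp15_params midr  = { 0, 0, 0, 0 };	/* CRm==0, Op2==0 */
	struct demo_cp15_params aidr  = { 1, 0, 0, 7 };
	struct demo_cp15_params idpfr = { 0, 0, 1, 0 };

	printf("MIDR rerouted:    %d\n", demo_is_cp15_id_access(&midr));
	printf("AIDR rerouted:    %d\n", demo_is_cp15_id_access(&aidr));
	printf("ID_PFR0 rerouted: %d\n", demo_is_cp15_id_access(&idpfr));
	return 0;
}
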
@@ -3076,12 +4966,6 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}
-static bool is_imp_def_sys_reg(struct sys_reg_params *params)
-{
- // See ARM DDI 0487E.a, section D12.3.2
- return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
-}
-
/**
* emulate_sys_reg - Emulate a guest access to an AArch64 system register
* @vcpu: The VCPU pointer
@@ -3090,48 +4974,145 @@ static bool is_imp_def_sys_reg(struct sys_reg_params *params)
* Return: true if the system register access was successful, false otherwise.
*/
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
- struct sys_reg_params *params)
+ struct sys_reg_params *params)
{
const struct sys_reg_desc *r;
r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
-
if (likely(r)) {
perform_access(vcpu, params, r);
return true;
}
- if (is_imp_def_sys_reg(params)) {
- kvm_inject_undefined(vcpu);
+ print_sys_reg_msg(params,
+ "Unsupported guest sys_reg access at: %lx [%08lx]\n",
+ *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
+ kvm_inject_undefined(vcpu);
+
+ return false;
+}
+
+static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos)
+{
+ unsigned long i, idreg_idx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
+ const struct sys_reg_desc *r = &sys_reg_descs[i];
+
+ if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
+ continue;
+
+ if (idreg_idx == pos)
+ return r;
+
+ idreg_idx++;
+ }
+
+ return NULL;
+}
+
+static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
+{
+ struct kvm *kvm = s->private;
+ u8 *iter;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ iter = &kvm->arch.idreg_debugfs_iter;
+ if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
+ *iter == (u8)~0) {
+ *iter = *pos;
+ if (!idregs_debug_find(kvm, *iter))
+ iter = NULL;
} else {
- print_sys_reg_msg(params,
- "Unsupported guest sys_reg access at: %lx [%08lx]\n",
- *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
- kvm_inject_undefined(vcpu);
+ iter = ERR_PTR(-EBUSY);
}
- return false;
+
+ mutex_unlock(&kvm->arch.config_lock);
+
+ return iter;
}
-static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
+static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
{
- const struct sys_reg_desc *idreg = first_idreg;
- u32 id = reg_to_encoding(idreg);
+ struct kvm *kvm = s->private;
+
+ (*pos)++;
+
+ if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) {
+ kvm->arch.idreg_debugfs_iter++;
+
+ return &kvm->arch.idreg_debugfs_iter;
+ }
+
+ return NULL;
+}
+
+static void idregs_debug_stop(struct seq_file *s, void *v)
+{
+ struct kvm *kvm = s->private;
+
+ if (IS_ERR(v))
+ return;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ kvm->arch.idreg_debugfs_iter = ~0;
+
+ mutex_unlock(&kvm->arch.config_lock);
+}
+
+static int idregs_debug_show(struct seq_file *s, void *v)
+{
+ const struct sys_reg_desc *desc;
+ struct kvm *kvm = s->private;
+
+ desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter);
+
+ if (!desc->name)
+ return 0;
+
+ seq_printf(s, "%20s:\t%016llx\n",
+ desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));
+
+ return 0;
+}
+
+static const struct seq_operations idregs_debug_sops = {
+ .start = idregs_debug_start,
+ .next = idregs_debug_next,
+ .stop = idregs_debug_stop,
+ .show = idregs_debug_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(idregs_debug);
+
+void kvm_sys_regs_create_debugfs(struct kvm *kvm)
+{
+ kvm->arch.idreg_debugfs_iter = ~0;
+
+ debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
+ &idregs_debug_fops);
+}
+
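
/*
 * Illustrative sketch (not kernel code): the start/next/stop/show shape of
 * a seq_file-style iterator over a filtered array, which is what the
 * idregs_debug_* callbacks above implement. Everything here is userspace
 * stand-in code and all names are invented for the example.
 */
#include <stdio.h>

static const char *demo_idregs[] = {
	"ID_AA64PFR0_EL1", "ID_AA64PFR1_EL1", "ID_AA64DFR0_EL1",
};
#define DEMO_NR (sizeof(demo_idregs) / sizeof(demo_idregs[0]))

static const char **demo_start(long *pos)
{
	return (*pos < (long)DEMO_NR) ? &demo_idregs[*pos] : NULL;
}

static const char **demo_next(const char **v, long *pos)
{
	(void)v;	/* an array-backed iterator only needs the position */
	(*pos)++;
	return (*pos < (long)DEMO_NR) ? &demo_idregs[*pos] : NULL;
}

static void demo_show(const char **v)
{
	printf("%20s:\t%016llx\n", *v, 0ULL);	/* value omitted in the sketch */
}

int main(void)
{
	long pos = 0;

	for (const char **v = demo_start(&pos); v; v = demo_next(v, &pos))
		demo_show(v);
	return 0;
}
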
+static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
+{
+ u32 id = reg_to_encoding(reg);
struct kvm *kvm = vcpu->kvm;
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
return;
- lockdep_assert_held(&kvm->arch.config_lock);
-
- /* Initialize all idregs */
- while (is_id_reg(id)) {
- IDREG(kvm, id) = idreg->reset(vcpu, idreg);
+ kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
+}
- idreg++;
- id = reg_to_encoding(idreg);
- }
+static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *reg)
+{
+ if (kvm_vcpu_initialized(vcpu))
+ return;
- set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+ reg->reset(vcpu, reg);
}
/**
@@ -3143,41 +5124,66 @@ static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
*/
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
unsigned long i;
- kvm_reset_id_regs(vcpu);
-
for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
const struct sys_reg_desc *r = &sys_reg_descs[i];
- if (is_id_reg(reg_to_encoding(r)))
+ if (!r->reset)
continue;
- if (r->reset)
+ if (is_vm_ftr_id_reg(reg_to_encoding(r)))
+ reset_vm_ftr_id_reg(vcpu, r);
+ else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
+ reset_vcpu_ftr_id_reg(vcpu, r);
+ else
r->reset(vcpu, r);
+
+ if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
+ __vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
}
+
+ set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+
+ if (kvm_vcpu_has_pmu(vcpu))
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
}
/**
- * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
+ * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
+ * trap on a guest execution
* @vcpu: The VCPU pointer
*/
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
+ const struct sys_reg_desc *desc = NULL;
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_esr(vcpu);
int Rt = kvm_vcpu_sys_get_rt(vcpu);
+ int sr_idx;
trace_kvm_handle_sys_reg(esr);
+ if (triage_sysreg_trap(vcpu, &sr_idx))
+ return 1;
+
params = esr_sys64_to_params(esr);
params.regval = vcpu_get_reg(vcpu, Rt);
- if (!emulate_sys_reg(vcpu, &params))
- return 1;
+ /* System registers have Op0=={2,3}, as per ARM DDI 0487J.a C5.1.2 */
+ if (params.Op0 == 2 || params.Op0 == 3)
+ desc = &sys_reg_descs[sr_idx];
+ else
+ desc = &sys_insn_descs[sr_idx];
+
+ perform_access(vcpu, &params, desc);
- if (!params.is_write)
+ /* Read from system register? */
+ if (!params.is_write &&
+ (params.Op0 == 2 || params.Op0 == 3))
vcpu_set_reg(vcpu, Rt, params.regval);
+
return 1;
}
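
/*
 * Illustrative sketch (not kernel code): the Op0-based split above. System
 * register accesses have Op0 equal to 2 or 3; everything else trapped via
 * this path is a system instruction (TLBI, AT, ...) and is looked up in a
 * separate descriptor table. Only register reads copy a result back into
 * Rt. Names and table identifiers are invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_table { DEMO_SYSREG_TABLE, DEMO_SYSINSN_TABLE };

static enum demo_table demo_pick_table(unsigned int op0)
{
	return (op0 == 2 || op0 == 3) ? DEMO_SYSREG_TABLE : DEMO_SYSINSN_TABLE;
}

static bool demo_writes_back_rt(unsigned int op0, bool is_write)
{
	return !is_write && demo_pick_table(op0) == DEMO_SYSREG_TABLE;
}

int main(void)
{
	printf("Op0=3 read -> table %d, writeback %d\n",
	       demo_pick_table(3), demo_writes_back_rt(3, false));
	printf("Op0=1 TLBI -> table %d, writeback %d\n",
	       demo_pick_table(1), demo_writes_back_rt(1, true));
	return 0;
}
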
@@ -3247,72 +5253,6 @@ id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
return r;
}
-/*
- * These are the invariant sys_reg registers: we let the guest see the
- * host versions of these, so they're part of the guest state.
- *
- * A future CPU may provide a mechanism to present different values to
- * the guest, or a future kvm may trap them.
- */
-
-#define FUNCTION_INVARIANT(reg) \
- static u64 get_##reg(struct kvm_vcpu *v, \
- const struct sys_reg_desc *r) \
- { \
- ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
- return ((struct sys_reg_desc *)r)->val; \
- }
-
-FUNCTION_INVARIANT(midr_el1)
-FUNCTION_INVARIANT(revidr_el1)
-FUNCTION_INVARIANT(aidr_el1)
-
-static u64 get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
-{
- ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
- return ((struct sys_reg_desc *)r)->val;
-}
-
-/* ->val is filled in by kvm_sys_reg_table_init() */
-static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
- { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
- { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
- { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
- { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
-};
-
-static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
-{
- const struct sys_reg_desc *r;
-
- r = get_reg_by_id(id, invariant_sys_regs,
- ARRAY_SIZE(invariant_sys_regs));
- if (!r)
- return -ENOENT;
-
- return put_user(r->val, uaddr);
-}
-
-static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
-{
- const struct sys_reg_desc *r;
- u64 val;
-
- r = get_reg_by_id(id, invariant_sys_regs,
- ARRAY_SIZE(invariant_sys_regs));
- if (!r)
- return -ENOENT;
-
- if (get_user(val, uaddr))
- return -EFAULT;
-
- /* This is what we mean by invariant: you can't change it. */
- if (r->val != val)
- return -EINVAL;
-
- return 0;
-}
-
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
u32 val;
@@ -3366,16 +5306,29 @@ static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
}
}
+static u64 kvm_one_reg_to_id(const struct kvm_one_reg *reg)
+{
+ switch (reg->id) {
+ case KVM_REG_ARM_TIMER_CVAL:
+ return TO_ARM64_SYS_REG(CNTV_CVAL_EL0);
+ case KVM_REG_ARM_TIMER_CNT:
+ return TO_ARM64_SYS_REG(CNTVCT_EL0);
+ default:
+ return reg->id;
+ }
+}
+
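
/*
 * Illustrative sketch (not kernel code): the two-way translation between
 * legacy KVM_REG_ARM_TIMER_* user-space indices and the sysreg encodings
 * they alias, as done by kvm_one_reg_to_id() above and, in the opposite
 * direction, by copy_reg_to_user() further down. The constants here are
 * placeholders, not the real index values.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_LEGACY_TIMER_CNT	0x1001ULL	/* stand-in for KVM_REG_ARM_TIMER_CNT */
#define DEMO_SYSREG_CNTVCT	0x2002ULL	/* stand-in for the CNTVCT_EL0 index  */

static uint64_t demo_user_to_sysreg(uint64_t id)
{
	return (id == DEMO_LEGACY_TIMER_CNT) ? DEMO_SYSREG_CNTVCT : id;
}

static uint64_t demo_sysreg_to_user(uint64_t id)
{
	return (id == DEMO_SYSREG_CNTVCT) ? DEMO_LEGACY_TIMER_CNT : id;
}

int main(void)
{
	uint64_t id = DEMO_LEGACY_TIMER_CNT;

	/* Round-trip: legacy index -> encoding -> legacy index */
	printf("%#llx -> %#llx -> %#llx\n",
	       (unsigned long long)id,
	       (unsigned long long)demo_user_to_sysreg(id),
	       (unsigned long long)demo_sysreg_to_user(demo_user_to_sysreg(id)));
	return 0;
}
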
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
const struct sys_reg_desc table[], unsigned int num)
{
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
const struct sys_reg_desc *r;
+ u64 id = kvm_one_reg_to_id(reg);
u64 val;
int ret;
- r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
- if (!r || sysreg_hidden_user(vcpu, r))
+ r = id_to_sys_reg_desc(vcpu, id, table, num);
+ if (!r || sysreg_hidden(vcpu, r))
return -ENOENT;
if (r->get_user) {
@@ -3394,15 +5347,10 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
- int err;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_get(vcpu, reg->id, uaddr);
- err = get_invariant_sys_reg(reg->id, uaddr);
- if (err != -ENOENT)
- return err;
-
return kvm_sys_reg_get_user(vcpu, reg,
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
@@ -3412,14 +5360,15 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
{
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
const struct sys_reg_desc *r;
+ u64 id = kvm_one_reg_to_id(reg);
u64 val;
int ret;
if (get_user(val, uaddr))
return -EFAULT;
- r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
- if (!r || sysreg_hidden_user(vcpu, r))
+ r = id_to_sys_reg_desc(vcpu, id, table, num);
+ if (!r || sysreg_hidden(vcpu, r))
return -ENOENT;
if (sysreg_user_write_ignore(vcpu, r))
@@ -3428,7 +5377,7 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
if (r->set_user) {
ret = (r->set_user)(vcpu, r, val);
} else {
- __vcpu_sys_reg(vcpu, r->reg) = val;
+ __vcpu_assign_sys_reg(vcpu, r->reg, val);
ret = 0;
}
@@ -3438,15 +5387,10 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
- int err;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_set(vcpu, reg->id, uaddr);
- err = set_invariant_sys_reg(reg->id, uaddr);
- if (err != -ENOENT)
- return err;
-
return kvm_sys_reg_set_user(vcpu, reg,
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
@@ -3483,10 +5427,23 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
+ u64 idx;
+
if (!*uind)
return true;
- if (put_user(sys_reg_to_index(reg), *uind))
+ switch (reg_to_encoding(reg)) {
+ case SYS_CNTV_CVAL_EL0:
+ idx = KVM_REG_ARM_TIMER_CVAL;
+ break;
+ case SYS_CNTVCT_EL0:
+ idx = KVM_REG_ARM_TIMER_CNT;
+ break;
+ default:
+ idx = sys_reg_to_index(reg);
+ }
+
+ if (put_user(idx, *uind))
return false;
(*uind)++;
@@ -3505,7 +5462,7 @@ static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
if (!(rd->reg || rd->get_user))
return 0;
- if (sysreg_hidden_user(vcpu, rd))
+ if (sysreg_hidden(vcpu, rd))
return 0;
if (!copy_reg_to_user(rd, uind))
@@ -3535,23 +5492,14 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
- return ARRAY_SIZE(invariant_sys_regs)
- + num_demux_regs()
+ return num_demux_regs()
+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
- unsigned int i;
int err;
- /* Then give them all the invariant registers' indices. */
- for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
- if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
- return -EFAULT;
- uindices++;
- }
-
err = walk_sys_regs(vcpu, uindices);
if (err < 0)
return err;
@@ -3560,32 +5508,179 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
return write_demux_regids(uindices);
}
+#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r) \
+ KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r), \
+ sys_reg_Op1(r), \
+ sys_reg_CRn(r), \
+ sys_reg_CRm(r), \
+ sys_reg_Op2(r))
+
+int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
+{
+ const void *zero_page = page_to_virt(ZERO_PAGE(0));
+ u64 __user *masks = (u64 __user *)range->addr;
+
+ /* Only the feature ID range is supported; reserved[13] must be zero. */
+ if (range->range ||
+ memcmp(range->reserved, zero_page, sizeof(range->reserved)))
+ return -EINVAL;
+
+ /* Wipe the whole thing first */
+ if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
+ return -EFAULT;
+
+ for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
+ const struct sys_reg_desc *reg = &sys_reg_descs[i];
+ u32 encoding = reg_to_encoding(reg);
+ u64 val;
+
+ if (!is_feature_id_reg(encoding) || !reg->set_user)
+ continue;
+
+ if (!reg->val ||
+ (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
+ continue;
+ }
+ val = reg->val;
+
+ if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
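
/*
 * Illustrative sketch (not kernel code): flattening a (CRm, Op2) pair in
 * the feature-ID space into an index of a userspace mask array, which is
 * the role KVM_ARM_FEATURE_ID_RANGE_INDEX() plays above. The layout used
 * here (8 Op2 slots per CRm value) is an assumption for the example only.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_NR_CRM	8
#define DEMO_NR_OP2	8

static unsigned int demo_range_index(unsigned int crm, unsigned int op2)
{
	return crm * DEMO_NR_OP2 + op2;
}

int main(void)
{
	unsigned long long masks[DEMO_NR_CRM * DEMO_NR_OP2];

	/* Wipe the whole thing first, then fill in the writable entries */
	memset(masks, 0, sizeof(masks));
	masks[demo_range_index(4, 0)] = ~0ULL;	/* pretend this reg is fully writable */

	printf("index(CRm=4, Op2=0) = %u, mask = %#llx\n",
	       demo_range_index(4, 0), masks[demo_range_index(4, 0)]);
	return 0;
}
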
+static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ if (has_vhe() || has_hvhe())
+ vcpu->arch.hcr_el2 |= HCR_E2H;
+ if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
+ /* route synchronous external abort exceptions to EL2 */
+ vcpu->arch.hcr_el2 |= HCR_TEA;
+ /* trap error record accesses */
+ vcpu->arch.hcr_el2 |= HCR_TERR;
+ }
+
+ if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+ vcpu->arch.hcr_el2 |= HCR_FWB;
+
+ if (cpus_have_final_cap(ARM64_HAS_EVT) &&
+ !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
+ kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
+ vcpu->arch.hcr_el2 |= HCR_TID4;
+ else
+ vcpu->arch.hcr_el2 |= HCR_TID2;
+
+ if (vcpu_el1_is_32bit(vcpu))
+ vcpu->arch.hcr_el2 &= ~HCR_RW;
+
+ if (kvm_has_mte(vcpu->kvm))
+ vcpu->arch.hcr_el2 |= HCR_ATA;
+
+ /*
+ * In the absence of FGT, we cannot independently trap TLBI
+ * Range instructions. This isn't great, but trapping all
+ * TLBIs would be far worse. Live with it...
+ */
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+ vcpu->arch.hcr_el2 |= HCR_TTLBOS;
+}
+
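
/*
 * Illustrative sketch (not kernel code): accumulating trap/control bits
 * into a single control word based on feature queries, which is the shape
 * of vcpu_set_hcr() above. The bit positions and feature checks below are
 * placeholders, not the architectural HCR_EL2 layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_HCR_TEA	(1ULL << 37)	/* placeholder bit positions */
#define DEMO_HCR_TERR	(1ULL << 36)
#define DEMO_HCR_FWB	(1ULL << 46)

static uint64_t demo_build_hcr(bool has_ras, bool has_fwb)
{
	uint64_t hcr = 0;

	if (has_ras)
		hcr |= DEMO_HCR_TEA | DEMO_HCR_TERR;	/* trap error record accesses */
	if (has_fwb)
		hcr |= DEMO_HCR_FWB;			/* stage-2 forced write-back */

	return hcr;
}

int main(void)
{
	printf("hcr = %#llx\n", (unsigned long long)demo_build_hcr(true, false));
	return 0;
}
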
+void kvm_calculate_traps(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->arch.config_lock);
+ vcpu_set_hcr(vcpu);
+ vcpu_set_ich_hcr(vcpu);
+ vcpu_set_hcrx(vcpu);
+
+ if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
+ goto out;
+
+ compute_fgu(kvm, HFGRTR_GROUP);
+ compute_fgu(kvm, HFGITR_GROUP);
+ compute_fgu(kvm, HDFGRTR_GROUP);
+ compute_fgu(kvm, HAFGRTR_GROUP);
+ compute_fgu(kvm, HFGRTR2_GROUP);
+ compute_fgu(kvm, HFGITR2_GROUP);
+ compute_fgu(kvm, HDFGRTR2_GROUP);
+
+ set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
+out:
+ mutex_unlock(&kvm->arch.config_lock);
+}
+
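
/*
 * Illustrative sketch (not kernel code): the "compute once under the
 * config lock, then set a flag" pattern used above for the fine-grained
 * undef (FGU) bitmaps, written with plain pthreads. All names invented.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t demo_config_lock = PTHREAD_MUTEX_INITIALIZER;
static bool demo_fgu_initialized;
static unsigned long demo_fgu_bitmap;

static void demo_compute_fgu(void)
{
	demo_fgu_bitmap = 0xffUL;	/* stand-in for the real computation */
}

static void demo_calculate_traps(void)
{
	pthread_mutex_lock(&demo_config_lock);

	/* Per-vCPU state would be set up here on every call */

	if (!demo_fgu_initialized) {
		demo_compute_fgu();	/* VM-wide, so done only once */
		demo_fgu_initialized = true;
	}

	pthread_mutex_unlock(&demo_config_lock);
}

int main(void)
{
	demo_calculate_traps();
	demo_calculate_traps();	/* second call skips the recomputation */
	printf("fgu bitmap = %#lx\n", demo_fgu_bitmap);
	return 0;
}
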
+/*
+ * Perform last adjustments to the ID registers that are implied by the
+ * configuration outside of the ID regs themselves, as well as any
+ * initialisation that directly depends on these ID registers (such as
+ * RES0/RES1 behaviours). This is not the place to configure traps though.
+ *
+ * Because this can be called once per CPU, changes must be idempotent.
+ */
+int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ guard(mutex)(&kvm->arch.config_lock);
+
+ /*
+ * This hacks into the ID registers, so only perform it when the
+ * first vcpu runs, or the kvm_set_vm_id_reg() helper will scream.
+ */
+ if (!irqchip_in_kernel(kvm) && !kvm_vm_has_ran_once(kvm)) {
+ u64 val;
+
+ val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
+ kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
+ val = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
+ kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, val);
+ }
+
+ if (vcpu_has_nv(vcpu)) {
+ int ret = kvm_init_nv_sysregs(vcpu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int __init kvm_sys_reg_table_init(void)
{
- struct sys_reg_params params;
+ const struct sys_reg_desc *gicv3_regs;
bool valid = true;
- unsigned int i;
+ unsigned int i, sz;
+ int ret = 0;
/* Make sure tables are unique and in order. */
- valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
- valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
- valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
- valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
- valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
- valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
+ valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), true);
+ valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), false);
+ valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), false);
+ valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), false);
+ valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), false);
+ valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
+
+ gicv3_regs = vgic_v3_get_sysreg_table(&sz);
+ valid &= check_sysreg_table(gicv3_regs, sz, false);
if (!valid)
return -EINVAL;
- /* We abuse the reset function to overwrite the table itself. */
- for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
- invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
+ init_imp_id_regs();
- /* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
- params = encoding_to_params(SYS_ID_PFR0_EL1);
- first_idreg = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
- if (!first_idreg)
- return -EINVAL;
+ ret = populate_nv_trap_config();
- return 0;
+ check_feature_map();
+
+ for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
+ ret = populate_sysreg_config(sys_reg_descs + i, i);
+
+ for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
+ ret = populate_sysreg_config(sys_insn_descs + i, i);
+
+ return ret;
}