Diffstat (limited to 'arch/arm64/kvm/emulate-nested.c')
 arch/arm64/kvm/emulate-nested.c | 1323 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 978 insertions(+), 345 deletions(-)
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 431fd429932d..3a384e9660b8 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -16,9 +16,13 @@
enum trap_behaviour {
BEHAVE_HANDLE_LOCALLY = 0,
+
BEHAVE_FORWARD_READ = BIT(0),
BEHAVE_FORWARD_WRITE = BIT(1),
- BEHAVE_FORWARD_ANY = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
+ BEHAVE_FORWARD_RW = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
+
+ /* Traps that take effect in Host EL0; this is rare! */
+ BEHAVE_FORWARD_IN_HOST_EL0 = BIT(2),
};
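
For orientation: these behaviours form a bitmask that gets tested against the access direction at trap time. A minimal sketch of that check (mirroring the logic in triage_sysreg_trap() further down; the helper name is illustrative only):

	/* Forward iff the behaviour covers this access direction */
	static bool should_forward(enum trap_behaviour b, bool is_read)
	{
		return ((b & BEHAVE_FORWARD_READ) && is_read) ||
		       ((b & BEHAVE_FORWARD_WRITE) && !is_read);
	}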
struct trap_bits {
@@ -79,25 +83,43 @@ enum cgt_group_id {
CGT_MDCR_E2TB,
CGT_MDCR_TDCC,
+ CGT_CPTR_TAM,
+ CGT_CPTR_TCPAC,
+
+ CGT_HCRX_EnFPM,
+ CGT_HCRX_TCR2En,
+
+ CGT_CNTHCTL_EL1TVT,
+ CGT_CNTHCTL_EL1TVCT,
+
+ CGT_ICH_HCR_TC,
+ CGT_ICH_HCR_TALL0,
+ CGT_ICH_HCR_TALL1,
+ CGT_ICH_HCR_TDIR,
+
/*
* Anything after this point is a combination of coarse trap
* controls, which must all be evaluated to decide what to do.
*/
__MULTIPLE_CONTROL_BITS__,
- CGT_HCR_IMO_FMO = __MULTIPLE_CONTROL_BITS__,
+ CGT_HCR_IMO_FMO_ICH_HCR_TC = __MULTIPLE_CONTROL_BITS__,
CGT_HCR_TID2_TID4,
CGT_HCR_TTLB_TTLBIS,
CGT_HCR_TTLB_TTLBOS,
CGT_HCR_TVM_TRVM,
+ CGT_HCR_TVM_TRVM_HCRX_TCR2En,
CGT_HCR_TPU_TICAB,
CGT_HCR_TPU_TOCU,
CGT_HCR_NV1_nNV2_ENSCXT,
CGT_MDCR_TPM_TPMCR,
+ CGT_MDCR_TPM_HPMN,
CGT_MDCR_TDE_TDA,
CGT_MDCR_TDE_TDOSA,
CGT_MDCR_TDE_TDRA,
CGT_MDCR_TDCC_TDE_TDA,
+ CGT_ICH_HCR_TC_TDIR,
+
/*
* Anything after this point requires a callback evaluating a
* complex trap condition. Ugly stuff.
@@ -105,6 +127,11 @@ enum cgt_group_id {
__COMPLEX_CONDITIONS__,
CGT_CNTHCTL_EL1PCTEN = __COMPLEX_CONDITIONS__,
CGT_CNTHCTL_EL1PTEN,
+ CGT_CNTHCTL_EL1NVPCT,
+ CGT_CNTHCTL_EL1NVVCT,
+
+ CGT_CPTR_TTA,
+ CGT_MDCR_HPMN,
/* Must be last */
__NR_CGT_GROUP_IDS__
@@ -121,7 +148,7 @@ static const struct trap_bits coarse_trap_bits[] = {
.index = HCR_EL2,
.value = HCR_TID2,
.mask = HCR_TID2,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TID3] = {
.index = HCR_EL2,
@@ -145,37 +172,37 @@ static const struct trap_bits coarse_trap_bits[] = {
.index = HCR_EL2,
.value = HCR_TIDCP,
.mask = HCR_TIDCP,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TACR] = {
.index = HCR_EL2,
.value = HCR_TACR,
.mask = HCR_TACR,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TSW] = {
.index = HCR_EL2,
.value = HCR_TSW,
.mask = HCR_TSW,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TPC] = { /* Also called TCPC when FEAT_DPB is implemented */
.index = HCR_EL2,
.value = HCR_TPC,
.mask = HCR_TPC,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TPU] = {
.index = HCR_EL2,
.value = HCR_TPU,
.mask = HCR_TPU,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TTLB] = {
.index = HCR_EL2,
.value = HCR_TTLB,
.mask = HCR_TTLB,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TVM] = {
.index = HCR_EL2,
@@ -187,7 +214,7 @@ static const struct trap_bits coarse_trap_bits[] = {
.index = HCR_EL2,
.value = HCR_TDZ,
.mask = HCR_TDZ,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TRVM] = {
.index = HCR_EL2,
@@ -199,151 +226,213 @@ static const struct trap_bits coarse_trap_bits[] = {
.index = HCR_EL2,
.value = HCR_TLOR,
.mask = HCR_TLOR,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TERR] = {
.index = HCR_EL2,
.value = HCR_TERR,
.mask = HCR_TERR,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_APK] = {
.index = HCR_EL2,
.value = 0,
.mask = HCR_APK,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_NV] = {
.index = HCR_EL2,
.value = HCR_NV,
.mask = HCR_NV,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_NV_nNV2] = {
.index = HCR_EL2,
.value = HCR_NV,
.mask = HCR_NV | HCR_NV2,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_NV1_nNV2] = {
.index = HCR_EL2,
.value = HCR_NV | HCR_NV1,
.mask = HCR_NV | HCR_NV1 | HCR_NV2,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_AT] = {
.index = HCR_EL2,
.value = HCR_AT,
.mask = HCR_AT,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_nFIEN] = {
.index = HCR_EL2,
.value = 0,
.mask = HCR_FIEN,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TID4] = {
.index = HCR_EL2,
.value = HCR_TID4,
.mask = HCR_TID4,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TICAB] = {
.index = HCR_EL2,
.value = HCR_TICAB,
.mask = HCR_TICAB,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TOCU] = {
.index = HCR_EL2,
.value = HCR_TOCU,
.mask = HCR_TOCU,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_ENSCXT] = {
.index = HCR_EL2,
.value = 0,
.mask = HCR_ENSCXT,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TTLBIS] = {
.index = HCR_EL2,
.value = HCR_TTLBIS,
.mask = HCR_TTLBIS,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TTLBOS] = {
.index = HCR_EL2,
.value = HCR_TTLBOS,
.mask = HCR_TTLBOS,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TPMCR] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TPMCR,
.mask = MDCR_EL2_TPMCR,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW |
+ BEHAVE_FORWARD_IN_HOST_EL0,
},
[CGT_MDCR_TPM] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TPM,
.mask = MDCR_EL2_TPM,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW |
+ BEHAVE_FORWARD_IN_HOST_EL0,
},
[CGT_MDCR_TDE] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TDE,
.mask = MDCR_EL2_TDE,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TDA] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TDA,
.mask = MDCR_EL2_TDA,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TDOSA] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TDOSA,
.mask = MDCR_EL2_TDOSA,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TDRA] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TDRA,
.mask = MDCR_EL2_TDRA,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_E2PB] = {
.index = MDCR_EL2,
.value = 0,
.mask = BIT(MDCR_EL2_E2PB_SHIFT),
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TPMS] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TPMS,
.mask = MDCR_EL2_TPMS,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TTRF] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TTRF,
.mask = MDCR_EL2_TTRF,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_E2TB] = {
.index = MDCR_EL2,
.value = 0,
.mask = BIT(MDCR_EL2_E2TB_SHIFT),
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TDCC] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TDCC,
.mask = MDCR_EL2_TDCC,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
+ [CGT_CPTR_TAM] = {
+ .index = CPTR_EL2,
+ .value = CPTR_EL2_TAM,
+ .mask = CPTR_EL2_TAM,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
+ [CGT_CPTR_TCPAC] = {
+ .index = CPTR_EL2,
+ .value = CPTR_EL2_TCPAC,
+ .mask = CPTR_EL2_TCPAC,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
+ [CGT_HCRX_EnFPM] = {
+ .index = HCRX_EL2,
+ .value = 0,
+ .mask = HCRX_EL2_EnFPM,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
+ [CGT_HCRX_TCR2En] = {
+ .index = HCRX_EL2,
+ .value = 0,
+ .mask = HCRX_EL2_TCR2En,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
+ [CGT_CNTHCTL_EL1TVT] = {
+ .index = CNTHCTL_EL2,
+ .value = CNTHCTL_EL1TVT,
+ .mask = CNTHCTL_EL1TVT,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
+ [CGT_CNTHCTL_EL1TVCT] = {
+ .index = CNTHCTL_EL2,
+ .value = CNTHCTL_EL1TVCT,
+ .mask = CNTHCTL_EL1TVCT,
+ .behaviour = BEHAVE_FORWARD_READ,
+ },
+ [CGT_ICH_HCR_TC] = {
+ .index = ICH_HCR_EL2,
+ .value = ICH_HCR_EL2_TC,
+ .mask = ICH_HCR_EL2_TC,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
+ [CGT_ICH_HCR_TALL0] = {
+ .index = ICH_HCR_EL2,
+ .value = ICH_HCR_EL2_TALL0,
+ .mask = ICH_HCR_EL2_TALL0,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
+ [CGT_ICH_HCR_TALL1] = {
+ .index = ICH_HCR_EL2,
+ .value = ICH_HCR_EL2_TALL1,
+ .mask = ICH_HCR_EL2_TALL1,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
+ [CGT_ICH_HCR_TDIR] = {
+ .index = ICH_HCR_EL2,
+ .value = ICH_HCR_EL2_TDIR,
+ .mask = ICH_HCR_EL2_TDIR,
+ .behaviour = BEHAVE_FORWARD_RW,
},
};
@@ -354,19 +443,24 @@ static const struct trap_bits coarse_trap_bits[] = {
}
static const enum cgt_group_id *coarse_control_combo[] = {
- MCB(CGT_HCR_IMO_FMO, CGT_HCR_IMO, CGT_HCR_FMO),
MCB(CGT_HCR_TID2_TID4, CGT_HCR_TID2, CGT_HCR_TID4),
MCB(CGT_HCR_TTLB_TTLBIS, CGT_HCR_TTLB, CGT_HCR_TTLBIS),
MCB(CGT_HCR_TTLB_TTLBOS, CGT_HCR_TTLB, CGT_HCR_TTLBOS),
MCB(CGT_HCR_TVM_TRVM, CGT_HCR_TVM, CGT_HCR_TRVM),
+ MCB(CGT_HCR_TVM_TRVM_HCRX_TCR2En,
+ CGT_HCR_TVM, CGT_HCR_TRVM, CGT_HCRX_TCR2En),
MCB(CGT_HCR_TPU_TICAB, CGT_HCR_TPU, CGT_HCR_TICAB),
MCB(CGT_HCR_TPU_TOCU, CGT_HCR_TPU, CGT_HCR_TOCU),
MCB(CGT_HCR_NV1_nNV2_ENSCXT, CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT),
MCB(CGT_MDCR_TPM_TPMCR, CGT_MDCR_TPM, CGT_MDCR_TPMCR),
+ MCB(CGT_MDCR_TPM_HPMN, CGT_MDCR_TPM, CGT_MDCR_HPMN),
MCB(CGT_MDCR_TDE_TDA, CGT_MDCR_TDE, CGT_MDCR_TDA),
MCB(CGT_MDCR_TDE_TDOSA, CGT_MDCR_TDE, CGT_MDCR_TDOSA),
MCB(CGT_MDCR_TDE_TDRA, CGT_MDCR_TDE, CGT_MDCR_TDRA),
MCB(CGT_MDCR_TDCC_TDE_TDA, CGT_MDCR_TDCC, CGT_MDCR_TDE, CGT_MDCR_TDA),
+
+ MCB(CGT_HCR_IMO_FMO_ICH_HCR_TC, CGT_HCR_IMO, CGT_HCR_FMO, CGT_ICH_HCR_TC),
+ MCB(CGT_ICH_HCR_TC_TDIR, CGT_ICH_HCR_TC, CGT_ICH_HCR_TDIR),
};
typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *);
@@ -399,7 +493,7 @@ static enum trap_behaviour check_cnthctl_el1pcten(struct kvm_vcpu *vcpu)
if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCTEN << 10))
return BEHAVE_HANDLE_LOCALLY;
- return BEHAVE_FORWARD_ANY;
+ return BEHAVE_FORWARD_RW;
}
static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
@@ -407,7 +501,74 @@ static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCEN << 10))
return BEHAVE_HANDLE_LOCALLY;
- return BEHAVE_FORWARD_ANY;
+ return BEHAVE_FORWARD_RW;
+}
+
+static bool is_nested_nv2_guest(struct kvm_vcpu *vcpu)
+{
+ u64 val;
+
+ val = __vcpu_sys_reg(vcpu, HCR_EL2);
+ return ((val & (HCR_E2H | HCR_TGE | HCR_NV2 | HCR_NV1 | HCR_NV)) == (HCR_E2H | HCR_NV2 | HCR_NV));
+}
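
The mask/compare idiom tests five HCR_EL2 bits at once: E2H, NV2 and NV must be set while TGE and NV1 must be clear, i.e. a VHE guest hypervisor with NV2 redirection active. An equivalent (illustrative) long form:

	return (val & HCR_E2H) && (val & HCR_NV2) && (val & HCR_NV) &&
	       !(val & HCR_TGE) && !(val & HCR_NV1);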
+
+static enum trap_behaviour check_cnthctl_el1nvpct(struct kvm_vcpu *vcpu)
+{
+ if (!is_nested_nv2_guest(vcpu) ||
+ !(__vcpu_sys_reg(vcpu, CNTHCTL_EL2) & CNTHCTL_EL1NVPCT))
+ return BEHAVE_HANDLE_LOCALLY;
+
+ return BEHAVE_FORWARD_RW;
+}
+
+static enum trap_behaviour check_cnthctl_el1nvvct(struct kvm_vcpu *vcpu)
+{
+ if (!is_nested_nv2_guest(vcpu) ||
+ !(__vcpu_sys_reg(vcpu, CNTHCTL_EL2) & CNTHCTL_EL1NVVCT))
+ return BEHAVE_HANDLE_LOCALLY;
+
+ return BEHAVE_FORWARD_RW;
+}
+
+static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
+{
+ u64 val = __vcpu_sys_reg(vcpu, CPTR_EL2);
+
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ val = translate_cptr_el2_to_cpacr_el1(val);
+
+ if (val & CPACR_EL1_TTA)
+ return BEHAVE_FORWARD_RW;
+
+ return BEHAVE_HANDLE_LOCALLY;
+}
+
+static enum trap_behaviour check_mdcr_hpmn(struct kvm_vcpu *vcpu)
+{
+ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
+ unsigned int idx;
+
+ switch (sysreg) {
+ case SYS_PMEVTYPERn_EL0(0) ... SYS_PMEVTYPERn_EL0(30):
+ case SYS_PMEVCNTRn_EL0(0) ... SYS_PMEVCNTRn_EL0(30):
+ idx = (sys_reg_CRm(sysreg) & 0x3) << 3 | sys_reg_Op2(sysreg);
+ break;
+ case SYS_PMXEVTYPER_EL0:
+ case SYS_PMXEVCNTR_EL0:
+ idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
+ __vcpu_sys_reg(vcpu, PMSELR_EL0));
+ break;
+ default:
+ /* Someone used this trap helper for something else... */
+ KVM_BUG_ON(1, vcpu->kvm);
+ return BEHAVE_HANDLE_LOCALLY;
+ }
+
+ if (kvm_pmu_counter_is_hyp(vcpu, idx))
+ return BEHAVE_FORWARD_RW | BEHAVE_FORWARD_IN_HOST_EL0;
+
+ return BEHAVE_HANDLE_LOCALLY;
}
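
The index extraction above leans on the architectural encoding of PMEVCNTR<n>_EL0/PMEVTYPER<n>_EL0, where CRm[1:0] carries n[4:3] and Op2 carries n[2:0]. A worked example, for illustration:

	/*
	 * PMEVCNTR21_EL0 encodes as CRm = 0b1010, Op2 = 0b101:
	 *	idx = (0b1010 & 0x3) << 3 | 0b101 = (2 << 3) | 5 = 21
	 */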
#define CCC(id, fn) \
@@ -416,6 +577,10 @@ static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
static const complex_condition_check ccc[] = {
CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten),
CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten),
+ CCC(CGT_CNTHCTL_EL1NVPCT, check_cnthctl_el1nvpct),
+ CCC(CGT_CNTHCTL_EL1NVVCT, check_cnthctl_el1nvvct),
+ CCC(CGT_CPTR_TTA, check_cptr_tta),
+ CCC(CGT_MDCR_HPMN, check_mdcr_hpmn),
};
/*
@@ -427,12 +592,14 @@ static const complex_condition_check ccc[] = {
* [19:14] bit number in the FGT register (6 bits)
* [20] trap polarity (1 bit)
* [25:21] FG filter (5 bits)
- * [62:26] Unused (37 bits)
+ * [35:26] Main SysReg table index (10 bits)
+ * [62:36] Unused (27 bits)
* [63] RES0 - Must be zero, as lost on insertion in the xarray
*/
#define TC_CGT_BITS 10
#define TC_FGT_BITS 4
#define TC_FGF_BITS 5
+#define TC_SRI_BITS 10
union trap_config {
u64 val;
@@ -442,7 +609,8 @@ union trap_config {
unsigned long bit:6; /* Bit number */
unsigned long pol:1; /* Polarity */
unsigned long fgf:TC_FGF_BITS; /* Fine Grained Filter */
- unsigned long unused:37; /* Unused, should be zero */
+ unsigned long sri:TC_SRI_BITS; /* SysReg Index */
+ unsigned long unused:27; /* Unused, should be zero */
unsigned long mbz:1; /* Must Be Zero */
};
};
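
A sketch of the round-trip through the xarray, assuming the xa_mk_value()/xa_to_value() pairing used by get_trap_config() below. The mbz bit exists because xa_mk_value() shifts the value left by one, so a set bit 63 would be lost on insertion:

	union trap_config tc = { .cgt = CGT_HCR_TVM_TRVM };

	xa_store(&sr_forward_xa, encoding, xa_mk_value(tc.val), GFP_KERNEL);
	tc.val = xa_to_value(xa_load(&sr_forward_xa, encoding));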
@@ -454,6 +622,11 @@ struct encoding_to_trap_config {
const unsigned int line;
};
+/*
+ * WARNING: using ranges is a treacherous endeavour, as sysregs that
+ * are part of an architectural range are not necessarily contiguous
+ * in the [Op0,Op1,CRn,CRm,Ops] space. Tread carefully.
+ */
#define SR_RANGE_TRAP(sr_start, sr_end, trap_id) \
{ \
.encoding = sr_start, \
@@ -484,9 +657,9 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(SYS_CSSELR_EL1, CGT_HCR_TID2_TID4),
SR_RANGE_TRAP(SYS_ID_PFR0_EL1,
sys_reg(3, 0, 0, 7, 7), CGT_HCR_TID3),
- SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO),
- SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO),
- SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO),
+ SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC),
+ SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC),
+ SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC),
SR_RANGE_TRAP(sys_reg(3, 0, 11, 0, 0),
sys_reg(3, 0, 11, 15, 7), CGT_HCR_TIDCP),
SR_RANGE_TRAP(sys_reg(3, 1, 11, 0, 0),
@@ -619,6 +792,11 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(SYS_MAIR_EL1, CGT_HCR_TVM_TRVM),
SR_TRAP(SYS_AMAIR_EL1, CGT_HCR_TVM_TRVM),
SR_TRAP(SYS_CONTEXTIDR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_PIR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_PIRE0_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_POR_EL0, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_POR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_TCR2_EL1, CGT_HCR_TVM_TRVM_HCRX_TCR2En),
SR_TRAP(SYS_DC_ZVA, CGT_HCR_TDZ),
SR_TRAP(SYS_DC_GVA, CGT_HCR_TDZ),
SR_TRAP(SYS_DC_GZVA, CGT_HCR_TDZ),
@@ -722,17 +900,22 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SYS_CNTHP_CVAL_EL2, CGT_HCR_NV),
SR_RANGE_TRAP(SYS_CNTHV_TVAL_EL2,
SYS_CNTHV_CVAL_EL2, CGT_HCR_NV),
- /* All _EL02, _EL12 registers */
+ /* All _EL02, _EL12 registers up to CNTKCTL_EL12 */
SR_RANGE_TRAP(sys_reg(3, 5, 0, 0, 0),
sys_reg(3, 5, 10, 15, 7), CGT_HCR_NV),
SR_RANGE_TRAP(sys_reg(3, 5, 12, 0, 0),
- sys_reg(3, 5, 14, 15, 7), CGT_HCR_NV),
+ sys_reg(3, 5, 14, 1, 0), CGT_HCR_NV),
+ SR_TRAP(SYS_CNTP_CTL_EL02, CGT_CNTHCTL_EL1NVPCT),
+ SR_TRAP(SYS_CNTP_CVAL_EL02, CGT_CNTHCTL_EL1NVPCT),
+ SR_TRAP(SYS_CNTV_CTL_EL02, CGT_CNTHCTL_EL1NVVCT),
+ SR_TRAP(SYS_CNTV_CVAL_EL02, CGT_CNTHCTL_EL1NVVCT),
SR_TRAP(OP_AT_S1E2R, CGT_HCR_NV),
SR_TRAP(OP_AT_S1E2W, CGT_HCR_NV),
SR_TRAP(OP_AT_S12E1R, CGT_HCR_NV),
SR_TRAP(OP_AT_S12E1W, CGT_HCR_NV),
SR_TRAP(OP_AT_S12E0R, CGT_HCR_NV),
SR_TRAP(OP_AT_S12E0W, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S1E2A, CGT_HCR_NV),
SR_TRAP(OP_TLBI_IPAS2E1, CGT_HCR_NV),
SR_TRAP(OP_TLBI_RIPAS2E1, CGT_HCR_NV),
SR_TRAP(OP_TLBI_IPAS2LE1, CGT_HCR_NV),
@@ -814,6 +997,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(OP_AT_S1E0W, CGT_HCR_AT),
SR_TRAP(OP_AT_S1E1RP, CGT_HCR_AT),
SR_TRAP(OP_AT_S1E1WP, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E1A, CGT_HCR_AT),
SR_TRAP(SYS_ERXPFGF_EL1, CGT_HCR_nFIEN),
SR_TRAP(SYS_ERXPFGCTL_EL1, CGT_HCR_nFIEN),
SR_TRAP(SYS_ERXPFGCDN_EL1, CGT_HCR_nFIEN),
@@ -824,77 +1008,77 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(SYS_PMOVSCLR_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_PMCEID0_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_PMCEID1_EL0, CGT_MDCR_TPM),
- SR_TRAP(SYS_PMXEVTYPER_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMXEVTYPER_EL0, CGT_MDCR_TPM_HPMN),
SR_TRAP(SYS_PMSWINC_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_PMSELR_EL0, CGT_MDCR_TPM),
- SR_TRAP(SYS_PMXEVCNTR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMXEVCNTR_EL0, CGT_MDCR_TPM_HPMN),
SR_TRAP(SYS_PMCCNTR_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_PMUSERENR_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_PMINTENSET_EL1, CGT_MDCR_TPM),
SR_TRAP(SYS_PMINTENCLR_EL1, CGT_MDCR_TPM),
SR_TRAP(SYS_PMMIR_EL1, CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(0), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(1), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(2), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(3), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(4), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(5), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(6), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(7), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(8), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(9), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(10), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(11), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(12), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(13), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(14), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(15), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(16), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(17), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(18), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(19), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(20), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(21), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(22), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(23), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(24), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(25), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(26), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(27), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(28), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(29), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(30), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(0), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(1), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(2), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(3), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(4), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(5), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(6), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(7), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(8), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(9), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(10), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(11), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(12), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(13), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(14), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(15), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(16), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(17), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(18), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(19), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(20), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(21), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(22), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(23), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(24), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(25), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(26), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(27), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(28), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(29), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(30), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(0), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(1), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(2), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(3), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(4), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(5), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(6), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(7), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(8), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(9), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(10), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(11), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(12), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(13), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(14), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(15), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(16), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(17), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(18), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(19), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(20), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(21), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(22), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(23), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(24), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(25), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(26), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(27), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(28), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(29), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(30), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(0), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(1), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(2), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(3), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(4), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(5), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(6), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(7), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(8), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(9), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(10), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(11), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(12), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(13), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(14), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(15), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(16), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(17), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(18), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(19), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(20), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(21), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(22), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(23), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(24), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(25), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(26), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(27), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(28), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(29), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(30), CGT_MDCR_TPM_HPMN),
SR_TRAP(SYS_PMCCFILTR_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_MDCCSR_EL0, CGT_MDCR_TDCC_TDE_TDA),
SR_TRAP(SYS_MDCCINT_EL1, CGT_MDCR_TDCC_TDE_TDA),
@@ -997,27 +1181,101 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(SYS_TRBPTR_EL1, CGT_MDCR_E2TB),
SR_TRAP(SYS_TRBSR_EL1, CGT_MDCR_E2TB),
SR_TRAP(SYS_TRBTRG_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_CPACR_EL1, CGT_CPTR_TCPAC),
+ SR_TRAP(SYS_AMUSERENR_EL0, CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMCFGR_EL0, CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMCGCR_EL0, CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMCNTENCLR0_EL0, CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMCNTENCLR1_EL0, CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMCNTENSET0_EL0, CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMCNTENSET1_EL0, CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMCR_EL0, CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR0_EL0(0), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR0_EL0(1), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR0_EL0(2), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR0_EL0(3), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(0), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(1), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(2), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(3), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(4), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(5), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(6), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(7), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(8), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(9), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(10), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(11), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(12), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(13), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(14), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVCNTR1_EL0(15), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER0_EL0(0), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER0_EL0(1), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER0_EL0(2), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER0_EL0(3), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(0), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(1), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(2), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(3), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(4), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(5), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(6), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(7), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(8), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(9), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(10), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(11), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(12), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(13), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(14), CGT_CPTR_TAM),
+ SR_TRAP(SYS_AMEVTYPER1_EL0(15), CGT_CPTR_TAM),
+ /* op0=2, op1=1, and CRn<0b1000 */
+ SR_RANGE_TRAP(sys_reg(2, 1, 0, 0, 0),
+ sys_reg(2, 1, 7, 15, 7), CGT_CPTR_TTA),
SR_TRAP(SYS_CNTP_TVAL_EL0, CGT_CNTHCTL_EL1PTEN),
SR_TRAP(SYS_CNTP_CVAL_EL0, CGT_CNTHCTL_EL1PTEN),
SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN),
SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN),
SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN),
+ SR_TRAP(SYS_CNTV_TVAL_EL0, CGT_CNTHCTL_EL1TVT),
+ SR_TRAP(SYS_CNTV_CVAL_EL0, CGT_CNTHCTL_EL1TVT),
+ SR_TRAP(SYS_CNTV_CTL_EL0, CGT_CNTHCTL_EL1TVT),
+ SR_TRAP(SYS_CNTVCT_EL0, CGT_CNTHCTL_EL1TVCT),
+ SR_TRAP(SYS_CNTVCTSS_EL0, CGT_CNTHCTL_EL1TVCT),
+ SR_TRAP(SYS_FPMR, CGT_HCRX_EnFPM),
+ /*
+ * IMPDEF choice:
+ * We treat ICC_SRE_EL2.{SRE,Enable} and ICV_SRE_EL1.SRE as
+ * RAO/WI. We therefore never consider ICC_SRE_EL2.Enable for
+ * ICC_SRE_EL1 access, and always handle it locally.
+ */
+ SR_TRAP(SYS_ICC_AP0R0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_AP0R1_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_AP0R2_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_AP0R3_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_AP1R0_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_AP1R1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_AP1R2_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_AP1R3_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_BPR0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_BPR1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_CTLR_EL1, CGT_ICH_HCR_TC),
+ SR_TRAP(SYS_ICC_DIR_EL1, CGT_ICH_HCR_TC_TDIR),
+ SR_TRAP(SYS_ICC_EOIR0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_EOIR1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_HPPIR0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_HPPIR1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_IAR0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_IAR1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_IGRPEN0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_IGRPEN1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_PMR_EL1, CGT_ICH_HCR_TC),
+ SR_TRAP(SYS_ICC_RPR_EL1, CGT_ICH_HCR_TC),
};
static DEFINE_XARRAY(sr_forward_xa);
-enum fgt_group_id {
- __NO_FGT_GROUP__,
- HFGxTR_GROUP,
- HDFGRTR_GROUP,
- HDFGWTR_GROUP,
- HFGITR_GROUP,
- HAFGRTR_GROUP,
-
- /* Must be last */
- __NR_FGT_GROUP_IDS__
-};
-
enum fg_filter_id {
__NO_FGF__,
HCRX_FGTnXS,
@@ -1026,97 +1284,128 @@ enum fg_filter_id {
__NR_FG_FILTER_IDS__
};
-#define SR_FGF(sr, g, b, p, f) \
- { \
- .encoding = sr, \
- .end = sr, \
- .tc = { \
+#define __FGT(g, b, p, f) \
+ { \
.fgt = g ## _GROUP, \
.bit = g ## _EL2_ ## b ## _SHIFT, \
.pol = p, \
.fgf = f, \
- }, \
+ }
+
+#define FGT(g, b, p) __FGT(g, b, p, __NO_FGF__)
+
+/*
+ * See the warning next to SR_RANGE_TRAP(), and apply the same
+ * level of caution.
+ */
+#define SR_FGF_RANGE(sr, e, g, b, p, f) \
+ { \
+ .encoding = sr, \
+ .end = e, \
+ .tc = __FGT(g, b, p, f), \
.line = __LINE__, \
}
-#define SR_FGT(sr, g, b, p) SR_FGF(sr, g, b, p, __NO_FGF__)
+#define SR_FGF(sr, g, b, p, f) SR_FGF_RANGE(sr, sr, g, b, p, f)
+#define SR_FGT(sr, g, b, p) SR_FGF_RANGE(sr, sr, g, b, p, __NO_FGF__)
+#define SR_FGT_RANGE(sr, end, g, b, p) \
+ SR_FGF_RANGE(sr, end, g, b, p, __NO_FGF__)
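
For reference, a ranged entry expands to a single table element whose [encoding, end] span is walked at init time; e.g. (illustrative macro expansion):

	SR_FGT_RANGE(SYS_PMEVCNTRn_EL0(0), SYS_PMEVCNTRn_EL0(30),
		     HDFGRTR, PMEVCNTRn_EL0, 1)
	/*
	 * == { .encoding = SYS_PMEVCNTRn_EL0(0),
	 *	.end	  = SYS_PMEVCNTRn_EL0(30),
	 *	.tc	  = { .fgt = HDFGRTR_GROUP,
	 *		      .bit = HDFGRTR_EL2_PMEVCNTRn_EL0_SHIFT,
	 *		      .pol = 1, .fgf = __NO_FGF__ },
	 *	.line	  = __LINE__, }
	 */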
static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
/* HFGRTR_EL2, HFGWTR_EL2 */
- SR_FGT(SYS_AMAIR2_EL1, HFGxTR, nAMAIR2_EL1, 0),
- SR_FGT(SYS_MAIR2_EL1, HFGxTR, nMAIR2_EL1, 0),
- SR_FGT(SYS_S2POR_EL1, HFGxTR, nS2POR_EL1, 0),
- SR_FGT(SYS_POR_EL1, HFGxTR, nPOR_EL1, 0),
- SR_FGT(SYS_POR_EL0, HFGxTR, nPOR_EL0, 0),
- SR_FGT(SYS_PIR_EL1, HFGxTR, nPIR_EL1, 0),
- SR_FGT(SYS_PIRE0_EL1, HFGxTR, nPIRE0_EL1, 0),
- SR_FGT(SYS_RCWMASK_EL1, HFGxTR, nRCWMASK_EL1, 0),
- SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0),
- SR_FGT(SYS_SMPRI_EL1, HFGxTR, nSMPRI_EL1, 0),
- SR_FGT(SYS_GCSCR_EL1, HFGxTR, nGCS_EL1, 0),
- SR_FGT(SYS_GCSPR_EL1, HFGxTR, nGCS_EL1, 0),
- SR_FGT(SYS_GCSCRE0_EL1, HFGxTR, nGCS_EL0, 0),
- SR_FGT(SYS_GCSPR_EL0, HFGxTR, nGCS_EL0, 0),
- SR_FGT(SYS_ACCDATA_EL1, HFGxTR, nACCDATA_EL1, 0),
- SR_FGT(SYS_ERXADDR_EL1, HFGxTR, ERXADDR_EL1, 1),
- SR_FGT(SYS_ERXPFGCDN_EL1, HFGxTR, ERXPFGCDN_EL1, 1),
- SR_FGT(SYS_ERXPFGCTL_EL1, HFGxTR, ERXPFGCTL_EL1, 1),
- SR_FGT(SYS_ERXPFGF_EL1, HFGxTR, ERXPFGF_EL1, 1),
- SR_FGT(SYS_ERXMISC0_EL1, HFGxTR, ERXMISCn_EL1, 1),
- SR_FGT(SYS_ERXMISC1_EL1, HFGxTR, ERXMISCn_EL1, 1),
- SR_FGT(SYS_ERXMISC2_EL1, HFGxTR, ERXMISCn_EL1, 1),
- SR_FGT(SYS_ERXMISC3_EL1, HFGxTR, ERXMISCn_EL1, 1),
- SR_FGT(SYS_ERXSTATUS_EL1, HFGxTR, ERXSTATUS_EL1, 1),
- SR_FGT(SYS_ERXCTLR_EL1, HFGxTR, ERXCTLR_EL1, 1),
- SR_FGT(SYS_ERXFR_EL1, HFGxTR, ERXFR_EL1, 1),
- SR_FGT(SYS_ERRSELR_EL1, HFGxTR, ERRSELR_EL1, 1),
- SR_FGT(SYS_ERRIDR_EL1, HFGxTR, ERRIDR_EL1, 1),
- SR_FGT(SYS_ICC_IGRPEN0_EL1, HFGxTR, ICC_IGRPENn_EL1, 1),
- SR_FGT(SYS_ICC_IGRPEN1_EL1, HFGxTR, ICC_IGRPENn_EL1, 1),
- SR_FGT(SYS_VBAR_EL1, HFGxTR, VBAR_EL1, 1),
- SR_FGT(SYS_TTBR1_EL1, HFGxTR, TTBR1_EL1, 1),
- SR_FGT(SYS_TTBR0_EL1, HFGxTR, TTBR0_EL1, 1),
- SR_FGT(SYS_TPIDR_EL0, HFGxTR, TPIDR_EL0, 1),
- SR_FGT(SYS_TPIDRRO_EL0, HFGxTR, TPIDRRO_EL0, 1),
- SR_FGT(SYS_TPIDR_EL1, HFGxTR, TPIDR_EL1, 1),
- SR_FGT(SYS_TCR_EL1, HFGxTR, TCR_EL1, 1),
- SR_FGT(SYS_SCXTNUM_EL0, HFGxTR, SCXTNUM_EL0, 1),
- SR_FGT(SYS_SCXTNUM_EL1, HFGxTR, SCXTNUM_EL1, 1),
- SR_FGT(SYS_SCTLR_EL1, HFGxTR, SCTLR_EL1, 1),
- SR_FGT(SYS_REVIDR_EL1, HFGxTR, REVIDR_EL1, 1),
- SR_FGT(SYS_PAR_EL1, HFGxTR, PAR_EL1, 1),
- SR_FGT(SYS_MPIDR_EL1, HFGxTR, MPIDR_EL1, 1),
- SR_FGT(SYS_MIDR_EL1, HFGxTR, MIDR_EL1, 1),
- SR_FGT(SYS_MAIR_EL1, HFGxTR, MAIR_EL1, 1),
- SR_FGT(SYS_LORSA_EL1, HFGxTR, LORSA_EL1, 1),
- SR_FGT(SYS_LORN_EL1, HFGxTR, LORN_EL1, 1),
- SR_FGT(SYS_LORID_EL1, HFGxTR, LORID_EL1, 1),
- SR_FGT(SYS_LOREA_EL1, HFGxTR, LOREA_EL1, 1),
- SR_FGT(SYS_LORC_EL1, HFGxTR, LORC_EL1, 1),
- SR_FGT(SYS_ISR_EL1, HFGxTR, ISR_EL1, 1),
- SR_FGT(SYS_FAR_EL1, HFGxTR, FAR_EL1, 1),
- SR_FGT(SYS_ESR_EL1, HFGxTR, ESR_EL1, 1),
- SR_FGT(SYS_DCZID_EL0, HFGxTR, DCZID_EL0, 1),
- SR_FGT(SYS_CTR_EL0, HFGxTR, CTR_EL0, 1),
- SR_FGT(SYS_CSSELR_EL1, HFGxTR, CSSELR_EL1, 1),
- SR_FGT(SYS_CPACR_EL1, HFGxTR, CPACR_EL1, 1),
- SR_FGT(SYS_CONTEXTIDR_EL1, HFGxTR, CONTEXTIDR_EL1, 1),
- SR_FGT(SYS_CLIDR_EL1, HFGxTR, CLIDR_EL1, 1),
- SR_FGT(SYS_CCSIDR_EL1, HFGxTR, CCSIDR_EL1, 1),
- SR_FGT(SYS_APIBKEYLO_EL1, HFGxTR, APIBKey, 1),
- SR_FGT(SYS_APIBKEYHI_EL1, HFGxTR, APIBKey, 1),
- SR_FGT(SYS_APIAKEYLO_EL1, HFGxTR, APIAKey, 1),
- SR_FGT(SYS_APIAKEYHI_EL1, HFGxTR, APIAKey, 1),
- SR_FGT(SYS_APGAKEYLO_EL1, HFGxTR, APGAKey, 1),
- SR_FGT(SYS_APGAKEYHI_EL1, HFGxTR, APGAKey, 1),
- SR_FGT(SYS_APDBKEYLO_EL1, HFGxTR, APDBKey, 1),
- SR_FGT(SYS_APDBKEYHI_EL1, HFGxTR, APDBKey, 1),
- SR_FGT(SYS_APDAKEYLO_EL1, HFGxTR, APDAKey, 1),
- SR_FGT(SYS_APDAKEYHI_EL1, HFGxTR, APDAKey, 1),
- SR_FGT(SYS_AMAIR_EL1, HFGxTR, AMAIR_EL1, 1),
- SR_FGT(SYS_AIDR_EL1, HFGxTR, AIDR_EL1, 1),
- SR_FGT(SYS_AFSR1_EL1, HFGxTR, AFSR1_EL1, 1),
- SR_FGT(SYS_AFSR0_EL1, HFGxTR, AFSR0_EL1, 1),
+ SR_FGT(SYS_AMAIR2_EL1, HFGRTR, nAMAIR2_EL1, 0),
+ SR_FGT(SYS_MAIR2_EL1, HFGRTR, nMAIR2_EL1, 0),
+ SR_FGT(SYS_S2POR_EL1, HFGRTR, nS2POR_EL1, 0),
+ SR_FGT(SYS_POR_EL1, HFGRTR, nPOR_EL1, 0),
+ SR_FGT(SYS_POR_EL0, HFGRTR, nPOR_EL0, 0),
+ SR_FGT(SYS_PIR_EL1, HFGRTR, nPIR_EL1, 0),
+ SR_FGT(SYS_PIRE0_EL1, HFGRTR, nPIRE0_EL1, 0),
+ SR_FGT(SYS_RCWMASK_EL1, HFGRTR, nRCWMASK_EL1, 0),
+ SR_FGT(SYS_TPIDR2_EL0, HFGRTR, nTPIDR2_EL0, 0),
+ SR_FGT(SYS_SMPRI_EL1, HFGRTR, nSMPRI_EL1, 0),
+ SR_FGT(SYS_GCSCR_EL1, HFGRTR, nGCS_EL1, 0),
+ SR_FGT(SYS_GCSPR_EL1, HFGRTR, nGCS_EL1, 0),
+ SR_FGT(SYS_GCSCRE0_EL1, HFGRTR, nGCS_EL0, 0),
+ SR_FGT(SYS_GCSPR_EL0, HFGRTR, nGCS_EL0, 0),
+ SR_FGT(SYS_ACCDATA_EL1, HFGRTR, nACCDATA_EL1, 0),
+ SR_FGT(SYS_ERXADDR_EL1, HFGRTR, ERXADDR_EL1, 1),
+ SR_FGT(SYS_ERXPFGCDN_EL1, HFGRTR, ERXPFGCDN_EL1, 1),
+ SR_FGT(SYS_ERXPFGCTL_EL1, HFGRTR, ERXPFGCTL_EL1, 1),
+ SR_FGT(SYS_ERXPFGF_EL1, HFGRTR, ERXPFGF_EL1, 1),
+ SR_FGT(SYS_ERXMISC0_EL1, HFGRTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC1_EL1, HFGRTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC2_EL1, HFGRTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC3_EL1, HFGRTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXSTATUS_EL1, HFGRTR, ERXSTATUS_EL1, 1),
+ SR_FGT(SYS_ERXCTLR_EL1, HFGRTR, ERXCTLR_EL1, 1),
+ SR_FGT(SYS_ERXFR_EL1, HFGRTR, ERXFR_EL1, 1),
+ SR_FGT(SYS_ERRSELR_EL1, HFGRTR, ERRSELR_EL1, 1),
+ SR_FGT(SYS_ERRIDR_EL1, HFGRTR, ERRIDR_EL1, 1),
+ SR_FGT(SYS_ICC_IGRPEN0_EL1, HFGRTR, ICC_IGRPENn_EL1, 1),
+ SR_FGT(SYS_ICC_IGRPEN1_EL1, HFGRTR, ICC_IGRPENn_EL1, 1),
+ SR_FGT(SYS_VBAR_EL1, HFGRTR, VBAR_EL1, 1),
+ SR_FGT(SYS_TTBR1_EL1, HFGRTR, TTBR1_EL1, 1),
+ SR_FGT(SYS_TTBR0_EL1, HFGRTR, TTBR0_EL1, 1),
+ SR_FGT(SYS_TPIDR_EL0, HFGRTR, TPIDR_EL0, 1),
+ SR_FGT(SYS_TPIDRRO_EL0, HFGRTR, TPIDRRO_EL0, 1),
+ SR_FGT(SYS_TPIDR_EL1, HFGRTR, TPIDR_EL1, 1),
+ SR_FGT(SYS_TCR_EL1, HFGRTR, TCR_EL1, 1),
+ SR_FGT(SYS_TCR2_EL1, HFGRTR, TCR_EL1, 1),
+ SR_FGT(SYS_SCXTNUM_EL0, HFGRTR, SCXTNUM_EL0, 1),
+ SR_FGT(SYS_SCXTNUM_EL1, HFGRTR, SCXTNUM_EL1, 1),
+ SR_FGT(SYS_SCTLR_EL1, HFGRTR, SCTLR_EL1, 1),
+ SR_FGT(SYS_REVIDR_EL1, HFGRTR, REVIDR_EL1, 1),
+ SR_FGT(SYS_PAR_EL1, HFGRTR, PAR_EL1, 1),
+ SR_FGT(SYS_MPIDR_EL1, HFGRTR, MPIDR_EL1, 1),
+ SR_FGT(SYS_MIDR_EL1, HFGRTR, MIDR_EL1, 1),
+ SR_FGT(SYS_MAIR_EL1, HFGRTR, MAIR_EL1, 1),
+ SR_FGT(SYS_LORSA_EL1, HFGRTR, LORSA_EL1, 1),
+ SR_FGT(SYS_LORN_EL1, HFGRTR, LORN_EL1, 1),
+ SR_FGT(SYS_LORID_EL1, HFGRTR, LORID_EL1, 1),
+ SR_FGT(SYS_LOREA_EL1, HFGRTR, LOREA_EL1, 1),
+ SR_FGT(SYS_LORC_EL1, HFGRTR, LORC_EL1, 1),
+ SR_FGT(SYS_ISR_EL1, HFGRTR, ISR_EL1, 1),
+ SR_FGT(SYS_FAR_EL1, HFGRTR, FAR_EL1, 1),
+ SR_FGT(SYS_ESR_EL1, HFGRTR, ESR_EL1, 1),
+ SR_FGT(SYS_DCZID_EL0, HFGRTR, DCZID_EL0, 1),
+ SR_FGT(SYS_CTR_EL0, HFGRTR, CTR_EL0, 1),
+ SR_FGT(SYS_CSSELR_EL1, HFGRTR, CSSELR_EL1, 1),
+ SR_FGT(SYS_CPACR_EL1, HFGRTR, CPACR_EL1, 1),
+ SR_FGT(SYS_CONTEXTIDR_EL1, HFGRTR, CONTEXTIDR_EL1, 1),
+ SR_FGT(SYS_CLIDR_EL1, HFGRTR, CLIDR_EL1, 1),
+ SR_FGT(SYS_CCSIDR_EL1, HFGRTR, CCSIDR_EL1, 1),
+ SR_FGT(SYS_APIBKEYLO_EL1, HFGRTR, APIBKey, 1),
+ SR_FGT(SYS_APIBKEYHI_EL1, HFGRTR, APIBKey, 1),
+ SR_FGT(SYS_APIAKEYLO_EL1, HFGRTR, APIAKey, 1),
+ SR_FGT(SYS_APIAKEYHI_EL1, HFGRTR, APIAKey, 1),
+ SR_FGT(SYS_APGAKEYLO_EL1, HFGRTR, APGAKey, 1),
+ SR_FGT(SYS_APGAKEYHI_EL1, HFGRTR, APGAKey, 1),
+ SR_FGT(SYS_APDBKEYLO_EL1, HFGRTR, APDBKey, 1),
+ SR_FGT(SYS_APDBKEYHI_EL1, HFGRTR, APDBKey, 1),
+ SR_FGT(SYS_APDAKEYLO_EL1, HFGRTR, APDAKey, 1),
+ SR_FGT(SYS_APDAKEYHI_EL1, HFGRTR, APDAKey, 1),
+ SR_FGT(SYS_AMAIR_EL1, HFGRTR, AMAIR_EL1, 1),
+ SR_FGT(SYS_AIDR_EL1, HFGRTR, AIDR_EL1, 1),
+ SR_FGT(SYS_AFSR1_EL1, HFGRTR, AFSR1_EL1, 1),
+ SR_FGT(SYS_AFSR0_EL1, HFGRTR, AFSR0_EL1, 1),
+
+ /* HFGRTR2_EL2, HFGWTR2_EL2 */
+ SR_FGT(SYS_ACTLRALIAS_EL1, HFGRTR2, nACTLRALIAS_EL1, 0),
+ SR_FGT(SYS_ACTLRMASK_EL1, HFGRTR2, nACTLRMASK_EL1, 0),
+ SR_FGT(SYS_CPACRALIAS_EL1, HFGRTR2, nCPACRALIAS_EL1, 0),
+ SR_FGT(SYS_CPACRMASK_EL1, HFGRTR2, nCPACRMASK_EL1, 0),
+ SR_FGT(SYS_PFAR_EL1, HFGRTR2, nPFAR_EL1, 0),
+ SR_FGT(SYS_RCWSMASK_EL1, HFGRTR2, nRCWSMASK_EL1, 0),
+ SR_FGT(SYS_SCTLR2ALIAS_EL1, HFGRTR2, nSCTLRALIAS2_EL1, 0),
+ SR_FGT(SYS_SCTLR2MASK_EL1, HFGRTR2, nSCTLR2MASK_EL1, 0),
+ SR_FGT(SYS_SCTLRALIAS_EL1, HFGRTR2, nSCTLRALIAS_EL1, 0),
+ SR_FGT(SYS_SCTLRMASK_EL1, HFGRTR2, nSCTLRMASK_EL1, 0),
+ SR_FGT(SYS_TCR2ALIAS_EL1, HFGRTR2, nTCR2ALIAS_EL1, 0),
+ SR_FGT(SYS_TCR2MASK_EL1, HFGRTR2, nTCR2MASK_EL1, 0),
+ SR_FGT(SYS_TCRALIAS_EL1, HFGRTR2, nTCRALIAS_EL1, 0),
+ SR_FGT(SYS_TCRMASK_EL1, HFGRTR2, nTCRMASK_EL1, 0),
+ SR_FGT(SYS_ERXGSR_EL1, HFGRTR2, nERXGSR_EL1, 0),
+
/* HFGITR_EL2 */
SR_FGT(OP_AT_S1E1A, HFGITR, ATS1E1A, 1),
SR_FGT(OP_COSP_RCTX, HFGITR, COSPRCTX, 1),
@@ -1226,6 +1515,11 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
SR_FGT(SYS_IC_IVAU, HFGITR, ICIVAU, 1),
SR_FGT(SYS_IC_IALLU, HFGITR, ICIALLU, 1),
SR_FGT(SYS_IC_IALLUIS, HFGITR, ICIALLUIS, 1),
+
+ /* HFGITR2_EL2 */
+ SR_FGT(SYS_DC_CIGDVAPS, HFGITR2, nDCCIVAPS, 0),
+ SR_FGT(SYS_DC_CIVAPS, HFGITR2, nDCCIVAPS, 0),
+
/* HDFGRTR_EL2 */
SR_FGT(SYS_PMBIDR_EL1, HDFGRTR, PMBIDR_EL1, 1),
SR_FGT(SYS_PMSNEVFR_EL1, HDFGRTR, nPMSNEVFR_EL1, 0),
@@ -1535,68 +1829,12 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
SR_FGT(SYS_PMCNTENSET_EL0, HDFGRTR, PMCNTEN, 1),
SR_FGT(SYS_PMCCNTR_EL0, HDFGRTR, PMCCNTR_EL0, 1),
SR_FGT(SYS_PMCCFILTR_EL0, HDFGRTR, PMCCFILTR_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(0), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(1), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(2), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(3), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(4), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(5), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(6), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(7), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(8), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(9), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(10), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(11), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(12), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(13), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(14), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(15), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(16), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(17), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(18), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(19), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(20), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(21), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(22), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(23), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(24), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(25), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(26), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(27), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(28), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(29), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVTYPERn_EL0(30), HDFGRTR, PMEVTYPERn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(0), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(1), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(2), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(3), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(4), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(5), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(6), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(7), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(8), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(9), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(10), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(11), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(12), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(13), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(14), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(15), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(16), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(17), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(18), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(19), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(20), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(21), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(22), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(23), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(24), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(25), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(26), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(27), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(28), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(29), HDFGRTR, PMEVCNTRn_EL0, 1),
- SR_FGT(SYS_PMEVCNTRn_EL0(30), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT_RANGE(SYS_PMEVTYPERn_EL0(0),
+ SYS_PMEVTYPERn_EL0(30),
+ HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT_RANGE(SYS_PMEVCNTRn_EL0(0),
+ SYS_PMEVCNTRn_EL0(30),
+ HDFGRTR, PMEVCNTRn_EL0, 1),
SR_FGT(SYS_OSDLR_EL1, HDFGRTR, OSDLR_EL1, 1),
SR_FGT(SYS_OSECCR_EL1, HDFGRTR, OSECCR_EL1, 1),
SR_FGT(SYS_OSLSR_EL1, HDFGRTR, OSLSR_EL1, 1),
@@ -1674,6 +1912,59 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
SR_FGT(SYS_DBGBCRn_EL1(13), HDFGRTR, DBGBCRn_EL1, 1),
SR_FGT(SYS_DBGBCRn_EL1(14), HDFGRTR, DBGBCRn_EL1, 1),
SR_FGT(SYS_DBGBCRn_EL1(15), HDFGRTR, DBGBCRn_EL1, 1),
+
+ /* HDFGRTR2_EL2 */
+ SR_FGT(SYS_MDSELR_EL1, HDFGRTR2, nMDSELR_EL1, 0),
+ SR_FGT(SYS_MDSTEPOP_EL1, HDFGRTR2, nMDSTEPOP_EL1, 0),
+ SR_FGT(SYS_PMCCNTSVR_EL1, HDFGRTR2, nPMSSDATA, 0),
+ SR_FGT_RANGE(SYS_PMEVCNTSVRn_EL1(0),
+ SYS_PMEVCNTSVRn_EL1(30),
+ HDFGRTR2, nPMSSDATA, 0),
+ SR_FGT(SYS_PMICNTSVR_EL1, HDFGRTR2, nPMSSDATA, 0),
+ SR_FGT(SYS_PMECR_EL1, HDFGRTR2, nPMECR_EL1, 0),
+ SR_FGT(SYS_PMIAR_EL1, HDFGRTR2, nPMIAR_EL1, 0),
+ SR_FGT(SYS_PMICFILTR_EL0, HDFGRTR2, nPMICFILTR_EL0, 0),
+ SR_FGT(SYS_PMICNTR_EL0, HDFGRTR2, nPMICNTR_EL0, 0),
+ SR_FGT(SYS_PMSSCR_EL1, HDFGRTR2, nPMSSCR_EL1, 0),
+ SR_FGT(SYS_PMUACR_EL1, HDFGRTR2, nPMUACR_EL1, 0),
+ SR_FGT(SYS_SPMACCESSR_EL1, HDFGRTR2, nSPMACCESSR_EL1, 0),
+ SR_FGT(SYS_SPMCFGR_EL1, HDFGRTR2, nSPMID, 0),
+ SR_FGT(SYS_SPMDEVARCH_EL1, HDFGRTR2, nSPMID, 0),
+ SR_FGT(SYS_SPMCGCRn_EL1(0), HDFGRTR2, nSPMID, 0),
+ SR_FGT(SYS_SPMCGCRn_EL1(1), HDFGRTR2, nSPMID, 0),
+ SR_FGT(SYS_SPMIIDR_EL1, HDFGRTR2, nSPMID, 0),
+ SR_FGT(SYS_SPMCNTENCLR_EL0, HDFGRTR2, nSPMCNTEN, 0),
+ SR_FGT(SYS_SPMCNTENSET_EL0, HDFGRTR2, nSPMCNTEN, 0),
+ SR_FGT(SYS_SPMCR_EL0, HDFGRTR2, nSPMCR_EL0, 0),
+ SR_FGT(SYS_SPMDEVAFF_EL1, HDFGRTR2, nSPMDEVAFF_EL1, 0),
+ /*
+ * We have up to 64 of these registers in ranges of 16, banked via
+ * SPMSELR_EL0.BANK. We're only concerned with the accessors here,
+ * not the architectural registers.
+ */
+ SR_FGT_RANGE(SYS_SPMEVCNTRn_EL0(0),
+ SYS_SPMEVCNTRn_EL0(15),
+ HDFGRTR2, nSPMEVCNTRn_EL0, 0),
+ SR_FGT_RANGE(SYS_SPMEVFILT2Rn_EL0(0),
+ SYS_SPMEVFILT2Rn_EL0(15),
+ HDFGRTR2, nSPMEVTYPERn_EL0, 0),
+ SR_FGT_RANGE(SYS_SPMEVFILTRn_EL0(0),
+ SYS_SPMEVFILTRn_EL0(15),
+ HDFGRTR2, nSPMEVTYPERn_EL0, 0),
+ SR_FGT_RANGE(SYS_SPMEVTYPERn_EL0(0),
+ SYS_SPMEVTYPERn_EL0(15),
+ HDFGRTR2, nSPMEVTYPERn_EL0, 0),
+ SR_FGT(SYS_SPMINTENCLR_EL1, HDFGRTR2, nSPMINTEN, 0),
+ SR_FGT(SYS_SPMINTENSET_EL1, HDFGRTR2, nSPMINTEN, 0),
+ SR_FGT(SYS_SPMOVSCLR_EL0, HDFGRTR2, nSPMOVS, 0),
+ SR_FGT(SYS_SPMOVSSET_EL0, HDFGRTR2, nSPMOVS, 0),
+ SR_FGT(SYS_SPMSCR_EL1, HDFGRTR2, nSPMSCR_EL1, 0),
+ SR_FGT(SYS_SPMSELR_EL0, HDFGRTR2, nSPMSELR_EL0, 0),
+ SR_FGT(SYS_TRCITECR_EL1, HDFGRTR2, nTRCITECR_EL1, 0),
+ SR_FGT(SYS_PMBMAR_EL1, HDFGRTR2, nPMBMAR_EL1, 0),
+ SR_FGT(SYS_PMSDSFR_EL1, HDFGRTR2, nPMSDSFR_EL1, 0),
+ SR_FGT(SYS_TRBMPAM_EL1, HDFGRTR2, nTRBMPAM_EL1, 0),
+
/*
* HDFGWTR_EL2
*
@@ -1684,12 +1975,19 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
* read-side mappings, and only the write-side mappings that
* differ from the read side, and the trap handler will pick
* the correct shadow register based on the access type.
+ *
+ * Same model applies to the FEAT_FGT2 registers.
*/
SR_FGT(SYS_TRFCR_EL1, HDFGWTR, TRFCR_EL1, 1),
SR_FGT(SYS_TRCOSLAR, HDFGWTR, TRCOSLAR, 1),
SR_FGT(SYS_PMCR_EL0, HDFGWTR, PMCR_EL0, 1),
SR_FGT(SYS_PMSWINC_EL0, HDFGWTR, PMSWINC_EL0, 1),
SR_FGT(SYS_OSLAR_EL1, HDFGWTR, OSLAR_EL1, 1),
+
+ /* HDFGWTR2_EL2 */
+ SR_FGT(SYS_PMZR_EL0, HDFGWTR2, nPMZR_EL0, 0),
+ SR_FGT(SYS_SPMZR_EL0, HDFGWTR2, nSPMEVCNTRn_EL0, 0),
+
/*
* HAFGRTR_EL2
*/
@@ -1735,6 +2033,20 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
SR_FGT(SYS_AMEVCNTR0_EL0(0), HAFGRTR, AMEVCNTR00_EL0, 1),
};
+/*
+ * Additional FGTs that do not fire with ESR_EL2.EC==0x18. This table
+ * isn't used for exception routing, but only as a promise that the
+ * trap is handled somewhere else.
+ */
+static const union trap_config non_0x18_fgt[] __initconst = {
+ FGT(HFGITR, PSBCSYNC, 1),
+ FGT(HFGITR, nGCSSTR_EL1, 0),
+ FGT(HFGITR, SVC_EL1, 1),
+ FGT(HFGITR, SVC_EL0, 1),
+ FGT(HFGITR, ERET, 1),
+ FGT(HFGITR2, TSBCSYNC, 1),
+};
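
FGT() expands to a bare trap_config with no encoding attached, which is all the mask aggregation needs; for instance FGT(HFGITR, ERET, 1) becomes (illustrative expansion):

	{ .fgt = HFGITR_GROUP, .bit = HFGITR_EL2_ERET_SHIFT,
	  .pol = 1, .fgf = __NO_FGF__ }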
+
static union trap_config get_trap_config(u32 sysreg)
{
return (union trap_config) {
@@ -1757,6 +2069,152 @@ static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc,
err);
}
+static u32 encoding_next(u32 encoding)
+{
+ u8 op0, op1, crn, crm, op2;
+
+ op0 = sys_reg_Op0(encoding);
+ op1 = sys_reg_Op1(encoding);
+ crn = sys_reg_CRn(encoding);
+ crm = sys_reg_CRm(encoding);
+ op2 = sys_reg_Op2(encoding);
+
+ if (op2 < Op2_mask)
+ return sys_reg(op0, op1, crn, crm, op2 + 1);
+ if (crm < CRm_mask)
+ return sys_reg(op0, op1, crn, crm + 1, 0);
+ if (crn < CRn_mask)
+ return sys_reg(op0, op1, crn + 1, 0, 0);
+ if (op1 < Op1_mask)
+ return sys_reg(op0, op1 + 1, 0, 0, 0);
+
+ return sys_reg(op0 + 1, 0, 0, 0, 0);
+}
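
encoding_next() walks the (Op0, Op1, CRn, CRm, Op2) space with ripple-carry semantics, so a range iteration visits every architectural encoding rather than every raw integer value. For instance (illustrative):

	/* Op2 and CRm are both saturated, so the increment carries into CRn */
	encoding_next(sys_reg(3, 0, 10, 15, 7)) == sys_reg(3, 0, 11, 0, 0);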
+
+#define FGT_MASKS(__n, __m) \
+ struct fgt_masks __n = { .str = #__m, .res0 = __m, }
+
+FGT_MASKS(hfgrtr_masks, HFGRTR_EL2_RES0);
+FGT_MASKS(hfgwtr_masks, HFGWTR_EL2_RES0);
+FGT_MASKS(hfgitr_masks, HFGITR_EL2_RES0);
+FGT_MASKS(hdfgrtr_masks, HDFGRTR_EL2_RES0);
+FGT_MASKS(hdfgwtr_masks, HDFGWTR_EL2_RES0);
+FGT_MASKS(hafgrtr_masks, HAFGRTR_EL2_RES0);
+FGT_MASKS(hfgrtr2_masks, HFGRTR2_EL2_RES0);
+FGT_MASKS(hfgwtr2_masks, HFGWTR2_EL2_RES0);
+FGT_MASKS(hfgitr2_masks, HFGITR2_EL2_RES0);
+FGT_MASKS(hdfgrtr2_masks, HDFGRTR2_EL2_RES0);
+FGT_MASKS(hdfgwtr2_masks, HDFGWTR2_EL2_RES0);
+
+static __init bool aggregate_fgt(union trap_config tc)
+{
+ struct fgt_masks *rmasks, *wmasks;
+
+ switch (tc.fgt) {
+ case HFGRTR_GROUP:
+ rmasks = &hfgrtr_masks;
+ wmasks = &hfgwtr_masks;
+ break;
+ case HDFGRTR_GROUP:
+ rmasks = &hdfgrtr_masks;
+ wmasks = &hdfgwtr_masks;
+ break;
+ case HAFGRTR_GROUP:
+ rmasks = &hafgrtr_masks;
+ wmasks = NULL;
+ break;
+ case HFGITR_GROUP:
+ rmasks = &hfgitr_masks;
+ wmasks = NULL;
+ break;
+ case HFGRTR2_GROUP:
+ rmasks = &hfgrtr2_masks;
+ wmasks = &hfgwtr2_masks;
+ break;
+ case HDFGRTR2_GROUP:
+ rmasks = &hdfgrtr2_masks;
+ wmasks = &hdfgwtr2_masks;
+ break;
+ case HFGITR2_GROUP:
+ rmasks = &hfgitr2_masks;
+ wmasks = NULL;
+ break;
+ }
+
+ /*
+ * A bit can be reserved in either the R or W register, but
+ * not both.
+ */
+ if ((BIT(tc.bit) & rmasks->res0) &&
+ (!wmasks || (BIT(tc.bit) & wmasks->res0)))
+ return false;
+
+ if (tc.pol)
+ rmasks->mask |= BIT(tc.bit) & ~rmasks->res0;
+ else
+ rmasks->nmask |= BIT(tc.bit) & ~rmasks->res0;
+
+ if (wmasks) {
+ if (tc.pol)
+ wmasks->mask |= BIT(tc.bit) & ~wmasks->res0;
+ else
+ wmasks->nmask |= BIT(tc.bit) & ~wmasks->res0;
+ }
+
+ return true;
+}
+
+static __init int check_fgt_masks(struct fgt_masks *masks)
+{
+ unsigned long duplicate = masks->mask & masks->nmask;
+ u64 res0 = masks->res0;
+ int ret = 0;
+
+ if (duplicate) {
+ int i;
+
+ for_each_set_bit(i, &duplicate, 64) {
+ kvm_err("%s[%d] bit has both polarities\n",
+ masks->str, i);
+ }
+
+ ret = -EINVAL;
+ }
+
+ masks->res0 = ~(masks->mask | masks->nmask);
+ if (masks->res0 != res0)
+ kvm_info("Implicit %s = %016llx, expecting %016llx\n",
+ masks->str, masks->res0, res0);
+
+ return ret;
+}
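
Net effect of the aggregation and check, summarised as a sketch of the invariant being enforced:

	/*
	 * After all table entries have been aggregated:
	 *	mask  - bits that trap when 1 (positive polarity)
	 *	nmask - bits that trap when 0 (negative polarity)
	 * mask & nmask must be empty (no bit with both polarities),
	 * and ~(mask | nmask) must match the generated RES0 mask.
	 */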
+
+static __init int check_all_fgt_masks(int ret)
+{
+ static struct fgt_masks * const masks[] __initconst = {
+ &hfgrtr_masks,
+ &hfgwtr_masks,
+ &hfgitr_masks,
+ &hdfgrtr_masks,
+ &hdfgwtr_masks,
+ &hafgrtr_masks,
+ &hfgrtr2_masks,
+ &hfgwtr2_masks,
+ &hfgitr2_masks,
+ &hdfgrtr2_masks,
+ &hdfgwtr2_masks,
+ };
+ int err = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(masks); i++)
+ err |= check_fgt_masks(masks[i]);
+
+ return ret ?: err;
+}
+
+#define for_each_encoding_in(__x, __s, __e) \
+ for (u32 __x = __s; __x <= __e; __x = encoding_next(__x))
+
int __init populate_nv_trap_config(void)
{
int ret = 0;
@@ -1765,6 +2223,7 @@ int __init populate_nv_trap_config(void)
BUILD_BUG_ON(__NR_CGT_GROUP_IDS__ > BIT(TC_CGT_BITS));
BUILD_BUG_ON(__NR_FGT_GROUP_IDS__ > BIT(TC_FGT_BITS));
BUILD_BUG_ON(__NR_FG_FILTER_IDS__ > BIT(TC_FGF_BITS));
+ BUILD_BUG_ON(__HCRX_EL2_MASK & __HCRX_EL2_nMASK);
for (int i = 0; i < ARRAY_SIZE(encoding_to_cgt); i++) {
const struct encoding_to_trap_config *cgt = &encoding_to_cgt[i];
@@ -1775,26 +2234,25 @@ int __init populate_nv_trap_config(void)
ret = -EINVAL;
}
- if (cgt->encoding != cgt->end) {
- prev = xa_store_range(&sr_forward_xa,
- cgt->encoding, cgt->end,
- xa_mk_value(cgt->tc.val),
- GFP_KERNEL);
- } else {
- prev = xa_store(&sr_forward_xa, cgt->encoding,
+ for_each_encoding_in(enc, cgt->encoding, cgt->end) {
+ prev = xa_store(&sr_forward_xa, enc,
xa_mk_value(cgt->tc.val), GFP_KERNEL);
if (prev && !xa_is_err(prev)) {
ret = -EINVAL;
print_nv_trap_error(cgt, "Duplicate CGT", ret);
}
- }
- if (xa_is_err(prev)) {
- ret = xa_err(prev);
- print_nv_trap_error(cgt, "Failed CGT insertion", ret);
+ if (xa_is_err(prev)) {
+ ret = xa_err(prev);
+ print_nv_trap_error(cgt, "Failed CGT insertion", ret);
+ }
}
}
+ if (__HCRX_EL2_RES0 != HCRX_EL2_RES0)
+ kvm_info("Sanitised HCR_EL2_RES0 = %016llx, expecting %016llx\n",
+ __HCRX_EL2_RES0, HCRX_EL2_RES0);
+
kvm_info("nv: %ld coarse grained trap handlers\n",
ARRAY_SIZE(encoding_to_cgt));
@@ -1804,24 +2262,46 @@ int __init populate_nv_trap_config(void)
for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) {
const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i];
union trap_config tc;
+ void *prev;
if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) {
ret = -EINVAL;
print_nv_trap_error(fgt, "Invalid FGT", ret);
}
- tc = get_trap_config(fgt->encoding);
+ for_each_encoding_in(enc, fgt->encoding, fgt->end) {
+ tc = get_trap_config(enc);
- if (tc.fgt) {
- ret = -EINVAL;
- print_nv_trap_error(fgt, "Duplicate FGT", ret);
+ if (tc.fgt) {
+ ret = -EINVAL;
+ print_nv_trap_error(fgt, "Duplicate FGT", ret);
+ }
+
+ tc.val |= fgt->tc.val;
+ prev = xa_store(&sr_forward_xa, enc,
+ xa_mk_value(tc.val), GFP_KERNEL);
+
+ if (xa_is_err(prev)) {
+ ret = xa_err(prev);
+ print_nv_trap_error(fgt, "Failed FGT insertion", ret);
+ }
+
+ if (!aggregate_fgt(tc)) {
+ ret = -EINVAL;
+ print_nv_trap_error(fgt, "FGT bit is reserved", ret);
+ }
}
+ }
- tc.val |= fgt->tc.val;
- xa_store(&sr_forward_xa, fgt->encoding,
- xa_mk_value(tc.val), GFP_KERNEL);
+ for (int i = 0; i < ARRAY_SIZE(non_0x18_fgt); i++) {
+ if (!aggregate_fgt(non_0x18_fgt[i])) {
+ ret = -EINVAL;
+ kvm_err("non_0x18_fgt[%d] is reserved\n", i);
+ }
}
+ ret = check_all_fgt_masks(ret);
+
kvm_info("nv: %ld fine grained trap handlers\n",
ARRAY_SIZE(encoding_to_fgt));
@@ -1832,7 +2312,8 @@ check_mcb:
cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__];
for (int i = 0; cgids[i] != __RESERVED__; i++) {
- if (cgids[i] >= __MULTIPLE_CONTROL_BITS__) {
+ if (cgids[i] >= __MULTIPLE_CONTROL_BITS__ &&
+ cgids[i] < __COMPLEX_CONDITIONS__) {
kvm_err("Recursive MCB %d/%d\n", id, cgids[i]);
ret = -EINVAL;
}
@@ -1845,6 +2326,38 @@ check_mcb:
return ret;
}
+int __init populate_sysreg_config(const struct sys_reg_desc *sr,
+ unsigned int idx)
+{
+ union trap_config tc;
+ u32 encoding;
+ void *ret;
+
+ /*
+ * 0 is a valid value for the index, but not for the storage.
+ * We'll store (idx+1), so check against a limit offset by one.
+ */
+ if (idx >= (BIT(TC_SRI_BITS) - 1)) {
+ kvm_err("sysreg %s (%d) out of range\n", sr->name, idx);
+ return -EINVAL;
+ }
+
+ encoding = sys_reg(sr->Op0, sr->Op1, sr->CRn, sr->CRm, sr->Op2);
+ tc = get_trap_config(encoding);
+
+ if (tc.sri) {
+ kvm_err("sysreg %s (%d) duplicate entry (%d)\n",
+ sr->name, idx - 1, tc.sri);
+ return -EINVAL;
+ }
+
+ tc.sri = idx + 1;
+ ret = xa_store(&sr_forward_xa, encoding,
+ xa_mk_value(tc.val), GFP_KERNEL);
+
+ return xa_err(ret);
+}
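
The +1 bias lets sri == 0 mean "no descriptor index recorded"; the consumer undoes it. A minimal sketch of the lookup side (mirroring triage_sysreg_trap() below):

	tc = get_trap_config(encoding);
	if (tc.sri)
		*sr_index = tc.sri - 1;	/* undo the +1 bias */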
+
static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu,
const struct trap_bits *tb)
{
@@ -1892,29 +2405,59 @@ static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu,
return __compute_trap_behaviour(vcpu, tc.cgt, b);
}
-static bool check_fgt_bit(u64 val, const union trap_config tc)
+static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr)
{
- return ((val >> tc.bit) & 1) == tc.pol;
+ struct kvm_sysreg_masks *masks;
+
+ /* Only handle the VNCR-backed regs for now */
+ if (sr < __VNCR_START__)
+ return 0;
+
+ masks = kvm->arch.sysreg_masks;
+
+ return masks->mask[sr - __VNCR_START__].res0;
}
-#define sanitised_sys_reg(vcpu, reg) \
- ({ \
- u64 __val; \
- __val = __vcpu_sys_reg(vcpu, reg); \
- __val &= ~__ ## reg ## _RES0; \
- (__val); \
- })
+static bool check_fgt_bit(struct kvm_vcpu *vcpu, enum vcpu_sysreg sr,
+ const union trap_config tc)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 val;
-bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
+ /*
+ * KVM doesn't know about any FGTs that apply to the host, and hopefully
+ * that'll remain the case.
+ */
+ if (is_hyp_ctxt(vcpu))
+ return false;
+
+ val = __vcpu_sys_reg(vcpu, sr);
+
+ if (tc.pol)
+ return (val & BIT(tc.bit));
+
+ /*
+ * FGTs with negative polarities are an absolute nightmare, as
+ * we need to evaluate the bit in the light of the feature
+ * that defines it. WTF were they thinking?
+ *
+ * So let's check if the bit has been earmarked as RES0, as
+ * this indicates an unimplemented feature.
+ */
+ if (val & BIT(tc.bit))
+ return false;
+
+ return !(kvm_get_sysreg_res0(kvm, sr) & BIT(tc.bit));
+}
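
Worked through for a negative-polarity bit, e.g. HFGRTR_EL2.nPOR_EL1 (pol == 0), for illustration:

	/*
	 * bit set in the guest's FGT register	-> no trap (return false)
	 * bit clear but flagged RES0		-> feature absent, no trap
	 * bit clear and implemented		-> trap (return true)
	 */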
+
+bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
{
+ enum vcpu_sysreg fgtreg;
union trap_config tc;
enum trap_behaviour b;
bool is_read;
u32 sysreg;
- u64 esr, val;
-
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
- return false;
+ u64 esr;
esr = kvm_vcpu_get_esr(vcpu);
sysreg = esr_sys64_to_sysreg(esr);
@@ -1926,39 +2469,54 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
* A value of 0 for the whole entry means that we know nothing
* for this sysreg, and that it cannot be re-injected into the
* nested hypervisor. In this situation, let's cut it short.
- *
- * Note that ultimately, we could also make use of the xarray
- * to store the index of the sysreg in the local descriptor
- * array, avoiding another search... Hint, hint...
*/
if (!tc.val)
- return false;
+ goto local;
+
+ /*
+ * If a sysreg can be trapped using a FGT, first check whether we
+ * trap for the purpose of forbidding the feature. In that case,
+ * inject an UNDEF.
+ */
+ if (tc.fgt != __NO_FGT_GROUP__ &&
+ (vcpu->kvm->arch.fgu[tc.fgt] & BIT(tc.bit))) {
+ kvm_inject_undefined(vcpu);
+ return true;
+ }
+
+ /*
+ * If we're not nesting, immediately return to the caller, with the
+ * sysreg index, should we have it.
+ */
+ if (!vcpu_has_nv(vcpu))
+ goto local;
+
+ /*
+ * There are a few traps that take effect InHost, but are constrained
+ * to EL0. Don't bother with computing the trap behaviour if the vCPU
+ * isn't in EL0.
+ */
+ if (is_hyp_ctxt(vcpu) && !vcpu_is_host_el0(vcpu))
+ goto local;
switch ((enum fgt_group_id)tc.fgt) {
case __NO_FGT_GROUP__:
break;
- case HFGxTR_GROUP:
- if (is_read)
- val = sanitised_sys_reg(vcpu, HFGRTR_EL2);
- else
- val = sanitised_sys_reg(vcpu, HFGWTR_EL2);
+ case HFGRTR_GROUP:
+ fgtreg = is_read ? HFGRTR_EL2 : HFGWTR_EL2;
break;
case HDFGRTR_GROUP:
- case HDFGWTR_GROUP:
- if (is_read)
- val = sanitised_sys_reg(vcpu, HDFGRTR_EL2);
- else
- val = sanitised_sys_reg(vcpu, HDFGWTR_EL2);
+ fgtreg = is_read ? HDFGRTR_EL2 : HDFGWTR_EL2;
break;
case HAFGRTR_GROUP:
- val = sanitised_sys_reg(vcpu, HAFGRTR_EL2);
+ fgtreg = HAFGRTR_EL2;
break;
case HFGITR_GROUP:
- val = sanitised_sys_reg(vcpu, HFGITR_EL2);
+ fgtreg = HFGITR_EL2;
switch (tc.fgf) {
u64 tmp;
@@ -1966,27 +2524,63 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
break;
case HCRX_FGTnXS:
- tmp = sanitised_sys_reg(vcpu, HCRX_EL2);
+ tmp = __vcpu_sys_reg(vcpu, HCRX_EL2);
if (tmp & HCRX_EL2_FGTnXS)
tc.fgt = __NO_FGT_GROUP__;
}
break;
- case __NR_FGT_GROUP_IDS__:
+ case HFGRTR2_GROUP:
+ fgtreg = is_read ? HFGRTR2_EL2 : HFGWTR2_EL2;
+ break;
+
+ case HDFGRTR2_GROUP:
+ fgtreg = is_read ? HDFGRTR2_EL2 : HDFGWTR2_EL2;
+ break;
+
+ case HFGITR2_GROUP:
+ fgtreg = HFGITR2_EL2;
+ break;
+
+ default:
/* Something is really wrong, bail out */
- WARN_ONCE(1, "__NR_FGT_GROUP_IDS__");
- return false;
+ WARN_ONCE(1, "Bad FGT group (encoding %08x, config %016llx)\n",
+ sysreg, tc.val);
+ goto local;
}
- if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(val, tc))
+ if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu, fgtreg, tc))
goto inject;
b = compute_trap_behaviour(vcpu, tc);
+ if (!(b & BEHAVE_FORWARD_IN_HOST_EL0) && vcpu_is_host_el0(vcpu))
+ goto local;
+
if (((b & BEHAVE_FORWARD_READ) && is_read) ||
((b & BEHAVE_FORWARD_WRITE) && !is_read))
goto inject;
+local:
+ if (!tc.sri) {
+ struct sys_reg_params params;
+
+ params = esr_sys64_to_params(esr);
+
+ /*
+ * Check for the IMPDEF range, as per DDI0487 J.a,
+ * D18.3.2 Reserved encodings for IMPLEMENTATION
+ * DEFINED registers.
+ */
+ if (!(params.Op0 == 3 && (params.CRn & 0b1011) == 0b1011))
+ print_sys_reg_msg(&params,
+ "Unsupported guest access at: %lx\n",
+ *vcpu_pc(vcpu));
+ kvm_inject_undefined(vcpu);
+ return true;
+ }
+
+ *sr_index = tc.sri - 1;
return false;
inject:
@@ -1996,6 +2590,41 @@ inject:
return true;
}
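
From the caller's side (kvm_handle_sys_reg() in sys_regs.c, shown schematically), a true return means the trap was fully consumed here, while false hands back a direct index into the sysreg descriptor table:

	int sr_index;

	if (triage_sysreg_trap(vcpu, &sr_index))
		return 1;	/* forwarded or UNDEFed, nothing left to do */

	/* direct index, no binary search required */
	r = &sys_reg_descs[sr_index];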
+static bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg, u64 control_bit)
+{
+ bool control_bit_set;
+
+ if (!vcpu_has_nv(vcpu))
+ return false;
+
+ control_bit_set = __vcpu_sys_reg(vcpu, reg) & control_bit;
+ if (!is_hyp_ctxt(vcpu) && control_bit_set) {
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+ return true;
+ }
+ return false;
+}
+
+static bool forward_hcr_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+ return __forward_traps(vcpu, HCR_EL2, control_bit);
+}
+
+bool forward_smc_trap(struct kvm_vcpu *vcpu)
+{
+ return forward_hcr_traps(vcpu, HCR_TSC);
+}
+
+static bool forward_mdcr_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+ return __forward_traps(vcpu, MDCR_EL2, control_bit);
+}
+
+bool forward_debug_exception(struct kvm_vcpu *vcpu)
+{
+ return forward_mdcr_traps(vcpu, MDCR_EL2_TDE);
+}
+
static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
{
u64 mode = spsr & PSR_MODE_MASK;
@@ -2031,49 +2660,50 @@ static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
{
- u64 spsr, elr, mode;
- bool direct_eret;
+ u64 spsr, elr, esr;
- /*
- * Going through the whole put/load motions is a waste of time
- * if this is a VHE guest hypervisor returning to its own
- * userspace, or the hypervisor performing a local exception
- * return. No need to save/restore registers, no need to
- * switch S2 MMU. Just do the canonical ERET.
- */
spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
spsr = kvm_check_illegal_exception_return(vcpu, spsr);
- mode = spsr & (PSR_MODE_MASK | PSR_MODE32_BIT);
-
- direct_eret = (mode == PSR_MODE_EL0t &&
- vcpu_el2_e2h_is_set(vcpu) &&
- vcpu_el2_tge_is_set(vcpu));
- direct_eret |= (mode == PSR_MODE_EL2h || mode == PSR_MODE_EL2t);
-
- if (direct_eret) {
- *vcpu_pc(vcpu) = vcpu_read_sys_reg(vcpu, ELR_EL2);
- *vcpu_cpsr(vcpu) = spsr;
- trace_kvm_nested_eret(vcpu, *vcpu_pc(vcpu), spsr);
- return;
+ /* Check for an ERETAx */
+ esr = kvm_vcpu_get_esr(vcpu);
+ if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
+ /*
+ * Oh no, ERETAx failed to authenticate.
+ *
+ * If we have FPACCOMBINE and we don't have a pending
+ * Illegal Execution State exception (which has priority
+ * over FPAC), deliver an exception right away.
+ *
+ * Otherwise, let the mangled ELR value trickle down the
+ * ERET handling, and the guest will have a little surprise.
+ */
+ if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE) && !(spsr & PSR_IL_BIT)) {
+ esr &= ESR_ELx_ERET_ISS_ERETA;
+ esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
+ kvm_inject_nested_sync(vcpu, esr);
+ return;
+ }
}
preempt_disable();
+ vcpu_set_flag(vcpu, IN_NESTED_ERET);
kvm_arch_vcpu_put(vcpu);
- elr = __vcpu_sys_reg(vcpu, ELR_EL2);
+ if (!esr_iss_is_eretax(esr))
+ elr = __vcpu_sys_reg(vcpu, ELR_EL2);
trace_kvm_nested_eret(vcpu, elr, spsr);
- /*
- * Note that the current exception level is always the virtual EL2,
- * since we set HCR_EL2.NV bit only when entering the virtual EL2.
- */
*vcpu_pc(vcpu) = elr;
*vcpu_cpsr(vcpu) = spsr;
kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ vcpu_clear_flag(vcpu, IN_NESTED_ERET);
preempt_enable();
+
+ if (kvm_vcpu_has_pmu(vcpu))
+ kvm_pmu_nested_transition(vcpu);
}
static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2156,6 +2786,9 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
kvm_arch_vcpu_load(vcpu, smp_processor_id());
preempt_enable();
+ if (kvm_vcpu_has_pmu(vcpu))
+ kvm_pmu_nested_transition(vcpu);
+
return 1;
}