Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--   arch/arm64/kvm/Kconfig                    |    2
-rw-r--r--   arch/arm64/kvm/arm.c                      |   65
-rw-r--r--   arch/arm64/kvm/emulate-nested.c           | 1852
-rw-r--r--   arch/arm64/kvm/guest.c                    |   15
-rw-r--r--   arch/arm64/kvm/handle_exit.c              |   29
-rw-r--r--   arch/arm64/kvm/hyp/include/hyp/switch.h   |  127
-rw-r--r--   arch/arm64/kvm/hyp/include/nvhe/mm.h      |    1
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/hyp-main.c        |   11
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/mm.c              |   83
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/setup.c           |   27
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/switch.c          |    2
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/tlb.c             |   30
-rw-r--r--   arch/arm64/kvm/hyp/pgtable.c              |   63
-rw-r--r--   arch/arm64/kvm/hyp/vhe/tlb.c              |   28
-rw-r--r--   arch/arm64/kvm/mmu.c                      |  104
-rw-r--r--   arch/arm64/kvm/nested.c                   |   11
-rw-r--r--   arch/arm64/kvm/pmu-emul.c                 |   37
-rw-r--r--   arch/arm64/kvm/pmu.c                      |   18
-rw-r--r--   arch/arm64/kvm/reset.c                    |   25
-rw-r--r--   arch/arm64/kvm/sys_regs.c                 |   15
-rw-r--r--   arch/arm64/kvm/trace_arm.h                |   26
-rw-r--r--   arch/arm64/kvm/vgic/vgic.h                |    2
22 files changed, 2370 insertions, 203 deletions
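
The bulk of this series is the new trap routing in emulate-nested.c below: each trapped sysreg gets a 64-bit trap description (union trap_config) that is stored in an xarray indexed by the sysreg encoding, which is why the layout comment in the patch reserves bit 63 as "RES0 - Must be zero, as lost on insertion in the xarray". A minimal user-space sketch of that packing scheme, reusing the field layout from the patch; the demo values and main() are illustrative, not part of the commit, and the bitfield ordering assumes a GCC/Clang little-endian build, as the kernel does:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the bit layout documented in the patch:
 * [9:0] cgt, [13:10] fgt, [19:14] bit, [20] pol, [25:21] fgf,
 * [62:26] unused, [63] must be zero (lost on xarray insertion).
 */
union trap_config {
	uint64_t val;
	struct {
		uint64_t cgt:10;    /* Coarse Grained Trap id */
		uint64_t fgt:4;     /* Fine Grained Trap id */
		uint64_t bit:6;     /* Bit number in the FGT register */
		uint64_t pol:1;     /* Trap polarity */
		uint64_t fgf:5;     /* Fine Grained Filter */
		uint64_t unused:37; /* Should be zero */
		uint64_t mbz:1;     /* Must Be Zero */
	};
};

int main(void)
{
	/* Hypothetical ids, just to exercise the packing. */
	union trap_config tc = { .cgt = 3, .fgt = 1, .bit = 42, .pol = 1 };

	/* The whole description round-trips through a single u64... */
	union trap_config copy = { .val = tc.val };
	assert(copy.cgt == 3 && copy.fgt == 1 && copy.bit == 42 && copy.pol == 1);

	/*
	 * ...provided bit 63 stays clear: a kernel xarray value entry
	 * (xa_mk_value()) only preserves 63 bits of payload, which is
	 * what the mbz field marks.
	 */
	assert(!(tc.val & (1ULL << 63)));

	printf("packed trap_config: 0x%016" PRIx64 "\n", tc.val);
	return 0;
}

Packing the whole description into one scalar is what lets the kernel side store it directly as an xarray value entry rather than an allocated object, at the cost of giving up the top bit.
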
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f531da6b362e..83c1e09be42e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -25,7 +25,6 @@ menuconfig KVM
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
-	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
 	select KVM_MMIO
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select KVM_XFER_TO_GUEST_WORK
@@ -43,6 +42,7 @@ menuconfig KVM
 	select SCHED_INFO
 	select GUEST_PERF_EVENTS if PERF_EVENTS
 	select INTERVAL_TREE
+	select XARRAY_MULTI
 	help
 	  Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index d1cb298a58a0..4866b3f7b4ea 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -36,6 +36,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
 #include <asm/kvm_pkvm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/sections.h>
@@ -365,7 +366,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 #endif
 
 	/* Force users to call KVM_ARM_VCPU_INIT */
-	vcpu->arch.target = -1;
+	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
@@ -462,7 +463,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu_ptrauth_disable(vcpu);
 	kvm_arch_vcpu_load_debug_state_flags(vcpu);
 
-	if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
+	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
 		vcpu_set_on_unsupported_cpu(vcpu);
 }
@@ -574,7 +575,7 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
 
 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.target >= 0;
+	return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
 }
 
 /*
@@ -803,6 +804,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
 			kvm_pmu_handle_pmcr(vcpu,
 					    __vcpu_sys_reg(vcpu, PMCR_EL0));
 
+		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
+			kvm_vcpu_pmu_restore_guest(vcpu);
+
 		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
 			return kvm_vcpu_suspend(vcpu);
@@ -818,6 +822,9 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
 	if (likely(!vcpu_mode_is_32bit(vcpu)))
 		return false;
 
+	if (vcpu_has_nv(vcpu))
+		return true;
+
 	return !kvm_supports_32bit_el0();
 }
@@ -1058,7 +1065,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 * invalid. The VMM can try and fix it by issuing a
 		 * KVM_ARM_VCPU_INIT if it really wants to.
 		 */
-		vcpu->arch.target = -1;
+		vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
 		ret = ARM_EXCEPTION_IL;
 	}
@@ -1219,8 +1226,7 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
 {
 	unsigned long features = init->features[0];
 
-	return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES) ||
-			vcpu->arch.target != init->target;
+	return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
 }
 
 static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
@@ -1236,20 +1242,18 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 	    !bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES))
 		goto out_unlock;
 
-	vcpu->arch.target = init->target;
 	bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
 
 	/* Now we know what it is, we can reset it. */
 	ret = kvm_reset_vcpu(vcpu);
 	if (ret) {
-		vcpu->arch.target = -1;
 		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 		goto out_unlock;
 	}
 
 	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
 	set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
-
+	vcpu_set_flag(vcpu, VCPU_INITIALIZED);
 out_unlock:
 	mutex_unlock(&kvm->arch.config_lock);
 	return ret;
@@ -1260,14 +1264,15 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 {
 	int ret;
 
-	if (init->target != kvm_target_cpu())
+	if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
+	    init->target != kvm_target_cpu())
 		return -EINVAL;
 
 	ret = kvm_vcpu_init_check_features(vcpu, init);
 	if (ret)
 		return ret;
 
-	if (vcpu->arch.target == -1)
+	if (!kvm_vcpu_initialized(vcpu))
 		return __kvm_vcpu_set_target(vcpu, init);
 
 	if (kvm_vcpu_init_changed(vcpu, init))
@@ -1532,12 +1537,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 
 }
 
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
-					const struct kvm_memory_slot *memslot)
-{
-	kvm_flush_remote_tlbs(kvm);
-}
-
 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
 					struct kvm_arm_device_addr *dev_addr)
 {
@@ -1595,9 +1594,9 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
 	}
 	case KVM_ARM_PREFERRED_TARGET: {
-		struct kvm_vcpu_init init;
-
-		kvm_vcpu_preferred_target(&init);
+		struct kvm_vcpu_init init = {
+			.target = KVM_ARM_TARGET_GENERIC_V8,
+		};
 
 		if (copy_to_user(argp, &init, sizeof(init)))
 			return -EFAULT;
@@ -2276,30 +2275,8 @@ static int __init init_hyp_mode(void)
 	for_each_possible_cpu(cpu) {
 		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
 		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
-		unsigned long hyp_addr;
 
-		/*
-		 * Allocate a contiguous HYP private VA range for the stack
-		 * and guard page. The allocation is also aligned based on
-		 * the order of its size.
-		 */
-		err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
-		if (err) {
-			kvm_err("Cannot allocate hyp stack guard page\n");
-			goto out_err;
-		}
-
-		/*
-		 * Since the stack grows downwards, map the stack to the page
-		 * at the higher address and leave the lower guard page
-		 * unbacked.
-		 *
-		 * Any valid stack address now has the PAGE_SHIFT bit as 1
-		 * and addresses corresponding to the guard page have the
-		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
-		 */
-		err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE,
-					    __pa(stack_page), PAGE_HYP);
+		err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
 			goto out_err;
@@ -2312,8 +2289,6 @@ static int __init init_hyp_mode(void)
 		 * has been mapped in the flexible private VA space.
 		 */
 		params->stack_pa = __pa(stack_page);
-
-		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
 	}
 
 	for_each_possible_cpu(cpu) {
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index b96662029fb1..9ced1bf0c2b7 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -14,6 +14,1858 @@
 #include "trace.h"
 
+enum trap_behaviour {
+	BEHAVE_HANDLE_LOCALLY	= 0,
+	BEHAVE_FORWARD_READ	= BIT(0),
+	BEHAVE_FORWARD_WRITE	= BIT(1),
+	BEHAVE_FORWARD_ANY	= BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
+};
+
+struct trap_bits {
+	const enum vcpu_sysreg		index;
+	const enum trap_behaviour	behaviour;
+	const u64			value;
+	const u64			mask;
+};
+
+/* Coarse Grained Trap definitions */
+enum cgt_group_id {
+	/* Indicates no coarse trap control */
+	__RESERVED__,
+
+	/*
+	 * The first batch of IDs denote coarse trapping that are used
+	 * on their own instead of being part of a combination of
+	 * trap controls.
+	 */
+	CGT_HCR_TID1,
+	CGT_HCR_TID2,
+	CGT_HCR_TID3,
+	CGT_HCR_IMO,
+	CGT_HCR_FMO,
+	CGT_HCR_TIDCP,
+	CGT_HCR_TACR,
+	CGT_HCR_TSW,
+	CGT_HCR_TPC,
+	CGT_HCR_TPU,
+	CGT_HCR_TTLB,
+	CGT_HCR_TVM,
+	CGT_HCR_TDZ,
+	CGT_HCR_TRVM,
+	CGT_HCR_TLOR,
+	CGT_HCR_TERR,
+	CGT_HCR_APK,
+	CGT_HCR_NV,
+	CGT_HCR_NV_nNV2,
+	CGT_HCR_NV1_nNV2,
+	CGT_HCR_AT,
+	CGT_HCR_nFIEN,
+	CGT_HCR_TID4,
+	CGT_HCR_TICAB,
+	CGT_HCR_TOCU,
+	CGT_HCR_ENSCXT,
+	CGT_HCR_TTLBIS,
+	CGT_HCR_TTLBOS,
+
+	CGT_MDCR_TPMCR,
+	CGT_MDCR_TPM,
+	CGT_MDCR_TDE,
+	CGT_MDCR_TDA,
+	CGT_MDCR_TDOSA,
+	CGT_MDCR_TDRA,
+	CGT_MDCR_E2PB,
+	CGT_MDCR_TPMS,
+	CGT_MDCR_TTRF,
+	CGT_MDCR_E2TB,
+	CGT_MDCR_TDCC,
+
+	/*
+	 * Anything after this point is a combination of coarse trap
+	 * controls, which must all be evaluated to decide what to do.
+	 */
+	__MULTIPLE_CONTROL_BITS__,
+	CGT_HCR_IMO_FMO = __MULTIPLE_CONTROL_BITS__,
+	CGT_HCR_TID2_TID4,
+	CGT_HCR_TTLB_TTLBIS,
+	CGT_HCR_TTLB_TTLBOS,
+	CGT_HCR_TVM_TRVM,
+	CGT_HCR_TPU_TICAB,
+	CGT_HCR_TPU_TOCU,
+	CGT_HCR_NV1_nNV2_ENSCXT,
+	CGT_MDCR_TPM_TPMCR,
+	CGT_MDCR_TDE_TDA,
+	CGT_MDCR_TDE_TDOSA,
+	CGT_MDCR_TDE_TDRA,
+	CGT_MDCR_TDCC_TDE_TDA,
+
+	/*
+	 * Anything after this point requires a callback evaluating a
+	 * complex trap condition. Ugly stuff.
+	 */
+	__COMPLEX_CONDITIONS__,
+	CGT_CNTHCTL_EL1PCTEN = __COMPLEX_CONDITIONS__,
+	CGT_CNTHCTL_EL1PTEN,
+
+	/* Must be last */
+	__NR_CGT_GROUP_IDS__
+};
+
+static const struct trap_bits coarse_trap_bits[] = {
+	[CGT_HCR_TID1] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TID1,
+		.mask		= HCR_TID1,
+		.behaviour	= BEHAVE_FORWARD_READ,
+	},
+	[CGT_HCR_TID2] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TID2,
+		.mask		= HCR_TID2,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TID3] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TID3,
+		.mask		= HCR_TID3,
+		.behaviour	= BEHAVE_FORWARD_READ,
+	},
+	[CGT_HCR_IMO] = {
+		.index		= HCR_EL2,
+		.value		= HCR_IMO,
+		.mask		= HCR_IMO,
+		.behaviour	= BEHAVE_FORWARD_WRITE,
+	},
+	[CGT_HCR_FMO] = {
+		.index		= HCR_EL2,
+		.value		= HCR_FMO,
+		.mask		= HCR_FMO,
+		.behaviour	= BEHAVE_FORWARD_WRITE,
+	},
+	[CGT_HCR_TIDCP] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TIDCP,
+		.mask		= HCR_TIDCP,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TACR] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TACR,
+		.mask		= HCR_TACR,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TSW] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TSW,
+		.mask		= HCR_TSW,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TPC] = { /* Also called TCPC when FEAT_DPB is implemented */
+		.index		= HCR_EL2,
+		.value		= HCR_TPC,
+		.mask		= HCR_TPC,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TPU] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TPU,
+		.mask		= HCR_TPU,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TTLB] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TTLB,
+		.mask		= HCR_TTLB,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TVM] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TVM,
+		.mask		= HCR_TVM,
+		.behaviour	= BEHAVE_FORWARD_WRITE,
+	},
+	[CGT_HCR_TDZ] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TDZ,
+		.mask		= HCR_TDZ,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TRVM] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TRVM,
+		.mask		= HCR_TRVM,
+		.behaviour	= BEHAVE_FORWARD_READ,
+	},
+	[CGT_HCR_TLOR] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TLOR,
+		.mask		= HCR_TLOR,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TERR] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TERR,
+		.mask		= HCR_TERR,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_APK] = {
+		.index		= HCR_EL2,
+		.value		= 0,
+		.mask		= HCR_APK,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_NV] = {
+		.index		= HCR_EL2,
+		.value		= HCR_NV,
+		.mask		= HCR_NV,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_NV_nNV2] = {
+		.index		= HCR_EL2,
+		.value		= HCR_NV,
+		.mask		= HCR_NV | HCR_NV2,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_NV1_nNV2] = {
+		.index		= HCR_EL2,
+		.value		= HCR_NV | HCR_NV1,
+		.mask		= HCR_NV | HCR_NV1 | HCR_NV2,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_AT] = {
+		.index		= HCR_EL2,
+		.value		= HCR_AT,
+		.mask		= HCR_AT,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_nFIEN] = {
+		.index		= HCR_EL2,
+		.value		= 0,
+		.mask		= HCR_FIEN,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TID4] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TID4,
+		.mask		= HCR_TID4,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TICAB] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TICAB,
+		.mask		= HCR_TICAB,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TOCU] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TOCU,
+		.mask		= HCR_TOCU,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_ENSCXT] = {
+		.index		= HCR_EL2,
+		.value		= 0,
+		.mask		= HCR_ENSCXT,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TTLBIS] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TTLBIS,
+		.mask		= HCR_TTLBIS,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_HCR_TTLBOS] = {
+		.index		= HCR_EL2,
+		.value		= HCR_TTLBOS,
+		.mask		= HCR_TTLBOS,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_TPMCR] = {
+		.index		= MDCR_EL2,
+		.value		= MDCR_EL2_TPMCR,
+		.mask		= MDCR_EL2_TPMCR,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_TPM] = {
+		.index		= MDCR_EL2,
+		.value		= MDCR_EL2_TPM,
+		.mask		= MDCR_EL2_TPM,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_TDE] = {
+		.index		= MDCR_EL2,
+		.value		= MDCR_EL2_TDE,
+		.mask		= MDCR_EL2_TDE,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_TDA] = {
+		.index		= MDCR_EL2,
+		.value		= MDCR_EL2_TDA,
+		.mask		= MDCR_EL2_TDA,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_TDOSA] = {
+		.index		= MDCR_EL2,
+		.value		= MDCR_EL2_TDOSA,
+		.mask		= MDCR_EL2_TDOSA,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_TDRA] = {
+		.index		= MDCR_EL2,
+		.value		= MDCR_EL2_TDRA,
+		.mask		= MDCR_EL2_TDRA,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_E2PB] = {
+		.index		= MDCR_EL2,
+		.value		= 0,
+		.mask		= BIT(MDCR_EL2_E2PB_SHIFT),
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_TPMS] = {
+		.index		= MDCR_EL2,
+		.value		= MDCR_EL2_TPMS,
+		.mask		= MDCR_EL2_TPMS,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_TTRF] = {
+		.index		= MDCR_EL2,
+		.value		= MDCR_EL2_TTRF,
+		.mask		= MDCR_EL2_TTRF,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_E2TB] = {
+		.index		= MDCR_EL2,
+		.value		= 0,
+		.mask		= BIT(MDCR_EL2_E2TB_SHIFT),
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+	[CGT_MDCR_TDCC] = {
+		.index		= MDCR_EL2,
+		.value		= MDCR_EL2_TDCC,
+		.mask		= MDCR_EL2_TDCC,
+		.behaviour	= BEHAVE_FORWARD_ANY,
+	},
+};
+
+#define MCB(id, ...)						\
+	[id - __MULTIPLE_CONTROL_BITS__]	=		\
+		(const enum cgt_group_id[]){			\
+			__VA_ARGS__, __RESERVED__		\
+		}
+
+static const enum cgt_group_id *coarse_control_combo[] = {
+	MCB(CGT_HCR_IMO_FMO,		CGT_HCR_IMO, CGT_HCR_FMO),
+	MCB(CGT_HCR_TID2_TID4,		CGT_HCR_TID2, CGT_HCR_TID4),
+	MCB(CGT_HCR_TTLB_TTLBIS,	CGT_HCR_TTLB, CGT_HCR_TTLBIS),
+	MCB(CGT_HCR_TTLB_TTLBOS,	CGT_HCR_TTLB, CGT_HCR_TTLBOS),
+	MCB(CGT_HCR_TVM_TRVM,		CGT_HCR_TVM, CGT_HCR_TRVM),
+	MCB(CGT_HCR_TPU_TICAB,		CGT_HCR_TPU, CGT_HCR_TICAB),
+	MCB(CGT_HCR_TPU_TOCU,		CGT_HCR_TPU, CGT_HCR_TOCU),
+	MCB(CGT_HCR_NV1_nNV2_ENSCXT,	CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT),
+	MCB(CGT_MDCR_TPM_TPMCR,		CGT_MDCR_TPM, CGT_MDCR_TPMCR),
+	MCB(CGT_MDCR_TDE_TDA,		CGT_MDCR_TDE, CGT_MDCR_TDA),
+	MCB(CGT_MDCR_TDE_TDOSA,		CGT_MDCR_TDE, CGT_MDCR_TDOSA),
+	MCB(CGT_MDCR_TDE_TDRA,		CGT_MDCR_TDE, CGT_MDCR_TDRA),
+	MCB(CGT_MDCR_TDCC_TDE_TDA,	CGT_MDCR_TDCC, CGT_MDCR_TDE, CGT_MDCR_TDA),
+};
+
+typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *);
+
+/*
+ * Warning, maximum confusion ahead.
+ *
+ * When E2H=0, CNTHCTL_EL2[1:0] are defined as EL1PCEN:EL1PCTEN
+ * When E2H=1, CNTHCTL_EL2[11:10] are defined as EL1PTEN:EL1PCTEN
+ *
+ * Note the single letter difference? Yet, the bits have the same
+ * function despite a different layout and a different name.
+ *
+ * We don't try to reconcile this mess. We just use the E2H=0 bits
+ * to generate something that is in the E2H=1 format, and live with
+ * it. You're welcome.
+ */
+static u64 get_sanitized_cnthctl(struct kvm_vcpu *vcpu)
+{
+	u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
+
+	if (!vcpu_el2_e2h_is_set(vcpu))
+		val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;
+
+	return val & ((CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN) << 10);
+}
+
+static enum trap_behaviour check_cnthctl_el1pcten(struct kvm_vcpu *vcpu)
+{
+	if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCTEN << 10))
+		return BEHAVE_HANDLE_LOCALLY;
+
+	return BEHAVE_FORWARD_ANY;
+}
+
+static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
+{
+	if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCEN << 10))
+		return BEHAVE_HANDLE_LOCALLY;
+
+	return BEHAVE_FORWARD_ANY;
+}
+
+#define CCC(id, fn)				\
+	[id - __COMPLEX_CONDITIONS__] = fn
+
+static const complex_condition_check ccc[] = {
+	CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten),
+	CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten),
+};
+
+/*
+ * Bit assignment for the trap controls. We use a 64bit word with the
+ * following layout for each trapped sysreg:
+ *
+ * [9:0]	enum cgt_group_id (10 bits)
+ * [13:10]	enum fgt_group_id (4 bits)
+ * [19:14]	bit number in the FGT register (6 bits)
+ * [20]		trap polarity (1 bit)
+ * [25:21]	FG filter (5 bits)
+ * [62:26]	Unused (37 bits)
+ * [63]		RES0 - Must be zero, as lost on insertion in the xarray
+ */
+#define TC_CGT_BITS	10
+#define TC_FGT_BITS	4
+#define TC_FGF_BITS	5
+
+union trap_config {
+	u64	val;
+	struct {
+		unsigned long	cgt:TC_CGT_BITS; /* Coarse Grained Trap id */
+		unsigned long	fgt:TC_FGT_BITS; /* Fine Grained Trap id */
+		unsigned long	bit:6;		 /* Bit number */
+		unsigned long	pol:1;		 /* Polarity */
+		unsigned long	fgf:TC_FGF_BITS; /* Fine Grained Filter */
+		unsigned long	unused:37;	 /* Unused, should be zero */
+		unsigned long	mbz:1;		 /* Must Be Zero */
+	};
+};
+
+struct encoding_to_trap_config {
+	const u32			encoding;
+	const u32			end;
+	const union trap_config		tc;
+	const unsigned int		line;
+};
+
+#define SR_RANGE_TRAP(sr_start, sr_end, trap_id)			\
+	{								\
+		.encoding	= sr_start,				\
+		.end		= sr_end,				\
+		.tc		= {					\
+			.cgt		= trap_id,			\
+		},							\
+		.line = __LINE__,					\
+	}
+
+#define SR_TRAP(sr, trap_id)		SR_RANGE_TRAP(sr, sr, trap_id)
+
+/*
+ * Map encoding to trap bits for exception reported with EC=0x18.
+ * These must only be evaluated when running a nested hypervisor, but
+ * that the current context is not a hypervisor context. When the
+ * trapped access matches one of the trap controls, the exception is
+ * re-injected in the nested hypervisor.
+ */ +static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { + SR_TRAP(SYS_REVIDR_EL1, CGT_HCR_TID1), + SR_TRAP(SYS_AIDR_EL1, CGT_HCR_TID1), + SR_TRAP(SYS_SMIDR_EL1, CGT_HCR_TID1), + SR_TRAP(SYS_CTR_EL0, CGT_HCR_TID2), + SR_TRAP(SYS_CCSIDR_EL1, CGT_HCR_TID2_TID4), + SR_TRAP(SYS_CCSIDR2_EL1, CGT_HCR_TID2_TID4), + SR_TRAP(SYS_CLIDR_EL1, CGT_HCR_TID2_TID4), + SR_TRAP(SYS_CSSELR_EL1, CGT_HCR_TID2_TID4), + SR_RANGE_TRAP(SYS_ID_PFR0_EL1, + sys_reg(3, 0, 0, 7, 7), CGT_HCR_TID3), + SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO), + SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO), + SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO), + SR_RANGE_TRAP(sys_reg(3, 0, 11, 0, 0), + sys_reg(3, 0, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 1, 11, 0, 0), + sys_reg(3, 1, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 2, 11, 0, 0), + sys_reg(3, 2, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 3, 11, 0, 0), + sys_reg(3, 3, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 4, 11, 0, 0), + sys_reg(3, 4, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 5, 11, 0, 0), + sys_reg(3, 5, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 6, 11, 0, 0), + sys_reg(3, 6, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 7, 11, 0, 0), + sys_reg(3, 7, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 0, 15, 0, 0), + sys_reg(3, 0, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 1, 15, 0, 0), + sys_reg(3, 1, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 2, 15, 0, 0), + sys_reg(3, 2, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 3, 15, 0, 0), + sys_reg(3, 3, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 4, 15, 0, 0), + sys_reg(3, 4, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 5, 15, 0, 0), + sys_reg(3, 5, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 6, 15, 0, 0), + sys_reg(3, 6, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 7, 15, 0, 0), + sys_reg(3, 7, 15, 15, 7), CGT_HCR_TIDCP), + SR_TRAP(SYS_ACTLR_EL1, CGT_HCR_TACR), + SR_TRAP(SYS_DC_ISW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CISW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_IGSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_IGDSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CGSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CGDSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CIGSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CIGDSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CIVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CVAP, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CVADP, CGT_HCR_TPC), + SR_TRAP(SYS_DC_IVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CIGVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CIGDVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_IGVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_IGDVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGDVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGVAP, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGDVAP, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGVADP, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGDVADP, CGT_HCR_TPC), + SR_TRAP(SYS_IC_IVAU, CGT_HCR_TPU_TOCU), + SR_TRAP(SYS_IC_IALLU, CGT_HCR_TPU_TOCU), + SR_TRAP(SYS_IC_IALLUIS, CGT_HCR_TPU_TICAB), + SR_TRAP(SYS_DC_CVAU, CGT_HCR_TPU_TOCU), + SR_TRAP(OP_TLBI_RVAE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVAAE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVALE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVAALE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VMALLE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_ASIDE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAAE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VALE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAALE1, CGT_HCR_TTLB), + 
SR_TRAP(OP_TLBI_RVAE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVAAE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVALE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVAALE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VMALLE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_ASIDE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAAE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VALE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAALE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVAE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVAAE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVALE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVAALE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VMALLE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_ASIDE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAAE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VALE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAALE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVAE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVAAE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVALE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVAALE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VMALLE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_ASIDE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAAE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VALE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAALE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VMALLE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_ASIDE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAAE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VALE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAALE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAAE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVALE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAALE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VMALLE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_ASIDE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAAE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VALE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAALE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAAE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVALE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAALE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(SYS_SCTLR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_TTBR0_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_TTBR1_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_TCR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_ESR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_FAR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_AFSR0_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_AFSR1_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_MAIR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_AMAIR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_CONTEXTIDR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_DC_ZVA, CGT_HCR_TDZ), + SR_TRAP(SYS_DC_GVA, CGT_HCR_TDZ), + SR_TRAP(SYS_DC_GZVA, CGT_HCR_TDZ), + SR_TRAP(SYS_LORSA_EL1, CGT_HCR_TLOR), + SR_TRAP(SYS_LOREA_EL1, CGT_HCR_TLOR), + SR_TRAP(SYS_LORN_EL1, CGT_HCR_TLOR), + SR_TRAP(SYS_LORC_EL1, CGT_HCR_TLOR), + SR_TRAP(SYS_LORID_EL1, CGT_HCR_TLOR), + SR_TRAP(SYS_ERRIDR_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERRSELR_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXADDR_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXCTLR_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXFR_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXMISC0_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXMISC1_EL1, 
CGT_HCR_TERR), + SR_TRAP(SYS_ERXMISC2_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXMISC3_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXSTATUS_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_APIAKEYLO_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APIAKEYHI_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APIBKEYLO_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APIBKEYHI_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APDAKEYLO_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APDAKEYHI_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APDBKEYLO_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APDBKEYHI_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APGAKEYLO_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APGAKEYHI_EL1, CGT_HCR_APK), + /* All _EL2 registers */ + SR_RANGE_TRAP(sys_reg(3, 4, 0, 0, 0), + sys_reg(3, 4, 3, 15, 7), CGT_HCR_NV), + /* Skip the SP_EL1 encoding... */ + SR_TRAP(SYS_SPSR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_ELR_EL2, CGT_HCR_NV), + SR_RANGE_TRAP(sys_reg(3, 4, 4, 1, 1), + sys_reg(3, 4, 10, 15, 7), CGT_HCR_NV), + SR_RANGE_TRAP(sys_reg(3, 4, 12, 0, 0), + sys_reg(3, 4, 14, 15, 7), CGT_HCR_NV), + /* All _EL02, _EL12 registers */ + SR_RANGE_TRAP(sys_reg(3, 5, 0, 0, 0), + sys_reg(3, 5, 10, 15, 7), CGT_HCR_NV), + SR_RANGE_TRAP(sys_reg(3, 5, 12, 0, 0), + sys_reg(3, 5, 14, 15, 7), CGT_HCR_NV), + SR_TRAP(OP_AT_S1E2R, CGT_HCR_NV), + SR_TRAP(OP_AT_S1E2W, CGT_HCR_NV), + SR_TRAP(OP_AT_S12E1R, CGT_HCR_NV), + SR_TRAP(OP_AT_S12E1W, CGT_HCR_NV), + SR_TRAP(OP_AT_S12E0R, CGT_HCR_NV), + SR_TRAP(OP_AT_S12E0W, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1ISNXS,CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1OS, CGT_HCR_NV), + 
SR_TRAP(OP_TLBI_RIPAS2LE1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1OSNXS,CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2OSNXS, CGT_HCR_NV), + SR_TRAP(OP_CPP_RCTX, CGT_HCR_NV), + SR_TRAP(OP_DVP_RCTX, CGT_HCR_NV), + SR_TRAP(OP_CFP_RCTX, CGT_HCR_NV), + SR_TRAP(SYS_SP_EL1, CGT_HCR_NV_nNV2), + SR_TRAP(SYS_VBAR_EL1, CGT_HCR_NV1_nNV2), + SR_TRAP(SYS_ELR_EL1, CGT_HCR_NV1_nNV2), + SR_TRAP(SYS_SPSR_EL1, CGT_HCR_NV1_nNV2), + SR_TRAP(SYS_SCXTNUM_EL1, CGT_HCR_NV1_nNV2_ENSCXT), + SR_TRAP(SYS_SCXTNUM_EL0, CGT_HCR_ENSCXT), + SR_TRAP(OP_AT_S1E1R, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E1W, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E0R, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E0W, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E1RP, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E1WP, CGT_HCR_AT), + SR_TRAP(SYS_ERXPFGF_EL1, CGT_HCR_nFIEN), + SR_TRAP(SYS_ERXPFGCTL_EL1, CGT_HCR_nFIEN), + SR_TRAP(SYS_ERXPFGCDN_EL1, CGT_HCR_nFIEN), + SR_TRAP(SYS_PMCR_EL0, CGT_MDCR_TPM_TPMCR), + SR_TRAP(SYS_PMCNTENSET_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMCNTENCLR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMOVSSET_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMOVSCLR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMCEID0_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMCEID1_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMXEVTYPER_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMSWINC_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMSELR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMXEVCNTR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMCCNTR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMUSERENR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMINTENSET_EL1, CGT_MDCR_TPM), + SR_TRAP(SYS_PMINTENCLR_EL1, CGT_MDCR_TPM), + SR_TRAP(SYS_PMMIR_EL1, CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(0), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(1), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(2), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(3), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(4), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(5), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(6), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(7), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(8), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(9), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(10), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(11), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(12), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(13), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(14), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(15), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(16), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(17), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(18), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(19), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(20), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(21), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(22), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(23), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(24), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(25), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(26), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(27), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(28), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(29), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(30), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(0), CGT_MDCR_TPM), 
+ SR_TRAP(SYS_PMEVTYPERn_EL0(1), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(2), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(3), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(4), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(5), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(6), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(7), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(8), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(9), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(10), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(11), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(12), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(13), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(14), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(15), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(16), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(17), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(18), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(19), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(20), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(21), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(22), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(23), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(24), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(25), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(26), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(27), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(28), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(29), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(30), CGT_MDCR_TPM), + SR_TRAP(SYS_PMCCFILTR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_MDCCSR_EL0, CGT_MDCR_TDCC_TDE_TDA), + SR_TRAP(SYS_MDCCINT_EL1, CGT_MDCR_TDCC_TDE_TDA), + SR_TRAP(SYS_OSDTRRX_EL1, CGT_MDCR_TDCC_TDE_TDA), + SR_TRAP(SYS_OSDTRTX_EL1, CGT_MDCR_TDCC_TDE_TDA), + SR_TRAP(SYS_DBGDTR_EL0, CGT_MDCR_TDCC_TDE_TDA), + /* + * Also covers DBGDTRRX_EL0, which has the same encoding as + * SYS_DBGDTRTX_EL0... 
+ */ + SR_TRAP(SYS_DBGDTRTX_EL0, CGT_MDCR_TDCC_TDE_TDA), + SR_TRAP(SYS_MDSCR_EL1, CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_OSECCR_EL1, CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(0), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(1), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(2), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(3), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(4), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(5), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(6), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(7), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(8), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(9), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(10), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(11), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(12), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(13), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(14), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(15), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(0), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(1), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(2), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(3), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(4), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(5), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(6), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(7), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(8), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(9), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(10), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(11), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(12), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(13), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(14), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(15), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(0), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(1), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(2), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(3), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(4), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(5), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(6), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(7), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(8), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(9), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(10), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(11), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(12), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(13), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(14), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(15), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(0), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(1), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(2), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(3), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(4), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(5), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(6), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(7), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(8), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(9), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(10), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(11), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(12), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(13), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(14), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGCLAIMSET_EL1, CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGCLAIMCLR_EL1, CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGAUTHSTATUS_EL1, CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_OSLAR_EL1, CGT_MDCR_TDE_TDOSA), + SR_TRAP(SYS_OSLSR_EL1, CGT_MDCR_TDE_TDOSA), + SR_TRAP(SYS_OSDLR_EL1, CGT_MDCR_TDE_TDOSA), + 
SR_TRAP(SYS_DBGPRCR_EL1, CGT_MDCR_TDE_TDOSA), + SR_TRAP(SYS_MDRAR_EL1, CGT_MDCR_TDE_TDRA), + SR_TRAP(SYS_PMBLIMITR_EL1, CGT_MDCR_E2PB), + SR_TRAP(SYS_PMBPTR_EL1, CGT_MDCR_E2PB), + SR_TRAP(SYS_PMBSR_EL1, CGT_MDCR_E2PB), + SR_TRAP(SYS_PMSCR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSEVFR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSFCR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSICR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSIDR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSIRR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSLATFR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSNEVFR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_TRFCR_EL1, CGT_MDCR_TTRF), + SR_TRAP(SYS_TRBBASER_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_TRBLIMITR_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_TRBMAR_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_TRBPTR_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_TRBSR_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_TRBTRG_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_CNTP_TVAL_EL0, CGT_CNTHCTL_EL1PTEN), + SR_TRAP(SYS_CNTP_CVAL_EL0, CGT_CNTHCTL_EL1PTEN), + SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN), + SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN), + SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN), +}; + +static DEFINE_XARRAY(sr_forward_xa); + +enum fgt_group_id { + __NO_FGT_GROUP__, + HFGxTR_GROUP, + HDFGRTR_GROUP, + HDFGWTR_GROUP, + HFGITR_GROUP, + + /* Must be last */ + __NR_FGT_GROUP_IDS__ +}; + +enum fg_filter_id { + __NO_FGF__, + HCRX_FGTnXS, + + /* Must be last */ + __NR_FG_FILTER_IDS__ +}; + +#define SR_FGF(sr, g, b, p, f) \ + { \ + .encoding = sr, \ + .end = sr, \ + .tc = { \ + .fgt = g ## _GROUP, \ + .bit = g ## _EL2_ ## b ## _SHIFT, \ + .pol = p, \ + .fgf = f, \ + }, \ + .line = __LINE__, \ + } + +#define SR_FGT(sr, g, b, p) SR_FGF(sr, g, b, p, __NO_FGF__) + +static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { + /* HFGRTR_EL2, HFGWTR_EL2 */ + SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0), + SR_FGT(SYS_SMPRI_EL1, HFGxTR, nSMPRI_EL1, 0), + SR_FGT(SYS_ACCDATA_EL1, HFGxTR, nACCDATA_EL1, 0), + SR_FGT(SYS_ERXADDR_EL1, HFGxTR, ERXADDR_EL1, 1), + SR_FGT(SYS_ERXPFGCDN_EL1, HFGxTR, ERXPFGCDN_EL1, 1), + SR_FGT(SYS_ERXPFGCTL_EL1, HFGxTR, ERXPFGCTL_EL1, 1), + SR_FGT(SYS_ERXPFGF_EL1, HFGxTR, ERXPFGF_EL1, 1), + SR_FGT(SYS_ERXMISC0_EL1, HFGxTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXMISC1_EL1, HFGxTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXMISC2_EL1, HFGxTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXMISC3_EL1, HFGxTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXSTATUS_EL1, HFGxTR, ERXSTATUS_EL1, 1), + SR_FGT(SYS_ERXCTLR_EL1, HFGxTR, ERXCTLR_EL1, 1), + SR_FGT(SYS_ERXFR_EL1, HFGxTR, ERXFR_EL1, 1), + SR_FGT(SYS_ERRSELR_EL1, HFGxTR, ERRSELR_EL1, 1), + SR_FGT(SYS_ERRIDR_EL1, HFGxTR, ERRIDR_EL1, 1), + SR_FGT(SYS_ICC_IGRPEN0_EL1, HFGxTR, ICC_IGRPENn_EL1, 1), + SR_FGT(SYS_ICC_IGRPEN1_EL1, HFGxTR, ICC_IGRPENn_EL1, 1), + SR_FGT(SYS_VBAR_EL1, HFGxTR, VBAR_EL1, 1), + SR_FGT(SYS_TTBR1_EL1, HFGxTR, TTBR1_EL1, 1), + SR_FGT(SYS_TTBR0_EL1, HFGxTR, TTBR0_EL1, 1), + SR_FGT(SYS_TPIDR_EL0, HFGxTR, TPIDR_EL0, 1), + SR_FGT(SYS_TPIDRRO_EL0, HFGxTR, TPIDRRO_EL0, 1), + SR_FGT(SYS_TPIDR_EL1, HFGxTR, TPIDR_EL1, 1), + SR_FGT(SYS_TCR_EL1, HFGxTR, TCR_EL1, 1), + SR_FGT(SYS_SCXTNUM_EL0, HFGxTR, SCXTNUM_EL0, 1), + SR_FGT(SYS_SCXTNUM_EL1, HFGxTR, SCXTNUM_EL1, 1), + SR_FGT(SYS_SCTLR_EL1, HFGxTR, SCTLR_EL1, 1), + SR_FGT(SYS_REVIDR_EL1, HFGxTR, REVIDR_EL1, 1), + SR_FGT(SYS_PAR_EL1, HFGxTR, PAR_EL1, 1), + SR_FGT(SYS_MPIDR_EL1, HFGxTR, MPIDR_EL1, 1), + SR_FGT(SYS_MIDR_EL1, HFGxTR, MIDR_EL1, 1), + SR_FGT(SYS_MAIR_EL1, HFGxTR, MAIR_EL1, 1), + SR_FGT(SYS_LORSA_EL1, HFGxTR, LORSA_EL1, 1), + SR_FGT(SYS_LORN_EL1, HFGxTR, LORN_EL1, 1), + 
SR_FGT(SYS_LORID_EL1, HFGxTR, LORID_EL1, 1), + SR_FGT(SYS_LOREA_EL1, HFGxTR, LOREA_EL1, 1), + SR_FGT(SYS_LORC_EL1, HFGxTR, LORC_EL1, 1), + SR_FGT(SYS_ISR_EL1, HFGxTR, ISR_EL1, 1), + SR_FGT(SYS_FAR_EL1, HFGxTR, FAR_EL1, 1), + SR_FGT(SYS_ESR_EL1, HFGxTR, ESR_EL1, 1), + SR_FGT(SYS_DCZID_EL0, HFGxTR, DCZID_EL0, 1), + SR_FGT(SYS_CTR_EL0, HFGxTR, CTR_EL0, 1), + SR_FGT(SYS_CSSELR_EL1, HFGxTR, CSSELR_EL1, 1), + SR_FGT(SYS_CPACR_EL1, HFGxTR, CPACR_EL1, 1), + SR_FGT(SYS_CONTEXTIDR_EL1, HFGxTR, CONTEXTIDR_EL1, 1), + SR_FGT(SYS_CLIDR_EL1, HFGxTR, CLIDR_EL1, 1), + SR_FGT(SYS_CCSIDR_EL1, HFGxTR, CCSIDR_EL1, 1), + SR_FGT(SYS_APIBKEYLO_EL1, HFGxTR, APIBKey, 1), + SR_FGT(SYS_APIBKEYHI_EL1, HFGxTR, APIBKey, 1), + SR_FGT(SYS_APIAKEYLO_EL1, HFGxTR, APIAKey, 1), + SR_FGT(SYS_APIAKEYHI_EL1, HFGxTR, APIAKey, 1), + SR_FGT(SYS_APGAKEYLO_EL1, HFGxTR, APGAKey, 1), + SR_FGT(SYS_APGAKEYHI_EL1, HFGxTR, APGAKey, 1), + SR_FGT(SYS_APDBKEYLO_EL1, HFGxTR, APDBKey, 1), + SR_FGT(SYS_APDBKEYHI_EL1, HFGxTR, APDBKey, 1), + SR_FGT(SYS_APDAKEYLO_EL1, HFGxTR, APDAKey, 1), + SR_FGT(SYS_APDAKEYHI_EL1, HFGxTR, APDAKey, 1), + SR_FGT(SYS_AMAIR_EL1, HFGxTR, AMAIR_EL1, 1), + SR_FGT(SYS_AIDR_EL1, HFGxTR, AIDR_EL1, 1), + SR_FGT(SYS_AFSR1_EL1, HFGxTR, AFSR1_EL1, 1), + SR_FGT(SYS_AFSR0_EL1, HFGxTR, AFSR0_EL1, 1), + /* HFGITR_EL2 */ + SR_FGT(OP_BRB_IALL, HFGITR, nBRBIALL, 0), + SR_FGT(OP_BRB_INJ, HFGITR, nBRBINJ, 0), + SR_FGT(SYS_DC_CVAC, HFGITR, DCCVAC, 1), + SR_FGT(SYS_DC_CGVAC, HFGITR, DCCVAC, 1), + SR_FGT(SYS_DC_CGDVAC, HFGITR, DCCVAC, 1), + SR_FGT(OP_CPP_RCTX, HFGITR, CPPRCTX, 1), + SR_FGT(OP_DVP_RCTX, HFGITR, DVPRCTX, 1), + SR_FGT(OP_CFP_RCTX, HFGITR, CFPRCTX, 1), + SR_FGT(OP_TLBI_VAALE1, HFGITR, TLBIVAALE1, 1), + SR_FGT(OP_TLBI_VALE1, HFGITR, TLBIVALE1, 1), + SR_FGT(OP_TLBI_VAAE1, HFGITR, TLBIVAAE1, 1), + SR_FGT(OP_TLBI_ASIDE1, HFGITR, TLBIASIDE1, 1), + SR_FGT(OP_TLBI_VAE1, HFGITR, TLBIVAE1, 1), + SR_FGT(OP_TLBI_VMALLE1, HFGITR, TLBIVMALLE1, 1), + SR_FGT(OP_TLBI_RVAALE1, HFGITR, TLBIRVAALE1, 1), + SR_FGT(OP_TLBI_RVALE1, HFGITR, TLBIRVALE1, 1), + SR_FGT(OP_TLBI_RVAAE1, HFGITR, TLBIRVAAE1, 1), + SR_FGT(OP_TLBI_RVAE1, HFGITR, TLBIRVAE1, 1), + SR_FGT(OP_TLBI_RVAALE1IS, HFGITR, TLBIRVAALE1IS, 1), + SR_FGT(OP_TLBI_RVALE1IS, HFGITR, TLBIRVALE1IS, 1), + SR_FGT(OP_TLBI_RVAAE1IS, HFGITR, TLBIRVAAE1IS, 1), + SR_FGT(OP_TLBI_RVAE1IS, HFGITR, TLBIRVAE1IS, 1), + SR_FGT(OP_TLBI_VAALE1IS, HFGITR, TLBIVAALE1IS, 1), + SR_FGT(OP_TLBI_VALE1IS, HFGITR, TLBIVALE1IS, 1), + SR_FGT(OP_TLBI_VAAE1IS, HFGITR, TLBIVAAE1IS, 1), + SR_FGT(OP_TLBI_ASIDE1IS, HFGITR, TLBIASIDE1IS, 1), + SR_FGT(OP_TLBI_VAE1IS, HFGITR, TLBIVAE1IS, 1), + SR_FGT(OP_TLBI_VMALLE1IS, HFGITR, TLBIVMALLE1IS, 1), + SR_FGT(OP_TLBI_RVAALE1OS, HFGITR, TLBIRVAALE1OS, 1), + SR_FGT(OP_TLBI_RVALE1OS, HFGITR, TLBIRVALE1OS, 1), + SR_FGT(OP_TLBI_RVAAE1OS, HFGITR, TLBIRVAAE1OS, 1), + SR_FGT(OP_TLBI_RVAE1OS, HFGITR, TLBIRVAE1OS, 1), + SR_FGT(OP_TLBI_VAALE1OS, HFGITR, TLBIVAALE1OS, 1), + SR_FGT(OP_TLBI_VALE1OS, HFGITR, TLBIVALE1OS, 1), + SR_FGT(OP_TLBI_VAAE1OS, HFGITR, TLBIVAAE1OS, 1), + SR_FGT(OP_TLBI_ASIDE1OS, HFGITR, TLBIASIDE1OS, 1), + SR_FGT(OP_TLBI_VAE1OS, HFGITR, TLBIVAE1OS, 1), + SR_FGT(OP_TLBI_VMALLE1OS, HFGITR, TLBIVMALLE1OS, 1), + /* nXS variants must be checked against HCRX_EL2.FGTnXS */ + SR_FGF(OP_TLBI_VAALE1NXS, HFGITR, TLBIVAALE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VALE1NXS, HFGITR, TLBIVALE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAAE1NXS, HFGITR, TLBIVAAE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_ASIDE1NXS, HFGITR, TLBIASIDE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAE1NXS, HFGITR, TLBIVAE1, 1, HCRX_FGTnXS), 
+ SR_FGF(OP_TLBI_VMALLE1NXS, HFGITR, TLBIVMALLE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAALE1NXS, HFGITR, TLBIRVAALE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVALE1NXS, HFGITR, TLBIRVALE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAAE1NXS, HFGITR, TLBIRVAAE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAE1NXS, HFGITR, TLBIRVAE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAALE1ISNXS, HFGITR, TLBIRVAALE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVALE1ISNXS, HFGITR, TLBIRVALE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAAE1ISNXS, HFGITR, TLBIRVAAE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAE1ISNXS, HFGITR, TLBIRVAE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAALE1ISNXS, HFGITR, TLBIVAALE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VALE1ISNXS, HFGITR, TLBIVALE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAAE1ISNXS, HFGITR, TLBIVAAE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_ASIDE1ISNXS, HFGITR, TLBIASIDE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAE1ISNXS, HFGITR, TLBIVAE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VMALLE1ISNXS, HFGITR, TLBIVMALLE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAALE1OSNXS, HFGITR, TLBIRVAALE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVALE1OSNXS, HFGITR, TLBIRVALE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAAE1OSNXS, HFGITR, TLBIRVAAE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAE1OSNXS, HFGITR, TLBIRVAE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAALE1OSNXS, HFGITR, TLBIVAALE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VALE1OSNXS, HFGITR, TLBIVALE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAAE1OSNXS, HFGITR, TLBIVAAE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_ASIDE1OSNXS, HFGITR, TLBIASIDE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAE1OSNXS, HFGITR, TLBIVAE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VMALLE1OSNXS, HFGITR, TLBIVMALLE1OS, 1, HCRX_FGTnXS), + SR_FGT(OP_AT_S1E1WP, HFGITR, ATS1E1WP, 1), + SR_FGT(OP_AT_S1E1RP, HFGITR, ATS1E1RP, 1), + SR_FGT(OP_AT_S1E0W, HFGITR, ATS1E0W, 1), + SR_FGT(OP_AT_S1E0R, HFGITR, ATS1E0R, 1), + SR_FGT(OP_AT_S1E1W, HFGITR, ATS1E1W, 1), + SR_FGT(OP_AT_S1E1R, HFGITR, ATS1E1R, 1), + SR_FGT(SYS_DC_ZVA, HFGITR, DCZVA, 1), + SR_FGT(SYS_DC_GVA, HFGITR, DCZVA, 1), + SR_FGT(SYS_DC_GZVA, HFGITR, DCZVA, 1), + SR_FGT(SYS_DC_CIVAC, HFGITR, DCCIVAC, 1), + SR_FGT(SYS_DC_CIGVAC, HFGITR, DCCIVAC, 1), + SR_FGT(SYS_DC_CIGDVAC, HFGITR, DCCIVAC, 1), + SR_FGT(SYS_DC_CVADP, HFGITR, DCCVADP, 1), + SR_FGT(SYS_DC_CGVADP, HFGITR, DCCVADP, 1), + SR_FGT(SYS_DC_CGDVADP, HFGITR, DCCVADP, 1), + SR_FGT(SYS_DC_CVAP, HFGITR, DCCVAP, 1), + SR_FGT(SYS_DC_CGVAP, HFGITR, DCCVAP, 1), + SR_FGT(SYS_DC_CGDVAP, HFGITR, DCCVAP, 1), + SR_FGT(SYS_DC_CVAU, HFGITR, DCCVAU, 1), + SR_FGT(SYS_DC_CISW, HFGITR, DCCISW, 1), + SR_FGT(SYS_DC_CIGSW, HFGITR, DCCISW, 1), + SR_FGT(SYS_DC_CIGDSW, HFGITR, DCCISW, 1), + SR_FGT(SYS_DC_CSW, HFGITR, DCCSW, 1), + SR_FGT(SYS_DC_CGSW, HFGITR, DCCSW, 1), + SR_FGT(SYS_DC_CGDSW, HFGITR, DCCSW, 1), + SR_FGT(SYS_DC_ISW, HFGITR, DCISW, 1), + SR_FGT(SYS_DC_IGSW, HFGITR, DCISW, 1), + SR_FGT(SYS_DC_IGDSW, HFGITR, DCISW, 1), + SR_FGT(SYS_DC_IVAC, HFGITR, DCIVAC, 1), + SR_FGT(SYS_DC_IGVAC, HFGITR, DCIVAC, 1), + SR_FGT(SYS_DC_IGDVAC, HFGITR, DCIVAC, 1), + SR_FGT(SYS_IC_IVAU, HFGITR, ICIVAU, 1), + SR_FGT(SYS_IC_IALLU, HFGITR, ICIALLU, 1), + SR_FGT(SYS_IC_IALLUIS, HFGITR, ICIALLUIS, 1), + /* HDFGRTR_EL2 */ + SR_FGT(SYS_PMBIDR_EL1, HDFGRTR, PMBIDR_EL1, 1), + SR_FGT(SYS_PMSNEVFR_EL1, HDFGRTR, nPMSNEVFR_EL1, 0), + SR_FGT(SYS_BRBINF_EL1(0), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(1), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(2), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(3), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(4), HDFGRTR, nBRBDATA, 0), + 
SR_FGT(SYS_BRBINF_EL1(5), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(6), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(7), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(8), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(9), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(10), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(11), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(12), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(13), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(14), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(15), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(16), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(17), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(18), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(19), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(20), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(21), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(22), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(23), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(24), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(25), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(26), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(27), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(28), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(29), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(30), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(31), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINFINJ_EL1, HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(0), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(1), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(2), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(3), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(4), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(5), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(6), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(7), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(8), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(9), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(10), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(11), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(12), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(13), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(14), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(15), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(16), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(17), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(18), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(19), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(20), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(21), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(22), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(23), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(24), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(25), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(26), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(27), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(28), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(29), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(30), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(31), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRCINJ_EL1, HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(0), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(1), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(2), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(3), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(4), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(5), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(6), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(7), HDFGRTR, 
nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(8), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(9), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(10), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(11), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(12), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(13), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(14), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(15), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(16), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(17), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(18), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(19), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(20), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(21), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(22), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(23), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(24), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(25), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(26), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(27), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(28), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(29), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(30), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(31), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGTINJ_EL1, HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTS_EL1, HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBCR_EL1, HDFGRTR, nBRBCTL, 0), + SR_FGT(SYS_BRBFCR_EL1, HDFGRTR, nBRBCTL, 0), + SR_FGT(SYS_BRBIDR0_EL1, HDFGRTR, nBRBIDR, 0), + SR_FGT(SYS_PMCEID0_EL0, HDFGRTR, PMCEIDn_EL0, 1), + SR_FGT(SYS_PMCEID1_EL0, HDFGRTR, PMCEIDn_EL0, 1), + SR_FGT(SYS_PMUSERENR_EL0, HDFGRTR, PMUSERENR_EL0, 1), + SR_FGT(SYS_TRBTRG_EL1, HDFGRTR, TRBTRG_EL1, 1), + SR_FGT(SYS_TRBSR_EL1, HDFGRTR, TRBSR_EL1, 1), + SR_FGT(SYS_TRBPTR_EL1, HDFGRTR, TRBPTR_EL1, 1), + SR_FGT(SYS_TRBMAR_EL1, HDFGRTR, TRBMAR_EL1, 1), + SR_FGT(SYS_TRBLIMITR_EL1, HDFGRTR, TRBLIMITR_EL1, 1), + SR_FGT(SYS_TRBIDR_EL1, HDFGRTR, TRBIDR_EL1, 1), + SR_FGT(SYS_TRBBASER_EL1, HDFGRTR, TRBBASER_EL1, 1), + SR_FGT(SYS_TRCVICTLR, HDFGRTR, TRCVICTLR, 1), + SR_FGT(SYS_TRCSTATR, HDFGRTR, TRCSTATR, 1), + SR_FGT(SYS_TRCSSCSR(0), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(1), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(2), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(3), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(4), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(5), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(6), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(7), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSEQSTR, HDFGRTR, TRCSEQSTR, 1), + SR_FGT(SYS_TRCPRGCTLR, HDFGRTR, TRCPRGCTLR, 1), + SR_FGT(SYS_TRCOSLSR, HDFGRTR, TRCOSLSR, 1), + SR_FGT(SYS_TRCIMSPEC(0), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(1), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(2), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(3), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(4), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(5), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(6), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(7), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCDEVARCH, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCDEVID, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR0, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR1, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR2, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR3, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR4, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR5, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR6, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR7, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR8, HDFGRTR, TRCID, 1), + 
SR_FGT(SYS_TRCIDR9, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR10, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR11, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR12, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR13, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCCNTVR(0), HDFGRTR, TRCCNTVRn, 1), + SR_FGT(SYS_TRCCNTVR(1), HDFGRTR, TRCCNTVRn, 1), + SR_FGT(SYS_TRCCNTVR(2), HDFGRTR, TRCCNTVRn, 1), + SR_FGT(SYS_TRCCNTVR(3), HDFGRTR, TRCCNTVRn, 1), + SR_FGT(SYS_TRCCLAIMCLR, HDFGRTR, TRCCLAIM, 1), + SR_FGT(SYS_TRCCLAIMSET, HDFGRTR, TRCCLAIM, 1), + SR_FGT(SYS_TRCAUXCTLR, HDFGRTR, TRCAUXCTLR, 1), + SR_FGT(SYS_TRCAUTHSTATUS, HDFGRTR, TRCAUTHSTATUS, 1), + SR_FGT(SYS_TRCACATR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(8), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(9), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(10), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(11), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(12), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(13), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(14), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(15), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(8), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(9), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(10), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(11), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(12), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(13), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(14), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(15), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCBBCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCCCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCCTLR0, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCCTLR1, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTCTLR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTCTLR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTCTLR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTCTLR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTRLDVR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTRLDVR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTRLDVR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTRLDVR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCONFIGR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEVENTCTL0R, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEVENTCTL1R, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEXTINSELR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEXTINSELR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEXTINSELR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEXTINSELR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCQCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(7), HDFGRTR, 
TRC, 1), + SR_FGT(SYS_TRCRSCTLR(8), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(9), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(10), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(11), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(12), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(13), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(14), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(15), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(16), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(17), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(18), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(19), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(20), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(21), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(22), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(23), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(24), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(25), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(26), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(27), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(28), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(29), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(30), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(31), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSEQEVR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSEQEVR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSEQEVR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSEQRSTEVR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSTALLCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSYNCPR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCTRACEIDR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCTSCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVIIECTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVIPCSSCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVISSCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCCTLR0, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCCTLR1, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_PMSLATFR_EL1, HDFGRTR, PMSLATFR_EL1, 1), + SR_FGT(SYS_PMSIRR_EL1, HDFGRTR, PMSIRR_EL1, 1), + SR_FGT(SYS_PMSIDR_EL1, HDFGRTR, PMSIDR_EL1, 1), + SR_FGT(SYS_PMSICR_EL1, HDFGRTR, PMSICR_EL1, 1), + SR_FGT(SYS_PMSFCR_EL1, HDFGRTR, PMSFCR_EL1, 1), + SR_FGT(SYS_PMSEVFR_EL1, HDFGRTR, PMSEVFR_EL1, 1), + SR_FGT(SYS_PMSCR_EL1, HDFGRTR, PMSCR_EL1, 1), + SR_FGT(SYS_PMBSR_EL1, HDFGRTR, PMBSR_EL1, 1), + SR_FGT(SYS_PMBPTR_EL1, HDFGRTR, PMBPTR_EL1, 1), + SR_FGT(SYS_PMBLIMITR_EL1, HDFGRTR, PMBLIMITR_EL1, 1), + SR_FGT(SYS_PMMIR_EL1, HDFGRTR, PMMIR_EL1, 1), + SR_FGT(SYS_PMSELR_EL0, HDFGRTR, PMSELR_EL0, 1), + SR_FGT(SYS_PMOVSCLR_EL0, HDFGRTR, PMOVS, 1), + SR_FGT(SYS_PMOVSSET_EL0, HDFGRTR, PMOVS, 1), + SR_FGT(SYS_PMINTENCLR_EL1, HDFGRTR, 
PMINTEN, 1), + SR_FGT(SYS_PMINTENSET_EL1, HDFGRTR, PMINTEN, 1), + SR_FGT(SYS_PMCNTENCLR_EL0, HDFGRTR, PMCNTEN, 1), + SR_FGT(SYS_PMCNTENSET_EL0, HDFGRTR, PMCNTEN, 1), + SR_FGT(SYS_PMCCNTR_EL0, HDFGRTR, PMCCNTR_EL0, 1), + SR_FGT(SYS_PMCCFILTR_EL0, HDFGRTR, PMCCFILTR_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(0), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(1), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(2), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(3), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(4), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(5), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(6), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(7), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(8), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(9), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(10), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(11), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(12), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(13), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(14), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(15), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(16), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(17), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(18), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(19), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(20), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(21), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(22), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(23), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(24), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(25), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(26), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(27), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(28), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(29), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(30), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(0), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(1), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(2), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(3), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(4), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(5), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(6), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(7), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(8), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(9), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(10), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(11), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(12), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(13), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(14), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(15), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(16), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(17), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(18), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(19), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(20), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(21), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(22), HDFGRTR, PMEVCNTRn_EL0, 
1), + SR_FGT(SYS_PMEVCNTRn_EL0(23), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(24), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(25), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(26), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(27), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(28), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(29), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(30), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_OSDLR_EL1, HDFGRTR, OSDLR_EL1, 1), + SR_FGT(SYS_OSECCR_EL1, HDFGRTR, OSECCR_EL1, 1), + SR_FGT(SYS_OSLSR_EL1, HDFGRTR, OSLSR_EL1, 1), + SR_FGT(SYS_DBGPRCR_EL1, HDFGRTR, DBGPRCR_EL1, 1), + SR_FGT(SYS_DBGAUTHSTATUS_EL1, HDFGRTR, DBGAUTHSTATUS_EL1, 1), + SR_FGT(SYS_DBGCLAIMSET_EL1, HDFGRTR, DBGCLAIM, 1), + SR_FGT(SYS_DBGCLAIMCLR_EL1, HDFGRTR, DBGCLAIM, 1), + SR_FGT(SYS_MDSCR_EL1, HDFGRTR, MDSCR_EL1, 1), + /* + * The trap bits capture *64* debug registers per bit, but the + * ARM ARM only describes the encoding for the first 16, and + * we don't really support more than that anyway. + */ + SR_FGT(SYS_DBGWVRn_EL1(0), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(1), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(2), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(3), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(4), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(5), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(6), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(7), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(8), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(9), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(10), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(11), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(12), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(13), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(14), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(15), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(0), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(1), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(2), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(3), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(4), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(5), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(6), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(7), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(8), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(9), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(10), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(11), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(12), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(13), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(14), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(15), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(0), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(1), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(2), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(3), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(4), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(5), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(6), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(7), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(8), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(9), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(10), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(11), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(12), 
HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(13), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(14), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(15), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(0), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(1), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(2), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(3), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(4), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(5), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(6), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(7), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(8), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(9), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(10), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(11), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(12), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(13), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(14), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(15), HDFGRTR, DBGBCRn_EL1, 1), + /* + * HDFGWTR_EL2 + * + * Although HDFGRTR_EL2 and HDFGWTR_EL2 registers largely + * overlap in their bit assignment, there are a number of bits + * that are RES0 on one side, and an actual trap bit on the + * other. The policy chosen here is to describe all the + * read-side mappings, and only the write-side mappings that + * differ from the read side, and the trap handler will pick + * the correct shadow register based on the access type. + */ + SR_FGT(SYS_TRFCR_EL1, HDFGWTR, TRFCR_EL1, 1), + SR_FGT(SYS_TRCOSLAR, HDFGWTR, TRCOSLAR, 1), + SR_FGT(SYS_PMCR_EL0, HDFGWTR, PMCR_EL0, 1), + SR_FGT(SYS_PMSWINC_EL0, HDFGWTR, PMSWINC_EL0, 1), + SR_FGT(SYS_OSLAR_EL1, HDFGWTR, OSLAR_EL1, 1), +}; + +static union trap_config get_trap_config(u32 sysreg) +{ + return (union trap_config) { + .val = xa_to_value(xa_load(&sr_forward_xa, sysreg)), + }; +} + +static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc, + const char *type, int err) +{ + kvm_err("%s line %d encoding range " + "(%d, %d, %d, %d, %d) - (%d, %d, %d, %d, %d) (err=%d)\n", + type, tc->line, + sys_reg_Op0(tc->encoding), sys_reg_Op1(tc->encoding), + sys_reg_CRn(tc->encoding), sys_reg_CRm(tc->encoding), + sys_reg_Op2(tc->encoding), + sys_reg_Op0(tc->end), sys_reg_Op1(tc->end), + sys_reg_CRn(tc->end), sys_reg_CRm(tc->end), + sys_reg_Op2(tc->end), + err); +} + +int __init populate_nv_trap_config(void) +{ + int ret = 0; + + BUILD_BUG_ON(sizeof(union trap_config) != sizeof(void *)); + BUILD_BUG_ON(__NR_CGT_GROUP_IDS__ > BIT(TC_CGT_BITS)); + BUILD_BUG_ON(__NR_FGT_GROUP_IDS__ > BIT(TC_FGT_BITS)); + BUILD_BUG_ON(__NR_FG_FILTER_IDS__ > BIT(TC_FGF_BITS)); + + for (int i = 0; i < ARRAY_SIZE(encoding_to_cgt); i++) { + const struct encoding_to_trap_config *cgt = &encoding_to_cgt[i]; + void *prev; + + if (cgt->tc.val & BIT(63)) { + kvm_err("CGT[%d] has MBZ bit set\n", i); + ret = -EINVAL; + } + + if (cgt->encoding != cgt->end) { + prev = xa_store_range(&sr_forward_xa, + cgt->encoding, cgt->end, + xa_mk_value(cgt->tc.val), + GFP_KERNEL); + } else { + prev = xa_store(&sr_forward_xa, cgt->encoding, + xa_mk_value(cgt->tc.val), GFP_KERNEL); + if (prev && !xa_is_err(prev)) { + ret = -EINVAL; + print_nv_trap_error(cgt, "Duplicate CGT", ret); + } + } + + if (xa_is_err(prev)) { + ret = xa_err(prev); + print_nv_trap_error(cgt, "Failed CGT insertion", ret); + } + } + + kvm_info("nv: %ld coarse grained trap handlers\n", + ARRAY_SIZE(encoding_to_cgt)); + + if 
(!cpus_have_final_cap(ARM64_HAS_FGT))
+		goto check_mcb;
+
+	for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) {
+		const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i];
+		union trap_config tc;
+
+		if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) {
+			ret = -EINVAL;
+			print_nv_trap_error(fgt, "Invalid FGT", ret);
+		}
+
+		tc = get_trap_config(fgt->encoding);
+
+		if (tc.fgt) {
+			ret = -EINVAL;
+			print_nv_trap_error(fgt, "Duplicate FGT", ret);
+		}
+
+		tc.val |= fgt->tc.val;
+		xa_store(&sr_forward_xa, fgt->encoding,
+			 xa_mk_value(tc.val), GFP_KERNEL);
+	}
+
+	kvm_info("nv: %ld fine grained trap handlers\n",
+		 ARRAY_SIZE(encoding_to_fgt));
+
+check_mcb:
+	for (int id = __MULTIPLE_CONTROL_BITS__; id < __COMPLEX_CONDITIONS__; id++) {
+		const enum cgt_group_id *cgids;
+
+		cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__];
+
+		for (int i = 0; cgids[i] != __RESERVED__; i++) {
+			if (cgids[i] >= __MULTIPLE_CONTROL_BITS__) {
+				kvm_err("Recursive MCB %d/%d\n", id, cgids[i]);
+				ret = -EINVAL;
+			}
+		}
+	}
+
+	if (ret)
+		xa_destroy(&sr_forward_xa);
+
+	return ret;
+}
+
+static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu,
+					 const struct trap_bits *tb)
+{
+	enum trap_behaviour b = BEHAVE_HANDLE_LOCALLY;
+	u64 val;
+
+	val = __vcpu_sys_reg(vcpu, tb->index);
+	if ((val & tb->mask) == tb->value)
+		b |= tb->behaviour;
+
+	return b;
+}
+
+static enum trap_behaviour __compute_trap_behaviour(struct kvm_vcpu *vcpu,
+						    const enum cgt_group_id id,
+						    enum trap_behaviour b)
+{
+	switch (id) {
+		const enum cgt_group_id *cgids;
+
+	case __RESERVED__ ... __MULTIPLE_CONTROL_BITS__ - 1:
+		if (likely(id != __RESERVED__))
+			b |= get_behaviour(vcpu, &coarse_trap_bits[id]);
+		break;
+	case __MULTIPLE_CONTROL_BITS__ ... __COMPLEX_CONDITIONS__ - 1:
+		/* Yes, this is recursive. Don't do anything stupid. */
+		cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__];
+		for (int i = 0; cgids[i] != __RESERVED__; i++)
+			b |= __compute_trap_behaviour(vcpu, cgids[i], b);
+		break;
+	default:
+		if (ARRAY_SIZE(ccc))
+			b |= ccc[id - __COMPLEX_CONDITIONS__](vcpu);
+		break;
+	}
+
+	return b;
+}
+
+static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu,
+						  const union trap_config tc)
+{
+	enum trap_behaviour b = BEHAVE_HANDLE_LOCALLY;
+
+	return __compute_trap_behaviour(vcpu, tc.cgt, b);
+}
+
+static bool check_fgt_bit(u64 val, const union trap_config tc)
+{
+	return ((val >> tc.bit) & 1) == tc.pol;
+}
+
+#define sanitised_sys_reg(vcpu, reg)			\
+	({						\
+		u64 __val;				\
+		__val = __vcpu_sys_reg(vcpu, reg);	\
+		__val &= ~__ ## reg ## _RES0;		\
+		(__val);				\
+	})
+
+bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
+{
+	union trap_config tc;
+	enum trap_behaviour b;
+	bool is_read;
+	u32 sysreg;
+	u64 esr, val;
+
+	if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+		return false;
+
+	esr = kvm_vcpu_get_esr(vcpu);
+	sysreg = esr_sys64_to_sysreg(esr);
+	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
+
+	tc = get_trap_config(sysreg);
+
+	/*
+	 * A value of 0 for the whole entry means that we know nothing
+	 * for this sysreg, and that it cannot be re-injected into the
+	 * nested hypervisor. In this situation, let's cut it short.
+	 *
+	 * Note that ultimately, we could also make use of the xarray
+	 * to store the index of the sysreg in the local descriptor
+	 * array, avoiding another search... Hint, hint...
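The xarray round-trip used here relies on value entries rather than pointers: union trap_config is the size of an unsigned long (enforced by the BUILD_BUG_ON() earlier in populate_nv_trap_config()), so the whole descriptor is packed into the entry with xa_mk_value(). A minimal sketch of the two halves, not taken verbatim from the patch:

        /* insertion side: the packed descriptor is stored as an xarray value */
        xa_store(&sr_forward_xa, encoding, xa_mk_value(tc.val), GFP_KERNEL);

        /*
         * lookup side: xa_load() yields NULL for an absent key, and
         * xa_to_value(NULL) is 0, which is why a zero tc.val can double
         * as "nothing known about this sysreg" in __check_nv_sr_forward()
         */
        tc.val = xa_to_value(xa_load(&sr_forward_xa, encoding));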
+	 */
+	if (!tc.val)
+		return false;
+
+	switch ((enum fgt_group_id)tc.fgt) {
+	case __NO_FGT_GROUP__:
+		break;
+
+	case HFGxTR_GROUP:
+		if (is_read)
+			val = sanitised_sys_reg(vcpu, HFGRTR_EL2);
+		else
+			val = sanitised_sys_reg(vcpu, HFGWTR_EL2);
+		break;
+
+	case HDFGRTR_GROUP:
+	case HDFGWTR_GROUP:
+		if (is_read)
+			val = sanitised_sys_reg(vcpu, HDFGRTR_EL2);
+		else
+			val = sanitised_sys_reg(vcpu, HDFGWTR_EL2);
+		break;
+
+	case HFGITR_GROUP:
+		val = sanitised_sys_reg(vcpu, HFGITR_EL2);
+		switch (tc.fgf) {
+			u64 tmp;
+
+		case __NO_FGF__:
+			break;
+
+		case HCRX_FGTnXS:
+			tmp = sanitised_sys_reg(vcpu, HCRX_EL2);
+			if (tmp & HCRX_EL2_FGTnXS)
+				tc.fgt = __NO_FGT_GROUP__;
+		}
+		break;
+
+	case __NR_FGT_GROUP_IDS__:
+		/* Something is really wrong, bail out */
+		WARN_ONCE(1, "__NR_FGT_GROUP_IDS__");
+		return false;
+	}
+
+	if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(val, tc))
+		goto inject;
+
+	b = compute_trap_behaviour(vcpu, tc);
+
+	if (((b & BEHAVE_FORWARD_READ) && is_read) ||
+	    ((b & BEHAVE_FORWARD_WRITE) && !is_read))
+		goto inject;
+
+	return false;
+
+inject:
+	trace_kvm_forward_sysreg_trap(vcpu, sysreg, is_read);
+
+	kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+	return true;
+}
+
 static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
 {
 	u64 mode = spsr & PSR_MODE_MASK;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 20280a5233f6..95f6945c4432 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -884,21 +884,6 @@ u32 __attribute_const__ kvm_target_cpu(void)
 	return KVM_ARM_TARGET_GENERIC_V8;
 }
 
-void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
-{
-	u32 target = kvm_target_cpu();
-
-	memset(init, 0, sizeof(*init));
-
-	/*
-	 * For now, we don't return any features.
-	 * In future, we might use features to return target
-	 * specific features available for the preferred
-	 * target type.
-	 */
-	init->target = (__u32)target;
-}
-
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	return -EINVAL;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 6dcd6604b6bc..617ae6dea5d5 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -222,7 +222,33 @@ static int kvm_handle_eret(struct kvm_vcpu *vcpu)
 	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERET)
 		return kvm_handle_ptrauth(vcpu);
 
-	kvm_emulate_nested_eret(vcpu);
+	/*
+	 * If we got here, two possibilities:
+	 *
+	 * - the guest is in EL2, and we need to fully emulate ERET
+	 *
+	 * - the guest is in EL1, and we need to reinject the
+	 *   exception into the L1 hypervisor.
+	 *
+	 * If KVM ever traps ERET for its own use, we'll have to
+	 * revisit this.
+	 */
+	if (is_hyp_ctxt(vcpu))
+		kvm_emulate_nested_eret(vcpu);
+	else
+		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+	return 1;
+}
+
+static int handle_svc(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * So far, SVC traps only for NV via HFGITR_EL2. An SVC from a
+	 * 32-bit guest would be caught by vcpu_mode_is_bad_32bit(), so
+	 * we should only have to deal with a 64-bit exception.
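For the FGT comparison performed by check_fgt_bit() above, tc.bit selects a bit in the chosen HFG*/HDFG* shadow register and tc.pol gives the value that means "trap", which accommodates both polarities the architecture defines. A sketch of the two cases (the register/bit names are illustrative examples):

        /* positive trap bit: HDFGRTR_EL2.PMCR_EL0 == 1 forwards the access */
        trap = ((val >> tc.bit) & 1) == 1;

        /* negative trap bit: HFGxTR_EL2.nTPIDR2_EL0 == 0 forwards the access */
        trap = ((val >> tc.bit) & 1) == 0;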
+ */ + kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu)); return 1; } @@ -239,6 +265,7 @@ static exit_handle_fn arm_exit_handlers[] = { [ESR_ELx_EC_SMC32] = handle_smc, [ESR_ELx_EC_HVC64] = handle_hvc, [ESR_ELx_EC_SMC64] = handle_smc, + [ESR_ELx_EC_SVC64] = handle_svc, [ESR_ELx_EC_SYS64] = kvm_handle_sys_reg, [ESR_ELx_EC_SVE] = handle_sve, [ESR_ELx_EC_ERET] = kvm_handle_eret, diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 34f222af6165..9cfe6bd1dbe4 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -70,20 +70,26 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) } } -static inline bool __hfgxtr_traps_required(void) -{ - if (cpus_have_final_cap(ARM64_SME)) - return true; - - if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) - return true; +#define compute_clr_set(vcpu, reg, clr, set) \ + do { \ + u64 hfg; \ + hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0; \ + set |= hfg & __ ## reg ## _MASK; \ + clr |= ~hfg & __ ## reg ## _nMASK; \ + } while(0) - return false; -} -static inline void __activate_traps_hfgxtr(void) +static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) { + struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp; + u64 r_val, w_val; + + if (!cpus_have_final_cap(ARM64_HAS_FGT)) + return; + + ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2); + ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2); if (cpus_have_final_cap(ARM64_SME)) { tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK; @@ -98,26 +104,72 @@ static inline void __activate_traps_hfgxtr(void) if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) w_set |= HFGxTR_EL2_TCR_EL1_MASK; - sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set); - sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set); + if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { + compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set); + compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set); + } + + /* The default is not to trap anything but ACCDATA_EL1 */ + r_val = __HFGRTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1; + r_val |= r_set; + r_val &= ~r_clr; + + w_val = __HFGWTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1; + w_val |= w_set; + w_val &= ~w_clr; + + write_sysreg_s(r_val, SYS_HFGRTR_EL2); + write_sysreg_s(w_val, SYS_HFGWTR_EL2); + + if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) + return; + + ctxt_sys_reg(hctxt, HFGITR_EL2) = read_sysreg_s(SYS_HFGITR_EL2); + + r_set = r_clr = 0; + compute_clr_set(vcpu, HFGITR_EL2, r_clr, r_set); + r_val = __HFGITR_EL2_nMASK; + r_val |= r_set; + r_val &= ~r_clr; + + write_sysreg_s(r_val, SYS_HFGITR_EL2); + + ctxt_sys_reg(hctxt, HDFGRTR_EL2) = read_sysreg_s(SYS_HDFGRTR_EL2); + ctxt_sys_reg(hctxt, HDFGWTR_EL2) = read_sysreg_s(SYS_HDFGWTR_EL2); + + r_clr = r_set = w_clr = w_set = 0; + + compute_clr_set(vcpu, HDFGRTR_EL2, r_clr, r_set); + compute_clr_set(vcpu, HDFGWTR_EL2, w_clr, w_set); + + r_val = __HDFGRTR_EL2_nMASK; + r_val |= r_set; + r_val &= ~r_clr; + + w_val = __HDFGWTR_EL2_nMASK; + w_val |= w_set; + w_val &= ~w_clr; + + write_sysreg_s(r_val, SYS_HDFGRTR_EL2); + write_sysreg_s(w_val, SYS_HDFGWTR_EL2); } -static inline void __deactivate_traps_hfgxtr(void) +static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) { - u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp; + struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; - if 
(cpus_have_final_cap(ARM64_SME)) { - tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK; + if (!cpus_have_final_cap(ARM64_HAS_FGT)) + return; - r_set |= tmp; - w_set |= tmp; - } + write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2); + write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2); - if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) - w_clr |= HFGxTR_EL2_TCR_EL1_MASK; + if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) + return; - sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set); - sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set); + write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2); + write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2); + write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2); } static inline void __activate_traps_common(struct kvm_vcpu *vcpu) @@ -145,8 +197,21 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2); write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); - if (__hfgxtr_traps_required()) - __activate_traps_hfgxtr(); + if (cpus_have_final_cap(ARM64_HAS_HCX)) { + u64 hcrx = HCRX_GUEST_FLAGS; + if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { + u64 clr = 0, set = 0; + + compute_clr_set(vcpu, HCRX_EL2, clr, set); + + hcrx |= set; + hcrx &= ~clr; + } + + write_sysreg_s(hcrx, SYS_HCRX_EL2); + } + + __activate_traps_hfgxtr(vcpu); } static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) @@ -162,8 +227,10 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU); } - if (__hfgxtr_traps_required()) - __deactivate_traps_hfgxtr(); + if (cpus_have_final_cap(ARM64_HAS_HCX)) + write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2); + + __deactivate_traps_hfgxtr(vcpu); } static inline void ___activate_traps(struct kvm_vcpu *vcpu) @@ -177,9 +244,6 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu) if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); - - if (cpus_have_final_cap(ARM64_HAS_HCX)) - write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2); } static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) @@ -194,9 +258,6 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) vcpu->arch.hcr_el2 &= ~HCR_VSE; vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE; } - - if (cpus_have_final_cap(ARM64_HAS_HCX)) - write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2); } static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h index d5ec972b5c1e..230e4f2527de 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mm.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h @@ -26,6 +26,7 @@ int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot int __pkvm_create_private_mapping(phys_addr_t phys, size_t size, enum kvm_pgtable_prot prot, unsigned long *haddr); +int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr); int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr); #endif /* __KVM_HYP_MM_H */ diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index a169c619db60..857d9bc04fd4 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -135,6 +135,16 @@ static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctx __kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level); } +static void 
+handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+	DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
+	DECLARE_REG(unsigned long, pages, host_ctxt, 3);
+
+	__kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);
+}
+
 static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
@@ -327,6 +337,7 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid),
+	HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
 	HANDLE_FUNC(__kvm_flush_cpu_context),
 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
 	HANDLE_FUNC(__vgic_v3_read_vmcr),
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 318298eb3d6b..65a7a186d7b2 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -44,6 +44,27 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
 	return err;
 }
 
+static int __pkvm_alloc_private_va_range(unsigned long start, size_t size)
+{
+	unsigned long cur;
+
+	hyp_assert_lock_held(&pkvm_pgd_lock);
+
+	if (!start || start < __io_map_base)
+		return -EINVAL;
+
+	/* The allocated size is always a multiple of PAGE_SIZE */
+	cur = start + PAGE_ALIGN(size);
+
+	/* Are we overflowing on the vmemmap? */
+	if (cur > __hyp_vmemmap)
+		return -ENOMEM;
+
+	__io_map_base = cur;
+
+	return 0;
+}
+
 /**
  * pkvm_alloc_private_va_range - Allocates a private VA range.
  * @size:	The size of the VA range to reserve.
@@ -56,27 +77,16 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
  */
 int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
 {
-	unsigned long base, addr;
-	int ret = 0;
+	unsigned long addr;
+	int ret;
 
 	hyp_spin_lock(&pkvm_pgd_lock);
-
-	/* Align the allocation based on the order of its size */
-	addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));
-
-	/* The allocated size is always a multiple of PAGE_SIZE */
-	base = addr + PAGE_ALIGN(size);
-
-	/* Are we overflowing on the vmemmap? */
-	if (!addr || base > __hyp_vmemmap)
-		ret = -ENOMEM;
-	else {
-		__io_map_base = base;
-		*haddr = addr;
-	}
-
+	addr = __io_map_base;
+	ret = __pkvm_alloc_private_va_range(addr, size);
 	hyp_spin_unlock(&pkvm_pgd_lock);
 
+	*haddr = addr;
+
 	return ret;
 }
 
@@ -340,6 +350,45 @@ int hyp_create_idmap(u32 hyp_va_bits)
 	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
 }
 
+int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
+{
+	unsigned long addr, prev_base;
+	size_t size;
+	int ret;
+
+	hyp_spin_lock(&pkvm_pgd_lock);
+
+	prev_base = __io_map_base;
+	/*
+	 * Efficient stack verification using the PAGE_SHIFT bit implies
+	 * an alignment of our allocation on the order of the size.
+	 */
+	size = PAGE_SIZE * 2;
+	addr = ALIGN(__io_map_base, size);
+
+	ret = __pkvm_alloc_private_va_range(addr, size);
+	if (!ret) {
+		/*
+		 * Since the stack grows downwards, map the stack to the page
+		 * at the higher address and leave the lower guard page
+		 * unbacked.
+		 *
+		 * Any valid stack address now has the PAGE_SHIFT bit as 1
+		 * and addresses corresponding to the guard page have the
+		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
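To make the guard-page arithmetic concrete: assume 4KiB pages (PAGE_SHIFT == 12) and a hypothetical __io_map_base of 0x5000. Then addr = ALIGN(0x5000, 0x2000) = 0x6000, the unbacked guard page covers [0x6000, 0x7000) where bit 12 is clear, the mapped stack page covers [0x7000, 0x8000) where bit 12 is set, and *haddr ends up as 0x8000. The overflow test this layout enables looks something like the following (hypothetical helper, not part of this patch):

        static bool hyp_stack_overflowed(unsigned long sp)
        {
                /* any SP that slid down into the guard page has bit PAGE_SHIFT clear */
                return !(sp & BIT(PAGE_SHIFT));
        }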
+ */ + ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE, + PAGE_SIZE, phys, PAGE_HYP); + if (ret) + __io_map_base = prev_base; + } + hyp_spin_unlock(&pkvm_pgd_lock); + + *haddr = addr + size; + + return ret; +} + static void *admit_host_page(void *arg) { struct kvm_hyp_memcache *host_mc = arg; diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index bb98630dfeaf..0d5e0a89ddce 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -113,7 +113,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, for (i = 0; i < hyp_nr_cpus; i++) { struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i); - unsigned long hyp_addr; start = (void *)kern_hyp_va(per_cpu_base[i]); end = start + PAGE_ALIGN(hyp_percpu_size); @@ -121,33 +120,9 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, if (ret) return ret; - /* - * Allocate a contiguous HYP private VA range for the stack - * and guard page. The allocation is also aligned based on - * the order of its size. - */ - ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr); + ret = pkvm_create_stack(params->stack_pa, ¶ms->stack_hyp_va); if (ret) return ret; - - /* - * Since the stack grows downwards, map the stack to the page - * at the higher address and leave the lower guard page - * unbacked. - * - * Any valid stack address now has the PAGE_SHIFT bit as 1 - * and addresses corresponding to the guard page have the - * PAGE_SHIFT bit as 0 - this is used for overflow detection. - */ - hyp_spin_lock(&pkvm_pgd_lock); - ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE, - PAGE_SIZE, params->stack_pa, PAGE_HYP); - hyp_spin_unlock(&pkvm_pgd_lock); - if (ret) - return ret; - - /* Update stack_hyp_va to end of the stack's private VA range */ - params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE); } /* diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index e89a23153e85..c353a06ee7e6 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -236,7 +236,7 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) * KVM_ARM_VCPU_INIT, however, this is likely not possible for * protected VMs. 
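With pkvm_create_stack() now owning the allocation, the mapping, and the rollback (note how __io_map_base is restored to prev_base when the mapping fails, so a failed stack does not leak private VA space), the per-CPU loop in the setup.c hunk above reduces to roughly this (condensed from the diff):

        for (i = 0; i < hyp_nr_cpus; i++) {
                struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);

                /* stack_hyp_va receives the top of the stack's private VA range */
                ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
                if (ret)
                        return ret;
        }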
 */
-	vcpu->arch.target = -1;
+	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
 	*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
 	*exit_code |= ARM_EXCEPTION_IL;
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index b9991bbd8e3f..1b265713d6be 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -182,6 +182,36 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	__tlb_switch_to_host(&cxt);
 }
 
+void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+				phys_addr_t start, unsigned long pages)
+{
+	struct tlb_inv_context cxt;
+	unsigned long stride;
+
+	/*
+	 * Since the range of addresses may not be mapped at
+	 * the same level, assume the worst case as PAGE_SIZE
+	 */
+	stride = PAGE_SIZE;
+	start = round_down(start, stride);
+
+	/* Switch to requested VMID */
+	__tlb_switch_to_guest(mmu, &cxt, false);
+
+	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+
+	dsb(ish);
+	__tlbi(vmalle1is);
+	dsb(ish);
+	isb();
+
+	/* See the comment in __kvm_tlb_flush_vmid_ipa() */
+	if (icache_is_vpipt())
+		icache_inval_all_pou();
+
+	__tlb_switch_to_host(&cxt);
+}
+
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index f7a93ef29250..f155b8c9e98c 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -670,6 +670,26 @@ static bool stage2_has_fwb(struct kvm_pgtable *pgt)
 	return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
 }
 
+void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+			      phys_addr_t addr, size_t size)
+{
+	unsigned long pages, inval_pages;
+
+	if (!system_supports_tlb_range()) {
+		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+		return;
+	}
+
+	pages = size >> PAGE_SHIFT;
+	while (pages > 0) {
+		inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
+		kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);
+
+		addr += inval_pages << PAGE_SHIFT;
+		pages -= inval_pages;
+	}
+}
+
 #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
 
 static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
@@ -786,7 +806,8 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
 	 * evicted pte value (if any).
 	 */
 	if (kvm_pte_table(ctx->old, ctx->level))
-		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+		kvm_tlb_flush_vmid_range(mmu, ctx->addr,
+					kvm_granule_size(ctx->level));
 	else if (kvm_pte_valid(ctx->old))
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
 			     ctx->addr, ctx->level);
@@ -810,16 +831,36 @@ static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t n
 	smp_store_release(ctx->ptep, new);
 }
 
-static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
-			   struct kvm_pgtable_mm_ops *mm_ops)
+static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
+{
+	/*
+	 * If FEAT_TLBIRANGE is implemented, defer the individual
+	 * TLB invalidations until the entire walk is finished, and
+	 * then use the range-based TLBI instructions to do the
+	 * invalidations. Condition deferred TLB invalidation on the
+	 * system supporting FWB as the optimization is entirely
+	 * pointless when the unmap walker needs to perform CMOs.
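kvm_tlb_flush_vmid_range() splits the request because a single ranged TLBI operation can only name MAX_TLBI_RANGE_PAGES pages at once (2^21 pages on 4KiB-page configurations, i.e. 8GiB of address space); anything larger is issued as several hypercalls. A worked example under those assumptions:

        /* unmapping 1GiB with 4KiB pages: size = 1 << 30 */
        pages = (1UL << 30) >> PAGE_SHIFT;  /* 262144 pages, < MAX_TLBI_RANGE_PAGES */

        /*
         * the loop therefore issues a single __kvm_tlb_flush_vmid_range
         * hypercall; without FEAT_TLBIRANGE the whole VMID is flushed instead
         */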
+ */ + return system_supports_tlb_range() && stage2_has_fwb(pgt); +} + +static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx, + struct kvm_s2_mmu *mmu, + struct kvm_pgtable_mm_ops *mm_ops) { + struct kvm_pgtable *pgt = ctx->arg; + /* - * Clear the existing PTE, and perform break-before-make with - * TLB maintenance if it was valid. + * Clear the existing PTE, and perform break-before-make if it was + * valid. Depending on the system support, defer the TLB maintenance + * for the same until the entire unmap walk is completed. */ if (kvm_pte_valid(ctx->old)) { kvm_clear_pte(ctx->ptep); - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level); + + if (!stage2_unmap_defer_tlb_flush(pgt)) + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, + ctx->addr, ctx->level); } mm_ops->put_page(ctx->ptep); @@ -1077,7 +1118,7 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx, * block entry and rely on the remaining portions being faulted * back lazily. */ - stage2_put_pte(ctx, mmu, mm_ops); + stage2_unmap_put_pte(ctx, mmu, mm_ops); if (need_flush && mm_ops->dcache_clean_inval_poc) mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops), @@ -1091,13 +1132,19 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx, int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) { + int ret; struct kvm_pgtable_walker walker = { .cb = stage2_unmap_walker, .arg = pgt, .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, }; - return kvm_pgtable_walk(pgt, addr, size, &walker); + ret = kvm_pgtable_walk(pgt, addr, size, &walker); + if (stage2_unmap_defer_tlb_flush(pgt)) + /* Perform the deferred TLB invalidations */ + kvm_tlb_flush_vmid_range(pgt->mmu, addr, size); + + return ret; } struct stage2_attr_data { diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c index e69da550cdc5..46bd43f61d76 100644 --- a/arch/arm64/kvm/hyp/vhe/tlb.c +++ b/arch/arm64/kvm/hyp/vhe/tlb.c @@ -143,6 +143,34 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu, __tlb_switch_to_host(&cxt); } +void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, + phys_addr_t start, unsigned long pages) +{ + struct tlb_inv_context cxt; + unsigned long stride; + + /* + * Since the range of addresses may not be mapped at + * the same level, assume the worst case as PAGE_SIZE + */ + stride = PAGE_SIZE; + start = round_down(start, stride); + + dsb(ishst); + + /* Switch to requested VMID */ + __tlb_switch_to_guest(mmu, &cxt); + + __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0); + + dsb(ish); + __tlbi(vmalle1is); + dsb(ish); + isb(); + + __tlb_switch_to_host(&cxt); +} + void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) { struct tlb_inv_context cxt; diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index d3b4feed460c..587a104f66c3 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -161,15 +161,23 @@ static bool memslot_is_logging(struct kvm_memory_slot *memslot) } /** - * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8 + * kvm_arch_flush_remote_tlbs() - flush all VM TLB entries for v7/8 * @kvm: pointer to kvm structure. 
* * Interface to HYP function to flush all VM TLB entries */ -void kvm_flush_remote_tlbs(struct kvm *kvm) +int kvm_arch_flush_remote_tlbs(struct kvm *kvm) { - ++kvm->stat.generic.remote_tlb_flush_requests; kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu); + return 0; +} + +int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, + gfn_t gfn, u64 nr_pages) +{ + kvm_tlb_flush_vmid_range(&kvm->arch.mmu, + gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); + return 0; } static bool kvm_is_device_pfn(unsigned long pfn) @@ -592,6 +600,25 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot) return 0; } +static int __hyp_alloc_private_va_range(unsigned long base) +{ + lockdep_assert_held(&kvm_hyp_pgd_mutex); + + if (!PAGE_ALIGNED(base)) + return -EINVAL; + + /* + * Verify that BIT(VA_BITS - 1) hasn't been flipped by + * allocating the new area, as it would indicate we've + * overflowed the idmap/IO address range. + */ + if ((base ^ io_map_base) & BIT(VA_BITS - 1)) + return -ENOMEM; + + io_map_base = base; + + return 0; +} /** * hyp_alloc_private_va_range - Allocates a private VA range. @@ -612,26 +639,16 @@ int hyp_alloc_private_va_range(size_t size, unsigned long *haddr) /* * This assumes that we have enough space below the idmap - * page to allocate our VAs. If not, the check below will - * kick. A potential alternative would be to detect that - * overflow and switch to an allocation above the idmap. + * page to allocate our VAs. If not, the check in + * __hyp_alloc_private_va_range() will kick. A potential + * alternative would be to detect that overflow and switch + * to an allocation above the idmap. * * The allocated size is always a multiple of PAGE_SIZE. */ - base = io_map_base - PAGE_ALIGN(size); - - /* Align the allocation based on the order of its size */ - base = ALIGN_DOWN(base, PAGE_SIZE << get_order(size)); - - /* - * Verify that BIT(VA_BITS - 1) hasn't been flipped by - * allocating the new area, as it would indicate we've - * overflowed the idmap/IO address range. - */ - if ((base ^ io_map_base) & BIT(VA_BITS - 1)) - ret = -ENOMEM; - else - *haddr = io_map_base = base; + size = PAGE_ALIGN(size); + base = io_map_base - size; + ret = __hyp_alloc_private_va_range(base); mutex_unlock(&kvm_hyp_pgd_mutex); @@ -668,6 +685,48 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, return ret; } +int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr) +{ + unsigned long base; + size_t size; + int ret; + + mutex_lock(&kvm_hyp_pgd_mutex); + /* + * Efficient stack verification using the PAGE_SHIFT bit implies + * an alignment of our allocation on the order of the size. + */ + size = PAGE_SIZE * 2; + base = ALIGN_DOWN(io_map_base - size, size); + + ret = __hyp_alloc_private_va_range(base); + + mutex_unlock(&kvm_hyp_pgd_mutex); + + if (ret) { + kvm_err("Cannot allocate hyp stack guard page\n"); + return ret; + } + + /* + * Since the stack grows downwards, map the stack to the page + * at the higher address and leave the lower guard page + * unbacked. + * + * Any valid stack address now has the PAGE_SHIFT bit as 1 + * and addresses corresponding to the guard page have the + * PAGE_SHIFT bit as 0 - this is used for overflow detection. 
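The overflow test in __hyp_alloc_private_va_range() works because the kernel-side hyp allocator grows downwards from io_map_base within the idmap/IO half of the hyp VA space. Restated on its own as a sketch:

        /*
         * if subtracting the new range flipped bit (VA_BITS - 1), the
         * allocation walked out of the idmap/IO half and must fail
         */
        if ((base ^ io_map_base) & BIT(VA_BITS - 1))
                return -ENOMEM;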
+ */ + ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr, + PAGE_HYP); + if (ret) + kvm_err("Cannot map hyp stack\n"); + + *haddr = base + size; + + return ret; +} + /** * create_hyp_io_mappings - Map IO into both kernel and HYP * @phys_addr: The physical start address which gets mapped @@ -1075,7 +1134,7 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) write_lock(&kvm->mmu_lock); stage2_wp_range(&kvm->arch.mmu, start, end); write_unlock(&kvm->mmu_lock); - kvm_flush_remote_tlbs(kvm); + kvm_flush_remote_tlbs_memslot(kvm, memslot); } /** @@ -1541,7 +1600,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, out_unlock: read_unlock(&kvm->mmu_lock); - kvm_set_pfn_accessed(pfn); kvm_release_pfn_clean(pfn); return ret != -EAGAIN ? ret : 0; } @@ -1721,7 +1779,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - kvm_pfn_t pfn = pte_pfn(range->pte); + kvm_pfn_t pfn = pte_pfn(range->arg.pte); if (!kvm->arch.mmu.pgt) return false; diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 315354d27978..042695a210ce 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -71,8 +71,9 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p, break; case SYS_ID_AA64MMFR0_EL1: - /* Hide ECV, FGT, ExS, Secure Memory */ - val &= ~(GENMASK_ULL(63, 43) | + /* Hide ECV, ExS, Secure Memory */ + val &= ~(NV_FTR(MMFR0, ECV) | + NV_FTR(MMFR0, EXS) | NV_FTR(MMFR0, TGRAN4_2) | NV_FTR(MMFR0, TGRAN16_2) | NV_FTR(MMFR0, TGRAN64_2) | @@ -116,7 +117,8 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p, break; case SYS_ID_AA64MMFR1_EL1: - val &= (NV_FTR(MMFR1, PAN) | + val &= (NV_FTR(MMFR1, HCX) | + NV_FTR(MMFR1, PAN) | NV_FTR(MMFR1, LO) | NV_FTR(MMFR1, HPDS) | NV_FTR(MMFR1, VH) | @@ -124,8 +126,7 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p, break; case SYS_ID_AA64MMFR2_EL1: - val &= ~(NV_FTR(MMFR2, EVT) | - NV_FTR(MMFR2, BBM) | + val &= ~(NV_FTR(MMFR2, BBM) | NV_FTR(MMFR2, TTL) | GENMASK_ULL(47, 44) | NV_FTR(MMFR2, ST) | diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 560650972478..6b066e04dc5d 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -14,6 +14,7 @@ #include <asm/kvm_emulate.h> #include <kvm/arm_pmu.h> #include <kvm/arm_vgic.h> +#include <asm/arm_pmuv3.h> #define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0) @@ -35,12 +36,8 @@ static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx) return &vcpu->arch.pmu.pmc[cnt_idx]; } -static u32 kvm_pmu_event_mask(struct kvm *kvm) +static u32 __kvm_pmu_event_mask(unsigned int pmuver) { - unsigned int pmuver; - - pmuver = kvm->arch.arm_pmu->pmuver; - switch (pmuver) { case ID_AA64DFR0_EL1_PMUVer_IMP: return GENMASK(9, 0); @@ -55,6 +52,14 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) } } +static u32 kvm_pmu_event_mask(struct kvm *kvm) +{ + u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1); + u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0); + + return __kvm_pmu_event_mask(pmuver); +} + /** * kvm_pmc_is_64bit - determine if counter is 64bit * @pmc: counter context @@ -672,8 +677,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu) { struct arm_pmu_entry *entry; - if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI || - pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) + /* + * Check the sanitised PMU version for the system, as KVM does not + * support implementations where PMUv3 
exists on a subset of CPUs. + */ + if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit())) return; mutex_lock(&arm_pmus_lock); @@ -750,11 +758,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) } else { val = read_sysreg(pmceid1_el0); /* - * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled + * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled * as RAZ */ - if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4) - val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); + val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) | + BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) | + BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32)); base = 32; } @@ -950,11 +959,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return 0; } case KVM_ARM_VCPU_PMU_V3_FILTER: { + u8 pmuver = kvm_arm_pmu_get_pmuver_limit(); struct kvm_pmu_event_filter __user *uaddr; struct kvm_pmu_event_filter filter; int nr_events; - nr_events = kvm_pmu_event_mask(kvm) + 1; + /* + * Allow userspace to specify an event filter for the entire + * event range supported by PMUVer of the hardware, rather + * than the guest's PMUVer for KVM backward compatibility. + */ + nr_events = __kvm_pmu_event_mask(pmuver) + 1; uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr; diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index 121f1a14c829..0eea225fd09a 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -236,3 +236,21 @@ bool kvm_set_pmuserenr(u64 val) ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val; return true; } + +/* + * If we interrupted the guest to update the host PMU context, make + * sure we re-apply the guest EL0 state. + */ +void kvm_vcpu_pmu_resync_el0(void) +{ + struct kvm_vcpu *vcpu; + + if (!has_vhe() || !in_interrupt()) + return; + + vcpu = kvm_get_running_vcpu(); + if (!vcpu) + return; + + kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu); +} diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index bc8556b6f459..7a65a35ee4ac 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -248,21 +248,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) } } - switch (vcpu->arch.target) { - default: - if (vcpu_el1_is_32bit(vcpu)) { - pstate = VCPU_RESET_PSTATE_SVC; - } else if (vcpu_has_nv(vcpu)) { - pstate = VCPU_RESET_PSTATE_EL2; - } else { - pstate = VCPU_RESET_PSTATE_EL1; - } - - if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) { - ret = -EINVAL; - goto out; - } - break; + if (vcpu_el1_is_32bit(vcpu)) + pstate = VCPU_RESET_PSTATE_SVC; + else if (vcpu_has_nv(vcpu)) + pstate = VCPU_RESET_PSTATE_EL2; + else + pstate = VCPU_RESET_PSTATE_EL1; + + if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) { + ret = -EINVAL; + goto out; } /* Reset core registers */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2ca2973abe66..e92ec810d449 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2151,6 +2151,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 }, + { SYS_DESC(SYS_ACCDATA_EL1), undef_access }, + { SYS_DESC(SYS_SCXTNUM_EL1), undef_access }, { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0}, @@ -2365,8 +2367,13 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(MDCR_EL2, access_rw, reset_val, 0), EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1), EL2_REG(HSTR_EL2, access_rw, reset_val, 0), + 
EL2_REG(HFGRTR_EL2, access_rw, reset_val, 0), + EL2_REG(HFGWTR_EL2, access_rw, reset_val, 0), + EL2_REG(HFGITR_EL2, access_rw, reset_val, 0), EL2_REG(HACR_EL2, access_rw, reset_val, 0), + EL2_REG(HCRX_EL2, access_rw, reset_val, 0), + EL2_REG(TTBR0_EL2, access_rw, reset_val, 0), EL2_REG(TTBR1_EL2, access_rw, reset_val, 0), EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1), @@ -2374,6 +2381,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(VTCR_EL2, access_rw, reset_val, 0), { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 }, + EL2_REG(HDFGRTR_EL2, access_rw, reset_val, 0), + EL2_REG(HDFGWTR_EL2, access_rw, reset_val, 0), EL2_REG(SPSR_EL2, access_rw, reset_val, 0), EL2_REG(ELR_EL2, access_rw, reset_val, 0), { SYS_DESC(SYS_SP_EL1), access_sp_el1}, @@ -3170,6 +3179,9 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu) trace_kvm_handle_sys_reg(esr); + if (__check_nv_sr_forward(vcpu)) + return 1; + params = esr_sys64_to_params(esr); params.regval = vcpu_get_reg(vcpu, Rt); @@ -3587,5 +3599,8 @@ int __init kvm_sys_reg_table_init(void) if (!first_idreg) return -EINVAL; + if (kvm_get_mode() == KVM_MODE_NV) + return populate_nv_trap_config(); + return 0; } diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h index 6ce5c025218d..8ad53104934d 100644 --- a/arch/arm64/kvm/trace_arm.h +++ b/arch/arm64/kvm/trace_arm.h @@ -364,6 +364,32 @@ TRACE_EVENT(kvm_inject_nested_exception, __entry->hcr_el2) ); +TRACE_EVENT(kvm_forward_sysreg_trap, + TP_PROTO(struct kvm_vcpu *vcpu, u32 sysreg, bool is_read), + TP_ARGS(vcpu, sysreg, is_read), + + TP_STRUCT__entry( + __field(u64, pc) + __field(u32, sysreg) + __field(bool, is_read) + ), + + TP_fast_assign( + __entry->pc = *vcpu_pc(vcpu); + __entry->sysreg = sysreg; + __entry->is_read = is_read; + ), + + TP_printk("%llx %c (%d,%d,%d,%d,%d)", + __entry->pc, + __entry->is_read ? 'R' : 'W', + sys_reg_Op0(__entry->sysreg), + sys_reg_Op1(__entry->sysreg), + sys_reg_CRn(__entry->sysreg), + sys_reg_CRm(__entry->sysreg), + sys_reg_Op2(__entry->sysreg)) +); + #endif /* _TRACE_ARM_ARM64_KVM_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index f9923beedd27..0ab09b0d4440 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -199,7 +199,6 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu); void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); -void vgic_v2_set_npie(struct kvm_vcpu *vcpu); int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val); @@ -233,7 +232,6 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu); void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); -void vgic_v3_set_npie(struct kvm_vcpu *vcpu); void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v3_enable(struct kvm_vcpu *vcpu); |
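Tying the pieces together: with the kvm_handle_sys_reg() hook above, a trapped access from an L1 guest first goes through __check_nv_sr_forward(), which fires the new tracepoint before reinjecting the exception. For an L1 guest reading PMCR_EL0 (encoded as Op0=3, Op1=3, CRn=9, CRm=12, Op2=0) while its HDFGRTR_EL2.PMCR_EL0 bit is set, the trace output would look something like this (the PC value is hypothetical):

        kvm_forward_sysreg_trap: ffff800080a51234 R (3,3,9,12,0)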