author      Marc Zyngier <maz@kernel.org>            2024-02-14 13:18:20 +0000
committer   Oliver Upton <oliver.upton@linux.dev>    2024-02-19 17:13:01 +0000
commit      c5bac1ef7df6bcce4feaa5a609119cab5d5e4730 (patch)
tree        45d2beb431d589ed7a73cc88d7d6d33037bea523 /arch/arm64/kvm
parent      f5a5a406b4b8bb6c1fc7a1e92a872bd86061a53f (diff)
KVM: arm64: Move existing feature disabling over to FGU infrastructure
We already trap a bunch of existing features for the purpose of
disabling them (MAIR2, POR, ACCDATA, SME...). Let's move them over
to our brand new FGU infrastructure.

Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240214131827.2856277-20-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
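For context, FGU ("Fine-Grained UNDEF") is the per-VM bookkeeping this patch builds on: one bitmask per fine-grained-trap register group (kvm->arch.fgu[] in the hunks below), where a set bit means "force this register to trap so the access can be reported to the guest as undefined". The following standalone sketch only illustrates that idea; the demo_* names are invented and this is not the kernel code.

/*
 * Illustrative sketch only -- not kernel code.  Each VM keeps one
 * bitmask per fine-grained-trap register group; a bit set in that mask
 * means "trap this register and report the access as UNDEFINED".
 * All demo_* names are invented for this sketch.
 */
#include <stdint.h>

typedef uint64_t u64;

enum demo_fgt_group {
	DEMO_HFGxTR_GROUP,	/* stands in for HFGxTR_GROUP in the patch */
	DEMO_NR_FGT_GROUPS,
};

struct demo_vm_arch {
	u64 fgu[DEMO_NR_FGT_GROUPS];	/* registers to force-UNDEF, per group */
};

/* Record, once per VM, the registers whose features are not exposed. */
static void demo_disable_unsupported(struct demo_vm_arch *arch, u64 fgt_bits)
{
	arch->fgu[DEMO_HFGxTR_GROUP] |= fgt_bits;
}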
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/arm.c                       6
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h   17
-rw-r--r--  arch/arm64/kvm/sys_regs.c                 23
3 files changed, 32 insertions, 14 deletions
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c063e84fc72c..9f806c9b7d5d 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -675,6 +675,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
return ret;
}
+ /*
+ * This needs to happen after NV has imposed its own restrictions on
+ * the feature set
+ */
+ kvm_init_sysreg(vcpu);
+
ret = kvm_timer_enable(vcpu);
if (ret)
return ret;
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index a09149fd91ed..245f9c1ca666 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -157,7 +157,7 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
- u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
+ u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0;
u64 r_val, w_val;
CHECK_FGT_MASKS(HFGRTR_EL2);
@@ -174,13 +174,6 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2);
ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2);
- if (cpus_have_final_cap(ARM64_SME)) {
- tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
-
- r_clr |= tmp;
- w_clr |= tmp;
- }
-
/*
* Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD.
*/
@@ -195,15 +188,11 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
compute_undef_clr_set(vcpu, kvm, HFGRTR_EL2, r_clr, r_set);
compute_undef_clr_set(vcpu, kvm, HFGWTR_EL2, w_clr, w_set);
- /* The default to trap everything not handled or supported in KVM. */
- tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 |
- HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1;
-
- r_val = __HFGRTR_EL2_nMASK & ~tmp;
+ r_val = __HFGRTR_EL2_nMASK;
r_val |= r_set;
r_val &= ~r_clr;
- w_val = __HFGWTR_EL2_nMASK & ~tmp;
+ w_val = __HFGWTR_EL2_nMASK;
w_val |= w_set;
w_val &= ~w_clr;
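With the ad-hoc SME and MAIR2/POR/ACCDATA handling removed above, the FGU bits now reach r_val/w_val through the existing compute_undef_clr_set() calls, which fold kvm->arch.fgu[HFGxTR_GROUP] into r_clr/w_clr. The affected HFGRTR_EL2/HFGWTR_EL2 fields are the negative-polarity "n" bits (bit clear = access traps), so starting from __HFGRTR_EL2_nMASK / __HFGWTR_EL2_nMASK and clearing the FGU bits is what enables the traps. A minimal standalone sketch of that computation (a simplification, not the kernel macro):

/*
 * Standalone sketch (not the kernel macro) of how an FGU mask is folded
 * into the value programmed into HFGRTR_EL2.  The "n"-prefixed fields
 * are negative polarity: bit set = no trap, bit clear = access traps.
 */
#include <stdint.h>

typedef uint64_t u64;

static u64 demo_hfgrtr_value(u64 nmask,	/* all negative-polarity bits */
			     u64 fgu,	/* cf. kvm->arch.fgu[HFGxTR_GROUP] */
			     u64 r_set, u64 r_clr)
{
	u64 val;

	r_clr |= fgu & nmask;	/* roughly what compute_undef_clr_set() adds */

	val = nmask;		/* default: every "n" bit set, nothing traps */
	val |= r_set;
	val &= ~r_clr;		/* clearing an "n" bit enables the trap */

	return val;
}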
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 938df5dc3684..39e7c7f74717 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -3942,6 +3942,29 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *
return 0;
}
+void kvm_init_sysreg(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
+ goto out;
+
+ kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 |
+ HFGxTR_EL2_nMAIR2_EL1 |
+ HFGxTR_EL2_nS2POR_EL1 |
+ HFGxTR_EL2_nPOR_EL1 |
+ HFGxTR_EL2_nPOR_EL0 |
+ HFGxTR_EL2_nACCDATA_EL1 |
+ HFGxTR_EL2_nSMPRI_EL1_MASK |
+ HFGxTR_EL2_nTPIDR2_EL0_MASK);
+
+ set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
+out:
+ mutex_unlock(&kvm->arch.config_lock);
+}
+
int __init kvm_sys_reg_table_init(void)
{
struct sys_reg_params params;
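Two notes on the new kvm_init_sysreg() hunk above: it is called per vCPU (from kvm_arch_vcpu_run_pid_change()), but the FGU table is per-VM, so the KVM_ARCH_FLAG_FGU_INITIALIZED flag taken under config_lock ensures only the first vCPU to reach that point populates it. And nothing in this patch adds handlers for the affected registers; the point of marking them in kvm->arch.fgu[] is that a trapped access gets reported back to the guest as undefined. A loose, standalone sketch of that consuming side, with invented demo_* names (in KVM itself this is where the existing kvm_inject_undefined() helper would come in):

/*
 * Loose sketch of the consuming side, not kernel code: when a guest
 * access traps because of a fine-grained trap, check whether that
 * register was marked in the VM's FGU mask and, if so, treat the
 * access as UNDEFINED.
 */
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

struct demo_vm_arch {
	u64 fgu[1];	/* one group for the sketch, cf. HFGxTR_GROUP */
};

static bool demo_access_is_undef(const struct demo_vm_arch *arch,
				 unsigned int group, u64 fgt_bit)
{
	/* Marked for Fine-Grained UNDEF: report it to the guest. */
	return arch->fgu[group] & fgt_bit;
}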