| author    | Marc Zyngier <maz@kernel.org>            | 2025-11-20 17:25:10 +0000 |
|-----------|------------------------------------------|---------------------------|
| committer | Oliver Upton <oupton@kernel.org>         | 2025-11-24 14:29:13 -0800 |
| commit    | cf72ee63711916ad808f82eb054dd9d69727a5bf |                           |
| tree      | 8b1b561c4cd03794e4d76b39168bedf31a66919c |                           |
| parent    | dd598fc1139f7181118719574a4e270e51e0a0eb |                           |
KVM: arm64: Eagerly save VMCR on exit
We currently save/restore the VMCR register in a pretty lazy way
(on load/put, consistent with what we do for the APRs).
However, we are going to need the group-enable bits that are backed
by VMCR on each entry (so that we can avoid injecting interrupts for
disabled groups).
Move the synchronisation from put to sync on exit; this results in some
minor churn in the nVHE hypercalls, but leaves them slightly simpler.
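[Editor's note: a minimal, illustrative sketch of why an eagerly saved VMCR helps at entry time. The group-enable bits live in ICH_VMCR_EL2 (VENG0 is bit 0, VENG1 is bit 1 in the architectural layout), so once the register is captured on every exit, a later entry path can test them from the in-memory copy without reading hardware or issuing an extra hypercall. The mask and helper names below are made up for the example and are not part of this patch.]

```c
/*
 * Illustrative only -- not taken from this patch. Shows how the
 * group-enable bits could be tested against the eagerly saved VMCR
 * copy on the entry path. Bit positions follow the architectural
 * ICH_VMCR_EL2 layout (VENG0 = bit 0, VENG1 = bit 1); the macro and
 * function names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_VMCR_VENG0	(1u << 0)	/* virtual Group 0 enable */
#define EXAMPLE_VMCR_VENG1	(1u << 1)	/* virtual Group 1 enable */

/* saved_vmcr would be the cpu_if->vgic_vmcr value captured on the last exit */
static bool example_vgic_group_enabled(uint32_t saved_vmcr, unsigned int group)
{
	return saved_vmcr & (group ? EXAMPLE_VMCR_VENG1 : EXAMPLE_VMCR_VENG0);
}
```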
Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://msgid.link/20251120172540.2267180-21-maz@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h      |  2
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h      |  2
-rw-r--r--  arch/arm64/kvm/arm.c                  |  3
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-main.c    |  7
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v3-sr.c       | 15
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v2.c         |  2
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v3-nested.c  |  2
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v3.c         |  2

8 files changed, 13 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 9da54d4ee49e..f8adbd535b4a 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -79,7 +79,7 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
 	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
 	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
-	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
 	__KVM_HOST_SMCCC_FUNC___pkvm_reserve_vm,
 	__KVM_HOST_SMCCC_FUNC___pkvm_unreserve_vm,
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index e6be1f5d0967..dbf16a9f6772 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -82,7 +82,7 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
 
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 870953b4a8a7..733195ef183e 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -659,8 +659,7 @@ nommu:
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	if (is_protected_kvm_enabled()) {
-		kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
-			     &vcpu->arch.vgic_cpu.vgic_v3);
+		kvm_call_hyp(__vgic_v3_save_aprs, &vcpu->arch.vgic_cpu.vgic_v3);
 		kvm_call_hyp_nvhe(__pkvm_vcpu_put);
 	}
 
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 29430c031095..a7c689152f68 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -157,6 +157,7 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
 
 	host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
+	host_cpu_if->vgic_vmcr = hyp_cpu_if->vgic_vmcr;
 	for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
 		host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
 }
@@ -464,11 +465,11 @@ static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
 	__vgic_v3_init_lrs();
 }
 
-static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
 
-	__vgic_v3_save_vmcr_aprs(kern_hyp_va(cpu_if));
+	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
 }
 
 static void handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
@@ -616,7 +617,7 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
 	HANDLE_FUNC(__kvm_flush_cpu_context),
 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
-	HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
+	HANDLE_FUNC(__vgic_v3_save_aprs),
 	HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
 	HANDLE_FUNC(__pkvm_reserve_vm),
 	HANDLE_FUNC(__pkvm_unreserve_vm),
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 2509b52bbd62..cafbb41b4c33 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -235,6 +235,8 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
 		}
 	}
 
+	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
+
 	if (cpu_if->vgic_hcr & ICH_HCR_EL2_LRENPIE) {
 		u64 val = read_gicreg(ICH_HCR_EL2);
 		cpu_if->vgic_hcr &= ~ICH_HCR_EL2_EOIcount;
@@ -332,10 +334,6 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 {
 	u64 val;
 
-	if (!cpu_if->vgic_sre) {
-		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
-	}
-
 	/* Only restore SRE if the host implements the GICv2 interface */
 	if (static_branch_unlikely(&vgic_v3_has_v2_compat)) {
 		val = read_gicreg(ICC_SRE_EL2);
@@ -357,7 +355,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 		write_gicreg(0, ICH_HCR_EL2);
 }
 
-static void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
 	u64 val;
 	u32 nr_pre_bits;
@@ -518,13 +516,6 @@ static void __vgic_v3_write_vmcr(u32 vmcr)
 	write_gicreg(vmcr, ICH_VMCR_EL2);
 }
 
-void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
-{
-	__vgic_v3_save_aprs(cpu_if);
-	if (cpu_if->vgic_sre)
-		cpu_if->vgic_vmcr = __vgic_v3_read_vmcr();
-}
-
 void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
 	__vgic_v3_compat_mode_enable();
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 5a2165a8d22c..07e93acafd04 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -451,6 +451,7 @@ void vgic_v2_save_state(struct kvm_vcpu *vcpu)
 	if (!base)
 		return;
 
+	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
 	if (used_lrs)
 		save_lrs(vcpu, base);
 
@@ -495,6 +496,5 @@ void vgic_v2_put(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 
-	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
 	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index 1fc9e0780abe..1531e4907c65 100644
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -340,7 +340,7 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
 	u64 val;
 	int i;
 
-	__vgic_v3_save_vmcr_aprs(s_cpu_if);
+	__vgic_v3_save_aprs(s_cpu_if);
 	__vgic_v3_deactivate_traps(s_cpu_if);
 	__vgic_v3_save_state(s_cpu_if);
 
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index bcce7f35a6d6..5b276e303aab 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -815,7 +815,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
 	}
 
 	if (likely(!is_protected_kvm_enabled()))
-		kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
+		kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);
 
 	WARN_ON(vgic_v4_put(vcpu));
 	if (has_vhe())
