author | Yong-Xuan Wang <yongxuan.wang@sifive.com> | 2024-04-17 15:45:25 +0800
committer | Anup Patel <anup@brainfault.org> | 2024-04-22 10:37:11 +0530
commit | 2121cadec45aaf61fa45b3aa3d99723ed4e6683a (patch)
tree | 4e2988e18b8011af58bca07e987a3c48d73bea6b /arch/riscv/include/asm
parent | f1c48c1ec73538a8e49695445a0fbc52156aac42 (diff)
RISCV: KVM: Introduce mp_state_lock to avoid lock inversion
Documentation/virt/kvm/locking.rst advises that kvm->lock should be
acquired outside vcpu->mutex and kvm->srcu. However, when KVM/RISC-V
handles SBI_EXT_HSM_HART_START, the locks are acquired in the order
vcpu->mutex, kvm->srcu, then kvm->lock.
Although lockdep no longer complains about this after commit
f0f44752f5f6 ("rcu: Annotate SRCU's update-side lockdep dependencies"),
kvm->lock still needs to be replaced with a new dedicated lock to ensure
that only one hart at a time can execute the SBI_EXT_HSM_HART_START call
for a given target hart.
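As a rough illustration of the intended locking (a sketch only; the handler
name, error handling, and body below are assumptions for illustration, not
this patch's actual SBI HSM code), the HART_START path can serialize on the
per-vCPU lock instead of kvm->lock:

/* Sketch: serialize HART_START on the target vCPU's mp_state_lock. */
static int sbi_hsm_hart_start_sketch(struct kvm_vcpu *target_vcpu)
{
	int ret = 0;

	spin_lock(&target_vcpu->arch.mp_state_lock);
	if (target_vcpu->arch.mp_state.mp_state != KVM_MP_STATE_STOPPED) {
		/* The target hart is already started or starting. */
		ret = SBI_ERR_ALREADY_AVAILABLE;
	} else {
		/* ... set up the start address and arguments here ... */
		__kvm_riscv_vcpu_power_on(target_vcpu);
	}
	spin_unlock(&target_vcpu->arch.mp_state_lock);

	return ret;
}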
Additionally, this patch renames "power_off" to "mp_state", which now has
two possible values. The new vcpu->mp_state_lock also protects access to
vcpu->mp_state.
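The split between locked and unlocked helpers declared in the diff below can
be read as the following convention (a minimal sketch of plausible bodies,
assuming KVM_REQ_SLEEP is used to park the vCPU; not necessarily the patch's
exact implementation):

/*
 * Sketch: __kvm_riscv_vcpu_power_off() assumes the caller already holds
 * mp_state_lock; the plain wrapper takes the lock itself.
 */
void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_riscv_vcpu_power_off(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
}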
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20240417074528.16506-2-yongxuan.wang@sifive.com
Signed-off-by: Anup Patel <anup@brainfault.org>
Diffstat (limited to 'arch/riscv/include/asm')
-rw-r--r-- | arch/riscv/include/asm/kvm_host.h | 8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index da4ab7e175ff..48691f55d1a5 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -264,8 +264,9 @@ struct kvm_vcpu_arch {
 	/* Cache pages needed to program page tables with spinlock held */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
-	/* VCPU power-off state */
-	bool power_off;
+	/* VCPU power state */
+	struct kvm_mp_state mp_state;
+	spinlock_t mp_state_lock;
 
 	/* Don't run the VCPU (blocked) */
 	bool pause;
@@ -386,8 +387,11 @@ int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
 void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
 bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
+void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);
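Call sites that previously read vcpu->arch.power_off directly would now go
through the new helper; for example (a hypothetical caller, not part of this
diff):

/* Hypothetical caller: check run-ability via the new helper. */
static bool vcpu_can_run_sketch(struct kvm_vcpu *vcpu)
{
	return !kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause;
}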