From d0823cb346bc6f685f230bdbec51910a329e3fe3 Mon Sep 17 00:00:00 2001
From: Jia He
Date: Fri, 3 Aug 2018 21:57:04 +0800
Subject: KVM: arm/arm64: vgic: Do not use spin_lock_irqsave/restore with irq disabled

kvm_vgic_sync_hwstate is only called with IRQs disabled. There is thus
no need to call spin_lock_irqsave/restore in vgic_fold_lr_state and
vgic_prune_ap_list.

This patch replaces them with the non irq-safe version.

Signed-off-by: Jia He
Acked-by: Christoffer Dall
[maz: commit message tidy-up]
Signed-off-by: Marc Zyngier
---
 virt/kvm/arm/vgic/vgic.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

(limited to 'virt/kvm/arm/vgic/vgic.c')

diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index c22cea678a66..7cfdfbc910e0 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -593,10 +593,11 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq, *tmp;
-	unsigned long flags;
+
+	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 retry:
-	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+	spin_lock(&vgic_cpu->ap_list_lock);
 
 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
@@ -637,7 +638,7 @@ retry:
 		/* This interrupt looks like it has to be migrated. */
 
 		spin_unlock(&irq->irq_lock);
-		spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+		spin_unlock(&vgic_cpu->ap_list_lock);
 
 		/*
 		 * Ensure locking order by always locking the smallest
@@ -651,7 +652,7 @@ retry:
 			vcpuB = vcpu;
 		}
 
-		spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
 				 SINGLE_DEPTH_NESTING);
 		spin_lock(&irq->irq_lock);
@@ -676,7 +677,7 @@ retry:
 
 		spin_unlock(&irq->irq_lock);
 		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 
 		if (target_vcpu_needs_kick) {
 			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -686,7 +687,7 @@ retry:
 		goto retry;
 	}
 
-	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+	spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
-- 
cgit