 arch/x86/kvm/svm/avic.c | 65 +++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 3a59c5e77a4f..1a9862c7106e 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -829,7 +829,7 @@ static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
 		WARN_ON_ONCE(amd_iommu_update_ga(cpu, irqfd->irq_bypass_data));
 }
 
-void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	int h_physical_id = kvm_cpu_get_apicid(cpu);
@@ -846,16 +846,6 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		return;
 
 	/*
-	 * No need to update anything if the vCPU is blocking, i.e. if the vCPU
-	 * is being scheduled in after being preempted. The CPU entries in the
-	 * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
-	 * If the vCPU was migrated, its new CPU value will be stuffed when the
-	 * vCPU unblocks.
-	 */
-	if (kvm_vcpu_is_blocking(vcpu))
-		return;
-
-	/*
 	 * Grab the per-vCPU interrupt remapping lock even if the VM doesn't
 	 * _currently_ have assigned devices, as that can change. Holding
 	 * ir_list_lock ensures that either svm_ir_list_add() will consume
@@ -889,12 +879,27 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
 
-void avic_vcpu_put(struct kvm_vcpu *vcpu)
+void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	/*
+	 * No need to update anything if the vCPU is blocking, i.e. if the vCPU
+	 * is being scheduled in after being preempted. The CPU entries in the
+	 * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
+	 * If the vCPU was migrated, its new CPU value will be stuffed when the
+	 * vCPU unblocks.
+	 */
+	if (kvm_vcpu_is_blocking(vcpu))
+		return;
+
+	__avic_vcpu_load(vcpu, cpu);
+}
+
+static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	struct vcpu_svm *svm = to_svm(vcpu);
 	unsigned long flags;
-	u64 entry;
+	u64 entry = svm->avic_physical_id_entry;
 
 	lockdep_assert_preemption_disabled();
 
@@ -902,19 +907,6 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 		return;
 
 	/*
-	 * Note, reading the Physical ID entry outside of ir_list_lock is safe
-	 * as only the pCPU that has loaded (or is loading) the vCPU is allowed
-	 * to modify the entry, and preemption is disabled. I.e. the vCPU
-	 * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
-	 * recursively.
-	 */
-	entry = svm->avic_physical_id_entry;
-
-	/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
-	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
-		return;
-
-	/*
 	 * Take and hold the per-vCPU interrupt remapping lock while updating
 	 * the Physical ID entry even though the lock doesn't protect against
 	 * multiple writers (see above). Holding ir_list_lock ensures that
@@ -933,7 +925,24 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 	WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
 
 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+}
+
+void avic_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Note, reading the Physical ID entry outside of ir_list_lock is safe
+	 * as only the pCPU that has loaded (or is loading) the vCPU is allowed
+	 * to modify the entry, and preemption is disabled. I.e. the vCPU
+	 * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
+	 * recursively.
+	 */
+	u64 entry = to_svm(vcpu)->avic_physical_id_entry;
+
+	/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
+	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
+		return;
+	__avic_vcpu_put(vcpu);
 }
 
 void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
@@ -974,9 +983,9 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 	avic_refresh_virtual_apic_mode(vcpu);
 
 	if (activated)
-		avic_vcpu_load(vcpu, vcpu->cpu);
+		__avic_vcpu_load(vcpu, vcpu->cpu);
 	else
-		avic_vcpu_put(vcpu);
+		__avic_vcpu_put(vcpu);
 
 	/*
 	 * Here, we go through the per-vcpu ir_list to update all existing
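The shape of the change is a conventional helper split: the public avic_vcpu_load()/avic_vcpu_put() keep only the scheduling-related early outs (vCPU is blocking, IsRunning already '0'), while the new static __avic_vcpu_load()/__avic_vcpu_put() helpers do the unconditional Physical ID table and IRTE update, and avic_refresh_apicv_exec_ctrl() now calls the helpers directly so the (de)activation path is never skipped by those checks. Below is a minimal, self-contained C sketch of that pattern only; the widget_* names and stub types are invented for illustration and are not part of KVM.

/*
 * Standalone sketch of the wrapper/helper split applied by the patch.
 * The names (widget_*, is_blocking, is_running) are illustrative stand-ins
 * for the vCPU state that avic_vcpu_load()/avic_vcpu_put() check before
 * calling the new __avic_vcpu_load()/__avic_vcpu_put() helpers.
 */
#include <stdbool.h>
#include <stdio.h>

struct widget {
	bool is_blocking;	/* analogue of kvm_vcpu_is_blocking() */
	bool is_running;	/* analogue of the IsRunning bit */
	int  cpu;
};

/* Unconditional workers: always perform the state update. */
static void __widget_load(struct widget *w, int cpu)
{
	w->cpu = cpu;
	w->is_running = true;
	printf("loaded on cpu %d\n", cpu);
}

static void __widget_put(struct widget *w)
{
	w->is_running = false;
	printf("put (was on cpu %d)\n", w->cpu);
}

/* Public wrappers: the early-out checks live here, as in the patch. */
static void widget_load(struct widget *w, int cpu)
{
	if (w->is_blocking)	/* nothing to do while blocking */
		return;
	__widget_load(w, cpu);
}

static void widget_put(struct widget *w)
{
	if (!w->is_running)	/* nothing to do if IsRunning == '0' */
		return;
	__widget_put(w);
}

/* "Refresh" path: calls the __ helpers directly, bypassing the checks. */
static void widget_refresh(struct widget *w, bool activated)
{
	if (activated)
		__widget_load(w, w->cpu);
	else
		__widget_put(w);
}

int main(void)
{
	struct widget w = { .is_blocking = false, .is_running = false, .cpu = 0 };

	widget_load(&w, 1);		/* passes the checks, does the load */
	widget_refresh(&w, false);	/* forces the put, bypassing the checks */
	widget_put(&w);			/* now a no-op: IsRunning analogue is already clear */
	return 0;
}

As in the patch, the early-out logic exists only in the wrappers, so a caller that must force the transition regardless of blocking/IsRunning state (the refresh path) simply uses the double-underscore variant.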