Diffstat (limited to 'arch/arm64/kvm/vgic/vgic.c')
-rw-r--r--  arch/arm64/kvm/vgic/vgic.c  543
1 file changed, 352 insertions(+), 191 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index d97e6080b421..430aa98888fd 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -24,16 +24,23 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
/*
* Locking order is always:
* kvm->lock (mutex)
- * its->cmd_lock (mutex)
- * its->its_lock (mutex)
- * vgic_cpu->ap_list_lock must be taken with IRQs disabled
- * kvm->lpi_list_lock must be taken with IRQs disabled
- * vgic_irq->irq_lock must be taken with IRQs disabled
+ * vcpu->mutex (mutex)
+ * kvm->arch.config_lock (mutex)
+ * its->cmd_lock (mutex)
+ * its->its_lock (mutex)
+ * vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
+ * vgic_cpu->ap_list_lock must be taken with IRQs disabled
+ * vgic_irq->irq_lock must be taken with IRQs disabled
*
* As the ap_list_lock might be taken from the timer interrupt handler,
* we have to disable IRQs before taking this lock and everything lower
* than it.
*
+ * The config_lock has additional ordering requirements:
+ * kvm->slots_lock
+ * kvm->srcu
+ * kvm->arch.config_lock
+ *
* If you need to take multiple locks, always take the upper lock first,
* then the lower ones, e.g. first take the its_lock, then the irq_lock.
* If you are already holding a lock and need to take a higher one, you
@@ -52,32 +59,22 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
*/
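
A minimal sketch of the documented ordering in practice (the function is hypothetical; the locks are the real ones listed above):

static void example_take_locks(struct kvm *kvm, struct vgic_irq *irq)
{
	unsigned long flags;

	/* Upper (mutex) locks first, while IRQs are still enabled */
	mutex_lock(&kvm->arch.config_lock);

	/* Everything from the ap_list_lock down is taken with IRQs disabled */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	/* ... update IRQ state ... */

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	mutex_unlock(&kvm->arch.config_lock);
}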
/*
- * Iterate over the VM's list of mapped LPIs to find the one with a
- * matching interrupt ID and return a reference to the IRQ structure.
+ * Index the VM's xarray of mapped LPIs and return a reference to the IRQ
+ * structure. The caller is expected to call vgic_put_irq() later once it's
+ * finished with the IRQ.
*/
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = NULL;
- unsigned long flags;
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ rcu_read_lock();
- list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
- if (irq->intid != intid)
- continue;
+ irq = xa_load(&dist->lpi_xa, intid);
+ if (!vgic_try_get_irq_ref(irq))
+ irq = NULL;
- /*
- * This increases the refcount, the caller is expected to
- * call vgic_put_irq() later once it's finished with the IRQ.
- */
- vgic_get_irq_kref(irq);
- goto out_unlock;
- }
- irq = NULL;
-
-out_unlock:
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ rcu_read_unlock();
return irq;
}
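
A hedged usage sketch of the lookup contract (vgic_get_lpi() is static to this file, so the caller here is purely illustrative):

static void example_use_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_irq *irq = vgic_get_lpi(kvm, intid);

	if (!irq)
		return;		/* not mapped, or the refcount already hit zero */

	/* ... the IRQ cannot be freed underneath us here ... */

	vgic_put_irq(kvm, irq);	/* balance the reference taken by the lookup */
}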
@@ -87,17 +84,11 @@ out_unlock:
* struct vgic_irq. It also increases the refcount, so any caller is expected
* to call vgic_put_irq() once it's finished with this IRQ.
*/
-struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
- u32 intid)
+struct vgic_irq *vgic_get_irq(struct kvm *kvm, u32 intid)
{
- /* SGIs and PPIs */
- if (intid <= VGIC_MAX_PRIVATE) {
- intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
- return &vcpu->arch.vgic_cpu.private_irqs[intid];
- }
-
/* SPIs */
- if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
+ if (intid >= VGIC_NR_PRIVATE_IRQS &&
+ intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
}
@@ -109,29 +100,42 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
return NULL;
}
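
The array_index_nospec() clamp follows the usual bounds-check-then-index pattern from <linux/nospec.h>; a generic sketch:

#include <linux/nospec.h>

static u8 example_table_read(const u8 *table, size_t size, size_t idx)
{
	if (idx >= size)
		return 0;

	/* Clamp idx so the CPU cannot speculatively index past 'size' */
	idx = array_index_nospec(idx, size);
	return table[idx];
}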
-/*
- * We can't do anything in here, because we lack the kvm pointer to
- * lock and remove the item from the lpi_list. So we keep this function
- * empty and use the return value of kref_put() to trigger the freeing.
- */
-static void vgic_irq_release(struct kref *ref)
+struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
{
+ if (WARN_ON(!vcpu))
+ return NULL;
+
+ /* SGIs and PPIs */
+ if (intid < VGIC_NR_PRIVATE_IRQS) {
+ intid = array_index_nospec(intid, VGIC_NR_PRIVATE_IRQS);
+ return &vcpu->arch.vgic_cpu.private_irqs[intid];
+ }
+
+ return vgic_get_irq(vcpu->kvm, intid);
}
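
The two lookup helpers split the INTID space; a sketch of the dispatch a caller performs (kvm_vgic_inject_irq() below does exactly this). Note that vgic_get_vcpu_irq() already falls through to vgic_get_irq(), so the explicit split is only needed when no vcpu is at hand:

static struct vgic_irq *example_lookup(struct kvm_vcpu *vcpu, u32 intid)
{
	/* SGIs and PPIs are per-vCPU; SPIs and LPIs are VM-global */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return vgic_get_vcpu_irq(vcpu, intid);

	return vgic_get_irq(vcpu->kvm, intid);
}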
-/*
- * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
- */
-void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
+static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq)
{
- struct vgic_dist *dist = &kvm->arch.vgic;
+ lockdep_assert_held(&dist->lpi_xa.xa_lock);
+ __xa_erase(&dist->lpi_xa, irq->intid);
+ kfree_rcu(irq, rcu);
+}
- if (!kref_put(&irq->refcount, vgic_irq_release))
- return;
+static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
+{
+ if (irq->intid < VGIC_MIN_LPI)
+ return false;
- list_del(&irq->lpi_list);
- dist->lpi_list_count--;
+ return refcount_dec_and_test(&irq->refcount);
+}
- kfree(irq);
+static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq *irq)
+{
+ if (!__vgic_put_irq(kvm, irq))
+ return false;
+
+ irq->pending_release = true;
+ return true;
}
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
@@ -139,18 +143,44 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
struct vgic_dist *dist = &kvm->arch.vgic;
unsigned long flags;
- if (irq->intid < VGIC_MIN_LPI)
+ /*
+ * Normally the lock is only taken when the refcount drops to 0.
+ * Acquire/release it early on lockdep kernels to make locking issues
+ * in rare release paths a bit more obvious.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP) && irq->intid >= VGIC_MIN_LPI) {
+ guard(spinlock_irqsave)(&dist->lpi_xa.xa_lock);
+ }
+
+ if (!__vgic_put_irq(kvm, irq))
return;
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
- __vgic_put_lpi_locked(kvm, irq);
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ xa_lock_irqsave(&dist->lpi_xa, flags);
+ vgic_release_lpi_locked(dist, irq);
+ xa_unlock_irqrestore(&dist->lpi_xa, flags);
+}
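
The guard() above comes from <linux/cleanup.h>: it takes the lock and drops it automatically at end of scope. A minimal sketch of the same pattern:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static void example_scoped_lock(struct vgic_dist *dist)
{
	guard(spinlock_irqsave)(&dist->lpi_xa.xa_lock);

	/* xa_lock held with IRQs disabled until the closing brace */
}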
+
+static void vgic_release_deleted_lpis(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ unsigned long flags, intid;
+ struct vgic_irq *irq;
+
+ xa_lock_irqsave(&dist->lpi_xa, flags);
+
+ xa_for_each(&dist->lpi_xa, intid, irq) {
+ if (irq->pending_release)
+ vgic_release_lpi_locked(dist, irq);
+ }
+
+ xa_unlock_irqrestore(&dist->lpi_xa, flags);
}
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq, *tmp;
+ bool deleted = false;
unsigned long flags;
raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
@@ -161,11 +191,14 @@ void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
list_del(&irq->ap_list);
irq->vcpu = NULL;
raw_spin_unlock(&irq->irq_lock);
- vgic_put_irq(vcpu->kvm, irq);
+ deleted |= vgic_put_irq_norelease(vcpu->kvm, irq);
}
}
raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+
+ if (deleted)
+ vgic_release_deleted_lpis(vcpu->kvm);
}
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
@@ -201,7 +234,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
}
/**
- * kvm_vgic_target_oracle - compute the target vcpu for an irq
+ * vgic_target_oracle - compute the target vcpu for an irq
*
* @irq: The irq to route. Must be already locked.
*
@@ -211,7 +244,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
*
* Requires the IRQ lock to be held.
*/
-static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
+struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
lockdep_assert_held(&irq->irq_lock);
@@ -239,17 +272,20 @@ static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
return NULL;
}
+struct vgic_sort_info {
+ struct kvm_vcpu *vcpu;
+ struct vgic_vmcr vmcr;
+};
+
/*
* The order of items in the ap_lists defines how we'll pack things in LRs as
* well, the first items in the list being the first things populated in the
* LRs.
*
- * A hard rule is that active interrupts can never be pushed out of the LRs
- * (and therefore take priority) since we cannot reliably trap on deactivation
- * of IRQs and therefore they have to be present in the LRs.
- *
+ * Pending, non-active interrupts must be placed at the head of the list.
* Otherwise things should be sorted by the priority field and the GIC
* hardware support will take care of preemption of priority groups etc.
+ * Interrupts that are not deliverable should be at the end of the list.
*
* Return negative if "a" sorts before "b", 0 to preserve order, and positive
* to sort "b" before "a".
@@ -259,6 +295,8 @@ static int vgic_irq_cmp(void *priv, const struct list_head *a,
{
struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
+ struct vgic_sort_info *info = priv;
+ struct kvm_vcpu *vcpu = info->vcpu;
bool penda, pendb;
int ret;
@@ -272,21 +310,32 @@ static int vgic_irq_cmp(void *priv, const struct list_head *a,
raw_spin_lock(&irqa->irq_lock);
raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
- if (irqa->active || irqb->active) {
- ret = (int)irqb->active - (int)irqa->active;
+ /* Undeliverable interrupts should be last */
+ ret = (int)(vgic_target_oracle(irqb) == vcpu) - (int)(vgic_target_oracle(irqa) == vcpu);
+ if (ret)
goto out;
- }
- penda = irqa->enabled && irq_is_pending(irqa);
- pendb = irqb->enabled && irq_is_pending(irqb);
+ /* Same thing for interrupts targeting a disabled group */
+ ret = (int)(irqb->group ? info->vmcr.grpen1 : info->vmcr.grpen0);
+ ret -= (int)(irqa->group ? info->vmcr.grpen1 : info->vmcr.grpen0);
+ if (ret)
+ goto out;
+
+ penda = irqa->enabled && irq_is_pending(irqa) && !irqa->active;
+ pendb = irqb->enabled && irq_is_pending(irqb) && !irqb->active;
+
+ ret = (int)pendb - (int)penda;
+ if (ret)
+ goto out;
- if (!penda || !pendb) {
- ret = (int)pendb - (int)penda;
+ /* Both pending and enabled, sort by priority (lower number first) */
+ ret = (int)irqa->priority - (int)irqb->priority;
+ if (ret)
goto out;
- }
- /* Both pending and enabled, sort by priority */
- ret = irqa->priority - irqb->priority;
+ /* Finally, HW bit active interrupts have priority over non-HW ones */
+ ret = (int)irqb->hw - (int)irqa->hw;
+
out:
raw_spin_unlock(&irqb->irq_lock);
raw_spin_unlock(&irqa->irq_lock);
@@ -297,10 +346,12 @@ out:
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_sort_info info = { .vcpu = vcpu, };
lockdep_assert_held(&vgic_cpu->ap_list_lock);
- list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
+ vgic_get_vmcr(vcpu, &info.vmcr);
+ list_sort(&info, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
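
list_sort() passes its first argument straight through to the comparator, which is how vgic_irq_cmp() receives the vcpu and its VMCR snapshot; a generic sketch:

#include <linux/list_sort.h>

static int example_cmp(void *priv, const struct list_head *a,
		       const struct list_head *b)
{
	struct vgic_sort_info *info = priv;	/* the pointer handed to list_sort() */

	/* ... order a and b using info->vcpu and info->vmcr ... */
	return 0;
}

static void example_sort(struct list_head *head, struct vgic_sort_info *info)
{
	list_sort(info, head, example_cmp);
}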
/*
@@ -323,6 +374,20 @@ static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owne
return false;
}
+static bool vgic_model_needs_bcst_kick(struct kvm *kvm)
+{
+ /*
+ * A GICv3 (or GICv3-like) system exposing a GICv3 to the guest
+ * needs a broadcast kick to set TDIR globally.
+ *
+ * For systems that do not have TDIR (ARM's own v8.0 CPUs), the
+ * shadow TDIR bit is always set, and so is the register's TC bit,
+ * so no need to kick the CPUs.
+ */
+ return (cpus_have_final_cap(ARM64_HAS_ICH_HCR_EL2_TDIR) &&
+ kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
+}
+
/*
* Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
* Do the queuing if necessary, taking the right locks in the right order.
@@ -332,9 +397,10 @@ static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owne
* with all locks dropped.
*/
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
- unsigned long flags)
+ unsigned long flags) __releases(&irq->irq_lock)
{
struct kvm_vcpu *vcpu;
+ bool bcast;
lockdep_assert_held(&irq->irq_lock);
@@ -402,17 +468,27 @@ retry:
/*
* Grab a reference to the irq to reflect the fact that it is
- * now in the ap_list.
+ * now in the ap_list. This is safe as the caller must already hold a
+ * reference on the irq.
*/
- vgic_get_irq_kref(irq);
+ vgic_get_irq_ref(irq);
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
irq->vcpu = vcpu;
+ /* A new SPI may result in deactivation trapping on all vcpus */
+ bcast = (vgic_model_needs_bcst_kick(vcpu->kvm) &&
+ vgic_valid_spi(vcpu->kvm, irq->intid) &&
+ atomic_fetch_inc(&vcpu->kvm->arch.vgic.active_spis) == 0);
+
raw_spin_unlock(&irq->irq_lock);
raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
- kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
- kvm_vcpu_kick(vcpu);
+ if (!bcast) {
+ kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+ kvm_vcpu_kick(vcpu);
+ } else {
+ kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_IRQ_PENDING);
+ }
return true;
}
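
The bcast decision relies on atomic_fetch_inc() returning the pre-increment value, so exactly one concurrent caller observes the 0->1 transition; a sketch of the gate (active_spis is the real counter, the wrapper hypothetical):

static bool example_first_active_spi(atomic_t *active_spis)
{
	/*
	 * fetch_inc is a single atomic read-modify-write: a separate
	 * inc + read pair could let two racing callers both see a
	 * non-zero count, and nobody would issue the broadcast kick.
	 */
	return atomic_fetch_inc(active_spis) == 0;
}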
@@ -420,7 +496,7 @@ retry:
/**
* kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
* @kvm: The VM structure pointer
- * @cpuid: The CPU for PPIs
+ * @vcpu: The CPU for PPIs or NULL for global interrupts
* @intid: The INTID to inject a new state to.
* @level: Edge-triggered: true: to trigger the interrupt
* false: to ignore the call
@@ -434,25 +510,26 @@ retry:
* level-sensitive interrupts. You can think of the level parameter as 1
* being HIGH and 0 being LOW and all devices being active-HIGH.
*/
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level, void *owner)
+int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ unsigned int intid, bool level, void *owner)
{
- struct kvm_vcpu *vcpu;
struct vgic_irq *irq;
unsigned long flags;
int ret;
- trace_vgic_update_irq_pending(cpuid, intid, level);
-
ret = vgic_lazy_init(kvm);
if (ret)
return ret;
- vcpu = kvm_get_vcpu(kvm, cpuid);
if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
- irq = vgic_get_irq(kvm, vcpu, intid);
+ trace_vgic_update_irq_pending(vcpu ? vcpu->vcpu_idx : 0, intid, level);
+
+ if (intid < VGIC_NR_PRIVATE_IRQS)
+ irq = vgic_get_vcpu_irq(vcpu, intid);
+ else
+ irq = vgic_get_irq(kvm, intid);
if (!irq)
return -EINVAL;
@@ -514,7 +591,7 @@ static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
u32 vintid, struct irq_ops *ops)
{
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
unsigned long flags;
int ret;
@@ -539,7 +616,7 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
*/
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
unsigned long flags;
if (!irq->hw)
@@ -562,7 +639,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
if (!vgic_initialized(vcpu->kvm))
return -EAGAIN;
- irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+ irq = vgic_get_vcpu_irq(vcpu, vintid);
BUG_ON(!irq);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
@@ -573,6 +650,21 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
return 0;
}
+int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid)
+{
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
+ unsigned long flags;
+ int ret = -1;
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ if (irq->hw)
+ ret = irq->hwintid;
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+ vgic_put_irq(vcpu->kvm, irq);
+ return ret;
+}
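
A hedged usage sketch for the new helper, which returns the backing physical INTID or -1 when the vIRQ has no HW mapping:

static void example_query_map(struct kvm_vcpu *vcpu, u32 vintid)
{
	int hwintid = kvm_vgic_get_map(vcpu, vintid);

	if (hwintid < 0)
		return;		/* vintid is purely virtual */

	/* hwintid is the physical INTID backing vintid */
}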
+
/**
* kvm_vgic_set_owner - Set the owner of an interrupt for a VM
*
@@ -596,7 +688,7 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
return -EINVAL;
- irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
+ irq = vgic_get_vcpu_irq(vcpu, intid);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->owner && irq->owner != owner)
ret = -EEXIST;
@@ -619,6 +711,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq, *tmp;
+ bool deleted_lpis = false;
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
@@ -646,12 +739,12 @@ retry:
/*
* This vgic_put_irq call matches the
- * vgic_get_irq_kref in vgic_queue_irq_unlock,
+ * vgic_get_irq_ref in vgic_queue_irq_unlock,
* where we added the LPI to the ap_list. As
* we remove the irq from the list, we also
* drop the refcount.
*/
- vgic_put_irq(vcpu->kvm, irq);
+ deleted_lpis |= vgic_put_irq_norelease(vcpu->kvm, irq);
continue;
}
@@ -714,6 +807,9 @@ retry:
}
raw_spin_unlock(&vgic_cpu->ap_list_lock);
+
+ if (unlikely(deleted_lpis))
+ vgic_release_deleted_lpis(vcpu->kvm);
}
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -744,98 +840,148 @@ static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
vgic_v3_clear_lr(vcpu, lr);
}
-static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
-{
- if (kvm_vgic_global_state.type == VGIC_V2)
- vgic_v2_set_underflow(vcpu);
- else
- vgic_v3_set_underflow(vcpu);
-}
-
-/* Requires the ap_list_lock to be held. */
-static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
- bool *multi_sgi)
+static void summarize_ap_list(struct kvm_vcpu *vcpu,
+ struct ap_list_summary *als)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq;
- int count = 0;
-
- *multi_sgi = false;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
+ *als = (typeof(*als)){};
+
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
- int w;
+ guard(raw_spinlock)(&irq->irq_lock);
- raw_spin_lock(&irq->irq_lock);
- /* GICv2 SGIs can count for more than one... */
- w = vgic_irq_get_lr_count(irq);
- raw_spin_unlock(&irq->irq_lock);
+ if (unlikely(vgic_target_oracle(irq) != vcpu))
+ continue;
- count += w;
- *multi_sgi |= (w > 1);
+ if (!irq->active)
+ als->nr_pend++;
+ else
+ als->nr_act++;
+
+ if (irq->intid < VGIC_NR_SGIS)
+ als->nr_sgi++;
}
- return count;
}
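
struct ap_list_summary and irqs_outside_lrs() are not defined in this file; a plausible sketch consistent with the fields used above (the helper's exact definition is an assumption):

struct ap_list_summary {
	unsigned int nr_pend;	/* pending, not active */
	unsigned int nr_act;	/* active (possibly also pending) */
	unsigned int nr_sgi;	/* SGIs, which may need one LR per source */
};

/* Assumed shape: are there more live interrupts than LRs to hold them? */
static inline bool irqs_outside_lrs(const struct ap_list_summary *als)
{
	return (als->nr_pend + als->nr_act) > kvm_vgic_global_state.nr_lr;
}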
-/* Requires the VCPU's ap_list_lock to be held. */
+/*
+ * Dealing with LR overflow is close to black magic -- dress accordingly.
+ *
+ * We have to present an almost infinite number of interrupts through a very
+ * limited number of registers. Therefore crucial decisions must be made to
+ * ensure we feed the most relevant interrupts into the LRs, and yet have
+ * some facilities to let the guest interact with those that are not there.
+ *
+ * All considerations below are in the context of interrupts targeting a
+ * single vcpu with non-idle state (either pending, active, or both),
+ * colloquially called the ap_list:
+ *
+ * - Pending interrupts must have priority over active interrupts. This also
+ * excludes pending+active interrupts. This ensures that a guest can
+ * perform priority drops on any number of interrupts, and yet be
+ * presented the next pending one.
+ *
+ * - Deactivation of interrupts outside of the LRs must be tracked using
+ * the EOIcount-driven maintenance interrupt, and sometimes by trapping
+ * the DIR register.
+ *
+ * - For EOImode=0, a non-zero EOIcount means walking the ap_list past the
+ * point that made it into the LRs, and deactivating interrupts that would
+ * have made it onto the LRs if we had the space.
+ *
+ * - The MI-generation bits must be used to try and force an exit when the
+ * guest has done enough changes to the LRs that we want to reevaluate the
+ * situation:
+ *
+ * - if the total number of pending interrupts exceeds the number of
+ * LRs, NPIE must be set in order to exit once no pending interrupts
+ * are present in the LRs, allowing us to populate the next batch.
+ *
+ * - if there are active interrupts outside of the LRs, then LRENPIE
+ * must be set so that we exit on deactivation of one of these, and
+ * work out which one is to be deactivated. Note that this is not
+ * enough to deal with EOImode=1, see below.
+ *
+ * - if the overall number of interrupts exceeds the number of LRs,
+ * then UIE must be set to allow refilling of the LRs once the
+ * majority of them have been processed.
+ *
+ * - as usual, MI triggers are only an optimisation, since we cannot
+ * rely on the MI being delivered in a timely manner...
+ *
+ * - EOImode=1 creates some additional problems:
+ *
+ * - deactivation can happen in any order, and we cannot rely on
+ * EOImode=0's coupling of priority-drop and deactivation which
+ * imposes strict reverse Ack order. This means that DIR must
+ * trap if we have active interrupts outside of the LRs.
+ *
+ * - deactivation of SPIs can occur on any CPU, while the SPI is only
+ * present in the ap_list of the CPU that actually ack-ed it. In that
+ * case, EOIcount doesn't provide enough information, and we must
+ * resort to trapping DIR even if we don't overflow the LRs. Bonus
+ * point for not trapping DIR when no SPIs are pending or active in
+ * the whole VM.
+ *
+ * - LPIs do not suffer the same problem as SPIs on deactivation, as we
+ * have to essentially discard the active state, see below.
+ *
+ * - Virtual LPIs have an active state (surprise!), which gets removed on
+ * priority drop (EOI). However, EOIcount doesn't get bumped when the LPI
+ * is not present in the LR (surprise again!). Special care must therefore
+ * be taken to remove the active state from any activated LPI when exiting
+ * from the guest. This is in a way no different from what happens on the
+ * physical side. We still rely on the running priority to have been
+ * removed from the APRs, irrespective of the LPI being present in the LRs
+ * or not.
+ *
+ * - Virtual SGIs directly injected via GICv4.1 must not affect EOIcount, as
+ * they are not managed in SW and don't have a true active state. So only
+ * set vSGIEOICount when no SGIs are in the ap_list.
+ *
+ * - GICv2 SGIs with multiple sources are injected one source at a time, as
+ * if they were made pending sequentially. This may mean that we don't
+ * always present the HPPI if other interrupts with lower priority are
+ * pending in the LRs. Big deal.
+ */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct ap_list_summary als;
struct vgic_irq *irq;
- int count;
- bool multi_sgi;
- u8 prio = 0xff;
- int i = 0;
+ int count = 0;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
- count = compute_ap_list_depth(vcpu, &multi_sgi);
- if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
- vgic_sort_ap_list(vcpu);
+ summarize_ap_list(vcpu, &als);
- count = 0;
+ if (irqs_outside_lrs(&als))
+ vgic_sort_ap_list(vcpu);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
- raw_spin_lock(&irq->irq_lock);
-
- /*
- * If we have multi-SGIs in the pipeline, we need to
- * guarantee that they are all seen before any IRQ of
- * lower priority. In that case, we need to filter out
- * these interrupts by exiting early. This is easy as
- * the AP list has been sorted already.
- */
- if (multi_sgi && irq->priority > prio) {
- _raw_spin_unlock(&irq->irq_lock);
- break;
+ scoped_guard(raw_spinlock, &irq->irq_lock) {
+ if (likely(vgic_target_oracle(irq) == vcpu)) {
+ vgic_populate_lr(vcpu, irq, count++);
+ }
}
- if (likely(vgic_target_oracle(irq) == vcpu)) {
- vgic_populate_lr(vcpu, irq, count++);
-
- if (irq->source)
- prio = irq->priority;
- }
-
- raw_spin_unlock(&irq->irq_lock);
-
- if (count == kvm_vgic_global_state.nr_lr) {
- if (!list_is_last(&irq->ap_list,
- &vgic_cpu->ap_list_head))
- vgic_set_underflow(vcpu);
+ if (count == kvm_vgic_global_state.nr_lr)
break;
- }
}
/* Nuke remaining LRs */
- for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
+ for (int i = count ; i < kvm_vgic_global_state.nr_lr; i++)
vgic_clear_lr(vcpu, i);
- if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
- else
+ vgic_v2_configure_hcr(vcpu, &als);
+ } else {
vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
+ vgic_v3_configure_hcr(vcpu, &als);
+ }
}
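
A hedged sketch of how the summary could feed the maintenance-interrupt bits described in the comment above; the real decisions live in vgic_v2_configure_hcr()/vgic_v3_configure_hcr(), which are outside this file:

static void example_configure_mi(const struct ap_list_summary *als)
{
	unsigned int nr_lr = kvm_vgic_global_state.nr_lr;

	if (als->nr_pend > nr_lr) {
		/* NPIE: exit once no pending IRQs are left in the LRs */
	}

	if (als->nr_act && als->nr_pend + als->nr_act > nr_lr) {
		/* LRENPIE: exit when an IRQ outside the LRs is deactivated */
	}

	if (als->nr_pend + als->nr_act > nr_lr) {
		/* UIE: exit once most LRs are empty, so they can be refilled */
	}
}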
static inline bool can_access_vgic_from_kernel(void)
@@ -859,23 +1005,31 @@ static inline void vgic_save_state(struct kvm_vcpu *vcpu)
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
- int used_lrs;
-
- /* An empty ap_list_head implies used_lrs == 0 */
- if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+ /* If nesting, emulate the HW effect from L0 to L1 */
+ if (vgic_state_is_nested(vcpu)) {
+ vgic_v3_sync_nested(vcpu);
return;
+ }
+
+ if (vcpu_has_nv(vcpu))
+ vgic_v3_nested_update_mi(vcpu);
if (can_access_vgic_from_kernel())
vgic_save_state(vcpu);
- if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
- used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
- else
- used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
+ vgic_fold_lr_state(vcpu);
+ vgic_prune_ap_list(vcpu);
+}
+
+/* Sync interrupts that were deactivated through a DIR trap */
+void kvm_vgic_process_async_update(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
- if (used_lrs)
- vgic_fold_lr_state(vcpu);
+ /* Make sure we're in the same context as LR handling */
+ local_irq_save(flags);
vgic_prune_ap_list(vcpu);
+ local_irq_restore(flags);
}
static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
@@ -890,42 +1044,57 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
/*
- * If there are no virtual interrupts active or pending for this
- * VCPU, then there is no work to do and we can bail out without
- * taking any lock. There is a potential race with someone injecting
- * interrupts to the VCPU, but it is a benign race as the VCPU will
- * either observe the new interrupt before or after doing this check,
- * and introducing additional synchronization mechanism doesn't change
- * this.
+ * If in a nested state, we must return early. Two possibilities:
*
- * Note that we still need to go through the whole thing if anything
- * can be directly injected (GICv4).
+ * - If we have any pending IRQ for the guest and the guest
+ * expects IRQs to be handled in its virtual EL2 mode (the
+ * virtual IMO bit is set) and it is not already running in
+ * virtual EL2 mode, then we have to emulate an IRQ
+ * exception to virtual EL2.
+ *
+ * We do that by placing a request to ourselves which will
+ * abort the entry procedure and inject the exception at the
+ * beginning of the run loop.
+ *
+ * - Otherwise, do exactly *NOTHING* apart from enabling the virtual
+ * CPU interface. The guest state is already loaded, and we can
+ * carry on with running it.
+ *
+ * If we have NV, but are not in a nested state, compute the
+ * maintenance interrupt state, as it may fire.
*/
- if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
- !vgic_supports_direct_msis(vcpu->kvm))
+ if (vgic_state_is_nested(vcpu)) {
+ if (kvm_vgic_vcpu_pending_irq(vcpu))
+ kvm_make_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu);
+
+ vgic_v3_flush_nested(vcpu);
return;
+ }
+
+ if (vcpu_has_nv(vcpu))
+ vgic_v3_nested_update_mi(vcpu);
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
- if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
- raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ scoped_guard(raw_spinlock, &vcpu->arch.vgic_cpu.ap_list_lock)
vgic_flush_lr_state(vcpu);
- raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
- }
if (can_access_vgic_from_kernel())
vgic_restore_state(vcpu);
- if (vgic_supports_direct_msis(vcpu->kvm))
+ if (vgic_supports_direct_irqs(vcpu->kvm))
vgic_v4_commit(vcpu);
}
void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
- if (unlikely(!vgic_initialized(vcpu->kvm)))
+ if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
+ if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
return;
+ }
- if (kvm_vgic_global_state.type == VGIC_V2)
+ if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_load(vcpu);
else
vgic_v3_load(vcpu);
@@ -933,26 +1102,18 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu)
void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
- if (unlikely(!vgic_initialized(vcpu->kvm)))
+ if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
+ if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
return;
+ }
- if (kvm_vgic_global_state.type == VGIC_V2)
+ if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_put(vcpu);
else
vgic_v3_put(vcpu);
}
-void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
-{
- if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
- return;
-
- if (kvm_vgic_global_state.type == VGIC_V2)
- vgic_v2_vmcr_sync(vcpu);
- else
- vgic_v3_vmcr_sync(vcpu);
-}
-
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1013,7 +1174,7 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
if (!vgic_initialized(vcpu->kvm))
return false;
- irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+ irq = vgic_get_vcpu_irq(vcpu, vintid);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
map_is_active = irq->hw && irq->active;
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);