author		Marc Zyngier <maz@kernel.org>		2025-11-20 17:25:17 +0000
committer	Oliver Upton <oupton@kernel.org>	2025-11-24 14:29:13 -0800
commit		3cfd59f81e0f3fbdf8a1b2f576bdc63ab6cc3277 (patch)
tree		5d93f1e53e7a707c979f0ba7ac70a0f495cead86
parent		a69e2d6f8934bdb9d08a6740ca6c7a44525e2e95 (diff)
KVM: arm64: GICv3: Handle LR overflow when EOImode==0
Now that we can identify interrupts that have not made it into the LRs,
it becomes relatively easy to use EOIcount to walk the overflow list.

What is a bit odd is that we compute a fake LR for the original state
of the interrupt, clear the active bit, and feed it into the existing
processing logic. In a way, this is what would have happened if the
interrupt had been in an LR.

Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://msgid.link/20251120172540.2267180-28-maz@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
-rw-r--r--	arch/arm64/kvm/vgic/vgic-v3.c	46
1 file changed, 46 insertions(+), 0 deletions(-)
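As a rough illustration of the replay described above (and not the kernel
code itself), here is a minimal user-space sketch. The ap_list stand-in is
assumed to be priority-sorted with activation order preserved, and every
identifier (mock_*, MOCK_LR_ACTIVE_BIT) is invented for this example; only
the shape of the logic mirrors the patch below.

/* Stand-alone sketch; all identifiers are invented for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_LR_ACTIVE_BIT	(1ULL << 62)	/* stand-in for ICH_LR_ACTIVE_BIT */

struct mock_irq {
	uint32_t intid;
	bool active;
};

/* Stand-in for vgic_v3_compute_lr(): the LR value this interrupt
 * would have occupied, had it fitted into the LRs. */
static uint64_t mock_compute_lr(const struct mock_irq *irq)
{
	return irq->intid | (irq->active ? MOCK_LR_ACTIVE_BIT : 0);
}

/* Stand-in for vgic_v3_fold_lr(): fold the (faked) LR back into
 * the software view of the interrupt. */
static void mock_fold_lr(struct mock_irq *irq, uint64_t lr)
{
	irq->active = lr & MOCK_LR_ACTIVE_BIT;
	printf("intid %u replayed, active=%d\n", irq->intid, irq->active);
}

int main(void)
{
	/* ap_list stand-in: priority-sorted, activation order preserved */
	struct mock_irq ap_list[] = { { 40, true }, { 41, true }, { 42, true } };
	uint32_t eoicount = 2;	/* as if read from ICH_HCR_EL2.EOIcount */

	for (unsigned int i = 0; i < 3 && eoicount; i++, eoicount--) {
		/* Fake the LR and clear the active bit, i.e. pretend the
		 * CPU deactivated the interrupt while it sat in an LR. */
		uint64_t lr = mock_compute_lr(&ap_list[i]) & ~MOCK_LR_ACTIVE_BIT;

		mock_fold_lr(&ap_list[i], lr);
	}
	return 0;
}

Each replayed entry is handled exactly as if it had just been folded out of
a real LR, which is the "fake LR" oddity the commit message points out.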
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 312226cc2565..d4f27f451c8f 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -112,16 +112,62 @@ static void vgic_v3_fold_lr(struct kvm_vcpu *vcpu, u64 val)
 	vgic_put_irq(vcpu->kvm, irq);
 }
 
+static u64 vgic_v3_compute_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq);
+
+static void vgic_v3_deactivate_phys(u32 intid)
+{
+	if (cpus_have_final_cap(ARM64_HAS_GICV5_LEGACY))
+		gic_insn(intid | FIELD_PREP(GICV5_GIC_CDDI_TYPE_MASK, 1), CDDI);
+	else
+		gic_write_dir(intid);
+}
+
 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
+	u32 eoicount = FIELD_GET(ICH_HCR_EL2_EOIcount, cpuif->vgic_hcr);
+	struct vgic_irq *irq;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 	for (int lr = 0; lr < cpuif->used_lrs; lr++)
 		vgic_v3_fold_lr(vcpu, cpuif->vgic_lr[lr]);
 
+	/*
+	 * EOIMode=0: use EOIcount to emulate deactivation. We are
+	 * guaranteed to deactivate in reverse order of the activation, so
+	 * just pick one active interrupt after the other in the ap_list,
+	 * and replay the deactivation as if the CPU was doing it. We also
+	 * rely on priority drop to have taken place, and the list to be
+	 * sorted by priority.
+	 */
+	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+		u64 lr;
+
+		/*
+		 * I would have loved to write this using a scoped_guard(),
+		 * but using 'continue' here is a total train wreck.
+		 */
+		if (!eoicount) {
+			break;
+		} else {
+			guard(raw_spinlock)(&irq->irq_lock);
+
+			if (!(likely(vgic_target_oracle(irq) == vcpu) &&
+			      irq->active))
+				continue;
+
+			lr = vgic_v3_compute_lr(vcpu, irq) & ~ICH_LR_ACTIVE_BIT;
+		}
+
+		if (lr & ICH_LR_HW)
+			vgic_v3_deactivate_phys(FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
+
+		vgic_v3_fold_lr(vcpu, lr);
+		eoicount--;
+	}
+
 	cpuif->used_lrs = 0;
 }
 
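The in-code comment about scoped_guard() deserves a word. The kernel's
scoped_guard() is built on a hidden one-iteration for-loop, so a 'continue'
inside it binds to that hidden loop: execution falls through to the code
after the guarded block instead of moving on to the next ap_list entry.
Here is a stand-alone sketch of just that control flow; fake_scoped_guard()
mimics only the hidden-loop shape, not the locking.

#include <stdio.h>

/* Mimics only the hidden-loop shape of scoped_guard(), nothing else. */
#define fake_scoped_guard() \
	for (int _once = 1; _once; _once = 0)

int main(void)
{
	for (int i = 0; i < 3; i++) {
		fake_scoped_guard() {
			if (i == 1)
				continue;	/* exits the hidden loop only... */
			printf("i=%d: did work in the guarded block\n", i);
		}
		/* ...so this still runs for i == 1, which is exactly what
		 * the loop in the patch must avoid. */
		printf("i=%d: code after the guard\n", i);
	}
	return 0;
}

Hence the if/else shape in the patch: guard(raw_spinlock) in the else block
drops the lock on any exit from that scope, including the 'continue', while
'break' and the post-guard deactivation code stay attached to the real loop.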
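For readers unfamiliar with the FIELD_GET()/FIELD_PREP() helpers used above
(extracting the physical INTID from the LR, building the CDDI operand): they
move a value out of or into a contiguous bitmask. A stand-alone
approximation follows; the mask value and mock_* names are illustrative,
and the real definitions live in <linux/bitfield.h>.

#include <stdint.h>
#include <stdio.h>

/* Illustrative 13-bit field at an arbitrary offset, not the real
 * ICH_LR_PHYS_ID_MASK definition. */
#define MOCK_PHYS_ID_MASK	(0x1fffULL << 32)

#define mock_field_prep(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define mock_field_get(mask, reg) \
	(((uint64_t)(reg) & (mask)) >> __builtin_ctzll(mask))

int main(void)
{
	uint64_t lr = mock_field_prep(MOCK_PHYS_ID_MASK, 27);

	/* Recover the physical INTID, as the deactivation path does. */
	printf("pINTID = %llu\n",
	       (unsigned long long)mock_field_get(MOCK_PHYS_ID_MASK, lr));
	return 0;
}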