Diffstat (limited to 'arch/riscv/kvm/aia_imsic.c')
-rw-r--r--	arch/riscv/kvm/aia_imsic.c	87
1 file changed, 78 insertions(+), 9 deletions(-)
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index 6cf23b8adb71..e597e86491c3 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -9,13 +9,14 @@
 #include <linux/atomic.h>
 #include <linux/bitmap.h>
+#include <linux/irqchip/riscv-imsic.h>
 #include <linux/kvm_host.h>
 #include <linux/math.h>
 #include <linux/spinlock.h>
 #include <linux/swab.h>
 #include <kvm/iodev.h>
 #include <asm/csr.h>
-#include <asm/kvm_aia_imsic.h>
+#include <asm/kvm_mmu.h>
 
 #define IMSIC_MAX_EIX	(IMSIC_MAX_ID / BITS_PER_TYPE(u64))
@@ -55,6 +56,7 @@ struct imsic {
 	/* IMSIC SW-file */
 	struct imsic_mrif *swfile;
 	phys_addr_t swfile_pa;
+	raw_spinlock_t swfile_extirq_lock;
 };
 
 #define imsic_vs_csr_read(__c)	\
@@ -613,12 +615,23 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
 {
 	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
 	struct imsic_mrif *mrif = imsic->swfile;
+	unsigned long flags;
+
+	/*
+	 * The critical section is necessary during external interrupt
+	 * updates to avoid the risk of losing interrupts due to potential
+	 * interruptions between reading topei and updating pending status.
+	 */
+
+	raw_spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);
 
 	if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
 	    imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
 		kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
 	else
 		kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
+
+	raw_spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
 }
 
 static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
@@ -664,6 +677,60 @@ static void imsic_swfile_update(struct kvm_vcpu *vcpu,
 	imsic_swfile_extirq_update(vcpu);
 }
 
+bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+	unsigned long flags;
+	bool ret = false;
+
+	/*
+	 * The IMSIC SW-file directly injects interrupt via hvip so
+	 * only check for interrupt when IMSIC VS-file is being used.
+	 */
+
+	read_lock_irqsave(&imsic->vsfile_lock, flags);
+	if (imsic->vsfile_cpu > -1) {
+		/*
+		 * This function is typically called from kvm_vcpu_block() via
+		 * kvm_arch_vcpu_runnable() upon WFI trap. The kvm_vcpu_block()
+		 * can be preempted and the blocking VCPU might resume on a
+		 * different CPU. This means it is possible that current CPU
+		 * does not match the imsic->vsfile_cpu hence this function
+		 * must check imsic->vsfile_cpu before accessing HGEIP CSR.
+		 */
+		if (imsic->vsfile_cpu != vcpu->cpu)
+			ret = true;
+		else
+			ret = !!(csr_read(CSR_HGEIP) & BIT(imsic->vsfile_hgei));
+	}
+	read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+
+	return ret;
+}
+
+void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	/*
+	 * No need to explicitly clear HGEIE CSR bits because the
+	 * hgei interrupt handler (aka hgei_interrupt()) will always
+	 * clear it for us.
+	 */
+}
+
+void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu)
+{
+	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+	unsigned long flags;
+
+	if (!kvm_vcpu_is_blocking(vcpu))
+		return;
+
+	read_lock_irqsave(&imsic->vsfile_lock, flags);
+	if (imsic->vsfile_cpu > -1)
+		csr_set(CSR_HGEIE, BIT(imsic->vsfile_hgei));
+	read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+}
+
 void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
@@ -691,9 +758,8 @@ void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
 	 */
 
 	/* Purge the G-stage mapping */
-	kvm_riscv_gstage_iounmap(vcpu->kvm,
-				 vcpu->arch.aia_context.imsic_addr,
-				 IMSIC_MMIO_PAGE_SZ);
+	kvm_riscv_mmu_iounmap(vcpu->kvm, vcpu->arch.aia_context.imsic_addr,
+			      IMSIC_MMIO_PAGE_SZ);
 
 	/* TODO: Purge the IOMMU mapping ??? */
@@ -748,7 +814,7 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
 	/* For HW acceleration mode, we can't continue */
 	if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_HWACCEL) {
 		run->fail_entry.hardware_entry_failure_reason =
-							CSR_HSTATUS;
+					KVM_EXIT_FAIL_ENTRY_NO_VSFILE;
 		run->fail_entry.cpu = vcpu->cpu;
 		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		return 0;
@@ -769,13 +835,16 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
 	 * producers to the new IMSIC VS-file.
 	 */
 
+	/* Ensure HGEIE CSR bit is zero before using the new IMSIC VS-file */
+	csr_clear(CSR_HGEIE, BIT(new_vsfile_hgei));
+
 	/* Zero-out new IMSIC VS-file */
 	imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);
 
 	/* Update G-stage mapping for the new IMSIC VS-file */
-	ret = kvm_riscv_gstage_ioremap(kvm, vcpu->arch.aia_context.imsic_addr,
-				       new_vsfile_pa, IMSIC_MMIO_PAGE_SZ,
-				       true, true);
+	ret = kvm_riscv_mmu_ioremap(kvm, vcpu->arch.aia_context.imsic_addr,
+				    new_vsfile_pa, IMSIC_MMIO_PAGE_SZ,
+				    true, true);
 	if (ret)
 		goto fail_free_vsfile_hgei;
@@ -962,7 +1031,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
 	if (imsic->vsfile_cpu >= 0) {
 		writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
-		kvm_vcpu_kick(vcpu);
 	} else {
 		eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
 		set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
@@ -1039,6 +1107,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
 	}
 	imsic->swfile = page_to_virt(swfile_page);
 	imsic->swfile_pa = page_to_phys(swfile_page);
+	raw_spin_lock_init(&imsic->swfile_extirq_lock);
 
 	/* Setup IO device */
 	kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
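The locking change near the top of the diff is easier to follow with the SW-file delivery rule in mind: IRQ_VS_EXT must be asserted exactly when delivery is enabled (eidelivery) and at least one interrupt identity is both pending (eip) and enabled (eie); the lowest such identity is the "topei", since lower IMSIC identities have higher priority. Below is a minimal userspace sketch of that decision, not kernel code; the struct layout and model_* names are illustrative only:

#include <stdbool.h>
#include <stdint.h>

/* Toy model of one 64-bit slice of the IMSIC SW-file state. */
struct swfile_model {
	uint64_t eip;		/* pending bit per interrupt identity */
	uint64_t eie;		/* enable bit per interrupt identity */
	bool eidelivery;	/* delivery enable, simplified to a flag */
};

/* Lowest identity that is pending and enabled, or 0 for "none"
 * (identity 0 is reserved in IMSIC, so it can double as "none"). */
static unsigned int model_topei(const struct swfile_model *s)
{
	uint64_t ready = s->eip & s->eie & ~1ULL;

	return ready ? (unsigned int)__builtin_ctzll(ready) : 0;
}

/* Same shape as the condition in imsic_swfile_extirq_update(). */
static bool model_extirq_pending(const struct swfile_model *s)
{
	return s->eidelivery && model_topei(s) != 0;
}

Because a device can post an MSI (setting an eip bit) concurrently with this check-then-act sequence, an unlocked update could read topei as zero, be interleaved with a write that marks an interrupt pending, and then deassert the line, leaving the interrupt pending but undelivered. Serializing the whole topei-read/line-update sequence with swfile_extirq_lock closes that window, which is what the comment added in that hunk describes.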

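The new load/put hooks and the kvm_vcpu_kick() removal on the VS-file path read as two halves of one wakeup scheme. While a vCPU blocks in WFI, kvm_riscv_vcpu_aia_imsic_put() arms a doorbell by setting the VS-file's HGEIE bit, so an MSI landing in the VS-file raises the hgei interrupt, whose handler (per the comment in the load hook) clears the enable bit and wakes the vCPU; kvm_riscv_vcpu_aia_imsic_has_interrupt() then polls HGEIP when KVM asks whether the vCPU is runnable. A toy model of that handshake, using C11 atomics in place of CSR accesses; none of these names are kernel code:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long hgeip;	/* stands in for the HGEIP CSR */
static _Atomic unsigned long hgeie;	/* stands in for the HGEIE CSR */

/* put path: arm the doorbell only for a genuinely blocking vCPU */
static void model_put(unsigned int hgei, bool vcpu_is_blocking)
{
	if (vcpu_is_blocking)
		atomic_fetch_or(&hgeie, 1UL << hgei);
}

/* device side: an MSI write to the VS-file sets the pending bit;
 * when (hgeip & hgeie) is non-zero the hgei interrupt fires and its
 * handler clears the enable bit and wakes the blocked vCPU */
static void model_msi(unsigned int hgei)
{
	atomic_fetch_or(&hgeip, 1UL << hgei);
}

/* has_interrupt path, same-CPU case: poll the pending bit */
static bool model_has_interrupt(unsigned int hgei)
{
	return atomic_load(&hgeip) & (1UL << hgei);
}

With the doorbell armed whenever a vCPU blocks, a direct write to the VS-file already wakes the target, which is presumably why the explicit kick after writel() could be dropped in the inject path.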