Diffstat (limited to 'arch/riscv/kvm/aia.c')
-rw-r--r--  arch/riscv/kvm/aia.c  200
1 file changed, 104 insertions(+), 96 deletions(-)
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 0f0a9d11bb5f..dad318185660 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -10,12 +10,13 @@
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>
+#include <linux/irqchip/riscv-imsic.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/cpufeature.h>
-#include <asm/kvm_aia_imsic.h>
+#include <asm/kvm_nacl.h>
struct aia_hgei_control {
raw_spinlock_t lock;
@@ -29,29 +30,7 @@ unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
-static int aia_find_hgei(struct kvm_vcpu *owner)
-{
- int i, hgei;
- unsigned long flags;
- struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
-
- raw_spin_lock_irqsave(&hgctrl->lock, flags);
-
- hgei = -1;
- for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
- if (hgctrl->owners[i] == owner) {
- hgei = i;
- break;
- }
- }
-
- raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
-
- put_cpu_ptr(&aia_hgei);
- return hgei;
-}
-
-static void aia_set_hvictl(bool ext_irq_pending)
+static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
{
unsigned long hvictl;
@@ -62,7 +41,7 @@ static void aia_set_hvictl(bool ext_irq_pending)
hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
hvictl |= ext_irq_pending;
- csr_write(CSR_HVICTL, hvictl);
+ return hvictl;
}
#ifdef CONFIG_32BIT
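[Note: the hunk above splits the HVICTL computation from the CSR write, so each call site can route the value either through a direct csr_write() or through the NACL path. A minimal sketch of the two resulting call-site styles, reconstructed from later hunks in this diff (not additional kernel code):]

	/* vCPU run path: may go through NACL shared memory */
	ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));

	/* host enable/disable path: always a direct CSR write */
	csr_write(CSR_HVICTL, aia_hvictl_value(false));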
@@ -88,13 +67,12 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (kvm_riscv_aia_available())
- csr->vsieh = csr_read(CSR_VSIEH);
+ csr->vsieh = ncsr_read(CSR_VSIEH);
}
#endif
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
- int hgei;
unsigned long seip;
if (!kvm_riscv_aia_available())
@@ -113,11 +91,7 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
return false;
- hgei = aia_find_hgei(vcpu);
- if (hgei > 0)
- return !!(csr_read(CSR_HGEIP) & BIT(hgei));
-
- return false;
+ return kvm_riscv_vcpu_aia_imsic_has_interrupt(vcpu);
}
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
@@ -128,45 +102,79 @@ void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
return;
#ifdef CONFIG_32BIT
- csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
+ ncsr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
- aia_set_hvictl(!!(csr->hvip & BIT(IRQ_VS_EXT)));
+ ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
}
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+ void *nsh;
if (!kvm_riscv_aia_available())
return;
- csr_write(CSR_VSISELECT, csr->vsiselect);
- csr_write(CSR_HVIPRIO1, csr->hviprio1);
- csr_write(CSR_HVIPRIO2, csr->hviprio2);
+ if (kvm_riscv_nacl_sync_csr_available()) {
+ nsh = nacl_shmem();
+ nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);
+ nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
+ nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
+#ifdef CONFIG_32BIT
+ nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh);
+ nacl_csr_write(nsh, CSR_HVIPH, csr->hviph);
+ nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h);
+ nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h);
+#endif
+ } else {
+ csr_write(CSR_VSISELECT, csr->vsiselect);
+ csr_write(CSR_HVIPRIO1, csr->hviprio1);
+ csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
- csr_write(CSR_VSIEH, csr->vsieh);
- csr_write(CSR_HVIPH, csr->hviph);
- csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
- csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
+ csr_write(CSR_VSIEH, csr->vsieh);
+ csr_write(CSR_HVIPH, csr->hviph);
+ csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
+ csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
+ }
+
+ if (kvm_riscv_aia_initialized(vcpu->kvm))
+ kvm_riscv_vcpu_aia_imsic_load(vcpu, cpu);
}
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+ void *nsh;
if (!kvm_riscv_aia_available())
return;
- csr->vsiselect = csr_read(CSR_VSISELECT);
- csr->hviprio1 = csr_read(CSR_HVIPRIO1);
- csr->hviprio2 = csr_read(CSR_HVIPRIO2);
+ if (kvm_riscv_aia_initialized(vcpu->kvm))
+ kvm_riscv_vcpu_aia_imsic_put(vcpu);
+
+ if (kvm_riscv_nacl_available()) {
+ nsh = nacl_shmem();
+ csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
+ csr->hviprio1 = nacl_csr_read(nsh, CSR_HVIPRIO1);
+ csr->hviprio2 = nacl_csr_read(nsh, CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
- csr->vsieh = csr_read(CSR_VSIEH);
- csr->hviph = csr_read(CSR_HVIPH);
- csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
- csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
+ csr->vsieh = nacl_csr_read(nsh, CSR_VSIEH);
+ csr->hviph = nacl_csr_read(nsh, CSR_HVIPH);
+ csr->hviprio1h = nacl_csr_read(nsh, CSR_HVIPRIO1H);
+ csr->hviprio2h = nacl_csr_read(nsh, CSR_HVIPRIO2H);
#endif
+ } else {
+ csr->vsiselect = csr_read(CSR_VSISELECT);
+ csr->hviprio1 = csr_read(CSR_HVIPRIO1);
+ csr->hviprio2 = csr_read(CSR_HVIPRIO2);
+#ifdef CONFIG_32BIT
+ csr->vsieh = csr_read(CSR_VSIEH);
+ csr->hviph = csr_read(CSR_HVIPH);
+ csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
+ csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
+#endif
+ }
}
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
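[Note: the load/put hunks above introduce the central pattern of this change: when SBI nested acceleration (NACL) is available, CSR state is exchanged through the per-CPU NACL shared memory instead of one trapping CSR access at a time. A minimal sketch of that pattern for a single CSR; aia_save_vsiselect is an illustrative helper, not a kernel symbol:]

static void aia_save_vsiselect(struct kvm_vcpu_aia_csr *csr)
{
	if (kvm_riscv_nacl_available()) {
		/* Batched path: read the CSR image from NACL shared memory */
		void *nsh = nacl_shmem();

		csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
	} else {
		/* Direct path: ordinary trapping CSR access */
		csr->vsiselect = csr_read(CSR_VSISELECT);
	}
}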
@@ -250,20 +258,20 @@ static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
switch (bitpos / BITS_PER_LONG) {
case 0:
- hviprio = csr_read(CSR_HVIPRIO1);
+ hviprio = ncsr_read(CSR_HVIPRIO1);
break;
case 1:
#ifndef CONFIG_32BIT
- hviprio = csr_read(CSR_HVIPRIO2);
+ hviprio = ncsr_read(CSR_HVIPRIO2);
break;
#else
- hviprio = csr_read(CSR_HVIPRIO1H);
+ hviprio = ncsr_read(CSR_HVIPRIO1H);
break;
case 2:
- hviprio = csr_read(CSR_HVIPRIO2);
+ hviprio = ncsr_read(CSR_HVIPRIO2);
break;
case 3:
- hviprio = csr_read(CSR_HVIPRIO2H);
+ hviprio = ncsr_read(CSR_HVIPRIO2H);
break;
#endif
default:
@@ -283,20 +291,20 @@ static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
switch (bitpos / BITS_PER_LONG) {
case 0:
- hviprio = csr_read(CSR_HVIPRIO1);
+ hviprio = ncsr_read(CSR_HVIPRIO1);
break;
case 1:
#ifndef CONFIG_32BIT
- hviprio = csr_read(CSR_HVIPRIO2);
+ hviprio = ncsr_read(CSR_HVIPRIO2);
break;
#else
- hviprio = csr_read(CSR_HVIPRIO1H);
+ hviprio = ncsr_read(CSR_HVIPRIO1H);
break;
case 2:
- hviprio = csr_read(CSR_HVIPRIO2);
+ hviprio = ncsr_read(CSR_HVIPRIO2);
break;
case 3:
- hviprio = csr_read(CSR_HVIPRIO2H);
+ hviprio = ncsr_read(CSR_HVIPRIO2H);
break;
#endif
default:
@@ -308,20 +316,20 @@ static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
switch (bitpos / BITS_PER_LONG) {
case 0:
- csr_write(CSR_HVIPRIO1, hviprio);
+ ncsr_write(CSR_HVIPRIO1, hviprio);
break;
case 1:
#ifndef CONFIG_32BIT
- csr_write(CSR_HVIPRIO2, hviprio);
+ ncsr_write(CSR_HVIPRIO2, hviprio);
break;
#else
- csr_write(CSR_HVIPRIO1H, hviprio);
+ ncsr_write(CSR_HVIPRIO1H, hviprio);
break;
case 2:
- csr_write(CSR_HVIPRIO2, hviprio);
+ ncsr_write(CSR_HVIPRIO2, hviprio);
break;
case 3:
- csr_write(CSR_HVIPRIO2H, hviprio);
+ ncsr_write(CSR_HVIPRIO2H, hviprio);
break;
#endif
default:
@@ -377,7 +385,7 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
return KVM_INSN_ILLEGAL_TRAP;
/* First try to emulate in kernel space */
- isel = csr_read(CSR_VSISELECT) & ISELECT_MASK;
+ isel = ncsr_read(CSR_VSISELECT) & ISELECT_MASK;
if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
@@ -394,6 +402,8 @@ int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
{
int ret = -ENOENT;
unsigned long flags;
+ const struct imsic_global_config *gc;
+ const struct imsic_local_config *lc;
struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
if (!kvm_riscv_aia_available() || !hgctrl)
@@ -409,11 +419,14 @@ int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
- /* TODO: To be updated later by AIA IMSIC HW guest file support */
- if (hgei_va)
- *hgei_va = NULL;
- if (hgei_pa)
- *hgei_pa = 0;
+ gc = imsic_get_global_config();
+ lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL;
+ if (lc && ret > 0) {
+ if (hgei_va)
+ *hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ);
+ if (hgei_pa)
+ *hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ);
+ }
return ret;
}
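[Note: with IMSIC guest files backing each HGEI line, the returned line number doubles as a page index into the per-CPU IMSIC MMIO region: guest interrupt file N sits N pages above the host file. A standalone sketch of the address math mirrored by lc->msi_pa + ret * IMSIC_MMIO_PAGE_SZ above; names and parameters are illustrative:]

#include <stdint.h>

static uint64_t imsic_guest_file_pa(uint64_t msi_pa, unsigned int hgei,
				    uint64_t page_sz)
{
	/* Guest interrupt file 'hgei' is 'hgei' pages above the host file */
	return msi_pa + (uint64_t)hgei * page_sz;
}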
@@ -438,22 +451,6 @@ void kvm_riscv_aia_free_hgei(int cpu, int hgei)
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}
-void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
-{
- int hgei;
-
- if (!kvm_riscv_aia_available())
- return;
-
- hgei = aia_find_hgei(owner);
- if (hgei > 0) {
- if (enable)
- csr_set(CSR_HGEIE, BIT(hgei));
- else
- csr_clear(CSR_HGEIE, BIT(hgei));
- }
-}
-
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
int i;
@@ -494,6 +491,10 @@ static int aia_hgei_init(void)
hgctrl->free_bitmap = 0;
}
+ /* Skip SGEI interrupt setup for zero guest external interrupts */
+ if (!kvm_riscv_aia_nr_hgei)
+ goto skip_sgei_interrupt;
+
/* Find INTC irq domain */
domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
DOMAIN_BUS_ANY);
@@ -517,11 +518,16 @@ static int aia_hgei_init(void)
return rc;
}
+skip_sgei_interrupt:
return 0;
}
static void aia_hgei_exit(void)
{
+ /* Do nothing for zero guest external interrupts */
+ if (!kvm_riscv_aia_nr_hgei)
+ return;
+
/* Free per-CPU SGEI interrupt */
free_percpu_irq(hgei_parent_irq, &aia_hgei);
}
@@ -531,7 +537,7 @@ void kvm_riscv_aia_enable(void)
if (!kvm_riscv_aia_available())
return;
- aia_set_hvictl(false);
+ csr_write(CSR_HVICTL, aia_hvictl_value(false));
csr_write(CSR_HVIPRIO1, 0x0);
csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
@@ -547,7 +553,7 @@ void kvm_riscv_aia_enable(void)
csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
/* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
- csr_write(CSR_HVIEN, BIT(IRQ_PMU_OVF));
+ csr_set(CSR_HVIEN, BIT(IRQ_PMU_OVF));
}
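[Note: the csr_write() to csr_set() change for HVIEN above is a behavioral fix: csr_set() performs a read-modify-write (csrrs) that only turns on the PMU-overflow bit, whereas csr_write() would have clobbered any HVIEN bits enabled elsewhere. Sketch of the difference, not new kernel code:]

	csr_write(CSR_HVIEN, BIT(IRQ_PMU_OVF));	/* old: overwrites all other HVIEN bits */
	csr_set(CSR_HVIEN, BIT(IRQ_PMU_OVF));	/* new: sets only the overflow bit */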
void kvm_riscv_aia_disable(void)
@@ -567,7 +573,7 @@ void kvm_riscv_aia_disable(void)
csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
disable_percpu_irq(hgei_parent_irq);
- aia_set_hvictl(false);
+ csr_write(CSR_HVICTL, aia_hvictl_value(false));
raw_spin_lock_irqsave(&hgctrl->lock, flags);
@@ -605,9 +611,11 @@ void kvm_riscv_aia_disable(void)
int kvm_riscv_aia_init(void)
{
int rc;
+ const struct imsic_global_config *gc;
if (!riscv_isa_extension_available(NULL, SxAIA))
return -ENODEV;
+ gc = imsic_get_global_config();
/* Figure-out number of bits in HGEIE */
csr_write(CSR_HGEIE, -1UL);
@@ -619,17 +627,17 @@ int kvm_riscv_aia_init(void)
/*
* Number of usable HGEI lines should be minimum of per-HART
* IMSIC guest files and number of bits in HGEIE
- *
- * TODO: To be updated later by AIA IMSIC HW guest file support
*/
- kvm_riscv_aia_nr_hgei = 0;
+ if (gc)
+ kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei,
+ BIT(gc->guest_index_bits) - 1);
+ else
+ kvm_riscv_aia_nr_hgei = 0;
- /*
- * Find number of guest MSI IDs
- *
- * TODO: To be updated later by AIA IMSIC HW guest file support
- */
+ /* Find number of guest MSI IDs */
kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
+ if (gc && kvm_riscv_aia_nr_hgei)
+ kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1;
/* Initialize guest external interrupt line management */
rc = aia_hgei_init();
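[Note: a worked example of the limits computed above, under an assumed IMSIC layout; the values are illustrative, not from this patch:]

/*
 * Assume guest_index_bits = 3 and nr_guest_ids = 63:
 *   usable guest files per HART = BIT(3) - 1 = 7
 *   kvm_riscv_aia_nr_hgei = min(hgeie_bits, 7), e.g. min(63, 7) = 7
 *   kvm_riscv_aia_max_ids = nr_guest_ids + 1 = 64 MSI IDs per guest file
 * With no IMSIC global config (gc == NULL), nr_hgei stays 0 and
 * max_ids falls back to IMSIC_MAX_ID.
 */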