Diffstat (limited to 'arch/riscv/kvm/aia.c')
-rw-r--r--  arch/riscv/kvm/aia.c  369
1 file changed, 326 insertions(+), 43 deletions(-)
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 4f1286fc7f17..dad318185660 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -8,12 +8,29 @@
*/
#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
-#include <asm/hwcap.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+#include <asm/cpufeature.h>
+#include <asm/kvm_nacl.h>
+
+struct aia_hgei_control {
+ raw_spinlock_t lock;
+ unsigned long free_bitmap;
+ struct kvm_vcpu *owners[BITS_PER_LONG];
+};
+static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
+static int hgei_parent_irq;
+unsigned int kvm_riscv_aia_nr_hgei;
+unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
-static void aia_set_hvictl(bool ext_irq_pending)
+static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
{
unsigned long hvictl;
@@ -24,7 +41,7 @@ static void aia_set_hvictl(bool ext_irq_pending)
hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
hvictl |= ext_irq_pending;
- csr_write(CSR_HVICTL, hvictl);
+ return hvictl;
}
#ifdef CONFIG_32BIT
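Note that the helper above is now side-effect free: it only computes the
HVICTL image and leaves the CSR write to the caller, which picks the plain
or NACL-aware accessor. Both spellings appear later in this patch:

	ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
	csr_write(CSR_HVICTL, aia_hvictl_value(false));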
@@ -50,7 +67,7 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (kvm_riscv_aia_available())
- csr->vsieh = csr_read(CSR_VSIEH);
+ csr->vsieh = ncsr_read(CSR_VSIEH);
}
#endif
@@ -74,7 +91,7 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
return false;
- return false;
+ return kvm_riscv_vcpu_aia_imsic_has_interrupt(vcpu);
}
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
@@ -85,45 +102,79 @@ void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
return;
#ifdef CONFIG_32BIT
- csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
+ ncsr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
- aia_set_hvictl(!!(csr->hvip & BIT(IRQ_VS_EXT)));
+ ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
}
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+ void *nsh;
if (!kvm_riscv_aia_available())
return;
- csr_write(CSR_VSISELECT, csr->vsiselect);
- csr_write(CSR_HVIPRIO1, csr->hviprio1);
- csr_write(CSR_HVIPRIO2, csr->hviprio2);
+ if (kvm_riscv_nacl_sync_csr_available()) {
+ nsh = nacl_shmem();
+ nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);
+ nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
+ nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
- csr_write(CSR_VSIEH, csr->vsieh);
- csr_write(CSR_HVIPH, csr->hviph);
- csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
- csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
+ nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh);
+ nacl_csr_write(nsh, CSR_HVIPH, csr->hviph);
+ nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h);
+ nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h);
#endif
+ } else {
+ csr_write(CSR_VSISELECT, csr->vsiselect);
+ csr_write(CSR_HVIPRIO1, csr->hviprio1);
+ csr_write(CSR_HVIPRIO2, csr->hviprio2);
+#ifdef CONFIG_32BIT
+ csr_write(CSR_VSIEH, csr->vsieh);
+ csr_write(CSR_HVIPH, csr->hviph);
+ csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
+ csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
+#endif
+ }
+
+ if (kvm_riscv_aia_initialized(vcpu->kvm))
+ kvm_riscv_vcpu_aia_imsic_load(vcpu, cpu);
}
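For context, a minimal sketch of the staged-write pattern the NACL branch
above depends on, assuming only the helpers declared in <asm/kvm_nacl.h>
(nacl_shmem(), nacl_csr_write()); the flush mechanics are paraphrased
rather than quoted:

	/*
	 * Sketch: when SBI nested acceleration can sync CSRs, values are
	 * staged into per-hart shared memory instead of trapping on every
	 * csr_write(); the staged CSRs are then synchronized in one go at
	 * the next world switch.
	 */
	void *nsh = nacl_shmem();	/* per-hart shared memory */

	nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);	/* staged */
	nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);	/* staged */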
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+ void *nsh;
if (!kvm_riscv_aia_available())
return;
- csr->vsiselect = csr_read(CSR_VSISELECT);
- csr->hviprio1 = csr_read(CSR_HVIPRIO1);
- csr->hviprio2 = csr_read(CSR_HVIPRIO2);
+ if (kvm_riscv_aia_initialized(vcpu->kvm))
+ kvm_riscv_vcpu_aia_imsic_put(vcpu);
+
+ if (kvm_riscv_nacl_available()) {
+ nsh = nacl_shmem();
+ csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
+ csr->hviprio1 = nacl_csr_read(nsh, CSR_HVIPRIO1);
+ csr->hviprio2 = nacl_csr_read(nsh, CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
- csr->vsieh = csr_read(CSR_VSIEH);
- csr->hviph = csr_read(CSR_HVIPH);
- csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
- csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
+ csr->vsieh = nacl_csr_read(nsh, CSR_VSIEH);
+ csr->hviph = nacl_csr_read(nsh, CSR_HVIPH);
+ csr->hviprio1h = nacl_csr_read(nsh, CSR_HVIPRIO1H);
+ csr->hviprio2h = nacl_csr_read(nsh, CSR_HVIPRIO2H);
+#endif
+ } else {
+ csr->vsiselect = csr_read(CSR_VSISELECT);
+ csr->hviprio1 = csr_read(CSR_HVIPRIO1);
+ csr->hviprio2 = csr_read(CSR_HVIPRIO2);
+#ifdef CONFIG_32BIT
+ csr->vsieh = csr_read(CSR_VSIEH);
+ csr->hviph = csr_read(CSR_HVIPH);
+ csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
+ csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
#endif
+ }
}
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
@@ -133,7 +184,7 @@ int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
- return -EINVAL;
+ return -ENOENT;
*out_val = 0;
if (kvm_riscv_aia_available())
@@ -149,7 +200,7 @@ int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
- return -EINVAL;
+ return -ENOENT;
if (kvm_riscv_aia_available()) {
((unsigned long *)csr)[reg_num] = val;
@@ -207,20 +258,20 @@ static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
switch (bitpos / BITS_PER_LONG) {
case 0:
- hviprio = csr_read(CSR_HVIPRIO1);
+ hviprio = ncsr_read(CSR_HVIPRIO1);
break;
case 1:
#ifndef CONFIG_32BIT
- hviprio = csr_read(CSR_HVIPRIO2);
+ hviprio = ncsr_read(CSR_HVIPRIO2);
break;
#else
- hviprio = csr_read(CSR_HVIPRIO1H);
+ hviprio = ncsr_read(CSR_HVIPRIO1H);
break;
case 2:
- hviprio = csr_read(CSR_HVIPRIO2);
+ hviprio = ncsr_read(CSR_HVIPRIO2);
break;
case 3:
- hviprio = csr_read(CSR_HVIPRIO2H);
+ hviprio = ncsr_read(CSR_HVIPRIO2H);
break;
#endif
default:
@@ -240,20 +291,20 @@ static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
switch (bitpos / BITS_PER_LONG) {
case 0:
- hviprio = csr_read(CSR_HVIPRIO1);
+ hviprio = ncsr_read(CSR_HVIPRIO1);
break;
case 1:
#ifndef CONFIG_32BIT
- hviprio = csr_read(CSR_HVIPRIO2);
+ hviprio = ncsr_read(CSR_HVIPRIO2);
break;
#else
- hviprio = csr_read(CSR_HVIPRIO1H);
+ hviprio = ncsr_read(CSR_HVIPRIO1H);
break;
case 2:
- hviprio = csr_read(CSR_HVIPRIO2);
+ hviprio = ncsr_read(CSR_HVIPRIO2);
break;
case 3:
- hviprio = csr_read(CSR_HVIPRIO2H);
+ hviprio = ncsr_read(CSR_HVIPRIO2H);
break;
#endif
default:
@@ -265,20 +316,20 @@ static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
switch (bitpos / BITS_PER_LONG) {
case 0:
- csr_write(CSR_HVIPRIO1, hviprio);
+ ncsr_write(CSR_HVIPRIO1, hviprio);
break;
case 1:
#ifndef CONFIG_32BIT
- csr_write(CSR_HVIPRIO2, hviprio);
+ ncsr_write(CSR_HVIPRIO2, hviprio);
break;
#else
- csr_write(CSR_HVIPRIO1H, hviprio);
+ ncsr_write(CSR_HVIPRIO1H, hviprio);
break;
case 2:
- csr_write(CSR_HVIPRIO2, hviprio);
+ ncsr_write(CSR_HVIPRIO2, hviprio);
break;
case 3:
- csr_write(CSR_HVIPRIO2H, hviprio);
+ ncsr_write(CSR_HVIPRIO2H, hviprio);
break;
#endif
default:
@@ -323,8 +374,6 @@ static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
return KVM_INSN_CONTINUE_NEXT_SEPC;
}
-#define IMSIC_FIRST 0x70
-#define IMSIC_LAST 0xff
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
@@ -336,7 +385,7 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
return KVM_INSN_ILLEGAL_TRAP;
/* First try to emulate in kernel space */
- isel = csr_read(CSR_VSISELECT) & ISELECT_MASK;
+ isel = ncsr_read(CSR_VSISELECT) & ISELECT_MASK;
if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
@@ -348,12 +397,147 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
return KVM_INSN_EXIT_TO_USER_SPACE;
}
+int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
+ void __iomem **hgei_va, phys_addr_t *hgei_pa)
+{
+ int ret = -ENOENT;
+ unsigned long flags;
+ const struct imsic_global_config *gc;
+ const struct imsic_local_config *lc;
+ struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
+
+ if (!kvm_riscv_aia_available() || !hgctrl)
+ return -ENODEV;
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+ if (hgctrl->free_bitmap) {
+ ret = __ffs(hgctrl->free_bitmap);
+ hgctrl->free_bitmap &= ~BIT(ret);
+ hgctrl->owners[ret] = owner;
+ }
+
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+ gc = imsic_get_global_config();
+ lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL;
+ if (lc && ret > 0) {
+ if (hgei_va)
+ *hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ);
+ if (hgei_pa)
+ *hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ);
+ }
+
+ return ret;
+}
+
+void kvm_riscv_aia_free_hgei(int cpu, int hgei)
+{
+ unsigned long flags;
+ struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
+
+ if (!kvm_riscv_aia_available() || !hgctrl)
+ return;
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+ if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
+ if (!(hgctrl->free_bitmap & BIT(hgei))) {
+ hgctrl->free_bitmap |= BIT(hgei);
+ hgctrl->owners[hgei] = NULL;
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+}
+
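A hedged usage sketch for the pair of helpers above (the surrounding flow
is illustrative; in this series the actual caller is the in-kernel IMSIC
virtualization):

	void __iomem *va;
	phys_addr_t pa;
	int hgei;

	/* Claim a guest external interrupt line on 'cpu' for this VCPU. */
	hgei = kvm_riscv_aia_alloc_hgei(cpu, vcpu, &va, &pa);
	if (hgei > 0) {
		/* 'va'/'pa' locate the IMSIC guest file backing this line;
		 * program 'pa' as the MSI target for the guest.
		 */
		/* ... use the guest interrupt file ... */
		kvm_riscv_aia_free_hgei(cpu, hgei);	/* release on teardown */
	}
	/* hgei < 0: -ENODEV (no AIA) or -ENOENT (no free line on 'cpu'). */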
+static irqreturn_t hgei_interrupt(int irq, void *dev_id)
+{
+ int i;
+ unsigned long hgei_mask, flags;
+ struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
+
+ hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
+ csr_clear(CSR_HGEIE, hgei_mask);
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+ for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
+ if (hgctrl->owners[i])
+ kvm_vcpu_kick(hgctrl->owners[i]);
+ }
+
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+ put_cpu_ptr(&aia_hgei);
+ return IRQ_HANDLED;
+}
+
+static int aia_hgei_init(void)
+{
+ int cpu, rc;
+ struct irq_domain *domain;
+ struct aia_hgei_control *hgctrl;
+
+ /* Initialize per-CPU guest external interrupt line management */
+ for_each_possible_cpu(cpu) {
+ hgctrl = per_cpu_ptr(&aia_hgei, cpu);
+ raw_spin_lock_init(&hgctrl->lock);
+ if (kvm_riscv_aia_nr_hgei) {
+ hgctrl->free_bitmap =
+ BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
+ hgctrl->free_bitmap &= ~BIT(0);
+ } else
+ hgctrl->free_bitmap = 0;
+ }
+
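A worked instance of the free_bitmap initialization above (values are
illustrative):

	/*
	 * With kvm_riscv_aia_nr_hgei == 3:
	 *   BIT(3 + 1) - 1   == 0b1111    (lines 0..3)
	 *   0b1111 & ~BIT(0) == 0b1110    (lines 1..3 free)
	 * Line 0 stays reserved, matching the __ffs() allocator in
	 * kvm_riscv_aia_alloc_hgei(), which only ever hands out line >= 1.
	 */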
+ /* Skip SGEI interrupt setup for zero guest external interrupts */
+ if (!kvm_riscv_aia_nr_hgei)
+ goto skip_sgei_interrupt;
+
+ /* Find INTC irq domain */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
+ DOMAIN_BUS_ANY);
+ if (!domain) {
+ kvm_err("unable to find INTC domain\n");
+ return -ENOENT;
+ }
+
+ /* Map per-CPU SGEI interrupt from INTC domain */
+ hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
+ if (!hgei_parent_irq) {
+ kvm_err("unable to map SGEI IRQ\n");
+ return -ENOMEM;
+ }
+
+ /* Request per-CPU SGEI interrupt */
+ rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
+ "riscv-kvm", &aia_hgei);
+ if (rc) {
+ kvm_err("failed to request SGEI IRQ\n");
+ return rc;
+ }
+
+skip_sgei_interrupt:
+ return 0;
+}
+
+static void aia_hgei_exit(void)
+{
+ /* Do nothing for zero guest external interrupts */
+ if (!kvm_riscv_aia_nr_hgei)
+ return;
+
+ /* Free per-CPU SGEI interrupt */
+ free_percpu_irq(hgei_parent_irq, &aia_hgei);
+}
+
void kvm_riscv_aia_enable(void)
{
if (!kvm_riscv_aia_available())
return;
- aia_set_hvictl(false);
+ csr_write(CSR_HVICTL, aia_hvictl_value(false));
csr_write(CSR_HVIPRIO1, 0x0);
csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
@@ -362,20 +546,111 @@ void kvm_riscv_aia_enable(void)
csr_write(CSR_HVIPRIO1H, 0x0);
csr_write(CSR_HVIPRIO2H, 0x0);
#endif
+
+ /* Enable per-CPU SGEI interrupt */
+ enable_percpu_irq(hgei_parent_irq,
+ irq_get_trigger_type(hgei_parent_irq));
+ csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
+ /* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
+ if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
+ csr_set(CSR_HVIEN, BIT(IRQ_PMU_OVF));
}
void kvm_riscv_aia_disable(void)
{
+ int i;
+ unsigned long flags;
+ struct kvm_vcpu *vcpu;
+ struct aia_hgei_control *hgctrl;
+
if (!kvm_riscv_aia_available())
return;
+ hgctrl = get_cpu_ptr(&aia_hgei);
+
+ if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
+ csr_clear(CSR_HVIEN, BIT(IRQ_PMU_OVF));
+ /* Disable per-CPU SGEI interrupt */
+ csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
+ disable_percpu_irq(hgei_parent_irq);
+
+ csr_write(CSR_HVICTL, aia_hvictl_value(false));
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+ for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
+ vcpu = hgctrl->owners[i];
+ if (!vcpu)
+ continue;
+
+ /*
+ * We release hgctrl->lock before notifying IMSIC
+ * so that we don't have lock ordering issues.
+ */
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+ /* Notify IMSIC */
+ kvm_riscv_vcpu_aia_imsic_release(vcpu);
+
+ /*
+ * Wakeup VCPU if it was blocked so that it can
+ * run on other HARTs
+ */
+ if (csr_read(CSR_HGEIE) & BIT(i)) {
+ csr_clear(CSR_HGEIE, BIT(i));
+ kvm_vcpu_kick(vcpu);
+ }
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+ }
- aia_set_hvictl(false);
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+ put_cpu_ptr(&aia_hgei);
}
int kvm_riscv_aia_init(void)
{
+ int rc;
+ const struct imsic_global_config *gc;
+
if (!riscv_isa_extension_available(NULL, SxAIA))
return -ENODEV;
+ gc = imsic_get_global_config();
+
+ /* Figure-out number of bits in HGEIE */
+ csr_write(CSR_HGEIE, -1UL);
+ kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
+ csr_write(CSR_HGEIE, 0);
+ if (kvm_riscv_aia_nr_hgei)
+ kvm_riscv_aia_nr_hgei--;
+
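A worked instance of the HGEIE probe above (the hart's HGEIE geometry is
an assumption): implemented HGEIE bits read back as 1 after writing -1UL,
and bit 0 is hardwired to zero, so:

	/*
	 * RV64 hart implementing all 63 usable lines:
	 *   csr_read(CSR_HGEIE) == 0xfffffffffffffffe after writing -1UL
	 *   fls_long(...) == 64  =>  kvm_riscv_aia_nr_hgei = 64 - 1 = 63
	 * Hart with only two guest interrupt files (reads back 0b110):
	 *   fls_long(0b110) == 3  =>  kvm_riscv_aia_nr_hgei = 3 - 1 = 2
	 */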
+ /*
+ * Number of usable HGEI lines should be minimum of per-HART
+ * IMSIC guest files and number of bits in HGEIE
+ */
+ if (gc)
+ kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei,
+ BIT(gc->guest_index_bits) - 1);
+ else
+ kvm_riscv_aia_nr_hgei = 0;
+
+ /* Find number of guest MSI IDs */
+ kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
+ if (gc && kvm_riscv_aia_nr_hgei)
+ kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1;
+
+ /* Initialize guest external interrupt line management */
+ rc = aia_hgei_init();
+ if (rc)
+ return rc;
+
+ /* Register device operations */
+ rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
+ KVM_DEV_TYPE_RISCV_AIA);
+ if (rc) {
+ aia_hgei_exit();
+ return rc;
+ }
/* Enable KVM AIA support */
static_branch_enable(&kvm_riscv_aia_available);
@@ -385,4 +660,12 @@ int kvm_riscv_aia_init(void)
void kvm_riscv_aia_exit(void)
{
+ if (!kvm_riscv_aia_available())
+ return;
+
+ /* Unregister device operations */
+ kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);
+
+ /* Cleanup the HGEI state */
+ aia_hgei_exit();
}