Diffstat (limited to 'arch/loongarch/kvm/vcpu.c'):
 arch/loongarch/kvm/vcpu.c | 126 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 99 insertions(+), 27 deletions(-)
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index cab1818be68d..6d833599ef2e 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -4,7 +4,6 @@
*/
#include <linux/kvm_host.h>
-#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
@@ -20,7 +19,13 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, idle_exits),
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
STATS_DESC_COUNTER(VCPU, signal_exits),
- STATS_DESC_COUNTER(VCPU, hypercall_exits)
+ STATS_DESC_COUNTER(VCPU, hypercall_exits),
+ STATS_DESC_COUNTER(VCPU, ipi_read_exits),
+ STATS_DESC_COUNTER(VCPU, ipi_write_exits),
+ STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
+ STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
+ STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
+ STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
};
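Each STATS_DESC_COUNTER(VCPU, name) entry must be backed by a u64 field of the same name in the architecture's struct kvm_vcpu_stat, which the exit handlers bump with e.g. ++vcpu->stat.ipi_read_exits. A sketch of how the LoongArch struct in asm/kvm_host.h grows to match, assuming the field order mirrors the descriptor table:

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
	u64 hypercall_exits;
	/* new per-device exit counters, one per descriptor above */
	u64 ipi_read_exits;
	u64 ipi_write_exits;
	u64 eiointc_read_exits;
	u64 eiointc_write_exits;
	u64 pch_pic_read_exits;
	u64 pch_pic_write_exits;
};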
const struct kvm_stats_header kvm_vcpu_stats_header = {
@@ -127,6 +132,9 @@ static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
* Clear KVM_LARCH_PMU if the guest is not using PMU CSRs when
* exiting the guest, so that on the next trap into the guest we
* do not need to deal with the PMU CSR context.
+ *
+ * Otherwise set the KVM_REQ_PMU request bit so that the guest PMU
+ * context is restored before re-entering the guest.
*/
val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
@@ -134,16 +142,12 @@ static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
if (!(val & KVM_PMU_EVENT_ENABLED))
vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
+ else
+ kvm_make_request(KVM_REQ_PMU, vcpu);
kvm_restore_host_pmu(vcpu);
}
-static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
-{
- if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
- kvm_make_request(KVM_REQ_PMU, vcpu);
-}
-
static void kvm_check_pmu(struct kvm_vcpu *vcpu)
{
if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
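The hunk above drops the kvm_restore_pmu() wrapper in favor of the generic request mechanism: kvm_make_request(KVM_REQ_PMU, vcpu) sets a bit that is tested and cleared atomically just before guest entry. The hunk cuts off inside kvm_check_pmu(); restated in full with the likely continuation (a sketch, assuming the kvm_own_pmu() helper from the same file reloads the guest PMU CSRs):

static void kvm_check_pmu(struct kvm_vcpu *vcpu)
{
	/* kvm_check_request() tests and clears the request bit atomically */
	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
		kvm_own_pmu(vcpu);			/* reload guest PMU CSRs */
		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;	/* mark PMU guest-owned */
	}
}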
@@ -240,16 +244,18 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
*/
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
- int ret;
+ int idx, ret;
/*
* Check conditions before entering the guest
*/
- ret = xfer_to_guest_mode_handle_work(vcpu);
+ ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
if (ret < 0)
return ret;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_check_requests(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
return ret;
}
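The new srcu_read_lock()/srcu_read_unlock() pair is needed because request processing can walk the memslot array, which KVM protects with kvm->srcu rather than a lock held across guest entry. For reference, the LoongArch request handler looks roughly like this; the steal-time update is the path that dereferences memslots:

static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return RESUME_GUEST;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		vcpu->arch.vpid = 0;	/* drop VPID so a fresh one is allocated */

	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
		kvm_update_stolen_time(vcpu);	/* guest memory access: needs kvm->srcu */

	return RESUME_GUEST;
}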
@@ -292,6 +298,10 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
+ if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
+ kvm_lose_pmu(vcpu);
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+ }
/* make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
local_irq_enable();
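The added PMU handling covers the bail-out path: when a pending request or work item aborts the entry, the CPU is about to run host code with interrupts enabled, and leaving the guest PMU CSRs live would let host perf clobber them. The same lines restated with explanatory comments (the surrounding retry loop in kvm_pre_enter_guest() re-runs the whole sequence afterwards):

if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
	if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
		/* Spill the guest PMU state and restore the host's ... */
		kvm_lose_pmu(vcpu);
		/* ... then re-queue KVM_REQ_PMU so kvm_check_pmu()
		 * reloads the guest context on the retried entry. */
		kvm_make_request(KVM_REQ_PMU, vcpu);
	}
	/* make sure the vcpu mode has been written */
	smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
	local_irq_enable();
}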
@@ -309,7 +319,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int ret = RESUME_GUEST;
unsigned long estat = vcpu->arch.host_estat;
- u32 intr = estat & 0x1fff; /* Ignore NMI */
+ u32 intr = estat & CSR_ESTAT_IS;
u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -359,6 +369,34 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
+ unsigned long val;
+
+ preempt_disable();
+ val = gcsr_read(LOONGARCH_CSR_CRMD);
+ preempt_enable();
+
+ return (val & CSR_PRMD_PPLV) == PLV_KERN;
+}
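This works because on LoongArch the privilege level occupies bits [1:0] of CSR.CRMD while the vCPU is running, with PLV0 the kernel level; CSR_PRMD_PPLV covers the same bit positions, so masking a CRMD value with it extracts the current PLV. Paraphrased field definitions (see asm/loongarch.h):

#define CSR_CRMD_PLV	GENMASK(1, 0)	/* current privilege level */
#define CSR_PRMD_PPLV	GENMASK(1, 0)	/* previous PLV: same bit positions */
#define PLV_KERN	0		/* highest privilege */
#define PLV_USER	3		/* lowest privilege */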
+
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pc;
+}
+
+/*
+ * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
+ * arrived in guest context. For LoongArch64, if the PMU is not passed through
+ * to the VM, any event that arrives while a vCPU is loaded is considered to
+ * be "in guest".
+ */
+bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+ return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
+}
+#endif
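For context, these two hooks feed the generic KVM guest-perf callback, which decides how a PMI sample is tagged; paraphrased from virt/kvm/kvm_main.c:

static unsigned int kvm_guest_state(void)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	unsigned int state;

	if (!kvm_arch_pmi_in_guest(vcpu))
		return 0;			/* attribute the sample to the host */

	state = PERF_GUEST_ACTIVE;
	if (!kvm_arch_vcpu_in_kernel(vcpu))
		state |= PERF_GUEST_USER;	/* sample hit guest user mode */

	return state;
}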
+
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
return false;
}
@@ -621,8 +659,7 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
*v = GENMASK(31, 0);
return 0;
case LOONGARCH_CPUCFG1:
- /* CPUCFG1_MSGINT is not supported by KVM */
- *v = GENMASK(25, 0);
+ *v = GENMASK(26, 0);
return 0;
case LOONGARCH_CPUCFG2:
/* CPUCFG2 features unconditionally supported by KVM */
@@ -643,6 +680,8 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
*v |= CPUCFG2_ARMBT;
if (cpu_has_lbt_mips)
*v |= CPUCFG2_MIPSBT;
+ if (cpu_has_ptw)
+ *v |= CPUCFG2_PTW;
return 0;
case LOONGARCH_CPUCFG3:
@@ -688,6 +727,10 @@ static int kvm_check_cpucfg(int id, u64 val)
return -EINVAL;
switch (id) {
+ case LOONGARCH_CPUCFG1:
+ if ((val & CPUCFG1_MSGINT) && !cpu_has_msgint)
+ return -EINVAL;
+ return 0;
case LOONGARCH_CPUCFG2:
if (!(val & CPUCFG2_LLFTP))
/* Guests must have a constant timer */
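With bit 26 (CPUCFG1_MSGINT) now part of the writable mask, the new CPUCFG1 case ensures userspace can only advertise message interrupts when the host actually has them. A hypothetical VMM snippet (vcpu_fd and val are assumed; KVM_IOC_CPUCFG() is the LoongArch reg-id helper from the uapi header):

struct kvm_one_reg reg = {
	.id   = KVM_IOC_CPUCFG(1),	/* CPUCFG word 1 */
	.addr = (__u64)&val,
};

ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
val |= CPUCFG1_MSGINT;			/* bit 26: message interrupts */
if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
	val &= ~CPUCFG1_MSGINT;		/* host rejected it with -EINVAL */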
@@ -872,6 +915,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
vcpu->arch.st.guest_addr = 0;
memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
+
+ /*
+ * On vCPU reset, clear the ESTAT and GINTC registers here; the
+ * other CSR registers are cleared with function _kvm_setcsr().
+ */
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
break;
default:
ret = -EINVAL;
@@ -1239,9 +1289,11 @@ int kvm_own_lbt(struct kvm_vcpu *vcpu)
return -EINVAL;
preempt_disable();
- set_csr_euen(CSR_EUEN_LBTEN);
- _restore_lbt(&vcpu->arch.lbt);
- vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
+ if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
+ set_csr_euen(CSR_EUEN_LBTEN);
+ _restore_lbt(&vcpu->arch.lbt);
+ vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
+ }
preempt_enable();
return 0;
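The guard makes kvm_own_lbt() idempotent: KVM_LARCH_LBT acts as an ownership flag, so EUEN.LBTEN is enabled and the LBT registers restored only on the first call after the flag was cleared. For reference, the companion release path looks roughly like this (sketch):

static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
		_save_lbt(&vcpu->arch.lbt);	/* spill guest LBT registers */
		clear_csr_euen(CSR_EUEN_LBTEN);	/* turn LBT back off */
		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
	}
	preempt_enable();
}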
@@ -1424,8 +1476,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
return 0;
}
-long kvm_arch_vcpu_async_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct kvm_vcpu *vcpu = filp->private_data;
@@ -1457,8 +1509,17 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.vpid = 0;
vcpu->arch.flush_gpa = INVALID_GPA;
- hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
- vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
+ hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_PINNED_HARD);
+
+ /* Get the physical address (PA) of the PGD for the kvm hypervisor */
+ vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
+
+ /*
+ * Get the PGD for the primary MMU; a virtual address is used, since
+ * there is a memory access after loading from CSR_PGD in the TLB
+ * exception fast path.
+ */
+ vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
vcpu->arch.handle_exit = kvm_handle_exit;
vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
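The swtimer change follows the tree-wide conversion to hrtimer_setup(), which takes the callback as an argument so the timer is never initialized without a handler attached; the old pattern assigned .function in a second step:

/* before: two-step init, handler attached after the fact */
hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

/* after: one call, callback wired at initialization time */
hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
	      HRTIMER_MODE_ABS_PINNED_HARD);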
@@ -1546,14 +1607,8 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
/* Restore timer state regardless */
kvm_restore_timer(vcpu);
-
- /* Control guest page CCA attribute */
- change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
- /* Restore hardware PMU CSRs */
- kvm_restore_pmu(vcpu);
-
/* Don't bother restoring registers multiple times unless necessary */
if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
return 0;
@@ -1605,6 +1660,12 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
+ if (cpu_has_msgint) {
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
+ }
/* Restore Root.GINTC from unused Guest.GINTC register */
write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
@@ -1694,6 +1755,12 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
+ if (cpu_has_msgint) {
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
+ }
vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
@@ -1730,9 +1797,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->mmio_needed = 0;
}
- if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
+ switch (run->exit_reason) {
+ case KVM_EXIT_HYPERCALL:
+ kvm_complete_user_service(vcpu, run);
+ break;
+ case KVM_EXIT_LOONGARCH_IOCSR:
if (!run->iocsr_io.is_write)
kvm_complete_iocsr_read(vcpu, run);
+ break;
}
if (!vcpu->wants_to_run)
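These completion calls run on the next KVM_RUN after userspace has serviced the exit: for an IOCSR read the VMM fills run->iocsr_io.data, and kvm_complete_iocsr_read() copies the result into the guest register before the vCPU resumes; KVM_EXIT_HYPERCALL is completed analogously by kvm_complete_user_service(). A hypothetical VMM run loop sketch (emul_iocsr_read()/emul_iocsr_write() are stand-ins for the device emulation):

for (;;) {
	ioctl(vcpu_fd, KVM_RUN, 0);

	switch (run->exit_reason) {
	case KVM_EXIT_LOONGARCH_IOCSR:
		if (run->iocsr_io.is_write)
			emul_iocsr_write(run->iocsr_io.phys_addr,
					 run->iocsr_io.data, run->iocsr_io.len);
		else
			/* result is consumed by kvm_complete_iocsr_read()
			 * on the next KVM_RUN */
			emul_iocsr_read(run->iocsr_io.phys_addr,
					run->iocsr_io.data, run->iocsr_io.len);
		break;
	}
}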