Diffstat (limited to 'arch/riscv/kvm/vcpu.c')
-rw-r--r--	arch/riscv/kvm/vcpu.c	1014
1 file changed, 371 insertions(+), 643 deletions(-)
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index d12ef99901fc..a55a95da54d0 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -7,34 +7,39 @@
  */
 
 #include <linux/bitops.h>
-#include <linux/entry-kvm.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
-#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/signal.h>
 #include <linux/fs.h>
 #include <linux/kvm_host.h>
-#include <asm/csr.h>
 #include <asm/cacheflush.h>
-#include <asm/hwcap.h>
-#include <asm/sbi.h>
-#include <asm/vector.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_nacl.h>
 #include <asm/kvm_vcpu_vector.h>
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	KVM_GENERIC_VCPU_STATS(),
 	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
 	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
+	STATS_DESC_COUNTER(VCPU, wrs_exit_stat),
 	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
 	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
 	STATS_DESC_COUNTER(VCPU, csr_exit_user),
 	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
 	STATS_DESC_COUNTER(VCPU, signal_exits),
-	STATS_DESC_COUNTER(VCPU, exits)
+	STATS_DESC_COUNTER(VCPU, exits),
+	STATS_DESC_COUNTER(VCPU, instr_illegal_exits),
+	STATS_DESC_COUNTER(VCPU, load_misaligned_exits),
+	STATS_DESC_COUNTER(VCPU, store_misaligned_exits),
+	STATS_DESC_COUNTER(VCPU, load_access_exits),
+	STATS_DESC_COUNTER(VCPU, store_access_exits),
 };
 
 const struct kvm_stats_header kvm_vcpu_stats_header = {
@@ -46,85 +51,33 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 		       sizeof(kvm_vcpu_stats_desc),
 };
 
-#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)
-
-#define KVM_ISA_EXT_ARR(ext)		[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
-
-/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
-static const unsigned long kvm_isa_ext_arr[] = {
-	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
-	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
-	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
-	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
-	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
-	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
-	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
-	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
-
-	KVM_ISA_EXT_ARR(SSAIA),
-	KVM_ISA_EXT_ARR(SSTC),
-	KVM_ISA_EXT_ARR(SVINVAL),
-	KVM_ISA_EXT_ARR(SVNAPOT),
-	KVM_ISA_EXT_ARR(SVPBMT),
-	KVM_ISA_EXT_ARR(ZBB),
-	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
-	KVM_ISA_EXT_ARR(ZICBOM),
-	KVM_ISA_EXT_ARR(ZICBOZ),
-};
-
-static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
+static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu,
+					 bool kvm_sbi_reset)
 {
-	unsigned long i;
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+	void *vector_datap = cntx->vector.datap;
 
-	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
-		if (kvm_isa_ext_arr[i] == base_ext)
-			return i;
-	}
+	memset(cntx, 0, sizeof(*cntx));
+	memset(csr, 0, sizeof(*csr));
+	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
 
-	return KVM_RISCV_ISA_EXT_MAX;
-}
+	/* Restore datap as it's not a part of the guest context. */
+	cntx->vector.datap = vector_datap;
 
-static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
-{
-	switch (ext) {
-	case KVM_RISCV_ISA_EXT_H:
-		return false;
-	case KVM_RISCV_ISA_EXT_V:
-		return riscv_v_vstate_ctrl_user_allowed();
-	default:
-		break;
-	}
-
-	return true;
-}
+	if (kvm_sbi_reset)
+		kvm_riscv_vcpu_sbi_load_reset_state(vcpu);
 
-static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
-{
-	switch (ext) {
-	case KVM_RISCV_ISA_EXT_A:
-	case KVM_RISCV_ISA_EXT_C:
-	case KVM_RISCV_ISA_EXT_I:
-	case KVM_RISCV_ISA_EXT_M:
-	case KVM_RISCV_ISA_EXT_SSAIA:
-	case KVM_RISCV_ISA_EXT_SSTC:
-	case KVM_RISCV_ISA_EXT_SVINVAL:
-	case KVM_RISCV_ISA_EXT_SVNAPOT:
-	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
-	case KVM_RISCV_ISA_EXT_ZBB:
-		return false;
-	default:
-		break;
-	}
+	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
+	cntx->sstatus = SR_SPP | SR_SPIE;
 
-	return true;
+	cntx->hstatus |= HSTATUS_VTW;
+	cntx->hstatus |= HSTATUS_SPVP;
+	cntx->hstatus |= HSTATUS_SPV;
 }
 
-static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
+static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset)
 {
-	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
-	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
 	bool loaded;
 
 	/**
@@ -139,9 +92,7 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.last_exit_cpu = -1;
 
-	memcpy(csr, reset_csr, sizeof(*csr));
-
-	memcpy(cntx, reset_cntx, sizeof(*cntx));
+	kvm_riscv_vcpu_context_reset(vcpu, kvm_sbi_reset);
 
 	kvm_riscv_vcpu_fp_reset(vcpu);
 
@@ -160,6 +111,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 	vcpu->arch.hfence_tail = 0;
 	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
 
+	kvm_riscv_vcpu_sbi_reset(vcpu);
+
 	/* Reset the guest CSRs for hotplug usecase */
 	if (loaded)
 		kvm_arch_vcpu_load(vcpu, smp_processor_id());
@@ -174,22 +127,18 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	struct kvm_cpu_context *cntx;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
-	unsigned long host_isa, i;
+
+	spin_lock_init(&vcpu->arch.mp_state_lock);
 
 	/* Mark this VCPU never ran */
 	vcpu->arch.ran_atleast_once = false;
+
+	vcpu->arch.cfg.hedeleg = KVM_HEDELEG_DEFAULT;
 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
 
 	/* Setup ISA features available to VCPU */
-	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
-		host_isa = kvm_isa_ext_arr[i];
-		if (__riscv_isa_extension_available(NULL, host_isa) &&
-		    kvm_riscv_vcpu_isa_enable_allowed(i))
-			set_bit(host_isa, vcpu->arch.isa);
-	}
+	kvm_riscv_vcpu_setup_isa(vcpu);
 
 	/* Setup vendor, arch, and implementation details */
 	vcpu->arch.mvendorid = sbi_get_mvendorid();
@@ -199,19 +148,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* Setup VCPU hfence queue */
 	spin_lock_init(&vcpu->arch.hfence_lock);
 
-	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
-	cntx = &vcpu->arch.guest_reset_context;
-	cntx->sstatus = SR_SPP | SR_SPIE;
-	cntx->hstatus = 0;
-	cntx->hstatus |= HSTATUS_VTW;
-	cntx->hstatus |= HSTATUS_SPVP;
-	cntx->hstatus |= HSTATUS_SPV;
-
-	if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
-		return -ENOMEM;
+	spin_lock_init(&vcpu->arch.reset_state.lock);
 
-	/* By default, make CY, TM, and IR counters accessible in VU mode */
-	reset_csr->scounteren = 0x7;
+	rc = kvm_riscv_vcpu_alloc_vector_context(vcpu);
+	if (rc)
+		return rc;
 
 	/* Setup VCPU timer */
 	kvm_riscv_vcpu_timer_init(vcpu);
@@ -220,12 +161,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_pmu_init(vcpu);
 
 	/* Setup VCPU AIA */
-	rc = kvm_riscv_vcpu_aia_init(vcpu);
-	if (rc)
-		return rc;
+	kvm_riscv_vcpu_aia_init(vcpu);
+
+	/*
+	 * Setup SBI extensions
+	 * NOTE: This must be the last thing to be initialized.
+	 */
+	kvm_riscv_vcpu_sbi_init(vcpu);
 
 	/* Reset VCPU */
-	kvm_riscv_reset_vcpu(vcpu);
+	kvm_riscv_reset_vcpu(vcpu, false);
 
 	return 0;
 }
@@ -243,6 +188,8 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	kvm_riscv_vcpu_sbi_deinit(vcpu);
+
 	/* Cleanup VCPU AIA context */
 	kvm_riscv_vcpu_aia_deinit(vcpu);
 
@@ -263,20 +210,10 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return kvm_riscv_vcpu_timer_pending(vcpu);
 }
 
-void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
-{
-	kvm_riscv_aia_wakeon_hgei(vcpu, true);
-}
-
-void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
-	kvm_riscv_aia_wakeon_hgei(vcpu, false);
-}
-
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
-		!vcpu->arch.power_off && !vcpu->arch.pause);
+	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1ULL) &&
+		!kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
@@ -289,457 +226,20 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
 }
 
-vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
-{
-	return VM_FAULT_SIGBUS;
-}
-
-static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
-					 const struct kvm_one_reg *reg)
-{
-	unsigned long __user *uaddr =
-			(unsigned long __user *)(unsigned long)reg->addr;
-	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-					    KVM_REG_SIZE_MASK |
-					    KVM_REG_RISCV_CONFIG);
-	unsigned long reg_val;
-
-	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-		return -EINVAL;
-
-	switch (reg_num) {
-	case KVM_REG_RISCV_CONFIG_REG(isa):
-		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
-		break;
-	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
-		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
-			return -EINVAL;
-		reg_val = riscv_cbom_block_size;
-		break;
-	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
-		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
-			return -EINVAL;
-		reg_val = riscv_cboz_block_size;
-		break;
-	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
-		reg_val = vcpu->arch.mvendorid;
-		break;
-	case KVM_REG_RISCV_CONFIG_REG(marchid):
-		reg_val = vcpu->arch.marchid;
-		break;
-	case KVM_REG_RISCV_CONFIG_REG(mimpid):
-		reg_val = vcpu->arch.mimpid;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
-					 const struct kvm_one_reg *reg)
-{
-	unsigned long __user *uaddr =
-			(unsigned long __user *)(unsigned long)reg->addr;
-	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-					    KVM_REG_SIZE_MASK |
-					    KVM_REG_RISCV_CONFIG);
-	unsigned long i, isa_ext, reg_val;
-
-	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-		return -EINVAL;
-
-	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
-		return -EFAULT;
-
-	switch (reg_num) {
-	case KVM_REG_RISCV_CONFIG_REG(isa):
-		/*
-		 * This ONE REG interface is only defined for
-		 * single letter extensions.
-		 */
-		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
-			return -EINVAL;
-
-		if (!vcpu->arch.ran_atleast_once) {
-			/* Ignore the enable/disable request for certain extensions */
-			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
-				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
-				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
-					reg_val &= ~BIT(i);
-					continue;
-				}
-				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
-					if (reg_val & BIT(i))
-						reg_val &= ~BIT(i);
-				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
-					if (!(reg_val & BIT(i)))
-						reg_val |= BIT(i);
-			}
-			reg_val &= riscv_isa_extension_base(NULL);
-			/* Do not modify anything beyond single letter extensions */
-			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
-				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
-			vcpu->arch.isa[0] = reg_val;
-			kvm_riscv_vcpu_fp_reset(vcpu);
-		} else {
-			return -EOPNOTSUPP;
-		}
-		break;
-	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
-		return -EOPNOTSUPP;
-	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
-		return -EOPNOTSUPP;
-	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
-		if (!vcpu->arch.ran_atleast_once)
-			vcpu->arch.mvendorid = reg_val;
-		else
-			return -EBUSY;
-		break;
-	case KVM_REG_RISCV_CONFIG_REG(marchid):
-		if (!vcpu->arch.ran_atleast_once)
-			vcpu->arch.marchid = reg_val;
-		else
-			return -EBUSY;
-		break;
-	case KVM_REG_RISCV_CONFIG_REG(mimpid):
-		if (!vcpu->arch.ran_atleast_once)
-			vcpu->arch.mimpid = reg_val;
-		else
-			return -EBUSY;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
-				       const struct kvm_one_reg *reg)
-{
-	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-	unsigned long __user *uaddr =
-			(unsigned long __user *)(unsigned long)reg->addr;
-	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-					    KVM_REG_SIZE_MASK |
-					    KVM_REG_RISCV_CORE);
-	unsigned long reg_val;
-
-	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-		return -EINVAL;
-	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
-		return -EINVAL;
-
-	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
-		reg_val = cntx->sepc;
-	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
-		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
-		reg_val = ((unsigned long *)cntx)[reg_num];
-	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
-		reg_val = (cntx->sstatus & SR_SPP) ?
-				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
-	else
-		return -EINVAL;
-
-	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
-				       const struct kvm_one_reg *reg)
-{
-	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-	unsigned long __user *uaddr =
-			(unsigned long __user *)(unsigned long)reg->addr;
-	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-					    KVM_REG_SIZE_MASK |
-					    KVM_REG_RISCV_CORE);
-	unsigned long reg_val;
-
-	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-		return -EINVAL;
-	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
-		return -EINVAL;
-
-	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
-		return -EFAULT;
-
-	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
-		cntx->sepc = reg_val;
-	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
-		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
-		((unsigned long *)cntx)[reg_num] = reg_val;
-	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
-		if (reg_val == KVM_RISCV_MODE_S)
-			cntx->sstatus |= SR_SPP;
-		else
-			cntx->sstatus &= ~SR_SPP;
-	} else
-		return -EINVAL;
-
-	return 0;
-}
-
-static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
-					  unsigned long reg_num,
-					  unsigned long *out_val)
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-
-	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
-		return -EINVAL;
-
-	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
-		kvm_riscv_vcpu_flush_interrupts(vcpu);
-		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
-		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
-	} else
-		*out_val = ((unsigned long *)csr)[reg_num];
-
-	return 0;
-}
-
-static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
-						 unsigned long reg_num,
-						 unsigned long reg_val)
-{
-	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-
-	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
-		return -EINVAL;
-
-	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
-		reg_val &= VSIP_VALID_MASK;
-		reg_val <<= VSIP_TO_HVIP_SHIFT;
-	}
-
-	((unsigned long *)csr)[reg_num] = reg_val;
-
-	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
-		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
-
-	return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
-				      const struct kvm_one_reg *reg)
-{
-	int rc;
-	unsigned long __user *uaddr =
-			(unsigned long __user *)(unsigned long)reg->addr;
-	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-					    KVM_REG_SIZE_MASK |
-					    KVM_REG_RISCV_CSR);
-	unsigned long reg_val, reg_subtype;
-
-	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-		return -EINVAL;
-
-	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
-	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
-	switch (reg_subtype) {
-	case KVM_REG_RISCV_CSR_GENERAL:
-		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
-		break;
-	case KVM_REG_RISCV_CSR_AIA:
-		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
-		break;
-	default:
-		rc = -EINVAL;
-		break;
-	}
-	if (rc)
-		return rc;
-
-	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
-				      const struct kvm_one_reg *reg)
-{
-	int rc;
-	unsigned long __user *uaddr =
-			(unsigned long __user *)(unsigned long)reg->addr;
-	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-					    KVM_REG_SIZE_MASK |
-					    KVM_REG_RISCV_CSR);
-	unsigned long reg_val, reg_subtype;
-
-	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-		return -EINVAL;
-
-	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
-		return -EFAULT;
-
-	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
-	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
-	switch (reg_subtype) {
-	case KVM_REG_RISCV_CSR_GENERAL:
-		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
-		break;
-	case KVM_REG_RISCV_CSR_AIA:
-		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
-		break;
-	default:
-		rc = -EINVAL;
-		break;
-	}
-	if (rc)
-		return rc;
-
-	return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
-					  const struct kvm_one_reg *reg)
-{
-	unsigned long __user *uaddr =
-			(unsigned long __user *)(unsigned long)reg->addr;
-	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-					    KVM_REG_SIZE_MASK |
-					    KVM_REG_RISCV_ISA_EXT);
-	unsigned long reg_val = 0;
-	unsigned long host_isa_ext;
-
-	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-		return -EINVAL;
-
-	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
-	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
-		return -EINVAL;
-
-	host_isa_ext = kvm_isa_ext_arr[reg_num];
-	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
-		reg_val = 1; /* Mark the given extension as available */
-
-	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
-					  const struct kvm_one_reg *reg)
-{
-	unsigned long __user *uaddr =
-			(unsigned long __user *)(unsigned long)reg->addr;
-	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-					    KVM_REG_SIZE_MASK |
-					    KVM_REG_RISCV_ISA_EXT);
-	unsigned long reg_val;
-	unsigned long host_isa_ext;
-
-	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-		return -EINVAL;
-
-	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
-	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
-		return -EINVAL;
-
-	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
-		return -EFAULT;
-
-	host_isa_ext = kvm_isa_ext_arr[reg_num];
-	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
-		return -EOPNOTSUPP;
-
-	if (!vcpu->arch.ran_atleast_once) {
-		/*
-		 * All multi-letter extension and a few single letter
-		 * extension can be disabled
-		 */
-		if (reg_val == 1 &&
-		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
-			set_bit(host_isa_ext, vcpu->arch.isa);
-		else if (!reg_val &&
-			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
-			clear_bit(host_isa_ext, vcpu->arch.isa);
-		else
-			return -EINVAL;
-		kvm_riscv_vcpu_fp_reset(vcpu);
-	} else {
-		return -EOPNOTSUPP;
-	}
-
-	return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
-				  const struct kvm_one_reg *reg)
-{
-	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
-	case KVM_REG_RISCV_CONFIG:
-		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
-	case KVM_REG_RISCV_CORE:
-		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
-	case KVM_REG_RISCV_CSR:
-		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
-	case KVM_REG_RISCV_TIMER:
-		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
-	case KVM_REG_RISCV_FP_F:
-		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
-						 KVM_REG_RISCV_FP_F);
-	case KVM_REG_RISCV_FP_D:
-		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
-						 KVM_REG_RISCV_FP_D);
-	case KVM_REG_RISCV_ISA_EXT:
-		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
-	case KVM_REG_RISCV_SBI_EXT:
-		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
-	case KVM_REG_RISCV_VECTOR:
-		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg,
-						     KVM_REG_RISCV_VECTOR);
-	default:
-		break;
-	}
-
-	return -EINVAL;
+	return vcpu->arch.guest_context.sepc;
 }
+#endif
 
-static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
-				  const struct kvm_one_reg *reg)
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 {
-	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
-	case KVM_REG_RISCV_CONFIG:
-		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
-	case KVM_REG_RISCV_CORE:
-		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
-	case KVM_REG_RISCV_CSR:
-		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
-	case KVM_REG_RISCV_TIMER:
-		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
-	case KVM_REG_RISCV_FP_F:
-		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
-						 KVM_REG_RISCV_FP_F);
-	case KVM_REG_RISCV_FP_D:
-		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
-						 KVM_REG_RISCV_FP_D);
-	case KVM_REG_RISCV_ISA_EXT:
-		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
-	case KVM_REG_RISCV_SBI_EXT:
-		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
-	case KVM_REG_RISCV_VECTOR:
-		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg,
-						     KVM_REG_RISCV_VECTOR);
-	default:
-		break;
-	}
-
-	return -EINVAL;
+	return VM_FAULT_SIGBUS;
 }
 
-long kvm_arch_vcpu_async_ioctl(struct file *filp,
-			       unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
+				  unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
@@ -781,6 +281,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
 		break;
 	}
+	case KVM_GET_REG_LIST: {
+		struct kvm_reg_list __user *user_list = argp;
+		struct kvm_reg_list reg_list;
+		unsigned int n;
+
+		r = -EFAULT;
+		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+			break;
+		n = reg_list.n;
+		reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
+		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+			break;
+		r = -E2BIG;
+		if (n < reg_list.n)
+			break;
+		r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
+		break;
+	}
 	default:
 		break;
 	}
@@ -850,10 +368,10 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
 	/* Read current HVIP and VSIE CSRs */
-	csr->vsie = csr_read(CSR_VSIE);
+	csr->vsie = ncsr_read(CSR_VSIE);
 
 	/* Sync-up HVIP.VSSIP bit changes does by Guest */
-	hvip = csr_read(CSR_HVIP);
+	hvip = ncsr_read(CSR_HVIP);
 	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
 		if (hvip & (1UL << IRQ_VS_SOFT)) {
 			if (!test_and_set_bit(IRQ_VS_SOFT,
@@ -866,6 +384,13 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
 		}
 	}
 
+	/* Sync up the HVIP.LCOFIP bit changes (only clear) by the guest */
+	if ((csr->hvip ^ hvip) & (1UL << IRQ_PMU_OVF)) {
+		if (!(hvip & (1UL << IRQ_PMU_OVF)) &&
+		    !test_and_set_bit(IRQ_PMU_OVF, v->irqs_pending_mask))
+			clear_bit(IRQ_PMU_OVF, v->irqs_pending);
+	}
+
 	/* Sync-up AIA high interrupts */
 	kvm_riscv_vcpu_aia_sync_interrupts(vcpu);
 
@@ -883,7 +408,8 @@ int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
 	if (irq < IRQ_LOCAL_MAX &&
 	    irq != IRQ_VS_SOFT &&
 	    irq != IRQ_VS_TIMER &&
-	    irq != IRQ_VS_EXT)
+	    irq != IRQ_VS_EXT &&
+	    irq != IRQ_PMU_OVF)
 		return -EINVAL;
 
 	set_bit(irq, vcpu->arch.irqs_pending);
@@ -898,14 +424,15 @@ int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
 int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
 {
 	/*
-	 * We only allow VS-mode software, timer, and external
+	 * We only allow VS-mode software, timer, counter overflow and external
 	 * interrupts when irq is one of the local interrupts
 	 * defined by RISC-V privilege specification.
 	 */
 	if (irq < IRQ_LOCAL_MAX &&
 	    irq != IRQ_VS_SOFT &&
 	    irq != IRQ_VS_TIMER &&
-	    irq != IRQ_VS_EXT)
+	    irq != IRQ_VS_EXT &&
+	    irq != IRQ_PMU_OVF)
 		return -EINVAL;
 
 	clear_bit(irq, vcpu->arch.irqs_pending);
@@ -930,26 +457,42 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
 	return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
 }
 
-void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
+void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.power_off = true;
+	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
 	kvm_make_request(KVM_REQ_SLEEP, vcpu);
 	kvm_vcpu_kick(vcpu);
 }
 
-void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.power_off = false;
+	spin_lock(&vcpu->arch.mp_state_lock);
+	__kvm_riscv_vcpu_power_off(vcpu);
+	spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
+void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+{
+	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
 	kvm_vcpu_wake_up(vcpu);
 }
 
+void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+{
+	spin_lock(&vcpu->arch.mp_state_lock);
+	__kvm_riscv_vcpu_power_on(vcpu);
+	spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
+bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu)
+{
+	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
+}
+
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
-	if (vcpu->arch.power_off)
-		mp_state->mp_state = KVM_MP_STATE_STOPPED;
-	else
-		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+	*mp_state = READ_ONCE(vcpu->arch.mp_state);
 
 	return 0;
 }
@@ -959,66 +502,127 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 {
 	int ret = 0;
 
+	spin_lock(&vcpu->arch.mp_state_lock);
+
 	switch (mp_state->mp_state) {
 	case KVM_MP_STATE_RUNNABLE:
-		vcpu->arch.power_off = false;
+		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
 		break;
 	case KVM_MP_STATE_STOPPED:
-		kvm_riscv_vcpu_power_off(vcpu);
+		__kvm_riscv_vcpu_power_off(vcpu);
+		break;
+	case KVM_MP_STATE_INIT_RECEIVED:
+		if (vcpu->kvm->arch.mp_state_reset)
+			kvm_riscv_reset_vcpu(vcpu, false);
+		else
+			ret = -EINVAL;
 		break;
 	default:
 		ret = -EINVAL;
 	}
 
+	spin_unlock(&vcpu->arch.mp_state_lock);
+
 	return ret;
 }
 
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 					struct kvm_guest_debug *dbg)
 {
-	/* TODO; To be implemented later. */
-	return -EINVAL;
+	if (dbg->control & KVM_GUESTDBG_ENABLE) {
+		vcpu->guest_debug = dbg->control;
+		vcpu->arch.cfg.hedeleg &= ~BIT(EXC_BREAKPOINT);
+	} else {
+		vcpu->guest_debug = 0;
+		vcpu->arch.cfg.hedeleg |= BIT(EXC_BREAKPOINT);
+	}
+
+	return 0;
 }
 
-static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
+static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
 {
-	u64 henvcfg = 0;
+	const unsigned long *isa = vcpu->arch.isa;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
 	if (riscv_isa_extension_available(isa, SVPBMT))
-		henvcfg |= ENVCFG_PBMTE;
+		cfg->henvcfg |= ENVCFG_PBMTE;
 
 	if (riscv_isa_extension_available(isa, SSTC))
-		henvcfg |= ENVCFG_STCE;
+		cfg->henvcfg |= ENVCFG_STCE;
 
 	if (riscv_isa_extension_available(isa, ZICBOM))
-		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
+		cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
 
 	if (riscv_isa_extension_available(isa, ZICBOZ))
-		henvcfg |= ENVCFG_CBZE;
+		cfg->henvcfg |= ENVCFG_CBZE;
+
+	if (riscv_isa_extension_available(isa, SVADU) &&
+	    !riscv_isa_extension_available(isa, SVADE))
+		cfg->henvcfg |= ENVCFG_ADUE;
+
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+		cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
+		if (riscv_isa_extension_available(isa, SSAIA))
+			cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
+					  SMSTATEEN0_AIA |
+					  SMSTATEEN0_AIA_ISEL;
+		if (riscv_isa_extension_available(isa, SMSTATEEN))
+			cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
+	}
 
-	csr_write(CSR_HENVCFG, henvcfg);
-#ifdef CONFIG_32BIT
-	csr_write(CSR_HENVCFGH, henvcfg >> 32);
-#endif
+	if (vcpu->guest_debug)
+		cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	void *nsh;
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+	if (kvm_riscv_nacl_sync_csr_available()) {
+		nsh = nacl_shmem();
+		nacl_csr_write(nsh, CSR_VSSTATUS, csr->vsstatus);
+		nacl_csr_write(nsh, CSR_VSIE, csr->vsie);
+		nacl_csr_write(nsh, CSR_VSTVEC, csr->vstvec);
+		nacl_csr_write(nsh, CSR_VSSCRATCH, csr->vsscratch);
+		nacl_csr_write(nsh, CSR_VSEPC, csr->vsepc);
+		nacl_csr_write(nsh, CSR_VSCAUSE, csr->vscause);
+		nacl_csr_write(nsh, CSR_VSTVAL, csr->vstval);
+		nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
+		nacl_csr_write(nsh, CSR_HVIP, csr->hvip);
+		nacl_csr_write(nsh, CSR_VSATP, csr->vsatp);
+		nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
+		if (IS_ENABLED(CONFIG_32BIT))
+			nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
+		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+			nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
+			if (IS_ENABLED(CONFIG_32BIT))
+				nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+		}
+	} else {
+		csr_write(CSR_VSSTATUS, csr->vsstatus);
+		csr_write(CSR_VSIE, csr->vsie);
+		csr_write(CSR_VSTVEC, csr->vstvec);
+		csr_write(CSR_VSSCRATCH, csr->vsscratch);
+		csr_write(CSR_VSEPC, csr->vsepc);
+		csr_write(CSR_VSCAUSE, csr->vscause);
+		csr_write(CSR_VSTVAL, csr->vstval);
+		csr_write(CSR_HEDELEG, cfg->hedeleg);
+		csr_write(CSR_HVIP, csr->hvip);
+		csr_write(CSR_VSATP, csr->vsatp);
+		csr_write(CSR_HENVCFG, cfg->henvcfg);
+		if (IS_ENABLED(CONFIG_32BIT))
+			csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
+		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+			csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+			if (IS_ENABLED(CONFIG_32BIT))
+				csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+		}
+	}
 
-	csr_write(CSR_VSSTATUS, csr->vsstatus);
-	csr_write(CSR_VSIE, csr->vsie);
-	csr_write(CSR_VSTVEC, csr->vstvec);
-	csr_write(CSR_VSSCRATCH, csr->vsscratch);
-	csr_write(CSR_VSEPC, csr->vsepc);
-	csr_write(CSR_VSCAUSE, csr->vscause);
-	csr_write(CSR_VSTVAL, csr->vstval);
-	csr_write(CSR_HVIP, csr->hvip);
-	csr_write(CSR_VSATP, csr->vsatp);
-
-	kvm_riscv_vcpu_update_config(vcpu->arch.isa);
-
-	kvm_riscv_gstage_update_hgatp(vcpu);
+	kvm_riscv_mmu_update_hgatp(vcpu);
 
 	kvm_riscv_vcpu_timer_restore(vcpu);
 
@@ -1031,11 +635,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	kvm_riscv_vcpu_aia_load(vcpu, cpu);
 
+	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+
 	vcpu->cpu = cpu;
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	void *nsh;
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
 	vcpu->cpu = -1;
@@ -1051,18 +658,38 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 					 vcpu->arch.isa);
 	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);
 
-	csr->vsstatus = csr_read(CSR_VSSTATUS);
-	csr->vsie = csr_read(CSR_VSIE);
-	csr->vstvec = csr_read(CSR_VSTVEC);
-	csr->vsscratch = csr_read(CSR_VSSCRATCH);
-	csr->vsepc = csr_read(CSR_VSEPC);
-	csr->vscause = csr_read(CSR_VSCAUSE);
-	csr->vstval = csr_read(CSR_VSTVAL);
-	csr->hvip = csr_read(CSR_HVIP);
-	csr->vsatp = csr_read(CSR_VSATP);
+	if (kvm_riscv_nacl_available()) {
+		nsh = nacl_shmem();
+		csr->vsstatus = nacl_csr_read(nsh, CSR_VSSTATUS);
+		csr->vsie = nacl_csr_read(nsh, CSR_VSIE);
+		csr->vstvec = nacl_csr_read(nsh, CSR_VSTVEC);
+		csr->vsscratch = nacl_csr_read(nsh, CSR_VSSCRATCH);
+		csr->vsepc = nacl_csr_read(nsh, CSR_VSEPC);
+		csr->vscause = nacl_csr_read(nsh, CSR_VSCAUSE);
+		csr->vstval = nacl_csr_read(nsh, CSR_VSTVAL);
+		csr->hvip = nacl_csr_read(nsh, CSR_HVIP);
+		csr->vsatp = nacl_csr_read(nsh, CSR_VSATP);
+	} else {
+		csr->vsstatus = csr_read(CSR_VSSTATUS);
+		csr->vsie = csr_read(CSR_VSIE);
+		csr->vstvec = csr_read(CSR_VSTVEC);
+		csr->vsscratch = csr_read(CSR_VSSCRATCH);
+		csr->vsepc = csr_read(CSR_VSEPC);
+		csr->vscause = csr_read(CSR_VSCAUSE);
+		csr->vstval = csr_read(CSR_VSTVAL);
+		csr->hvip = csr_read(CSR_HVIP);
+		csr->vsatp = csr_read(CSR_VSATP);
+	}
 }
 
-static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
+/**
+ * kvm_riscv_check_vcpu_requests - check and handle pending vCPU requests
+ * @vcpu:	the VCPU pointer
+ *
+ * Return: 1 if we should enter the guest
+ *	   0 if we should exit to userspace
+ */
+static int kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 {
 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
 
@@ -1070,11 +697,11 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
 			kvm_vcpu_srcu_read_unlock(vcpu);
 			rcuwait_wait_event(wait,
-				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
+				(!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
 				TASK_INTERRUPTIBLE);
 			kvm_vcpu_srcu_read_lock(vcpu);
 
-			if (vcpu->arch.power_off || vcpu->arch.pause) {
+			if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) {
 				/*
 				 * Awaken to handle a signal, request to
 				 * sleep again later.
@@ -1084,37 +711,69 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 		}
 
 		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
-			kvm_riscv_reset_vcpu(vcpu);
+			kvm_riscv_reset_vcpu(vcpu, true);
 
 		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
-			kvm_riscv_gstage_update_hgatp(vcpu);
+			kvm_riscv_mmu_update_hgatp(vcpu);
 
 		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
 			kvm_riscv_fence_i_process(vcpu);
 
-		/*
-		 * The generic KVM_REQ_TLB_FLUSH is same as
-		 * KVM_REQ_HFENCE_GVMA_VMID_ALL
-		 */
-		if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
-			kvm_riscv_hfence_gvma_vmid_all_process(vcpu);
+		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+			kvm_riscv_tlb_flush_process(vcpu);
 
 		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
 			kvm_riscv_hfence_vvma_all_process(vcpu);
 
 		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
 			kvm_riscv_hfence_process(vcpu);
+
+		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+			kvm_riscv_vcpu_record_steal_time(vcpu);
+
+		if (kvm_dirty_ring_check_request(vcpu))
+			return 0;
 	}
+
+	return 1;
 }
 
 static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
-	csr_write(CSR_HVIP, csr->hvip);
+	ncsr_write(CSR_HVIP, csr->hvip);
 	kvm_riscv_vcpu_aia_update_hvip(vcpu);
 }
 
+static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+	vcpu->arch.host_scounteren = csr_swap(CSR_SCOUNTEREN, csr->scounteren);
+	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
+						     smcsr->sstateen0);
+}
+
+static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+	csr->scounteren = csr_swap(CSR_SCOUNTEREN, vcpu->arch.host_scounteren);
+	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+		smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
+					    vcpu->arch.host_sstateen0);
+}
+
 /*
  * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
  * the vCPU is running.
@@ -1122,12 +781,84 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
  * This must be noinstr as instrumentation may make use of RCU, and this is not
  * safe during the EQS.
  */
-static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+					      struct kvm_cpu_trap *trap)
 {
+	void *nsh;
+	struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context;
+	struct kvm_cpu_context *hcntx = &vcpu->arch.host_context;
+
+	/*
+	 * We save trap CSRs (such as SEPC, SCAUSE, STVAL, HTVAL, and
+	 * HTINST) here because we do local_irq_enable() after this
+	 * function in kvm_arch_vcpu_ioctl_run() which can result in
+	 * an interrupt immediately after local_irq_enable() and can
+	 * potentially change trap CSRs.
+	 */
+
+	kvm_riscv_vcpu_swap_in_guest_state(vcpu);
 	guest_state_enter_irqoff();
-	__kvm_riscv_switch_to(&vcpu->arch);
+
+	if (kvm_riscv_nacl_sync_sret_available()) {
+		nsh = nacl_shmem();
+
+		if (kvm_riscv_nacl_autoswap_csr_available()) {
+			hcntx->hstatus =
+				nacl_csr_read(nsh, CSR_HSTATUS);
+			nacl_scratch_write_long(nsh,
+						SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
+						SBI_NACL_SHMEM_AUTOSWAP_HSTATUS,
+						gcntx->hstatus);
+			nacl_scratch_write_long(nsh,
+						SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
+						SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS);
+		} else if (kvm_riscv_nacl_sync_csr_available()) {
+			hcntx->hstatus = nacl_csr_swap(nsh,
+						       CSR_HSTATUS, gcntx->hstatus);
+		} else {
+			hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
+		}
+
+		nacl_scratch_write_longs(nsh,
+					 SBI_NACL_SHMEM_SRET_OFFSET +
+					 SBI_NACL_SHMEM_SRET_X(1),
+					 &gcntx->ra,
+					 SBI_NACL_SHMEM_SRET_X_LAST);
+
+		__kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL,
+					   SBI_EXT_NACL_SYNC_SRET);
+
+		if (kvm_riscv_nacl_autoswap_csr_available()) {
+			nacl_scratch_write_long(nsh,
+						SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
+						0);
+			gcntx->hstatus = nacl_scratch_read_long(nsh,
+								SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
+								SBI_NACL_SHMEM_AUTOSWAP_HSTATUS);
+		} else {
+			gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+		}
+
+		trap->htval = nacl_csr_read(nsh, CSR_HTVAL);
+		trap->htinst = nacl_csr_read(nsh, CSR_HTINST);
+	} else {
+		hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
+
+		__kvm_riscv_switch_to(&vcpu->arch);
+
+		gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+
+		trap->htval = csr_read(CSR_HTVAL);
+		trap->htinst = csr_read(CSR_HTINST);
+	}
+
+	trap->sepc = gcntx->sepc;
+	trap->scause = csr_read(CSR_SCAUSE);
+	trap->stval = csr_read(CSR_STVAL);
+
 	vcpu->arch.last_exit_cpu = vcpu->cpu;
 	guest_state_exit_irqoff();
+	kvm_riscv_vcpu_swap_in_host_state(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
@@ -1136,6 +867,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_trap trap;
 	struct kvm_run *run = vcpu->run;
 
+	if (!vcpu->arch.ran_atleast_once)
+		kvm_riscv_vcpu_setup_config(vcpu);
+
 	/* Mark this VCPU ran at least once */
 	vcpu->arch.ran_atleast_once = true;
 
@@ -1163,7 +897,7 @@
 		return ret;
 	}
 
-	if (run->immediate_exit) {
+	if (!vcpu->wants_to_run) {
 		kvm_vcpu_srcu_read_unlock(vcpu);
 		return -EINTR;
 	}
@@ -1176,14 +910,16 @@
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	while (ret > 0) {
 		/* Check conditions before entering the guest */
-		ret = xfer_to_guest_mode_handle_work(vcpu);
+		ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
 		if (ret)
 			continue;
 		ret = 1;
 
 		kvm_riscv_gstage_vmid_update(vcpu);
 
-		kvm_riscv_check_vcpu_requests(vcpu);
+		ret = kvm_riscv_check_vcpu_requests(vcpu);
+		if (ret <= 0)
+			continue;
 
 		preempt_disable();
 
@@ -1216,8 +952,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		/* Update HVIP CSR for current CPU */
 		kvm_riscv_update_hvip(vcpu);
 
-		if (ret <= 0 ||
-		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
+		if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
 		    kvm_request_pending(vcpu) ||
 		    xfer_to_guest_mode_work_pending()) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -1228,31 +963,22 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		}
 
 		/*
-		 * Cleanup stale TLB enteries
+		 * Sanitize VMID mappings cached (TLB) on current CPU
 		 *
 		 * Note: This should be done after G-stage VMID has been
 		 * updated using kvm_riscv_gstage_vmid_ver_changed()
 		 */
		kvm_riscv_local_tlb_sanitize(vcpu);
 
+		trace_kvm_entry(vcpu);
+
 		guest_timing_enter_irqoff();
 
-		kvm_riscv_vcpu_enter_exit(vcpu);
+		kvm_riscv_vcpu_enter_exit(vcpu, &trap);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		vcpu->stat.exits++;
 
-		/*
-		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
-		 * get an interrupt between __kvm_riscv_switch_to() and
-		 * local_irq_enable() which can potentially change CSRs.
-		 */
-		trap.sepc = vcpu->arch.guest_context.sepc;
-		trap.scause = csr_read(CSR_SCAUSE);
-		trap.stval = csr_read(CSR_STVAL);
-		trap.htval = csr_read(CSR_HTVAL);
-		trap.htinst = csr_read(CSR_HTINST);
-
 		/* Syncup interrupts state with HW */
 		kvm_riscv_vcpu_sync_interrupts(vcpu);
 
@@ -1273,6 +999,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 		local_irq_enable();
 
+		trace_kvm_exit(&trap);
+
 		preempt_enable();
 
 		kvm_vcpu_srcu_read_lock(vcpu);
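
A note on the CSR access pattern in this diff: guest CSR save/restore is routed through ncsr_read()/ncsr_write() and the nacl_csr_*() helpers, which stage accesses in an SBI nested-acceleration (NACL) shared-memory area when one is available and fall back to direct csr_read()/csr_write() otherwise. The stand-alone C sketch below models only that dispatch shape; the shmem layout, the nacl_available flag, and hw_csr_read() are illustrative stand-ins, not the kernel's actual implementation.

/* Minimal user-space model of the NACL CSR read dispatch. */
#include <stdbool.h>
#include <stdio.h>

#define CSR_VSIE 0x204
#define CSR_HVIP 0x645

static bool nacl_available;                 /* would come from an SBI probe */
static unsigned long nacl_shmem_csrs[4096]; /* stand-in for the shared scratch area */

static unsigned long hw_csr_read(unsigned int csr)
{
	/* placeholder for a real (trapping) csr_read instruction */
	(void)csr;
	return 0;
}

static unsigned long ncsr_read(unsigned int csr)
{
	/* NACL path: read the value staged in shared memory, no trap */
	if (nacl_available)
		return nacl_shmem_csrs[csr & 0xfff];
	/* fallback path: direct CSR access */
	return hw_csr_read(csr);
}

int main(void)
{
	nacl_available = true;
	nacl_shmem_csrs[CSR_VSIE & 0xfff] = 0x222;
	printf("vsie=%lx hvip=%lx\n", ncsr_read(CSR_VSIE), ncsr_read(CSR_HVIP));
	return 0;
}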

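The vCPU power-off/power-on rework in the diff follows a common kernel locking split: a bare __kvm_riscv_vcpu_power_off() that assumes arch.mp_state_lock is held, plus a locking wrapper, so that kvm_arch_vcpu_ioctl_set_mpstate() (which already takes the lock) can call the bare variant, while readers check the state lock-free via READ_ONCE(). Below is a minimal user-space model of that split; the pthread mutex and C11 atomic are simplified stand-ins for the kernel's spinlock and WRITE_ONCE()/READ_ONCE(), not the kernel's code.

/* Build with: cc -pthread mpstate.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum mp_state { MP_RUNNABLE, MP_STOPPED };

struct vcpu {
	pthread_mutex_t mp_state_lock;
	_Atomic enum mp_state mp_state;
};

static void __vcpu_power_off(struct vcpu *v) /* caller holds mp_state_lock */
{
	atomic_store(&v->mp_state, MP_STOPPED);
	/* the kernel would also post KVM_REQ_SLEEP and kick the vCPU here */
}

static void vcpu_power_off(struct vcpu *v)   /* locking wrapper */
{
	pthread_mutex_lock(&v->mp_state_lock);
	__vcpu_power_off(v);
	pthread_mutex_unlock(&v->mp_state_lock);
}

static int vcpu_stopped(struct vcpu *v)      /* lock-free reader */
{
	return atomic_load(&v->mp_state) == MP_STOPPED;
}

int main(void)
{
	struct vcpu v = { .mp_state_lock = PTHREAD_MUTEX_INITIALIZER,
			  .mp_state = MP_RUNNABLE };
	vcpu_power_off(&v);
	printf("stopped=%d\n", vcpu_stopped(&v));
	return 0;
}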