Diffstat (limited to 'arch/riscv/kvm/vcpu_sbi_replace.c')
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_replace.c  131
1 file changed, 93 insertions(+), 38 deletions(-)
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 1bc0608a5bfd..506a510b6bff 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -9,22 +9,23 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
-#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_pmu.h>
#include <asm/kvm_vcpu_sbi.h>
static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
- unsigned long *out_val,
- struct kvm_cpu_trap *utrap, bool *exit)
+ struct kvm_vcpu_sbi_return *retdata)
{
- int ret = 0;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
u64 next_cycle;
- if (cp->a6 != SBI_EXT_TIME_SET_TIMER)
- return -EINVAL;
+ if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ return 0;
+ }
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER);
#if __riscv_xlen == 32
next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
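
The reworked handler signature funnels all results through a single retdata argument instead of the old out_val/utrap/exit triple. A minimal sketch of that container, inferred from the fields this diff uses (err_val here, uexit in the SRST hunk below) and the parameters it replaces; the authoritative definition is in asm/kvm_vcpu_sbi.h, included above:

	/* Inferred layout; the field set is an assumption, not copied from the header. */
	struct kvm_vcpu_sbi_return {
		unsigned long out_val;      /* value returned to the guest in a1 */
		unsigned long err_val;      /* SBI error code returned to the guest in a0 */
		struct kvm_cpu_trap *utrap; /* trap to redirect back to the guest, if any */
		bool uexit;                 /* handler requests an exit to userspace */
	};
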
@@ -32,7 +33,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
#endif
kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
- return ret;
+ return 0;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
@@ -42,8 +43,7 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
};
static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
- unsigned long *out_val,
- struct kvm_cpu_trap *utrap, bool *exit)
+ struct kvm_vcpu_sbi_return *retdata)
{
int ret = 0;
unsigned long i;
@@ -51,22 +51,35 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
+ unsigned long hart_bit = 0, sentmask = 0;
- if (cp->a6 != SBI_EXT_IPI_SEND_IPI)
- return -EINVAL;
+ if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ return 0;
+ }
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_IPI_SENT);
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
if (hbase != -1UL) {
if (tmp->vcpu_id < hbase)
continue;
- if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+ hart_bit = tmp->vcpu_id - hbase;
+ if (hart_bit >= __riscv_xlen)
+ goto done;
+ if (!(hmask & (1UL << hart_bit)))
continue;
}
ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
if (ret < 0)
break;
+ sentmask |= 1UL << hart_bit;
+ kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
}
+done:
+ if (hbase != -1UL && (hmask ^ sentmask))
+ retdata->err_val = SBI_ERR_INVALID_PARAM;
+
return ret;
}
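
For orientation, a guest-side sketch of how this handler is reached. Per the SBI calling convention, the extension ID travels in a7, the function ID in a6, arguments in a0..a5, and the error/value pair comes back in a0/a1. The helper below is a simplified stand-in for the kernel's sbi_ecall() (the real one takes six argument registers); the extension and function IDs follow the SBI spec:

	#define SBI_EXT_IPI		0x735049
	#define SBI_EXT_IPI_SEND_IPI	0

	struct sbiret { long error; long value; };

	/* Minimal ecall wrapper covering a0..a3 only; an assumption-level sketch. */
	static struct sbiret sbi_ecall(unsigned long eid, unsigned long fid,
				       unsigned long arg0, unsigned long arg1,
				       unsigned long arg2, unsigned long arg3)
	{
		register unsigned long a0 asm("a0") = arg0;
		register unsigned long a1 asm("a1") = arg1;
		register unsigned long a2 asm("a2") = arg2;
		register unsigned long a3 asm("a3") = arg3;
		register unsigned long a6 asm("a6") = fid;
		register unsigned long a7 asm("a7") = eid;

		asm volatile("ecall"
			     : "+r"(a0), "+r"(a1)
			     : "r"(a2), "r"(a3), "r"(a6), "r"(a7)
			     : "memory");
		return (struct sbiret){ .error = a0, .value = a1 };
	}

	/* Bit N of hmask selects hart hbase + N, matching the loop above. */
	static long ipi_three_harts(unsigned long hbase)
	{
		return sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
				 0x7, hbase, 0, 0).error;
	}

Passing hbase == -1UL instead broadcasts to every hart, which is why the handler only validates hmask against sentmask when hbase != -1UL.
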
@@ -77,52 +90,52 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
};
static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
- unsigned long *out_val,
- struct kvm_cpu_trap *utrap, bool *exit)
+ struct kvm_vcpu_sbi_return *retdata)
{
- int ret = 0;
- unsigned long i;
- struct cpumask cm;
- struct kvm_vcpu *tmp;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
unsigned long funcid = cp->a6;
-
- cpumask_clear(&cm);
- kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
- if (hbase != -1UL) {
- if (tmp->vcpu_id < hbase)
- continue;
- if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
- continue;
- }
- if (tmp->cpu < 0)
- continue;
- cpumask_set_cpu(tmp->cpu, &cm);
- }
+ unsigned long vmid;
switch (funcid) {
case SBI_EXT_RFENCE_REMOTE_FENCE_I:
- ret = sbi_remote_fence_i(&cm);
+ kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
- ret = sbi_remote_hfence_vvma(&cm, cp->a2, cp->a3);
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
+ if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
+ kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);
+ else
+ kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
+ cp->a2, cp->a3, PAGE_SHIFT, vmid);
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
- ret = sbi_remote_hfence_vvma_asid(&cm, cp->a2,
- cp->a3, cp->a4);
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
+ if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
+ kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask,
+ cp->a4, vmid);
+ else
+ kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, hbase, hmask, cp->a2,
+ cp->a3, PAGE_SHIFT, cp->a4, vmid);
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
- /* TODO: implement for nested hypervisor case */
+ /*
+ * Until nested virtualization is implemented, the
+ * SBI HFENCE calls should return not supported,
+ * hence the fallthrough to the default case.
+ */
default:
- ret = -EOPNOTSUPP;
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
}
- return ret;
+ return 0;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
@@ -130,3 +143,45 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
.extid_end = SBI_EXT_RFENCE,
.handler = kvm_sbi_ext_rfence_handler,
};
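
The SFENCE.VMA case above treats (start == 0 && size == 0) or size == -1UL as a full address-space flush, and anything else as a ranged flush at PAGE_SHIFT granularity. A guest-side sketch of both shapes, reusing the hedged sbi_ecall() helper from the IPI example; IDs again per the SBI spec:

	#define SBI_EXT_RFENCE				0x52464E43
	#define SBI_EXT_RFENCE_REMOTE_SFENCE_VMA	1

	/* Ranged flush: one 4 KiB page of guest virtual address space. */
	static long remote_sfence_vma_page(unsigned long hmask, unsigned long hbase,
					   unsigned long gva)
	{
		return sbi_ecall(SBI_EXT_RFENCE, SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
				 hmask, hbase, gva, 4096).error;
	}

	/* start == 0 && size == 0 selects the hfence_vvma_all path above. */
	static long remote_sfence_vma_all(unsigned long hmask, unsigned long hbase)
	{
		return sbi_ecall(SBI_EXT_RFENCE, SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
				 hmask, hbase, 0, 0).error;
	}
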
+
+static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu,
+ struct kvm_run *run,
+ struct kvm_vcpu_sbi_return *retdata)
+{
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ unsigned long funcid = cp->a6;
+ u32 reason = cp->a1;
+ u32 type = cp->a0;
+
+ switch (funcid) {
+ case SBI_EXT_SRST_RESET:
+ switch (type) {
+ case SBI_SRST_RESET_TYPE_SHUTDOWN:
+ kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
+ KVM_SYSTEM_EVENT_SHUTDOWN,
+ reason);
+ retdata->uexit = true;
+ break;
+ case SBI_SRST_RESET_TYPE_COLD_REBOOT:
+ case SBI_SRST_RESET_TYPE_WARM_REBOOT:
+ kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
+ KVM_SYSTEM_EVENT_RESET,
+ reason);
+ retdata->uexit = true;
+ break;
+ default:
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ }
+ break;
+ default:
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ }
+
+ return 0;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
+ .extid_start = SBI_EXT_SRST,
+ .extid_end = SBI_EXT_SRST,
+ .handler = kvm_sbi_ext_srst_handler,
+};
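
Finally, the guest-side trigger for the new SRST handler, sketched on top of the same sbi_ecall() helper: type picks shutdown versus reboot, and reason is carried through unmodified to the KVM system event. Constants per the SBI spec:

	#define SBI_EXT_SRST			0x53525354
	#define SBI_EXT_SRST_RESET		0
	#define SBI_SRST_RESET_TYPE_SHUTDOWN	0
	#define SBI_SRST_RESET_REASON_NONE	0

	/* On success the vcpu exits to userspace, so this call never returns. */
	static void sbi_shutdown(void)
	{
		sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET,
			  SBI_SRST_RESET_TYPE_SHUTDOWN,
			  SBI_SRST_RESET_REASON_NONE, 0, 0);
	}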