Diffstat (limited to 'arch/riscv/kvm/vcpu_sbi_replace.c')
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_replace.c  72
1 file changed, 25 insertions(+), 47 deletions(-)
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 9c2ab3dfa93a..506a510b6bff 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	u64 next_cycle;
 
 	if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
-		retdata->err_val = SBI_ERR_INVALID_PARAM;
+		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
 		return 0;
 	}
 
@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
 	unsigned long hmask = cp->a0;
 	unsigned long hbase = cp->a1;
+	unsigned long hart_bit = 0, sentmask = 0;
 
 	if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
-		retdata->err_val = SBI_ERR_INVALID_PARAM;
+		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
 		return 0;
 	}
 
@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		if (hbase != -1UL) {
 			if (tmp->vcpu_id < hbase)
 				continue;
-			if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+			hart_bit = tmp->vcpu_id - hbase;
+			if (hart_bit >= __riscv_xlen)
+				goto done;
+			if (!(hmask & (1UL << hart_bit)))
 				continue;
 		}
 		ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
 		if (ret < 0)
 			break;
+		sentmask |= 1UL << hart_bit;
 		kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
 	}
 
+done:
+	if (hbase != -1UL && (hmask ^ sentmask))
+		retdata->err_val = SBI_ERR_INVALID_PARAM;
+
 	return ret;
 }
 
@@ -87,6 +96,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
 	unsigned long hmask = cp->a0;
 	unsigned long hbase = cp->a1;
 	unsigned long funcid = cp->a6;
+	unsigned long vmid;
 
 	switch (funcid) {
 	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
@@ -94,22 +104,22 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
 		break;
 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
-		if (cp->a2 == 0 && cp->a3 == 0)
-			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
+		vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
+		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
+			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);
 		else
 			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
-						  cp->a2, cp->a3, PAGE_SHIFT);
+						  cp->a2, cp->a3, PAGE_SHIFT, vmid);
 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
 		break;
 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
-		if (cp->a2 == 0 && cp->a3 == 0)
-			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
-						       hbase, hmask, cp->a4);
+		vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
+		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
+			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask,
+						       cp->a4, vmid);
 		else
-			kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
-						       hbase, hmask,
-						       cp->a2, cp->a3,
-						       PAGE_SHIFT, cp->a4);
+			kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, hbase, hmask, cp->a2,
+						       cp->a3, PAGE_SHIFT, cp->a4, vmid);
 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
 		break;
 	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
@@ -118,9 +128,9 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
 	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
 		/*
 		 * Until nested virtualization is implemented, the
-		 * SBI HFENCE calls should be treated as NOPs
+		 * SBI HFENCE calls should return not supported
+		 * hence fallthrough.
 		 */
-		break;
 	default:
 		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
 	}
@@ -175,35 +185,3 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
 	.extid_end = SBI_EXT_SRST,
 	.handler = kvm_sbi_ext_srst_handler,
 };
-
-static int kvm_sbi_ext_dbcn_handler(struct kvm_vcpu *vcpu,
-				    struct kvm_run *run,
-				    struct kvm_vcpu_sbi_return *retdata)
-{
-	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
-	unsigned long funcid = cp->a6;
-
-	switch (funcid) {
-	case SBI_EXT_DBCN_CONSOLE_WRITE:
-	case SBI_EXT_DBCN_CONSOLE_READ:
-	case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
-		/*
-		 * The SBI debug console functions are unconditionally
-		 * forwarded to the userspace.
-		 */
-		kvm_riscv_vcpu_sbi_forward(vcpu, run);
-		retdata->uexit = true;
-		break;
-	default:
-		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
-	}
-
-	return 0;
-}
-
-const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn = {
-	.extid_start = SBI_EXT_DBCN,
-	.extid_end = SBI_EXT_DBCN,
-	.default_disabled = true,
-	.handler = kvm_sbi_ext_dbcn_handler,
-};
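
A note on the IPI change above: the handler now records every hart it actually signalled in sentmask and compares that against the caller's hmask, so a mask bit naming a nonexistent vCPU yields SBI_ERR_INVALID_PARAM instead of being silently ignored (while an unknown function ID now yields SBI_ERR_NOT_SUPPORTED). The following is a standalone userspace sketch of just that mask bookkeeping, not kernel code: the vCPU-ID array, the XLEN stand-in for __riscv_xlen, and the send_ipis() helper are all illustrative, and the interrupt-injection step is reduced to a comment.

#include <stdio.h>

#define XLEN			64	/* stand-in for __riscv_xlen */
#define SBI_SUCCESS		0
#define SBI_ERR_INVALID_PARAM	(-3)	/* error code from the SBI spec */

/* Deliver IPIs to the vCPUs selected by (hmask, hbase), mirroring the
 * sentmask bookkeeping from the patch. Returns an SBI error code. */
static long send_ipis(const unsigned long *vcpu_ids, int nr_vcpus,
		      unsigned long hmask, unsigned long hbase)
{
	unsigned long hart_bit = 0, sentmask = 0;
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		if (hbase != -1UL) {
			if (vcpu_ids[i] < hbase)
				continue;
			hart_bit = vcpu_ids[i] - hbase;
			if (hart_bit >= XLEN)
				goto done;
			if (!(hmask & (1UL << hart_bit)))
				continue;
		}
		/* kernel: kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT); */
		sentmask |= 1UL << hart_bit;
	}
done:
	/* A set hmask bit with no matching vCPU never made it into
	 * sentmask, so the XOR flags it as an invalid parameter. */
	if (hbase != -1UL && (hmask ^ sentmask))
		return SBI_ERR_INVALID_PARAM;
	return SBI_SUCCESS;
}

int main(void)
{
	const unsigned long vcpu_ids[] = { 0, 1, 2, 3 };

	/* All four requested harts exist: prints 0 (SBI_SUCCESS). */
	printf("%ld\n", send_ipis(vcpu_ids, 4, 0xfUL, 0));
	/* Bit 5 names a nonexistent hart: prints -3 (INVALID_PARAM). */
	printf("%ld\n", send_ipis(vcpu_ids, 4, 0x20UL, 0));
	return 0;
}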
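
The SFENCE.VMA hunks also widen the full-flush test from (a2, a3) == (0, 0) to additionally cover a3 == -1UL, on the reasoning that an all-ones size spans the whole guest address space and is better served by the *_all flush variants than by a page-by-page walk. A minimal model of that predicate, using a hypothetical is_full_range() helper name that does not exist in the kernel:

#include <stdio.h>

/* Hypothetical helper modelling the widened test the SFENCE.VMA cases
 * above apply before choosing between the *_all and *_gva flushes. */
static int is_full_range(unsigned long start, unsigned long size)
{
	/* (0, 0) is the pre-existing "flush all" encoding; a size of
	 * all ones covers the whole address space, so it is now routed
	 * to the *_all variants as well. */
	return (start == 0 && size == 0) || size == -1UL;
}

int main(void)
{
	printf("%d\n", is_full_range(0, 0));		/* 1: flush all */
	printf("%d\n", is_full_range(0, -1UL));		/* 1: unbounded size */
	printf("%d\n", is_full_range(0x1000, 0x2000));	/* 0: ranged flush */
	return 0;
}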
