Diffstat (limited to 'arch/riscv/kvm/tlb.c')
-rw-r--r-- | arch/riscv/kvm/tlb.c | 110
1 file changed, 59 insertions, 51 deletions
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 2f91ea5f8493..3c5a70a2b927 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -15,6 +15,8 @@
 #include <asm/cpufeature.h>
 #include <asm/insn-def.h>
 #include <asm/kvm_nacl.h>
+#include <asm/kvm_tlb.h>
+#include <asm/kvm_vmid.h>
 
 #define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
 
@@ -156,36 +158,13 @@ void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
 	csr_write(CSR_HGATP, hgatp);
 }
 
-void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
-{
-	unsigned long vmid;
-
-	if (!kvm_riscv_gstage_vmid_bits() ||
-	    vcpu->arch.last_exit_cpu == vcpu->cpu)
-		return;
-
-	/*
-	 * On RISC-V platforms with hardware VMID support, we share same
-	 * VMID for all VCPUs of a particular Guest/VM. This means we might
-	 * have stale G-stage TLB entries on the current Host CPU due to
-	 * some other VCPU of the same Guest which ran previously on the
-	 * current Host CPU.
-	 *
-	 * To cleanup stale TLB entries, we simply flush all G-stage TLB
-	 * entries by VMID whenever underlying Host CPU changes for a VCPU.
-	 */
-
-	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
-	kvm_riscv_local_hfence_gvma_vmid_all(vmid);
-}
-
 void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
 {
 	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
 	local_flush_icache_all();
 }
 
-void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
+void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
 	unsigned long vmid = READ_ONCE(v->vmid);
@@ -258,51 +237,58 @@ static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
 
 void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
 {
-	unsigned long vmid;
 	struct kvm_riscv_hfence d = { 0 };
-	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
 
 	while (vcpu_hfence_dequeue(vcpu, &d)) {
 		switch (d.type) {
 		case KVM_RISCV_HFENCE_UNKNOWN:
 			break;
 		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
-			vmid = READ_ONCE(v->vmid);
 			if (kvm_riscv_nacl_available())
-				nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
+				nacl_hfence_gvma_vmid(nacl_shmem(), d.vmid,
 						      d.addr, d.size, d.order);
 			else
-				kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
+				kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
 								     d.size, d.order);
 			break;
+		case KVM_RISCV_HFENCE_GVMA_VMID_ALL:
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid);
+			else
+				kvm_riscv_local_hfence_gvma_vmid_all(d.vmid);
+			break;
 		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
-			vmid = READ_ONCE(v->vmid);
 			if (kvm_riscv_nacl_available())
-				nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
+				nacl_hfence_vvma_asid(nacl_shmem(), d.vmid, d.asid,
 						      d.addr, d.size, d.order);
 			else
-				kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
+				kvm_riscv_local_hfence_vvma_asid_gva(d.vmid, d.asid, d.addr,
 								     d.size, d.order);
 			break;
 		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
-			vmid = READ_ONCE(v->vmid);
 			if (kvm_riscv_nacl_available())
-				nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
+				nacl_hfence_vvma_asid_all(nacl_shmem(), d.vmid, d.asid);
 			else
-				kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
+				kvm_riscv_local_hfence_vvma_asid_all(d.vmid, d.asid);
 			break;
 		case KVM_RISCV_HFENCE_VVMA_GVA:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
-			vmid = READ_ONCE(v->vmid);
 			if (kvm_riscv_nacl_available())
-				nacl_hfence_vvma(nacl_shmem(), vmid,
+				nacl_hfence_vvma(nacl_shmem(), d.vmid,
 						 d.addr, d.size, d.order);
 			else
-				kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
+				kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr,
 								d.size, d.order);
 			break;
+		case KVM_RISCV_HFENCE_VVMA_ALL:
+			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_vvma_all(nacl_shmem(), d.vmid);
+			else
+				kvm_riscv_local_hfence_vvma_all(d.vmid);
+			break;
 		default:
 			break;
 		}
@@ -355,35 +341,43 @@ void kvm_riscv_fence_i(struct kvm *kvm,
 void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
 				    unsigned long hbase, unsigned long hmask,
 				    gpa_t gpa, gpa_t gpsz,
-				    unsigned long order)
+				    unsigned long order, unsigned long vmid)
 {
 	struct kvm_riscv_hfence data;
 
 	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
 	data.asid = 0;
+	data.vmid = vmid;
 	data.addr = gpa;
 	data.size = gpsz;
 	data.order = order;
 	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
-			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
+			    KVM_REQ_TLB_FLUSH, &data);
 }
 
 void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
-				    unsigned long hbase, unsigned long hmask)
+				    unsigned long hbase, unsigned long hmask,
+				    unsigned long vmid)
 {
-	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
-			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
+	struct kvm_riscv_hfence data = {0};
+
+	data.type = KVM_RISCV_HFENCE_GVMA_VMID_ALL;
+	data.vmid = vmid;
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+			    KVM_REQ_TLB_FLUSH, &data);
 }
 
 void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
 				    unsigned long hbase, unsigned long hmask,
 				    unsigned long gva, unsigned long gvsz,
-				    unsigned long order, unsigned long asid)
+				    unsigned long order, unsigned long asid,
+				    unsigned long vmid)
 {
 	struct kvm_riscv_hfence data;
 
 	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
 	data.asid = asid;
+	data.vmid = vmid;
 	data.addr = gva;
 	data.size = gvsz;
 	data.order = order;
@@ -393,13 +387,13 @@ void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
 
 void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
 				    unsigned long hbase, unsigned long hmask,
-				    unsigned long asid)
+				    unsigned long asid, unsigned long vmid)
 {
-	struct kvm_riscv_hfence data;
+	struct kvm_riscv_hfence data = {0};
 
 	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
 	data.asid = asid;
-	data.addr = data.size = data.order = 0;
+	data.vmid = vmid;
 	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
 			    KVM_REQ_HFENCE_VVMA_ALL, &data);
 }
@@ -407,12 +401,13 @@ void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
 void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
 			       unsigned long hbase, unsigned long hmask,
 			       unsigned long gva, unsigned long gvsz,
-			       unsigned long order)
+			       unsigned long order, unsigned long vmid)
 {
 	struct kvm_riscv_hfence data;
 
 	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
 	data.asid = 0;
+	data.vmid = vmid;
 	data.addr = gva;
 	data.size = gvsz;
 	data.order = order;
@@ -421,8 +416,21 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
 }
 
 void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
-			       unsigned long hbase, unsigned long hmask)
+			       unsigned long hbase, unsigned long hmask,
+			       unsigned long vmid)
+{
+	struct kvm_riscv_hfence data = {0};
+
+	data.type = KVM_RISCV_HFENCE_VVMA_ALL;
+	data.vmid = vmid;
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+			    KVM_REQ_HFENCE_VVMA_ALL, &data);
+}
+
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
 {
-	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
-			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
+	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
+				       gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT,
+				       PAGE_SHIFT, READ_ONCE(kvm->arch.vmid.vmid));
+	return 0;
 }