From 13acfec2dbccfafff3331a3810cd7dde2fb16891 Mon Sep 17 00:00:00 2001
From: Anup Patel
Date: Mon, 9 May 2022 10:44:05 +0530
Subject: RISC-V: KVM: Add remote HFENCE functions based on VCPU requests

The generic KVM code supports VCPU requests, which can be used to do
arch-specific work in the run-loop. We introduce remote HFENCE functions
which internally use VCPU requests instead of host SBI calls.

Advantages of doing remote HFENCEs as VCPU requests are:

1) Multiple VCPUs of a Guest may be running on different Host CPUs, so
   it is not always possible to determine the Host CPU mask for doing
   a Host SBI call. For example, when VCPU X wants to do HFENCE on
   VCPU Y, it is possible that VCPU Y is blocked or in user-space
   (i.e. vcpu->cpu < 0).

2) To support nested virtualization, we will have a separate shadow
   G-stage for each VCPU and a common host G-stage for the entire
   Guest/VM. VCPU-request-based remote HFENCEs help us easily
   synchronize the common host G-stage and the shadow G-stage of each
   VCPU without any additional IPI calls.

This is also a preparatory patch for upcoming nested virtualization
support, where we will have a shadow G-stage page table for each Guest
VCPU.

Signed-off-by: Anup Patel
Reviewed-by: Atish Patra
Signed-off-by: Anup Patel
---
 arch/riscv/include/asm/kvm_host.h | 59 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
(limited to 'arch/riscv/include/asm/kvm_host.h')

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 0a804021db8c..01915d5f0094 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
+#include <linux/spinlock.h>
 #include <asm/csr.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_timer.h>
@@ -26,6 +27,31 @@
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
 #define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
+#define KVM_REQ_FENCE_I \
+	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
+#define KVM_REQ_HFENCE_VVMA_ALL \
+	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HFENCE \
+	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+
+enum kvm_riscv_hfence_type {
+	KVM_RISCV_HFENCE_UNKNOWN = 0,
+	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
+	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
+	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
+	KVM_RISCV_HFENCE_VVMA_GVA,
+};
+
+struct kvm_riscv_hfence {
+	enum kvm_riscv_hfence_type type;
+	unsigned long asid;
+	unsigned long order;
+	gpa_t addr;
+	gpa_t size;
+};
+
+#define KVM_RISCV_VCPU_MAX_HFENCE	64
 
 struct kvm_vm_stat {
 	struct kvm_vm_stat_generic generic;
@@ -178,6 +204,12 @@ struct kvm_vcpu_arch {
 	/* VCPU Timer */
 	struct kvm_vcpu_timer timer;
 
+	/* HFENCE request queue */
+	spinlock_t hfence_lock;
+	unsigned long hfence_head;
+	unsigned long hfence_tail;
+	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];
+
 	/* MMIO instruction details */
 	struct kvm_mmio_decode mmio_decode;
 
@@ -221,6 +253,33 @@ void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
 				     unsigned long order);
 void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
 
+void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
+
+void kvm_riscv_fence_i(struct kvm *kvm,
+		       unsigned long hbase, unsigned long hmask);
+void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    gpa_t gpa, gpa_t gpsz,
+				    unsigned long order);
+void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask);
+void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    unsigned long gva, unsigned long gvsz,
+				    unsigned long order, unsigned long asid);
+void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    unsigned long asid);
+void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
+			       unsigned long hbase, unsigned long hmask,
+			       unsigned long gva, unsigned long gvsz,
+			       unsigned long order);
+void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
+			       unsigned long hbase, unsigned long hmask);
+
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
--
cgit
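
To make the request flow concrete: each remote fence is split into a
requester-side function (kvm_riscv_fence_i() and the kvm_riscv_hfence_*()
family) and a per-VCPU *_process() handler. The sketch below shows how a
VCPU run-loop could dispatch the four new requests to the handlers
declared in this patch. It is an illustration rather than this patch's
own run-loop code: check_vcpu_requests() is a hypothetical name, while
kvm_request_pending() and kvm_check_request() are existing generic KVM
helpers from <linux/kvm_host.h>.

#include <linux/kvm_host.h>

static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return;

	/* kvm_check_request() tests and clears the request bit. */
	if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
		kvm_riscv_fence_i_process(vcpu);
	if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
		kvm_riscv_hfence_gvma_vmid_all_process(vcpu);
	if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
		kvm_riscv_hfence_vvma_all_process(vcpu);
	if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
		kvm_riscv_hfence_process(vcpu);
}

Because every request is defined with KVM_REQUEST_WAIT, the generic code
lets a requester wait until all targeted VCPUs have acknowledged the
request, which is what gives these remote fences their completion
semantics without host SBI calls.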
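
On the requester side, a fine-grained kvm_riscv_hfence_*() call has to
hand a struct kvm_riscv_hfence descriptor to every targeted VCPU before
posting KVM_REQ_HFENCE. Below is a minimal sketch of one plausible
enqueue helper for the per-VCPU ring buffer added by this patch;
vcpu_hfence_enqueue() is a hypothetical name, and treating
KVM_RISCV_HFENCE_UNKNOWN as the free-slot marker is an assumption of the
sketch. kvm_make_request() and kvm_vcpu_kick() are existing generic KVM
primitives.

#include <linux/kvm_host.h>

/*
 * Sketch: push one HFENCE descriptor into the target VCPU's ring
 * buffer. hfence_head is the producer index, hfence_tail the consumer
 * index; a slot whose type is KVM_RISCV_HFENCE_UNKNOWN is free
 * (an assumption of this sketch).
 */
static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
				const struct kvm_riscv_hfence *data)
{
	bool queued = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (varch->hfence_queue[varch->hfence_head].type ==
	    KVM_RISCV_HFENCE_UNKNOWN) {
		varch->hfence_queue[varch->hfence_head] = *data;
		if (++varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_head = 0;
		queued = true;
	}

	spin_unlock(&varch->hfence_lock);

	if (queued) {
		/* Make the target notice and process the descriptor. */
		kvm_make_request(KVM_REQ_HFENCE, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	return queued;
}

One plausible policy when the ring is full is to post the coarser
KVM_REQ_HFENCE_VVMA_ALL or KVM_REQ_HFENCE_GVMA_VMID_ALL request instead:
those carry no descriptor, so they cannot be lost, at the cost of
over-flushing.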
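
On the consumer side, kvm_riscv_hfence_process() then has to drain the
ring and apply each descriptor with a matching local fence. A sketch
under the same assumptions follows: process_hfence_queue() is a
hypothetical name, the VMID is assumed to be passed in by the caller
from the guest's VMID bookkeeping, and the signatures of the GVMA/ASID
local helpers are assumed (only the VVMA ones are visible in the diff
context above).

#include <linux/kvm_host.h>

/*
 * Sketch: drain the per-VCPU HFENCE ring buffer and apply each
 * descriptor with a local fence. For the VVMA types, the addr/size
 * fields of struct kvm_riscv_hfence are taken to hold a guest
 * virtual range (an assumption of this sketch).
 */
static void process_hfence_queue(struct kvm_vcpu *vcpu, unsigned long vmid)
{
	struct kvm_riscv_hfence d;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	while (true) {
		/* Dequeue one descriptor, marking its slot free again. */
		spin_lock(&varch->hfence_lock);
		d = varch->hfence_queue[varch->hfence_tail];
		if (d.type == KVM_RISCV_HFENCE_UNKNOWN) {
			spin_unlock(&varch->hfence_lock);
			break;
		}
		varch->hfence_queue[varch->hfence_tail].type =
						KVM_RISCV_HFENCE_UNKNOWN;
		if (++varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_tail = 0;
		spin_unlock(&varch->hfence_lock);

		switch (d.type) {
		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
			kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
							     d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
			kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid,
							     d.addr, d.size,
							     d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
			kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
			break;
		case KVM_RISCV_HFENCE_VVMA_GVA:
			kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
							d.size, d.order);
			break;
		default:
			break;
		}
	}
}

Draining on the consumer side, under the same lock as the enqueue path,
is what lets a blocked or user-space VCPU (vcpu->cpu < 0) pick up fences
the next time it enters the run-loop, which is exactly the case that
host-SBI-based remote fences could not handle.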