Diffstat (limited to 'arch')
 arch/riscv/include/asm/kvm_host.h |  25 +++++--
 arch/riscv/kvm/mmu.c              |   4 +-
 arch/riscv/kvm/tlb.S              |  74 --------
 arch/riscv/kvm/tlb.c              | 213 ++++++++++++++++++
 arch/riscv/kvm/vcpu.c             |   2 +-
 arch/riscv/kvm/vmid.c             |   2 +-
 6 files changed, 237 insertions(+), 83 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index ecb94dddd5c8..d5c2057f8451 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -201,11 +201,26 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
-void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa_divby_4,
-				      unsigned long vmid);
-void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
-void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa_divby_4);
-void __kvm_riscv_hfence_gvma_all(void);
+#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12
+
+void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
+					  gpa_t gpa, gpa_t gpsz,
+					  unsigned long order);
+void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
+void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
+				     unsigned long order);
+void kvm_riscv_local_hfence_gvma_all(void);
+void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
+					  unsigned long asid,
+					  unsigned long gva,
+					  unsigned long gvsz,
+					  unsigned long order);
+void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
+					  unsigned long asid);
+void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
+				     unsigned long gva, unsigned long gvsz,
+				     unsigned long order);
+void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
 
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 8823eb32dcde..1e07603c905b 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -745,7 +745,7 @@ void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu)
 	csr_write(CSR_HGATP, hgatp);
 
 	if (!kvm_riscv_gstage_vmid_bits())
-		__kvm_riscv_hfence_gvma_all();
+		kvm_riscv_local_hfence_gvma_all();
 }
 
 void kvm_riscv_gstage_mode_detect(void)
@@ -768,7 +768,7 @@ void kvm_riscv_gstage_mode_detect(void)
 skip_sv48x4_test:
 
 	csr_write(CSR_HGATP, 0);
-	__kvm_riscv_hfence_gvma_all();
+	kvm_riscv_local_hfence_gvma_all();
 #endif
 }
 
diff --git a/arch/riscv/kvm/tlb.S b/arch/riscv/kvm/tlb.S
deleted file mode 100644
index 899f75d60bad..000000000000
--- a/arch/riscv/kvm/tlb.S
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2019 Western Digital Corporation or its affiliates.
- *
- * Authors:
- *	Anup Patel <anup.patel@wdc.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/asm.h>
-
-	.text
-	.altmacro
-	.option norelax
-
-	/*
-	 * Instruction encoding of hfence.gvma is:
-	 * HFENCE.GVMA rs1, rs2
-	 * HFENCE.GVMA zero, rs2
-	 * HFENCE.GVMA rs1
-	 * HFENCE.GVMA
-	 *
-	 * rs1!=zero and rs2!=zero ==> HFENCE.GVMA rs1, rs2
-	 * rs1==zero and rs2!=zero ==> HFENCE.GVMA zero, rs2
-	 * rs1!=zero and rs2==zero ==> HFENCE.GVMA rs1
-	 * rs1==zero and rs2==zero ==> HFENCE.GVMA
-	 *
-	 * Instruction encoding of HFENCE.GVMA is:
-	 * 0110001 rs2(5) rs1(5) 000 00000 1110011
-	 */
-
-ENTRY(__kvm_riscv_hfence_gvma_vmid_gpa)
-	/*
-	 * rs1 = a0 (GPA >> 2)
-	 * rs2 = a1 (VMID)
-	 * HFENCE.GVMA a0, a1
-	 * 0110001 01011 01010 000 00000 1110011
-	 */
-	.word 0x62b50073
-	ret
-ENDPROC(__kvm_riscv_hfence_gvma_vmid_gpa)
-
-ENTRY(__kvm_riscv_hfence_gvma_vmid)
-	/*
-	 * rs1 = zero
-	 * rs2 = a0 (VMID)
-	 * HFENCE.GVMA zero, a0
-	 * 0110001 01010 00000 000 00000 1110011
-	 */
-	.word 0x62a00073
-	ret
-ENDPROC(__kvm_riscv_hfence_gvma_vmid)
-
-ENTRY(__kvm_riscv_hfence_gvma_gpa)
-	/*
-	 * rs1 = a0 (GPA >> 2)
-	 * rs2 = zero
-	 * HFENCE.GVMA a0
-	 * 0110001 00000 01010 000 00000 1110011
-	 */
-	.word 0x62050073
-	ret
-ENDPROC(__kvm_riscv_hfence_gvma_gpa)
-
-ENTRY(__kvm_riscv_hfence_gvma_all)
-	/*
-	 * rs1 = zero
-	 * rs2 = zero
-	 * HFENCE.GVMA
-	 * 0110001 00000 00000 000 00000 1110011
-	 */
-	.word 0x62000073
-	ret
-ENDPROC(__kvm_riscv_hfence_gvma_all)
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
new file mode 100644
index 000000000000..e2d4fd610745
--- /dev/null
+++ b/arch/riscv/kvm/tlb.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+
+/*
+ * Instruction encoding of hfence.gvma is:
+ * HFENCE.GVMA rs1, rs2
+ * HFENCE.GVMA zero, rs2
+ * HFENCE.GVMA rs1
+ * HFENCE.GVMA
+ *
+ * rs1!=zero and rs2!=zero ==> HFENCE.GVMA rs1, rs2
+ * rs1==zero and rs2!=zero ==> HFENCE.GVMA zero, rs2
+ * rs1!=zero and rs2==zero ==> HFENCE.GVMA rs1
+ * rs1==zero and rs2==zero ==> HFENCE.GVMA
+ *
+ * Instruction encoding of HFENCE.GVMA is:
+ * 0110001 rs2(5) rs1(5) 000 00000 1110011
+ */
+
+void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
+					  gpa_t gpa, gpa_t gpsz,
+					  unsigned long order)
+{
+	gpa_t pos;
+
+	if (PTRS_PER_PTE < (gpsz >> order)) {
+		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
+		return;
+	}
+
+	for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) {
+		/*
+		 * rs1 = a0 (GPA >> 2)
+		 * rs2 = a1 (VMID)
+		 * HFENCE.GVMA a0, a1
+		 * 0110001 01011 01010 000 00000 1110011
+		 */
+		asm volatile ("srli a0, %0, 2\n"
+			      "add a1, %1, zero\n"
+			      ".word 0x62b50073\n"
+			      :: "r" (pos), "r" (vmid)
+			      : "a0", "a1", "memory");
+	}
+}
+
+void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
+{
+	/*
+	 * rs1 = zero
+	 * rs2 = a0 (VMID)
+	 * HFENCE.GVMA zero, a0
+	 * 0110001 01010 00000 000 00000 1110011
+	 */
+	asm volatile ("add a0, %0, zero\n"
+		      ".word 0x62a00073\n"
+		      :: "r" (vmid) : "a0", "memory");
+}
+
+void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
+				     unsigned long order)
+{
+	gpa_t pos;
+
+	if (PTRS_PER_PTE < (gpsz >> order)) {
+		kvm_riscv_local_hfence_gvma_all();
+		return;
+	}
+
+	for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) {
+		/*
+		 * rs1 = a0 (GPA >> 2)
+		 * rs2 = zero
+		 * HFENCE.GVMA a0
+		 * 0110001 00000 01010 000 00000 1110011
+		 */
+		asm volatile ("srli a0, %0, 2\n"
+			      ".word 0x62050073\n"
+			      :: "r" (pos) : "a0", "memory");
+	}
+}
+
+void kvm_riscv_local_hfence_gvma_all(void)
+{
+	/*
+	 * rs1 = zero
+	 * rs2 = zero
+	 * HFENCE.GVMA
+	 * 0110001 00000 00000 000 00000 1110011
+	 */
+	asm volatile (".word 0x62000073" ::: "memory");
+}
+
+/*
+ * Instruction encoding of hfence.vvma is:
+ * HFENCE.VVMA rs1, rs2
+ * HFENCE.VVMA zero, rs2
+ * HFENCE.VVMA rs1
+ * HFENCE.VVMA
+ *
+ * rs1!=zero and rs2!=zero ==> HFENCE.VVMA rs1, rs2
+ * rs1==zero and rs2!=zero ==> HFENCE.VVMA zero, rs2
+ * rs1!=zero and rs2==zero ==> HFENCE.VVMA rs1
+ * rs1==zero and rs2==zero ==> HFENCE.VVMA
+ *
+ * Instruction encoding of HFENCE.VVMA is:
+ * 0010001 rs2(5) rs1(5) 000 00000 1110011
+ */
+
+void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
+					  unsigned long asid,
+					  unsigned long gva,
+					  unsigned long gvsz,
+					  unsigned long order)
+{
+	unsigned long pos, hgatp;
+
+	if (PTRS_PER_PTE < (gvsz >> order)) {
+		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
+		return;
+	}
+
+	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
+
+	for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) {
+		/*
+		 * rs1 = a0 (GVA)
+		 * rs2 = a1 (ASID)
+		 * HFENCE.VVMA a0, a1
+		 * 0010001 01011 01010 000 00000 1110011
+		 */
+		asm volatile ("add a0, %0, zero\n"
+			      "add a1, %1, zero\n"
+			      ".word 0x22b50073\n"
+			      :: "r" (pos), "r" (asid)
+			      : "a0", "a1", "memory");
+	}
+
+	csr_write(CSR_HGATP, hgatp);
+}
+
+void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
+					  unsigned long asid)
+{
+	unsigned long hgatp;
+
+	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
+
+	/*
+	 * rs1 = zero
+	 * rs2 = a0 (ASID)
+	 * HFENCE.VVMA zero, a0
+	 * 0010001 01010 00000 000 00000 1110011
+	 */
+	asm volatile ("add a0, %0, zero\n"
+		      ".word 0x22a00073\n"
+		      :: "r" (asid) : "a0", "memory");
+
+	csr_write(CSR_HGATP, hgatp);
+}
+
+void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
+				     unsigned long gva, unsigned long gvsz,
+				     unsigned long order)
+{
+	unsigned long pos, hgatp;
+
+	if (PTRS_PER_PTE < (gvsz >> order)) {
+		kvm_riscv_local_hfence_vvma_all(vmid);
+		return;
+	}
+
+	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
+
+	for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) {
+		/*
+		 * rs1 = a0 (GVA)
+		 * rs2 = zero
+		 * HFENCE.VVMA a0
+		 * 0010001 00000 01010 000 00000 1110011
+		 */
+		asm volatile ("add a0, %0, zero\n"
+			      ".word 0x22050073\n"
+			      :: "r" (pos) : "a0", "memory");
+	}
+
+	csr_write(CSR_HGATP, hgatp);
+}
+
+void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
+{
+	unsigned long hgatp;
+
+	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
+
+	/*
+	 * rs1 = zero
+	 * rs2 = zero
+	 * HFENCE.VVMA
+	 * 0010001 00000 00000 000 00000 1110011
+	 */
+	asm volatile (".word 0x22000073" ::: "memory");
+
+	csr_write(CSR_HGATP, hgatp);
+}
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index f8582599c7b2..24cf3bb4be3a 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -693,7 +693,7 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 			kvm_riscv_gstage_update_hgatp(vcpu);
 
 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
-			__kvm_riscv_hfence_gvma_all();
+			kvm_riscv_local_hfence_gvma_all();
 	}
 }
 
diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c
index 01fdc342ad76..8987e76aa6db 100644
--- a/arch/riscv/kvm/vmid.c
+++ b/arch/riscv/kvm/vmid.c
@@ -33,7 +33,7 @@ void kvm_riscv_gstage_vmid_detect(void)
 	csr_write(CSR_HGATP, old);
 
 	/* We polluted local TLB so flush all guest TLB */
-	__kvm_riscv_hfence_gvma_all();
+	kvm_riscv_local_hfence_gvma_all();
 
 	/* We don't use VMID bits if they are not sufficient */
 	if ((1UL << vmid_bits) < num_possible_cpus())
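
The raw .word opcodes used throughout the patch can be cross-checked against the encoding given in its comments: 0110001 rs2(5) rs1(5) 000 00000 1110011 for HFENCE.GVMA and 0010001 rs2(5) rs1(5) 000 00000 1110011 for HFENCE.VVMA. Below is a minimal standalone C sketch, not part of the patch, that rebuilds the constants from those fields, assuming only the standard RISC-V register numbers a0 = x10 and a1 = x11:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* R-type layout: funct7 | rs2 | rs1 | funct3 | rd | opcode,
 * with funct3 = 000, rd = 00000 and opcode = 1110011 (0x73)
 * for both HFENCE.GVMA and HFENCE.VVMA.
 */
static uint32_t hfence(uint32_t funct7, uint32_t rs2, uint32_t rs1)
{
	return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | 0x73;
}

int main(void)
{
	enum { ZERO = 0, A0 = 10, A1 = 11 };

	/* HFENCE.GVMA: funct7 = 0110001 = 0x31 */
	printf("%08" PRIx32 "\n", hfence(0x31, A1, A0));    /* gvma a0, a1 */
	printf("%08" PRIx32 "\n", hfence(0x31, A0, ZERO));  /* gvma zero, a0 */
	printf("%08" PRIx32 "\n", hfence(0x31, ZERO, A0));  /* gvma a0 */
	printf("%08" PRIx32 "\n", hfence(0x31, ZERO, ZERO));/* gvma */

	/* HFENCE.VVMA: funct7 = 0010001 = 0x11 */
	printf("%08" PRIx32 "\n", hfence(0x11, A1, A0));    /* vvma a0, a1 */
	return 0;
}

Running it prints 62b50073, 62a00073, 62050073, 62000073 and 22b50073, matching the .word values in the deleted tlb.S and the new tlb.c.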
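The PTRS_PER_PTE guard at the top of each range helper bounds how many hfence instructions a single call can issue: the range is walked in BIT(order) steps only when it spans at most PTRS_PER_PTE entries of the given order (512 with 4 KiB pages on RV64); anything larger falls back to a full VMID or full TLB flush. A small standalone sketch of that decision, with PTRS_PER_PTE hard-coded to the assumed RV64 value:

#include <stdbool.h>
#include <stdio.h>

#define PTRS_PER_PTE 512	/* assumed: RV64 with 4 KiB pages */

/* Mirrors the guard in kvm_riscv_local_hfence_gvma_vmid_gpa() and
 * friends: walk the range page by page only if it covers at most
 * PTRS_PER_PTE entries of the given order.
 */
static bool use_range_flush(unsigned long gpsz, unsigned long order)
{
	return !(PTRS_PER_PTE < (gpsz >> order));
}

int main(void)
{
	/* one 2 MiB huge page at order 21: 1 entry -> range flush */
	printf("%d\n", use_range_flush(1UL << 21, 21));
	/* 4 MiB of 4 KiB pages at order 12: 1024 entries -> full flush */
	printf("%d\n", use_range_flush(1UL << 22, 12));
	return 0;
}

This is why KVM_RISCV_GSTAGE_TLB_MIN_ORDER is exported as 12: callers flushing base 4 KiB pages pass that as the minimum order, and larger orders let huge-page mappings be invalidated with a single hfence each.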