path: root/arch/riscv/include/asm/kvm_host.h
author		Sean Christopherson <seanjc@google.com>	2021-11-04 16:41:07 +0000
committer	Anup Patel <anup@brainfault.org>	2022-01-06 14:38:50 +0530
commit		cc4f602bc4365d9a8665803a49dddc70eb56f7f1 (patch)
tree		9106969469277ef9b1c31a22f654a50544e92b50 /arch/riscv/include/asm/kvm_host.h
parent		5e4e84f1124aa02643833b7ea40abd5a8e964388 (diff)
KVM: RISC-V: Use common KVM implementation of MMU memory caches
Use common KVM's implementation of the MMU memory caches, which for all intents and purposes is semantically identical to RISC-V's version, the only difference being that the common implementation will fall back to an atomic allocation if there's a KVM bug that triggers a cache underflow.

RISC-V appears to have based its MMU code on arm64 before the conversion to the common caches in commit c1a33aebe91d ("KVM: arm64: Use common KVM implementation of MMU memory caches"), despite having also copy-pasted the definition of KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE in kvm_types.h.

Opportunistically drop the superfluous wrapper kvm_riscv_stage2_flush_cache(), whose name is very, very confusing as "cache flush" in the context of MMU code almost always refers to flushing hardware caches, not freeing unused software objects.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Anup Patel <anup.patel@wdc.com>
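For context, a rough sketch of the common cache that mmu_page_cache is converted to. The authoritative definition lives in include/linux/kvm_types.h, with KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE supplied by the arch's kvm_types.h; the exact fields may differ between kernel versions.

#include <linux/types.h>

/* Sketch only; approximate layout for kernels of this vintage. */
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE	32

struct kvm_mmu_memory_cache {
	int nobjs;			/* objects currently stashed */
	gfp_t gfp_zero;			/* __GFP_ZERO if objects must be zeroed */
	struct kmem_cache *kmem_cache;	/* optional dedicated slab cache */
	void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
};

Unlike the removed kvm_mmu_page_cache, this version carries the GFP and slab-cache knobs that let the common topup/alloc helpers handle zeroing and the atomic-allocation fallback mentioned above.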
Diffstat (limited to 'arch/riscv/include/asm/kvm_host.h')
-rw-r--r--	arch/riscv/include/asm/kvm_host.h	10
1 file changed, 1 insertion, 9 deletions
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 907fafea787e..52e19888ce43 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -77,13 +77,6 @@ struct kvm_sbi_context {
int return_handled;
};
-#define KVM_MMU_PAGE_CACHE_NR_OBJS 32
-
-struct kvm_mmu_page_cache {
- int nobjs;
- void *objects[KVM_MMU_PAGE_CACHE_NR_OBJS];
-};
-
struct kvm_cpu_trap {
unsigned long sepc;
unsigned long scause;
@@ -193,7 +186,7 @@ struct kvm_vcpu_arch {
struct kvm_sbi_context sbi_context;
/* Cache pages needed to program page tables with spinlock held */
- struct kvm_mmu_page_cache mmu_page_cache;
+ struct kvm_mmu_memory_cache mmu_page_cache;
/* VCPU power-off state */
bool power_off;
@@ -220,7 +213,6 @@ void __kvm_riscv_hfence_gvma_all(void);
int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot,
gpa_t gpa, unsigned long hva, bool is_write);
-void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu);
int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
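The caller-side pattern the common cache expects is sketched below; the function and constant names (example_stage2_map, EXAMPLE_STAGE2_CACHE_MIN, example_vcpu_destroy) are illustrative, not copied from arch/riscv/kvm/mmu.c. The cache is topped up with kvm_mmu_topup_memory_cache() while sleeping is still allowed, page-table pages are pulled from it with kvm_mmu_memory_cache_alloc() under the MMU spinlock, and kvm_mmu_free_memory_cache() at VCPU teardown takes over the role of the dropped kvm_riscv_stage2_flush_cache() wrapper.

#include <linux/kvm_host.h>

/* Hypothetical minimum; the real code sizes this from the page-table depth. */
#define EXAMPLE_STAGE2_CACHE_MIN	4

static int example_stage2_map(struct kvm_vcpu *vcpu, gpa_t gpa, bool is_write)
{
	struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
	void *new_table;
	int ret;

	/* May sleep: refill the cache before taking mmu_lock. */
	ret = kvm_mmu_topup_memory_cache(pcache, EXAMPLE_STAGE2_CACHE_MIN);
	if (ret)
		return ret;

	spin_lock(&vcpu->kvm->mmu_lock);
	/*
	 * No sleeping here: objects come from the pre-filled cache, and the
	 * common code falls back to an atomic allocation only if a KVM bug
	 * has underflowed the cache.
	 */
	new_table = kvm_mmu_memory_cache_alloc(pcache);
	/* ... link new_table into the stage-2 page tables for gpa ... */
	spin_unlock(&vcpu->kvm->mmu_lock);

	return 0;
}

static void example_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Release any objects still cached for this VCPU. */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}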