author	Sean Christopherson <seanjc@google.com>	2021-12-06 20:54:17 +0100
committer	Paolo Bonzini <pbonzini@redhat.com>	2021-12-08 04:24:24 -0500
commit	d01495d4cffb327200d4522db6eb3fabfdc9e2f5 (patch)
tree	30aef746ab6a24c39b7c57772538f4e0ed3846c4 /arch/riscv/kvm/mmu.c
parent	9d7d18ee3f48903f7b9bbf6305d690078c67271b (diff)
KVM: RISC-V: Use "new" memslot instead of userspace memory region
Get the slot ID, hva, etc... from the "new" memslot instead of the userspace memory region when preparing/committing a memory region. This will allow a future commit to drop @mem from the prepare/commit hooks once all architectures convert to using "new".

Opportunistically wait to get the various "new" values until after filtering out the DELETE case in anticipation of a future commit passing NULL for @new when deleting a memslot.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Message-Id: <543608ab88a1190e73a958efffafc98d2652c067.1638817640.git.maciej.szmigiero@oracle.com>
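Editor's note: the conversion rests on the fact that, once KVM_MR_DELETE is filtered out, the "new" memslot carries the same information as the userspace memory region. Below is a minimal, illustrative-only sketch of that field correspondence; the helper is hypothetical and not part of the patch, though the field names are the ones the diff itself uses.

	/*
	 * Hypothetical helper, for illustration only: the @mem -> @new
	 * field correspondence this conversion relies on, valid once
	 * the KVM_MR_DELETE case has been filtered out.
	 */
	static void check_memslot_correspondence(const struct kvm_userspace_memory_region *mem,
						 const struct kvm_memory_slot *new)
	{
		WARN_ON(new->id != mem->slot);
		WARN_ON(new->flags != mem->flags);
		WARN_ON(new->userspace_addr != mem->userspace_addr);
		WARN_ON((u64)new->npages << PAGE_SHIFT != mem->memory_size);
		WARN_ON((u64)new->base_gfn << PAGE_SHIFT != mem->guest_phys_addr);
	}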
Diffstat (limited to 'arch/riscv/kvm/mmu.c')
 arch/riscv/kvm/mmu.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 50380f525345..573ade138204 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -472,8 +472,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	 * allocated dirty_bitmap[], dirty pages will be tracked while
 	 * the memory slot is write protected.
 	 */
-	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
-		stage2_wp_memory_region(kvm, mem->slot);
+	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
+		stage2_wp_memory_region(kvm, new->id);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -482,9 +482,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *new,
 				   enum kvm_mr_change change)
 {
-	hva_t hva = mem->userspace_addr;
-	hva_t reg_end = hva + mem->memory_size;
-	bool writable = !(mem->flags & KVM_MEM_READONLY);
+	hva_t hva, reg_end, size;
+	gpa_t base_gpa;
+	bool writable;
 	int ret = 0;
 
 	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -499,6 +499,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	    (stage2_gpa_size >> PAGE_SHIFT))
 		return -EFAULT;
 
+	hva = new->userspace_addr;
+	size = new->npages << PAGE_SHIFT;
+	reg_end = hva + size;
+	base_gpa = new->base_gfn << PAGE_SHIFT;
+	writable = !(new->flags & KVM_MEM_READONLY);
+
 	mmap_read_lock(current->mm);
 
 	/*
@@ -534,8 +540,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		vm_end = min(reg_end, vma->vm_end);
 
 		if (vma->vm_flags & VM_PFNMAP) {
-			gpa_t gpa = mem->guest_phys_addr +
-				    (vm_start - mem->userspace_addr);
+			gpa_t gpa = base_gpa + (vm_start - hva);
 			phys_addr_t pa;
 
 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
@@ -560,8 +565,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
-		stage2_unmap_range(kvm, mem->guest_phys_addr,
-				   mem->memory_size, false);
+		stage2_unmap_range(kvm, base_gpa, size, false);
 	spin_unlock(&kvm->mmu_lock);
 
 out:
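Editor's note: the address arithmetic in the last two hunks is unchanged in substance: for a VMA slice [vm_start, vm_end) inside the slot's HVA range, the backing guest physical address is the slot's base GPA plus the slice's offset into the slot. A standalone sketch, assuming the hva and base_gpa values derived earlier in the function (the helper is hypothetical, not part of the patch):

	/*
	 * Hypothetical sketch of the VM_PFNMAP address math above: the
	 * GPA of a VMA slice is the slot's base GPA plus the slice's
	 * offset into the slot's HVA range. This is equivalent to the
	 * old @mem-based form, since base_gpa == mem->guest_phys_addr
	 * and hva == mem->userspace_addr.
	 */
	static gpa_t pfnmap_slice_gpa(gpa_t base_gpa, hva_t hva, hva_t vm_start)
	{
		return base_gpa + (vm_start - hva);
	}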