author    Gavin Shan <gshan@redhat.com>    2021-03-16 12:11:26 +0800
committer Marc Zyngier <maz@kernel.org>    2021-04-07 14:33:22 +0100
commit    10ba2d17d2972926c60e01dace6d7a3f8d968c4f (patch)
tree      588816d146a0b6a701f31135c29b78eb28c3bde4 /arch/arm64/kvm
parent    c728fd4ce75e9c342ea96facc5a2fe5ddb976a67 (diff)
KVM: arm64: Don't retrieve memory slot again in page fault handler
We needn't retrieve the memory slot again in user_mem_abort() because the corresponding memory slot has been passed from the caller. This saves some CPU cycles. For example, the time used to write 1GB of memory, backed by 2MB hugetlb pages and write-protected, drops by 6.8%, from 928ms to 864ms.

Signed-off-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210316041126.81860-4-gshan@redhat.com
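The cycles saved come from skipping a redundant memslot lookup on every fault: the generic helpers being replaced are thin wrappers that re-derive the slot from the gfn before calling the _memslot variants this patch now calls directly. A sketch of those wrappers, paraphrased from virt/kvm/kvm_main.c of this era (not the verbatim source):

	/*
	 * Both wrappers pay for a gfn_to_memslot() lookup that
	 * user_mem_abort() can skip, since its caller already holds the slot.
	 */
	kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
				  bool *writable)
	{
		return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn,
					    false, NULL, write_fault,
					    writable, NULL);
	}

	void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
	{
		struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

		mark_page_dirty_in_slot(kvm, memslot, gfn);
	}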
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/mmu.c | 9
1 file changed, 7 insertions, 2 deletions
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 192e0df2fc8e..2491b40a294a 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -843,10 +843,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
 	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
 	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
+	 *
+	 * Besides, __gfn_to_pfn_memslot() is used instead of gfn_to_pfn_prot()
+	 * to avoid the unnecessary overhead of locating the memory slot: the
+	 * slot is always fixed, even when @gfn is adjusted for huge pages.
 	 */
 	smp_rmb();
-	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
+	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+				   write_fault, &writable, NULL);
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
 		kvm_send_hwpoison_signal(hva, vma_shift);
 		return 0;
@@ -912,7 +917,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	/* Mark the page dirty only if the fault is handled successfully */
 	if (writable && !ret) {
 		kvm_set_pfn_dirty(pfn);
-		mark_page_dirty(kvm, gfn);
+		mark_page_dirty_in_slot(kvm, memslot, gfn);
 	}
 out_unlock:
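For context, the slot reused above is the one the caller resolved once before dispatching the fault. A sketch of that caller, kvm_handle_guest_abort() in the same file (paraphrased, not the verbatim source):

	/* Resolve the memslot once, then hand it down to user_mem_abort(). */
	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	...
	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);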