author	Lai Jiangshan <jiangshan.ljs@antgroup.com>	2023-02-02 18:28:16 +0000
committer	Paolo Bonzini <pbonzini@redhat.com>	2023-03-14 10:28:56 -0400
commit	39fda5d873eb1f59613ce49249ee2effea9f8e06 (patch)
tree	997fc47924098d5b22390c6facf6b4ae039cdfd3 /arch/x86/kvm/mmu/paging_tmpl.h
parent	258d985f6eb360c9c7aacd025d0dbc080a59423f (diff)
KVM: x86/mmu: Detect write #PF to shadow pages during FNAME(fetch) walk
Move the detection of write #PF to shadow pages, i.e. a fault on a write
to a page table that is being shadowed by KVM that is used to translate
the write itself, from FNAME(is_self_change_mapping) to FNAME(fetch).

There is no need to detect the self-referential write before
kvm_faultin_pfn() as KVM does not consume EMULTYPE_WRITE_PF_TO_SP for
accesses that resolve to "error or no-slot" pfns, i.e. KVM doesn't allow
retrying MMIO accesses or writes to read-only memslots.

Detecting the EMULTYPE_WRITE_PF_TO_SP scenario in FNAME(fetch) will
allow dropping FNAME(is_self_change_mapping) entirely, as the hugepage
interaction can be deferred to kvm_mmu_hugepage_adjust().

Cc: Huang Hang <hhuang@linux.alibaba.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Link: https://lore.kernel.org/r/20221213125538.81209-1-jiangshanlai@gmail.com
[sean: split to separate patch, write changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20230202182817.407394-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
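In essence, the patch moves a single comparison into the per-level shadow
walk: a write fault is flagged as self-referential when the gfn being
written equals the gfn of a guest page table used to translate that very
access. Below is a minimal standalone C sketch of that check; fault_info,
guest_walk, and detect_self_referential_write are hypothetical stand-ins
for KVM's kvm_page_fault, guest_walker, and FNAME(fetch) walk, not the
kernel's actual types.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* Hypothetical stand-in for KVM's kvm_page_fault. */
struct fault_info {
	gfn_t gfn;                          /* gfn targeted by the faulting access */
	bool write;                         /* the fault is a write */
	bool write_fault_to_shadow_pgtable; /* output flag, as in the patch */
};

/* Hypothetical stand-in for KVM's guest_walker. */
struct guest_walk {
	int level;          /* deepest guest page-table level walked */
	gfn_t table_gfn[5]; /* gfn of the guest table used at each level */
};

/*
 * Mirrors the check the patch adds to the fetch walk: while shadowing
 * each level of the guest walk, a write whose target gfn equals the gfn
 * of the table used for that level is a write to the guest's own page
 * tables.
 */
static void detect_self_referential_write(struct fault_info *fault,
					  const struct guest_walk *walk)
{
	for (int level = walk->level; level >= 1; level--) {
		gfn_t table_gfn = walk->table_gfn[level - 1];

		if (fault->write && table_gfn == fault->gfn)
			fault->write_fault_to_shadow_pgtable = true;
	}
}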
Diffstat (limited to 'arch/x86/kvm/mmu/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/mmu/paging_tmpl.h | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 5d2958299b4f..f57d9074fb9b 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -685,6 +685,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 
 		if (sp != ERR_PTR(-EEXIST))
 			link_shadow_page(vcpu, it.sptep, sp);
+
+		if (fault->write && table_gfn == fault->gfn)
+			fault->write_fault_to_shadow_pgtable = true;
 	}
 
 	kvm_mmu_hugepage_adjust(vcpu, fault);
@@ -741,17 +744,13 @@ out_gpte_changed:
  * created when kvm establishes shadow page table that stop kvm using large
  * page size. Do it early can avoid unnecessary #PF and emulation.
  *
- * @write_fault_to_shadow_pgtable will return true if the fault gfn is
- * currently used as its page table.
- *
  * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok
  * since the PDPT is always shadowed, that means, we can not use large page
  * size to map the gfn which is used as PDPT.
  */
 static bool
 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
-			      struct guest_walker *walker, bool user_fault,
-			      bool *write_fault_to_shadow_pgtable)
+			      struct guest_walker *walker, bool user_fault)
 {
 	int level;
 	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
@@ -765,7 +764,6 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
 		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
 
 		self_changed |= !(gfn & mask);
-		*write_fault_to_shadow_pgtable |= !gfn;
 	}
 
 	return self_changed;
@@ -826,7 +824,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 		return r;
 
 	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
-	      &walker, fault->user, &fault->write_fault_to_shadow_pgtable);
+	      &walker, fault->user);
 
 	if (is_self_change_mapping)
 		fault->max_level = PG_LEVEL_4K;
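With the write detection gone, what remains of FNAME(is_self_change_mapping)
is only the hugepage clamp: if the faulting gfn and any guest table gfn fall
within the same would-be hugepage region, the caller limits the mapping to
4K (fault->max_level = PG_LEVEL_4K). Below is a minimal standalone C sketch
of that mask test; PAGES_PER_HPAGE and the function's flat parameter list are
simplified stand-ins for KVM_PAGES_PER_HPAGE and the guest_walker fields, not
the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* Simplified stand-in for KVM_PAGES_PER_HPAGE: 512 4K pages per 2M page, etc. */
#define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

/*
 * Sketch of the surviving check: XORing the faulting gfn with a table gfn
 * yields zero in the bits covered by `mask` exactly when both fall inside
 * the same hugepage region at the mapping level, i.e. a hugepage mapping
 * would cover (and thus write-protect) part of its own page tables.
 */
static bool is_self_change_mapping(gfn_t fault_gfn, const gfn_t *table_gfn,
				   int map_level, int max_level)
{
	gfn_t mask = ~(PAGES_PER_HPAGE(map_level) - 1);
	bool self_changed = false;

	for (int level = map_level; level <= max_level; level++) {
		gfn_t gfn = fault_gfn ^ table_gfn[level - 1];

		self_changed |= !(gfn & mask);
	}
	return self_changed;
}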