path: root/arch/x86/kvm/mmu/mmu_internal.h
author	Sean Christopherson <seanjc@google.com>	2024-02-27 18:41:46 -0800
committer	Paolo Bonzini <pbonzini@redhat.com>	2024-05-07 11:59:23 -0400
commit	f3310e622f77979f13a36f6bfaf0252eecc7b9f6 (patch)
tree	76d25631b2ab51ee9adf8ccf58f3f55727275b6f /arch/x86/kvm/mmu/mmu_internal.h
parent	36d4492765fe74d69f91b0f2dae7340c03665649 (diff)
KVM: x86/mmu: Initialize kvm_page_fault's pfn and hva to error values
Explicitly set "pfn" and "hva" to error values in kvm_mmu_do_page_fault() to harden KVM against using "uninitialized" values. In quotes because the fields are actually zero-initialized, and zero is a legal value for both page frame numbers and virtual addresses. E.g. failure to set "pfn" prior to creating an SPTE could result in KVM pointing at physical address '0', which is far less desirable than KVM generating an SPTE with reserved PA bits set and thus effectively killing the VM.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Message-ID: <20240228024147.41573-16-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
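The hardening pattern here is broadly applicable: when zero is a legal value for a field, zero-initialization cannot distinguish "never set" from "set to zero", whereas sentinel error values make a missed assignment fail loudly instead of silently using address 0. Below is a minimal user-space C sketch of the idea; the struct, the PFN_ERR_FAULT/HVA_ERR_BAD constants, and the install_mapping() helper are hypothetical illustrations of the pattern, not KVM code.

```c
/*
 * Illustrative sketch only (not KVM code): sentinel initialization
 * catches "forgot to set this field" bugs that zero-init would hide.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PFN_ERR_FAULT  ((uint64_t)-1)   /* stand-in for an error PFN value */
#define HVA_ERR_BAD    ((uint64_t)-2)   /* stand-in for an error HVA value */

struct page_fault {
	uint64_t pfn;
	uint64_t hva;
};

static bool install_mapping(const struct page_fault *fault)
{
	/*
	 * With sentinel values, a path that never resolved the fault is
	 * detected here instead of silently mapping physical address 0.
	 */
	if (fault->pfn == PFN_ERR_FAULT || fault->hva == HVA_ERR_BAD) {
		fprintf(stderr, "refusing to install unresolved mapping\n");
		return false;
	}
	printf("mapping pfn=%#llx hva=%#llx\n",
	       (unsigned long long)fault->pfn,
	       (unsigned long long)fault->hva);
	return true;
}

int main(void)
{
	/* Designated initializers, mirroring the pattern in the patch. */
	struct page_fault fault = {
		.pfn = PFN_ERR_FAULT,
		.hva = HVA_ERR_BAD,
	};

	/* A buggy path that forgets to fill in pfn/hva now fails loudly. */
	install_mapping(&fault);
	return 0;
}
```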
Diffstat (limited to 'arch/x86/kvm/mmu/mmu_internal.h')
-rw-r--r--	arch/x86/kvm/mmu/mmu_internal.h	| 3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index dfd9ff383663..ce2fcd19ba6b 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -307,6 +307,9 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
.req_level = PG_LEVEL_4K,
.goal_level = PG_LEVEL_4K,
.is_private = err & PFERR_PRIVATE_ACCESS,
+
+ .pfn = KVM_PFN_ERR_FAULT,
+ .hva = KVM_HVA_ERR_BAD,
};
int r;