author		Ben Gardon <bgardon@google.com>		2020-10-14 11:26:49 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2020-10-21 18:17:02 -0400
commit		7d94531249a54b822f1a8b20d8a8f8d59ad1d985 (patch)
tree		5a8c0e04b34a8e581cb73793ea579f9bfa8c21df /arch/x86
parent		faaf05b00aecdb347ffd1d763d024394ec0329f8 (diff)
kvm: x86/mmu: Remove disallowed_hugepage_adjust shadow_walk_iterator arg
In order to avoid creating executable hugepages in the TDP MMU PF
handler, remove the dependency between disallowed_hugepage_adjust and
the shadow_walk_iterator. This will open the function up to being used
by the TDP MMU PF handler in a future patch.

Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell
machine. This series introduced no new failures.

This series can be viewed in Gerrit at:
	https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20201014182700.2888246-10-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
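The core of the change is the function's signature: instead of taking the
shadow-walk iterator by value, the helper now takes only the two values it
actually used from it. A minimal before/after sketch (prototypes taken from
the patch below, bodies elided):

	/* Before: tied to the legacy MMU's shadow-walk iterator. */
	static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
					       gfn_t gfn, kvm_pfn_t *pfnp, int *levelp);

	/* After: callers pass the raw SPTE and the current walk level, so a
	 * page-fault handler that has no shadow_walk_iterator (such as the
	 * planned TDP MMU handler) can call it too.
	 */
	static void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
					       kvm_pfn_t *pfnp, int *levelp);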
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu/mmu.c			13
-rw-r--r--	arch/x86/kvm/mmu/paging_tmpl.h		 3
2 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index dd15e519c361..6f22c155381d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2818,13 +2818,12 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return level;
 }
 
-static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
-				       gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
+static void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
+				       kvm_pfn_t *pfnp, int *levelp)
 {
 	int level = *levelp;
-	u64 spte = *it.sptep;
 
-	if (it.level == level && level > PG_LEVEL_4K &&
+	if (cur_level == level && level > PG_LEVEL_4K &&
 	    is_shadow_present_pte(spte) &&
 	    !is_large_pte(spte)) {
 		/*
@@ -2834,7 +2833,8 @@ static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
 		 * patching back for them into pfn the next 9 bits of
 		 * the address.
 		 */
-		u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1);
+		u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
+				KVM_PAGES_PER_HPAGE(level - 1);
 		*pfnp |= gfn & page_mask;
 		(*levelp)--;
 	}
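To see why this mask picks out exactly the "next 9 bits" the comment refers
to, a quick worked example (a sketch, assuming the usual x86 values
KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) == 512 and KVM_PAGES_PER_HPAGE(PG_LEVEL_1G)
== 512 * 512, which follow from the 9-bit page-table indices):

	/* Dropping from a 2M mapping to 4K: mask = 512 - 1 = 0x1ff, the
	 * nine gfn bits that index a 4K page within the 2M region.
	 */
	u64 mask_2m = 512 - 1;				/* 0x1ff   */

	/* Dropping from 1G to 2M: mask = 512*512 - 512 = 0x3fe00, the
	 * next nine bits up, which index a 2M region within the 1G one.
	 */
	u64 mask_1g = (512ULL * 512) - 512;		/* 0x3fe00 */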
@@ -2867,7 +2867,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 		 * large page, as the leaf could be executable.
 		 */
 		if (nx_huge_page_workaround_enabled)
-			disallowed_hugepage_adjust(it, gfn, &pfn, &level);
+			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
+						   &pfn, &level);
 
 		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 		if (it.level == level)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 9a1a15f19beb..50e268eb8e1a 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -695,7 +695,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
 		 * large page, as the leaf could be executable.
 		 */
 		if (nx_huge_page_workaround_enabled)
-			disallowed_hugepage_adjust(it, gw->gfn, &pfn, &level);
+			disallowed_hugepage_adjust(*it.sptep, gw->gfn, it.level,
+						   &pfn, &level);
 
 		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 		if (it.level == level)
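Both existing call sites now dereference the iterator themselves and pass
plain values. The payoff is that a caller without an iterator can use the
helper; a hypothetical sketch only (the real TDP MMU call site lands in a
later patch of this series, and the variable names spte, gfn, level, pfn and
goal_level here are illustrative, not from the patch):

	/* Any fault handler that tracks the current SPTE and walk level
	 * can now apply the NX hugepage workaround directly, with no
	 * shadow_walk_iterator involved.
	 */
	if (nx_huge_page_workaround_enabled)
		disallowed_hugepage_adjust(spte, gfn, level, &pfn, &goal_level);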