| author | Paolo Bonzini <pbonzini@redhat.com> | 2022-02-10 07:33:38 -0500 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2022-04-29 12:49:55 -0400 |
| commit | 362505deb8e2ce27993a1dee0b3c5755d4d6646e (patch) | |
| tree | 24d7ab5693d56ec0e5ba33f7ac869e2cbaf6aadf /arch/x86/kvm/mmu | |
| parent | f417e1459a1299bb4984cbffc03d8746cab9f8a8 (diff) | |
KVM: x86/mmu: store shadow EFER.NX in the MMU role
Now that the MMU role is separate from the CPU role, it can be a
truthful description of the format of the shadow pages. This includes
whether the shadow pages use the NX bit; so force the efer_nx field of
the MMU role to 1 when TDP is disabled, and remove the hardcoding of it
in the callers of reset_shadow_zero_bits_mask.
In fact, the initialization of reserved SPTE bits can now be made common
to shadow paging and shadow NPT; move it to shadow_mmu_init_context.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
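
For readers who want the idea in isolation, here is a minimal, self-contained C sketch of the pattern the patch moves to: the role that describes the shadow pages carries an explicit efer_nx bit, the role-calculation helper forces it to 1 when TDP is disabled, and the reserved-bit computation simply trusts the role instead of re-deriving NX usage from tdp_enabled. The names used here (struct mmu_role, calc_shadow_mmu_role, compute_rsvd_bits, the tdp_enabled flag) are illustrative stand-ins, not KVM's actual definitions.

```c
/*
 * Illustrative sketch only; mmu_role, calc_shadow_mmu_role,
 * compute_rsvd_bits and tdp_enabled are stand-ins, not KVM's code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool tdp_enabled;                /* stand-in for KVM's tdp_enabled flag */

struct mmu_role {
	unsigned int efer_nx : 1;       /* do the shadow pages use the NX bit? */
	unsigned int level   : 4;       /* shadow paging level */
};

/*
 * Role calculation: the role describes the shadow page format, so it can
 * state outright that NX is used whenever TDP is disabled, instead of
 * leaving consumers to special-case !tdp_enabled later.
 */
static struct mmu_role calc_shadow_mmu_role(bool guest_efer_nx)
{
	struct mmu_role role = { 0 };

	role.level = 4;
	/* NX is forced on when TDP is disabled; reflect it in the role. */
	role.efer_nx = guest_efer_nx || !tdp_enabled;
	return role;
}

/*
 * Consumer: the reserved-bit computation reads NX usage straight from
 * the role; no "uses_nx = ... || !tdp_enabled" hardcoding needed here.
 */
static uint64_t compute_rsvd_bits(struct mmu_role role)
{
	uint64_t rsvd = 0;

	if (!role.efer_nx)
		rsvd |= 1ULL << 63;     /* PTE bit 63 is reserved when NX is not in use */
	return rsvd;
}

int main(void)
{
	tdp_enabled = false;            /* shadow paging: NX ends up forced on */
	struct mmu_role role = calc_shadow_mmu_role(false);

	printf("efer_nx=%u rsvd=%#llx\n", (unsigned)role.efer_nx,
	       (unsigned long long)compute_rsvd_bits(role));
	return 0;
}
```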
Diffstat (limited to 'arch/x86/kvm/mmu')
-rw-r--r-- | arch/x86/kvm/mmu/mmu.c | 23 |
1 file changed, 12 insertions(+), 11 deletions(-)
```diff
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3f4777849040..6a18660616f4 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4473,16 +4473,6 @@ static inline u64 reserved_hpa_bits(void)
 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
                                         struct kvm_mmu *context)
 {
-        /*
-         * KVM uses NX when TDP is disabled to handle a variety of scenarios,
-         * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
-         * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
-         * The iTLB multi-hit workaround can be toggled at any time, so assume
-         * NX can be used by any non-nested shadow MMU to avoid having to reset
-         * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
-         */
-        bool uses_nx = is_efer_nx(context) || !tdp_enabled;
-
         /* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */
         bool is_amd = true;
         /* KVM doesn't use 2-level page tables for the shadow MMU. */
@@ -4494,7 +4484,8 @@ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 
         shadow_zero_check = &context->shadow_zero_check;
         __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
-                                context->shadow_root_level, uses_nx,
+                                context->shadow_root_level,
+                                context->mmu_role.base.efer_nx,
                                 guest_can_use_gbpages(vcpu), is_pse, is_amd);
 
         if (!shadow_me_mask)
@@ -4858,6 +4849,16 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
         else
                 role.base.level = PT64_ROOT_4LEVEL;
 
+        /*
+         * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
+         * KVM uses NX when TDP is disabled to handle a variety of scenarios,
+         * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
+         * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
+         * The iTLB multi-hit workaround can be toggled at any time, so assume
+         * NX can be used by any non-nested shadow MMU to avoid having to reset
+         * MMU contexts.
+         */
+        role.base.efer_nx = true;
         return role;
 }
```