author    Paolo Bonzini <pbonzini@redhat.com>  2020-10-21 18:05:58 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2020-10-21 18:05:58 -0400
commit    c0623f5e5d21d65d08f6276ca4e64d470c833761 (patch)
tree      5bc1f615341a08b0d2bd9af5a893ae65f8499f05 /arch/x86/kvm/x86.c
parent    a4f1d94e6bc1b00c4efa9655ad14e0d49b8f1e37 (diff)
parent    e2e1a1c86bf32a8d7458b9024f518cf2434414c8 (diff)
Merge branch 'kvm-fixes' into 'next'
Pick up bugfixes from 5.9, otherwise various tests fail.
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 51f75c21c86f..397f599b20e5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -194,7 +194,7 @@ static struct kvm_user_return_msrs __percpu *user_return_msrs;
u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);
-bool __read_mostly allow_smaller_maxphyaddr;
+bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
static u64 __read_mostly host_xss;
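
Side note on the hunk above: in C, file-scope objects have static storage duration and are therefore zero-initialized, so adding "= 0" does not change behavior; it only documents that the feature defaults to off. A minimal stand-alone illustration (not kernel code):

#include <assert.h>
#include <stdbool.h>

bool implicit_flag;     /* static storage duration: zero-initialized to false */
bool explicit_flag = 0; /* same value, but the default is spelled out */

int main(void)
{
	assert(implicit_flag == explicit_flag);
	return 0;
}
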
@@ -982,6 +982,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
X86_CR4_SMEP;
+ unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
if (kvm_valid_cr4(vcpu, cr4))
return 1;
@@ -1009,7 +1010,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (kvm_x86_ops.set_cr4(vcpu, cr4))
return 1;
- if (((cr4 ^ old_cr4) & pdptr_bits) ||
+ if (((cr4 ^ old_cr4) & mmu_role_bits) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
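
For readers skimming the two kvm_set_cr4() hunks above: the fix widens the set of CR4 bits whose toggling forces kvm_mmu_reset_context(), since SMAP and PKE feed into the MMU role even though they play no part in PDPTR loading. A hedged, stand-alone sketch of the resulting decision logic (cr4_needs_mmu_reset is a name invented for this sketch; the CR4 bit positions follow the Intel SDM):

#include <stdbool.h>

#define X86_CR4_PSE   (1UL << 4)
#define X86_CR4_PAE   (1UL << 5)
#define X86_CR4_PGE   (1UL << 7)
#define X86_CR4_PCIDE (1UL << 17)
#define X86_CR4_SMEP  (1UL << 20)
#define X86_CR4_SMAP  (1UL << 21)
#define X86_CR4_PKE   (1UL << 22)

static bool cr4_needs_mmu_reset(unsigned long old_cr4, unsigned long cr4)
{
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
				   X86_CR4_SMEP;
	/* SMAP and PKE shape the MMU role, so flipping them must also reset. */
	unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;

	/* Reset when a role bit changes, or when PCIDE is cleared. */
	return ((cr4 ^ old_cr4) & mmu_role_bits) ||
	       (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE));
}
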
@@ -3400,9 +3401,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
* even when not intercepted. AMD manual doesn't explicitly
* state this but appears to behave the same.
*
- * Unconditionally return L1's TSC offset on userspace reads
- * so that userspace reads and writes always operate on L1's
- * offset, e.g. to ensure deterministic behavior for migration.
+ * On userspace reads and writes, however, we unconditionally
+ * return L1's TSC value to ensure backwards-compatible
+ * behavior for migration.
*/
u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
vcpu->arch.tsc_offset;
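
A note on the hunk above: host_initiated distinguishes userspace accesses (e.g. a VMM saving MSRs for migration) from guest reads, so userspace always observes the TSC relative to L1's offset even while an L2 guest is running. A hedged sketch of how userspace might perform such a read through the KVM_GET_MSRS vCPU ioctl (read_guest_tsc and vcpu_fd are names assumed for this example):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_IA32_TSC 0x00000010

/* Returns 0 on success and stores the guest-visible TSC value in *tsc. */
static int read_guest_tsc(int vcpu_fd, uint64_t *tsc)
{
	struct {
		struct kvm_msrs hdr;        /* header: number of MSRs */
		struct kvm_msr_entry entry; /* single MSR slot */
	} req;

	memset(&req, 0, sizeof(req));
	req.hdr.nmsrs = 1;
	req.entry.index = MSR_IA32_TSC;

	/* KVM_GET_MSRS returns the number of MSRs successfully read;
	 * such an access is host_initiated from the kernel's point of view. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &req) != 1)
		return -1;

	*tsc = req.entry.data;
	return 0;
}
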