author	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-15 09:05:39 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-15 09:05:39 -0800
commit	74bc8acd6cb4f5cee25fcfe4e9afb06f75949081 (patch)
tree	8bc5ffb73324a4758fb5a8c985abfdd17d75b63a /arch
parent	5b675f7362ba9debfd6c3e38edda197b35a29f0f (diff)
parent	9cb09e7c1c9af2968d5186ef9085f05641ab65d9 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM fixes from Paolo Bonzini:

 - fixes for CONFIG_KVM_COMPAT=n

 - two updates to the IFU erratum

 - selftests build fix

 - brown paper bag fix

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: Add a comment describing the /dev/kvm no_compat handling
  KVM: x86/mmu: Take slots_lock when using kvm_mmu_zap_all_fast()
  KVM: Forbid /dev/kvm being opened by a compat task when CONFIG_KVM_COMPAT=n
  KVM: X86: Reset the three MSR list number variables to 0 in kvm_init_msr_list()
  selftests: kvm: fix build with glibc >= 2.30
  kvm: x86: disable shattered huge page recovery for PREEMPT_RT.
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c	10
-rw-r--r--	arch/x86/kvm/x86.c	4
2 files changed, 11 insertions, 3 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fd6012eef9c9..2ce9da58611e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -51,7 +51,12 @@
 extern bool itlb_multihit_kvm_mitigation;
 static int __read_mostly nx_huge_pages = -1;
+#ifdef CONFIG_PREEMPT_RT
+/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
+static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
+#else
 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
+#endif
 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
@@ -6280,14 +6285,13 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 	if (new_val != old_val) {
 		struct kvm *kvm;
-		int idx;
 		mutex_lock(&kvm_lock);
 		list_for_each_entry(kvm, &vm_list, vm_list) {
-			idx = srcu_read_lock(&kvm->srcu);
+			mutex_lock(&kvm->slots_lock);
 			kvm_mmu_zap_all_fast(kvm);
-			srcu_read_unlock(&kvm->srcu, idx);
+			mutex_unlock(&kvm->slots_lock);
 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
 		}
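
The new PREEMPT_RT default in the hunk above only changes the initial value of nx_huge_pages_recovery_ratio; the knob itself remains a writable module parameter. The sketch below is not part of this diff: it shows roughly how such a parameter is typically wired up with module_param_cb(). The ops-struct name is an assumption here, and the real registration lives elsewhere in arch/x86/kvm/mmu.c.

#include <linux/moduleparam.h>

/* Sketch only, assuming the usual module_param_cb() wiring. */
static const struct kernel_param_ops recovery_ratio_ops = {
	.set = set_nx_huge_pages_recovery_ratio,	/* validate and apply a new ratio */
	.get = param_get_uint,				/* report the current ratio */
};

module_param_cb(nx_huge_pages_recovery_ratio, &recovery_ratio_ops,
		&nx_huge_pages_recovery_ratio, 0644);

With the ratio defaulting to 0 on PREEMPT_RT, the recovery pass is effectively disabled until an administrator writes a non-zero value to /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio.
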
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7db5c8ef35dd..5d530521f11d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5130,6 +5130,10 @@ static void kvm_init_msr_list(void)
 	perf_get_x86_pmu_capability(&x86_pmu);
+	num_msrs_to_save = 0;
+	num_emulated_msrs = 0;
+	num_msr_based_features = 0;
+
 	for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
 		if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
 			continue;
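
The lines added to kvm_init_msr_list() matter because the MSR lists are built by appending behind num_msrs_to_save, num_emulated_msrs and num_msr_based_features; if the function can run more than once (for example when a vendor module is initialized again), a stale count would leave the previous run's entries in place and append behind them. A standalone, hypothetical C illustration of the pattern (made-up names and values, not kernel code):

#include <stdio.h>

#define MAX_MSRS 16

static unsigned int msrs[MAX_MSRS];
static int num_msrs;

/* stand-in for rdmsr_safe(): pretend only even-numbered MSRs exist */
static int msr_supported(unsigned int msr)
{
	return (msr & 1) == 0;
}

static void init_list(void)
{
	static const unsigned int candidates[] = { 0x10, 0x1b, 0x48, 0x10a };
	unsigned int i;

	num_msrs = 0;	/* the fix: start from an empty list on every call */

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		if (!msr_supported(candidates[i]))
			continue;
		msrs[num_msrs++] = candidates[i];
	}
}

int main(void)
{
	init_list();
	init_list();	/* a second initialization pass */
	printf("%d MSRs listed\n", num_msrs);	/* prints 3; without the reset it would be 6 */
	return 0;
}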