Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c | 68
1 file changed, 49 insertions(+), 19 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 439d3b4cd1a9..7851f3a1b5f7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -235,9 +235,13 @@ static void ack_flush(void *_completed)
 {
 }

-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 {
-        if (unlikely(!cpus))
+        const struct cpumask *cpus;
+
+        if (likely(cpumask_available(tmp)))
+                cpus = tmp;
+        else
                 cpus = cpu_online_mask;

         if (cpumask_empty(cpus))
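As a rough userspace illustration of what this hunk changes: when the per-call cpumask was never allocated, the kick path now falls back to the online mask (a safe superset) rather than treating the missing mask as an error. In the sketch below, struct cpumask, mask_available(), and the 8-CPU bit layout are simplified stand-ins for the kernel API, not the real implementation:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define NR_CPUS 8

    struct cpumask { unsigned long bits; };

    static const struct cpumask online_mask = { .bits = 0xff };  /* CPUs 0-7 */

    /* Stand-in for cpumask_available(); NULL models a failed allocation. */
    static bool mask_available(const struct cpumask *m) { return m != NULL; }

    static bool kick_many_cpus(const struct cpumask *tmp, bool wait)
    {
        /* Fall back to the online mask (a safe superset) if tmp is missing. */
        const struct cpumask *cpus = mask_available(tmp) ? tmp : &online_mask;
        int cpu, sent = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            if (cpus->bits & (1UL << cpu)) {
                printf("IPI -> cpu %d%s\n", cpu, wait ? " (wait)" : "");
                sent++;
            }
        }
        return sent > 0;
    }

    int main(void)
    {
        struct cpumask two = { .bits = 0x6 };  /* CPUs 1 and 2 */

        kick_many_cpus(&two, false);  /* targeted kick */
        kick_many_cpus(NULL, true);   /* allocation failed: kick all online */
        return 0;
    }

Kicking every online CPU is wasteful but always correct, which is why the allocation failure can be non-fatal.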
@@ -263,14 +267,34 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                         continue;

                 kvm_make_request(req, vcpu);
-                cpu = vcpu->cpu;

                 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
                         continue;

-                if (tmp != NULL && cpu != -1 && cpu != me &&
-                    kvm_request_needs_ipi(vcpu, req))
-                        __cpumask_set_cpu(cpu, tmp);
+                /*
+                 * tmp can be "unavailable" if cpumasks are allocated off stack
+                 * as allocation of the mask is deliberately not fatal and is
+                 * handled by falling back to kicking all online CPUs.
+                 */
+                if (!cpumask_available(tmp))
+                        continue;
+
+                /*
+                 * Note, the vCPU could get migrated to a different pCPU at any
+                 * point after kvm_request_needs_ipi(), which could result in
+                 * sending an IPI to the previous pCPU.  But, that's ok because
+                 * the purpose of the IPI is to ensure the vCPU returns to
+                 * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates.
+                 * Entering READING_SHADOW_PAGE_TABLES after this point is also
+                 * ok, as the requirement is only that KVM wait for vCPUs that
+                 * were reading SPTEs _before_ any changes were finalized.  See
+                 * kvm_vcpu_kick() for more details on handling requests.
+                 */
+                if (kvm_request_needs_ipi(vcpu, req)) {
+                        cpu = READ_ONCE(vcpu->cpu);
+                        if (cpu != -1 && cpu != me)
+                                __cpumask_set_cpu(cpu, tmp);
+                }
         }

         called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
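The comment above hinges on vcpu->cpu being read exactly once. A minimal C11 sketch of that snapshot idiom, with a relaxed atomic load standing in for the kernel's READ_ONCE(); the vcpu struct and the mark_for_ipi() helper are hypothetical:

    #include <stdatomic.h>
    #include <stdio.h>

    struct vcpu { _Atomic int cpu; };  /* -1 while not loaded on any pCPU */

    static void mark_for_ipi(struct vcpu *v, int me, unsigned long *mask)
    {
        /*
         * Read vcpu->cpu exactly once, as READ_ONCE() does in the hunk
         * above, so the -1/me checks and the mask update cannot observe
         * two different values if the vCPU migrates concurrently.
         */
        int cpu = atomic_load_explicit(&v->cpu, memory_order_relaxed);

        if (cpu != -1 && cpu != me)
            *mask |= 1UL << cpu;
    }

    int main(void)
    {
        struct vcpu v;
        unsigned long mask = 0;

        atomic_init(&v.cpu, 3);
        mark_for_ipi(&v, 0, &mask);     /* marks pCPU 3 */
        atomic_store(&v.cpu, -1);
        mark_for_ipi(&v, 0, &mask);     /* vCPU not loaded: no-op */
        printf("mask = %#lx\n", mask);  /* prints 0x8 */
        return 0;
    }

Snapshotting once keeps the check and the use consistent; as the kernel comment notes, an IPI sent to a stale pCPU after a concurrent migration is benign.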
@@ -302,13 +326,8 @@ EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-        /*
-         * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
-         * kvm_make_all_cpus_request.
-         */
-        long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
-
         ++kvm->stat.generic.remote_tlb_flush_requests;
+
         /*
          * We want to publish modifications to the page tables before reading
          * mode. Pairs with a memory barrier in arch-specific code.
@@ -323,7 +342,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
         if (!kvm_arch_flush_remote_tlb(kvm)
             || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                 ++kvm->stat.generic.remote_tlb_flush;
-        cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
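With the tlbs_dirty bookkeeping removed, the flush path reduces to: count the request, try the arch-specific flush, and fall back to per-vCPU requests. A compilable sketch of that control flow, where arch_flush_remote_tlb() (0 on success) and make_all_cpus_request() (true if anyone was kicked) are hypothetical stand-ins for the kernel functions:

    #include <stdbool.h>
    #include <stdio.h>

    struct kvm_stats { unsigned long flush_requests, flushes; };

    /* Hypothetical stand-in: 0 means the arch flushed all TLBs itself. */
    static int arch_flush_remote_tlb(void) { return -1; }

    /* Hypothetical stand-in: true if at least one vCPU had to be kicked. */
    static bool make_all_cpus_request(void) { return true; }

    static void flush_remote_tlbs(struct kvm_stats *stat)
    {
        stat->flush_requests++;         /* every request is counted... */

        /* Try the arch-specific flush first, else fall back to requests. */
        if (!arch_flush_remote_tlb() || make_all_cpus_request())
            stat->flushes++;            /* ...but only real flushes here */
    }

    int main(void)
    {
        struct kvm_stats stat = { 0, 0 };

        flush_remote_tlbs(&stat);
        printf("requests=%lu flushes=%lu\n",
               stat.flush_requests, stat.flushes);
        return 0;
    }

Separating the two counters distinguishes "a flush was asked for" from "a flush was actually performed".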
@@ -528,7 +546,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
                 }
         }

-        if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
+        if (range->flush_on_ret && ret)
                 kvm_flush_remote_tlbs(kvm);

         if (locked)
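Correspondingly, the range handler's flush decision now depends only on what the current invocation changed, not on a shared dirty counter. A toy sketch of the simplified condition, with hva_range and handler() as stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    struct hva_range { bool flush_on_ret; };

    /* Hypothetical stand-in for the notifier handler: true if SPTEs changed. */
    static bool handler(void) { return true; }

    static void handle_hva_range(const struct hva_range *range)
    {
        bool ret = handler();

        /* Flush only if this invocation itself changed something. */
        if (range->flush_on_ret && ret)
            printf("flush remote TLBs\n");
    }

    int main(void)
    {
        struct hva_range range = { .flush_on_ret = true };

        handle_hva_range(&range);
        return 0;
    }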
@@ -3134,15 +3152,19 @@ out:
 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
-        unsigned int old, val, shrink;
+        unsigned int old, val, shrink, grow_start;

         old = val = vcpu->halt_poll_ns;
         shrink = READ_ONCE(halt_poll_ns_shrink);
+        grow_start = READ_ONCE(halt_poll_ns_grow_start);
         if (shrink == 0)
                 val = 0;
         else
                 val /= shrink;

+        if (val < grow_start)
+                val = 0;
+
         vcpu->halt_poll_ns = val;
         trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
 }
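A standalone sketch of the new shrink logic with worked numbers; it mirrors the hunk above but takes the module parameters as plain arguments for easy testing:

    #include <stdio.h>

    static unsigned int shrink_halt_poll_ns(unsigned int val,
                                            unsigned int shrink,
                                            unsigned int grow_start)
    {
        if (shrink == 0)
            val = 0;
        else
            val /= shrink;

        /* Below grow_start, stop polling outright instead of limping along. */
        if (val < grow_start)
            val = 0;

        return val;
    }

    int main(void)
    {
        /* 20000 / 2 = 10000 >= 10000: keep polling at 10000ns. */
        printf("%u\n", shrink_halt_poll_ns(20000, 2, 10000));
        /* 8000 / 2 = 4000 < 10000: clamp to 0, polling is disabled. */
        printf("%u\n", shrink_halt_poll_ns(8000, 2, 10000));
        return 0;
    }

The apparent rationale: the grow path starts from halt_poll_ns_grow_start anyway, so a window that has shrunk below it is a dead zone that burns CPU without ever being a useful poll interval.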
@@ -3290,16 +3312,24 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
  */
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
-        int me;
-        int cpu = vcpu->cpu;
+        int me, cpu;

         if (kvm_vcpu_wake_up(vcpu))
                 return;

+        /*
+         * Note, the vCPU could get migrated to a different pCPU at any point
+         * after kvm_arch_vcpu_should_kick(), which could result in sending an
+         * IPI to the previous pCPU.  But, that's ok because the purpose of the
+         * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
+         * vCPU also requires it to leave IN_GUEST_MODE.
+         */
         me = get_cpu();
-        if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
-                if (kvm_arch_vcpu_should_kick(vcpu))
+        if (kvm_arch_vcpu_should_kick(vcpu)) {
+                cpu = READ_ONCE(vcpu->cpu);
+                if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                         smp_send_reschedule(cpu);
+        }
         put_cpu();
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
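A sketch of the reordered kick path: decide whether a kick is needed first, then snapshot vcpu->cpu once and validate the snapshot just before sending the IPI. should_kick() and the atomic field are hypothetical stand-ins for kvm_arch_vcpu_should_kick() and READ_ONCE(vcpu->cpu):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPU_IDS 8

    struct vcpu { _Atomic int cpu; };

    /* Hypothetical stand-in for kvm_arch_vcpu_should_kick(). */
    static bool should_kick(struct vcpu *v) { (void)v; return true; }

    static void vcpu_kick(struct vcpu *v, int me)
    {
        /* Decide first, then snapshot vcpu->cpu once (READ_ONCE above). */
        if (should_kick(v)) {
            int cpu = atomic_load_explicit(&v->cpu, memory_order_relaxed);

            /* Validate the snapshot, not a value that may have changed. */
            if (cpu != me && (unsigned)cpu < NR_CPU_IDS)
                printf("smp_send_reschedule(%d)\n", cpu);
        }
    }

    int main(void)
    {
        struct vcpu v;

        atomic_init(&v.cpu, 5);
        vcpu_kick(&v, 0);  /* kicks pCPU 5 */
        atomic_store(&v.cpu, -1);
        vcpu_kick(&v, 0);  /* not loaded: (unsigned)-1 fails the bound */
        return 0;
    }

As in the kernel code, the (unsigned) cast makes the cpu == -1 case fail the bounds check for free, and a stale snapshot is harmless because migrating off a pCPU already forces the vCPU out of IN_GUEST_MODE.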