-rw-r--r--	include/linux/kvm_host.h	13
-rw-r--r--	virt/kvm/kvm_main.c	22
2 files changed, 34 insertions(+), 1 deletion(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f30b53a07917..492d183dd7d0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -354,6 +354,13 @@ struct kvm_vcpu {
 	struct kvm_vcpu_stat stat;
 	char stats_id[KVM_STATS_NAME_SIZE];
 	struct kvm_dirty_ring dirty_ring;
+
+	/*
+	 * The index of the most recently used memslot by this vCPU. It's ok
+	 * if this becomes stale due to memslot changes since we always check
+	 * it is a valid slot.
+	 */
+	int last_used_slot;
 };
 
 /* must be called with irqs disabled */
@@ -1200,6 +1207,12 @@ try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn)
 	if (slot_index < 0 || slot_index >= slots->used_slots)
 		return NULL;
 
+	/*
+	 * slot_index can come from vcpu->last_used_slot which is not kept
+	 * in sync with userspace-controllable memslot deletion. So use nospec
+	 * to prevent the CPU from speculating past the end of memslots[].
+	 */
+	slot_index = array_index_nospec(slot_index, slots->used_slots);
 	slot = &slots->memslots[slot_index];
 
 	if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
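
For context on the hunk above: array_index_nospec() clamps a potentially attacker-influenced index to the array bounds with a branchless mask, so even if the CPU mispredicts the preceding bounds check it cannot speculatively read past the end of memslots[]. Below is a minimal userspace sketch of that idiom; index_nospec() is a simplified stand-in for the kernel macro, not its real (architecture-tuned) implementation.

#include <stddef.h>
#include <stdio.h>

/*
 * Simplified stand-in for array_index_nospec(): returns index when
 * index < size and 0 otherwise, using a data-dependent mask instead
 * of a branch the CPU could mispredict. Assumes two's complement and
 * an arithmetic right shift on signed values.
 */
static size_t index_nospec(size_t index, size_t size)
{
	/* All ones when index < size (the difference is negative), else 0. */
	size_t mask = (size_t)((ptrdiff_t)(index - size) >>
			       (sizeof(ptrdiff_t) * 8 - 1));

	return index & mask;
}

int main(void)
{
	int memslots[4] = { 11, 22, 33, 44 };
	size_t stale = 7;	/* e.g. a cached slot index after slots shrank */

	/* The architectural bounds check still rejects the stale index... */
	if (stale < 4)
		printf("%d\n", memslots[index_nospec(stale, 4)]);
	else
		printf("index %zu rejected\n", stale);

	/*
	 * ...and even if that branch is mispredicted, index_nospec() forces
	 * any speculative load to land inside memslots[] (at index 0 here).
	 */
	return 0;
}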
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1984c7389787..30d322519253 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -415,6 +415,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 	vcpu->preempted = false;
 	vcpu->ready = false;
 	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
+	vcpu->last_used_slot = 0;
 }
 
 void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -2025,7 +2026,26 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot);
 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
-	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
+	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
+	struct kvm_memory_slot *slot;
+	int slot_index;
+
+	slot = try_get_memslot(slots, vcpu->last_used_slot, gfn);
+	if (slot)
+		return slot;
+
+	/*
+	 * Fall back to searching all memslots. We purposely use
+	 * search_memslots() instead of __gfn_to_memslot() to avoid
+	 * thrashing the VM-wide last_used_index in kvm_memslots.
+	 */
+	slot = search_memslots(slots, gfn, &slot_index);
+	if (slot) {
+		vcpu->last_used_slot = slot_index;
+		return slot;
+	}
+
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
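
Taken together, the kvm_host.h comment and the kvm_vcpu_gfn_to_memslot() rewrite implement a validate-on-use cache: the per-vCPU index is only a hint that may go stale across memslot updates, so it is checked before use on the fast path and refreshed on the slow path. The userspace model below illustrates that flow; struct slot, struct slots, struct vcpu, and lookup() are simplified stand-ins, not the KVM structures or the real search_memslots().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the KVM structures (illustrative only). */
struct slot  { unsigned long base_gfn, npages; };
struct slots { struct slot memslots[8]; int used_slots; };
struct vcpu  { struct slots *slots; int last_used_slot; };

static bool gfn_in_slot(const struct slot *s, unsigned long gfn)
{
	return gfn >= s->base_gfn && gfn < s->base_gfn + s->npages;
}

static struct slot *lookup(struct vcpu *vcpu, unsigned long gfn)
{
	struct slots *slots = vcpu->slots;
	int i = vcpu->last_used_slot;

	/* Fast path: the cached index may be stale, so validate before use. */
	if (i >= 0 && i < slots->used_slots &&
	    gfn_in_slot(&slots->memslots[i], gfn))
		return &slots->memslots[i];

	/* Slow path: full (here linear) search, then refresh the cache. */
	for (i = 0; i < slots->used_slots; i++) {
		if (gfn_in_slot(&slots->memslots[i], gfn)) {
			vcpu->last_used_slot = i;
			return &slots->memslots[i];
		}
	}
	return NULL;
}

int main(void)
{
	struct slots s = { .memslots = { { 0, 16 }, { 16, 16 } }, .used_slots = 2 };
	struct vcpu  v = { .slots = &s, .last_used_slot = 0 };

	/* gfn 20 misses the cached slot 0, so lookup() caches slot 1. */
	struct slot *hit = lookup(&v, 20);

	printf("base_gfn=%lu cached=%d\n", hit ? hit->base_gfn : 0,
	       v.last_used_slot);
	return 0;
}

The reason for a per-vCPU copy of the hint, as the patch's fallback comment notes, is that routing every lookup through the single VM-wide last_used_index would let vCPUs touching different slots thrash each other's cache.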