Diffstat (limited to 'virt/kvm/pfncache.c')
-rw-r--r--  virt/kvm/pfncache.c  62
1 file changed, 46 insertions(+), 16 deletions(-)
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 68ff41d39545..346e47f15572 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -81,6 +81,9 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 {
        struct kvm_memslots *slots = kvm_memslots(kvm);
 
+       if (!gpc->active)
+               return false;
+
        if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
                return false;
@@ -240,10 +243,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        unsigned long page_offset = gpa & ~PAGE_MASK;
-       kvm_pfn_t old_pfn, new_pfn;
+       bool unmap_old = false;
        unsigned long old_uhva;
+       kvm_pfn_t old_pfn;
        void *old_khva;
-       int ret = 0;
+       int ret;
 
        /*
         * It must fit within a single page. The 'len' argument is
@@ -261,6 +265,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 
        write_lock_irq(&gpc->lock);
 
+       if (!gpc->active) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
        old_pfn = gpc->pfn;
        old_khva = gpc->khva - offset_in_page(gpc->khva);
        old_uhva = gpc->uhva;
@@ -291,6 +300,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                /* If the HVA→PFN mapping was already valid, don't unmap it. */
                old_pfn = KVM_PFN_ERR_FAULT;
                old_khva = NULL;
+               ret = 0;
        }
 
 out:
@@ -305,14 +315,15 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                gpc->khva = NULL;
        }
 
-       /* Snapshot the new pfn before dropping the lock! */
-       new_pfn = gpc->pfn;
+       /* Detect a pfn change before dropping the lock! */
+       unmap_old = (old_pfn != gpc->pfn);
 
+out_unlock:
        write_unlock_irq(&gpc->lock);
 
        mutex_unlock(&gpc->refresh_lock);
 
-       if (old_pfn != new_pfn)
+       if (unmap_old)
                gpc_unmap_khva(kvm, old_pfn, old_khva);
 
        return ret;
@@ -346,42 +357,61 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
 
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
+{
+       rwlock_init(&gpc->lock);
+       mutex_init(&gpc->refresh_lock);
+}
+EXPORT_SYMBOL_GPL(kvm_gpc_init);
+
-int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                             struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
-                             gpa_t gpa, unsigned long len)
+int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+                    struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+                    gpa_t gpa, unsigned long len)
 {
        WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
 
        if (!gpc->active) {
-               rwlock_init(&gpc->lock);
-               mutex_init(&gpc->refresh_lock);
-
                gpc->khva = NULL;
                gpc->pfn = KVM_PFN_ERR_FAULT;
                gpc->uhva = KVM_HVA_ERR_BAD;
                gpc->vcpu = vcpu;
                gpc->usage = usage;
                gpc->valid = false;
-               gpc->active = true;
 
                spin_lock(&kvm->gpc_lock);
                list_add(&gpc->list, &kvm->gpc_list);
                spin_unlock(&kvm->gpc_lock);
+
+               /*
+                * Activate the cache after adding it to the list, a concurrent
+                * refresh must not establish a mapping until the cache is
+                * reachable by mmu_notifier events.
+                */
+               write_lock_irq(&gpc->lock);
+               gpc->active = true;
+               write_unlock_irq(&gpc->lock);
        }
        return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
 }
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init);
+EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
-void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 {
        if (gpc->active) {
+               /*
+                * Deactivate the cache before removing it from the list, KVM
+                * must stall mmu_notifier events until all users go away, i.e.
+                * until gpc->lock is dropped and refresh is guaranteed to fail.
+                */
+               write_lock_irq(&gpc->lock);
+               gpc->active = false;
+               write_unlock_irq(&gpc->lock);
+
                spin_lock(&kvm->gpc_lock);
                list_del(&gpc->list);
                spin_unlock(&kvm->gpc_lock);
 
                kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
-               gpc->active = false;
        }
 }
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_destroy);
+EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
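
Editor's note: the hunks above split the old kvm_gfn_to_pfn_cache_init()/kvm_gfn_to_pfn_cache_destroy() pair into kvm_gpc_init(), which performs one-time lock initialization, and kvm_gpc_activate()/kvm_gpc_deactivate(), which bracket the window in which the cache sits on kvm->gpc_list and may hold a mapping. A minimal caller sketch under those assumptions follows; the demo_* names, the gpa value, and the len of sizeof(u64) are invented for illustration and are not part of the patch.

/* Hypothetical usage sketch; the demo_* identifiers do not exist in the tree. */
struct demo_state {
        struct gfn_to_pfn_cache gpc;
};

static void demo_setup(struct demo_state *s)
{
        /* One-time lock init; legal long before the first activation. */
        kvm_gpc_init(&s->gpc);
}

static int demo_start(struct kvm *kvm, struct demo_state *s, gpa_t gpa)
{
        /*
         * Adds the cache to kvm->gpc_list, sets gpc->active under
         * gpc->lock, then maps @gpa via kvm_gfn_to_pfn_cache_refresh().
         * A NULL vcpu is fine for host-only (KVM_HOST_USES_PFN) usage.
         */
        return kvm_gpc_activate(kvm, &s->gpc, NULL, KVM_HOST_USES_PFN,
                                gpa, sizeof(u64));
}

static void demo_stop(struct kvm *kvm, struct demo_state *s)
{
        /*
         * Clears gpc->active under gpc->lock first, so a racing refresh
         * now fails with -EINVAL, then unlinks and unmaps the cache.
         */
        kvm_gpc_deactivate(kvm, &s->gpc);
}

With this split, re-activating a cache after demo_stop() no longer re-initializes locks that a concurrent refresh or mmu_notifier path might still hold, which is the race the active-flag ordering in this diff closes.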