path: root/virt/kvm/async_pf.c
Diffstat (limited to 'virt/kvm/async_pf.c')
-rw-r--r--  virt/kvm/async_pf.c | 176
 1 file changed, 101 insertions(+), 75 deletions(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index ea475cd03511..b8aaa96b799b 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* kvm asynchronous fault support
*
@@ -5,25 +6,13 @@
*
* Author:
* Gleb Natapov <gleb@redhat.com>
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
#include "async_pf.h"
#include <trace/events/kvm.h>
@@ -42,8 +31,7 @@ int kvm_async_pf_init(void)
void kvm_async_pf_deinit(void)
{
- if (async_pf_cache)
- kmem_cache_destroy(async_pf_cache);
+ kmem_cache_destroy(async_pf_cache);
async_pf_cache = NULL;
}
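
A side note on the deinit hunk above: the dropped NULL check was redundant because kmem_cache_destroy(), like kfree(), is defined as a no-op on a NULL pointer, so no caller-side guard is needed:

    kfree(NULL);                  /* defined no-op */
    kmem_cache_destroy(NULL);     /* likewise returns immediately */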
@@ -56,40 +44,77 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
static void async_pf_execute(struct work_struct *work)
{
- struct page *page = NULL;
struct kvm_async_pf *apf =
container_of(work, struct kvm_async_pf, work);
- struct mm_struct *mm = apf->mm;
struct kvm_vcpu *vcpu = apf->vcpu;
+ struct mm_struct *mm = vcpu->kvm->mm;
unsigned long addr = apf->addr;
- gva_t gva = apf->gva;
+ gpa_t cr2_or_gpa = apf->cr2_or_gpa;
+ int locked = 1;
+ bool first;
might_sleep();
- use_mm(mm);
- down_read(&mm->mmap_sem);
- get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
- up_read(&mm->mmap_sem);
- unuse_mm(mm);
+ /*
+ * Attempt to pin the VM's host address space, and simply skip gup() if
+ * acquiring a pin fails, i.e. if the process is exiting. Note, KVM
+ * holds a reference to its associated mm_struct until the very end of
+ * kvm_destroy_vm(), i.e. the struct itself won't be freed before this
+ * work item is fully processed.
+ */
+ if (mmget_not_zero(mm)) {
+ mmap_read_lock(mm);
+ get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
+ if (locked)
+ mmap_read_unlock(mm);
+ mmput(mm);
+ }
+
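
Two distinct mm reference counts are in play in the block above. mmgrab()/mmdrop() pin only the mm_struct allocation (mm_count), which is the kind of long-lived reference KVM holds; mmget()/mmput() pin the whole address space (mm_users), which is what get_user_pages() requires. A hedged sketch of the upgrade idiom (the helpers are real linux/sched/mm.h APIs; the surrounding function is illustrative):

    #include <linux/sched/mm.h>

    /* Sketch: upgrade a struct-lifetime reference to an address-space
     * reference. 'mm' is assumed to stay allocated via an earlier mmgrab(). */
    static bool demo_touch_user_memory(struct mm_struct *mm)
    {
            if (!mmget_not_zero(mm))  /* mm_users already 0: owner is exiting */
                    return false;
            /* ... mmap_read_lock(mm); get_user_pages_remote(...); ... */
            mmput(mm);                /* drop the mm_users pin */
            return true;
    }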
+ /*
+ * Notify and kick the vCPU even if faulting in the page failed, e.g.
+ * so that the vCPU can retry the fault synchronously.
+ */
+ if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
+ kvm_arch_async_page_present(vcpu, apf);
spin_lock(&vcpu->async_pf.lock);
+ first = list_empty(&vcpu->async_pf.done);
list_add_tail(&apf->link, &vcpu->async_pf.done);
- apf->page = page;
- apf->done = true;
spin_unlock(&vcpu->async_pf.lock);
/*
- * apf may be freed by kvm_check_async_pf_completion() after
- * this point
+ * The apf struct may be freed by kvm_check_async_pf_completion() as
+ * soon as the lock is dropped. Nullify it to prevent improper usage.
*/
+ apf = NULL;
+
+ if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
+ kvm_arch_async_page_present_queued(vcpu);
- trace_kvm_async_pf_completed(addr, page, gva);
+ trace_kvm_async_pf_completed(addr, cr2_or_gpa);
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
+ __kvm_vcpu_wake_up(vcpu);
+}
- mmdrop(mm);
- kvm_put_kvm(vcpu->kvm);
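
The apf = NULL line encodes a general ownership rule: once an object has been published on a consumer-visible list and the lock is dropped, the producer must not dereference it again, because the consumer may free it immediately. A minimal userspace analogue (hypothetical names, a pthread mutex standing in for the spinlock):

    #include <pthread.h>
    #include <stddef.h>

    struct item { struct item *next; };

    static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *done_list;

    static void publish(struct item *it)
    {
            pthread_mutex_lock(&done_lock);
            it->next = done_list;
            done_list = it;
            pthread_mutex_unlock(&done_lock);
            /* The consumer may free 'it' the instant the lock is dropped;
             * poison the local pointer, as async_pf_execute() does with apf. */
            it = NULL;
    }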
+static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
+{
+ /*
+ * The async #PF is "done", but KVM must wait for the work item itself,
+ * i.e. async_pf_execute(), to run to completion. If KVM is a module,
+ * KVM must ensure *no* code owned by KVM (the module) can be run
+ * after the last call to module_put(). Note, flushing the work item
+ * is always required when the item is taken off the completion queue.
+ * E.g. even if the vCPU handles the item in the "normal" path, the VM
+ * could be terminated before async_pf_execute() completes.
+ *
+ * Wake-all events skip the queue and go straight to the done list, i.e.
+ * they don't need to be flushed (but sanity check that the work wasn't
+ * queued).
+ */
+ if (work->wakeup_all)
+ WARN_ON_ONCE(work->work.func);
+ else
+ flush_work(&work->work);
+ kmem_cache_free(async_pf_cache, work);
}
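
The comment in kvm_flush_and_free_async_pf_work() is about code lifetime rather than data lifetime: if the work callback lives in module text, the module must not be unloaded while the callback can still run. A minimal, hypothetical module sketch of the same rule (the demo_* names are illustrative):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static void demo_fn(struct work_struct *work)
    {
            pr_info("demo work ran\n");
    }
    static DECLARE_WORK(demo_work, demo_fn);

    static int __init demo_init(void)
    {
            schedule_work(&demo_work);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            /* Without this flush, demo_fn() could still be pending or running
             * when the module text is freed, leaving the CPU executing freed
             * code. */
            flush_work(&demo_work);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");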
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
@@ -97,23 +122,28 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
/* cancel outstanding work queue item */
while (!list_empty(&vcpu->async_pf.queue)) {
struct kvm_async_pf *work =
- list_entry(vcpu->async_pf.queue.next,
- typeof(*work), queue);
- cancel_work_sync(&work->work);
+ list_first_entry(&vcpu->async_pf.queue,
+ typeof(*work), queue);
list_del(&work->queue);
- if (!work->done) /* work was canceled */
+
+#ifdef CONFIG_KVM_ASYNC_PF_SYNC
+ flush_work(&work->work);
+#else
+ if (cancel_work_sync(&work->work))
kmem_cache_free(async_pf_cache, work);
+#endif
}
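
The non-SYNC branch relies on the return-value contract of cancel_work_sync(): true means the work was still pending, so the callback never ran and never published the item; false means the callback executed (cancel_work_sync() waits for a running callback to finish). In sketch form:

    if (cancel_work_sync(&work->work)) {
            /* Never ran: this thread is the sole owner, free it here. */
            kmem_cache_free(async_pf_cache, work);
    } else {
            /* Ran to completion: async_pf_execute() put the item on ->done,
             * so the ->done loop below frees it instead. */
    }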
spin_lock(&vcpu->async_pf.lock);
while (!list_empty(&vcpu->async_pf.done)) {
struct kvm_async_pf *work =
- list_entry(vcpu->async_pf.done.next,
- typeof(*work), link);
+ list_first_entry(&vcpu->async_pf.done,
+ typeof(*work), link);
list_del(&work->link);
- if (!is_error_page(work->page))
- kvm_release_page_clean(work->page);
- kmem_cache_free(async_pf_cache, work);
+
+ spin_unlock(&vcpu->async_pf.lock);
+ kvm_flush_and_free_async_pf_work(work);
+ spin_lock(&vcpu->async_pf.lock);
}
spin_unlock(&vcpu->async_pf.lock);
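
The unlock/flush/relock dance in the loop above exists because flush_work() can sleep while ->async_pf.lock is a spinlock; since list_empty() and list_first_entry() are re-evaluated after the lock is re-acquired, concurrent list updates are handled. The general shape of the idiom:

    spin_lock(&lock);
    while (!list_empty(&head)) {
            struct kvm_async_pf *work =
                    list_first_entry(&head, typeof(*work), link);

            list_del(&work->link);
            spin_unlock(&lock);                   /* the next call may sleep */
            kvm_flush_and_free_async_pf_work(work);
            spin_lock(&lock);                     /* re-acquire, then re-check */
    }
    spin_unlock(&lock);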
@@ -125,34 +155,38 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
struct kvm_async_pf *work;
while (!list_empty_careful(&vcpu->async_pf.done) &&
- kvm_arch_can_inject_async_page_present(vcpu)) {
+ kvm_arch_can_dequeue_async_page_present(vcpu)) {
spin_lock(&vcpu->async_pf.lock);
work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
link);
list_del(&work->link);
spin_unlock(&vcpu->async_pf.lock);
- if (work->page)
- kvm_arch_async_page_ready(vcpu, work);
- kvm_arch_async_page_present(vcpu, work);
+ kvm_arch_async_page_ready(vcpu, work);
+ if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
+ kvm_arch_async_page_present(vcpu, work);
list_del(&work->queue);
vcpu->async_pf.queued--;
- if (!is_error_page(work->page))
- kvm_release_page_clean(work->page);
- kmem_cache_free(async_pf_cache, work);
+ kvm_flush_and_free_async_pf_work(work);
}
}
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
- struct kvm_arch_async_pf *arch)
+/*
+ * Try to schedule a job to handle the page fault asynchronously. Returns
+ * 'true' on success, 'false' on failure (the page fault has to be handled
+ * synchronously).
+ */
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ unsigned long hva, struct kvm_arch_async_pf *arch)
{
struct kvm_async_pf *work;
if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
- return 0;
+ return false;
- /* setup delayed work */
+ /* Arch-specific code should not do async PF in this case */
+ if (unlikely(kvm_is_error_hva(hva)))
+ return false;
/*
* do alloc nowait since if we are going to sleep anyway we
@@ -160,41 +194,29 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
* may as well sleep faulting in page
*/
work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
if (!work)
- return 0;
+ return false;
- work->page = NULL;
- work->done = false;
+ work->wakeup_all = false;
work->vcpu = vcpu;
- work->gva = gva;
- work->addr = gfn_to_hva(vcpu->kvm, gfn);
+ work->cr2_or_gpa = cr2_or_gpa;
+ work->addr = hva;
work->arch = *arch;
- work->mm = current->mm;
- atomic_inc(&work->mm->mm_count);
- kvm_get_kvm(work->vcpu->kvm);
-
- /* this can't really happen otherwise gfn_to_pfn_async
- would succeed */
- if (unlikely(kvm_is_error_hva(work->addr)))
- goto retry_sync;
INIT_WORK(&work->work, async_pf_execute);
- if (!schedule_work(&work->work))
- goto retry_sync;
list_add_tail(&work->queue, &vcpu->async_pf.queue);
vcpu->async_pf.queued++;
- kvm_arch_async_page_not_present(vcpu, work);
- return 1;
-retry_sync:
- kvm_put_kvm(work->vcpu->kvm);
- mmdrop(work->mm);
- kmem_cache_free(async_pf_cache, work);
- return 0;
+ work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
+
+ schedule_work(&work->work);
+
+ return true;
}
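
For orientation, a hedged sketch of how an arch fault handler might consume the new boolean return; handle_fault_synchronously() and the RET_PF_RETRY-style plumbing are illustrative stand-ins, not the real arch code:

    /* Fall back to a synchronous fault when the async path is unavailable
     * (queue full, bad hva, or allocation failure). */
    if (!kvm_setup_async_pf(vcpu, cr2_or_gpa, hva, &arch_data))
            return handle_fault_synchronously(vcpu, cr2_or_gpa);

    /* Otherwise let the vCPU keep running; it is notified when the page
     * is ready. */
    return RET_PF_RETRY;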
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
struct kvm_async_pf *work;
+ bool first;
if (!list_empty_careful(&vcpu->async_pf.done))
return 0;
@@ -203,13 +225,17 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
if (!work)
return -ENOMEM;
- work->page = KVM_ERR_PTR_BAD_PAGE;
+ work->wakeup_all = true;
INIT_LIST_HEAD(&work->queue); /* for list_del to work */
spin_lock(&vcpu->async_pf.lock);
+ first = list_empty(&vcpu->async_pf.done);
list_add_tail(&work->link, &vcpu->async_pf.done);
spin_unlock(&vcpu->async_pf.lock);
+ if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
+ kvm_arch_async_page_present_queued(vcpu);
+
vcpu->async_pf.queued++;
return 0;
}
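
Both kvm_async_pf_wakeup_all() and async_pf_execute() use the same coalescing idiom: only the empty-to-non-empty transition of ->done warrants a kick, since the consumer drains the entire list per notification. Distilled, with notify_consumer() as a hypothetical stand-in for kvm_arch_async_page_present_queued():

    spin_lock(&vcpu->async_pf.lock);
    first = list_empty(&vcpu->async_pf.done);  /* about to go non-empty? */
    list_add_tail(&work->link, &vcpu->async_pf.done);
    spin_unlock(&vcpu->async_pf.lock);

    if (first)
            notify_consumer(vcpu);  /* hypothetical; subsequent adds coalesce */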