Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/Kconfig          |   65
-rw-r--r--  virt/kvm/Makefile.kvm     |    1
-rw-r--r--  virt/kvm/async_pf.c       |   89
-rw-r--r--  virt/kvm/coalesced_mmio.c |   38
-rw-r--r--  virt/kvm/dirty_ring.c     |  119
-rw-r--r--  virt/kvm/eventfd.c        |  288
-rw-r--r--  virt/kvm/guest_memfd.c    | 1016
-rw-r--r--  virt/kvm/irqchip.c        |   26
-rw-r--r--  virt/kvm/kvm_main.c       | 2629
-rw-r--r--  virt/kvm/kvm_mm.h         |   69
-rw-r--r--  virt/kvm/pfncache.c       |  298
-rw-r--r--  virt/kvm/vfio.c           |  180
-rw-r--r--  virt/lib/irqbypass.c      |  190
13 files changed, 3339 insertions, 1669 deletions
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 9fb1ff6f19e5..267c7369c765 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
# KVM common configuration items and defaults
-config HAVE_KVM
+config KVM_COMMON
bool
+ select EVENTFD
+ select INTERVAL_TREE
+ select PREEMPT_NOTIFIERS
config HAVE_KVM_PFNCACHE
bool
@@ -10,9 +13,6 @@ config HAVE_KVM_PFNCACHE
config HAVE_KVM_IRQCHIP
bool
-config HAVE_KVM_IRQFD
- bool
-
config HAVE_KVM_IRQ_ROUTING
bool
@@ -39,10 +39,6 @@ config NEED_KVM_DIRTY_RING_WITH_BITMAP
bool
depends on HAVE_KVM_DIRTY_RING
-config HAVE_KVM_EVENTFD
- bool
- select EVENTFD
-
config KVM_MMIO
bool
@@ -56,13 +52,13 @@ config KVM_ASYNC_PF_SYNC
config HAVE_KVM_MSI
bool
-config HAVE_KVM_CPU_RELAX_INTERCEPT
+config HAVE_KVM_READONLY_MEM
bool
-config KVM_VFIO
+config HAVE_KVM_CPU_RELAX_INTERCEPT
bool
-config HAVE_KVM_ARCH_TLB_FLUSH_ALL
+config KVM_VFIO
bool
config HAVE_KVM_INVALID_WAKEUPS
@@ -71,15 +67,16 @@ config HAVE_KVM_INVALID_WAKEUPS
config KVM_GENERIC_DIRTYLOG_READ_PROTECT
bool
+config KVM_GENERIC_PRE_FAULT_MEMORY
+ bool
+
config KVM_COMPAT
def_bool y
depends on KVM && COMPAT && !(S390 || ARM64 || RISCV)
config HAVE_KVM_IRQ_BYPASS
- bool
-
-config HAVE_KVM_VCPU_ASYNC_IOCTL
- bool
+ tristate
+ select IRQ_BYPASS_MANAGER
config HAVE_KVM_VCPU_RUN_PID_CHANGE
bool
@@ -87,8 +84,44 @@ config HAVE_KVM_VCPU_RUN_PID_CHANGE
config HAVE_KVM_NO_POLL
bool
-config KVM_XFER_TO_GUEST_WORK
+config VIRT_XFER_TO_GUEST_WORK
bool
config HAVE_KVM_PM_NOTIFIER
bool
+
+config KVM_GENERIC_HARDWARE_ENABLING
+ bool
+
+config KVM_GENERIC_MMU_NOTIFIER
+ select MMU_NOTIFIER
+ bool
+
+config KVM_ELIDE_TLB_FLUSH_IF_YOUNG
+ depends on KVM_GENERIC_MMU_NOTIFIER
+ bool
+
+config KVM_MMU_LOCKLESS_AGING
+ depends on KVM_GENERIC_MMU_NOTIFIER
+ bool
+
+config KVM_GENERIC_MEMORY_ATTRIBUTES
+ depends on KVM_GENERIC_MMU_NOTIFIER
+ bool
+
+config KVM_GUEST_MEMFD
+ depends on KVM_GENERIC_MMU_NOTIFIER
+ select XARRAY_MULTI
+ bool
+
+config HAVE_KVM_ARCH_GMEM_PREPARE
+ bool
+ depends on KVM_GUEST_MEMFD
+
+config HAVE_KVM_ARCH_GMEM_INVALIDATE
+ bool
+ depends on KVM_GUEST_MEMFD
+
+config HAVE_KVM_ARCH_GMEM_POPULATE
+ bool
+ depends on KVM_GUEST_MEMFD
diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm
index 2c27d5d0c367..d047d4cf58c9 100644
--- a/virt/kvm/Makefile.kvm
+++ b/virt/kvm/Makefile.kvm
@@ -12,3 +12,4 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o
kvm-$(CONFIG_HAVE_KVM_PFNCACHE) += $(KVM)/pfncache.o
+kvm-$(CONFIG_KVM_GUEST_MEMFD) += $(KVM)/guest_memfd.o
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 9bfe1d6f6529..b8aaa96b799b 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -46,8 +46,8 @@ static void async_pf_execute(struct work_struct *work)
{
struct kvm_async_pf *apf =
container_of(work, struct kvm_async_pf, work);
- struct mm_struct *mm = apf->mm;
struct kvm_vcpu *vcpu = apf->vcpu;
+ struct mm_struct *mm = vcpu->kvm->mm;
unsigned long addr = apf->addr;
gpa_t cr2_or_gpa = apf->cr2_or_gpa;
int locked = 1;
@@ -56,45 +56,69 @@ static void async_pf_execute(struct work_struct *work)
might_sleep();
/*
- * This work is run asynchronously to the task which owns
- * mm and might be done in another context, so we must
- * access remotely.
+ * Attempt to pin the VM's host address space, and simply skip gup() if
+ * acquiring a pin fails, i.e. if the process is exiting. Note, KVM
+ * holds a reference to its associated mm_struct until the very end of
+ * kvm_destroy_vm(), i.e. the struct itself won't be freed before this
+ * work item is fully processed.
*/
- mmap_read_lock(mm);
- get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL,
- &locked);
- if (locked)
- mmap_read_unlock(mm);
+ if (mmget_not_zero(mm)) {
+ mmap_read_lock(mm);
+ get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
+ if (locked)
+ mmap_read_unlock(mm);
+ mmput(mm);
+ }
+ /*
+ * Notify and kick the vCPU even if faulting in the page failed, e.g.
+ * so that the vCPU can retry the fault synchronously.
+ */
if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
kvm_arch_async_page_present(vcpu, apf);
spin_lock(&vcpu->async_pf.lock);
first = list_empty(&vcpu->async_pf.done);
list_add_tail(&apf->link, &vcpu->async_pf.done);
- apf->vcpu = NULL;
spin_unlock(&vcpu->async_pf.lock);
- if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
- kvm_arch_async_page_present_queued(vcpu);
-
/*
- * apf may be freed by kvm_check_async_pf_completion() after
- * this point
+ * The apf struct may be freed by kvm_check_async_pf_completion() as
+ * soon as the lock is dropped. Nullify it to prevent improper usage.
*/
+ apf = NULL;
+
+ if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
+ kvm_arch_async_page_present_queued(vcpu);
trace_kvm_async_pf_completed(addr, cr2_or_gpa);
__kvm_vcpu_wake_up(vcpu);
+}
- mmput(mm);
- kvm_put_kvm(vcpu->kvm);
+static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
+{
+ /*
+ * The async #PF is "done", but KVM must wait for the work item itself,
+ * i.e. async_pf_execute(), to run to completion. If KVM is a module,
+ * KVM must ensure *no* code owned by KVM (the module) can be run
+ * after the last call to module_put(). Note, flushing the work item
+ * is always required when the item is taken off the completion queue.
+ * E.g. even if the vCPU handles the item in the "normal" path, the VM
+ * could be terminated before async_pf_execute() completes.
+ *
+ * Wake all events skip the queue and go straight to done, i.e. don't
+ * need to be flushed (but sanity check that the work wasn't queued).
+ */
+ if (work->wakeup_all)
+ WARN_ON_ONCE(work->work.func);
+ else
+ flush_work(&work->work);
+ kmem_cache_free(async_pf_cache, work);
}
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
- spin_lock(&vcpu->async_pf.lock);
-
/* cancel outstanding work queue item */
while (!list_empty(&vcpu->async_pf.queue)) {
struct kvm_async_pf *work =
@@ -102,32 +126,24 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
typeof(*work), queue);
list_del(&work->queue);
- /*
- * We know it's present in vcpu->async_pf.done, do
- * nothing here.
- */
- if (!work->vcpu)
- continue;
-
- spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
flush_work(&work->work);
#else
- if (cancel_work_sync(&work->work)) {
- mmput(work->mm);
- kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
+ if (cancel_work_sync(&work->work))
kmem_cache_free(async_pf_cache, work);
- }
#endif
- spin_lock(&vcpu->async_pf.lock);
}
+ spin_lock(&vcpu->async_pf.lock);
while (!list_empty(&vcpu->async_pf.done)) {
struct kvm_async_pf *work =
list_first_entry(&vcpu->async_pf.done,
typeof(*work), link);
list_del(&work->link);
- kmem_cache_free(async_pf_cache, work);
+
+ spin_unlock(&vcpu->async_pf.lock);
+ kvm_flush_and_free_async_pf_work(work);
+ spin_lock(&vcpu->async_pf.lock);
}
spin_unlock(&vcpu->async_pf.lock);
@@ -152,7 +168,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
list_del(&work->queue);
vcpu->async_pf.queued--;
- kmem_cache_free(async_pf_cache, work);
+ kvm_flush_and_free_async_pf_work(work);
}
}
@@ -176,7 +192,7 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
* do alloc nowait since if we are going to sleep anyway we
* may as well sleep faulting in page
*/
- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
+ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
if (!work)
return false;
@@ -185,9 +201,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
work->cr2_or_gpa = cr2_or_gpa;
work->addr = hva;
work->arch = *arch;
- work->mm = current->mm;
- mmget(work->mm);
- kvm_get_kvm(work->vcpu->kvm);
INIT_WORK(&work->work, async_pf_execute);
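The ordering that kvm_flush_and_free_async_pf_work() enforces (flush the work item, then free it) can be modeled in plain userspace C with pthreads. This is only an illustrative sketch; struct work_item and do_work() are hypothetical stand-ins, not KVM code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a kvm_async_pf work item. */
struct work_item {
    pthread_t worker;
    int done;
};

static void *do_work(void *arg)
{
    struct work_item *w = arg;  /* models async_pf_execute() */
    w->done = 1;
    return NULL;
}

int main(void)
{
    struct work_item *w = malloc(sizeof(*w));

    if (!w || pthread_create(&w->worker, NULL, do_work, w))
        return 1;

    /*
     * "Flush" the work before freeing the item: the worker thread may
     * still be dereferencing *w, so freeing it first would be a
     * use-after-free.  This mirrors the kernel code above, which calls
     * flush_work() before kmem_cache_free().
     */
    pthread_join(w->worker, NULL);
    printf("work done: %d\n", w->done);
    free(w);
    return 0;
}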
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 0be80c213f7f..375d6285475e 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -40,27 +40,6 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
return 1;
}
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
-{
- struct kvm_coalesced_mmio_ring *ring;
- unsigned avail;
-
- /* Are we able to batch it ? */
-
- /* last is the first free entry
- * check if we don't meet the first used entry
- * there is always one unused entry in the buffer
- */
- ring = dev->kvm->coalesced_mmio_ring;
- avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
- if (avail == 0) {
- /* full */
- return 0;
- }
-
- return 1;
-}
-
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *this, gpa_t addr,
int len, const void *val)
@@ -74,9 +53,15 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
spin_lock(&dev->kvm->ring_lock);
+ /*
+ * last is the index of the entry to fill. Verify userspace hasn't
+ * set last to be out of range, and that there is room in the ring.
+ * Leave one entry free in the ring so that userspace can differentiate
+ * between an empty ring and a full ring.
+ */
insert = READ_ONCE(ring->last);
- if (!coalesced_mmio_has_room(dev, insert) ||
- insert >= KVM_COALESCED_MMIO_MAX) {
+ if (insert >= KVM_COALESCED_MMIO_MAX ||
+ (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
spin_unlock(&dev->kvm->ring_lock);
return -EOPNOTSUPP;
}
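The open-coded full check above keeps one slot permanently unused so that ring->first == ring->last can only mean "empty". A minimal userspace model of that index arithmetic; RING_MAX is a hypothetical stand-in for KVM_COALESCED_MMIO_MAX:

#include <stdio.h>

#define RING_MAX 8  /* stand-in for KVM_COALESCED_MMIO_MAX */

/* Ring is full when advancing 'last' would collide with 'first'. */
static int ring_full(unsigned first, unsigned last)
{
    return (last + 1) % RING_MAX == first;
}

int main(void)
{
    unsigned first = 0, last = 0, dropped = 0;

    for (int i = 0; i < 10; i++) {
        if (ring_full(first, last)) {
            dropped++;          /* the kernel returns -EOPNOTSUPP here */
            continue;
        }
        last = (last + 1) % RING_MAX;
    }

    /* Only RING_MAX - 1 entries fit: one slot stays free so that
       first == last unambiguously means "empty". */
    printf("stored %u, dropped %u\n",
           (last - first + RING_MAX) % RING_MAX, dropped);
    return 0;
}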
@@ -186,16 +171,13 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
r = kvm_io_bus_unregister_dev(kvm,
zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
-
/*
* On failure, unregister destroys all devices on the
- * bus _except_ the target device, i.e. coalesced_zones
- * has been modified. No need to restart the walk as
- * there aren't any zones left.
+ * bus, including the target device. There's no need
+ * to restart the walk as there aren't any zones left.
*/
if (r)
break;
- kvm_iodevice_destructor(&dev->dev);
}
}
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index c1cd7dfe4a90..02bc6b00d76c 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -11,14 +11,14 @@
#include <trace/events/kvm.h>
#include "kvm_mm.h"
-int __weak kvm_cpu_dirty_log_size(void)
+int __weak kvm_cpu_dirty_log_size(struct kvm *kvm)
{
return 0;
}
-u32 kvm_dirty_ring_get_rsvd_entries(void)
+u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm)
{
- return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
+ return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size(kvm);
}
bool kvm_use_dirty_bitmap(struct kvm *kvm)
@@ -58,7 +58,7 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
as_id = slot >> 16;
id = (u16)slot;
- if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+ if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
return;
memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
@@ -71,14 +71,15 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
KVM_MMU_UNLOCK(kvm);
}
-int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
+int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int index, u32 size)
{
ring->dirty_gfns = vzalloc(size);
if (!ring->dirty_gfns)
return -ENOMEM;
ring->size = size / sizeof(struct kvm_dirty_gfn);
- ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
+ ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries(kvm);
ring->dirty_index = 0;
ring->reset_index = 0;
ring->index = index;
@@ -101,19 +102,38 @@ static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}
-int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
+int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int *nr_entries_reset)
{
+ /*
+ * To minimize mmu_lock contention, batch resets for harvested entries
+ * whose gfns are in the same slot, and are within N frame numbers of
+ * each other, where N is the number of bits in an unsigned long. For
+ * simplicity, process the current set of entries when the next entry
+ * can't be included in the batch.
+ *
+ * Track the current batch slot, the gfn offset into the slot for the
+ * batch, and the bitmask of gfns that need to be reset (relative to
+ * offset). Note, the offset may be adjusted backwards, e.g. so that
+ * a sequence of gfns X, X-1, ... X-N-1 can be batched.
+ */
u32 cur_slot, next_slot;
u64 cur_offset, next_offset;
- unsigned long mask;
- int count = 0;
+ unsigned long mask = 0;
struct kvm_dirty_gfn *entry;
- bool first_round = true;
- /* This is only needed to make compilers happy */
- cur_slot = cur_offset = mask = 0;
+ /*
+ * Ensure concurrent calls to KVM_RESET_DIRTY_RINGS are serialized,
+ * e.g. so that KVM fully resets all entries processed by a given call
+ * before returning to userspace. Holding slots_lock also protects
+ * the various memslot accesses.
+ */
+ lockdep_assert_held(&kvm->slots_lock);
+
+ while (likely((*nr_entries_reset) < INT_MAX)) {
+ if (signal_pending(current))
+ return -EINTR;
- while (true) {
entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];
if (!kvm_dirty_gfn_harvested(entry))
@@ -126,35 +146,64 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
kvm_dirty_gfn_set_invalid(entry);
ring->reset_index++;
- count++;
- /*
- * Try to coalesce the reset operations when the guest is
- * scanning pages in the same slot.
- */
- if (!first_round && next_slot == cur_slot) {
- s64 delta = next_offset - cur_offset;
-
- if (delta >= 0 && delta < BITS_PER_LONG) {
- mask |= 1ull << delta;
- continue;
+ (*nr_entries_reset)++;
+
+ if (mask) {
+ /*
+ * While the size of each ring is fixed, it's possible
+ * for the ring to be constantly re-dirtied/harvested
+ * while the reset is in-progress (the hard limit exists
+ * only to guard against the count becoming negative).
+ */
+ cond_resched();
+
+ /*
+ * Try to coalesce the reset operations when the guest
+ * is scanning pages in the same slot.
+ */
+ if (next_slot == cur_slot) {
+ s64 delta = next_offset - cur_offset;
+
+ if (delta >= 0 && delta < BITS_PER_LONG) {
+ mask |= 1ull << delta;
+ continue;
+ }
+
+ /* Backwards visit, careful about overflows! */
+ if (delta > -BITS_PER_LONG && delta < 0 &&
+ (mask << -delta >> -delta) == mask) {
+ cur_offset = next_offset;
+ mask = (mask << -delta) | 1;
+ continue;
+ }
}
- /* Backwards visit, careful about overflows! */
- if (delta > -BITS_PER_LONG && delta < 0 &&
- (mask << -delta >> -delta) == mask) {
- cur_offset = next_offset;
- mask = (mask << -delta) | 1;
- continue;
- }
+ /*
+ * Reset the slot for all the harvested entries that
+ * have been gathered, but not yet fully processed.
+ */
+ kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
}
- kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
+
+ /*
+ * The current slot was reset or this is the first harvested
+ * entry, (re)initialize the metadata.
+ */
cur_slot = next_slot;
cur_offset = next_offset;
mask = 1;
- first_round = false;
}
- kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
+ /*
+ * Perform a final reset if there are harvested entries that haven't
+ * been processed, which is guaranteed if at least one harvested entry
+ * was found. The loop only performs a reset when the "next" entry can't
+ * be batched with the "current" entry(s), and that reset processes the
+ * _current_ entry(s); i.e. the last harvested entry, a.k.a. next, will
+ * always be left pending.
+ */
+ if (mask)
+ kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
/*
* The request KVM_REQ_DIRTY_RING_SOFT_FULL will be cleared
@@ -163,7 +212,7 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
trace_kvm_dirty_ring_reset(ring);
- return count;
+ return 0;
}
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
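As a worked example of the batching logic above, the forward/backward delta handling can be lifted into a standalone C model. struct batch and batch_add() are hypothetical names; the kernel tracks the same state in cur_offset and mask:

#include <stdio.h>

#define NBITS 64

/* One batch: a base offset plus a bitmask of offsets relative to it. */
struct batch {
    unsigned long long base;
    unsigned long long mask;
};

/* Try to fold 'off' into the batch; return 0 if it must be flushed first. */
static int batch_add(struct batch *b, unsigned long long off)
{
    long long delta = (long long)(off - b->base);

    if (delta >= 0 && delta < NBITS) {
        b->mask |= 1ULL << delta;
        return 1;
    }
    /* Backwards visit: shift the existing mask up if no bits fall off. */
    if (delta > -NBITS && delta < 0 &&
        (b->mask << -delta >> -delta) == b->mask) {
        b->base = off;
        b->mask = (b->mask << -delta) | 1;
        return 1;
    }
    return 0;
}

int main(void)
{
    struct batch b = { .base = 10, .mask = 1 };
    unsigned long long offs[] = { 11, 12, 9, 200 };

    for (int i = 0; i < 4; i++) {
        if (!batch_add(&b, offs[i])) {
            /* models kvm_reset_dirty_gfn(kvm, slot, base, mask) */
            printf("flush base=%llu mask=%#llx\n", b.base, b.mask);
            b.base = offs[i];
            b.mask = 1;
        }
    }
    printf("final base=%llu mask=%#llx\n", b.base, b.mask);
    return 0;
}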
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 2a3ed401ce46..0e8b5277be3b 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -28,7 +28,7 @@
#include <kvm/iodev.h>
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
static struct workqueue_struct *irqfd_cleanup_wq;
@@ -55,6 +55,15 @@ irqfd_inject(struct work_struct *work)
irqfd->gsi, 1, false);
}
+static void irqfd_resampler_notify(struct kvm_kernel_irqfd_resampler *resampler)
+{
+ struct kvm_kernel_irqfd *irqfd;
+
+ list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
+ srcu_read_lock_held(&resampler->kvm->irq_srcu))
+ eventfd_signal(irqfd->resamplefd);
+}
+
/*
* Since resampler irqfds share an IRQ source ID, we de-assert once
* then notify all of the resampler irqfds using this GSI. We can't
@@ -65,7 +74,6 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
struct kvm_kernel_irqfd_resampler *resampler;
struct kvm *kvm;
- struct kvm_kernel_irqfd *irqfd;
int idx;
resampler = container_of(kian,
@@ -76,11 +84,7 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
resampler->notifier.gsi, 0, false);
idx = srcu_read_lock(&kvm->irq_srcu);
-
- list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
- srcu_read_lock_held(&kvm->irq_srcu))
- eventfd_signal(irqfd->resamplefd, 1);
-
+ irqfd_resampler_notify(resampler);
srcu_read_unlock(&kvm->irq_srcu, idx);
}
@@ -93,14 +97,19 @@ irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
mutex_lock(&kvm->irqfds.resampler_lock);
list_del_rcu(&irqfd->resampler_link);
- synchronize_srcu(&kvm->irq_srcu);
if (list_empty(&resampler->list)) {
- list_del(&resampler->link);
+ list_del_rcu(&resampler->link);
kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
+ /*
+ * synchronize_srcu_expedited(&kvm->irq_srcu) already called
+ * in kvm_unregister_irq_ack_notifier().
+ */
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
resampler->notifier.gsi, 0, false);
kfree(resampler);
+ } else {
+ synchronize_srcu_expedited(&kvm->irq_srcu);
}
mutex_unlock(&kvm->irqfds.resampler_lock);
@@ -118,7 +127,7 @@ irqfd_shutdown(struct work_struct *work)
u64 cnt;
/* Make sure irqfd has been initialized in assign path. */
- synchronize_srcu(&kvm->irq_srcu);
+ synchronize_srcu_expedited(&kvm->irq_srcu);
/*
* Synchronize with the wait-queue and unhook ourselves to prevent
@@ -140,7 +149,7 @@ irqfd_shutdown(struct work_struct *work)
/*
* It is now safe to release the object's resources
*/
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
eventfd_ctx_put(irqfd->eventfd);
@@ -195,6 +204,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
int ret = 0;
if (flags & EPOLLIN) {
+ /*
+ * WARNING: Do NOT take irqfds.lock in any path except EPOLLHUP,
+ * as KVM holds irqfds.lock when registering the irqfd with the
+ * eventfd.
+ */
u64 cnt;
eventfd_ctx_do_read(irqfd->eventfd, &cnt);
@@ -216,6 +230,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
/* The eventfd is closing, detach from KVM */
unsigned long iflags;
+ /*
+ * Taking irqfds.lock is safe here, as KVM holds a reference to
+ * the eventfd when registering the irqfd, i.e. this path can't
+ * be reached while kvm_irqfd_add() is running.
+ */
spin_lock_irqsave(&kvm->irqfds.lock, iflags);
/*
@@ -236,22 +255,14 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
return ret;
}
-static void
-irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
- poll_table *pt)
-{
- struct kvm_kernel_irqfd *irqfd =
- container_of(pt, struct kvm_kernel_irqfd, pt);
- add_wait_queue_priority(wqh, &irqfd->wait);
-}
-
-/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
int n_entries;
+ lockdep_assert_held(&kvm->irqfds.lock);
+
n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
write_seqcount_begin(&irqfd->irq_entry_sc);
@@ -265,7 +276,64 @@ static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
write_seqcount_end(&irqfd->irq_entry_sc);
}
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+struct kvm_irqfd_pt {
+ struct kvm_kernel_irqfd *irqfd;
+ struct kvm *kvm;
+ poll_table pt;
+ int ret;
+};
+
+static void kvm_irqfd_register(struct file *file, wait_queue_head_t *wqh,
+ poll_table *pt)
+{
+ struct kvm_irqfd_pt *p = container_of(pt, struct kvm_irqfd_pt, pt);
+ struct kvm_kernel_irqfd *irqfd = p->irqfd;
+ struct kvm *kvm = p->kvm;
+
+ /*
+ * Note, irqfds.lock protects the irqfd's irq_entry, i.e. its routing,
+ * and irqfds.items. It does NOT protect registering with the eventfd.
+ */
+ spin_lock_irq(&kvm->irqfds.lock);
+
+ /*
+ * Initialize the routing information prior to adding the irqfd to the
+ * eventfd's waitqueue, as irqfd_wakeup() can be invoked as soon as the
+ * irqfd is registered.
+ */
+ irqfd_update(kvm, irqfd);
+
+ /*
+ * Add the irqfd as a priority waiter on the eventfd, with a custom
+ * wake-up handler, so that KVM *and only KVM* is notified whenever the
+ * underlying eventfd is signaled.
+ */
+ init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
+
+ /*
+ * Temporarily lie to lockdep about holding irqfds.lock to avoid a
+ * false positive regarding potential deadlock with irqfd_wakeup()
+ * (see irqfd_wakeup() for details).
+ *
+ * Adding to the wait queue will fail if there is already a priority
+ * waiter, i.e. if the eventfd is associated with another irqfd (in any
+ * VM). Note, kvm_irqfd_deassign() waits for all in-flight shutdown
+ * jobs to complete, i.e. ensures the irqfd has been removed from the
+ * eventfd's waitqueue before returning to userspace.
+ */
+ spin_release(&kvm->irqfds.lock.dep_map, _RET_IP_);
+ p->ret = add_wait_queue_priority_exclusive(wqh, &irqfd->wait);
+ spin_acquire(&kvm->irqfds.lock.dep_map, 0, 0, _RET_IP_);
+ if (p->ret)
+ goto out;
+
+ list_add_tail(&irqfd->list, &kvm->irqfds.items);
+
+out:
+ spin_unlock_irq(&kvm->irqfds.lock);
+}
+
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
struct irq_bypass_consumer *cons)
{
@@ -276,27 +344,20 @@ void __attribute__((weak)) kvm_arch_irq_bypass_start(
{
}
-int __attribute__((weak)) kvm_arch_update_irqfd_routing(
- struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set)
+void __weak kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
+ struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new)
{
- return 0;
-}
-bool __attribute__((weak)) kvm_arch_irqfd_route_changed(
- struct kvm_kernel_irq_routing_entry *old,
- struct kvm_kernel_irq_routing_entry *new)
-{
- return true;
}
#endif
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
- struct kvm_kernel_irqfd *irqfd, *tmp;
- struct fd f;
+ struct kvm_kernel_irqfd *irqfd;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
+ struct kvm_irqfd_pt irqfd_pt;
int ret;
__poll_t events;
int idx;
@@ -318,16 +379,16 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
- f = fdget(args->fd);
- if (!f.file) {
+ CLASS(fd, f)(args->fd);
+ if (fd_empty(f)) {
ret = -EBADF;
goto out;
}
- eventfd = eventfd_ctx_fileget(f.file);
+ eventfd = eventfd_ctx_fileget(fd_file(f));
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
- goto fail;
+ goto out;
}
irqfd->eventfd = eventfd;
@@ -369,76 +430,67 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
resampler->notifier.irq_acked = irqfd_resampler_ack;
INIT_LIST_HEAD(&resampler->link);
- list_add(&resampler->link, &kvm->irqfds.resampler_list);
+ list_add_rcu(&resampler->link, &kvm->irqfds.resampler_list);
kvm_register_irq_ack_notifier(kvm,
&resampler->notifier);
irqfd->resampler = resampler;
}
list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
- synchronize_srcu(&kvm->irq_srcu);
+ synchronize_srcu_expedited(&kvm->irq_srcu);
mutex_unlock(&kvm->irqfds.resampler_lock);
}
/*
- * Install our own custom wake-up handling so we are notified via
- * a callback whenever someone signals the underlying eventfd
+ * Set the irqfd routing and add it to KVM's list before registering
+ * the irqfd with the eventfd, so that the routing information is valid
+ * and stays valid, e.g. if there are GSI routing changes, prior to
+ * making the irqfd visible, i.e. before it might be signaled.
+ *
+ * Note, holding SRCU ensures a stable read of routing information, and
+ * also prevents irqfd_shutdown() from freeing the irqfd before it's
+ * fully initialized.
*/
- init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
- init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);
-
- spin_lock_irq(&kvm->irqfds.lock);
-
- ret = 0;
- list_for_each_entry(tmp, &kvm->irqfds.items, list) {
- if (irqfd->eventfd != tmp->eventfd)
- continue;
- /* This fd is used for another irq already. */
- ret = -EBUSY;
- spin_unlock_irq(&kvm->irqfds.lock);
- goto fail;
- }
-
idx = srcu_read_lock(&kvm->irq_srcu);
- irqfd_update(kvm, irqfd);
-
- list_add_tail(&irqfd->list, &kvm->irqfds.items);
-
- spin_unlock_irq(&kvm->irqfds.lock);
/*
- * Check if there was an event already pending on the eventfd
- * before we registered, and trigger it as if we didn't miss it.
+ * Register the irqfd with the eventfd by polling on the eventfd, and
+ * simultaneously add the irqfd to KVM's list. If there was an event
+ * pending on the eventfd prior to registering, manually trigger IRQ
+ * injection.
*/
- events = vfs_poll(f.file, &irqfd->pt);
+ irqfd_pt.irqfd = irqfd;
+ irqfd_pt.kvm = kvm;
+ init_poll_funcptr(&irqfd_pt.pt, kvm_irqfd_register);
+
+ events = vfs_poll(fd_file(f), &irqfd_pt.pt);
+
+ ret = irqfd_pt.ret;
+ if (ret)
+ goto fail_poll;
if (events & EPOLLIN)
schedule_work(&irqfd->inject);
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
if (kvm_arch_has_irq_bypass()) {
- irqfd->consumer.token = (void *)irqfd->eventfd;
irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
irqfd->consumer.start = kvm_arch_irq_bypass_start;
- ret = irq_bypass_register_consumer(&irqfd->consumer);
+ ret = irq_bypass_register_consumer(&irqfd->consumer, irqfd->eventfd);
if (ret)
- pr_info("irq bypass consumer (token %p) registration fails: %d\n",
- irqfd->consumer.token, ret);
+ pr_info("irq bypass consumer (eventfd %p) registration fails: %d\n",
+ irqfd->eventfd, ret);
}
#endif
srcu_read_unlock(&kvm->irq_srcu, idx);
-
- /*
- * do not drop the file until the irqfd is fully initialized, otherwise
- * we might race against the EPOLLHUP
- */
- fdput(f);
return 0;
+fail_poll:
+ srcu_read_unlock(&kvm->irq_srcu, idx);
fail:
if (irqfd->resampler)
irqfd_resampler_shutdown(irqfd);
@@ -449,8 +501,6 @@ fail:
if (eventfd && !IS_ERR(eventfd))
eventfd_ctx_put(eventfd);
- fdput(f);
-
out:
kfree(irqfd);
return ret;
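For context on how this path is reached from userspace: an irqfd is attached with the KVM_IRQFD ioctl on the VM fd, which lands in kvm_irqfd_assign() above. A minimal sketch, assuming vm_fd refers to a VM with an in-kernel irqchip and that the chosen GSI is valid:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int attach_irqfd(int vm_fd, unsigned int gsi)
{
    struct kvm_irqfd irqfd;
    int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

    if (efd < 0)
        return -1;

    memset(&irqfd, 0, sizeof(irqfd));
    irqfd.fd = efd;
    irqfd.gsi = gsi;

    /* Ends up in kvm_irqfd_assign(); signaling efd now injects the GSI. */
    if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0) {
        perror("KVM_IRQFD");
        close(efd);
        return -1;
    }
    return efd;
}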
@@ -475,7 +525,7 @@ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
return false;
}
-EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_irq_has_notifier);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
@@ -515,24 +565,10 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
mutex_lock(&kvm->irq_lock);
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
- synchronize_srcu(&kvm->irq_srcu);
+ synchronize_srcu_expedited(&kvm->irq_srcu);
kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
-#endif
-void
-kvm_eventfd_init(struct kvm *kvm)
-{
-#ifdef CONFIG_HAVE_KVM_IRQFD
- spin_lock_init(&kvm->irqfds.lock);
- INIT_LIST_HEAD(&kvm->irqfds.items);
- INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
- mutex_init(&kvm->irqfds.resampler_lock);
-#endif
- INIT_LIST_HEAD(&kvm->ioeventfds);
-}
-
-#ifdef CONFIG_HAVE_KVM_IRQFD
/*
* shutdown any irqfd's that match fd+gsi
*/
@@ -614,7 +650,7 @@ kvm_irqfd_release(struct kvm *kvm)
/*
* Take note of a change in irq routing.
- * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
+ * Caller must invoke synchronize_srcu_expedited(&kvm->irq_srcu) afterwards.
*/
void kvm_irq_routing_update(struct kvm *kvm)
{
@@ -623,27 +659,47 @@ void kvm_irq_routing_update(struct kvm *kvm)
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
/* Under irqfds.lock, so can read irq_entry safely */
struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;
#endif
irqfd_update(kvm, irqfd);
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
- if (irqfd->producer &&
- kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
- int ret = kvm_arch_update_irqfd_routing(
- irqfd->kvm, irqfd->producer->irq,
- irqfd->gsi, 1);
- WARN_ON(ret);
- }
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+ if (irqfd->producer)
+ kvm_arch_update_irqfd_routing(irqfd, &old, &irqfd->irq_entry);
#endif
}
spin_unlock_irq(&kvm->irqfds.lock);
}
+bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+ unsigned int irqchip,
+ unsigned int pin)
+{
+ struct kvm_kernel_irqfd_resampler *resampler;
+ int gsi, idx;
+
+ idx = srcu_read_lock(&kvm->irq_srcu);
+ gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+ if (gsi != -1) {
+ list_for_each_entry_srcu(resampler,
+ &kvm->irqfds.resampler_list, link,
+ srcu_read_lock_held(&kvm->irq_srcu)) {
+ if (resampler->notifier.gsi == gsi) {
+ irqfd_resampler_notify(resampler);
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+ return true;
+ }
+ }
+ }
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+
+ return false;
+}
+
/*
* create a host-wide workqueue for issuing deferred shutdown requests
* aggregated from all vm* instances. We need our own isolated
@@ -651,7 +707,7 @@ void kvm_irq_routing_update(struct kvm *kvm)
*/
int kvm_irqfd_init(void)
{
- irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
+ irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", WQ_PERCPU, 0);
if (!irqfd_cleanup_wq)
return -ENOMEM;
@@ -753,7 +809,7 @@ ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
if (!ioeventfd_in_range(p, addr, len, val))
return -EOPNOTSUPP;
- eventfd_signal(p->eventfd, 1);
+ eventfd_signal(p->eventfd);
return 0;
}
@@ -856,9 +912,9 @@ static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
unlock_fail:
mutex_unlock(&kvm->slots_lock);
+ kfree(p);
fail:
- kfree(p);
eventfd_ctx_put(eventfd);
return ret;
@@ -868,7 +924,7 @@ static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_ioeventfd *args)
{
- struct _ioeventfd *p, *tmp;
+ struct _ioeventfd *p;
struct eventfd_ctx *eventfd;
struct kvm_io_bus *bus;
int ret = -ENOENT;
@@ -882,8 +938,7 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
mutex_lock(&kvm->slots_lock);
- list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
-
+ list_for_each_entry(p, &kvm->ioeventfds, list) {
if (p->bus_idx != bus_idx ||
p->eventfd != eventfd ||
p->addr != args->addr ||
@@ -898,7 +953,6 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
bus = kvm_get_bus(kvm, bus_idx);
if (bus)
bus->ioeventfd_count--;
- ioeventfd_release(p);
ret = 0;
break;
}
@@ -981,3 +1035,15 @@ kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
return kvm_assign_ioeventfd(kvm, args);
}
+
+void
+kvm_eventfd_init(struct kvm *kvm)
+{
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+ spin_lock_init(&kvm->irqfds.lock);
+ INIT_LIST_HEAD(&kvm->irqfds.items);
+ INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
+ mutex_init(&kvm->irqfds.resampler_lock);
+#endif
+ INIT_LIST_HEAD(&kvm->ioeventfds);
+}
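On the ioeventfd side, registration is driven by the KVM_IOEVENTFD ioctl. A hedged userspace sketch; vm_fd and the MMIO address are assumptions about the caller's VM layout:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Ask KVM to signal 'efd' whenever the guest writes 4 bytes to the MMIO
 * address 'gpa', instead of exiting to userspace for the write.
 */
static int attach_ioeventfd(int vm_fd, __u64 gpa)
{
    struct kvm_ioeventfd ioe;
    int efd = eventfd(0, EFD_CLOEXEC);

    if (efd < 0)
        return -1;

    memset(&ioe, 0, sizeof(ioe));
    ioe.addr = gpa;
    ioe.len = 4;
    ioe.fd = efd;

    if (ioctl(vm_fd, KVM_IOEVENTFD, &ioe) < 0) {
        perror("KVM_IOEVENTFD");
        close(efd);
        return -1;
    }
    return efd;
}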
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
new file mode 100644
index 000000000000..fdaea3422c30
--- /dev/null
+++ b/virt/kvm/guest_memfd.c
@@ -0,0 +1,1016 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/anon_inodes.h>
+#include <linux/backing-dev.h>
+#include <linux/falloc.h>
+#include <linux/fs.h>
+#include <linux/kvm_host.h>
+#include <linux/mempolicy.h>
+#include <linux/pseudo_fs.h>
+#include <linux/pagemap.h>
+
+#include "kvm_mm.h"
+
+static struct vfsmount *kvm_gmem_mnt;
+
+/*
+ * A guest_memfd instance can be associated with multiple VMs, each with
+ * its own "view" of the underlying physical memory.
+ *
+ * The gmem's inode is effectively the raw underlying physical storage, and is
+ * used to track properties of the physical memory, while each gmem file is
+ * effectively a single VM's view of that storage, and is used to track assets
+ * specific to its associated VM, e.g. memslots=>gmem bindings.
+ */
+struct gmem_file {
+ struct kvm *kvm;
+ struct xarray bindings;
+ struct list_head entry;
+};
+
+struct gmem_inode {
+ struct shared_policy policy;
+ struct inode vfs_inode;
+
+ u64 flags;
+};
+
+static __always_inline struct gmem_inode *GMEM_I(struct inode *inode)
+{
+ return container_of(inode, struct gmem_inode, vfs_inode);
+}
+
+#define kvm_gmem_for_each_file(f, mapping) \
+ list_for_each_entry(f, &(mapping)->i_private_list, entry)
+
+/**
+ * folio_file_pfn - like folio_file_page, but return a pfn.
+ * @folio: The folio which contains this index.
+ * @index: The index we want to look up.
+ *
+ * Return: The pfn for this index.
+ */
+static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
+{
+ return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
+}
+
+static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ return gfn - slot->base_gfn + slot->gmem.pgoff;
+}
+
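The two helpers above are pure index arithmetic; a small standalone model may make the gfn-to-file-index and index-to-pfn mapping clearer (the numbers are made up for illustration):

#include <stdio.h>

typedef unsigned long long u64;

/* gfn -> index into the guest_memfd file, mirroring kvm_gmem_get_index(). */
static u64 gmem_index(u64 gfn, u64 base_gfn, u64 pgoff)
{
    return gfn - base_gfn + pgoff;
}

/* pfn within a power-of-two sized folio, mirroring folio_file_pfn(). */
static u64 page_pfn(u64 folio_pfn, u64 nr_pages, u64 index)
{
    return folio_pfn + (index & (nr_pages - 1));
}

int main(void)
{
    /* Hypothetical slot: base gfn 0x100 bound at file offset 0x10 pages. */
    u64 base_gfn = 0x100, pgoff = 0x10;
    u64 idx = gmem_index(0x105, base_gfn, pgoff);   /* -> 0x15 */

    /* A 16-page folio starting at pfn 0x8000 backs indices 0x10..0x1f. */
    printf("index %#llx -> pfn %#llx\n", idx, page_pfn(0x8000, 16, idx));
    return 0;
}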
+static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
+ pgoff_t index, struct folio *folio)
+{
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
+ kvm_pfn_t pfn = folio_file_pfn(folio, index);
+ gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
+ int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
+ if (rc) {
+ pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
+ index, gfn, pfn, rc);
+ return rc;
+ }
+#endif
+
+ return 0;
+}
+
+static inline void kvm_gmem_mark_prepared(struct folio *folio)
+{
+ folio_mark_uptodate(folio);
+}
+
+/*
+ * Process @folio, which contains @gfn, so that the guest can use it.
+ * The folio must be locked and the gfn must be contained in @slot.
+ * On successful return the guest sees a zero page so as to avoid
+ * leaking host data and the up-to-date flag is set.
+ */
+static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, struct folio *folio)
+{
+ unsigned long nr_pages, i;
+ pgoff_t index;
+ int r;
+
+ nr_pages = folio_nr_pages(folio);
+ for (i = 0; i < nr_pages; i++)
+ clear_highpage(folio_page(folio, i));
+
+ /*
+ * Preparing huge folios should always be safe, since it should
+ * be possible to split them later if needed.
+ *
+ * Right now the folio order is always going to be zero, but the
+ * code is ready for huge folios. The only assumption is that
+ * the base pgoff of memslots is naturally aligned with the
+ * requested page order, ensuring that huge folios can also use
+ * huge page table entries for GPA->HPA mapping.
+ *
+ * The order will be passed when creating the guest_memfd, and
+ * checked when creating memslots.
+ */
+ WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, folio_nr_pages(folio)));
+ index = kvm_gmem_get_index(slot, gfn);
+ index = ALIGN_DOWN(index, folio_nr_pages(folio));
+ r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
+ if (!r)
+ kvm_gmem_mark_prepared(folio);
+
+ return r;
+}
+
+/*
+ * Returns a locked folio on success. The caller is responsible for
+ * setting the up-to-date flag before the memory is mapped into the guest.
+ * There is no backing storage for the memory, so the folio will remain
+ * up-to-date until it's removed.
+ *
+ * Ignore accessed, referenced, and dirty flags. The memory is
+ * unevictable and there is no storage to write back to.
+ */
+static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
+{
+ /* TODO: Support huge pages. */
+ struct mempolicy *policy;
+ struct folio *folio;
+
+ /*
+ * Fast-path: See if folio is already present in mapping to avoid
+ * policy_lookup.
+ */
+ folio = __filemap_get_folio(inode->i_mapping, index,
+ FGP_LOCK | FGP_ACCESSED, 0);
+ if (!IS_ERR(folio))
+ return folio;
+
+ policy = mpol_shared_policy_lookup(&GMEM_I(inode)->policy, index);
+ folio = __filemap_get_folio_mpol(inode->i_mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping_gfp_mask(inode->i_mapping), policy);
+ mpol_cond_put(policy);
+
+ return folio;
+}
+
+static enum kvm_gfn_range_filter kvm_gmem_get_invalidate_filter(struct inode *inode)
+{
+ if (GMEM_I(inode)->flags & GUEST_MEMFD_FLAG_INIT_SHARED)
+ return KVM_FILTER_SHARED;
+
+ return KVM_FILTER_PRIVATE;
+}
+
+static void __kvm_gmem_invalidate_begin(struct gmem_file *f, pgoff_t start,
+ pgoff_t end,
+ enum kvm_gfn_range_filter attr_filter)
+{
+ bool flush = false, found_memslot = false;
+ struct kvm_memory_slot *slot;
+ struct kvm *kvm = f->kvm;
+ unsigned long index;
+
+ xa_for_each_range(&f->bindings, index, slot, start, end - 1) {
+ pgoff_t pgoff = slot->gmem.pgoff;
+
+ struct kvm_gfn_range gfn_range = {
+ .start = slot->base_gfn + max(pgoff, start) - pgoff,
+ .end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
+ .slot = slot,
+ .may_block = true,
+ .attr_filter = attr_filter,
+ };
+
+ if (!found_memslot) {
+ found_memslot = true;
+
+ KVM_MMU_LOCK(kvm);
+ kvm_mmu_invalidate_begin(kvm);
+ }
+
+ flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
+ }
+
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+
+ if (found_memslot)
+ KVM_MMU_UNLOCK(kvm);
+}
+
+static void kvm_gmem_invalidate_begin(struct inode *inode, pgoff_t start,
+ pgoff_t end)
+{
+ enum kvm_gfn_range_filter attr_filter;
+ struct gmem_file *f;
+
+ attr_filter = kvm_gmem_get_invalidate_filter(inode);
+
+ kvm_gmem_for_each_file(f, inode->i_mapping)
+ __kvm_gmem_invalidate_begin(f, start, end, attr_filter);
+}
+
+static void __kvm_gmem_invalidate_end(struct gmem_file *f, pgoff_t start,
+ pgoff_t end)
+{
+ struct kvm *kvm = f->kvm;
+
+ if (xa_find(&f->bindings, &start, end - 1, XA_PRESENT)) {
+ KVM_MMU_LOCK(kvm);
+ kvm_mmu_invalidate_end(kvm);
+ KVM_MMU_UNLOCK(kvm);
+ }
+}
+
+static void kvm_gmem_invalidate_end(struct inode *inode, pgoff_t start,
+ pgoff_t end)
+{
+ struct gmem_file *f;
+
+ kvm_gmem_for_each_file(f, inode->i_mapping)
+ __kvm_gmem_invalidate_end(f, start, end);
+}
+
+static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+{
+ pgoff_t start = offset >> PAGE_SHIFT;
+ pgoff_t end = (offset + len) >> PAGE_SHIFT;
+
+ /*
+ * Bindings must be stable across invalidation to ensure the start+end
+ * are balanced.
+ */
+ filemap_invalidate_lock(inode->i_mapping);
+
+ kvm_gmem_invalidate_begin(inode, start, end);
+
+ truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
+
+ kvm_gmem_invalidate_end(inode, start, end);
+
+ filemap_invalidate_unlock(inode->i_mapping);
+
+ return 0;
+}
+
+static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
+{
+ struct address_space *mapping = inode->i_mapping;
+ pgoff_t start, index, end;
+ int r;
+
+ /* Dedicated guest is immutable by default. */
+ if (offset + len > i_size_read(inode))
+ return -EINVAL;
+
+ filemap_invalidate_lock_shared(mapping);
+
+ start = offset >> PAGE_SHIFT;
+ end = (offset + len) >> PAGE_SHIFT;
+
+ r = 0;
+ for (index = start; index < end; ) {
+ struct folio *folio;
+
+ if (signal_pending(current)) {
+ r = -EINTR;
+ break;
+ }
+
+ folio = kvm_gmem_get_folio(inode, index);
+ if (IS_ERR(folio)) {
+ r = PTR_ERR(folio);
+ break;
+ }
+
+ index = folio_next_index(folio);
+
+ folio_unlock(folio);
+ folio_put(folio);
+
+ /* 64-bit only, wrapping the index should be impossible. */
+ if (WARN_ON_ONCE(!index))
+ break;
+
+ cond_resched();
+ }
+
+ filemap_invalidate_unlock_shared(mapping);
+
+ return r;
+}
+
+static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
+ loff_t len)
+{
+ int ret;
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE))
+ return -EOPNOTSUPP;
+
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ return -EOPNOTSUPP;
+
+ if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))
+ return -EINVAL;
+
+ if (mode & FALLOC_FL_PUNCH_HOLE)
+ ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
+ else
+ ret = kvm_gmem_allocate(file_inode(file), offset, len);
+
+ if (!ret)
+ file_modified(file);
+ return ret;
+}
+
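From userspace, this fallocate() handler is what backs deallocation of guest_memfd ranges. A minimal sketch, assuming gmem_fd came from KVM_CREATE_GUEST_MEMFD and that offset/len are page-aligned:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <linux/falloc.h>

static int punch_gmem_hole(int gmem_fd, off_t offset, off_t len)
{
    /*
     * Punching a hole frees the backing pages; the kernel code above also
     * invalidates any guest mappings of the range before truncating.
     */
    if (fallocate(gmem_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                  offset, len) < 0) {
        perror("fallocate");
        return -1;
    }
    return 0;
}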
+static int kvm_gmem_release(struct inode *inode, struct file *file)
+{
+ struct gmem_file *f = file->private_data;
+ struct kvm_memory_slot *slot;
+ struct kvm *kvm = f->kvm;
+ unsigned long index;
+
+ /*
+ * Prevent concurrent attempts to *unbind* a memslot. This is the last
+ * reference to the file and thus no new bindings can be created, but
+ * dereferencing the slot for existing bindings needs to be protected
+ * against memslot updates, specifically so that unbind doesn't race
+ * and free the memslot (kvm_gmem_get_file() will return NULL).
+ *
+ * Since .release is called only when the reference count is zero,
+ * after which file_ref_get() and get_file_active() fail,
+ * kvm_gmem_get_pfn() cannot be using the file concurrently.
+ * file_ref_put() provides a full barrier, and get_file_active() the
+ * matching acquire barrier.
+ */
+ mutex_lock(&kvm->slots_lock);
+
+ filemap_invalidate_lock(inode->i_mapping);
+
+ xa_for_each(&f->bindings, index, slot)
+ WRITE_ONCE(slot->gmem.file, NULL);
+
+ /*
+ * All in-flight operations are gone and new bindings can be created.
+ * Zap all SPTEs pointed at by this file. Do not free the backing
+ * memory, as its lifetime is associated with the inode, not the file.
+ */
+ __kvm_gmem_invalidate_begin(f, 0, -1ul,
+ kvm_gmem_get_invalidate_filter(inode));
+ __kvm_gmem_invalidate_end(f, 0, -1ul);
+
+ list_del(&f->entry);
+
+ filemap_invalidate_unlock(inode->i_mapping);
+
+ mutex_unlock(&kvm->slots_lock);
+
+ xa_destroy(&f->bindings);
+ kfree(f);
+
+ kvm_put_kvm(kvm);
+
+ return 0;
+}
+
+static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
+{
+ /*
+ * Do not return slot->gmem.file if it has already been closed;
+ * there might be some time between the last fput() and when
+ * kvm_gmem_release() clears slot->gmem.file.
+ */
+ return get_file_active(&slot->gmem.file);
+}
+
+DEFINE_CLASS(gmem_get_file, struct file *, if (_T) fput(_T),
+ kvm_gmem_get_file(slot), struct kvm_memory_slot *slot);
+
+static bool kvm_gmem_supports_mmap(struct inode *inode)
+{
+ return GMEM_I(inode)->flags & GUEST_MEMFD_FLAG_MMAP;
+}
+
+static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ struct folio *folio;
+ vm_fault_t ret = VM_FAULT_LOCKED;
+
+ if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
+ return VM_FAULT_SIGBUS;
+
+ if (!(GMEM_I(inode)->flags & GUEST_MEMFD_FLAG_INIT_SHARED))
+ return VM_FAULT_SIGBUS;
+
+ folio = kvm_gmem_get_folio(inode, vmf->pgoff);
+ if (IS_ERR(folio)) {
+ if (PTR_ERR(folio) == -EAGAIN)
+ return VM_FAULT_RETRY;
+
+ return vmf_error(PTR_ERR(folio));
+ }
+
+ if (WARN_ON_ONCE(folio_test_large(folio))) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_folio;
+ }
+
+ if (!folio_test_uptodate(folio)) {
+ clear_highpage(folio_page(folio, 0));
+ kvm_gmem_mark_prepared(folio);
+ }
+
+ vmf->page = folio_file_page(folio, vmf->pgoff);
+
+out_folio:
+ if (ret != VM_FAULT_LOCKED) {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_NUMA
+static int kvm_gmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
+{
+ struct inode *inode = file_inode(vma->vm_file);
+
+ return mpol_set_shared_policy(&GMEM_I(inode)->policy, vma, mpol);
+}
+
+static struct mempolicy *kvm_gmem_get_policy(struct vm_area_struct *vma,
+ unsigned long addr, pgoff_t *pgoff)
+{
+ struct inode *inode = file_inode(vma->vm_file);
+
+ *pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
+
+ /*
+ * Return the memory policy for this index, or NULL if none is set.
+ *
+ * Returning NULL, e.g. instead of the current task's memory policy, is
+ * important for the .get_policy kernel ABI: it indicates that no
+ * explicit policy has been set via mbind() for this memory. The caller
+ * can then replace NULL with the default memory policy instead of the
+ * current task's memory policy.
+ */
+ return mpol_shared_policy_lookup(&GMEM_I(inode)->policy, *pgoff);
+}
+#endif /* CONFIG_NUMA */
+
+static const struct vm_operations_struct kvm_gmem_vm_ops = {
+ .fault = kvm_gmem_fault_user_mapping,
+#ifdef CONFIG_NUMA
+ .get_policy = kvm_gmem_get_policy,
+ .set_policy = kvm_gmem_set_policy,
+#endif
+};
+
+static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ if (!kvm_gmem_supports_mmap(file_inode(file)))
+ return -ENODEV;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) !=
+ (VM_SHARED | VM_MAYSHARE)) {
+ return -EINVAL;
+ }
+
+ vma->vm_ops = &kvm_gmem_vm_ops;
+
+ return 0;
+}
+
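When a guest_memfd is created with GUEST_MEMFD_FLAG_MMAP, userspace can map it directly; the mapping must be MAP_SHARED to satisfy the VM_SHARED | VM_MAYSHARE check above. A sketch under those assumptions (size and the fd's origin are up to the caller):

#include <stdio.h>
#include <sys/mman.h>

static void *map_gmem(int gmem_fd, size_t size)
{
    /*
     * Must be MAP_SHARED: kvm_gmem_mmap() rejects anything else.  Touching
     * the mapping faults through kvm_gmem_fault_user_mapping(), which also
     * requires the memory to have been created as shared
     * (GUEST_MEMFD_FLAG_INIT_SHARED above); otherwise the access SIGBUSes.
     */
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   gmem_fd, 0);

    if (p == MAP_FAILED) {
        perror("mmap");
        return NULL;
    }
    return p;
}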
+static struct file_operations kvm_gmem_fops = {
+ .mmap = kvm_gmem_mmap,
+ .open = generic_file_open,
+ .release = kvm_gmem_release,
+ .fallocate = kvm_gmem_fallocate,
+};
+
+static int kvm_gmem_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src,
+ enum migrate_mode mode)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
+{
+ pgoff_t start, end;
+
+ filemap_invalidate_lock_shared(mapping);
+
+ start = folio->index;
+ end = start + folio_nr_pages(folio);
+
+ kvm_gmem_invalidate_begin(mapping->host, start, end);
+
+ /*
+ * Do not truncate the range, what action is taken in response to the
+ * error is userspace's decision (assuming the architecture supports
+ * gracefully handling memory errors). If/when the guest attempts to
+ * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON,
+ * at which point KVM can either terminate the VM or propagate the
+ * error to userspace.
+ */
+
+ kvm_gmem_invalidate_end(mapping->host, start, end);
+
+ filemap_invalidate_unlock_shared(mapping);
+
+ return MF_DELAYED;
+}
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
+static void kvm_gmem_free_folio(struct folio *folio)
+{
+ struct page *page = folio_page(folio, 0);
+ kvm_pfn_t pfn = page_to_pfn(page);
+ int order = folio_order(folio);
+
+ kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
+}
+#endif
+
+static const struct address_space_operations kvm_gmem_aops = {
+ .dirty_folio = noop_dirty_folio,
+ .migrate_folio = kvm_gmem_migrate_folio,
+ .error_remove_folio = kvm_gmem_error_folio,
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
+ .free_folio = kvm_gmem_free_folio,
+#endif
+};
+
+static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr)
+{
+ return -EINVAL;
+}
+static const struct inode_operations kvm_gmem_iops = {
+ .setattr = kvm_gmem_setattr,
+};
+
+bool __weak kvm_arch_supports_gmem_init_shared(struct kvm *kvm)
+{
+ return true;
+}
+
+static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
+{
+ static const char *name = "[kvm-gmem]";
+ struct gmem_file *f;
+ struct inode *inode;
+ struct file *file;
+ int fd, err;
+
+ fd = get_unused_fd_flags(0);
+ if (fd < 0)
+ return fd;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f) {
+ err = -ENOMEM;
+ goto err_fd;
+ }
+
+ /* __fput() will take care of fops_put(). */
+ if (!fops_get(&kvm_gmem_fops)) {
+ err = -ENOENT;
+ goto err_gmem;
+ }
+
+ inode = anon_inode_make_secure_inode(kvm_gmem_mnt->mnt_sb, name, NULL);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto err_fops;
+ }
+
+ inode->i_op = &kvm_gmem_iops;
+ inode->i_mapping->a_ops = &kvm_gmem_aops;
+ inode->i_mode |= S_IFREG;
+ inode->i_size = size;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
+ mapping_set_inaccessible(inode->i_mapping);
+ /* Unmovable mappings are supposed to be marked unevictable as well. */
+ WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));
+
+ GMEM_I(inode)->flags = flags;
+
+ file = alloc_file_pseudo(inode, kvm_gmem_mnt, name, O_RDWR, &kvm_gmem_fops);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ goto err_inode;
+ }
+
+ file->f_flags |= O_LARGEFILE;
+ file->private_data = f;
+
+ kvm_get_kvm(kvm);
+ f->kvm = kvm;
+ xa_init(&f->bindings);
+ list_add(&f->entry, &inode->i_mapping->i_private_list);
+
+ fd_install(fd, file);
+ return fd;
+
+err_inode:
+ iput(inode);
+err_fops:
+ fops_put(&kvm_gmem_fops);
+err_gmem:
+ kfree(f);
+err_fd:
+ put_unused_fd(fd);
+ return err;
+}
+
+int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
+{
+ loff_t size = args->size;
+ u64 flags = args->flags;
+
+ if (flags & ~kvm_gmem_get_supported_flags(kvm))
+ return -EINVAL;
+
+ if (size <= 0 || !PAGE_ALIGNED(size))
+ return -EINVAL;
+
+ return __kvm_gmem_create(kvm, size, flags);
+}
+
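Tying the pieces together from the userspace side: a guest_memfd is created with KVM_CREATE_GUEST_MEMFD and then bound to a memslot with KVM_SET_USER_MEMORY_REGION2, which lands in kvm_gmem_bind() below. A hedged sketch; vm_fd is assumed, and error handling plus the userspace_addr for any shared alias are omitted:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int bind_guest_memfd(int vm_fd)
{
    struct kvm_create_guest_memfd gmem = { .size = 2ULL << 20 };
    struct kvm_userspace_memory_region2 region;
    int gmem_fd;

    gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
    if (gmem_fd < 0)
        return -1;

    memset(&region, 0, sizeof(region));
    region.slot = 0;
    region.flags = KVM_MEM_GUEST_MEMFD;
    region.guest_phys_addr = 0;
    region.memory_size = 2ULL << 20;
    region.guest_memfd = gmem_fd;          /* consumed by kvm_gmem_bind() */
    region.guest_memfd_offset = 0;

    /* userspace_addr is left 0 here; a real VMM would point it at the
       shared (non-guest_memfd) mapping for this range when one exists. */
    return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}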
+int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned int fd, loff_t offset)
+{
+ loff_t size = slot->npages << PAGE_SHIFT;
+ unsigned long start, end;
+ struct gmem_file *f;
+ struct inode *inode;
+ struct file *file;
+ int r = -EINVAL;
+
+ BUILD_BUG_ON(sizeof(gfn_t) != sizeof(slot->gmem.pgoff));
+
+ file = fget(fd);
+ if (!file)
+ return -EBADF;
+
+ if (file->f_op != &kvm_gmem_fops)
+ goto err;
+
+ f = file->private_data;
+ if (f->kvm != kvm)
+ goto err;
+
+ inode = file_inode(file);
+
+ if (offset < 0 || !PAGE_ALIGNED(offset) ||
+ offset + size > i_size_read(inode))
+ goto err;
+
+ filemap_invalidate_lock(inode->i_mapping);
+
+ start = offset >> PAGE_SHIFT;
+ end = start + slot->npages;
+
+ if (!xa_empty(&f->bindings) &&
+ xa_find(&f->bindings, &start, end - 1, XA_PRESENT)) {
+ filemap_invalidate_unlock(inode->i_mapping);
+ goto err;
+ }
+
+ /*
+ * Memslots with the KVM_MEM_GUEST_MEMFD flag are immutable once created,
+ * so kvm_gmem_bind() can only occur on a new memslot. Because the memslot
+ * is not yet visible, kvm_gmem_get_pfn() is guaranteed to see the file.
+ */
+ WRITE_ONCE(slot->gmem.file, file);
+ slot->gmem.pgoff = start;
+ if (kvm_gmem_supports_mmap(inode))
+ slot->flags |= KVM_MEMSLOT_GMEM_ONLY;
+
+ xa_store_range(&f->bindings, start, end - 1, slot, GFP_KERNEL);
+ filemap_invalidate_unlock(inode->i_mapping);
+
+ /*
+ * Drop the reference to the file, even on success. The file pins KVM,
+ * not the other way 'round. Active bindings are invalidated if the
+ * file is closed before memslots are destroyed.
+ */
+ r = 0;
+err:
+ fput(file);
+ return r;
+}
+
+static void __kvm_gmem_unbind(struct kvm_memory_slot *slot, struct gmem_file *f)
+{
+ unsigned long start = slot->gmem.pgoff;
+ unsigned long end = start + slot->npages;
+
+ xa_store_range(&f->bindings, start, end - 1, NULL, GFP_KERNEL);
+
+ /*
+ * synchronize_srcu(&kvm->srcu) ensured that kvm_gmem_get_pfn()
+ * cannot see this memslot.
+ */
+ WRITE_ONCE(slot->gmem.file, NULL);
+}
+
+void kvm_gmem_unbind(struct kvm_memory_slot *slot)
+{
+ /*
+ * Nothing to do if the underlying file was _already_ closed, as
+ * kvm_gmem_release() invalidates and nullifies all bindings.
+ */
+ if (!slot->gmem.file)
+ return;
+
+ CLASS(gmem_get_file, file)(slot);
+
+ /*
+ * However, if the file is _being_ closed, then the bindings need to be
+ * removed as kvm_gmem_release() might not run until after the memslot
+ * is freed. Note, modifying the bindings is safe even though the file
+ * is dying as kvm_gmem_release() nullifies slot->gmem.file under
+ * slots_lock, and only puts its reference to KVM after destroying all
+ * bindings. I.e. reaching this point means kvm_gmem_release() hasn't
+ * yet destroyed the bindings or freed the gmem_file, and can't do so
+ * until the caller drops slots_lock.
+ */
+ if (!file) {
+ __kvm_gmem_unbind(slot, slot->gmem.file->private_data);
+ return;
+ }
+
+ filemap_invalidate_lock(file->f_mapping);
+ __kvm_gmem_unbind(slot, file->private_data);
+ filemap_invalidate_unlock(file->f_mapping);
+}
+
+/* Returns a locked folio on success. */
+static struct folio *__kvm_gmem_get_pfn(struct file *file,
+ struct kvm_memory_slot *slot,
+ pgoff_t index, kvm_pfn_t *pfn,
+ bool *is_prepared, int *max_order)
+{
+ struct file *slot_file = READ_ONCE(slot->gmem.file);
+ struct gmem_file *f = file->private_data;
+ struct folio *folio;
+
+ if (file != slot_file) {
+ WARN_ON_ONCE(slot_file);
+ return ERR_PTR(-EFAULT);
+ }
+
+ if (xa_load(&f->bindings, index) != slot) {
+ WARN_ON_ONCE(xa_load(&f->bindings, index));
+ return ERR_PTR(-EIO);
+ }
+
+ folio = kvm_gmem_get_folio(file_inode(file), index);
+ if (IS_ERR(folio))
+ return folio;
+
+ if (folio_test_hwpoison(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
+ return ERR_PTR(-EHWPOISON);
+ }
+
+ *pfn = folio_file_pfn(folio, index);
+ if (max_order)
+ *max_order = 0;
+
+ *is_prepared = folio_test_uptodate(folio);
+ return folio;
+}
+
+int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+ int *max_order)
+{
+ pgoff_t index = kvm_gmem_get_index(slot, gfn);
+ struct folio *folio;
+ bool is_prepared = false;
+ int r = 0;
+
+ CLASS(gmem_get_file, file)(slot);
+ if (!file)
+ return -EFAULT;
+
+ folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ if (!is_prepared)
+ r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
+
+ folio_unlock(folio);
+
+ if (!r)
+ *page = folio_file_page(folio, index);
+ else
+ folio_put(folio);
+
+ return r;
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_get_pfn);
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
+long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
+ kvm_gmem_populate_cb post_populate, void *opaque)
+{
+ struct kvm_memory_slot *slot;
+ void __user *p;
+
+ int ret = 0, max_order;
+ long i;
+
+ lockdep_assert_held(&kvm->slots_lock);
+
+ if (WARN_ON_ONCE(npages <= 0))
+ return -EINVAL;
+
+ slot = gfn_to_memslot(kvm, start_gfn);
+ if (!kvm_slot_has_gmem(slot))
+ return -EINVAL;
+
+ CLASS(gmem_get_file, file)(slot);
+ if (!file)
+ return -EFAULT;
+
+ filemap_invalidate_lock(file->f_mapping);
+
+ npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
+ for (i = 0; i < npages; i += (1 << max_order)) {
+ struct folio *folio;
+ gfn_t gfn = start_gfn + i;
+ pgoff_t index = kvm_gmem_get_index(slot, gfn);
+ bool is_prepared = false;
+ kvm_pfn_t pfn;
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
+ break;
+ }
+
+ if (is_prepared) {
+ folio_unlock(folio);
+ folio_put(folio);
+ ret = -EEXIST;
+ break;
+ }
+
+ folio_unlock(folio);
+ WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
+ (npages - i) < (1 << max_order));
+
+ ret = -EINVAL;
+ while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
+ KVM_MEMORY_ATTRIBUTE_PRIVATE,
+ KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
+ if (!max_order)
+ goto put_folio_and_exit;
+ max_order--;
+ }
+
+ p = src ? src + i * PAGE_SIZE : NULL;
+ ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
+ if (!ret)
+ kvm_gmem_mark_prepared(folio);
+
+put_folio_and_exit:
+ folio_put(folio);
+ if (ret)
+ break;
+ }
+
+ filemap_invalidate_unlock(file->f_mapping);
+
+ return ret && !i ? ret : i;
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_populate);
+#endif
+
+static struct kmem_cache *kvm_gmem_inode_cachep;
+
+static void kvm_gmem_init_inode_once(void *__gi)
+{
+ struct gmem_inode *gi = __gi;
+
+ /*
+ * Note! Don't initialize the inode with anything specific to the
+ * guest_memfd instance, or that might be specific to how the inode is
+ * used (from the VFS-layer's perspective). This hook is called only
+ * during the initial slab allocation, i.e. only fields/state that are
+ * idempotent across _all_ use of the inode _object_ can be initialized
+ * at this time!
+ */
+ inode_init_once(&gi->vfs_inode);
+}
+
+static struct inode *kvm_gmem_alloc_inode(struct super_block *sb)
+{
+ struct gmem_inode *gi;
+
+ gi = alloc_inode_sb(sb, kvm_gmem_inode_cachep, GFP_KERNEL);
+ if (!gi)
+ return NULL;
+
+ mpol_shared_policy_init(&gi->policy, NULL);
+
+ gi->flags = 0;
+ return &gi->vfs_inode;
+}
+
+static void kvm_gmem_destroy_inode(struct inode *inode)
+{
+ mpol_free_shared_policy(&GMEM_I(inode)->policy);
+}
+
+static void kvm_gmem_free_inode(struct inode *inode)
+{
+ kmem_cache_free(kvm_gmem_inode_cachep, GMEM_I(inode));
+}
+
+static const struct super_operations kvm_gmem_super_operations = {
+ .statfs = simple_statfs,
+ .alloc_inode = kvm_gmem_alloc_inode,
+ .destroy_inode = kvm_gmem_destroy_inode,
+ .free_inode = kvm_gmem_free_inode,
+};
+
+static int kvm_gmem_init_fs_context(struct fs_context *fc)
+{
+ struct pseudo_fs_context *ctx;
+
+ if (!init_pseudo(fc, GUEST_MEMFD_MAGIC))
+ return -ENOMEM;
+
+ fc->s_iflags |= SB_I_NOEXEC;
+ fc->s_iflags |= SB_I_NODEV;
+ ctx = fc->fs_private;
+ ctx->ops = &kvm_gmem_super_operations;
+
+ return 0;
+}
+
+static struct file_system_type kvm_gmem_fs = {
+ .name = "guest_memfd",
+ .init_fs_context = kvm_gmem_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+static int kvm_gmem_init_mount(void)
+{
+ kvm_gmem_mnt = kern_mount(&kvm_gmem_fs);
+
+ if (IS_ERR(kvm_gmem_mnt))
+ return PTR_ERR(kvm_gmem_mnt);
+
+ kvm_gmem_mnt->mnt_flags |= MNT_NOEXEC;
+ return 0;
+}
+
+int kvm_gmem_init(struct module *module)
+{
+ struct kmem_cache_args args = {
+ .align = 0,
+ .ctor = kvm_gmem_init_inode_once,
+ };
+ int ret;
+
+ kvm_gmem_fops.owner = module;
+ kvm_gmem_inode_cachep = kmem_cache_create("kvm_gmem_inode_cache",
+ sizeof(struct gmem_inode),
+ &args, SLAB_ACCOUNT);
+ if (!kvm_gmem_inode_cachep)
+ return -ENOMEM;
+
+ ret = kvm_gmem_init_mount();
+ if (ret) {
+ kmem_cache_destroy(kvm_gmem_inode_cachep);
+ return ret;
+ }
+ return 0;
+}
+
+void kvm_gmem_exit(void)
+{
+ kern_unmount(kvm_gmem_mnt);
+ kvm_gmem_mnt = NULL;
+ rcu_barrier();
+ kmem_cache_destroy(kvm_gmem_inode_cachep);
+}
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index 1e567d1f6d3d..6ccabfd32287 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -222,8 +222,6 @@ int kvm_set_irq_routing(struct kvm *kvm,
kvm_arch_irq_routing_update(kvm);
mutex_unlock(&kvm->irq_lock);
- kvm_arch_post_irq_routing_update(kvm);
-
synchronize_srcu_expedited(&kvm->irq_srcu);
new = old;
@@ -237,3 +235,27 @@ out:
return r;
}
+
+/*
+ * Allocate empty IRQ routing by default so that additional setup isn't needed
+ * when userspace-driven IRQ routing is activated, and so that kvm->irq_routing
+ * is guaranteed to be non-NULL.
+ */
+int kvm_init_irq_routing(struct kvm *kvm)
+{
+ struct kvm_irq_routing_table *new;
+ int chip_size;
+
+ new = kzalloc(struct_size(new, map, 1), GFP_KERNEL_ACCOUNT);
+ if (!new)
+ return -ENOMEM;
+
+ new->nr_rt_entries = 1;
+
+ chip_size = sizeof(int) * KVM_NR_IRQCHIPS * KVM_IRQCHIP_NUM_PINS;
+ memset(new->chip, -1, chip_size);
+
+ RCU_INIT_POINTER(kvm->irq_routing, new);
+
+ return 0;
+}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9c60384b5ae0..5fcd401a5897 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1,9 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Kernel-based Virtual Machine driver for Linux
- *
- * This module enables machines with Intel VT-x extensions to run virtual
- * machines without emulation or binary translation.
+ * Kernel-based Virtual Machine (KVM) Hypervisor
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
@@ -52,6 +49,7 @@
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>
+#include <linux/rseq.h>
#include <asm/processor.h>
#include <asm/ioctl.h>
@@ -62,36 +60,47 @@
#include "kvm_mm.h"
#include "vfio.h"
+#include <trace/events/ipi.h>
+
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
#include <linux/kvm_dirty_ring.h>
+
/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12
MODULE_AUTHOR("Qumranet");
+MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
MODULE_LICENSE("GPL");
/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
-EXPORT_SYMBOL_GPL(halt_poll_ns);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns);
/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
-EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow);
/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
-EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow_start);
-/* Default resets per-vcpu halt_poll_ns . */
-unsigned int halt_poll_ns_shrink;
+/* Default halves per-vcpu halt_poll_ns. */
+unsigned int halt_poll_ns_shrink = 2;
module_param(halt_poll_ns_shrink, uint, 0644);
-EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_shrink);
+
+/*
+ * Allow direct access (from KVM or the CPU) without MMU notifier protection
+ * to unpinned pages.
+ */
+static bool allow_unsafe_mappings;
+module_param(allow_unsafe_mappings, bool, 0444);
/*
* Ordering of locks:
@@ -100,25 +109,17 @@ EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
*/
DEFINE_MUTEX(kvm_lock);
-static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);
-static cpumask_var_t cpus_hardware_enabled;
-static int kvm_usage_count;
-static atomic_t hardware_enable_failed;
-
static struct kmem_cache *kvm_vcpu_cache;
static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
-struct dentry *kvm_debugfs_dir;
-EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
+static struct dentry *kvm_debugfs_dir;
static const struct file_operations stat_fops_per_vm;
-static struct file_operations kvm_chardev_ops;
-
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
@@ -143,14 +144,9 @@ static int kvm_no_compat_open(struct inode *inode, struct file *file)
#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
.open = kvm_no_compat_open
#endif
-static int hardware_enable_all(void);
-static void hardware_disable_all(void);
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-__visible bool kvm_rebooting;
-EXPORT_SYMBOL_GPL(kvm_rebooting);
-
#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
@@ -159,61 +155,10 @@ static unsigned long long kvm_active_vms;
static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
-__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
-{
-}
-
__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}
-bool kvm_is_zone_device_page(struct page *page)
-{
- /*
- * The metadata used by is_zone_device_page() to determine whether or
- * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
- * the device has been pinned, e.g. by get_user_pages(). WARN if the
- * page_count() is zero to help detect bad usage of this helper.
- */
- if (WARN_ON_ONCE(!page_count(page)))
- return false;
-
- return is_zone_device_page(page);
-}
-
-/*
- * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
- * page, NULL otherwise. Note, the list of refcounted PG_reserved page types
- * is likely incomplete, it has been compiled purely through people wanting to
- * back guest with a certain type of memory and encountering issues.
- */
-struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
-{
- struct page *page;
-
- if (!pfn_valid(pfn))
- return NULL;
-
- page = pfn_to_page(pfn);
- if (!PageReserved(page))
- return page;
-
- /* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
- if (is_zero_pfn(pfn))
- return page;
-
- /*
- * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
- * perspective they are "normal" pages, albeit with slightly different
- * usage rules.
- */
- if (kvm_is_zone_device_page(page))
- return page;
-
- return NULL;
-}
-
/*
* Switches to specified vcpu, until a matching vcpu_put()
*/
@@ -226,7 +171,7 @@ void vcpu_load(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
}
-EXPORT_SYMBOL_GPL(vcpu_load);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_load);
void vcpu_put(struct kvm_vcpu *vcpu)
{
@@ -236,7 +181,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
__this_cpu_write(kvm_running_vcpu, NULL);
preempt_enable();
}
-EXPORT_SYMBOL_GPL(vcpu_put);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_put);
/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
@@ -323,8 +268,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
return called;
}
-bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
- struct kvm_vcpu *except)
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
struct kvm_vcpu *vcpu;
struct cpumask *cpus;
@@ -337,25 +281,16 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
cpumask_clear(cpus);
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (vcpu == except)
- continue;
+ kvm_for_each_vcpu(i, vcpu, kvm)
kvm_make_vcpu_request(vcpu, req, cpus, me);
- }
called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
put_cpu();
return called;
}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_make_all_cpus_request);
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
-{
- return kvm_make_all_cpus_request_except(kvm, req, NULL);
-}
-EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
-
-#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
++kvm->stat.generic.remote_tlb_flush_requests;
@@ -371,12 +306,38 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
* kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
* barrier here.
*/
- if (!kvm_arch_flush_remote_tlb(kvm)
+ if (!kvm_arch_flush_remote_tlbs(kvm)
|| kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.generic.remote_tlb_flush;
}
-EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
-#endif
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_flush_remote_tlbs);
+
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
+{
+ if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
+ return;
+
+ /*
+ * Fall back to flushing the entire TLB if the architecture's range-based
+ * TLB invalidation is unsupported or can't be performed for whatever
+ * reason.
+ */
+ kvm_flush_remote_tlbs(kvm);
+}
+
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot)
+{
+ /*
+ * All current use cases for flushing the TLBs for a specific memslot
+ * are related to dirty logging, and many do the TLB flush out of
+ * mmu_lock. The interaction between the various operations on the memslot
+ * must be serialized by slots_lock to ensure the TLB flush from one
+ * operation is observed by any other operation on the same memslot.
+ */
+ lockdep_assert_held(&kvm->slots_lock);
+ kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
+}
static void kvm_flush_shadow_all(struct kvm *kvm)
{
@@ -388,12 +349,17 @@ static void kvm_flush_shadow_all(struct kvm *kvm)
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
gfp_t gfp_flags)
{
+ void *page;
+
gfp_flags |= mc->gfp_zero;
if (mc->kmem_cache)
return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
- else
- return (void *)__get_free_page(gfp_flags);
+
+ page = (void *)__get_free_page(gfp_flags);
+ if (page && mc->init_value)
+ memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
+ return page;
}
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
@@ -408,7 +374,14 @@ int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity,
if (WARN_ON_ONCE(!capacity))
return -EIO;
- mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
+ /*
+ * Custom init values can be used only for page allocations,
+ * and obviously conflict with __GFP_ZERO.
+ */
+ if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
+ return -EIO;
+
+ mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
if (!mc->objects)
return -ENOMEM;
@@ -473,6 +446,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
vcpu->pid = NULL;
+ rwlock_init(&vcpu->pid_lock);
#ifndef __KVM_HAVE_ARCH_WQP
rcuwait_init(&vcpu->wait);
#endif
@@ -500,7 +474,7 @@ static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
* the vcpu->pid pointer, and at destruction time all file descriptors
* are already gone.
*/
- put_pid(rcu_dereference_protected(vcpu->pid, 1));
+ put_pid(vcpu->pid);
free_page((unsigned long)vcpu->run);
kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -514,49 +488,59 @@ void kvm_destroy_vcpus(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_vcpu_destroy(vcpu);
xa_erase(&kvm->vcpu_array, i);
+
+ /*
+ * Assert that the vCPU isn't visible in any way, to ensure KVM
+ * doesn't trigger a use-after-free if destroying vCPUs results
+ * in a VM-wide request, e.g. to flush remote TLBs when tearing
+ * down MMUs, or to mark the VM dead if a KVM_BUG_ON() fires.
+ */
+ WARN_ON_ONCE(xa_load(&kvm->vcpu_array, i) || kvm_get_vcpu(kvm, i));
}
atomic_set(&kvm->online_vcpus, 0);
}
-EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_destroy_vcpus);
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
return container_of(mn, struct kvm, mmu_notifier);
}
-static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int idx;
-
- idx = srcu_read_lock(&kvm->srcu);
- kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
- srcu_read_unlock(&kvm->srcu, idx);
-}
-
-typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
-
-typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
- unsigned long end);
+typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
-typedef void (*on_unlock_fn_t)(struct kvm *kvm);
+typedef void (*on_lock_fn_t)(struct kvm *kvm);
-struct kvm_hva_range {
- unsigned long start;
- unsigned long end;
- pte_t pte;
- hva_handler_t handler;
+struct kvm_mmu_notifier_range {
+ /*
+ * 64-bit addresses, as KVM notifiers can operate on host virtual
+ * addresses (unsigned long) and guest physical addresses (64-bit).
+ */
+ u64 start;
+ u64 end;
+ union kvm_mmu_notifier_arg arg;
+ gfn_handler_t handler;
on_lock_fn_t on_lock;
- on_unlock_fn_t on_unlock;
bool flush_on_ret;
bool may_block;
+ bool lockless;
};
/*
+ * The inner-most helper returns a tuple containing the return value from the
+ * arch- and action-specific handler, plus a flag indicating whether or not at
+ * least one memslot was found, i.e. if the handler found guest memory.
+ *
+ * Note, most notifiers are averse to booleans, so even though KVM tracks the
+ * return from arch code as a bool, outer helpers will cast it to an int. :-(
+ */
+typedef struct kvm_mmu_notifier_return {
+ bool ret;
+ bool found_memslot;
+} kvm_mn_ret_t;
+
+/*
* Use a dedicated stub instead of NULL to indicate that there is no callback
* function/handler. The compiler technically can't guarantee that a real
* function will have a non-zero address, and so it will generate code to
@@ -575,26 +559,33 @@ static void kvm_null_fn(void)
node; \
node = interval_tree_iter_next(node, start, last)) \
-static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
- const struct kvm_hva_range *range)
+static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
+ const struct kvm_mmu_notifier_range *range)
{
- bool ret = false, locked = false;
+ struct kvm_mmu_notifier_return r = {
+ .ret = false,
+ .found_memslot = false,
+ };
struct kvm_gfn_range gfn_range;
struct kvm_memory_slot *slot;
struct kvm_memslots *slots;
int i, idx;
if (WARN_ON_ONCE(range->end <= range->start))
- return 0;
+ return r;
/* A null handler is allowed if and only if on_lock() is provided. */
if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
IS_KVM_NULL_FN(range->handler)))
- return 0;
+ return r;
+
+ /* on_lock will never be called for lockless walks */
+ if (WARN_ON_ONCE(range->lockless && !IS_KVM_NULL_FN(range->on_lock)))
+ return r;
idx = srcu_read_lock(&kvm->srcu);
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
struct interval_tree_node *node;
slots = __kvm_memslots(kvm, i);
@@ -603,9 +594,9 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
unsigned long hva_start, hva_end;
slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
- hva_start = max(range->start, slot->userspace_addr);
- hva_end = min(range->end, slot->userspace_addr +
- (slot->npages << PAGE_SHIFT));
+ hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
+ hva_end = min_t(unsigned long, range->end,
+ slot->userspace_addr + (slot->npages << PAGE_SHIFT));
/*
* To optimize for the likely case where the address
@@ -613,8 +604,13 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
* bother making these conditional (to avoid writes on
* the second or later invocation of the handler).
*/
- gfn_range.pte = range->pte;
+ gfn_range.arg = range->arg;
gfn_range.may_block = range->may_block;
+ /*
+ * HVA-based notifications aren't relevant to private
+ * mappings as they don't have a userspace mapping.
+ */
+ gfn_range.attr_filter = KVM_FILTER_SHARED;
/*
* {gfn(page) | page intersects with [hva_start, hva_end)} =
@@ -623,108 +619,86 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
gfn_range.slot = slot;
-
- if (!locked) {
- locked = true;
- KVM_MMU_LOCK(kvm);
- if (!IS_KVM_NULL_FN(range->on_lock))
- range->on_lock(kvm, range->start, range->end);
- if (IS_KVM_NULL_FN(range->handler))
- break;
+ gfn_range.lockless = range->lockless;
+
+ if (!r.found_memslot) {
+ r.found_memslot = true;
+ if (!range->lockless) {
+ KVM_MMU_LOCK(kvm);
+ if (!IS_KVM_NULL_FN(range->on_lock))
+ range->on_lock(kvm);
+
+ if (IS_KVM_NULL_FN(range->handler))
+ goto mmu_unlock;
+ }
}
- ret |= range->handler(kvm, &gfn_range);
+ r.ret |= range->handler(kvm, &gfn_range);
}
}
- if (range->flush_on_ret && ret)
+ if (range->flush_on_ret && r.ret)
kvm_flush_remote_tlbs(kvm);
- if (locked) {
+mmu_unlock:
+ if (r.found_memslot && !range->lockless)
KVM_MMU_UNLOCK(kvm);
- if (!IS_KVM_NULL_FN(range->on_unlock))
- range->on_unlock(kvm);
- }
srcu_read_unlock(&kvm->srcu, idx);
- /* The notifiers are averse to booleans. :-( */
- return (int)ret;
+ return r;
}
-static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
- pte_t pte,
- hva_handler_t handler)
+ gfn_handler_t handler,
+ bool flush_on_ret)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- const struct kvm_hva_range range = {
+ const struct kvm_mmu_notifier_range range = {
.start = start,
.end = end,
- .pte = pte,
.handler = handler,
.on_lock = (void *)kvm_null_fn,
- .on_unlock = (void *)kvm_null_fn,
- .flush_on_ret = true,
+ .flush_on_ret = flush_on_ret,
.may_block = false,
+ .lockless = IS_ENABLED(CONFIG_KVM_MMU_LOCKLESS_AGING),
};
- return __kvm_handle_hva_range(kvm, &range);
+ return kvm_handle_hva_range(kvm, &range).ret;
}
-static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
- unsigned long start,
- unsigned long end,
- hva_handler_t handler)
-{
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- const struct kvm_hva_range range = {
- .start = start,
- .end = end,
- .pte = __pte(0),
- .handler = handler,
- .on_lock = (void *)kvm_null_fn,
- .on_unlock = (void *)kvm_null_fn,
- .flush_on_ret = false,
- .may_block = false,
- };
-
- return __kvm_handle_hva_range(kvm, &range);
-}
-static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address,
- pte_t pte)
+static __always_inline int kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
+ unsigned long start,
+ unsigned long end,
+ gfn_handler_t handler)
{
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
-
- trace_kvm_set_spte_hva(address);
-
- /*
- * .change_pte() must be surrounded by .invalidate_range_{start,end}().
- * If mmu_invalidate_in_progress is zero, then no in-progress
- * invalidations, including this one, found a relevant memslot at
- * start(); rechecking memslots here is unnecessary. Note, a false
- * positive (count elevated by a different invalidation) is sub-optimal
- * but functionally ok.
- */
- WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
- if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
- return;
-
- kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
+ return kvm_age_hva_range(mn, start, end, handler, false);
}
-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
- unsigned long end)
+void kvm_mmu_invalidate_begin(struct kvm *kvm)
{
+ lockdep_assert_held_write(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
* spte can be established without taking the mmu_lock and
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_invalidate_in_progress++;
+
if (likely(kvm->mmu_invalidate_in_progress == 1)) {
+ kvm->mmu_invalidate_range_start = INVALID_GPA;
+ kvm->mmu_invalidate_range_end = INVALID_GPA;
+ }
+}
+
+void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
+{
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
+
+ if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
kvm->mmu_invalidate_range_start = start;
kvm->mmu_invalidate_range_end = end;
} else {
@@ -744,17 +718,21 @@ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
}
}
+bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
+ return kvm_unmap_gfn_range(kvm, range);
+}
+
static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- const struct kvm_hva_range hva_range = {
+ const struct kvm_mmu_notifier_range hva_range = {
.start = range->start,
.end = range->end,
- .pte = __pte(0),
- .handler = kvm_unmap_gfn_range,
+ .handler = kvm_mmu_unmap_gfn_range,
.on_lock = kvm_mmu_invalidate_begin,
- .on_unlock = kvm_arch_guest_memory_reclaimed,
.flush_on_ret = true,
.may_block = mmu_notifier_range_blockable(range),
};
@@ -783,17 +761,23 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
* mn_active_invalidate_count (see above) instead of
* mmu_invalidate_in_progress.
*/
- gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
- hva_range.may_block);
+ gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
- __kvm_handle_hva_range(kvm, &hva_range);
+ /*
+ * If one or more memslots were found and thus zapped, notify arch code
+ * that guest memory has been reclaimed. This needs to be done *after*
+ * dropping mmu_lock, as x86's reclaim path is slooooow.
+ */
+ if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
+ kvm_arch_guest_memory_reclaimed(kvm);
return 0;
}
-void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
- unsigned long end)
+void kvm_mmu_invalidate_end(struct kvm *kvm)
{
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
/*
* This sequence increase will notify the kvm page fault that
* the page that is going to be mapped in the spte could have
@@ -807,29 +791,36 @@ void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
* in conjunction with the smp_rmb in mmu_invalidate_retry().
*/
kvm->mmu_invalidate_in_progress--;
+ KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
+
+ /*
+ * Assert that at least one range was added between start() and end().
+ * Not adding a range isn't fatal, but it is a KVM bug.
+ */
+ WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
}
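Taken together, the assertions in begin(), range_add() and end() above enforce a fixed protocol on MMU invalidations. A minimal sketch of that calling pattern, using the KVM_MMU_LOCK/UNLOCK macros seen elsewhere in this file; the caller name and the zap step are hypothetical.

static void example_invalidate_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
{
	KVM_MMU_LOCK(kvm);
	kvm_mmu_invalidate_begin(kvm);
	kvm_mmu_invalidate_range_add(kvm, start, end);

	/* ... zap/unmap SPTEs covering [start, end) ... */

	kvm_mmu_invalidate_end(kvm);
	KVM_MMU_UNLOCK(kvm);
}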
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- const struct kvm_hva_range hva_range = {
+ const struct kvm_mmu_notifier_range hva_range = {
.start = range->start,
.end = range->end,
- .pte = __pte(0),
.handler = (void *)kvm_null_fn,
.on_lock = kvm_mmu_invalidate_end,
- .on_unlock = (void *)kvm_null_fn,
.flush_on_ret = false,
.may_block = mmu_notifier_range_blockable(range),
};
bool wake;
- __kvm_handle_hva_range(kvm, &hva_range);
+ kvm_handle_hva_range(kvm, &hva_range);
/* Pairs with the increment in range_start(). */
spin_lock(&kvm->mn_invalidate_lock);
- wake = (--kvm->mn_active_invalidate_count == 0);
+ if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count))
+ --kvm->mn_active_invalidate_count;
+ wake = !kvm->mn_active_invalidate_count;
spin_unlock(&kvm->mn_invalidate_lock);
/*
@@ -838,8 +829,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
*/
if (wake)
rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
-
- BUG_ON(kvm->mmu_invalidate_in_progress < 0);
}
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
@@ -849,7 +838,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
{
trace_kvm_age_hva(start, end);
- return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
+ return kvm_age_hva_range(mn, start, end, kvm_age_gfn,
+ !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
}
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -872,7 +862,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
* cadence. If we find this inaccurate, we might come up with a
* more sophisticated heuristic later.
*/
- return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
+ return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
@@ -881,8 +871,8 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
{
trace_kvm_test_age_hva(address);
- return kvm_handle_hva_range_no_flush(mn, address, address + 1,
- kvm_test_age_gfn);
+ return kvm_age_hva_range_no_flush(mn, address, address + 1,
+ kvm_test_age_gfn);
}
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
@@ -897,13 +887,11 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
- .invalidate_range = kvm_mmu_notifier_invalidate_range,
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
.clear_young = kvm_mmu_notifier_clear_young,
.test_young = kvm_mmu_notifier_test_young,
- .change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
};
@@ -913,14 +901,14 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}
-#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
+#else /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */
static int kvm_init_mmu_notifier(struct kvm *kvm)
{
return 0;
}
-#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
@@ -959,13 +947,16 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
if (!memslot->dirty_bitmap)
return;
- kvfree(memslot->dirty_bitmap);
+ vfree(memslot->dirty_bitmap);
memslot->dirty_bitmap = NULL;
}
/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
+ if (slot->flags & KVM_MEM_GUEST_MEMFD)
+ kvm_gmem_unbind(slot);
+
kvm_destroy_dirty_bitmap(slot);
kvm_arch_free_memslot(kvm, slot);
@@ -1088,10 +1079,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
&stat_fops_per_vm);
}
- ret = kvm_arch_create_vm_debugfs(kvm);
- if (ret)
- goto out_err;
-
+ kvm_arch_create_vm_debugfs(kvm);
return 0;
out_err:
kvm_destroy_vm_debugfs(kvm);
@@ -1099,15 +1087,6 @@ out_err:
}
/*
- * Called after the VM is otherwise initialized, but just before adding it to
- * the vm_list.
- */
-int __weak kvm_arch_post_init_vm(struct kvm *kvm)
-{
- return 0;
-}
-
-/*
* Called just after removing the VM from the vm_list, but before doing any
* other destruction.
*/
@@ -1121,24 +1100,27 @@ void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
* Cleanup should be automatic done in kvm_destroy_vm_debugfs() recursively, so
* a per-arch destroy interface is not needed.
*/
-int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
+void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
- return 0;
+}
+
+/* Called only on cleanup and destruction paths when there are no users. */
+static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm,
+ enum kvm_bus idx)
+{
+ return rcu_dereference_protected(kvm->buses[idx],
+ !refcount_read(&kvm->users_count));
}
static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{
struct kvm *kvm = kvm_arch_alloc_vm();
struct kvm_memslots *slots;
- int r = -ENOMEM;
- int i, j;
+ int r, i, j;
if (!kvm)
return ERR_PTR(-ENOMEM);
- /* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
- __module_get(kvm_chardev_ops.owner);
-
KVM_MMU_LOCK_INIT(kvm);
mmgrab(current->mm);
kvm->mm = current->mm;
@@ -1150,6 +1132,9 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
spin_lock_init(&kvm->mn_invalidate_lock);
rcuwait_init(&kvm->mn_memslots_update_rcuwait);
xa_init(&kvm->vcpu_array);
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+ xa_init(&kvm->mem_attr_array);
+#endif
INIT_LIST_HEAD(&kvm->gpc_list);
spin_lock_init(&kvm->gpc_lock);
@@ -1168,13 +1153,19 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
task_pid_nr(current));
+ r = -ENOMEM;
if (init_srcu_struct(&kvm->srcu))
goto out_err_no_srcu;
if (init_srcu_struct(&kvm->irq_srcu))
goto out_err_no_irq_srcu;
+ r = kvm_init_irq_routing(kvm);
+ if (r)
+ goto out_err_no_irq_routing;
+
refcount_set(&kvm->users_count, 1);
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+
+ for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
for (j = 0; j < 2; j++) {
slots = &kvm->__memslots[i][j];
@@ -1191,6 +1182,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
}
+ r = -ENOMEM;
for (i = 0; i < KVM_NR_BUSES; i++) {
rcu_assign_pointer(kvm->buses[i],
kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
@@ -1202,11 +1194,11 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
if (r)
goto out_err_no_arch_destroy_vm;
- r = hardware_enable_all();
+ r = kvm_enable_virtualization();
if (r)
goto out_err_no_disable;
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
@@ -1222,10 +1214,6 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
if (r)
goto out_err_no_debugfs;
- r = kvm_arch_post_init_vm(kvm);
- if (r)
- goto out_err;
-
mutex_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
mutex_unlock(&kvm_lock);
@@ -1235,30 +1223,29 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
return kvm;
-out_err:
- kvm_destroy_vm_debugfs(kvm);
out_err_no_debugfs:
kvm_coalesced_mmio_free(kvm);
out_no_coalesced_mmio:
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
if (kvm->mmu_notifier.ops)
mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
- hardware_disable_all();
+ kvm_disable_virtualization();
out_err_no_disable:
kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
for (i = 0; i < KVM_NR_BUSES; i++)
- kfree(kvm_get_bus(kvm, i));
+ kfree(kvm_get_bus_for_destruction(kvm, i));
+ kvm_free_irq_routing(kvm);
+out_err_no_irq_routing:
cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
kvm_arch_free_vm(kvm);
mmdrop(current->mm);
- module_put(kvm_chardev_ops.owner);
return ERR_PTR(r);
}
@@ -1270,6 +1257,12 @@ static void kvm_destroy_devices(struct kvm *kvm)
* We do not need to take the kvm->lock here, because nobody else
* has a reference to the struct kvm at this point and therefore
* cannot access the devices list anyhow.
+ *
+ * The device list is generally managed as an rculist, but list_del()
+ * is used intentionally here. If a bug in KVM introduced a reader that
+ * was not backed by a reference on the kvm struct, the hope is that
+ * it'd consume the poisoned forward pointer instead of suffering a
+ * use-after-free, even though this cannot be guaranteed.
*/
list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
list_del(&dev->vm_node);
@@ -1285,7 +1278,6 @@ static void kvm_destroy_vm(struct kvm *kvm)
kvm_destroy_pm_notifier(kvm);
kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
kvm_destroy_vm_debugfs(kvm);
- kvm_arch_sync_events(kvm);
mutex_lock(&kvm_lock);
list_del(&kvm->vm_list);
mutex_unlock(&kvm_lock);
@@ -1293,41 +1285,51 @@ static void kvm_destroy_vm(struct kvm *kvm)
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++) {
- struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
+ struct kvm_io_bus *bus = kvm_get_bus_for_destruction(kvm, i);
if (bus)
kvm_io_bus_destroy(bus);
kvm->buses[i] = NULL;
}
kvm_coalesced_mmio_free(kvm);
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
/*
* At this point, pending calls to invalidate_range_start()
* have completed but no more MMU notifiers will run, so
* mn_active_invalidate_count may remain unbalanced.
- * No threads can be waiting in install_new_memslots as the
+ * No threads can be waiting in kvm_swap_active_memslots() as the
* last reference on KVM has been dropped, but freeing
* memslots would deadlock without this manual intervention.
+ *
+ * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
+ * notifier between a start() and end(), then there shouldn't be any
+ * in-progress invalidations.
*/
WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
- kvm->mn_active_invalidate_count = 0;
+ if (kvm->mn_active_invalidate_count)
+ kvm->mn_active_invalidate_count = 0;
+ else
+ WARN_ON(kvm->mmu_invalidate_in_progress);
#else
kvm_flush_shadow_all(kvm);
#endif
kvm_arch_destroy_vm(kvm);
kvm_destroy_devices(kvm);
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
}
cleanup_srcu_struct(&kvm->irq_srcu);
+ srcu_barrier(&kvm->srcu);
cleanup_srcu_struct(&kvm->srcu);
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+ xa_destroy(&kvm->mem_attr_array);
+#endif
kvm_arch_free_vm(kvm);
preempt_notifier_dec();
- hardware_disable_all();
+ kvm_disable_virtualization();
mmdrop(mm);
- module_put(kvm_chardev_ops.owner);
}
void kvm_get_kvm(struct kvm *kvm)
@@ -1364,7 +1366,7 @@ void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
-EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_put_kvm_no_destroy);
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
@@ -1376,6 +1378,65 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
return 0;
}
+int kvm_trylock_all_vcpus(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long i, j;
+
+ lockdep_assert_held(&kvm->lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock))
+ goto out_unlock;
+ return 0;
+
+out_unlock:
+ kvm_for_each_vcpu(j, vcpu, kvm) {
+ if (i == j)
+ break;
+ mutex_unlock(&vcpu->mutex);
+ }
+ return -EINTR;
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_trylock_all_vcpus);
+
+int kvm_lock_all_vcpus(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long i, j;
+ int r;
+
+ lockdep_assert_held(&kvm->lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
+ if (r)
+ goto out_unlock;
+ }
+ return 0;
+
+out_unlock:
+ kvm_for_each_vcpu(j, vcpu, kvm) {
+ if (i == j)
+ break;
+ mutex_unlock(&vcpu->mutex);
+ }
+ return r;
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lock_all_vcpus);
+
+void kvm_unlock_all_vcpus(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+ lockdep_assert_held(&kvm->lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ mutex_unlock(&vcpu->mutex);
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_unlock_all_vcpus);
+
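An illustrative-only calling pattern for the new all-vCPU lock helpers; the function name and the work done while the vCPUs are locked are made up. Both helpers require kvm->lock to be held, and kvm_lock_all_vcpus() unwinds its own partial locking on failure, so the caller only has to release kvm->lock.

static int example_vm_wide_update(struct kvm *kvm)
{
	int r;

	mutex_lock(&kvm->lock);

	r = kvm_lock_all_vcpus(kvm);
	if (r)
		goto out;

	/* ... mutate VM-wide state that every vCPU must observe ... */

	kvm_unlock_all_vcpus(kvm);
out:
	mutex_unlock(&kvm->lock);
	return r;
}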
/*
* Allocation size is twice as large as the actual dirty bitmap size.
* See kvm_vm_ioctl_get_dirty_log() why this is needed.
@@ -1524,13 +1585,34 @@ static void kvm_replace_memslot(struct kvm *kvm,
}
}
-static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
+/*
+ * Flags that do not access any of the extra space of struct
+ * kvm_userspace_memory_region2. KVM_SET_USER_MEMORY_REGION_V1_FLAGS
+ * only allows these.
+ */
+#define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
+ (KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)
+
+static int check_memory_region_flags(struct kvm *kvm,
+ const struct kvm_userspace_memory_region2 *mem)
{
u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
-#ifdef __KVM_HAVE_READONLY_MEM
- valid_flags |= KVM_MEM_READONLY;
-#endif
+ if (IS_ENABLED(CONFIG_KVM_GUEST_MEMFD))
+ valid_flags |= KVM_MEM_GUEST_MEMFD;
+
+ /* Dirty logging private memory is not currently supported. */
+ if (mem->flags & KVM_MEM_GUEST_MEMFD)
+ valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
+
+ /*
+ * GUEST_MEMFD is incompatible with read-only memslots, as writes to
+ * read-only memslots have emulated MMIO, not page fault, semantics,
+ * and KVM doesn't allow emulated MMIO for private memory.
+ */
+ if (kvm_arch_has_readonly_mem(kvm) &&
+ !(mem->flags & KVM_MEM_GUEST_MEMFD))
+ valid_flags |= KVM_MEM_READONLY;
if (mem->flags & ~valid_flags)
return -EINVAL;
@@ -1589,7 +1671,7 @@ static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
* space 0 will use generations 0, 2, 4, ... while address space 1 will
* use generations 1, 3, 5, ...
*/
- gen += KVM_ADDRESS_SPACE_NUM;
+ gen += kvm_arch_nr_memslot_as_ids(kvm);
kvm_arch_memslots_updated(kvm, gen);
@@ -1750,13 +1832,13 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
kvm_arch_flush_shadow_memslot(kvm, old);
kvm_arch_guest_memory_reclaimed(kvm);
- /* Was released by kvm_swap_active_memslots, reacquire. */
+ /* Was released by kvm_swap_active_memslots(), reacquire. */
mutex_lock(&kvm->slots_arch_lock);
/*
* Copy the arch-specific field of the newly-installed slot back to the
* old slot as the arch data could have changed between releasing
- * slots_arch_lock in install_new_memslots() and re-acquiring the lock
+ * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
* above. Writers are required to retrieve memslots *after* acquiring
* slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
*/
@@ -1818,11 +1900,11 @@ static int kvm_set_memslot(struct kvm *kvm,
int r;
/*
- * Released in kvm_swap_active_memslots.
+ * Released in kvm_swap_active_memslots().
*
- * Must be held from before the current memslots are copied until
- * after the new memslots are installed with rcu_assign_pointer,
- * then released before the synchronize srcu in kvm_swap_active_memslots.
+ * Must be held from before the current memslots are copied until after
+ * the new memslots are installed with rcu_assign_pointer, then
+ * released before the synchronize srcu in kvm_swap_active_memslots().
*
* When modifying memslots outside of the slots_lock, must be held
* before reading the pointer to the current memslots until after all
@@ -1917,16 +1999,8 @@ static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
return false;
}
-/*
- * Allocate some memory and give it an address in the guest physical address
- * space.
- *
- * Discontiguous memory is allowed, mostly for framebuffers.
- *
- * Must be called holding kvm->slots_lock for write.
- */
-int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+static int kvm_set_memory_region(struct kvm *kvm,
+ const struct kvm_userspace_memory_region2 *mem)
{
struct kvm_memory_slot *old, *new;
struct kvm_memslots *slots;
@@ -1936,7 +2010,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
int as_id, id;
int r;
- r = check_memory_region_flags(mem);
+ lockdep_assert_held(&kvm->slots_lock);
+
+ r = check_memory_region_flags(kvm, mem);
if (r)
return r;
@@ -1955,11 +2031,23 @@ int __kvm_set_memory_region(struct kvm *kvm,
!access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))
return -EINVAL;
- if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
+ if (mem->flags & KVM_MEM_GUEST_MEMFD &&
+ (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
+ mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
+ return -EINVAL;
+ if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
return -EINVAL;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
return -EINVAL;
- if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
+
+ /*
+ * The size of userspace-defined memory regions is restricted in order
+ * to play nice with dirty bitmap operations, which are indexed with an
+ * "unsigned int". KVM's internal memory regions don't support dirty
+ * logging, and so are exempt.
+ */
+ if (id < KVM_USER_MEM_SLOTS &&
+ (mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
return -EINVAL;
slots = __kvm_memslots(kvm, as_id);
@@ -1993,6 +2081,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
return -EINVAL;
} else { /* Modify an existing slot. */
+ /* Private memslots are immutable, they can only be deleted. */
+ if (mem->flags & KVM_MEM_GUEST_MEMFD)
+ return -EINVAL;
if ((mem->userspace_addr != old->userspace_addr) ||
(npages != old->npages) ||
((mem->flags ^ old->flags) & KVM_MEM_READONLY))
@@ -2021,32 +2112,46 @@ int __kvm_set_memory_region(struct kvm *kvm,
new->npages = npages;
new->flags = mem->flags;
new->userspace_addr = mem->userspace_addr;
+ if (mem->flags & KVM_MEM_GUEST_MEMFD) {
+ r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
+ if (r)
+ goto out;
+ }
r = kvm_set_memslot(kvm, old, new, change);
if (r)
- kfree(new);
+ goto out_unbind;
+
+ return 0;
+
+out_unbind:
+ if (mem->flags & KVM_MEM_GUEST_MEMFD)
+ kvm_gmem_unbind(new);
+out:
+ kfree(new);
return r;
}
-EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
-int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+int kvm_set_internal_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region2 *mem)
{
- int r;
+ if (WARN_ON_ONCE(mem->slot < KVM_USER_MEM_SLOTS))
+ return -EINVAL;
- mutex_lock(&kvm->slots_lock);
- r = __kvm_set_memory_region(kvm, mem);
- mutex_unlock(&kvm->slots_lock);
- return r;
+ if (WARN_ON_ONCE(mem->flags))
+ return -EINVAL;
+
+ return kvm_set_memory_region(kvm, mem);
}
-EXPORT_SYMBOL_GPL(kvm_set_memory_region);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_internal_memslot);
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_userspace_memory_region2 *mem)
{
if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
return -EINVAL;
+ guard(mutex)(&kvm->slots_lock);
return kvm_set_memory_region(kvm, mem);
}
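From the userspace side, a hedged sketch of binding a guest_memfd to a memslot via the KVM_MEM_GUEST_MEMFD flag checked above. The KVM_SET_USER_MEMORY_REGION2 ioctl name and the struct kvm_userspace_memory_region2 layout are assumed from the uapi headers rather than shown in this diff; all values are illustrative.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_bind_gmem_slot(int vm_fd, int gmem_fd, void *shared_hva,
				  __u64 gpa, __u64 size)
{
	struct kvm_userspace_memory_region2 region = {
		.slot = 0,
		.flags = KVM_MEM_GUEST_MEMFD,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (__u64)(unsigned long)shared_hva,
		.guest_memfd = gmem_fd,		/* e.g. an fd from KVM_CREATE_GUEST_MEMFD */
		.guest_memfd_offset = 0,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}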
@@ -2075,7 +2180,7 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
as_id = log->slot >> 16;
id = (u16)log->slot;
- if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+ if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
return -EINVAL;
slots = __kvm_memslots(kvm, as_id);
@@ -2097,7 +2202,7 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
*is_dirty = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_dirty_log);
#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
/**
@@ -2137,7 +2242,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
as_id = log->slot >> 16;
id = (u16)log->slot;
- if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+ if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
return -EINVAL;
slots = __kvm_memslots(kvm, as_id);
@@ -2185,7 +2290,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
}
if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+ kvm_flush_remote_tlbs_memslot(kvm, memslot);
if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
return -EFAULT;
@@ -2249,7 +2354,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
as_id = log->slot >> 16;
id = (u16)log->slot;
- if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+ if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
return -EINVAL;
if (log->first_page & 63)
@@ -2302,7 +2407,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
KVM_MMU_UNLOCK(kvm);
if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+ kvm_flush_remote_tlbs_memslot(kvm, memslot);
return 0;
}
@@ -2321,11 +2426,218 @@ static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
}
#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static u64 kvm_supported_mem_attributes(struct kvm *kvm)
+{
+ if (!kvm || kvm_arch_has_private_mem(kvm))
+ return KVM_MEMORY_ATTRIBUTE_PRIVATE;
+
+ return 0;
+}
+
+/*
+ * Returns true if _all_ gfns in the range [@start, @end) have attributes
+ * such that the bits in @mask match @attrs.
+ */
+bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+ unsigned long mask, unsigned long attrs)
+{
+ XA_STATE(xas, &kvm->mem_attr_array, start);
+ unsigned long index;
+ void *entry;
+
+ mask &= kvm_supported_mem_attributes(kvm);
+ if (attrs & ~mask)
+ return false;
+
+ if (end == start + 1)
+ return (kvm_get_memory_attributes(kvm, start) & mask) == attrs;
+
+ guard(rcu)();
+ if (!attrs)
+ return !xas_find(&xas, end - 1);
+
+ for (index = start; index < end; index++) {
+ do {
+ entry = xas_next(&xas);
+ } while (xas_retry(&xas, entry));
+
+ if (xas.xa_index != index ||
+ (xa_to_value(entry) & mask) != attrs)
+ return false;
+ }
+
+ return true;
+}
+
+static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
+ struct kvm_mmu_notifier_range *range)
+{
+ struct kvm_gfn_range gfn_range;
+ struct kvm_memory_slot *slot;
+ struct kvm_memslots *slots;
+ struct kvm_memslot_iter iter;
+ bool found_memslot = false;
+ bool ret = false;
+ int i;
+
+ gfn_range.arg = range->arg;
+ gfn_range.may_block = range->may_block;
+
+ /*
+ * If/when KVM supports more attributes beyond private vs. shared, this
+ * _could_ set KVM_FILTER_{SHARED,PRIVATE} appropriately if the entire target
+ * range already has the desired private vs. shared state (it's unclear
+ * if that is a net win). For now, KVM reaches this point if and only
+ * if the private flag is being toggled, i.e. all mappings are in play.
+ */
+
+ for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
+ slots = __kvm_memslots(kvm, i);
+
+ kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
+ slot = iter.slot;
+ gfn_range.slot = slot;
+
+ gfn_range.start = max(range->start, slot->base_gfn);
+ gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
+ if (gfn_range.start >= gfn_range.end)
+ continue;
+
+ if (!found_memslot) {
+ found_memslot = true;
+ KVM_MMU_LOCK(kvm);
+ if (!IS_KVM_NULL_FN(range->on_lock))
+ range->on_lock(kvm);
+ }
+
+ ret |= range->handler(kvm, &gfn_range);
+ }
+ }
+
+ if (range->flush_on_ret && ret)
+ kvm_flush_remote_tlbs(kvm);
+
+ if (found_memslot)
+ KVM_MMU_UNLOCK(kvm);
+}
+
+static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range)
+{
+ /*
+ * Unconditionally add the range to the invalidation set, regardless of
+ * whether or not the arch callback actually needs to zap SPTEs. E.g.
+ * if KVM supports RWX attributes in the future and the attributes are
+ * going from R=>RW, zapping isn't strictly necessary. Unconditionally
+ * adding the range allows KVM to require that MMU invalidations add at
+ * least one range between begin() and end(), e.g. allows KVM to detect
+ * bugs where the add() is missed. Relaxing the rule *might* be safe,
+ * but it's not obvious that allowing new mappings while the attributes
+ * are in flux is desirable or worth the complexity.
+ */
+ kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
+
+ return kvm_arch_pre_set_memory_attributes(kvm, range);
+}
+
+/* Set @attributes for the gfn range [@start, @end). */
+static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+ unsigned long attributes)
+{
+ struct kvm_mmu_notifier_range pre_set_range = {
+ .start = start,
+ .end = end,
+ .arg.attributes = attributes,
+ .handler = kvm_pre_set_memory_attributes,
+ .on_lock = kvm_mmu_invalidate_begin,
+ .flush_on_ret = true,
+ .may_block = true,
+ };
+ struct kvm_mmu_notifier_range post_set_range = {
+ .start = start,
+ .end = end,
+ .arg.attributes = attributes,
+ .handler = kvm_arch_post_set_memory_attributes,
+ .on_lock = kvm_mmu_invalidate_end,
+ .may_block = true,
+ };
+ unsigned long i;
+ void *entry;
+ int r = 0;
+
+ entry = attributes ? xa_mk_value(attributes) : NULL;
+
+ trace_kvm_vm_set_mem_attributes(start, end, attributes);
+
+ mutex_lock(&kvm->slots_lock);
+
+ /* Nothing to do if the entire range has the desired attributes. */
+ if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes))
+ goto out_unlock;
+
+ /*
+ * Reserve memory ahead of time to avoid having to deal with failures
+ * partway through setting the new attributes.
+ */
+ for (i = start; i < end; i++) {
+ r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
+ if (r)
+ goto out_unlock;
+
+ cond_resched();
+ }
+
+ kvm_handle_gfn_range(kvm, &pre_set_range);
+
+ for (i = start; i < end; i++) {
+ r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
+ GFP_KERNEL_ACCOUNT));
+ KVM_BUG_ON(r, kvm);
+ cond_resched();
+ }
+
+ kvm_handle_gfn_range(kvm, &post_set_range);
+
+out_unlock:
+ mutex_unlock(&kvm->slots_lock);
+
+ return r;
+}
+static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
+ struct kvm_memory_attributes *attrs)
+{
+ gfn_t start, end;
+
+ /* flags is currently not used. */
+ if (attrs->flags)
+ return -EINVAL;
+ if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
+ return -EINVAL;
+ if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
+ return -EINVAL;
+ if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
+ return -EINVAL;
+
+ start = attrs->address >> PAGE_SHIFT;
+ end = (attrs->address + attrs->size) >> PAGE_SHIFT;
+
+ /*
+ * xarray tracks data using "unsigned long", and as a result so does
+ * KVM. For simplicity, generic attributes are supported only on 64-bit
+ * architectures.
+ */
+ BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));
+
+ return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
+}
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
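A userspace-side sketch of exercising the attribute path above by converting a GPA range to private. The KVM_SET_MEMORY_ATTRIBUTES ioctl name is assumed from the uapi headers; the struct fields mirror the attrs->address/size/attributes/flags checks in the handler, and address/size must be page-aligned per those checks.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_set_range_private(int vm_fd, __u64 gpa, __u64 size)
{
	struct kvm_memory_attributes attrs = {
		.address = gpa,				/* must be page-aligned */
		.size = size,				/* must be page-aligned */
		.attributes = KVM_MEMORY_ATTRIBUTE_PRIVATE,
		.flags = 0,				/* must be zero for now */
	};

	return ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
}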
+
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
-EXPORT_SYMBOL_GPL(gfn_to_memslot);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_memslot);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@@ -2359,6 +2671,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
return NULL;
}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_memslot);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
@@ -2366,7 +2679,7 @@ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
return kvm_is_visible_memslot(memslot);
}
-EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_visible_gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@@ -2374,7 +2687,7 @@ bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
return kvm_is_visible_memslot(memslot);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_is_visible_gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@@ -2431,19 +2744,19 @@ unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
{
return gfn_to_hva_many(slot, gfn, NULL);
}
-EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva_memslot);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
-EXPORT_SYMBOL_GPL(gfn_to_hva);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_hva);
/*
* Return the hva of a @gfn and the R/W attribute if possible.
@@ -2478,37 +2791,93 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
return gfn_to_hva_memslot_prot(slot, gfn, writable);
}
-static inline int check_user_page_hwpoison(unsigned long addr)
+static bool kvm_is_ad_tracked_page(struct page *page)
{
- int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
+ /*
+ * Per page-flags.h, pages tagged PG_reserved "should in general not be
+ * touched (e.g. set dirty) except by its owner".
+ */
+ return !PageReserved(page);
+}
- rc = get_user_pages(addr, 1, flags, NULL, NULL);
- return rc == -EHWPOISON;
+static void kvm_set_page_dirty(struct page *page)
+{
+ if (kvm_is_ad_tracked_page(page))
+ SetPageDirty(page);
+}
+
+static void kvm_set_page_accessed(struct page *page)
+{
+ if (kvm_is_ad_tracked_page(page))
+ mark_page_accessed(page);
+}
+
+void kvm_release_page_clean(struct page *page)
+{
+ if (!page)
+ return;
+
+ kvm_set_page_accessed(page);
+ put_page(page);
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_clean);
+
+void kvm_release_page_dirty(struct page *page)
+{
+ if (!page)
+ return;
+
+ kvm_set_page_dirty(page);
+ kvm_release_page_clean(page);
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_dirty);
+
+static kvm_pfn_t kvm_resolve_pfn(struct kvm_follow_pfn *kfp, struct page *page,
+ struct follow_pfnmap_args *map, bool writable)
+{
+ kvm_pfn_t pfn;
+
+ WARN_ON_ONCE(!!page == !!map);
+
+ if (kfp->map_writable)
+ *kfp->map_writable = writable;
+
+ if (map)
+ pfn = map->pfn;
+ else
+ pfn = page_to_pfn(page);
+
+ *kfp->refcounted_page = page;
+
+ return pfn;
}
/*
* The fast path to get the writable pfn which will be stored in @pfn,
- * true indicates success, otherwise false is returned. It's also the
- * only part that runs if we can in atomic context.
+ * true indicates success, otherwise false is returned.
*/
-static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
- bool *writable, kvm_pfn_t *pfn)
+static bool hva_to_pfn_fast(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
{
- struct page *page[1];
+ struct page *page;
+ bool r;
/*
- * Fast pin a writable pfn only if it is a write fault request
- * or the caller allows to map a writable pfn for a read fault
- * request.
+ * Try the fast-only path when the caller wants to pin/get the page for
+ * writing. If the caller only wants to read the page, KVM must go
+ * down the full, slow path in order to avoid racing an operation that
+ * breaks Copy-on-Write (CoW), e.g. so that KVM doesn't end up pointing
+ * at the old, read-only page while mm/ points at a new, writable page.
*/
- if (!(write_fault || writable))
+ if (!((kfp->flags & FOLL_WRITE) || kfp->map_writable))
return false;
- if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
- *pfn = page_to_pfn(page[0]);
+ if (kfp->pin)
+ r = pin_user_pages_fast(kfp->hva, 1, FOLL_WRITE, &page) == 1;
+ else
+ r = get_user_page_fast_only(kfp->hva, FOLL_WRITE, &page);
- if (writable)
- *writable = true;
+ if (r) {
+ *pfn = kvm_resolve_pfn(kfp, page, NULL, true);
return true;
}
@@ -2519,40 +2888,48 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
* The slow path to get the pfn of the specified host virtual address,
* 1 indicates success, -errno is returned if error is detected.
*/
-static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
- bool interruptible, bool *writable, kvm_pfn_t *pfn)
+static int hva_to_pfn_slow(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
{
- unsigned int flags = FOLL_HWPOISON;
- struct page *page;
+ /*
+ * When a VCPU accesses a page that is not mapped into the secondary
+ * MMU, we lookup the page using GUP to map it, so the guest VCPU can
+ * make progress. We always want to honor NUMA hinting faults in that
+ * case, because GUP usage corresponds to memory accesses from the VCPU.
+ * Otherwise, we'd not trigger NUMA hinting faults once a page is
+ * mapped into the secondary MMU and gets accessed by a VCPU.
+ *
+ * Note that get_user_page_fast_only() and FOLL_WRITE for now
+ * implicitly honor NUMA hinting faults and don't need this flag.
+ */
+ unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT | kfp->flags;
+ struct page *page, *wpage;
int npages;
- might_sleep();
-
- if (writable)
- *writable = write_fault;
-
- if (write_fault)
- flags |= FOLL_WRITE;
- if (async)
- flags |= FOLL_NOWAIT;
- if (interruptible)
- flags |= FOLL_INTERRUPTIBLE;
-
- npages = get_user_pages_unlocked(addr, 1, &page, flags);
+ if (kfp->pin)
+ npages = pin_user_pages_unlocked(kfp->hva, 1, &page, flags);
+ else
+ npages = get_user_pages_unlocked(kfp->hva, 1, &page, flags);
if (npages != 1)
return npages;
- /* map read fault as writable if possible */
- if (unlikely(!write_fault) && writable) {
- struct page *wpage;
+ /*
+ * Pinning is mutually exclusive with opportunistically mapping a read
+ * fault as writable, as KVM should never pin pages when mapping memory
+ * into the guest (pinning is only for direct accesses from KVM).
+ */
+ if (WARN_ON_ONCE(kfp->map_writable && kfp->pin))
+ goto out;
- if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
- *writable = true;
- put_page(page);
- page = wpage;
- }
+ /* map read fault as writable if possible */
+ if (!(flags & FOLL_WRITE) && kfp->map_writable &&
+ get_user_page_fast_only(kfp->hva, FOLL_WRITE, &wpage)) {
+ put_page(page);
+ page = wpage;
+ flags |= FOLL_WRITE;
}
- *pfn = page_to_pfn(page);
+
+out:
+ *pfn = kvm_resolve_pfn(kfp, page, NULL, flags & FOLL_WRITE);
return npages;
}
@@ -2567,33 +2944,29 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
return true;
}
-static int kvm_try_get_pfn(kvm_pfn_t pfn)
-{
- struct page *page = kvm_pfn_to_refcounted_page(pfn);
-
- if (!page)
- return 1;
-
- return get_page_unless_zero(page);
-}
-
static int hva_to_pfn_remapped(struct vm_area_struct *vma,
- unsigned long addr, bool write_fault,
- bool *writable, kvm_pfn_t *p_pfn)
+ struct kvm_follow_pfn *kfp, kvm_pfn_t *p_pfn)
{
- kvm_pfn_t pfn;
- pte_t *ptep;
- spinlock_t *ptl;
+ struct follow_pfnmap_args args = { .vma = vma, .address = kfp->hva };
+ bool write_fault = kfp->flags & FOLL_WRITE;
int r;
- r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+ /*
+ * Remapped memory cannot be pinned in any meaningful sense. Bail if
+ * the caller wants to pin the page, i.e. access the page outside of
+ * MMU notifier protection, and unsafe mappings are disallowed.
+ */
+ if (kfp->pin && !allow_unsafe_mappings)
+ return -EINVAL;
+
+ r = follow_pfnmap_start(&args);
if (r) {
/*
* get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
* not call the fault handler, so do it here.
*/
bool unlocked = false;
- r = fixup_user_fault(current->mm, addr,
+ r = fixup_user_fault(current->mm, kfp->hva,
(write_fault ? FAULT_FLAG_WRITE : 0),
&unlocked);
if (unlocked)
@@ -2601,187 +2974,110 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
if (r)
return r;
- r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+ r = follow_pfnmap_start(&args);
if (r)
return r;
}
- if (write_fault && !pte_write(*ptep)) {
- pfn = KVM_PFN_ERR_RO_FAULT;
+ if (write_fault && !args.writable) {
+ *p_pfn = KVM_PFN_ERR_RO_FAULT;
goto out;
}
- if (writable)
- *writable = pte_write(*ptep);
- pfn = pte_pfn(*ptep);
-
- /*
- * Get a reference here because callers of *hva_to_pfn* and
- * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
- * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
- * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
- * simply do nothing for reserved pfns.
- *
- * Whoever called remap_pfn_range is also going to call e.g.
- * unmap_mapping_range before the underlying pages are freed,
- * causing a call to our MMU notifier.
- *
- * Certain IO or PFNMAP mappings can be backed with valid
- * struct pages, but be allocated without refcounting e.g.,
- * tail pages of non-compound higher order allocations, which
- * would then underflow the refcount when the caller does the
- * required put_page. Don't allow those pages here.
- */
- if (!kvm_try_get_pfn(pfn))
- r = -EFAULT;
-
+ *p_pfn = kvm_resolve_pfn(kfp, NULL, &args, args.writable);
out:
- pte_unmap_unlock(ptep, ptl);
- *p_pfn = pfn;
-
+ follow_pfnmap_end(&args);
return r;
}
-/*
- * Pin guest page in memory and return its pfn.
- * @addr: host virtual address which maps memory to the guest
- * @atomic: whether this function can sleep
- * @interruptible: whether the process can be interrupted by non-fatal signals
- * @async: whether this function need to wait IO complete if the
- * host page is not in the memory
- * @write_fault: whether we should get a writable host page
- * @writable: whether it allows to map a writable host page for !@write_fault
- *
- * The function will map a writable host page for these two cases:
- * 1): @write_fault = true
- * 2): @write_fault = false && @writable, @writable will tell the caller
- * whether the mapping is writable.
- */
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
- bool *async, bool write_fault, bool *writable)
+kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp)
{
struct vm_area_struct *vma;
kvm_pfn_t pfn;
int npages, r;
- /* we can do it either atomically or asynchronously, not both */
- BUG_ON(atomic && async);
-
- if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
- return pfn;
+ might_sleep();
- if (atomic)
+ if (WARN_ON_ONCE(!kfp->refcounted_page))
return KVM_PFN_ERR_FAULT;
- npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
- writable, &pfn);
+ if (hva_to_pfn_fast(kfp, &pfn))
+ return pfn;
+
+ npages = hva_to_pfn_slow(kfp, &pfn);
if (npages == 1)
return pfn;
- if (npages == -EINTR)
+ if (npages == -EINTR || npages == -EAGAIN)
return KVM_PFN_ERR_SIGPENDING;
+ if (npages == -EHWPOISON)
+ return KVM_PFN_ERR_HWPOISON;
mmap_read_lock(current->mm);
- if (npages == -EHWPOISON ||
- (!async && check_user_page_hwpoison(addr))) {
- pfn = KVM_PFN_ERR_HWPOISON;
- goto exit;
- }
-
retry:
- vma = vma_lookup(current->mm, addr);
+ vma = vma_lookup(current->mm, kfp->hva);
if (vma == NULL)
pfn = KVM_PFN_ERR_FAULT;
else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
- r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
+ r = hva_to_pfn_remapped(vma, kfp, &pfn);
if (r == -EAGAIN)
goto retry;
if (r < 0)
pfn = KVM_PFN_ERR_FAULT;
} else {
- if (async && vma_is_valid(vma, write_fault))
- *async = true;
- pfn = KVM_PFN_ERR_FAULT;
+ if ((kfp->flags & FOLL_NOWAIT) &&
+ vma_is_valid(vma, kfp->flags & FOLL_WRITE))
+ pfn = KVM_PFN_ERR_NEEDS_IO;
+ else
+ pfn = KVM_PFN_ERR_FAULT;
}
-exit:
mmap_read_unlock(current->mm);
return pfn;
}
-kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
- bool atomic, bool interruptible, bool *async,
- bool write_fault, bool *writable, hva_t *hva)
+static kvm_pfn_t kvm_follow_pfn(struct kvm_follow_pfn *kfp)
{
- unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
-
- if (hva)
- *hva = addr;
+ kfp->hva = __gfn_to_hva_many(kfp->slot, kfp->gfn, NULL,
+ kfp->flags & FOLL_WRITE);
- if (addr == KVM_HVA_ERR_RO_BAD) {
- if (writable)
- *writable = false;
+ if (kfp->hva == KVM_HVA_ERR_RO_BAD)
return KVM_PFN_ERR_RO_FAULT;
- }
- if (kvm_is_error_hva(addr)) {
- if (writable)
- *writable = false;
+ if (kvm_is_error_hva(kfp->hva))
return KVM_PFN_NOSLOT;
- }
- /* Do not map writable pfn in the readonly memslot. */
- if (writable && memslot_is_readonly(slot)) {
- *writable = false;
- writable = NULL;
+ if (memslot_is_readonly(kfp->slot) && kfp->map_writable) {
+ *kfp->map_writable = false;
+ kfp->map_writable = NULL;
}
- return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
- writable);
-}
-EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
-
-kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
- bool *writable)
-{
- return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
- NULL, write_fault, writable, NULL);
-}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
-
-kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
-{
- return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
- NULL, NULL);
+ return hva_to_pfn(kfp);
}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
+kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
+ unsigned int foll, bool *writable,
+ struct page **refcounted_page)
{
- return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
- NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
+ struct kvm_follow_pfn kfp = {
+ .slot = slot,
+ .gfn = gfn,
+ .flags = foll,
+ .map_writable = writable,
+ .refcounted_page = refcounted_page,
+ };
-kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
- return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
-}
-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
+ if (WARN_ON_ONCE(!writable || !refcounted_page))
+ return KVM_PFN_ERR_FAULT;
-kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
-{
- return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
-}
-EXPORT_SYMBOL_GPL(gfn_to_pfn);
+ *writable = false;
+ *refcounted_page = NULL;
-kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
- return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
+ return kvm_follow_pfn(&kfp);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_faultin_pfn);
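For illustration, a minimal sketch of how an architecture fault handler might consume __kvm_faultin_pfn(): resolve the pfn, install the translation, then drop the reference via kvm_release_page_dirty()/kvm_release_page_clean(). Only the helpers shown in this diff are real; install_spte() and the surrounding bookkeeping are hypothetical stand-ins.

/*
 * Hedged sketch of a __kvm_faultin_pfn() caller; install_spte() is a
 * placeholder for the arch-specific code that installs the mapping.
 */
static int example_fault_in(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
			    gfn_t gfn, bool write)
{
	struct page *refcounted_page = NULL;
	bool writable = false;
	kvm_pfn_t pfn;

	pfn = __kvm_faultin_pfn(slot, gfn, write ? FOLL_WRITE : 0,
				&writable, &refcounted_page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	/* Hypothetical: map gfn -> pfn in the secondary MMU. */
	install_spte(vcpu, gfn, pfn, writable);

	/*
	 * Mark the page dirty/accessed as appropriate and drop the reference
	 * taken by __kvm_faultin_pfn(); both helpers tolerate a NULL page
	 * (e.g. for pfnmap memory).
	 */
	if (writable)
		kvm_release_page_dirty(refcounted_page);
	else
		kvm_release_page_clean(refcounted_page);
	return 0;
}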
-int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
- struct page **pages, int nr_pages)
+int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct page **pages, int nr_pages)
{
unsigned long addr;
gfn_t entry = 0;
@@ -2795,192 +3091,91 @@ int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
}
-EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prefetch_pages);
/*
- * Do not use this helper unless you are absolutely certain the gfn _must_ be
- * backed by 'struct page'. A valid example is if the backing memslot is
- * controlled by KVM. Note, if the returned page is valid, it's refcount has
- * been elevated by gfn_to_pfn().
+ * Don't use this API unless you are absolutely, positively certain that KVM
+ * needs to get a struct page, e.g. to pin the page for firmware DMA.
+ *
+ * FIXME: Users of this API likely need to FOLL_PIN the page, not just elevate
+ * its refcount.
*/
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
-{
- struct page *page;
- kvm_pfn_t pfn;
-
- pfn = gfn_to_pfn(kvm, gfn);
-
- if (is_error_noslot_pfn(pfn))
- return KVM_ERR_PTR_BAD_PAGE;
-
- page = kvm_pfn_to_refcounted_page(pfn);
- if (!page)
- return KVM_ERR_PTR_BAD_PAGE;
-
- return page;
-}
-EXPORT_SYMBOL_GPL(gfn_to_page);
+struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write)
+{
+ struct page *refcounted_page = NULL;
+ struct kvm_follow_pfn kfp = {
+ .slot = gfn_to_memslot(kvm, gfn),
+ .gfn = gfn,
+ .flags = write ? FOLL_WRITE : 0,
+ .refcounted_page = &refcounted_page,
+ };
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
-{
- if (dirty)
- kvm_release_pfn_dirty(pfn);
- else
- kvm_release_pfn_clean(pfn);
+ (void)kvm_follow_pfn(&kfp);
+ return refcounted_page;
}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__gfn_to_page);
-int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+ bool writable)
{
- kvm_pfn_t pfn;
- void *hva = NULL;
- struct page *page = KVM_UNMAPPED_PAGE;
+ struct kvm_follow_pfn kfp = {
+ .slot = gfn_to_memslot(vcpu->kvm, gfn),
+ .gfn = gfn,
+ .flags = writable ? FOLL_WRITE : 0,
+ .refcounted_page = &map->pinned_page,
+ .pin = true,
+ };
- if (!map)
- return -EINVAL;
+ map->pinned_page = NULL;
+ map->page = NULL;
+ map->hva = NULL;
+ map->gfn = gfn;
+ map->writable = writable;
- pfn = gfn_to_pfn(vcpu->kvm, gfn);
- if (is_error_noslot_pfn(pfn))
+ map->pfn = kvm_follow_pfn(&kfp);
+ if (is_error_noslot_pfn(map->pfn))
return -EINVAL;
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- hva = kmap(page);
+ if (pfn_valid(map->pfn)) {
+ map->page = pfn_to_page(map->pfn);
+ map->hva = kmap(map->page);
#ifdef CONFIG_HAS_IOMEM
} else {
- hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+ map->hva = memremap(pfn_to_hpa(map->pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
}
- if (!hva)
- return -EFAULT;
-
- map->page = page;
- map->hva = hva;
- map->pfn = pfn;
- map->gfn = gfn;
-
- return 0;
+ return map->hva ? 0 : -EFAULT;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_map);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_map);
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
{
- if (!map)
- return;
-
if (!map->hva)
return;
- if (map->page != KVM_UNMAPPED_PAGE)
+ if (map->page)
kunmap(map->page);
#ifdef CONFIG_HAS_IOMEM
else
memunmap(map->hva);
#endif
- if (dirty)
+ if (map->writable)
kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
- kvm_release_pfn(map->pfn, dirty);
+ if (map->pinned_page) {
+ if (map->writable)
+ kvm_set_page_dirty(map->pinned_page);
+ kvm_set_page_accessed(map->pinned_page);
+ unpin_user_page(map->pinned_page);
+ }
map->hva = NULL;
map->page = NULL;
+ map->pinned_page = NULL;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
-
-static bool kvm_is_ad_tracked_page(struct page *page)
-{
- /*
- * Per page-flags.h, pages tagged PG_reserved "should in general not be
- * touched (e.g. set dirty) except by its owner".
- */
- return !PageReserved(page);
-}
-
-static void kvm_set_page_dirty(struct page *page)
-{
- if (kvm_is_ad_tracked_page(page))
- SetPageDirty(page);
-}
-
-static void kvm_set_page_accessed(struct page *page)
-{
- if (kvm_is_ad_tracked_page(page))
- mark_page_accessed(page);
-}
-
-void kvm_release_page_clean(struct page *page)
-{
- WARN_ON(is_error_page(page));
-
- kvm_set_page_accessed(page);
- put_page(page);
-}
-EXPORT_SYMBOL_GPL(kvm_release_page_clean);
-
-void kvm_release_pfn_clean(kvm_pfn_t pfn)
-{
- struct page *page;
-
- if (is_error_noslot_pfn(pfn))
- return;
-
- page = kvm_pfn_to_refcounted_page(pfn);
- if (!page)
- return;
-
- kvm_release_page_clean(page);
-}
-EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
-
-void kvm_release_page_dirty(struct page *page)
-{
- WARN_ON(is_error_page(page));
-
- kvm_set_page_dirty(page);
- kvm_release_page_clean(page);
-}
-EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
-
-void kvm_release_pfn_dirty(kvm_pfn_t pfn)
-{
- struct page *page;
-
- if (is_error_noslot_pfn(pfn))
- return;
-
- page = kvm_pfn_to_refcounted_page(pfn);
- if (!page)
- return;
-
- kvm_release_page_dirty(page);
-}
-EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
-
-/*
- * Note, checking for an error/noslot pfn is the caller's responsibility when
- * directly marking a page dirty/accessed. Unlike the "release" helpers, the
- * "set" helpers are not to be used when the pfn might point at garbage.
- */
-void kvm_set_pfn_dirty(kvm_pfn_t pfn)
-{
- if (WARN_ON(is_error_noslot_pfn(pfn)))
- return;
-
- if (pfn_valid(pfn))
- kvm_set_page_dirty(pfn_to_page(pfn));
-}
-EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
-
-void kvm_set_pfn_accessed(kvm_pfn_t pfn)
-{
- if (WARN_ON(is_error_noslot_pfn(pfn)))
- return;
-
- if (pfn_valid(pfn))
- kvm_set_page_accessed(pfn_to_page(pfn));
-}
-EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_unmap);
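The map/use/unmap pattern with the reworked API looks roughly as follows; the gfn and payload are illustrative only, and note that the dirty/unpin handling is now keyed off map.writable recorded at map time rather than a flag passed to unmap.

/* Hedged sketch of the __kvm_vcpu_map()/kvm_vcpu_unmap() pattern. */
static int example_write_guest_word(struct kvm_vcpu *vcpu, gfn_t gfn, u32 val)
{
	struct kvm_host_map map;
	int r;

	r = __kvm_vcpu_map(vcpu, gfn, &map, /*writable=*/true);
	if (r)
		return r;

	*(u32 *)map.hva = val;

	/* Dirtying and unpinning are driven by map.writable, set at map time. */
	kvm_vcpu_unmap(vcpu, &map);
	return 0;
}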
static int next_segment(unsigned long len, int offset)
{
@@ -2990,12 +3185,16 @@ static int next_segment(unsigned long len, int offset)
return len;
}
+/* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
void *data, int offset, int len)
{
int r;
unsigned long addr;
+ if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+ return -EFAULT;
+
addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
@@ -3012,7 +3211,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
-EXPORT_SYMBOL_GPL(kvm_read_guest_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_page);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
int offset, int len)
@@ -3021,7 +3220,7 @@ int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_page);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
@@ -3041,7 +3240,7 @@ int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_read_guest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{
@@ -3061,7 +3260,7 @@ int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned l
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest);
static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
void *data, int offset, unsigned long len)
@@ -3069,6 +3268,9 @@ static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
int r;
unsigned long addr;
+ if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+ return -EFAULT;
+
addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
@@ -3089,8 +3291,9 @@ int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_atomic);
+/* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
static int __kvm_write_guest_page(struct kvm *kvm,
struct kvm_memory_slot *memslot, gfn_t gfn,
const void *data, int offset, int len)
@@ -3098,6 +3301,9 @@ static int __kvm_write_guest_page(struct kvm *kvm,
int r;
unsigned long addr;
+ if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+ return -EFAULT;
+
addr = gfn_to_hva_memslot(memslot, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
@@ -3115,7 +3321,7 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
}
-EXPORT_SYMBOL_GPL(kvm_write_guest_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_page);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
const void *data, int offset, int len)
@@ -3124,7 +3330,7 @@ int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest_page);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len)
@@ -3145,7 +3351,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_write_guest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len)
@@ -3166,7 +3372,7 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest);
static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
struct gfn_to_hva_cache *ghc,
@@ -3215,7 +3421,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
struct kvm_memslots *slots = kvm_memslots(kvm);
return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
}
-EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gfn_to_hva_cache_init);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
@@ -3246,14 +3452,14 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_offset_cached);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
}
-EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_cached);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
@@ -3283,14 +3489,14 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_offset_cached);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
}
-EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_cached);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
@@ -3301,7 +3507,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
int ret;
while ((seg = next_segment(len, offset)) != 0) {
- ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
+ ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
if (ret < 0)
return ret;
offset = 0;
@@ -3310,7 +3516,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_clear_guest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_clear_guest);
void mark_page_dirty_in_slot(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
@@ -3335,7 +3541,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
set_bit_le(rel_gfn, memslot->dirty_bitmap);
}
}
-EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty_in_slot);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
@@ -3344,7 +3550,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
memslot = gfn_to_memslot(kvm, gfn);
mark_page_dirty_in_slot(kvm, memslot, gfn);
}
-EXPORT_SYMBOL_GPL(mark_page_dirty);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@@ -3353,7 +3559,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_mark_page_dirty);
void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{
@@ -3590,7 +3796,7 @@ out:
trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_halt);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
@@ -3602,13 +3808,13 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
return false;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_wake_up);
#ifndef CONFIG_S390
/*
* Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
*/
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait)
{
int me, cpu;
@@ -3637,34 +3843,47 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
*/
if (kvm_arch_vcpu_should_kick(vcpu)) {
cpu = READ_ONCE(vcpu->cpu);
- if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
- smp_send_reschedule(cpu);
+ if (cpu != me && (unsigned int)cpu < nr_cpu_ids && cpu_online(cpu)) {
+ /*
+ * Use a reschedule IPI to kick the vCPU if the caller
+ * doesn't need to wait for a response, as KVM allows
+ * kicking vCPUs while IRQs are disabled, but using the
+ * SMP function call framework with IRQs disabled can
+ * deadlock due to taking cross-CPU locks.
+ */
+ if (wait)
+ smp_call_function_single(cpu, ack_kick, NULL, wait);
+ else
+ smp_send_reschedule(cpu);
+ }
}
out:
put_cpu();
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_kick);
#endif /* !CONFIG_S390 */
int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
- struct pid *pid;
struct task_struct *task = NULL;
- int ret = 0;
+ int ret;
+
+ if (!read_trylock(&target->pid_lock))
+ return 0;
+
+ if (target->pid)
+ task = get_pid_task(target->pid, PIDTYPE_PID);
+
+ read_unlock(&target->pid_lock);
- rcu_read_lock();
- pid = rcu_dereference(target->pid);
- if (pid)
- task = get_pid_task(pid, PIDTYPE_PID);
- rcu_read_unlock();
if (!task)
- return ret;
+ return 0;
ret = yield_to(task, 1);
put_task_struct(task);
return ret;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_yield_to);
/*
* Helper that checks whether a VCPU is eligible for directed yield.
@@ -3728,6 +3947,18 @@ static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
return false;
}
+/*
+ * By default, simply query the target vCPU's current mode when checking if a
+ * vCPU was preempted in kernel mode. All architectures except x86 (or more
+ * specifically, except VMX) allow querying whether or not a vCPU is in kernel
+ * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
+ * directly for cross-vCPU checks is functionally correct and accurate.
+ */
+bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
+ return kvm_arch_vcpu_in_kernel(vcpu);
+}
+
bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
return false;
@@ -3735,51 +3966,71 @@ bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
+ int nr_vcpus, start, i, idx, yielded;
struct kvm *kvm = me->kvm;
struct kvm_vcpu *vcpu;
- int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
- unsigned long i;
- int yielded = 0;
int try = 3;
- int pass;
+
+ nr_vcpus = atomic_read(&kvm->online_vcpus);
+ if (nr_vcpus < 2)
+ return;
+
+ /* Pairs with the smp_wmb() in kvm_vm_ioctl_create_vcpu(). */
+ smp_rmb();
kvm_vcpu_set_in_spin_loop(me, true);
+
/*
- * We boost the priority of a VCPU that is runnable but not
- * currently running, because it got preempted by something
- * else and called schedule in __vcpu_run. Hopefully that
- * VCPU is holding the lock that we need and will release it.
- * We approximate round-robin by starting at the last boosted VCPU.
+ * The current vCPU ("me") is spinning in kernel mode, i.e. is likely
+ * waiting for a resource to become available. Attempt to yield to a
+ * vCPU that is runnable, but not currently running, e.g. because the
+ * vCPU was preempted by a higher priority task. With luck, the vCPU
+ * that was preempted is holding a lock or some other resource that the
+ * current vCPU is waiting to acquire, and yielding to the other vCPU
+ * will allow it to make forward progress and release the lock (or kick
+ * the spinning vCPU, etc).
+ *
+ * Since KVM has no insight into what exactly the guest is doing,
+ * approximate a round-robin selection by iterating over all vCPUs,
+ * starting at the last boosted vCPU. I.e. if N=kvm->last_boosted_vcpu,
+ * iterate over vCPU[N+1]..vCPU[N-1], wrapping as needed.
+ *
+ * Note, this is inherently racy, e.g. if multiple vCPUs are spinning,
+ * they may all try to yield to the same vCPU(s). But as above, this
+ * is all best effort due to KVM's lack of visibility into the guest.
*/
- for (pass = 0; pass < 2 && !yielded && try; pass++) {
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!pass && i <= last_boosted_vcpu) {
- i = last_boosted_vcpu;
- continue;
- } else if (pass && i > last_boosted_vcpu)
- break;
- if (!READ_ONCE(vcpu->ready))
- continue;
- if (vcpu == me)
- continue;
- if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
- continue;
- if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
- !kvm_arch_dy_has_pending_interrupt(vcpu) &&
- !kvm_arch_vcpu_in_kernel(vcpu))
- continue;
- if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
- continue;
+ start = READ_ONCE(kvm->last_boosted_vcpu) + 1;
+ for (i = 0; i < nr_vcpus; i++) {
+ idx = (start + i) % nr_vcpus;
+ if (idx == me->vcpu_idx)
+ continue;
- yielded = kvm_vcpu_yield_to(vcpu);
- if (yielded > 0) {
- kvm->last_boosted_vcpu = i;
- break;
- } else if (yielded < 0) {
- try--;
- if (!try)
- break;
- }
+ vcpu = xa_load(&kvm->vcpu_array, idx);
+ if (!READ_ONCE(vcpu->ready))
+ continue;
+ if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
+ continue;
+
+ /*
+ * Treat the target vCPU as being in-kernel if it has a pending
+ * interrupt, as the vCPU trying to yield may be spinning
+ * waiting on IPI delivery, i.e. the target vCPU is in-kernel
+ * for the purposes of directed yield.
+ */
+ if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
+ !kvm_arch_dy_has_pending_interrupt(vcpu) &&
+ !kvm_arch_vcpu_preempted_in_kernel(vcpu))
+ continue;
+
+ if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
+ continue;
+
+ yielded = kvm_vcpu_yield_to(vcpu);
+ if (yielded > 0) {
+ WRITE_ONCE(kvm->last_boosted_vcpu, idx);
+ break;
+ } else if (yielded < 0 && !--try) {
+ break;
}
}
kvm_vcpu_set_in_spin_loop(me, false);
@@ -3787,7 +4038,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
/* Ensure vcpu is not eligible during next spinloop */
kvm_vcpu_set_dy_eligible(me, false);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_on_spin);
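To see the round-robin wrap in isolation: with nr_vcpus = 4, last_boosted_vcpu = 2 and the spinning vCPU at index 0, the scan starts at index 3 and visits 3, 1, 2, skipping itself. A standalone sketch of just that selection order, independent of any KVM internals:

#include <stdio.h>

/* Standalone illustration of the round-robin scan order used above. */
int main(void)
{
	int nr_vcpus = 4, last_boosted_vcpu = 2, me = 0;
	int start = last_boosted_vcpu + 1;

	for (int i = 0; i < nr_vcpus; i++) {
		int idx = (start + i) % nr_vcpus;

		if (idx == me)
			continue;
		printf("try vCPU %d\n", idx);	/* prints 3, 1, 2 */
	}
	return 0;
}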
static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
{
@@ -3852,7 +4103,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
return 0;
}
-static const struct file_operations kvm_vcpu_fops = {
+static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
.mmap = kvm_vcpu_mmap,
@@ -3874,8 +4125,11 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
static int vcpu_get_pid(void *data, u64 *val)
{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
- *val = pid_nr(rcu_access_pointer(vcpu->pid));
+ struct kvm_vcpu *vcpu = data;
+
+ read_lock(&vcpu->pid_lock);
+ *val = pid_nr(vcpu->pid);
+ read_unlock(&vcpu->pid_lock);
return 0;
}
@@ -3902,12 +4156,21 @@ static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
/*
* Creates some virtual cpus. Good luck creating more than one.
*/
-static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
{
int r;
struct kvm_vcpu *vcpu;
struct page *page;
+ /*
+ * KVM tracks vCPU IDs as 'int', be kind to userspace and reject
+ * too-large values instead of silently truncating.
+ *
+ * Ensure KVM_MAX_VCPU_IDS isn't pushed above INT_MAX without first
+ * changing the storage type (at the very least, IDs should be tracked
+ * as unsigned ints).
+ */
+ BUILD_BUG_ON(KVM_MAX_VCPU_IDS > INT_MAX);
if (id >= KVM_MAX_VCPU_IDS)
return -EINVAL;
@@ -3947,7 +4210,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
goto vcpu_free_run_page;
if (kvm->dirty_ring_size) {
- r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
+ r = kvm_dirty_ring_alloc(kvm, &vcpu->dirty_ring,
id, kvm->dirty_ring_size);
if (r)
goto arch_vcpu_destroy;
@@ -3955,12 +4218,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
mutex_lock(&kvm->lock);
-#ifdef CONFIG_LOCKDEP
- /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
- mutex_lock(&vcpu->mutex);
- mutex_unlock(&vcpu->mutex);
-#endif
-
if (kvm_get_vcpu_by_id(kvm, id)) {
r = -EEXIST;
goto unlock_vcpu_destroy;
@@ -3968,18 +4225,23 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
- BUG_ON(r == -EBUSY);
+ WARN_ON_ONCE(r == -EBUSY);
if (r)
goto unlock_vcpu_destroy;
- /* Now it's all set up, let userspace reach it */
+ /*
+ * Now it's all set up, let userspace reach it. Grab the vCPU's mutex
+ * so that userspace can't invoke vCPU ioctl()s until the vCPU is fully
+ * visible (per online_vcpus), e.g. so that KVM doesn't get tricked
+ * into a NULL-pointer dereference because KVM thinks the _current_
+ * vCPU doesn't exist. As a bonus, taking vcpu->mutex ensures lockdep
+ * knows it's taken *inside* kvm->lock.
+ */
+ mutex_lock(&vcpu->mutex);
kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
- if (r < 0) {
- xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
- kvm_put_kvm_no_destroy(kvm);
- goto unlock_vcpu_destroy;
- }
+ if (r < 0)
+ goto kvm_put_xa_erase;
/*
* Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu
@@ -3987,12 +4249,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
*/
smp_wmb();
atomic_inc(&kvm->online_vcpus);
+ mutex_unlock(&vcpu->mutex);
mutex_unlock(&kvm->lock);
kvm_arch_vcpu_postcreate(vcpu);
kvm_create_vcpu_debugfs(vcpu);
return r;
+kvm_put_xa_erase:
+ mutex_unlock(&vcpu->mutex);
+ kvm_put_kvm_no_destroy(kvm);
+ xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
kvm_dirty_ring_free(&vcpu->dirty_ring);
@@ -4030,8 +4297,18 @@ static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
sizeof(vcpu->stat), user_buffer, size, offset);
}
+static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
+{
+ struct kvm_vcpu *vcpu = file->private_data;
+
+ kvm_put_kvm(vcpu->kvm);
+ return 0;
+}
+
static const struct file_operations kvm_vcpu_stats_fops = {
+ .owner = THIS_MODULE,
.read = kvm_vcpu_stats_read,
+ .release = kvm_vcpu_stats_release,
.llseek = noop_llseek,
};
@@ -4047,17 +4324,92 @@ static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
if (fd < 0)
return fd;
- file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
+ file = anon_inode_getfile_fmode(name, &kvm_vcpu_stats_fops, vcpu,
+ O_RDONLY, FMODE_PREAD);
if (IS_ERR(file)) {
put_unused_fd(fd);
return PTR_ERR(file);
}
- file->f_mode |= FMODE_PREAD;
+
+ kvm_get_kvm(vcpu->kvm);
fd_install(fd, file);
return fd;
}
+#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
+static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
+ struct kvm_pre_fault_memory *range)
+{
+ int idx;
+ long r;
+ u64 full_size;
+
+ if (range->flags)
+ return -EINVAL;
+
+ if (!PAGE_ALIGNED(range->gpa) ||
+ !PAGE_ALIGNED(range->size) ||
+ range->gpa + range->size <= range->gpa)
+ return -EINVAL;
+
+ vcpu_load(vcpu);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ full_size = range->size;
+ do {
+ if (signal_pending(current)) {
+ r = -EINTR;
+ break;
+ }
+
+ r = kvm_arch_vcpu_pre_fault_memory(vcpu, range);
+ if (WARN_ON_ONCE(r == 0 || r == -EIO))
+ break;
+
+ if (r < 0)
+ break;
+
+ range->size -= r;
+ range->gpa += r;
+ cond_resched();
+ } while (range->size);
+
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ vcpu_put(vcpu);
+
+ /* Return success if at least one page was mapped successfully. */
+ return full_size == range->size ? r : 0;
+}
+#endif
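From the userspace side, the leftover-range semantics suggest a simple retry loop. A hedged sketch, assuming uapi headers new enough to define KVM_PRE_FAULT_MEMORY and struct kvm_pre_fault_memory, and a vcpu_fd obtained from a prior KVM_CREATE_VCPU:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Pre-fault a page-aligned GPA range, retrying on signals. */
static int pre_fault_range(int vcpu_fd, __u64 gpa, __u64 size)
{
	struct kvm_pre_fault_memory range;

	memset(&range, 0, sizeof(range));
	range.gpa = gpa;
	range.size = size;

	while (range.size) {
		int ret = ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range);

		if (ret < 0 && errno != EINTR)
			return -errno;
		/*
		 * ret == 0 with a non-zero leftover range means KVM made
		 * partial progress; retrying resumes at the first unmapped
		 * page.
		 */
	}
	return 0;
}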
+
+static int kvm_wait_for_vcpu_online(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ /*
+ * In practice, this happy path will always be taken, as a well-behaved
+ * VMM will never invoke a vCPU ioctl() before KVM_CREATE_VCPU returns.
+ */
+ if (likely(vcpu->vcpu_idx < atomic_read(&kvm->online_vcpus)))
+ return 0;
+
+ /*
+ * Acquire and release the vCPU's mutex to wait for vCPU creation to
+ * complete (kvm_vm_ioctl_create_vcpu() holds the mutex until the vCPU
+ * is fully online).
+ */
+ if (mutex_lock_killable(&vcpu->mutex))
+ return -EINTR;
+
+ mutex_unlock(&vcpu->mutex);
+
+ if (WARN_ON_ONCE(!kvm_get_vcpu(kvm, vcpu->vcpu_idx)))
+ return -EIO;
+
+ return 0;
+}
+
static long kvm_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -4074,10 +4426,19 @@ static long kvm_vcpu_ioctl(struct file *filp,
return -EINVAL;
/*
- * Some architectures have vcpu ioctls that are asynchronous to vcpu
- * execution; mutex_lock() would break them.
+ * Wait for the vCPU to be online before handling the ioctl(), as KVM
+ * assumes the vCPU is reachable via vcpu_array, i.e. may dereference
+ * a NULL pointer if userspace invokes an ioctl() before KVM is ready.
*/
- r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
+ r = kvm_wait_for_vcpu_online(vcpu);
+ if (r)
+ return r;
+
+ /*
+ * Let arch code handle select vCPU ioctls without holding vcpu->mutex,
+ * e.g. to support ioctls that can run asynchronously to vCPU execution.
+ */
+ r = kvm_arch_vcpu_unlocked_ioctl(filp, ioctl, arg);
if (r != -ENOIOCTLCMD)
return r;
@@ -4089,7 +4450,14 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = -EINVAL;
if (arg)
goto out;
- oldpid = rcu_access_pointer(vcpu->pid);
+
+ /*
+ * Note, vcpu->pid is primarily protected by vcpu->mutex. The
+ * dedicated r/w lock allows other tasks, e.g. other vCPUs, to
+ * read vcpu->pid while this vCPU is in KVM_RUN, e.g. to yield
+ * directly to this vCPU.
+ */
+ oldpid = vcpu->pid;
if (unlikely(oldpid != task_pid(current))) {
/* The thread running this VCPU changed. */
struct pid *newpid;
@@ -4099,12 +4467,22 @@ static long kvm_vcpu_ioctl(struct file *filp,
break;
newpid = get_task_pid(current, PIDTYPE_PID);
- rcu_assign_pointer(vcpu->pid, newpid);
- if (oldpid)
- synchronize_rcu();
+ write_lock(&vcpu->pid_lock);
+ vcpu->pid = newpid;
+ write_unlock(&vcpu->pid_lock);
+
put_pid(oldpid);
}
+ vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit__unsafe);
r = kvm_arch_vcpu_ioctl_run(vcpu);
+ vcpu->wants_to_run = false;
+
+ /*
+ * FIXME: Remove this hack once all KVM architectures
+ * support the generic TIF bits, i.e. a dedicated TIF_RSEQ.
+ */
+ rseq_virt_userspace_exit();
+
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
}
@@ -4112,7 +4490,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
struct kvm_regs *kvm_regs;
r = -ENOMEM;
- kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
+ kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
if (!kvm_regs)
goto out;
r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
@@ -4139,8 +4517,7 @@ out_free1:
break;
}
case KVM_GET_SREGS: {
- kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
- GFP_KERNEL_ACCOUNT);
+ kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
r = -ENOMEM;
if (!kvm_sregs)
goto out;
@@ -4232,7 +4609,7 @@ out_free1:
break;
}
case KVM_GET_FPU: {
- fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
+ fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
r = -ENOMEM;
if (!fpu)
goto out;
@@ -4259,6 +4636,20 @@ out_free1:
r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
break;
}
+#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
+ case KVM_PRE_FAULT_MEMORY: {
+ struct kvm_pre_fault_memory range;
+
+ r = -EFAULT;
+ if (copy_from_user(&range, argp, sizeof(range)))
+ break;
+ r = kvm_vcpu_pre_fault_memory(vcpu, &range);
+ /* Pass back leftover range. */
+ if (copy_to_user(argp, &range, sizeof(range)))
+ r = -EFAULT;
+ break;
+ }
+#endif
default:
r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
}
@@ -4368,7 +4759,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
if (dev->ops->release) {
mutex_lock(&kvm->lock);
- list_del(&dev->vm_node);
+ list_del_rcu(&dev->vm_node);
+ synchronize_rcu();
dev->ops->release(dev);
mutex_unlock(&kvm->lock);
}
@@ -4377,7 +4769,7 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
return 0;
}
-static const struct file_operations kvm_device_fops = {
+static struct file_operations kvm_device_fops = {
.unlocked_ioctl = kvm_device_ioctl,
.release = kvm_device_release,
KVM_COMPAT(kvm_device_ioctl),
@@ -4451,7 +4843,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
kfree(dev);
return ret;
}
- list_add(&dev->vm_node, &kvm->devices);
+ list_add_rcu(&dev->vm_node, &kvm->devices);
mutex_unlock(&kvm->lock);
if (ops->init)
@@ -4462,7 +4854,8 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
if (ret < 0) {
kvm_put_kvm_no_destroy(kvm);
mutex_lock(&kvm->lock);
- list_del(&dev->vm_node);
+ list_del_rcu(&dev->vm_node);
+ synchronize_rcu();
if (ops->release)
ops->release(dev);
mutex_unlock(&kvm->lock);
@@ -4475,19 +4868,19 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
return 0;
}
-static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
+static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
{
switch (arg) {
case KVM_CAP_USER_MEMORY:
+ case KVM_CAP_USER_MEMORY2:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_CAP_SIGNAL_MSI:
#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
case KVM_CAP_IRQFD:
- case KVM_CAP_IRQFD_RESAMPLE:
#endif
case KVM_CAP_IOEVENTFD_ANY_LENGTH:
case KVM_CAP_CHECK_EXTENSION_VM:
@@ -4508,9 +4901,11 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_IRQ_ROUTING:
return KVM_MAX_IRQ_ROUTES;
#endif
-#if KVM_ADDRESS_SPACE_NUM > 1
+#if KVM_MAX_NR_ADDRESS_SPACES > 1
case KVM_CAP_MULTI_ADDRESS_SPACE:
- return KVM_ADDRESS_SPACE_NUM;
+ if (kvm)
+ return kvm_arch_nr_memslot_as_ids(kvm);
+ return KVM_MAX_NR_ADDRESS_SPACES;
#endif
case KVM_CAP_NR_MEMSLOTS:
return KVM_USER_MEM_SLOTS;
@@ -4531,7 +4926,18 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#endif
case KVM_CAP_BINARY_STATS_FD:
case KVM_CAP_SYSTEM_EVENT_DATA:
+ case KVM_CAP_DEVICE_CTRL:
return 1;
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+ case KVM_CAP_MEMORY_ATTRIBUTES:
+ return kvm_supported_mem_attributes(kvm);
+#endif
+#ifdef CONFIG_KVM_GUEST_MEMFD
+ case KVM_CAP_GUEST_MEMFD:
+ return 1;
+ case KVM_CAP_GUEST_MEMFD_FLAGS:
+ return kvm_gmem_get_supported_flags(kvm);
+#endif
default:
break;
}
@@ -4550,7 +4956,7 @@ static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
return -EINVAL;
/* Should be bigger to keep the reserved entries, or a page */
- if (size < kvm_dirty_ring_get_rsvd_entries() *
+ if (size < kvm_dirty_ring_get_rsvd_entries(kvm) *
sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
return -EINVAL;
@@ -4580,15 +4986,18 @@ static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
{
unsigned long i;
struct kvm_vcpu *vcpu;
- int cleared = 0;
+ int cleared = 0, r;
if (!kvm->dirty_ring_size)
return -EINVAL;
mutex_lock(&kvm->slots_lock);
- kvm_for_each_vcpu(i, vcpu, kvm)
- cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ r = kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring, &cleared);
+ if (r)
+ break;
+ }
mutex_unlock(&kvm->slots_lock);
@@ -4604,19 +5013,20 @@ int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
return -EINVAL;
}
-static bool kvm_are_all_memslots_empty(struct kvm *kvm)
+bool kvm_are_all_memslots_empty(struct kvm *kvm)
{
int i;
lockdep_assert_held(&kvm->slots_lock);
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
return false;
}
return true;
}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_are_all_memslots_empty);
static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
struct kvm_enable_cap *cap)
@@ -4696,8 +5106,18 @@ static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
sizeof(kvm->stat), user_buffer, size, offset);
}
+static int kvm_vm_stats_release(struct inode *inode, struct file *file)
+{
+ struct kvm *kvm = file->private_data;
+
+ kvm_put_kvm(kvm);
+ return 0;
+}
+
static const struct file_operations kvm_vm_stats_fops = {
+ .owner = THIS_MODULE,
.read = kvm_vm_stats_read,
+ .release = kvm_vm_stats_release,
.llseek = noop_llseek,
};
@@ -4710,18 +5130,27 @@ static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
if (fd < 0)
return fd;
- file = anon_inode_getfile("kvm-vm-stats",
- &kvm_vm_stats_fops, kvm, O_RDONLY);
+ file = anon_inode_getfile_fmode("kvm-vm-stats",
+ &kvm_vm_stats_fops, kvm, O_RDONLY, FMODE_PREAD);
if (IS_ERR(file)) {
put_unused_fd(fd);
return PTR_ERR(file);
}
- file->f_mode |= FMODE_PREAD;
+
+ kvm_get_kvm(kvm);
fd_install(fd, file);
return fd;
}
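A hedged userspace sketch of consuming the stats fd: the fd returned by KVM_GET_STATS_FD supports pread() only (FMODE_PREAD, no llseek), and with this change it also holds a reference on the VM until it is closed. vm_fd is assumed to come from a prior KVM_CREATE_VM, and the header layout is taken from the uapi struct kvm_stats_header.

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

/* Fetch the VM stats fd and read the binary stats header. */
static int dump_vm_stats_header(int vm_fd)
{
	struct kvm_stats_header hdr;
	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);

	if (stats_fd < 0)
		return -1;

	if (pread(stats_fd, &hdr, sizeof(hdr), 0) != sizeof(hdr)) {
		close(stats_fd);
		return -1;
	}

	printf("%u stat descriptors, data at offset %u\n",
	       hdr.num_desc, hdr.data_offset);
	close(stats_fd);
	return 0;
}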
+#define SANITY_CHECK_MEM_REGION_FIELD(field) \
+do { \
+ BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) != \
+ offsetof(struct kvm_userspace_memory_region2, field)); \
+ BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) != \
+ sizeof_field(struct kvm_userspace_memory_region2, field)); \
+} while (0)
+
static long kvm_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -4744,15 +5173,39 @@ static long kvm_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
break;
}
+ case KVM_SET_USER_MEMORY_REGION2:
case KVM_SET_USER_MEMORY_REGION: {
- struct kvm_userspace_memory_region kvm_userspace_mem;
+ struct kvm_userspace_memory_region2 mem;
+ unsigned long size;
+
+ if (ioctl == KVM_SET_USER_MEMORY_REGION) {
+ /*
+ * Fields beyond struct kvm_userspace_memory_region shouldn't be
+ * accessed, but avoid leaking kernel memory in case of a bug.
+ */
+ memset(&mem, 0, sizeof(mem));
+ size = sizeof(struct kvm_userspace_memory_region);
+ } else {
+ size = sizeof(struct kvm_userspace_memory_region2);
+ }
+
+ /* Ensure the common parts of the two structs are identical. */
+ SANITY_CHECK_MEM_REGION_FIELD(slot);
+ SANITY_CHECK_MEM_REGION_FIELD(flags);
+ SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
+ SANITY_CHECK_MEM_REGION_FIELD(memory_size);
+ SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);
r = -EFAULT;
- if (copy_from_user(&kvm_userspace_mem, argp,
- sizeof(kvm_userspace_mem)))
+ if (copy_from_user(&mem, argp, size))
goto out;
- r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
+ r = -EINVAL;
+ if (ioctl == KVM_SET_USER_MEMORY_REGION &&
+ (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
+ goto out;
+
+ r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
break;
}
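For reference, a hedged sketch of registering an ordinary (non-guest_memfd) slot through the new ioctl; struct kvm_userspace_memory_region2 and KVM_SET_USER_MEMORY_REGION2 are assumed to be present in the installed uapi headers, and the slot number, GPA and size are illustrative.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Register host_mem as guest physical memory starting at gpa. */
static int set_region2(int vm_fd, void *host_mem, __u64 gpa, __u64 size)
{
	struct kvm_userspace_memory_region2 region;

	memset(&region, 0, sizeof(region));
	region.slot = 0;
	region.guest_phys_addr = gpa;
	region.memory_size = size;
	region.userspace_addr = (__u64)(unsigned long)host_mem;

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}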
case KVM_GET_DIRTY_LOG: {
@@ -4866,9 +5319,8 @@ static long kvm_vm_ioctl(struct file *filp,
goto out;
if (routing.nr) {
urouting = argp;
- entries = vmemdup_user(urouting->entries,
- array_size(sizeof(*entries),
- routing.nr));
+ entries = vmemdup_array_user(urouting->entries,
+ routing.nr, sizeof(*entries));
if (IS_ERR(entries)) {
r = PTR_ERR(entries);
goto out;
@@ -4880,6 +5332,18 @@ static long kvm_vm_ioctl(struct file *filp,
break;
}
#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+ case KVM_SET_MEMORY_ATTRIBUTES: {
+ struct kvm_memory_attributes attrs;
+
+ r = -EFAULT;
+ if (copy_from_user(&attrs, argp, sizeof(attrs)))
+ goto out;
+
+ r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
+ break;
+ }
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
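A hedged userspace sketch of the new attribute ioctl, assuming the installed headers define struct kvm_memory_attributes and KVM_MEMORY_ATTRIBUTE_PRIVATE; address and size are expected to be page-aligned.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Flip a page-aligned GPA range to private. */
static int set_private(int vm_fd, __u64 gpa, __u64 size)
{
	struct kvm_memory_attributes attrs;

	memset(&attrs, 0, sizeof(attrs));
	attrs.address = gpa;
	attrs.size = size;
	attrs.attributes = KVM_MEMORY_ATTRIBUTE_PRIVATE;

	return ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
}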
case KVM_CREATE_DEVICE: {
struct kvm_create_device cd;
@@ -4907,6 +5371,18 @@ static long kvm_vm_ioctl(struct file *filp,
case KVM_GET_STATS_FD:
r = kvm_vm_ioctl_get_stats_fd(kvm);
break;
+#ifdef CONFIG_KVM_GUEST_MEMFD
+ case KVM_CREATE_GUEST_MEMFD: {
+ struct kvm_create_guest_memfd guest_memfd;
+
+ r = -EFAULT;
+ if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
+ goto out;
+
+ r = kvm_gmem_create(kvm, &guest_memfd);
+ break;
+ }
+#endif
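And a hedged sketch of wiring a guest_memfd into a slot: create the fd with KVM_CREATE_GUEST_MEMFD, then bind it via KVM_SET_USER_MEMORY_REGION2 with the KVM_MEM_GUEST_MEMFD flag. Struct layouts and the flag are taken from recent uapi headers and should be treated as assumptions; userspace_addr still points at ordinary host memory backing shared (non-private) accesses.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Create a guest_memfd of the given size and bind it to slot 0. */
static int bind_guest_memfd(int vm_fd, void *shared_mem, __u64 gpa, __u64 size)
{
	struct kvm_create_guest_memfd gmem = { .size = size };
	struct kvm_userspace_memory_region2 region;
	int gmem_fd;

	gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
	if (gmem_fd < 0)
		return gmem_fd;

	memset(&region, 0, sizeof(region));
	region.slot = 0;
	region.flags = KVM_MEM_GUEST_MEMFD;
	region.guest_phys_addr = gpa;
	region.memory_size = size;
	region.userspace_addr = (__u64)(unsigned long)shared_mem;
	region.guest_memfd = gmem_fd;
	region.guest_memfd_offset = 0;

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}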
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}
@@ -4994,7 +5470,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
}
#endif
-static const struct file_operations kvm_vm_fops = {
+static struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
.llseek = noop_llseek,
@@ -5005,7 +5481,7 @@ bool file_is_kvm(struct file *file)
{
return file && file->f_op == &kvm_vm_fops;
}
-EXPORT_SYMBOL_GPL(file_is_kvm);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(file_is_kvm);
static int kvm_dev_ioctl_create_vm(unsigned long type)
{
@@ -5053,7 +5529,7 @@ put_fd:
static long kvm_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
- long r = -EINVAL;
+ int r = -EINVAL;
switch (ioctl) {
case KVM_GET_API_VERSION:
@@ -5078,11 +5554,6 @@ static long kvm_dev_ioctl(struct file *filp,
r += PAGE_SIZE; /* coalesced mmio ring page */
#endif
break;
- case KVM_TRACE_ENABLE:
- case KVM_TRACE_PAUSE:
- case KVM_TRACE_DISABLE:
- r = -EOPNOTSUPP;
- break;
default:
return kvm_arch_dev_ioctl(filp, ioctl, arg);
}
@@ -5102,110 +5573,212 @@ static struct miscdevice kvm_dev = {
&kvm_chardev_ops,
};
-static void hardware_enable_nolock(void *junk)
-{
- int cpu = raw_smp_processor_id();
- int r;
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+bool enable_virt_at_load = true;
+module_param(enable_virt_at_load, bool, 0444);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_virt_at_load);
- if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
- return;
+__visible bool kvm_rebooting;
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_rebooting);
- cpumask_set_cpu(cpu, cpus_hardware_enabled);
+static DEFINE_PER_CPU(bool, virtualization_enabled);
+static DEFINE_MUTEX(kvm_usage_lock);
+static int kvm_usage_count;
- r = kvm_arch_hardware_enable();
+__weak void kvm_arch_enable_virtualization(void)
+{
- if (r) {
- cpumask_clear_cpu(cpu, cpus_hardware_enabled);
- atomic_inc(&hardware_enable_failed);
- pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
- }
}
-static int kvm_starting_cpu(unsigned int cpu)
+__weak void kvm_arch_disable_virtualization(void)
{
- raw_spin_lock(&kvm_count_lock);
- if (kvm_usage_count)
- hardware_enable_nolock(NULL);
- raw_spin_unlock(&kvm_count_lock);
+
+}
+
+static int kvm_enable_virtualization_cpu(void)
+{
+ if (__this_cpu_read(virtualization_enabled))
+ return 0;
+
+ if (kvm_arch_enable_virtualization_cpu()) {
+ pr_info("kvm: enabling virtualization on CPU%d failed\n",
+ raw_smp_processor_id());
+ return -EIO;
+ }
+
+ __this_cpu_write(virtualization_enabled, true);
return 0;
}
-static void hardware_disable_nolock(void *junk)
+static int kvm_online_cpu(unsigned int cpu)
{
- int cpu = raw_smp_processor_id();
+ /*
+ * Abort the CPU online process if hardware virtualization cannot
+ * be enabled. Otherwise running VMs would encounter unrecoverable
+ * errors when scheduled to this CPU.
+ */
+ return kvm_enable_virtualization_cpu();
+}
- if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
+static void kvm_disable_virtualization_cpu(void *ign)
+{
+ if (!__this_cpu_read(virtualization_enabled))
return;
- cpumask_clear_cpu(cpu, cpus_hardware_enabled);
- kvm_arch_hardware_disable();
+
+ kvm_arch_disable_virtualization_cpu();
+
+ __this_cpu_write(virtualization_enabled, false);
}
-static int kvm_dying_cpu(unsigned int cpu)
+static int kvm_offline_cpu(unsigned int cpu)
{
- raw_spin_lock(&kvm_count_lock);
- if (kvm_usage_count)
- hardware_disable_nolock(NULL);
- raw_spin_unlock(&kvm_count_lock);
+ kvm_disable_virtualization_cpu(NULL);
return 0;
}
-static void hardware_disable_all_nolock(void)
+static void kvm_shutdown(void *data)
{
- BUG_ON(!kvm_usage_count);
+ /*
+ * Disable hardware virtualization and set kvm_rebooting to indicate
+ * that KVM has asynchronously disabled hardware virtualization, i.e.
+ * that relevant errors and exceptions aren't entirely unexpected.
+ * Some flavors of hardware virtualization need to be disabled before
+ * transferring control to firmware (to perform shutdown/reboot), e.g.
+ * on x86, virtualization can block INIT interrupts, which are used by
+ * firmware to pull APs back under firmware control. Note, this path
+ * is used for both shutdown and reboot scenarios, i.e. neither name is
+ * 100% comprehensive.
+ */
+ pr_info("kvm: exiting hardware virtualization\n");
+ kvm_rebooting = true;
+ on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1);
+}
- kvm_usage_count--;
- if (!kvm_usage_count)
- on_each_cpu(hardware_disable_nolock, NULL, 1);
+static int kvm_suspend(void *data)
+{
+ /*
+ * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
+ * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
+ * count is stable. Assert that kvm_usage_lock is not held to ensure
+ * the system isn't suspended while KVM is enabling hardware. Hardware
+ * enabling can be preempted, but the task cannot be frozen until it has
+ * dropped all locks (userspace tasks are frozen via a fake signal).
+ */
+ lockdep_assert_not_held(&kvm_usage_lock);
+ lockdep_assert_irqs_disabled();
+
+ kvm_disable_virtualization_cpu(NULL);
+ return 0;
}
-static void hardware_disable_all(void)
+static void kvm_resume(void *data)
{
- raw_spin_lock(&kvm_count_lock);
- hardware_disable_all_nolock();
- raw_spin_unlock(&kvm_count_lock);
+ lockdep_assert_not_held(&kvm_usage_lock);
+ lockdep_assert_irqs_disabled();
+
+ WARN_ON_ONCE(kvm_enable_virtualization_cpu());
}
-static int hardware_enable_all(void)
+static const struct syscore_ops kvm_syscore_ops = {
+ .suspend = kvm_suspend,
+ .resume = kvm_resume,
+ .shutdown = kvm_shutdown,
+};
+
+static struct syscore kvm_syscore = {
+ .ops = &kvm_syscore_ops,
+};
+
+int kvm_enable_virtualization(void)
{
- int r = 0;
+ int r;
- raw_spin_lock(&kvm_count_lock);
+ guard(mutex)(&kvm_usage_lock);
- kvm_usage_count++;
- if (kvm_usage_count == 1) {
- atomic_set(&hardware_enable_failed, 0);
- on_each_cpu(hardware_enable_nolock, NULL, 1);
+ if (kvm_usage_count++)
+ return 0;
- if (atomic_read(&hardware_enable_failed)) {
- hardware_disable_all_nolock();
- r = -EBUSY;
- }
+ kvm_arch_enable_virtualization();
+
+ r = cpuhp_setup_state(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
+ kvm_online_cpu, kvm_offline_cpu);
+ if (r)
+ goto err_cpuhp;
+
+ register_syscore(&kvm_syscore);
+
+ /*
+ * Undo virtualization enabling and bail if the system is going down.
+ * If userspace initiated a forced reboot, e.g. reboot -f, then it's
+ * possible for an in-flight operation to enable virtualization after
+ * syscore_shutdown() is called, i.e. without kvm_shutdown() being
+ * invoked. Note, this relies on system_state being set _before_
+ * kvm_shutdown(), e.g. to ensure either kvm_shutdown() is invoked
+ * or this CPU observes the impending shutdown. Which is why KVM uses
+ * a syscore ops hook instead of registering a dedicated reboot
+ * notifier (the latter runs before system_state is updated).
+ */
+ if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
+ system_state == SYSTEM_RESTART) {
+ r = -EBUSY;
+ goto err_rebooting;
}
- raw_spin_unlock(&kvm_count_lock);
+ return 0;
+err_rebooting:
+ unregister_syscore(&kvm_syscore);
+ cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
+err_cpuhp:
+ kvm_arch_disable_virtualization();
+ --kvm_usage_count;
return r;
}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_enable_virtualization);
-static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
- void *v)
+void kvm_disable_virtualization(void)
{
- /*
- * Some (well, at least mine) BIOSes hang on reboot if
- * in vmx root mode.
- *
- * And Intel TXT required VMX off for all cpu when system shutdown.
- */
- pr_info("kvm: exiting hardware virtualization\n");
- kvm_rebooting = true;
- on_each_cpu(hardware_disable_nolock, NULL, 1);
- return NOTIFY_OK;
+ guard(mutex)(&kvm_usage_lock);
+
+ if (--kvm_usage_count)
+ return;
+
+ unregister_syscore(&kvm_syscore);
+ cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
+ kvm_arch_disable_virtualization();
}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_disable_virtualization);
-static struct notifier_block kvm_reboot_notifier = {
- .notifier_call = kvm_reboot,
- .priority = 0,
-};
+static int kvm_init_virtualization(void)
+{
+ if (enable_virt_at_load)
+ return kvm_enable_virtualization();
+
+ return 0;
+}
+
+static void kvm_uninit_virtualization(void)
+{
+ if (enable_virt_at_load)
+ kvm_disable_virtualization();
+}
+#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
+static int kvm_init_virtualization(void)
+{
+ return 0;
+}
+
+static void kvm_uninit_virtualization(void)
+{
+
+}
+#endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
+
+static void kvm_iodevice_destructor(struct kvm_io_device *dev)
+{
+ if (dev->ops->destructor)
+ dev->ops->destructor(dev);
+}
static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
@@ -5293,7 +5866,18 @@ static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
return -EOPNOTSUPP;
}
-/* kvm_io_bus_write - called under kvm->slots_lock */
+static struct kvm_io_bus *kvm_get_bus_srcu(struct kvm *kvm, enum kvm_bus idx)
+{
+ /*
+ * Ensure that any updates to kvm_buses[] observed by the previous vCPU
+ * machine instruction are also visible to the vCPU machine instruction
+ * that triggered this call.
+ */
+ smp_mb__after_srcu_read_lock();
+
+ return srcu_dereference(kvm->buses[idx], &kvm->srcu);
+}
+
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
@@ -5306,15 +5890,14 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
.len = len,
};
- bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
if (!bus)
return -ENOMEM;
r = __kvm_io_bus_write(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
-EXPORT_SYMBOL_GPL(kvm_io_bus_write);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_write);
-/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
gpa_t addr, int len, const void *val, long cookie)
{
@@ -5326,7 +5909,7 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
.len = len,
};
- bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
if (!bus)
return -ENOMEM;
@@ -5364,7 +5947,6 @@ static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
return -EOPNOTSUPP;
}
-/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val)
{
@@ -5377,14 +5959,21 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
.len = len,
};
- bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
if (!bus)
return -ENOMEM;
r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_read);
+
+static void __free_bus(struct rcu_head *rcu)
+{
+ struct kvm_io_bus *bus = container_of(rcu, struct kvm_io_bus, rcu);
+
+ kfree(bus);
+}
-/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev)
{
@@ -5392,6 +5981,8 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
struct kvm_io_bus *new_bus, *bus;
struct kvm_io_range range;
+ lockdep_assert_held(&kvm->slots_lock);
+
bus = kvm_get_bus(kvm, bus_idx);
if (!bus)
return -ENOMEM;
@@ -5421,8 +6012,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
memcpy(new_bus->range + i + 1, bus->range + i,
(bus->dev_count - i) * sizeof(struct kvm_io_range));
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
- synchronize_srcu_expedited(&kvm->srcu);
- kfree(bus);
+ call_srcu(&kvm->srcu, &bus->rcu, __free_bus);
return 0;
}
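Replacing synchronize_srcu_expedited() + kfree() with call_srcu() means the writer no longer blocks waiting for readers; the old bus is freed from an SRCU callback once the grace period ends. A sketch of that deferred-free pattern with hypothetical demo_* names, assuming the object embeds a struct rcu_head as kvm_io_bus now does:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct demo_bus {
	int dev_count;
	struct rcu_head rcu;	/* embedded so call_srcu() can find the object */
};

static void demo_free_bus(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct demo_bus, rcu));
}

static void demo_replace_bus(struct srcu_struct *ssp,
			     struct demo_bus __rcu **slot,
			     struct demo_bus *new_bus)
{
	struct demo_bus *old = rcu_dereference_protected(*slot, true);

	rcu_assign_pointer(*slot, new_bus);
	if (old)
		call_srcu(ssp, &old->rcu, demo_free_bus);	/* free after grace period */
}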
@@ -5430,7 +6020,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{
- int i, j;
+ int i;
struct kvm_io_bus *new_bus, *bus;
lockdep_assert_held(&kvm->slots_lock);
@@ -5460,18 +6050,19 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
- /* Destroy the old bus _after_ installing the (null) bus. */
+ /*
+	 * If the NULL bus was installed, destroy the old bus, including all of
+	 * the attached devices. Otherwise, destroy only the caller's device.
+ */
if (!new_bus) {
pr_err("kvm: failed to shrink bus, removing it completely\n");
- for (j = 0; j < bus->dev_count; j++) {
- if (j == i)
- continue;
- kvm_iodevice_destructor(bus->range[j].dev);
- }
+ kvm_io_bus_destroy(bus);
+ return -ENOMEM;
}
+ kvm_iodevice_destructor(dev);
kfree(bus);
- return new_bus ? 0 : -ENOMEM;
+ return 0;
}
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -5483,7 +6074,7 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
srcu_idx = srcu_read_lock(&kvm->srcu);
- bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+ bus = kvm_get_bus_srcu(kvm, bus_idx);
if (!bus)
goto out_unlock;
@@ -5498,15 +6089,14 @@ out_unlock:
return iodev;
}
-EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_get_dev);
static int kvm_debugfs_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
const char *fmt)
{
int ret;
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
- inode->i_private;
+ struct kvm_stat_data *stat_data = inode->i_private;
/*
* The debugfs files are a reference to the kvm struct which
@@ -5527,8 +6117,7 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
static int kvm_debugfs_release(struct inode *inode, struct file *file)
{
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
- inode->i_private;
+ struct kvm_stat_data *stat_data = inode->i_private;
simple_attr_release(inode, file);
kvm_put_kvm(stat_data->kvm);
@@ -5577,7 +6166,7 @@ static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
static int kvm_stat_data_get(void *data, u64 *val)
{
int r = -EFAULT;
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+ struct kvm_stat_data *stat_data = data;
switch (stat_data->kind) {
case KVM_STAT_VM:
@@ -5596,7 +6185,7 @@ static int kvm_stat_data_get(void *data, u64 *val)
static int kvm_stat_data_clear(void *data, u64 val)
{
int r = -EFAULT;
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+ struct kvm_stat_data *stat_data = data;
if (val)
return -EINVAL;
@@ -5628,7 +6217,6 @@ static const struct file_operations stat_fops_per_vm = {
.release = kvm_debugfs_release,
.read = simple_attr_read,
.write = simple_attr_write,
- .llseek = no_llseek,
};
static int vm_stat_get(void *_offset, u64 *val)
@@ -5723,7 +6311,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
active = kvm_active_vms;
mutex_unlock(&kvm_lock);
- env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
+ env = kzalloc(sizeof(*env), GFP_KERNEL);
if (!env)
return;
@@ -5739,7 +6327,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
if (!IS_ERR(kvm->debugfs_dentry)) {
- char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
+ char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
if (p) {
tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
@@ -5785,26 +6373,6 @@ static void kvm_init_debug(void)
}
}
-static int kvm_suspend(void)
-{
- if (kvm_usage_count)
- hardware_disable_nolock(NULL);
- return 0;
-}
-
-static void kvm_resume(void)
-{
- if (kvm_usage_count) {
- lockdep_assert_not_held(&kvm_count_lock);
- hardware_enable_nolock(NULL);
- }
-}
-
-static struct syscore_ops kvm_syscore_ops = {
- .suspend = kvm_suspend,
- .resume = kvm_resume,
-};
-
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
@@ -5819,8 +6387,9 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
WRITE_ONCE(vcpu->ready, false);
__this_cpu_write(kvm_running_vcpu, vcpu);
- kvm_arch_sched_in(vcpu, cpu);
kvm_arch_vcpu_load(vcpu, cpu);
+
+ WRITE_ONCE(vcpu->scheduled_out, false);
}
static void kvm_sched_out(struct preempt_notifier *pn,
@@ -5828,7 +6397,9 @@ static void kvm_sched_out(struct preempt_notifier *pn,
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- if (current->on_rq) {
+ WRITE_ONCE(vcpu->scheduled_out, true);
+
+ if (task_is_runnable(current) && vcpu->wants_to_run) {
WRITE_ONCE(vcpu->preempted, true);
WRITE_ONCE(vcpu->ready, true);
}
@@ -5855,7 +6426,7 @@ struct kvm_vcpu *kvm_get_running_vcpu(void)
return vcpu;
}
-EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_running_vcpu);
/**
* kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
@@ -5909,63 +6480,11 @@ void kvm_unregister_perf_callbacks(void)
}
#endif
-struct kvm_cpu_compat_check {
- void *opaque;
- int *ret;
-};
-
-static void check_processor_compat(void *data)
+int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
{
- struct kvm_cpu_compat_check *c = data;
-
- *c->ret = kvm_arch_check_processor_compat(c->opaque);
-}
-
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- struct module *module)
-{
- struct kvm_cpu_compat_check c;
int r;
int cpu;
- r = kvm_arch_init(opaque);
- if (r)
- goto out_fail;
-
- /*
- * kvm_arch_init makes sure there's at most one caller
- * for architectures that support multiple implementations,
- * like intel and amd on x86.
- * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
- * conflicts in case kvm is already setup for another implementation.
- */
- r = kvm_irqfd_init();
- if (r)
- goto out_irqfd;
-
- if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
- r = -ENOMEM;
- goto out_free_0;
- }
-
- r = kvm_arch_hardware_setup(opaque);
- if (r < 0)
- goto out_free_1;
-
- c.ret = &r;
- c.opaque = opaque;
- for_each_online_cpu(cpu) {
- smp_call_function_single(cpu, check_processor_compat, &c, 1);
- if (r < 0)
- goto out_free_2;
- }
-
- r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
- kvm_starting_cpu, kvm_dying_cpu);
- if (r)
- goto out_free_2;
- register_reboot_notifier(&kvm_reboot_notifier);
-
/* A kmem cache lets us meet the alignment requirements of fx_save. */
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
@@ -5976,32 +6495,29 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
offsetofend(struct kvm_vcpu, stats_id)
- offsetof(struct kvm_vcpu, arch),
NULL);
- if (!kvm_vcpu_cache) {
- r = -ENOMEM;
- goto out_free_3;
- }
+ if (!kvm_vcpu_cache)
+ return -ENOMEM;
for_each_possible_cpu(cpu) {
if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
GFP_KERNEL, cpu_to_node(cpu))) {
r = -ENOMEM;
- goto out_free_4;
+ goto err_cpu_kick_mask;
}
}
+ r = kvm_irqfd_init();
+ if (r)
+ goto err_irqfd;
+
r = kvm_async_pf_init();
if (r)
- goto out_free_4;
+ goto err_async_pf;
kvm_chardev_ops.owner = module;
-
- r = misc_register(&kvm_dev);
- if (r) {
- pr_err("kvm: misc device register failed\n");
- goto out_unreg;
- }
-
- register_syscore_ops(&kvm_syscore_ops);
+ kvm_vm_fops.owner = module;
+ kvm_vcpu_fops.owner = module;
+ kvm_device_fops.owner = module;
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
@@ -6009,153 +6525,68 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
kvm_init_debug();
r = kvm_vfio_ops_init();
- WARN_ON(r);
+ if (WARN_ON_ONCE(r))
+ goto err_vfio;
+
+ r = kvm_gmem_init(module);
+ if (r)
+ goto err_gmem;
+
+ r = kvm_init_virtualization();
+ if (r)
+ goto err_virt;
+
+ /*
+ * Registration _must_ be the very last thing done, as this exposes
+	 * /dev/kvm to userspace, i.e. all infrastructure must be set up!
+ */
+ r = misc_register(&kvm_dev);
+ if (r) {
+ pr_err("kvm: misc device register failed\n");
+ goto err_register;
+ }
return 0;
-out_unreg:
+err_register:
+ kvm_uninit_virtualization();
+err_virt:
+ kvm_gmem_exit();
+err_gmem:
+ kvm_vfio_ops_exit();
+err_vfio:
kvm_async_pf_deinit();
-out_free_4:
+err_async_pf:
+ kvm_irqfd_exit();
+err_irqfd:
+err_cpu_kick_mask:
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
-out_free_3:
- unregister_reboot_notifier(&kvm_reboot_notifier);
- cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
-out_free_2:
- kvm_arch_hardware_unsetup();
-out_free_1:
- free_cpumask_var(cpus_hardware_enabled);
-out_free_0:
- kvm_irqfd_exit();
-out_irqfd:
- kvm_arch_exit();
-out_fail:
return r;
}
-EXPORT_SYMBOL_GPL(kvm_init);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init);
void kvm_exit(void)
{
int cpu;
- debugfs_remove_recursive(kvm_debugfs_dir);
+ /*
+	 * Note, unregistering /dev/kvm doesn't strictly need to come first, as
+	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
+ * to KVM while the module is being stopped.
+ */
misc_deregister(&kvm_dev);
+
+ kvm_uninit_virtualization();
+
+ debugfs_remove_recursive(kvm_debugfs_dir);
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
+ kvm_gmem_exit();
+ kvm_vfio_ops_exit();
kvm_async_pf_deinit();
- unregister_syscore_ops(&kvm_syscore_ops);
- unregister_reboot_notifier(&kvm_reboot_notifier);
- cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
- on_each_cpu(hardware_disable_nolock, NULL, 1);
- kvm_arch_hardware_unsetup();
- kvm_arch_exit();
kvm_irqfd_exit();
- free_cpumask_var(cpus_hardware_enabled);
- kvm_vfio_ops_exit();
-}
-EXPORT_SYMBOL_GPL(kvm_exit);
-
-struct kvm_vm_worker_thread_context {
- struct kvm *kvm;
- struct task_struct *parent;
- struct completion init_done;
- kvm_vm_thread_fn_t thread_fn;
- uintptr_t data;
- int err;
-};
-
-static int kvm_vm_worker_thread(void *context)
-{
- /*
- * The init_context is allocated on the stack of the parent thread, so
- * we have to locally copy anything that is needed beyond initialization
- */
- struct kvm_vm_worker_thread_context *init_context = context;
- struct task_struct *parent;
- struct kvm *kvm = init_context->kvm;
- kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
- uintptr_t data = init_context->data;
- int err;
-
- err = kthread_park(current);
- /* kthread_park(current) is never supposed to return an error */
- WARN_ON(err != 0);
- if (err)
- goto init_complete;
-
- err = cgroup_attach_task_all(init_context->parent, current);
- if (err) {
- kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
- __func__, err);
- goto init_complete;
- }
-
- set_user_nice(current, task_nice(init_context->parent));
-
-init_complete:
- init_context->err = err;
- complete(&init_context->init_done);
- init_context = NULL;
-
- if (err)
- goto out;
-
- /* Wait to be woken up by the spawner before proceeding. */
- kthread_parkme();
-
- if (!kthread_should_stop())
- err = thread_fn(kvm, data);
-
-out:
- /*
- * Move kthread back to its original cgroup to prevent it lingering in
- * the cgroup of the VM process, after the latter finishes its
- * execution.
- *
- * kthread_stop() waits on the 'exited' completion condition which is
- * set in exit_mm(), via mm_release(), in do_exit(). However, the
- * kthread is removed from the cgroup in the cgroup_exit() which is
- * called after the exit_mm(). This causes the kthread_stop() to return
- * before the kthread actually quits the cgroup.
- */
- rcu_read_lock();
- parent = rcu_dereference(current->real_parent);
- get_task_struct(parent);
- rcu_read_unlock();
- cgroup_attach_task_all(parent, current);
- put_task_struct(parent);
-
- return err;
-}
-
-int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
- uintptr_t data, const char *name,
- struct task_struct **thread_ptr)
-{
- struct kvm_vm_worker_thread_context init_context = {};
- struct task_struct *thread;
-
- *thread_ptr = NULL;
- init_context.kvm = kvm;
- init_context.parent = current;
- init_context.thread_fn = thread_fn;
- init_context.data = data;
- init_completion(&init_context.init_done);
-
- thread = kthread_run(kvm_vm_worker_thread, &init_context,
- "%s-%d", name, task_pid_nr(current));
- if (IS_ERR(thread))
- return PTR_ERR(thread);
-
- /* kthread_run is never supposed to return NULL */
- WARN_ON(thread == NULL);
-
- wait_for_completion(&init_context.init_done);
-
- if (!init_context.err)
- *thread_ptr = thread;
-
- return init_context.err;
}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_exit);
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index 180f1a09e6ba..9fcc5d5b7f8d 100644
--- a/virt/kvm/kvm_mm.h
+++ b/virt/kvm/kvm_mm.h
@@ -20,21 +20,78 @@
#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
- bool *async, bool write_fault, bool *writable);
+
+struct kvm_follow_pfn {
+ const struct kvm_memory_slot *slot;
+ const gfn_t gfn;
+
+ unsigned long hva;
+
+ /* FOLL_* flags modifying lookup behavior, e.g. FOLL_WRITE. */
+ unsigned int flags;
+
+ /*
+ * Pin the page (effectively FOLL_PIN, which is an mm/ internal flag).
+ * The page *must* be pinned if KVM will write to the page via a kernel
+ * mapping, e.g. via kmap(), mremap(), etc.
+ */
+ bool pin;
+
+ /*
+ * If non-NULL, try to get a writable mapping even for a read fault.
+ * Set to true if a writable mapping was obtained.
+ */
+ bool *map_writable;
+
+ /*
+ * Optional output. Set to a valid "struct page" if the returned pfn
+ * is for a refcounted or pinned struct page, NULL if the returned pfn
+ * has no struct page or if the struct page is not being refcounted
+ * (e.g. tail pages of non-compound higher order allocations from
+ * IO/PFNMAP mappings).
+ */
+ struct page **refcounted_page;
+};
+
+kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp);
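A sketch of calling hva_to_pfn() through the new kvm_follow_pfn structure, mirroring how hva_to_pfn_retry() in pfncache.c (further below) fills it in; demo_follow() and its arguments are placeholders, and the code assumes a KVM source file that already includes this header and <linux/kvm_host.h>:

static kvm_pfn_t demo_follow(const struct kvm_memory_slot *slot, gfn_t gfn,
			     unsigned long hva, struct page **page)
{
	struct kvm_follow_pfn kfp = {
		.slot = slot,
		.gfn  = gfn,
		.hva  = hva,
		.flags = FOLL_WRITE,		/* ask for a writable mapping */
		.refcounted_page = page,	/* report the backing page, if any */
	};

	return hva_to_pfn(&kfp);
}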
#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
unsigned long start,
- unsigned long end,
- bool may_block);
+ unsigned long end);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
unsigned long start,
- unsigned long end,
- bool may_block)
+ unsigned long end)
{
}
#endif /* HAVE_KVM_PFNCACHE */
+#ifdef CONFIG_KVM_GUEST_MEMFD
+int kvm_gmem_init(struct module *module);
+void kvm_gmem_exit(void);
+int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
+int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned int fd, loff_t offset);
+void kvm_gmem_unbind(struct kvm_memory_slot *slot);
+#else
+static inline int kvm_gmem_init(struct module *module)
+{
+ return 0;
+}
+static inline void kvm_gmem_exit(void) {};
+static inline int kvm_gmem_bind(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ unsigned int fd, loff_t offset)
+{
+ WARN_ON_ONCE(1);
+ return -EIO;
+}
+
+static inline void kvm_gmem_unbind(struct kvm_memory_slot *slot)
+{
+ WARN_ON_ONCE(1);
+}
+#endif /* CONFIG_KVM_GUEST_MEMFD */
+
#endif /* __KVM_MM_H__ */
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 2d6aba677830..728d2c1b488a 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -23,57 +23,51 @@
* MMU notifier 'invalidate_range_start' hook.
*/
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
- unsigned long end, bool may_block)
+ unsigned long end)
{
- DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
struct gfn_to_pfn_cache *gpc;
- bool evict_vcpus = false;
spin_lock(&kvm->gpc_lock);
list_for_each_entry(gpc, &kvm->gpc_list, list) {
- write_lock_irq(&gpc->lock);
+ read_lock_irq(&gpc->lock);
/* Only a single page so no need to care about length */
if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
gpc->uhva >= start && gpc->uhva < end) {
- gpc->valid = false;
+ read_unlock_irq(&gpc->lock);
/*
- * If a guest vCPU could be using the physical address,
- * it needs to be forced out of guest mode.
+ * There is a small window here where the cache could
+ * be modified, and invalidation would no longer be
+ * necessary. Hence check again whether invalidation
+ * is still necessary once the write lock has been
+ * acquired.
*/
- if (gpc->usage & KVM_GUEST_USES_PFN) {
- if (!evict_vcpus) {
- evict_vcpus = true;
- bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
- }
- __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
- }
+
+ write_lock_irq(&gpc->lock);
+ if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
+ gpc->uhva >= start && gpc->uhva < end)
+ gpc->valid = false;
+ write_unlock_irq(&gpc->lock);
+ continue;
}
- write_unlock_irq(&gpc->lock);
+
+ read_unlock_irq(&gpc->lock);
}
spin_unlock(&kvm->gpc_lock);
+}
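The invalidation loop above takes the cheap read lock first and only upgrades to the write lock when an entry actually overlaps the range, re-checking the condition because it may have changed in the unlocked window. A standalone sketch of that peek-then-recheck pattern, with hypothetical demo_* names and an rwlock assumed to be initialized elsewhere with rwlock_init():

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_entry {
	rwlock_t lock;		/* initialized elsewhere with rwlock_init() */
	bool valid;
	unsigned long addr;
};

static void demo_invalidate(struct demo_entry *e, unsigned long start,
			    unsigned long end)
{
	read_lock_irq(&e->lock);
	if (!e->valid || e->addr < start || e->addr >= end) {
		read_unlock_irq(&e->lock);
		return;		/* not affected, never serialize with users */
	}
	read_unlock_irq(&e->lock);

	/* The entry may have changed in the unlocked window; re-check. */
	write_lock_irq(&e->lock);
	if (e->valid && e->addr >= start && e->addr < end)
		e->valid = false;
	write_unlock_irq(&e->lock);
}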
- if (evict_vcpus) {
- /*
- * KVM needs to ensure the vCPU is fully out of guest context
- * before allowing the invalidation to continue.
- */
- unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
- bool called;
-
- /*
- * If the OOM reaper is active, then all vCPUs should have
- * been stopped already, so perform the request without
- * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
- */
- if (!may_block)
- req &= ~KVM_REQUEST_WAIT;
-
- called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
+static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
+ unsigned long len)
+{
+ unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
+ offset_in_page(gpa);
- WARN_ON_ONCE(called && !may_block);
- }
+ /*
+ * The cached access must fit within a single page. The 'len' argument
+ * to activate() and refresh() exists only to enforce that.
+ */
+ return offset + len <= PAGE_SIZE;
}
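As a worked example of the check above, assuming PAGE_SIZE == 4096: a GPA with page offset 0xf80 permits len up to 0x80, since 0xf80 + 0x80 == 0x1000 == PAGE_SIZE, whereas len == 0x100 would straddle the page boundary and is rejected with -EINVAL by the activate/refresh callers below.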
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
@@ -83,10 +77,17 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
if (!gpc->active)
return false;
- if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
+ /*
+ * If the page was cached from a memslot, make sure the memslots have
+ * not been re-configured.
+ */
+ if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
+ return false;
+
+ if (kvm_is_error_hva(gpc->uhva))
return false;
- if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
+ if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
return false;
if (!gpc->valid)
@@ -94,19 +95,33 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
return true;
}
-EXPORT_SYMBOL_GPL(kvm_gpc_check);
-static void gpc_unmap_khva(kvm_pfn_t pfn, void *khva)
+static void *gpc_map(kvm_pfn_t pfn)
{
- /* Unmap the old pfn/page if it was mapped before. */
- if (!is_error_noslot_pfn(pfn) && khva) {
- if (pfn_valid(pfn))
- kunmap(pfn_to_page(pfn));
+ if (pfn_valid(pfn))
+ return kmap(pfn_to_page(pfn));
+
#ifdef CONFIG_HAS_IOMEM
- else
- memunmap(khva);
+ return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+#else
+ return NULL;
#endif
+}
+
+static void gpc_unmap(kvm_pfn_t pfn, void *khva)
+{
+ /* Unmap the old pfn/page if it was mapped before. */
+ if (is_error_noslot_pfn(pfn) || !khva)
+ return;
+
+ if (pfn_valid(pfn)) {
+ kunmap(pfn_to_page(pfn));
+ return;
}
+
+#ifdef CONFIG_HAS_IOMEM
+ memunmap(khva);
+#endif
}
static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
@@ -140,10 +155,19 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
/* Note, the new page offset may be different than the old! */
- void *old_khva = gpc->khva - offset_in_page(gpc->khva);
+ void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
void *new_khva = NULL;
unsigned long mmu_seq;
+ struct page *page;
+
+ struct kvm_follow_pfn kfp = {
+ .slot = gpc->memslot,
+ .gfn = gpa_to_gfn(gpc->gpa),
+ .flags = FOLL_WRITE,
+ .hva = gpc->uhva,
+ .refcounted_page = &page,
+ };
lockdep_assert_held(&gpc->refresh_lock);
@@ -175,15 +199,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
* the existing mapping and didn't create a new one.
*/
if (new_khva != old_khva)
- gpc_unmap_khva(new_pfn, new_khva);
+ gpc_unmap(new_pfn, new_khva);
- kvm_release_pfn_clean(new_pfn);
+ kvm_release_page_unused(page);
cond_resched();
}
- /* We always request a writeable mapping */
- new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
+ new_pfn = hva_to_pfn(&kfp);
if (is_error_noslot_pfn(new_pfn))
goto out_error;
@@ -192,20 +215,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
* pfn. Note, kmap() and memremap() can both sleep, so this
* too must be done outside of gpc->lock!
*/
- if (gpc->usage & KVM_HOST_USES_PFN) {
- if (new_pfn == gpc->pfn) {
- new_khva = old_khva;
- } else if (pfn_valid(new_pfn)) {
- new_khva = kmap(pfn_to_page(new_pfn));
-#ifdef CONFIG_HAS_IOMEM
- } else {
- new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
-#endif
- }
- if (!new_khva) {
- kvm_release_pfn_clean(new_pfn);
- goto out_error;
- }
+ if (new_pfn == gpc->pfn)
+ new_khva = old_khva;
+ else
+ new_khva = gpc_map(new_pfn);
+
+ if (!new_khva) {
+ kvm_release_page_unused(page);
+ goto out_error;
}
write_lock_irq(&gpc->lock);
@@ -219,14 +236,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
gpc->valid = true;
gpc->pfn = new_pfn;
- gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);
+ gpc->khva = new_khva + offset_in_page(gpc->uhva);
/*
- * Put the reference to the _new_ pfn. The pfn is now tracked by the
+ * Put the reference to the _new_ page. The page is now tracked by the
* cache and can be safely migrated, swapped, etc... as the cache will
* invalidate any mappings in response to relevant mmu_notifier events.
*/
- kvm_release_pfn_clean(new_pfn);
+ kvm_release_page_clean(page);
return 0;
@@ -236,30 +253,21 @@ out_error:
return -EFAULT;
}
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
- unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva)
{
- struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
- unsigned long page_offset = gpa & ~PAGE_MASK;
+ unsigned long page_offset;
bool unmap_old = false;
unsigned long old_uhva;
kvm_pfn_t old_pfn;
+ bool hva_change = false;
void *old_khva;
int ret;
- /*
- * If must fit within a single page. The 'len' argument is
- * only to enforce that.
- */
- if (page_offset + len > PAGE_SIZE)
+ /* Either gpa or uhva must be valid, but not both */
+ if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
return -EINVAL;
- /*
- * If another task is refreshing the cache, wait for it to complete.
- * There is no guarantee that concurrent refreshes will see the same
- * gpa, memslots generation, etc..., so they must be fully serialized.
- */
- mutex_lock(&gpc->refresh_lock);
+ lockdep_assert_held(&gpc->refresh_lock);
write_lock_irq(&gpc->lock);
@@ -269,30 +277,56 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
}
old_pfn = gpc->pfn;
- old_khva = gpc->khva - offset_in_page(gpc->khva);
- old_uhva = gpc->uhva;
-
- /* If the userspace HVA is invalid, refresh that first */
- if (gpc->gpa != gpa || gpc->generation != slots->generation ||
- kvm_is_error_hva(gpc->uhva)) {
- gfn_t gfn = gpa_to_gfn(gpa);
-
- gpc->gpa = gpa;
- gpc->generation = slots->generation;
- gpc->memslot = __gfn_to_memslot(slots, gfn);
- gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
-
- if (kvm_is_error_hva(gpc->uhva)) {
- ret = -EFAULT;
- goto out;
+ old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
+ old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
+
+ if (kvm_is_error_gpa(gpa)) {
+ page_offset = offset_in_page(uhva);
+
+ gpc->gpa = INVALID_GPA;
+ gpc->memslot = NULL;
+ gpc->uhva = PAGE_ALIGN_DOWN(uhva);
+
+ if (gpc->uhva != old_uhva)
+ hva_change = true;
+ } else {
+ struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
+
+ page_offset = offset_in_page(gpa);
+
+ if (gpc->gpa != gpa || gpc->generation != slots->generation ||
+ kvm_is_error_hva(gpc->uhva)) {
+ gfn_t gfn = gpa_to_gfn(gpa);
+
+ gpc->gpa = gpa;
+ gpc->generation = slots->generation;
+ gpc->memslot = __gfn_to_memslot(slots, gfn);
+ gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
+
+ if (kvm_is_error_hva(gpc->uhva)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * Even if the GPA and/or the memslot generation changed, the
+ * HVA may still be the same.
+ */
+ if (gpc->uhva != old_uhva)
+ hva_change = true;
+ } else {
+ gpc->uhva = old_uhva;
}
}
+ /* Note: the offset must be correct before calling hva_to_pfn_retry() */
+ gpc->uhva += page_offset;
+
/*
* If the userspace HVA changed or the PFN was already invalid,
* drop the lock and do the HVA to PFN lookup again.
*/
- if (!gpc->valid || old_uhva != gpc->uhva) {
+ if (!gpc->valid || hva_change) {
ret = hva_to_pfn_retry(gpc);
} else {
/*
@@ -323,41 +357,53 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
out_unlock:
write_unlock_irq(&gpc->lock);
- mutex_unlock(&gpc->refresh_lock);
-
if (unmap_old)
- gpc_unmap_khva(old_pfn, old_khva);
+ gpc_unmap(old_pfn, old_khva);
return ret;
}
int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
- return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+ unsigned long uhva;
+
+ guard(mutex)(&gpc->refresh_lock);
+
+ if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
+ return -EINVAL;
+
+ /*
+ * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
+ * or HVA-based, not both. For GPA-based caches, the HVA will be
+ * recomputed during refresh if necessary.
+ */
+ uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;
+
+ return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
}
-EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
- struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
{
- WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
- WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
-
rwlock_init(&gpc->lock);
mutex_init(&gpc->refresh_lock);
gpc->kvm = kvm;
- gpc->vcpu = vcpu;
- gpc->usage = usage;
gpc->pfn = KVM_PFN_ERR_FAULT;
+ gpc->gpa = INVALID_GPA;
gpc->uhva = KVM_HVA_ERR_BAD;
+ gpc->active = gpc->valid = false;
}
-EXPORT_SYMBOL_GPL(kvm_gpc_init);
-int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
+ unsigned long len)
{
struct kvm *kvm = gpc->kvm;
+ if (!kvm_gpc_is_valid_len(gpa, uhva, len))
+ return -EINVAL;
+
+ guard(mutex)(&gpc->refresh_lock);
+
if (!gpc->active) {
if (KVM_BUG_ON(gpc->valid, kvm))
return -EIO;
@@ -375,9 +421,28 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
- return __kvm_gpc_refresh(gpc, gpa, len);
+ return __kvm_gpc_refresh(gpc, gpa, uhva);
+}
+
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+{
+ /*
+ * Explicitly disallow INVALID_GPA so that the magic value can be used
+ * by KVM to differentiate between GPA-based and HVA-based caches.
+ */
+ if (WARN_ON_ONCE(kvm_is_error_gpa(gpa)))
+ return -EINVAL;
+
+ return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
+}
+
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
+{
+ if (!access_ok((void __user *)uhva, len))
+ return -EINVAL;
+
+ return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
}
-EXPORT_SYMBOL_GPL(kvm_gpc_activate);
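A minimal usage sketch for the gfn_to_pfn_cache API as declared here, following the usual check/refresh loop under the cache's read lock; demo_gpc_write() and the u64-sized access are placeholders, and error handling is trimmed. kvm_gpc_refresh() re-resolves the HVA and PFN if the memslots changed or the mapping was invalidated, which is why the check is retried in a loop:

static void demo_gpc_write(struct kvm *kvm, gpa_t gpa)
{
	struct gfn_to_pfn_cache gpc;

	kvm_gpc_init(&gpc, kvm);
	if (kvm_gpc_activate(&gpc, gpa, sizeof(u64)))
		return;

	read_lock_irq(&gpc.lock);
	while (!kvm_gpc_check(&gpc, sizeof(u64))) {
		read_unlock_irq(&gpc.lock);

		/* Re-resolve the HVA/PFN if the cache went stale. */
		if (kvm_gpc_refresh(&gpc, sizeof(u64)))
			goto out;

		read_lock_irq(&gpc.lock);
	}
	*(u64 *)gpc.khva = 0;		/* access the kernel mapping */
	read_unlock_irq(&gpc.lock);
out:
	kvm_gpc_deactivate(&gpc);
}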
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
@@ -385,6 +450,8 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
kvm_pfn_t old_pfn;
void *old_khva;
+ guard(mutex)(&gpc->refresh_lock);
+
if (gpc->active) {
/*
* Deactivate the cache before removing it from the list, KVM
@@ -412,7 +479,6 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
list_del(&gpc->list);
spin_unlock(&kvm->gpc_lock);
- gpc_unmap_khva(old_pfn, old_khva);
+ gpc_unmap(old_pfn, old_khva);
}
}
-EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 9584eb57e0ed..be50514bbd11 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -21,7 +21,7 @@
#include <asm/kvm_ppc.h>
#endif
-struct kvm_vfio_group {
+struct kvm_vfio_file {
struct list_head node;
struct file *file;
#ifdef CONFIG_SPAPR_TCE_IOMMU
@@ -30,7 +30,7 @@ struct kvm_vfio_group {
};
struct kvm_vfio {
- struct list_head group_list;
+ struct list_head file_list;
struct mutex lock;
bool noncoherent;
};
@@ -64,18 +64,18 @@ static bool kvm_vfio_file_enforced_coherent(struct file *file)
return ret;
}
-static bool kvm_vfio_file_is_group(struct file *file)
+static bool kvm_vfio_file_is_valid(struct file *file)
{
bool (*fn)(struct file *file);
bool ret;
- fn = symbol_get(vfio_file_is_group);
+ fn = symbol_get(vfio_file_is_valid);
if (!fn)
return false;
ret = fn(file);
- symbol_put(vfio_file_is_group);
+ symbol_put(vfio_file_is_valid);
return ret;
}
@@ -98,34 +98,33 @@ static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
}
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
- struct kvm_vfio_group *kvg)
+ struct kvm_vfio_file *kvf)
{
- if (WARN_ON_ONCE(!kvg->iommu_group))
+ if (WARN_ON_ONCE(!kvf->iommu_group))
return;
- kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
- iommu_group_put(kvg->iommu_group);
- kvg->iommu_group = NULL;
+ kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group);
+ iommu_group_put(kvf->iommu_group);
+ kvf->iommu_group = NULL;
}
#endif
/*
- * Groups can use the same or different IOMMU domains. If the same then
- * adding a new group may change the coherency of groups we've previously
- * been told about. We don't want to care about any of that so we retest
- * each group and bail as soon as we find one that's noncoherent. This
- * means we only ever [un]register_noncoherent_dma once for the whole device.
+ * Groups/devices can use the same or different IOMMU domains. If the same,
+ * then adding a new group/device may change the coherency of groups/devices
+ * we've previously been told about. We don't want to care about any of
+ * that so we retest each group/device and bail as soon as we find one that's
+ * noncoherent. This means we only ever [un]register_noncoherent_dma once
+ * for the whole device.
*/
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
struct kvm_vfio *kv = dev->private;
bool noncoherent = false;
- struct kvm_vfio_group *kvg;
+ struct kvm_vfio_file *kvf;
- mutex_lock(&kv->lock);
-
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
+ list_for_each_entry(kvf, &kv->file_list, node) {
+ if (!kvm_vfio_file_enforced_coherent(kvf->file)) {
noncoherent = true;
break;
}
@@ -139,165 +138,151 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
else
kvm_arch_unregister_noncoherent_dma(dev->kvm);
}
-
- mutex_unlock(&kv->lock);
}
-static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
+static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
- struct kvm_vfio_group *kvg;
+ struct kvm_vfio_file *kvf;
struct file *filp;
- int ret;
+ int ret = 0;
filp = fget(fd);
if (!filp)
return -EBADF;
- /* Ensure the FD is a vfio group FD.*/
- if (!kvm_vfio_file_is_group(filp)) {
+ /* Ensure the FD is a vfio FD. */
+ if (!kvm_vfio_file_is_valid(filp)) {
ret = -EINVAL;
- goto err_fput;
+ goto out_fput;
}
mutex_lock(&kv->lock);
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->file == filp) {
+ list_for_each_entry(kvf, &kv->file_list, node) {
+ if (kvf->file == filp) {
ret = -EEXIST;
- goto err_unlock;
+ goto out_unlock;
}
}
- kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
- if (!kvg) {
+ kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT);
+ if (!kvf) {
ret = -ENOMEM;
- goto err_unlock;
+ goto out_unlock;
}
- kvg->file = filp;
- list_add_tail(&kvg->node, &kv->group_list);
-
- kvm_arch_start_assignment(dev->kvm);
-
- mutex_unlock(&kv->lock);
+ kvf->file = get_file(filp);
+ list_add_tail(&kvf->node, &kv->file_list);
- kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
+ kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
kvm_vfio_update_coherency(dev);
- return 0;
-err_unlock:
+out_unlock:
mutex_unlock(&kv->lock);
-err_fput:
+out_fput:
fput(filp);
return ret;
}
-static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
- struct kvm_vfio_group *kvg;
- struct fd f;
+ struct kvm_vfio_file *kvf;
+ CLASS(fd, f)(fd);
int ret;
- f = fdget(fd);
- if (!f.file)
+ if (fd_empty(f))
return -EBADF;
ret = -ENOENT;
mutex_lock(&kv->lock);
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->file != f.file)
+ list_for_each_entry(kvf, &kv->file_list, node) {
+ if (kvf->file != fd_file(f))
continue;
- list_del(&kvg->node);
- kvm_arch_end_assignment(dev->kvm);
+ list_del(&kvf->node);
#ifdef CONFIG_SPAPR_TCE_IOMMU
- kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
+ kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
- kvm_vfio_file_set_kvm(kvg->file, NULL);
- fput(kvg->file);
- kfree(kvg);
+ kvm_vfio_file_set_kvm(kvf->file, NULL);
+ fput(kvf->file);
+ kfree(kvf);
ret = 0;
break;
}
- mutex_unlock(&kv->lock);
-
- fdput(f);
-
kvm_vfio_update_coherency(dev);
+ mutex_unlock(&kv->lock);
return ret;
}
#ifdef CONFIG_SPAPR_TCE_IOMMU
-static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
- void __user *arg)
+static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev,
+ void __user *arg)
{
struct kvm_vfio_spapr_tce param;
struct kvm_vfio *kv = dev->private;
- struct kvm_vfio_group *kvg;
- struct fd f;
+ struct kvm_vfio_file *kvf;
int ret;
if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
return -EFAULT;
- f = fdget(param.groupfd);
- if (!f.file)
+ CLASS(fd, f)(param.groupfd);
+ if (fd_empty(f))
return -EBADF;
ret = -ENOENT;
mutex_lock(&kv->lock);
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->file != f.file)
+ list_for_each_entry(kvf, &kv->file_list, node) {
+ if (kvf->file != fd_file(f))
continue;
- if (!kvg->iommu_group) {
- kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
- if (WARN_ON_ONCE(!kvg->iommu_group)) {
+ if (!kvf->iommu_group) {
+ kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file);
+ if (WARN_ON_ONCE(!kvf->iommu_group)) {
ret = -EIO;
goto err_fdput;
}
}
ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
- kvg->iommu_group);
+ kvf->iommu_group);
break;
}
err_fdput:
mutex_unlock(&kv->lock);
- fdput(f);
return ret;
}
#endif
-static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
- void __user *arg)
+static int kvm_vfio_set_file(struct kvm_device *dev, long attr,
+ void __user *arg)
{
int32_t __user *argp = arg;
int32_t fd;
switch (attr) {
- case KVM_DEV_VFIO_GROUP_ADD:
+ case KVM_DEV_VFIO_FILE_ADD:
if (get_user(fd, argp))
return -EFAULT;
- return kvm_vfio_group_add(dev, fd);
+ return kvm_vfio_file_add(dev, fd);
- case KVM_DEV_VFIO_GROUP_DEL:
+ case KVM_DEV_VFIO_FILE_DEL:
if (get_user(fd, argp))
return -EFAULT;
- return kvm_vfio_group_del(dev, fd);
+ return kvm_vfio_file_del(dev, fd);
#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
- return kvm_vfio_group_set_spapr_tce(dev, arg);
+ return kvm_vfio_file_set_spapr_tce(dev, arg);
#endif
}
@@ -308,9 +293,9 @@ static int kvm_vfio_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
- case KVM_DEV_VFIO_GROUP:
- return kvm_vfio_set_group(dev, attr->attr,
- u64_to_user_ptr(attr->addr));
+ case KVM_DEV_VFIO_FILE:
+ return kvm_vfio_set_file(dev, attr->attr,
+ u64_to_user_ptr(attr->addr));
}
return -ENXIO;
@@ -320,10 +305,10 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
- case KVM_DEV_VFIO_GROUP:
+ case KVM_DEV_VFIO_FILE:
switch (attr->attr) {
- case KVM_DEV_VFIO_GROUP_ADD:
- case KVM_DEV_VFIO_GROUP_DEL:
+ case KVM_DEV_VFIO_FILE_ADD:
+ case KVM_DEV_VFIO_FILE_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
@@ -339,17 +324,16 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
static void kvm_vfio_release(struct kvm_device *dev)
{
struct kvm_vfio *kv = dev->private;
- struct kvm_vfio_group *kvg, *tmp;
+ struct kvm_vfio_file *kvf, *tmp;
- list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
+ list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
- kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
+ kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
- kvm_vfio_file_set_kvm(kvg->file, NULL);
- fput(kvg->file);
- list_del(&kvg->node);
- kfree(kvg);
- kvm_arch_end_assignment(dev->kvm);
+ kvm_vfio_file_set_kvm(kvf->file, NULL);
+ fput(kvf->file);
+ list_del(&kvf->node);
+ kfree(kvf);
}
kvm_vfio_update_coherency(dev);
@@ -360,7 +344,7 @@ static void kvm_vfio_release(struct kvm_device *dev)
static int kvm_vfio_create(struct kvm_device *dev, u32 type);
-static struct kvm_device_ops kvm_vfio_ops = {
+static const struct kvm_device_ops kvm_vfio_ops = {
.name = "kvm-vfio",
.create = kvm_vfio_create,
.release = kvm_vfio_release,
@@ -373,6 +357,8 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
struct kvm_device *tmp;
struct kvm_vfio *kv;
+ lockdep_assert_held(&dev->kvm->lock);
+
/* Only one VFIO "device" per VM */
list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
if (tmp->ops == &kvm_vfio_ops)
@@ -382,7 +368,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
if (!kv)
return -ENOMEM;
- INIT_LIST_HEAD(&kv->group_list);
+ INIT_LIST_HEAD(&kv->file_list);
mutex_init(&kv->lock);
dev->private = kv;
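For completeness, the renamed attribute is driven from userspace exactly like the old GROUP one. A hypothetical userspace sketch (error handling and the origin of vm_fd/vfio_fd omitted) that attaches a VFIO file to a VM via the kvm-vfio pseudo device; it assumes a uapi <linux/kvm.h> recent enough to define the KVM_DEV_VFIO_FILE* names:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vm_fd: from KVM_CREATE_VM; vfio_fd: an open VFIO group or cdev fd. */
static int demo_attach_vfio(int vm_fd, int vfio_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
	struct kvm_device_attr attr = {
		.group = KVM_DEV_VFIO_FILE,
		.attr  = KVM_DEV_VFIO_FILE_ADD,
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;

	attr.addr = (uintptr_t)&vfio_fd;	/* pointer to an int32_t fd */
	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
}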
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
index 28fda42e471b..62c160200be9 100644
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -22,8 +22,8 @@
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IRQ bypass manager utility module");
-static LIST_HEAD(producers);
-static LIST_HEAD(consumers);
+static DEFINE_XARRAY(producers);
+static DEFINE_XARRAY(consumers);
static DEFINE_MUTEX(lock);
/* @lock must be held when calling connect */
@@ -51,6 +51,10 @@ static int __connect(struct irq_bypass_producer *prod,
if (prod->start)
prod->start(prod);
+ if (!ret) {
+ prod->consumer = cons;
+ cons->producer = prod;
+ }
return ret;
}
@@ -72,56 +76,49 @@ static void __disconnect(struct irq_bypass_producer *prod,
cons->start(cons);
if (prod->start)
prod->start(prod);
+
+ prod->consumer = NULL;
+ cons->producer = NULL;
}
/**
* irq_bypass_register_producer - register IRQ bypass producer
* @producer: pointer to producer structure
+ * @eventfd: pointer to the eventfd context associated with the producer
+ * @irq: Linux IRQ number of the underlying producer device
*
- * Add the provided IRQ producer to the list of producers and connect
- * with any matching token found on the IRQ consumers list.
+ * Add the provided IRQ producer to the set of producers and connect with the
+ * consumer with a matching eventfd, if one exists.
*/
-int irq_bypass_register_producer(struct irq_bypass_producer *producer)
+int irq_bypass_register_producer(struct irq_bypass_producer *producer,
+ struct eventfd_ctx *eventfd, int irq)
{
- struct irq_bypass_producer *tmp;
+ unsigned long index = (unsigned long)eventfd;
struct irq_bypass_consumer *consumer;
int ret;
- if (!producer->token)
+ if (WARN_ON_ONCE(producer->eventfd))
return -EINVAL;
- might_sleep();
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
+ producer->irq = irq;
- mutex_lock(&lock);
+ guard(mutex)(&lock);
- list_for_each_entry(tmp, &producers, node) {
- if (tmp->token == producer->token) {
- ret = -EBUSY;
- goto out_err;
- }
- }
+ ret = xa_insert(&producers, index, producer, GFP_KERNEL);
+ if (ret)
+ return ret;
- list_for_each_entry(consumer, &consumers, node) {
- if (consumer->token == producer->token) {
- ret = __connect(producer, consumer);
- if (ret)
- goto out_err;
- break;
+ consumer = xa_load(&consumers, index);
+ if (consumer) {
+ ret = __connect(producer, consumer);
+ if (ret) {
+ WARN_ON_ONCE(xa_erase(&producers, index) != producer);
+ return ret;
}
}
- list_add(&producer->node, &producers);
-
- mutex_unlock(&lock);
-
+ producer->eventfd = eventfd;
return 0;
-out_err:
- mutex_unlock(&lock);
- module_put(THIS_MODULE);
- return ret;
}
EXPORT_SYMBOL_GPL(irq_bypass_register_producer);
@@ -129,95 +126,65 @@ EXPORT_SYMBOL_GPL(irq_bypass_register_producer);
* irq_bypass_unregister_producer - unregister IRQ bypass producer
* @producer: pointer to producer structure
*
- * Remove a previously registered IRQ producer from the list of producers
- * and disconnect it from any connected IRQ consumer.
+ * Remove a previously registered IRQ producer (note, it's safe to call this
+ * even if registration was unsuccessful). Disconnect from the associated
+ * consumer, if one exists.
*/
void irq_bypass_unregister_producer(struct irq_bypass_producer *producer)
{
- struct irq_bypass_producer *tmp;
- struct irq_bypass_consumer *consumer;
+ unsigned long index = (unsigned long)producer->eventfd;
- if (!producer->token)
+ if (!producer->eventfd)
return;
- might_sleep();
-
- if (!try_module_get(THIS_MODULE))
- return; /* nothing in the list anyway */
-
- mutex_lock(&lock);
-
- list_for_each_entry(tmp, &producers, node) {
- if (tmp->token != producer->token)
- continue;
-
- list_for_each_entry(consumer, &consumers, node) {
- if (consumer->token == producer->token) {
- __disconnect(producer, consumer);
- break;
- }
- }
-
- list_del(&producer->node);
- module_put(THIS_MODULE);
- break;
- }
+ guard(mutex)(&lock);
- mutex_unlock(&lock);
+ if (producer->consumer)
+ __disconnect(producer, producer->consumer);
- module_put(THIS_MODULE);
+ WARN_ON_ONCE(xa_erase(&producers, index) != producer);
+ producer->eventfd = NULL;
}
EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer);
/**
* irq_bypass_register_consumer - register IRQ bypass consumer
* @consumer: pointer to consumer structure
+ * @eventfd: pointer to the eventfd context associated with the consumer
*
- * Add the provided IRQ consumer to the list of consumers and connect
- * with any matching token found on the IRQ producer list.
+ * Add the provided IRQ consumer to the set of consumers and connect with the
+ * producer with a matching eventfd, if one exists.
*/
-int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
+ struct eventfd_ctx *eventfd)
{
- struct irq_bypass_consumer *tmp;
+ unsigned long index = (unsigned long)eventfd;
struct irq_bypass_producer *producer;
int ret;
- if (!consumer->token ||
- !consumer->add_producer || !consumer->del_producer)
+ if (WARN_ON_ONCE(consumer->eventfd))
return -EINVAL;
- might_sleep();
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
+ if (!consumer->add_producer || !consumer->del_producer)
+ return -EINVAL;
- mutex_lock(&lock);
+ guard(mutex)(&lock);
- list_for_each_entry(tmp, &consumers, node) {
- if (tmp->token == consumer->token || tmp == consumer) {
- ret = -EBUSY;
- goto out_err;
- }
- }
+ ret = xa_insert(&consumers, index, consumer, GFP_KERNEL);
+ if (ret)
+ return ret;
- list_for_each_entry(producer, &producers, node) {
- if (producer->token == consumer->token) {
- ret = __connect(producer, consumer);
- if (ret)
- goto out_err;
- break;
+ producer = xa_load(&producers, index);
+ if (producer) {
+ ret = __connect(producer, consumer);
+ if (ret) {
+ WARN_ON_ONCE(xa_erase(&consumers, index) != consumer);
+ return ret;
}
}
- list_add(&consumer->node, &consumers);
-
- mutex_unlock(&lock);
-
+ consumer->eventfd = eventfd;
return 0;
-out_err:
- mutex_unlock(&lock);
- module_put(THIS_MODULE);
- return ret;
}
EXPORT_SYMBOL_GPL(irq_bypass_register_consumer);
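Both registration paths above key the xarrays by the eventfd context pointer, so whichever side registers second finds its peer with a single xa_load() instead of walking a global list. A reduced sketch of that scheme with hypothetical demo_* names, shown for the producer side:

#include <linux/eventfd.h>
#include <linux/gfp.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(demo_producers);
static DEFINE_XARRAY(demo_consumers);

/* Register a producer and look up the consumer sharing its eventfd. */
static int demo_add_producer(struct eventfd_ctx *eventfd, void *producer,
			     void **consumer)
{
	unsigned long index = (unsigned long)eventfd;
	int ret;

	ret = xa_insert(&demo_producers, index, producer, GFP_KERNEL);
	if (ret)
		return ret;	/* -EBUSY if this eventfd is already registered */

	*consumer = xa_load(&demo_consumers, index);
	return 0;
}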
@@ -225,42 +192,23 @@ EXPORT_SYMBOL_GPL(irq_bypass_register_consumer);
* irq_bypass_unregister_consumer - unregister IRQ bypass consumer
* @consumer: pointer to consumer structure
*
- * Remove a previously registered IRQ consumer from the list of consumers
- * and disconnect it from any connected IRQ producer.
+ * Remove a previously registered IRQ consumer (note, it's safe to call this
+ * even if registration was unsuccessful). Disconnect from the associated
+ * producer, if one exists.
*/
void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
{
- struct irq_bypass_consumer *tmp;
- struct irq_bypass_producer *producer;
+ unsigned long index = (unsigned long)consumer->eventfd;
- if (!consumer->token)
+ if (!consumer->eventfd)
return;
- might_sleep();
-
- if (!try_module_get(THIS_MODULE))
- return; /* nothing in the list anyway */
-
- mutex_lock(&lock);
-
- list_for_each_entry(tmp, &consumers, node) {
- if (tmp != consumer)
- continue;
-
- list_for_each_entry(producer, &producers, node) {
- if (producer->token == consumer->token) {
- __disconnect(producer, consumer);
- break;
- }
- }
-
- list_del(&consumer->node);
- module_put(THIS_MODULE);
- break;
- }
+ guard(mutex)(&lock);
- mutex_unlock(&lock);
+ if (consumer->producer)
+ __disconnect(consumer->producer, consumer);
- module_put(THIS_MODULE);
+ WARN_ON_ONCE(xa_erase(&consumers, index) != consumer);
+ consumer->eventfd = NULL;
}
EXPORT_SYMBOL_GPL(irq_bypass_unregister_consumer);