Diffstat (limited to 'virt/kvm/eventfd.c')
-rw-r--r--  virt/kvm/eventfd.c  295
1 file changed, 181 insertions(+), 114 deletions(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 2ad013b8bde9..0e8b5277be3b 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -28,7 +28,7 @@
#include <kvm/iodev.h>
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
static struct workqueue_struct *irqfd_cleanup_wq;
@@ -55,6 +55,15 @@ irqfd_inject(struct work_struct *work)
irqfd->gsi, 1, false);
}
+static void irqfd_resampler_notify(struct kvm_kernel_irqfd_resampler *resampler)
+{
+ struct kvm_kernel_irqfd *irqfd;
+
+ list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
+ srcu_read_lock_held(&resampler->kvm->irq_srcu))
+ eventfd_signal(irqfd->resamplefd);
+}
+
/*
* Since resampler irqfds share an IRQ source ID, we de-assert once
* then notify all of the resampler irqfds using this GSI. We can't
@@ -65,7 +74,6 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
struct kvm_kernel_irqfd_resampler *resampler;
struct kvm *kvm;
- struct kvm_kernel_irqfd *irqfd;
int idx;
resampler = container_of(kian,
@@ -76,10 +84,7 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
resampler->notifier.gsi, 0, false);
idx = srcu_read_lock(&kvm->irq_srcu);
-
- list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
- eventfd_signal(irqfd->resamplefd, 1);
-
+ irqfd_resampler_notify(resampler);
srcu_read_unlock(&kvm->irq_srcu, idx);
}
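
For context, a hedged userspace sketch (not part of this patch) of the ABI the resampler path above serves: the VMM asserts the level-triggered line by signaling the trigger eventfd, and irqfd_resampler_ack() signals resamplefd on guest EOI so the VMM can re-check the device and possibly re-assert. The vm_fd and the attach_resampler_irqfd() helper are assumptions for illustration.

#include <linux/kvm.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

/* Hypothetical helper; vm_fd is assumed to come from KVM_CREATE_VM. */
static int attach_resampler_irqfd(int vm_fd, unsigned int gsi)
{
	int trigger = eventfd(0, EFD_CLOEXEC);   /* VMM signals: assert IRQ */
	int resample = eventfd(0, EFD_CLOEXEC);  /* KVM signals: de-asserted, re-check */
	struct kvm_irqfd irqfd = {
		.fd = trigger,
		.resamplefd = resample,
		.gsi = gsi,
		.flags = KVM_IRQFD_FLAG_RESAMPLE,
	};

	if (trigger < 0 || resample < 0)
		return -1;

	return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}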
@@ -92,14 +97,19 @@ irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
mutex_lock(&kvm->irqfds.resampler_lock);
list_del_rcu(&irqfd->resampler_link);
- synchronize_srcu(&kvm->irq_srcu);
if (list_empty(&resampler->list)) {
- list_del(&resampler->link);
+ list_del_rcu(&resampler->link);
kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
+ /*
+ * synchronize_srcu_expedited(&kvm->irq_srcu) already called
+ * in kvm_unregister_irq_ack_notifier().
+ */
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
resampler->notifier.gsi, 0, false);
kfree(resampler);
+ } else {
+ synchronize_srcu_expedited(&kvm->irq_srcu);
}
mutex_unlock(&kvm->irqfds.resampler_lock);
@@ -117,7 +127,7 @@ irqfd_shutdown(struct work_struct *work)
u64 cnt;
/* Make sure irqfd has been initialized in assign path. */
- synchronize_srcu(&kvm->irq_srcu);
+ synchronize_srcu_expedited(&kvm->irq_srcu);
/*
* Synchronize with the wait-queue and unhook ourselves to prevent
@@ -139,7 +149,7 @@ irqfd_shutdown(struct work_struct *work)
/*
* It is now safe to release the object's resources
*/
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
eventfd_ctx_put(irqfd->eventfd);
@@ -194,6 +204,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
int ret = 0;
if (flags & EPOLLIN) {
+ /*
+ * WARNING: Do NOT take irqfds.lock in any path except EPOLLHUP,
+ * as KVM holds irqfds.lock when registering the irqfd with the
+ * eventfd.
+ */
u64 cnt;
eventfd_ctx_do_read(irqfd->eventfd, &cnt);
@@ -215,6 +230,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
/* The eventfd is closing, detach from KVM */
unsigned long iflags;
+ /*
+ * Taking irqfds.lock is safe here, as KVM holds a reference to
+ * the eventfd when registering the irqfd, i.e. this path can't
+ * be reached while kvm_irqfd_add() is running.
+ */
spin_lock_irqsave(&kvm->irqfds.lock, iflags);
/*
@@ -235,22 +255,14 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
return ret;
}
-static void
-irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
- poll_table *pt)
-{
- struct kvm_kernel_irqfd *irqfd =
- container_of(pt, struct kvm_kernel_irqfd, pt);
- add_wait_queue_priority(wqh, &irqfd->wait);
-}
-
-/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
int n_entries;
+ lockdep_assert_held(&kvm->irqfds.lock);
+
n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
write_seqcount_begin(&irqfd->irq_entry_sc);
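
The seqcount writer above pairs with lockless readers elsewhere in this file; a minimal reconstructed sketch of the read side (the pattern irqfd_wakeup() uses to snapshot irq_entry; demo_snapshot_route() is a hypothetical name, not code from this patch):

#include <linux/kvm_irqfd.h>
#include <linux/seqlock.h>

/* Sketch: retry the snapshot if irqfd_update()'s writer raced with us. */
static void demo_snapshot_route(struct kvm_kernel_irqfd *irqfd,
				struct kvm_kernel_irq_routing_entry *entry)
{
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&irqfd->irq_entry_sc);
		*entry = irqfd->irq_entry;
	} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
}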
@@ -264,7 +276,64 @@ static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
write_seqcount_end(&irqfd->irq_entry_sc);
}
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+struct kvm_irqfd_pt {
+ struct kvm_kernel_irqfd *irqfd;
+ struct kvm *kvm;
+ poll_table pt;
+ int ret;
+};
+
+static void kvm_irqfd_register(struct file *file, wait_queue_head_t *wqh,
+ poll_table *pt)
+{
+ struct kvm_irqfd_pt *p = container_of(pt, struct kvm_irqfd_pt, pt);
+ struct kvm_kernel_irqfd *irqfd = p->irqfd;
+ struct kvm *kvm = p->kvm;
+
+ /*
+ * Note, irqfds.lock protects the irqfd's irq_entry, i.e. its routing,
+ * and irqfds.items. It does NOT protect registering with the eventfd.
+ */
+ spin_lock_irq(&kvm->irqfds.lock);
+
+ /*
+ * Initialize the routing information prior to adding the irqfd to the
+ * eventfd's waitqueue, as irqfd_wakeup() can be invoked as soon as the
+ * irqfd is registered.
+ */
+ irqfd_update(kvm, irqfd);
+
+ /*
+ * Add the irqfd as a priority waiter on the eventfd, with a custom
+ * wake-up handler, so that KVM *and only KVM* is notified whenever the
+ * underlying eventfd is signaled.
+ */
+ init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
+
+ /*
+ * Temporarily lie to lockdep about holding irqfds.lock to avoid a
+ * false positive regarding potential deadlock with irqfd_wakeup()
+ * (see irqfd_wakeup() for details).
+ *
+ * Adding to the wait queue will fail if there is already a priority
+ * waiter, i.e. if the eventfd is associated with another irqfd (in any
+ * VM). Note, kvm_irqfd_deassign() waits for all in-flight shutdown
+ * jobs to complete, i.e. ensures the irqfd has been removed from the
+ * eventfd's waitqueue before returning to userspace.
+ */
+ spin_release(&kvm->irqfds.lock.dep_map, _RET_IP_);
+ p->ret = add_wait_queue_priority_exclusive(wqh, &irqfd->wait);
+ spin_acquire(&kvm->irqfds.lock.dep_map, 0, 0, _RET_IP_);
+ if (p->ret)
+ goto out;
+
+ list_add_tail(&irqfd->list, &kvm->irqfds.items);
+
+out:
+ spin_unlock_irq(&kvm->irqfds.lock);
+}
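
kvm_irqfd_register() is never called directly: it is installed as the poll_table queue proc, and vfs_poll() on the eventfd invokes it with the eventfd's waitqueue head. A minimal sketch of that pattern, assuming only the generic <linux/poll.h> helpers (the demo_* names are hypothetical):

#include <linux/poll.h>

struct demo_pt {
	poll_table pt;
	int ret;
};

/* Called back from vfs_poll() -> f_op->poll() -> poll_wait(). */
static void demo_queue_proc(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct demo_pt *p = container_of(pt, struct demo_pt, pt);

	/* ...attach a wait_queue_entry_t with a custom wake function to wqh... */
	p->ret = 0;
}

static int demo_poll_attach(struct file *file)
{
	struct demo_pt p = { .ret = -ENOENT };	/* stays -ENOENT if poll_wait() is skipped */

	init_poll_funcptr(&p.pt, demo_queue_proc);
	vfs_poll(file, &p.pt);
	return p.ret;	/* set by the queue proc, as irqfd_pt.ret is below */
}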
+
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
struct irq_bypass_consumer *cons)
{
@@ -275,27 +344,20 @@ void __attribute__((weak)) kvm_arch_irq_bypass_start(
{
}
-int __attribute__((weak)) kvm_arch_update_irqfd_routing(
- struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set)
+void __weak kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
+ struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new)
{
- return 0;
-}
-bool __attribute__((weak)) kvm_arch_irqfd_route_changed(
- struct kvm_kernel_irq_routing_entry *old,
- struct kvm_kernel_irq_routing_entry *new)
-{
- return true;
}
#endif
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
- struct kvm_kernel_irqfd *irqfd, *tmp;
- struct fd f;
+ struct kvm_kernel_irqfd *irqfd;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
+ struct kvm_irqfd_pt irqfd_pt;
int ret;
__poll_t events;
int idx;
@@ -317,16 +379,16 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
- f = fdget(args->fd);
- if (!f.file) {
+ CLASS(fd, f)(args->fd);
+ if (fd_empty(f)) {
ret = -EBADF;
goto out;
}
- eventfd = eventfd_ctx_fileget(f.file);
+ eventfd = eventfd_ctx_fileget(fd_file(f));
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
- goto fail;
+ goto out;
}
irqfd->eventfd = eventfd;
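
The fdget()/fdput() pair above becomes a scope-based guard; a hedged sketch of the CLASS(fd) idiom from <linux/file.h> (demo_use_fd() is hypothetical), which is also why the explicit fdput() calls disappear from the error paths later in this patch:

#include <linux/file.h>
#include <linux/fs.h>

/* CLASS(fd, f)(ufd) is fdget(ufd) plus an automatic fdput() when `f`
 * goes out of scope, so early returns need no cleanup label. */
static int demo_use_fd(int ufd)
{
	CLASS(fd, f)(ufd);

	if (fd_empty(f))			/* no struct file behind this fd */
		return -EBADF;

	return vfs_fsync(fd_file(f), 0);	/* fd_file() unwraps the struct file * */
}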
@@ -368,76 +430,67 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
resampler->notifier.irq_acked = irqfd_resampler_ack;
INIT_LIST_HEAD(&resampler->link);
- list_add(&resampler->link, &kvm->irqfds.resampler_list);
+ list_add_rcu(&resampler->link, &kvm->irqfds.resampler_list);
kvm_register_irq_ack_notifier(kvm,
&resampler->notifier);
irqfd->resampler = resampler;
}
list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
- synchronize_srcu(&kvm->irq_srcu);
+ synchronize_srcu_expedited(&kvm->irq_srcu);
mutex_unlock(&kvm->irqfds.resampler_lock);
}
/*
- * Install our own custom wake-up handling so we are notified via
- * a callback whenever someone signals the underlying eventfd
+ * Set the irqfd routing and add it to KVM's list before registering
+ * the irqfd with the eventfd, so that the routing information is valid
+ * and stays valid, e.g. if there are GSI routing changes, prior to
+ * making the irqfd visible, i.e. before it might be signaled.
+ *
+ * Note, holding SRCU ensures a stable read of routing information, and
+ * also prevents irqfd_shutdown() from freeing the irqfd before it's
+ * fully initialized.
*/
- init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
- init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);
-
- spin_lock_irq(&kvm->irqfds.lock);
-
- ret = 0;
- list_for_each_entry(tmp, &kvm->irqfds.items, list) {
- if (irqfd->eventfd != tmp->eventfd)
- continue;
- /* This fd is used for another irq already. */
- ret = -EBUSY;
- spin_unlock_irq(&kvm->irqfds.lock);
- goto fail;
- }
-
idx = srcu_read_lock(&kvm->irq_srcu);
- irqfd_update(kvm, irqfd);
-
- list_add_tail(&irqfd->list, &kvm->irqfds.items);
-
- spin_unlock_irq(&kvm->irqfds.lock);
/*
- * Check if there was an event already pending on the eventfd
- * before we registered, and trigger it as if we didn't miss it.
+ * Register the irqfd with the eventfd by polling on the eventfd, and
+ * simultaneously add the irqfd to KVM's list. If there was an event
+ * pending on the eventfd prior to registering, manually trigger IRQ
+ * injection.
*/
- events = vfs_poll(f.file, &irqfd->pt);
+ irqfd_pt.irqfd = irqfd;
+ irqfd_pt.kvm = kvm;
+ init_poll_funcptr(&irqfd_pt.pt, kvm_irqfd_register);
+
+ events = vfs_poll(fd_file(f), &irqfd_pt.pt);
+
+ ret = irqfd_pt.ret;
+ if (ret)
+ goto fail_poll;
if (events & EPOLLIN)
schedule_work(&irqfd->inject);
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
if (kvm_arch_has_irq_bypass()) {
- irqfd->consumer.token = (void *)irqfd->eventfd;
irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
irqfd->consumer.start = kvm_arch_irq_bypass_start;
- ret = irq_bypass_register_consumer(&irqfd->consumer);
+ ret = irq_bypass_register_consumer(&irqfd->consumer, irqfd->eventfd);
if (ret)
- pr_info("irq bypass consumer (token %p) registration fails: %d\n",
- irqfd->consumer.token, ret);
+ pr_info("irq bypass consumer (eventfd %p) registration fails: %d\n",
+ irqfd->eventfd, ret);
}
#endif
srcu_read_unlock(&kvm->irq_srcu, idx);
-
- /*
- * do not drop the file until the irqfd is fully initialized, otherwise
- * we might race against the EPOLLHUP
- */
- fdput(f);
return 0;
+fail_poll:
+ srcu_read_unlock(&kvm->irq_srcu, idx);
fail:
if (irqfd->resampler)
irqfd_resampler_shutdown(irqfd);
@@ -448,8 +501,6 @@ fail:
if (eventfd && !IS_ERR(eventfd))
eventfd_ctx_put(eventfd);
- fdput(f);
-
out:
kfree(irqfd);
return ret;
@@ -463,8 +514,8 @@ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
if (gsi != -1)
- hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
- link)
+ hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
+ link, srcu_read_lock_held(&kvm->irq_srcu))
if (kian->gsi == gsi) {
srcu_read_unlock(&kvm->irq_srcu, idx);
return true;
@@ -474,14 +525,14 @@ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
return false;
}
-EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_irq_has_notifier);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
struct kvm_irq_ack_notifier *kian;
- hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
- link)
+ hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
+ link, srcu_read_lock_held(&kvm->irq_srcu))
if (kian->gsi == gsi)
kian->irq_acked(kian);
}
@@ -514,24 +565,10 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
mutex_lock(&kvm->irq_lock);
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
- synchronize_srcu(&kvm->irq_srcu);
+ synchronize_srcu_expedited(&kvm->irq_srcu);
kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
-#endif
-void
-kvm_eventfd_init(struct kvm *kvm)
-{
-#ifdef CONFIG_HAVE_KVM_IRQFD
- spin_lock_init(&kvm->irqfds.lock);
- INIT_LIST_HEAD(&kvm->irqfds.items);
- INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
- mutex_init(&kvm->irqfds.resampler_lock);
-#endif
- INIT_LIST_HEAD(&kvm->ioeventfds);
-}
-
-#ifdef CONFIG_HAVE_KVM_IRQFD
/*
* shutdown any irqfd's that match fd+gsi
*/
@@ -613,7 +650,7 @@ kvm_irqfd_release(struct kvm *kvm)
/*
* Take note of a change in irq routing.
- * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
+ * Caller must invoke synchronize_srcu_expedited(&kvm->irq_srcu) afterwards.
*/
void kvm_irq_routing_update(struct kvm *kvm)
{
@@ -622,27 +659,47 @@ void kvm_irq_routing_update(struct kvm *kvm)
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
/* Under irqfds.lock, so can read irq_entry safely */
struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;
#endif
irqfd_update(kvm, irqfd);
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
- if (irqfd->producer &&
- kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
- int ret = kvm_arch_update_irqfd_routing(
- irqfd->kvm, irqfd->producer->irq,
- irqfd->gsi, 1);
- WARN_ON(ret);
- }
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+ if (irqfd->producer)
+ kvm_arch_update_irqfd_routing(irqfd, &old, &irqfd->irq_entry);
#endif
}
spin_unlock_irq(&kvm->irqfds.lock);
}
+bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+ unsigned int irqchip,
+ unsigned int pin)
+{
+ struct kvm_kernel_irqfd_resampler *resampler;
+ int gsi, idx;
+
+ idx = srcu_read_lock(&kvm->irq_srcu);
+ gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+ if (gsi != -1) {
+ list_for_each_entry_srcu(resampler,
+ &kvm->irqfds.resampler_list, link,
+ srcu_read_lock_held(&kvm->irq_srcu)) {
+ if (resampler->notifier.gsi == gsi) {
+ irqfd_resampler_notify(resampler);
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+ return true;
+ }
+ }
+ }
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+
+ return false;
+}
+
/*
* create a host-wide workqueue for issuing deferred shutdown requests
* aggregated from all vm* instances. We need our own isolated
@@ -650,7 +707,7 @@ void kvm_irq_routing_update(struct kvm *kvm)
*/
int kvm_irqfd_init(void)
{
- irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
+ irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", WQ_PERCPU, 0);
if (!irqfd_cleanup_wq)
return -ENOMEM;
@@ -752,7 +809,7 @@ ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
if (!ioeventfd_in_range(p, addr, len, val))
return -EOPNOTSUPP;
- eventfd_signal(p->eventfd, 1);
+ eventfd_signal(p->eventfd);
return 0;
}
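
For reference, a hedged userspace sketch of the registration that makes ioeventfd_write() fire (attach_ioeventfd() and its parameters are assumptions): a matching guest write is completed in-kernel by signaling the eventfd instead of exiting to userspace.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helper: any 4-byte guest write to `addr` signals `efd`. */
static int attach_ioeventfd(int vm_fd, __u64 addr, int efd)
{
	struct kvm_ioeventfd io = {
		.addr = addr,
		.len = 4,
		.fd = efd,
		.flags = 0,	/* no KVM_IOEVENTFD_FLAG_DATAMATCH: match any value */
	};

	return ioctl(vm_fd, KVM_IOEVENTFD, &io);
}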
@@ -855,9 +912,9 @@ static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
unlock_fail:
mutex_unlock(&kvm->slots_lock);
+ kfree(p);
fail:
- kfree(p);
eventfd_ctx_put(eventfd);
return ret;
@@ -867,7 +924,7 @@ static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_ioeventfd *args)
{
- struct _ioeventfd *p, *tmp;
+ struct _ioeventfd *p;
struct eventfd_ctx *eventfd;
struct kvm_io_bus *bus;
int ret = -ENOENT;
@@ -881,8 +938,7 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
mutex_lock(&kvm->slots_lock);
- list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
-
+ list_for_each_entry(p, &kvm->ioeventfds, list) {
if (p->bus_idx != bus_idx ||
p->eventfd != eventfd ||
p->addr != args->addr ||
@@ -897,7 +953,6 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
bus = kvm_get_bus(kvm, bus_idx);
if (bus)
bus->ioeventfd_count--;
- ioeventfd_release(p);
ret = 0;
break;
}
@@ -980,3 +1035,15 @@ kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
return kvm_assign_ioeventfd(kvm, args);
}
+
+void
+kvm_eventfd_init(struct kvm *kvm)
+{
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+ spin_lock_init(&kvm->irqfds.lock);
+ INIT_LIST_HEAD(&kvm->irqfds.items);
+ INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
+ mutex_init(&kvm->irqfds.resampler_lock);
+#endif
+ INIT_LIST_HEAD(&kvm->ioeventfds);
+}