Diffstat (limited to 'virt/kvm/eventfd.c')
 virt/kvm/eventfd.c | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 229570059a1b..249ba5b72e9b 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -97,18 +97,19 @@ irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
mutex_lock(&kvm->irqfds.resampler_lock);
list_del_rcu(&irqfd->resampler_link);
- synchronize_srcu(&kvm->irq_srcu);
if (list_empty(&resampler->list)) {
list_del_rcu(&resampler->link);
kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
/*
- * synchronize_srcu(&kvm->irq_srcu) already called
+ * synchronize_srcu_expedited(&kvm->irq_srcu) already called
* in kvm_unregister_irq_ack_notifier().
*/
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
resampler->notifier.gsi, 0, false);
kfree(resampler);
+ } else {
+ synchronize_srcu_expedited(&kvm->irq_srcu);
}
mutex_unlock(&kvm->irqfds.resampler_lock);
@@ -126,7 +127,7 @@ irqfd_shutdown(struct work_struct *work)
u64 cnt;
/* Make sure irqfd has been initialized in assign path. */
- synchronize_srcu(&kvm->irq_srcu);
+ synchronize_srcu_expedited(&kvm->irq_srcu);
/*
* Synchronize with the wait-queue and unhook ourselves to prevent
@@ -303,7 +304,6 @@ static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
struct kvm_kernel_irqfd *irqfd, *tmp;
- struct fd f;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
int ret;
__poll_t events;
@@ -326,16 +326,16 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
- f = fdget(args->fd);
- if (!f.file) {
+ CLASS(fd, f)(args->fd);
+ if (fd_empty(f)) {
ret = -EBADF;
goto out;
}
- eventfd = eventfd_ctx_fileget(f.file);
+ eventfd = eventfd_ctx_fileget(fd_file(f));
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
- goto fail;
+ goto out;
}
irqfd->eventfd = eventfd;
@@ -384,7 +384,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
}
list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
- synchronize_srcu(&kvm->irq_srcu);
+ synchronize_srcu_expedited(&kvm->irq_srcu);
mutex_unlock(&kvm->irqfds.resampler_lock);
}
@@ -419,7 +419,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
* Check if there was an event already pending on the eventfd
* before we registered, and trigger it as if we didn't miss it.
*/
- events = vfs_poll(f.file, &irqfd->pt);
+ events = vfs_poll(fd_file(f), &irqfd->pt);
if (events & EPOLLIN)
schedule_work(&irqfd->inject);
@@ -439,12 +439,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
#endif
srcu_read_unlock(&kvm->irq_srcu, idx);
-
- /*
- * do not drop the file until the irqfd is fully initialized, otherwise
- * we might race against the EPOLLHUP
- */
- fdput(f);
return 0;
fail:
@@ -457,8 +451,6 @@ fail:
if (eventfd && !IS_ERR(eventfd))
eventfd_ctx_put(eventfd);
- fdput(f);
-
out:
kfree(irqfd);
return ret;
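Note on the CLASS(fd, f) conversion in the hunks above: the kernel's scope-based cleanup helpers tie fdput() to the lifetime of the local variable, so every exit path releases the file automatically and the explicit fdput() calls, the fail-path file bookkeeping, and the EPOLLHUP-race comment can all go. Below is a minimal user-space sketch of the underlying idiom, built directly on the GCC/Clang __attribute__((cleanup)) feature; SCOPED_FD and close_fd() are illustrative names invented for this sketch, not kernel APIs.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Runs automatically when the guarded variable leaves scope. */
static void close_fd(int *fdp)
{
	if (*fdp >= 0)
		close(*fdp);
}

/*
 * Rough user-space analogue of CLASS(fd, var)(fd): acquire the resource
 * in the declaration, release it on every exit from the enclosing scope.
 * SCOPED_FD is a hypothetical macro for this sketch only.
 */
#define SCOPED_FD(var, path) \
	int var __attribute__((cleanup(close_fd))) = open((path), O_RDONLY)

int main(void)
{
	SCOPED_FD(f, "/etc/hostname");

	if (f < 0) {
		perror("open");
		return 1;		/* guard sees -1, nothing to close */
	}

	char buf[128];
	ssize_t n = read(f, buf, sizeof(buf) - 1);
	if (n < 0) {
		perror("read");
		return 1;		/* close_fd() still runs here */
	}
	buf[n] = '\0';
	fputs(buf, stdout);
	return 0;			/* ...and here, with no explicit close() */
}

In the kernel itself the fd class is declared with DEFINE_CLASS() in include/linux/file.h and pairs fdget() with fdput() in the same acquire-in-declaration, release-on-scope-exit fashion, which is why dropping the manual fdput() calls stays safe even on the error paths that still jump to the fail and out labels.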
@@ -523,7 +515,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
mutex_lock(&kvm->irq_lock);
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
- synchronize_srcu(&kvm->irq_srcu);
+ synchronize_srcu_expedited(&kvm->irq_srcu);
kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
@@ -608,7 +600,7 @@ kvm_irqfd_release(struct kvm *kvm)
/*
* Take note of a change in irq routing.
- * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
+ * Caller must invoke synchronize_srcu_expedited(&kvm->irq_srcu) afterwards.
*/
void kvm_irq_routing_update(struct kvm *kvm)
{
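The remaining changes swap synchronize_srcu() for synchronize_srcu_expedited() on kvm->irq_srcu. Both return only once every SRCU reader that began before the call has finished; the expedited variant drives the grace period to completion much faster at the cost of extra CPU work, which suits these teardown and routing-update paths where the wait is user-visible. A kernel-style sketch of the general pattern follows; struct demo_entry, demo_list, example_reader() and example_update() are illustrative names invented for this sketch, not code from this file.

#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_entry {
	struct list_head link;
	int gsi;
};

static LIST_HEAD(demo_list);
DEFINE_STATIC_SRCU(demo_srcu);

/* Reader side: cheap, never blocks the updater directly. */
static int example_reader(void)
{
	struct demo_entry *e;
	int idx, last = -1;

	idx = srcu_read_lock(&demo_srcu);
	list_for_each_entry_srcu(e, &demo_list, link,
				 srcu_read_lock_held(&demo_srcu))
		last = e->gsi;
	srcu_read_unlock(&demo_srcu, idx);
	return last;
}

/* Updater side: unlink, wait out the readers, then free. */
static void example_update(struct demo_entry *e)
{
	list_del_rcu(&e->link);
	/*
	 * Same guarantee as synchronize_srcu(), but the grace period is
	 * pushed to completion quickly at higher CPU cost, the trade the
	 * patch above makes for kvm->irq_srcu.
	 */
	synchronize_srcu_expedited(&demo_srcu);
	kfree(e);
}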