author	Al Viro <viro@zeniv.linux.org.uk>	2020-09-26 18:09:29 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2020-10-25 20:02:01 -0400
commit	e3e096e7fc30c28cfc53fe8a1e265d65b85f60bb (patch)
tree	d9ceffa18096934ad35460bd2b3b2915f265bc76 /fs/eventpoll.c
parent	57804b1cc4616780c72a2d0930d1bd0d5bd3ed4c (diff)
ep_insert(): don't open-code ep_remove() on failure exits
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
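
The change in miniature: instead of a goto ladder that open-codes each undo step (unregister the poll wait, unlink from the file's hook list, erase from the rbtree, unregister the wakeup source, free), ep_insert() now does enough setup up front, including the epoll_watches accounting, that a single call to ep_remove() can tear the item down on any later failure. A minimal sketch of that pattern, with hypothetical names (table_insert(), table_remove() and risky_step() are illustrations, not the kernel's API):

	#include <errno.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for eventpoll/epitem; illustration only. */
	struct table { long nr_watches; };
	struct item  { struct table *owner; };

	/* Single teardown path: undoes everything a successful insert set up. */
	static void table_remove(struct table *t, struct item *i)
	{
		t->nr_watches--;	/* pairs with the early increment below */
		free(i);
	}

	static int risky_step(struct item *i)
	{
		(void)i;
		return 0;		/* may fail under memory pressure */
	}

	static int table_insert(struct table *t)
	{
		struct item *i = calloc(1, sizeof(*i));

		if (!i)
			return -ENOMEM;	/* nothing registered yet: plain free path */
		i->owner = t;

		/*
		 * Account for the watch before the first exit that uses the
		 * shared teardown, so table_remove() can decrement it
		 * unconditionally (this mirrors moving the epoll_watches
		 * increment earlier in the patch below).
		 */
		t->nr_watches++;

		if (risky_step(i)) {
			table_remove(t, i);	/* one call replaces the goto ladder */
			return -ENOMEM;
		}
		return 0;
	}

The only exit that still frees directly is the one before anything has been registered, where a plain kmem_cache_free() remains correct.
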
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--	fs/eventpoll.c	51
1 file changed, 14 insertions(+), 37 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index f9c567af1f5f..c987b61701e4 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1384,12 +1384,16 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
epi->next = EP_UNACTIVE_PTR;
if (epi->event.events & EPOLLWAKEUP) {
error = ep_create_wakeup_source(epi);
- if (error)
- goto error_create_wakeup_source;
+ if (error) {
+ kmem_cache_free(epi_cache, epi);
+ return error;
+ }
} else {
RCU_INIT_POINTER(epi->ws, NULL);
}
+ atomic_long_inc(&ep->user->epoll_watches);
+
/* Add the current item to the list of active epoll hooks for this file */
spin_lock(&tfile->f_lock);
list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
@@ -1402,9 +1406,10 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
ep_rbtree_insert(ep, epi);
/* now check if we've created too many backpaths */
- error = -EINVAL;
- if (full_check && reverse_path_check())
- goto error_remove_epi;
+ if (unlikely(full_check && reverse_path_check())) {
+ ep_remove(ep, epi);
+ return -EINVAL;
+ }
/* Initialize the poll table using the queue callback */
epq.epi = epi;
@@ -1424,9 +1429,10 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
* install process. Namely, an allocation for a wait queue failed due
* to high memory pressure.
*/
- error = -ENOMEM;
- if (!epq.epi)
- goto error_unregister;
+ if (unlikely(!epq.epi)) {
+ ep_remove(ep, epi);
+ return -ENOMEM;
+ }
/* We have to drop the new item inside our item list to keep track of it */
write_lock_irq(&ep->lock);
@@ -1448,40 +1454,11 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
write_unlock_irq(&ep->lock);
- atomic_long_inc(&ep->user->epoll_watches);
-
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(ep, NULL);
return 0;
-
-error_unregister:
- ep_unregister_pollwait(ep, epi);
-error_remove_epi:
- spin_lock(&tfile->f_lock);
- list_del_rcu(&epi->fllink);
- spin_unlock(&tfile->f_lock);
-
- rb_erase_cached(&epi->rbn, &ep->rbr);
-
- /*
- * We need to do this because an event could have been arrived on some
- * allocated wait queue. Note that we don't care about the ep->ovflist
- * list, since that is used/cleaned only inside a section bound by "mtx".
- * And ep_insert() is called with "mtx" held.
- */
- write_lock_irq(&ep->lock);
- if (ep_is_linked(epi))
- list_del_init(&epi->rdllink);
- write_unlock_irq(&ep->lock);
-
- wakeup_source_unregister(ep_wakeup_source(epi));
-
-error_create_wakeup_source:
- kmem_cache_free(epi_cache, epi);
-
- return error;
}
/*
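
The rewritten error branches also gain unlikely() hints, marking the failure paths as cold so the compiler lays out the success path as straight-line code. The hint is conventionally built on __builtin_expect; the kernel's definition lives in include/linux/compiler.h, and a minimal userspace sketch (register_watch() is a hypothetical example, not kernel code) looks like this:

	/* Common definition of the hint via the GCC/Clang builtin. */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	int register_watch(int fd)
	{
		if (unlikely(fd < 0))	/* tell the compiler this branch is cold */
			return -1;
		return 0;
	}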