author		Soheil Hassas Yeganeh <soheil@google.com>	2020-12-18 14:02:00 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-19 11:18:38 -0800
commit		e8c85328b1e88f4ee7f84a1fdbff2f2c7965e026 (patch)
tree		a1a3e3ac13780408cecc2ef0e2e264e35e1f8b5f	/fs/eventpoll.c
parent		1493c47fb140ddd9e5c291f0c0da3fb03741c766 (diff)
epoll: pull all code between fetch_events and send_event into the loop
This is a no-op change which simplifies the follow up patches.

Link: https://lkml.kernel.org/r/20201106231635.3528496-7-soheil.kdev@gmail.com
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Khazhismel Kumykov <khazhy@google.com>
Cc: Guantao Liu <guantaol@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
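For orientation, the sketch below is a minimal, standalone C approximation of the control-flow shape ep_poll() is left with after this patch: everything between the fetch_events and send_events labels runs inside a single do { ... } while (0) block. The helpers have_events(), busy_poll(), pending_signal() and deliver_events() are made-up stubs standing in for ep_events_available(), ep_busy_loop(), signal_pending() and ep_send_events(); the wait-queue setup and ep->lock handling are only hinted at in comments.

/*
 * Standalone sketch, not kernel code: shows the loop shape only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool have_events(void)    { return true;  }	/* ~ ep_events_available() */
static bool busy_poll(void)      { return false; }	/* ~ ep_busy_loop()        */
static bool pending_signal(void) { return false; }	/* ~ signal_pending()      */
static int  deliver_events(void) { return 1;     }	/* ~ ep_send_events()      */

static int ep_poll_shape(void)
{
	bool eavail;
	int res;

fetch_events:
	do {
		eavail = have_events();
		if (!eavail)
			eavail = busy_poll();

		if (eavail)
			goto send_events;

		if (pending_signal())
			return -1;		/* -EINTR in the real code */

		/*
		 * ... add ourselves to the wait queue, sleep, and on wakeup
		 * assume there may be events; the real code re-checks and
		 * removes the waiter under ep->lock before leaving the loop.
		 */
		eavail = true;
	} while (0);

send_events:
	res = deliver_events();
	if (!res && eavail)
		goto fetch_events;	/* nothing handed over yet: try again */
	return res;
}

int main(void)
{
	printf("ep_poll_shape() -> %d\n", ep_poll_shape());
	return 0;
}

The do { ... } while (0) wrapper is a no-op by itself; the point, per the changelog, is to give the follow-up patches a single block to work with.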
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--	fs/eventpoll.c	41
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 03d0ac076a16..63a7a6e13dfc 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1774,14 +1774,14 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	}
 
 fetch_events:
-	eavail = ep_events_available(ep);
-	if (!eavail)
-		eavail = ep_busy_loop(ep, timed_out);
+	do {
+		eavail = ep_events_available(ep);
+		if (!eavail)
+			eavail = ep_busy_loop(ep, timed_out);
 
-	if (eavail)
-		goto send_events;
+		if (eavail)
+			goto send_events;
 
-	do {
 		if (signal_pending(current))
 			return -EINTR;
 
@@ -1830,21 +1830,22 @@ fetch_events:
 		 * carefully under lock, below.
 		 */
 		eavail = 1;
-	} while (0);
 
-	if (!list_empty_careful(&wait.entry)) {
-		write_lock_irq(&ep->lock);
-		/*
-		 * If the thread timed out and is not on the wait queue, it
-		 * means that the thread was woken up after its timeout expired
-		 * before it could reacquire the lock. Thus, when wait.entry is
-		 * empty, it needs to harvest events.
-		 */
-		if (timed_out)
-			eavail = list_empty(&wait.entry);
-		__remove_wait_queue(&ep->wq, &wait);
-		write_unlock_irq(&ep->lock);
-	}
+		if (!list_empty_careful(&wait.entry)) {
+			write_lock_irq(&ep->lock);
+			/*
+			 * If the thread timed out and is not on the wait queue,
+			 * it means that the thread was woken up after its
+			 * timeout expired before it could reacquire the lock.
+			 * Thus, when wait.entry is empty, it needs to harvest
+			 * events.
+			 */
+			if (timed_out)
+				eavail = list_empty(&wait.entry);
+			__remove_wait_queue(&ep->wq, &wait);
+			write_unlock_irq(&ep->lock);
+		}
+	} while (0);
 
 send_events:
 	/*