author     Linus Torvalds <torvalds@linux-foundation.org>   2019-12-07 12:54:26 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-12-07 12:54:26 -0800
commit     f467a6a66419a18f782f87507464a5dedc942dec (patch)
tree       2c57def4070ff2e823f47db901b3dcfb87368981
parent     1b6b26ae7053e4914181eedf70f2d92c12abda8a (diff)
pipe: fix and clarify pipe read wakeup logic
This is the read side version of the previous commit: it simplifies the
logic to only wake up waiting writers when necessary, and makes sure to
use a synchronous wakeup.  This time not so much for GNU make jobserver
reasons (that pipe never fills up), but simply to get the writer going
quickly again.

A bit less verbose commentary this time, if only because I assume that
the write side commentary isn't going to be ignored if you touch this
code.

Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  fs/pipe.c  31
1 file changed, 18 insertions(+), 13 deletions(-)
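The scenario this patch targets is easy to reproduce from userspace. The following is a minimal sketch, not part of the commit, with every detail purely illustrative: a child process blocks in write() on a full pipe, and each read() by the parent that frees space is the point where pipe_read() now issues a synchronous EPOLLOUT wakeup to get that writer running again.

#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	static char buf[65536];
	int fds[2];
	pid_t pid;

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}

	pid = fork();
	if (pid == 0) {
		/* Child: keep writing; once the pipe is full, write()
		 * sleeps in the kernel until the parent frees space. */
		close(fds[0]);
		memset(buf, 'x', sizeof(buf));
		for (;;) {
			if (write(fds[1], buf, sizeof(buf)) < 0)
				_exit(1);
		}
	}

	/* Parent: give the child time to fill the pipe, then drain it.
	 * Each read that frees space is where pipe_read() now does the
	 * synchronous EPOLLOUT wakeup of the blocked writer. */
	close(fds[1]);
	sleep(1);
	for (int i = 0; i < 4; i++) {
		ssize_t n = read(fds[0], buf, sizeof(buf));
		printf("read %zd bytes; blocked writer can run again\n", n);
	}

	kill(pid, SIGKILL);
	waitpid(pid, NULL, 0);
	return 0;
}

Built with a plain "cc demo.c", the parent prints one line per read while the kernel keeps waking the child to refill the pipe; whether those wakeups happen synchronously and only when needed is exactly what the hunks below change.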
diff --git a/fs/pipe.c b/fs/pipe.c
index efe93384c61c..bcc2192d33e2 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -276,16 +276,25 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
size_t total_len = iov_iter_count(to);
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
- int do_wakeup;
+ bool was_full;
ssize_t ret;
/* Null read succeeds. */
if (unlikely(total_len == 0))
return 0;
- do_wakeup = 0;
ret = 0;
__pipe_lock(pipe);
+
+ /*
+ * We only wake up writers if the pipe was full when we started
+ * reading in order to avoid unnecessary wakeups.
+ *
+ * But when we do wake up writers, we do so using a sync wakeup
+ * (WF_SYNC), because we want them to get going and generate more
+ * data for us.
+ */
+ was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
for (;;) {
unsigned int head = pipe->head;
unsigned int tail = pipe->tail;
@@ -324,19 +333,11 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
}
if (!buf->len) {
- bool wake;
pipe_buf_release(pipe, buf);
spin_lock_irq(&pipe->wait.lock);
tail++;
pipe->tail = tail;
- do_wakeup = 1;
- wake = head - (tail - 1) == pipe->max_usage / 2;
- if (wake)
- wake_up_locked_poll(
- &pipe->wait, EPOLLOUT | EPOLLWRNORM);
spin_unlock_irq(&pipe->wait.lock);
- if (wake)
- kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
total_len -= chars;
if (!total_len)
@@ -365,13 +366,17 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
ret = -ERESTARTSYS;
break;
}
+ if (was_full) {
+ wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+ }
pipe_wait(pipe);
+ was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
}
__pipe_unlock(pipe);
- /* Signal writers asynchronously that there is more room. */
- if (do_wakeup) {
- wake_up_interruptible_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
+ if (was_full) {
+ wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
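Stripped of the kernel specifics, the heart of the change is the was_full bookkeeping: the reader only wakes writers when the pipe was actually full, because only then can a writer be blocked waiting for space. Below is a hedged, self-contained sketch of the same idea on a generic bounded buffer; it is not kernel code, and every name in it (struct ring, ring_pop, ring_push, ...) is made up for illustration.

#include <pthread.h>
#include <stdbool.h>

#define RING_SIZE 16	/* power of two, so free-running counters wrap safely */

struct ring {
	pthread_mutex_t lock;
	pthread_cond_t space;	/* producers wait here while the ring is full */
	pthread_cond_t data;	/* consumers wait here while the ring is empty */
	unsigned int head, tail;
	int items[RING_SIZE];
};

static bool ring_full(const struct ring *r)
{
	return r->head - r->tail == RING_SIZE;
}

/* Consumer side, mirroring pipe_read(): only wake producers when the ring
 * was full at the moment we took an item out, i.e. when one of them could
 * actually be blocked waiting for space. */
int ring_pop(struct ring *r)
{
	bool was_full;
	int val;

	pthread_mutex_lock(&r->lock);
	while (r->head == r->tail)
		pthread_cond_wait(&r->data, &r->lock);
	was_full = ring_full(r);
	val = r->items[r->tail++ % RING_SIZE];
	pthread_mutex_unlock(&r->lock);

	/* Counterpart of the EPOLLOUT wakeup: skipped entirely when no
	 * producer can be waiting. */
	if (was_full)
		pthread_cond_signal(&r->space);
	return val;
}

/* Producer side, with the symmetric "only wake consumers if the ring was
 * empty" optimization. */
void ring_push(struct ring *r, int val)
{
	bool was_empty;

	pthread_mutex_lock(&r->lock);
	while (ring_full(r))
		pthread_cond_wait(&r->space, &r->lock);
	was_empty = (r->head == r->tail);
	r->items[r->head++ % RING_SIZE] = val;
	pthread_mutex_unlock(&r->lock);

	if (was_empty)
		pthread_cond_signal(&r->data);
}

The pthread_cond_signal() in ring_pop() plays the role of wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM): it is issued only when a writer can be waiting, which is the wakeup avoidance this commit adds to the read side (the "sync" part has no direct condvar analogue; it is a scheduler hint specific to the kernel).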