Diffstat (limited to 'fs/netfs/write_collect.c')
-rw-r--r--	fs/netfs/write_collect.c	582
1 file changed, 152 insertions(+), 430 deletions(-)
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 488147439fe0..cbf3d9194c7b 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -15,15 +15,39 @@
/* Notes made in the collector */
#define HIT_PENDING 0x01 /* A front op was still pending */
-#define SOME_EMPTY 0x02 /* One or more streams are empty */
-#define ALL_EMPTY 0x04 /* All streams are empty */
-#define MAYBE_DISCONTIG 0x08 /* A front op may be discontiguous (rounded to PAGE_SIZE) */
-#define NEED_REASSESS 0x10 /* Need to loop round and reassess */
-#define REASSESS_DISCONTIG 0x20 /* Reassess discontiguity if contiguity advances */
-#define MADE_PROGRESS 0x40 /* Made progress cleaning up a stream or the folio set */
-#define BUFFERED 0x80 /* The pagecache needs cleaning up */
-#define NEED_RETRY 0x100 /* A front op requests retrying */
-#define SAW_FAILURE 0x200 /* One or more streams hit a permanent failure */
+#define NEED_REASSESS 0x02 /* Need to loop round and reassess */
+#define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */
+#define NEED_UNLOCK 0x08 /* The pagecache needs unlocking */
+#define NEED_RETRY 0x10 /* A front op requests retrying */
+#define SAW_FAILURE 0x20 /* One or more streams hit a permanent failure */
+
+static void netfs_dump_request(const struct netfs_io_request *rreq)
+{
+ pr_err("Request R=%08x r=%d fl=%lx or=%x e=%ld\n",
+ rreq->debug_id, refcount_read(&rreq->ref), rreq->flags,
+ rreq->origin, rreq->error);
+ pr_err(" st=%llx tsl=%zx/%llx/%llx\n",
+ rreq->start, rreq->transferred, rreq->submitted, rreq->len);
+ pr_err(" cci=%llx/%llx/%llx\n",
+ rreq->cleaned_to, rreq->collected_to, atomic64_read(&rreq->issued_to));
+ pr_err(" iw=%pSR\n", rreq->netfs_ops->issue_write);
+ for (int i = 0; i < NR_IO_STREAMS; i++) {
+ const struct netfs_io_subrequest *sreq;
+ const struct netfs_io_stream *s = &rreq->io_streams[i];
+
+ pr_err(" str[%x] s=%x e=%d acnf=%u,%u,%u,%u\n",
+ s->stream_nr, s->source, s->error,
+ s->avail, s->active, s->need_retry, s->failed);
+ pr_err(" str[%x] ct=%llx t=%zx\n",
+ s->stream_nr, s->collected_to, s->transferred);
+ list_for_each_entry(sreq, &s->subrequests, rreq_link) {
+ pr_err(" sreq[%x:%x] sc=%u s=%llx t=%zx/%zx r=%d f=%lx\n",
+ sreq->stream_nr, sreq->debug_index, sreq->source,
+ sreq->start, sreq->transferred, sreq->len,
+ refcount_read(&sreq->ref), sreq->flags);
+ }
+ }
+}
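
How these notes drive the collector (a sketch for orientation, not part of the patch): netfs_collect_write_results() below zeroes a local notes word on each pass, ORs in flags as it pops completed subrequests, and loops while it is still making progress. Condensed control flow, using only the #defines added above:

	/* Sketch of the collect/reassess loop; the per-stream work is elided. */
	static void collect_sketch(void)
	{
		unsigned int notes;

	reassess:
		notes = 0;
		/* A real pass pops completed front subrequests here, ORing in
		 * HIT_PENDING, MADE_PROGRESS, NEED_RETRY or SAW_FAILURE. */
		if (notes & NEED_RETRY)
			return;		/* the real code jumps to its retry path */
		if (notes & (MADE_PROGRESS | NEED_REASSESS))
			goto reassess;
	}
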
/*
* Successful completion of write of a folio to the server and/or cache. Note
@@ -33,6 +57,7 @@
int netfs_folio_written_back(struct folio *folio)
{
enum netfs_folio_trace why = netfs_folio_trace_clear;
+ struct netfs_inode *ictx = netfs_inode(folio->mapping->host);
struct netfs_folio *finfo;
struct netfs_group *group = NULL;
int gcount = 0;
@@ -41,6 +66,12 @@ int netfs_folio_written_back(struct folio *folio)
/* Streaming writes cannot be redirtied whilst under writeback,
* so discard the streaming record.
*/
+ unsigned long long fend;
+
+ fend = folio_pos(folio) + finfo->dirty_offset + finfo->dirty_len;
+ if (fend > ictx->zero_point)
+ ictx->zero_point = fend;
+
folio_detach_private(folio);
group = finfo->netfs_group;
gcount++;
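
The added lines advance the inode's zero_point watermark: once a streaming write's dirty bytes have reached the server, reads beyond the old watermark can no longer be assumed to return zeros, so the watermark must move past the written region. The update, restated as a hedged helper (sketch only, not kernel code):

	static inline void sketch_advance_zero_point(struct netfs_inode *ictx,
						     unsigned long long fend)
	{
		/* zero_point must never lag a byte known to be on the server. */
		if (fend > ictx->zero_point)
			ictx->zero_point = fend;
	}
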
@@ -75,55 +106,45 @@ end_wb:
}
/*
- * Get hold of a folio we have under writeback. We don't want to get the
- * refcount on it.
+ * Unlock any folios we've finished with.
*/
-static struct folio *netfs_writeback_lookup_folio(struct netfs_io_request *wreq, loff_t pos)
+static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
+ unsigned int *notes)
{
- XA_STATE(xas, &wreq->mapping->i_pages, pos / PAGE_SIZE);
- struct folio *folio;
+ struct folio_queue *folioq = wreq->buffer.tail;
+ unsigned long long collected_to = wreq->collected_to;
+ unsigned int slot = wreq->buffer.first_tail_slot;
- rcu_read_lock();
-
- for (;;) {
- xas_reset(&xas);
- folio = xas_load(&xas);
- if (xas_retry(&xas, folio))
- continue;
-
- if (!folio || xa_is_value(folio))
- kdebug("R=%08x: folio %lx (%llx) not present",
- wreq->debug_id, xas.xa_index, pos / PAGE_SIZE);
- BUG_ON(!folio || xa_is_value(folio));
-
- if (folio == xas_reload(&xas))
- break;
+ if (WARN_ON_ONCE(!folioq)) {
+ pr_err("[!] Writeback unlock found empty rolling buffer!\n");
+ netfs_dump_request(wreq);
+ return;
}
- rcu_read_unlock();
+ if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) {
+ if (netfs_pgpriv2_unlock_copied_folios(wreq))
+ *notes |= MADE_PROGRESS;
+ return;
+ }
- if (WARN_ONCE(!folio_test_writeback(folio),
- "R=%08x: folio %lx is not under writeback\n",
- wreq->debug_id, folio->index)) {
- trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&wreq->buffer);
+ if (!folioq)
+ return;
+ slot = 0;
}
- return folio;
-}
-/*
- * Unlock any folios we've finished with.
- */
-static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
- unsigned long long collected_to,
- unsigned int *notes)
-{
for (;;) {
struct folio *folio;
struct netfs_folio *finfo;
unsigned long long fpos, fend;
size_t fsize, flen;
- folio = netfs_writeback_lookup_folio(wreq, wreq->cleaned_to);
+ folio = folioq_folio(folioq, slot);
+ if (WARN_ONCE(!folio_test_writeback(folio),
+ "R=%08x: folio %lx is not under writeback\n",
+ wreq->debug_id, folio->index))
+ trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
fpos = folio_pos(folio);
fsize = folio_size(folio);
@@ -134,12 +155,6 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
trace_netfs_collect_folio(wreq, folio, fend, collected_to);
- if (fpos + fsize > wreq->contiguity) {
- trace_netfs_collect_contig(wreq, fpos + fsize,
- netfs_contig_trace_unlock);
- wreq->contiguity = fpos + fsize;
- }
-
/* Unlock any folio we've transferred all of. */
if (collected_to < fend)
break;
@@ -148,213 +163,26 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
wreq->cleaned_to = fpos + fsize;
*notes |= MADE_PROGRESS;
- if (fpos + fsize >= collected_to)
- break;
- }
-}
-
-/*
- * Perform retries on the streams that need it.
- */
-static void netfs_retry_write_stream(struct netfs_io_request *wreq,
- struct netfs_io_stream *stream)
-{
- struct list_head *next;
-
- kenter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
-
- if (list_empty(&stream->subrequests))
- return;
-
- if (stream->source == NETFS_UPLOAD_TO_SERVER &&
- wreq->netfs_ops->retry_request)
- wreq->netfs_ops->retry_request(wreq, stream);
-
- if (unlikely(stream->failed))
- return;
-
- /* If there's no renegotiation to do, just resend each failed subreq. */
- if (!stream->prepare_write) {
- struct netfs_io_subrequest *subreq;
-
- list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
- if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
- break;
- if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
- netfs_reissue_write(stream, subreq);
- }
- }
- return;
- }
-
- next = stream->subrequests.next;
-
- do {
- struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
- unsigned long long start, len;
- size_t part;
- bool boundary = false;
-
- /* Go through the stream and find the next span of contiguous
- * data that we then rejig (cifs, for example, needs the wsize
- * renegotiating) and reissue.
- */
- from = list_entry(next, struct netfs_io_subrequest, rreq_link);
- to = from;
- start = from->start + from->transferred;
- len = from->len - from->transferred;
-
- if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
- !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
- return;
-
- list_for_each_continue(next, &stream->subrequests) {
- subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
- if (subreq->start + subreq->transferred != start + len ||
- test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
- !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
- break;
- to = subreq;
- len += to->len;
- }
-
- /* Work through the sublist. */
- subreq = from;
- list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
- if (!len)
- break;
- /* Renegotiate max_len (wsize) */
- trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
- __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
- stream->prepare_write(subreq);
-
- part = min(len, subreq->max_len);
- subreq->len = part;
- subreq->start = start;
- subreq->transferred = 0;
- len -= part;
- start += part;
- if (len && subreq == to &&
- __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
- boundary = true;
-
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
- netfs_reissue_write(stream, subreq);
- if (subreq == to)
- break;
- }
-
- /* If we managed to use fewer subreqs, we can discard the
- * excess; if we used the same number, then we're done.
+ /* Clean up the head folioq. If we clear an entire folioq, then
+ * we can get rid of it provided it's not also the tail folioq
+ * being filled by the issuer.
*/
- if (!len) {
- if (subreq == to)
- continue;
- list_for_each_entry_safe_from(subreq, tmp,
- &stream->subrequests, rreq_link) {
- trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
- list_del(&subreq->rreq_link);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
- if (subreq == to)
- break;
- }
- continue;
+ folioq_clear(folioq, slot);
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&wreq->buffer);
+ if (!folioq)
+ goto done;
+ slot = 0;
}
- /* We ran out of subrequests, so we need to allocate some more
- * and insert them after.
- */
- do {
- subreq = netfs_alloc_subrequest(wreq);
- subreq->source = to->source;
- subreq->start = start;
- subreq->max_len = len;
- subreq->max_nr_segs = INT_MAX;
- subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
- subreq->stream_nr = to->stream_nr;
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
-
- trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
- refcount_read(&subreq->ref),
- netfs_sreq_trace_new);
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
-
- list_add(&subreq->rreq_link, &to->rreq_link);
- to = list_next_entry(to, rreq_link);
- trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
-
- switch (stream->source) {
- case NETFS_UPLOAD_TO_SERVER:
- netfs_stat(&netfs_n_wh_upload);
- subreq->max_len = min(len, wreq->wsize);
- break;
- case NETFS_WRITE_TO_CACHE:
- netfs_stat(&netfs_n_wh_write);
- break;
- default:
- WARN_ON_ONCE(1);
- }
-
- stream->prepare_write(subreq);
-
- part = min(len, subreq->max_len);
- subreq->len = subreq->transferred + part;
- len -= part;
- start += part;
- if (!len && boundary) {
- __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
- boundary = false;
- }
-
- netfs_reissue_write(stream, subreq);
- if (!len)
- break;
-
- } while (len);
-
- } while (!list_is_head(next, &stream->subrequests));
-}
-
-/*
- * Perform retries on the streams that need it. If we're doing content
- * encryption and the server copy changed due to a third-party write, we may
- * need to do an RMW cycle and also rewrite the data to the cache.
- */
-static void netfs_retry_writes(struct netfs_io_request *wreq)
-{
- struct netfs_io_subrequest *subreq;
- struct netfs_io_stream *stream;
- int s;
-
- /* Wait for all outstanding I/O to quiesce before performing retries as
- * we may need to renegotiate the I/O sizes.
- */
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (!stream->active)
- continue;
-
- list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
- wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
- TASK_UNINTERRUPTIBLE);
- }
+ if (fpos + fsize >= collected_to)
+ break;
}
- // TODO: Enc: Fetch changed partial pages
- // TODO: Enc: Reencrypt content if needed.
- // TODO: Enc: Wind back transferred point.
- // TODO: Enc: Mark cache pages for retry.
-
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->need_retry) {
- stream->need_retry = false;
- netfs_retry_write_stream(wreq, stream);
- }
- }
+ wreq->buffer.tail = folioq;
+done:
+ wreq->buffer.first_tail_slot = slot;
}
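
The rewritten walk above consumes the rolling buffer as (segment, slot) pairs instead of looking folios up in the xarray. A minimal sketch of that traversal: folioq_folio(), folioq_nr_slots() and folioq_clear() are the real <linux/folio_queue.h> helpers, while collect() and next_spent() are hypothetical stand-ins (the patch uses rolling_buffer_delete_spent() for the latter):

	struct folio_queue *fq = tail;		/* hypothetical local state */
	unsigned int slot = first_tail_slot;

	while (fq) {
		struct folio *folio = folioq_folio(fq, slot);

		if (folio)
			collect(folio);		/* unlock/clean the folio */
		folioq_clear(fq, slot);		/* release the slot */
		if (++slot >= folioq_nr_slots(fq)) {
			fq = next_spent(fq);	/* retire the spent segment */
			slot = 0;
		}
	}
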
/*
@@ -370,37 +198,31 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
{
struct netfs_io_subrequest *front, *remove;
struct netfs_io_stream *stream;
- unsigned long long collected_to;
+ unsigned long long collected_to, issued_to;
unsigned int notes;
int s;
- kenter("%llx-%llx", wreq->start, wreq->start + wreq->len);
+ _enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
trace_netfs_collect(wreq);
trace_netfs_rreq(wreq, netfs_rreq_trace_collect);
reassess_streams:
+ issued_to = atomic64_read(&wreq->issued_to);
smp_rmb();
collected_to = ULLONG_MAX;
- if (wreq->origin == NETFS_WRITEBACK)
- notes = ALL_EMPTY | BUFFERED | MAYBE_DISCONTIG;
- else if (wreq->origin == NETFS_WRITETHROUGH)
- notes = ALL_EMPTY | BUFFERED;
+ if (wreq->origin == NETFS_WRITEBACK ||
+ wreq->origin == NETFS_WRITETHROUGH ||
+ wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE)
+ notes = NEED_UNLOCK;
else
- notes = ALL_EMPTY;
+ notes = 0;
/* Remove completed subrequests from the front of the streams and
* advance the completion point on each stream. We stop when we hit
* something that's in progress. The issuer thread may be adding stuff
* to the tail whilst we're doing this.
- *
- * We must not, however, merge in discontiguities that span whole
- * folios that aren't under writeback. This is made more complicated
- * by the folios in the gap being of unpredictable sizes - if they even
- * exist - but we don't want to look them up.
*/
for (s = 0; s < NR_IO_STREAMS; s++) {
- loff_t rstart, rend;
-
stream = &wreq->io_streams[s];
/* Read active flag before list pointers */
if (!smp_load_acquire(&stream->active))
@@ -409,32 +231,16 @@ reassess_streams:
front = stream->front;
while (front) {
trace_netfs_collect_sreq(wreq, front);
- //kdebug("sreq [%x] %llx %zx/%zx",
+ //_debug("sreq [%x] %llx %zx/%zx",
// front->debug_index, front->start, front->transferred, front->len);
- /* Stall if there may be a discontinuity. */
- rstart = round_down(front->start, PAGE_SIZE);
- if (rstart > wreq->contiguity) {
- if (wreq->contiguity > stream->collected_to) {
- trace_netfs_collect_gap(wreq, stream,
- wreq->contiguity, 'D');
- stream->collected_to = wreq->contiguity;
- }
- notes |= REASSESS_DISCONTIG;
- break;
- }
- rend = round_up(front->start + front->len, PAGE_SIZE);
- if (rend > wreq->contiguity) {
- trace_netfs_collect_contig(wreq, rend,
- netfs_contig_trace_collect);
- wreq->contiguity = rend;
- if (notes & REASSESS_DISCONTIG)
- notes |= NEED_REASSESS;
+ if (stream->collected_to < front->start) {
+ trace_netfs_collect_gap(wreq, stream, issued_to, 'F');
+ stream->collected_to = front->start;
}
- notes &= ~MAYBE_DISCONTIG;
/* Stall if the front is still undergoing I/O. */
- if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) {
+ if (netfs_check_subreq_in_progress(front)) {
notes |= HIT_PENDING;
break;
}
@@ -448,6 +254,7 @@ reassess_streams:
if (front->start + front->transferred > stream->collected_to) {
stream->collected_to = front->start + front->transferred;
stream->transferred = stream->collected_to - wreq->start;
+ stream->transferred_valid = true;
notes |= MADE_PROGRESS;
}
if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
@@ -473,26 +280,20 @@ reassess_streams:
front = list_first_entry_or_null(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
stream->front = front;
- if (!front) {
- unsigned long long jump_to = atomic64_read(&wreq->issued_to);
-
- if (stream->collected_to < jump_to) {
- trace_netfs_collect_gap(wreq, stream, jump_to, 'A');
- stream->collected_to = jump_to;
- }
- }
-
spin_unlock(&wreq->lock);
- netfs_put_subrequest(remove, false,
+ netfs_put_subrequest(remove,
notes & SAW_FAILURE ?
netfs_sreq_trace_put_cancel :
netfs_sreq_trace_put_done);
}
- if (front)
- notes &= ~ALL_EMPTY;
- else
- notes |= SOME_EMPTY;
+ /* If we have an empty stream, we need to jump it forward
+ * otherwise the collection point will never advance.
+ */
+ if (!front && issued_to > stream->collected_to) {
+ trace_netfs_collect_gap(wreq, stream, issued_to, 'E');
+ stream->collected_to = issued_to;
+ }
if (stream->collected_to < collected_to)
collected_to = stream->collected_to;
@@ -501,36 +302,6 @@ reassess_streams:
if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to)
wreq->collected_to = collected_to;
- /* If we have an empty stream, we need to jump it forward over any gap
- * otherwise the collection point will never advance.
- *
- * Note that the issuer always adds to the stream with the lowest
- * so-far submitted start, so if we see two consecutive subreqs in one
- * stream with nothing between then in another stream, then the second
- * stream has a gap that can be jumped.
- */
- if (notes & SOME_EMPTY) {
- unsigned long long jump_to = wreq->start + READ_ONCE(wreq->submitted);
-
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->active &&
- stream->front &&
- stream->front->start < jump_to)
- jump_to = stream->front->start;
- }
-
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->active &&
- !stream->front &&
- stream->collected_to < jump_to) {
- trace_netfs_collect_gap(wreq, stream, jump_to, 'B');
- stream->collected_to = jump_to;
- }
- }
- }
-
for (s = 0; s < NR_IO_STREAMS; s++) {
stream = &wreq->io_streams[s];
if (stream->active)
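
Why the 'E' gap-jump above is needed: the request-wide collection point is the minimum of every active stream's collected_to (the reduction visible in this hunk), so a stream with nothing queued must be dragged up to issued_to or the minimum can never advance. The reduction in isolation, mirroring the code above:

	unsigned long long collected_to = ULLONG_MAX;

	for (int s = 0; s < NR_IO_STREAMS; s++) {
		struct netfs_io_stream *stream = &wreq->io_streams[s];

		if (stream->active && stream->collected_to < collected_to)
			collected_to = stream->collected_to;
	}
	if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to)
		wreq->collected_to = collected_to;
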
@@ -540,57 +311,25 @@ reassess_streams:
trace_netfs_collect_state(wreq, wreq->collected_to, notes);
/* Unlock any folios that we have now finished with. */
- if (notes & BUFFERED) {
- unsigned long long clean_to = min(wreq->collected_to, wreq->contiguity);
-
- if (wreq->cleaned_to < clean_to)
- netfs_writeback_unlock_folios(wreq, clean_to, &notes);
+ if (notes & NEED_UNLOCK) {
+ if (wreq->cleaned_to < wreq->collected_to)
+ netfs_writeback_unlock_folios(wreq, &notes);
} else {
wreq->cleaned_to = wreq->collected_to;
}
// TODO: Discard encryption buffers
- /* If all streams are discontiguous with the last folio we cleared, we
- * may need to skip a set of folios.
- */
- if ((notes & (MAYBE_DISCONTIG | ALL_EMPTY)) == MAYBE_DISCONTIG) {
- unsigned long long jump_to = ULLONG_MAX;
-
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->active && stream->front &&
- stream->front->start < jump_to)
- jump_to = stream->front->start;
- }
-
- trace_netfs_collect_contig(wreq, jump_to, netfs_contig_trace_jump);
- wreq->contiguity = jump_to;
- wreq->cleaned_to = jump_to;
- wreq->collected_to = jump_to;
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->collected_to < jump_to)
- stream->collected_to = jump_to;
- }
- //cond_resched();
- notes |= MADE_PROGRESS;
- goto reassess_streams;
- }
-
if (notes & NEED_RETRY)
goto need_retry;
- if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
- trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
- clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
- wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE);
- }
- if (notes & NEED_REASSESS) {
+ if (notes & MADE_PROGRESS) {
+ netfs_wake_rreq_flag(wreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
//cond_resched();
goto reassess_streams;
}
- if (notes & MADE_PROGRESS) {
+
+ if (notes & NEED_REASSESS) {
//cond_resched();
goto reassess_streams;
}
@@ -598,7 +337,7 @@ reassess_streams:
out:
netfs_put_group_many(wreq->group, wreq->nr_group_rel);
wreq->nr_group_rel = 0;
- kleave(" = %x", notes);
+ _leave(" = %x", notes);
return;
need_retry:
@@ -606,7 +345,7 @@ need_retry:
* that any partially completed op will have had any wholly transferred
* folios removed from it.
*/
- kdebug("retry");
+ _debug("retry");
netfs_retry_writes(wreq);
goto out;
}
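
Note that netfs_retry_writes() is still called at the need_retry label above even though its definition is deleted from this file, so the retry machinery has moved elsewhere (a diffstat limited to write_collect.c cannot show its new home). For reference, the pattern the removed code implemented, condensed from the deleted lines: quiesce all in-flight subrequests, then reissue per stream.

	/* Condensed sketch of the removed netfs_retry_writes(). */
	for (s = 0; s < NR_IO_STREAMS; s++) {
		struct netfs_io_stream *stream = &wreq->io_streams[s];

		if (!stream->active)
			continue;
		/* I/O sizes may need renegotiating, so wait for all
		 * outstanding subrequests to quiesce first. */
		list_for_each_entry(subreq, &stream->subrequests, rreq_link)
			wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
				    TASK_UNINTERRUPTIBLE);
		if (stream->need_retry) {
			stream->need_retry = false;
			netfs_retry_write_stream(wreq, stream);
		}
	}
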
@@ -614,30 +353,22 @@ need_retry:
/*
* Perform the collection of subrequests, folios and encryption buffers.
*/
-void netfs_write_collection_worker(struct work_struct *work)
+bool netfs_write_collection(struct netfs_io_request *wreq)
{
- struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
struct netfs_inode *ictx = netfs_inode(wreq->inode);
size_t transferred;
+ bool transferred_valid = false;
int s;
- kenter("R=%x", wreq->debug_id);
-
- netfs_see_request(wreq, netfs_rreq_trace_see_work);
- if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
- netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
- return;
- }
+ _enter("R=%x", wreq->debug_id);
netfs_collect_write_results(wreq);
/* We're done when the app thread has finished posting subreqs and all
* the queues in all the streams are empty.
*/
- if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags)) {
- netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
- return;
- }
+ if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags))
+ return false;
smp_rmb(); /* Read ALL_QUEUED before lists. */
transferred = LONG_MAX;
@@ -645,28 +376,33 @@ void netfs_write_collection_worker(struct work_struct *work)
struct netfs_io_stream *stream = &wreq->io_streams[s];
if (!stream->active)
continue;
- if (!list_empty(&stream->subrequests)) {
- netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
- return;
- }
- if (stream->transferred < transferred)
+ if (!list_empty(&stream->subrequests))
+ return false;
+ if (stream->transferred_valid &&
+ stream->transferred < transferred) {
transferred = stream->transferred;
+ transferred_valid = true;
+ }
}
/* Okay, declare that all I/O is complete. */
- wreq->transferred = transferred;
+ if (transferred_valid)
+ wreq->transferred = transferred;
trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
if (wreq->io_streams[1].active &&
- wreq->io_streams[1].failed) {
+ wreq->io_streams[1].failed &&
+ ictx->ops->invalidate_cache) {
/* Cache write failure doesn't prevent writeback completion
* unless we're in disconnected mode.
*/
ictx->ops->invalidate_cache(wreq);
}
- if (wreq->cleanup)
- wreq->cleanup(wreq);
+ if ((wreq->origin == NETFS_UNBUFFERED_WRITE ||
+ wreq->origin == NETFS_DIO_WRITE) &&
+ !wreq->error)
+ netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
if (wreq->origin == NETFS_DIO_WRITE &&
wreq->mapping->nrpages) {
@@ -684,33 +420,36 @@ void netfs_write_collection_worker(struct work_struct *work)
if (wreq->origin == NETFS_DIO_WRITE)
inode_dio_end(wreq->inode);
- kdebug("finished");
- trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
- clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
- wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
+ _debug("finished");
+ netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+ /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
if (wreq->iocb) {
size_t written = min(wreq->transferred, wreq->len);
wreq->iocb->ki_pos += written;
- if (wreq->iocb->ki_complete)
+ if (wreq->iocb->ki_complete) {
+ trace_netfs_rreq(wreq, netfs_rreq_trace_ki_complete);
wreq->iocb->ki_complete(
wreq->iocb, wreq->error ? wreq->error : written);
+ }
wreq->iocb = VFS_PTR_POISON;
}
- netfs_clear_subrequests(wreq, false);
- netfs_put_request(wreq, false, netfs_rreq_trace_put_work_complete);
+ netfs_clear_subrequests(wreq);
+ return true;
}
-/*
- * Wake the collection work item.
- */
-void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
+void netfs_write_collection_worker(struct work_struct *work)
{
- if (!work_pending(&wreq->work)) {
- netfs_get_request(wreq, netfs_rreq_trace_get_work);
- if (!queue_work(system_unbound_wq, &wreq->work))
- netfs_put_request(wreq, was_async, netfs_rreq_trace_put_work_nq);
+ struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
+
+ netfs_see_request(rreq, netfs_rreq_trace_see_work);
+ if (netfs_check_rreq_in_progress(rreq)) {
+ if (netfs_write_collection(rreq))
+ /* Drop the ref from the IN_PROGRESS flag. */
+ netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+ else
+ netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
}
}
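
The rewritten worker relies on a flag-owns-a-ref convention instead of taking a reference per queue_work() call: while NETFS_RREQ_IN_PROGRESS is set it stands for one reference on the request, and the path that clears it (netfs_write_collection() via netfs_wake_rreq_flag()) must put that reference exactly once. The rule, sketched with invented names:

	/* Sketch of the ownership rule; req, check_in_progress(),
	 * do_collection() and put_req() are invented, not the kernel API. */
	struct req;
	bool check_in_progress(struct req *r);
	bool do_collection(struct req *r);	/* true if it completed r */
	void put_req(struct req *r);

	void worker_sketch(struct req *r)
	{
		if (!check_in_progress(r))
			return;		/* someone else already finished it */
		if (do_collection(r))
			put_req(r);	/* drop the ref the flag represented */
	}
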
@@ -718,7 +457,6 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
* netfs_write_subrequest_terminated - Note the termination of a write operation.
* @_op: The I/O request that has terminated.
* @transferred_or_error: The amount of data transferred or an error code.
- * @was_async: The termination was asynchronous
*
* This tells the library that a contributory write I/O operation has
* terminated, one way or another, and that it should collect the results.
@@ -728,23 +466,18 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
* negative error code. The library will look after reissuing I/O operations
* as appropriate and writing downloaded data to the cache.
*
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
- *
* When this is called, ownership of the subrequest is transferred back to the
* library, along with a ref.
*
* Note that %_op is a void* so that the function can be passed to
* kiocb::term_func without the need for a casting wrapper.
*/
-void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
- bool was_async)
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error)
{
struct netfs_io_subrequest *subreq = _op;
struct netfs_io_request *wreq = subreq->rreq;
- struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
- kenter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
+ _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
switch (subreq->source) {
case NETFS_UPLOAD_TO_SERVER:
@@ -753,8 +486,6 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
case NETFS_WRITE_TO_CACHE:
netfs_stat(&netfs_n_wh_write_done);
break;
- case NETFS_INVALID_WRITE:
- break;
default:
BUG();
}
@@ -794,16 +525,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
}
trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
- clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
- wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS);
-
- /* If we are at the head of the queue, wake up the collector,
- * transferring a ref to it if we were the ones to do so.
- */
- if (list_is_first(&subreq->rreq_link, &stream->subrequests))
- netfs_wake_write_collector(wreq, was_async);
-
- netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+ netfs_subreq_clear_in_progress(subreq);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_write_subrequest_terminated);
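
With @was_async gone, a backend's completion handler simply reports the result and leaves the rest to the write collector. A hedged usage sketch: every myfs_* name is invented; only the netfs call and its types are real.

	/* Hypothetical backend write-completion handler. */
	static void myfs_write_done(struct myfs_call *call)
	{
		struct netfs_io_subrequest *subreq = call->subreq;
		ssize_t result = call->error ? call->error : call->nr_written;

		/* Hands the subrequest (and our ref on it) back to netfslib;
		 * result collection happens in the write collector. */
		netfs_write_subrequest_terminated(subreq, result);
	}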