Diffstat (limited to 'fs/iomap/buffered-io.c')
-rw-r--r--  fs/iomap/buffered-io.c  30
1 file changed, 10 insertions, 20 deletions
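With this change, ->discard_page is called for any mapping failure, not only when nothing was added to an ioend, and it now receives the file offset at which mapping failed so the filesystem can clean up just the unmapped tail of the page. Below is a minimal sketch of a callback under the assumed (struct page *, loff_t) prototype; myfs_discard_page and myfs_punch_delalloc are made-up names standing in for a filesystem's own delalloc cleanup (XFS, for instance, punches out delayed allocations at this point), not code from this patch:

static void
myfs_discard_page(struct page *page, loff_t fileoff)
{
        struct inode *inode = page->mapping->host;
        unsigned int pageoff = offset_in_page(fileoff);

        /*
         * Nothing from fileoff to the end of the page made it into an
         * ioend, so writeback completion will never clean it up: drop
         * the delayed allocations backing that range (hypothetical
         * helper) and invalidate the failed part of the page.
         */
        myfs_punch_delalloc(inode, fileoff, PAGE_SIZE - pageoff);
        iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
}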
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 8180061b9e16..10cc7979ce38 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1374,6 +1374,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
WARN_ON_ONCE(!PageLocked(page));
WARN_ON_ONCE(PageWriteback(page));
+	WARN_ON_ONCE(PageDirty(page));

/*
* We cannot cancel the ioend directly here on error. We may have
@@ -1382,33 +1383,22 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* appropriately.
*/
if (unlikely(error)) {
+ /*
+ * Let the filesystem know what portion of the current page
+	 * failed to map. If the page hasn't been added to ioend, it
+ * won't be affected by I/O completion and we must unlock it
+ * now.
+ */
+ if (wpc->ops->discard_page)
+ wpc->ops->discard_page(page, file_offset);
if (!count) {
- /*
- * If the current page hasn't been added to ioend, it
- * won't be affected by I/O completions and we must
- * discard and unlock it right here.
- */
- if (wpc->ops->discard_page)
- wpc->ops->discard_page(page);
ClearPageUptodate(page);
unlock_page(page);
goto done;
}
-
- /*
- * If the page was not fully cleaned, we need to ensure that the
- * higher layers come back to it correctly. That means we need
- * to keep the page dirty, and for WB_SYNC_ALL writeback we need
- * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
- * so another attempt to write this page in this writeback sweep
- * will be made.
- */
- set_page_writeback_keepwrite(page);
- } else {
- clear_page_dirty_for_io(page);
- set_page_writeback(page);
}

+	set_page_writeback(page);
unlock_page(page);

/*