path: root/fs/iomap/buffered-io.c
Diffstat (limited to 'fs/iomap/buffered-io.c')
-rw-r--r--  fs/iomap/buffered-io.c  2683
1 file changed, 1492 insertions, 1191 deletions
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index bcfc288dba3f..e5c1ca440d93 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1,403 +1,613 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Red Hat, Inc.
- * Copyright (C) 2016-2019 Christoph Hellwig.
+ * Copyright (C) 2016-2023 Christoph Hellwig.
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
#include <linux/iomap.h>
-#include <linux/pagemap.h>
-#include <linux/uio.h>
#include <linux/buffer_head.h>
-#include <linux/dax.h>
#include <linux/writeback.h>
-#include <linux/list_sort.h>
#include <linux/swap.h>
-#include <linux/bio.h>
-#include <linux/sched/signal.h>
#include <linux/migrate.h>
+#include "internal.h"
#include "trace.h"
#include "../internal.h"
/*
- * Structure allocated for each page when block size < PAGE_SIZE to track
- * sub-page uptodate status and I/O completions.
+ * Structure allocated for each folio to track per-block uptodate, dirty state
+ * and I/O completions.
*/
-struct iomap_page {
- atomic_t read_count;
- atomic_t write_count;
- spinlock_t uptodate_lock;
- DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+struct iomap_folio_state {
+ spinlock_t state_lock;
+ unsigned int read_bytes_pending;
+ atomic_t write_bytes_pending;
+
+ /*
+ * Each block has two bits in this bitmap:
+ * Bits [0..blocks_per_folio) has the uptodate status.
+ * Bits [b_p_f...(2*b_p_f)) has the dirty status.
+ */
+ unsigned long state[];
};
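
To make the split layout described above concrete, a block's two state bits would be addressed as below. This is an illustrative sketch, not part of the patch; the example_* helper names are invented, while test_bit() and i_blocks_per_folio() are the helpers the surrounding code already relies on.

static inline bool example_block_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, unsigned int block)
{
	/* uptodate bits occupy [0, blocks_per_folio) */
	return test_bit(block, ifs->state);
}

static inline bool example_block_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, unsigned int block)
{
	struct inode *inode = folio->mapping->host;

	/* dirty bits occupy [blocks_per_folio, 2 * blocks_per_folio) */
	return test_bit(block + i_blocks_per_folio(inode, folio), ifs->state);
}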
-static inline struct iomap_page *to_iomap_page(struct page *page)
+static inline bool ifs_is_fully_uptodate(struct folio *folio,
+ struct iomap_folio_state *ifs)
{
- if (page_has_private(page))
- return (struct iomap_page *)page_private(page);
- return NULL;
+ struct inode *inode = folio->mapping->host;
+
+ return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}
-static struct bio_set iomap_ioend_bioset;
+/*
+ * Find the next uptodate block in the folio. end_blk is inclusive.
+ * If no uptodate block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_uptodate_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ return find_next_bit(ifs->state, end_blk + 1, start_blk);
+}
-static struct iomap_page *
-iomap_page_create(struct inode *inode, struct page *page)
+/*
+ * Find the next non-uptodate block in the folio. end_blk is inclusive.
+ * If no non-uptodate block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_nonuptodate_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ return find_next_zero_bit(ifs->state, end_blk + 1, start_blk);
+}
+
+static bool ifs_set_range_uptodate(struct folio *folio,
+ struct iomap_folio_state *ifs, size_t off, size_t len)
{
- struct iomap_page *iop = to_iomap_page(page);
+ struct inode *inode = folio->mapping->host;
+ unsigned int first_blk = off >> inode->i_blkbits;
+ unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+ unsigned int nr_blks = last_blk - first_blk + 1;
- if (iop || i_blocksize(inode) == PAGE_SIZE)
- return iop;
+ bitmap_set(ifs->state, first_blk, nr_blks);
+ return ifs_is_fully_uptodate(folio, ifs);
+}
+
+static void iomap_set_range_uptodate(struct folio *folio, size_t off,
+ size_t len)
+{
+ struct iomap_folio_state *ifs = folio->private;
+ unsigned long flags;
+ bool uptodate = true;
+
+ if (folio_test_uptodate(folio))
+ return;
+
+ if (ifs) {
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+ }
+
+ if (uptodate)
+ folio_mark_uptodate(folio);
+}
+
+/*
+ * Find the next dirty block in the folio. end_blk is inclusive.
+ * If no dirty block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_dirty_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
+{
+ struct iomap_folio_state *ifs = folio->private;
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks = i_blocks_per_folio(inode, folio);
+
+ return find_next_bit(ifs->state, blks + end_blk + 1,
+ blks + start_blk) - blks;
+}
+
+/*
+ * Find the next clean block in the folio. end_blk is inclusive.
+ * If no clean block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_clean_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
+{
+ struct iomap_folio_state *ifs = folio->private;
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks = i_blocks_per_folio(inode, folio);
- iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
- atomic_set(&iop->read_count, 0);
- atomic_set(&iop->write_count, 0);
- spin_lock_init(&iop->uptodate_lock);
- bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+ return find_next_zero_bit(ifs->state, blks + end_blk + 1,
+ blks + start_blk) - blks;
+}
+
+static unsigned ifs_find_dirty_range(struct folio *folio,
+ struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
+{
+ struct inode *inode = folio->mapping->host;
+ unsigned start_blk =
+ offset_in_folio(folio, *range_start) >> inode->i_blkbits;
+ unsigned end_blk = min_not_zero(
+ offset_in_folio(folio, range_end) >> inode->i_blkbits,
+ i_blocks_per_folio(inode, folio)) - 1;
+ unsigned nblks;
+
+ start_blk = ifs_next_dirty_block(folio, start_blk, end_blk);
+ if (start_blk > end_blk)
+ return 0;
+ if (start_blk == end_blk)
+ nblks = 1;
+ else
+ nblks = ifs_next_clean_block(folio, start_blk + 1, end_blk) -
+ start_blk;
+
+ *range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
+ return nblks << inode->i_blkbits;
+}
+
+static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
+ u64 range_end)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ if (*range_start >= range_end)
+ return 0;
+
+ if (ifs)
+ return ifs_find_dirty_range(folio, ifs, range_start, range_end);
+ return range_end - *range_start;
+}
+
+static void ifs_clear_range_dirty(struct folio *folio,
+ struct iomap_folio_state *ifs, size_t off, size_t len)
+{
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
+ unsigned int first_blk = (off >> inode->i_blkbits);
+ unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+ unsigned int nr_blks = last_blk - first_blk + 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+}
+
+static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ if (ifs)
+ ifs_clear_range_dirty(folio, ifs, off, len);
+}
+
+static void ifs_set_range_dirty(struct folio *folio,
+ struct iomap_folio_state *ifs, size_t off, size_t len)
+{
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
+ unsigned int first_blk = (off >> inode->i_blkbits);
+ unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+ unsigned int nr_blks = last_blk - first_blk + 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+}
+
+static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ if (ifs)
+ ifs_set_range_dirty(folio, ifs, off, len);
+}
+
+static struct iomap_folio_state *ifs_alloc(struct inode *inode,
+ struct folio *folio, unsigned int flags)
+{
+ struct iomap_folio_state *ifs = folio->private;
+ unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
+ gfp_t gfp;
+
+ if (ifs || nr_blocks <= 1)
+ return ifs;
+
+ if (flags & IOMAP_NOWAIT)
+ gfp = GFP_NOWAIT;
+ else
+ gfp = GFP_NOFS | __GFP_NOFAIL;
/*
- * migrate_page_move_mapping() assumes that pages with private data have
- * their count elevated by 1.
+ * ifs->state tracks two sets of state flags when the
+ * filesystem block size is smaller than the folio size.
+ * The first state tracks per-block uptodate and the
+ * second tracks per-block dirty state.
*/
- attach_page_private(page, iop);
- return iop;
+ ifs = kzalloc(struct_size(ifs, state,
+ BITS_TO_LONGS(2 * nr_blocks)), gfp);
+ if (!ifs)
+ return ifs;
+
+ spin_lock_init(&ifs->state_lock);
+ if (folio_test_uptodate(folio))
+ bitmap_set(ifs->state, 0, nr_blocks);
+ if (folio_test_dirty(folio))
+ bitmap_set(ifs->state, nr_blocks, nr_blocks);
+ folio_attach_private(folio, ifs);
+
+ return ifs;
}
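
As a worked example of the allocation arithmetic in ifs_alloc() (assumed geometry, not taken from the patch): a 64KiB folio on a filesystem with 4KiB blocks gives nr_blocks = 16, so the flexible state[] array must hold 2 * 16 = 32 bits, which BITS_TO_LONGS() rounds up to a single word on a 64-bit kernel:

/*
 * Worked example (64KiB folio, 4KiB blocks, 64-bit kernel):
 *   nr_blocks         = 16
 *   bits needed       = 2 * 16 = 32   (uptodate + dirty halves)
 *   BITS_TO_LONGS(32) = 1             (one 8-byte word in state[])
 * so struct_size(ifs, state, 1) is allocated, and a folio that is already
 * uptodate and/or dirty has the matching half of the bitmap pre-filled.
 */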
-static void
-iomap_page_release(struct page *page)
+static void ifs_free(struct folio *folio)
{
- struct iomap_page *iop = detach_page_private(page);
+ struct iomap_folio_state *ifs = folio_detach_private(folio);
- if (!iop)
+ if (!ifs)
return;
- WARN_ON_ONCE(atomic_read(&iop->read_count));
- WARN_ON_ONCE(atomic_read(&iop->write_count));
- kfree(iop);
+ WARN_ON_ONCE(ifs->read_bytes_pending != 0);
+ WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
+ WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
+ folio_test_uptodate(folio));
+ kfree(ifs);
}
/*
- * Calculate the range inside the page that we actually need to read.
+ * Calculate how many bytes to truncate based off the number of blocks to
+ * truncate and the end position to start truncating from.
*/
-static void
-iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
- loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
+static size_t iomap_bytes_to_truncate(loff_t end_pos, unsigned block_bits,
+ unsigned blocks_truncated)
+{
+ unsigned block_size = 1 << block_bits;
+ unsigned block_offset = end_pos & (block_size - 1);
+
+ if (!block_offset)
+ return blocks_truncated << block_bits;
+
+ return ((blocks_truncated - 1) << block_bits) + block_offset;
+}
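
A short worked example of iomap_bytes_to_truncate() with assumed values: 4KiB blocks (block_bits = 12), two blocks to truncate, and an end position that sits 2048 bytes into its block:

/*
 * Worked example (assumed values):
 *   block_bits       = 12      (4096-byte blocks)
 *   end_pos          = 14336   (block_offset = 14336 & 4095 = 2048)
 *   blocks_truncated = 2
 *
 *   result = ((2 - 1) << 12) + 2048 = 6144 bytes
 *
 * If end_pos were block aligned (block_offset == 0), the result would
 * simply be 2 << 12 = 8192 bytes.
 */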
+
+/*
+ * Calculate the range inside the folio that we actually need to read.
+ */
+static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
+ struct iomap_folio_state *ifs = folio->private;
loff_t orig_pos = *pos;
loff_t isize = i_size_read(inode);
unsigned block_bits = inode->i_blkbits;
unsigned block_size = (1 << block_bits);
- unsigned poff = offset_in_page(*pos);
- unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+ size_t poff = offset_in_folio(folio, *pos);
+ size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
+ size_t orig_plen = plen;
unsigned first = poff >> block_bits;
unsigned last = (poff + plen - 1) >> block_bits;
/*
- * If the block size is smaller than the page size we need to check the
+ * If the block size is smaller than the page size, we need to check the
* per-block uptodate status and adjust the offset and length if needed
* to avoid reading in already uptodate ranges.
*/
- if (iop) {
- unsigned int i;
-
- /* move forward for each leading block marked uptodate */
- for (i = first; i <= last; i++) {
- if (!test_bit(i, iop->uptodate))
- break;
- *pos += block_size;
- poff += block_size;
- plen -= block_size;
- first++;
+ if (ifs) {
+ unsigned int next, blocks_skipped;
+
+ next = ifs_next_nonuptodate_block(folio, first, last);
+ blocks_skipped = next - first;
+
+ if (blocks_skipped) {
+ unsigned long block_offset = *pos & (block_size - 1);
+ unsigned bytes_skipped =
+ (blocks_skipped << block_bits) - block_offset;
+
+ *pos += bytes_skipped;
+ poff += bytes_skipped;
+ plen -= bytes_skipped;
}
+ first = next;
/* truncate len if we find any trailing uptodate block(s) */
- for ( ; i <= last; i++) {
- if (test_bit(i, iop->uptodate)) {
- plen -= (last - i + 1) * block_size;
- last = i - 1;
- break;
+ if (++next <= last) {
+ next = ifs_next_uptodate_block(folio, next, last);
+ if (next <= last) {
+ plen -= iomap_bytes_to_truncate(*pos + plen,
+ block_bits, last - next + 1);
+ last = next - 1;
}
}
}
/*
- * If the extent spans the block that contains the i_size we need to
+ * If the extent spans the block that contains the i_size, we need to
* handle both halves separately so that we properly zero data in the
* page cache for blocks that are entirely outside of i_size.
*/
- if (orig_pos <= isize && orig_pos + length > isize) {
- unsigned end = offset_in_page(isize - 1) >> block_bits;
+ if (orig_pos <= isize && orig_pos + orig_plen > isize) {
+ unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
if (first <= end && last > end)
- plen -= (last - end) * block_size;
+ plen -= iomap_bytes_to_truncate(*pos + plen, block_bits,
+ last - end);
}
*offp = poff;
*lenp = plen;
}
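
To illustrate how iomap_adjust_read_range() trims the range, assume a 16KiB folio with 4KiB blocks where blocks 0 and 3 are already uptodate and the caller asks for the whole folio:

/*
 * Worked example (assumed: 16KiB folio, 4KiB blocks, blocks 0 and 3
 * uptodate, *pos = folio_pos(folio), length = 16384):
 *
 *   first = 0, last = 3
 *   leading skip:  next non-uptodate block is 1, bytes_skipped = 4096
 *                  -> *pos += 4096, poff = 4096, plen = 12288, first = 1
 *   trailing trim: next uptodate block in [2, 3] is 3
 *                  -> plen -= 4096, last = 2
 *
 *   Result: only blocks 1-2 are read (poff = 4096, plen = 8192).
 */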
-static void
-iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
+ loff_t pos)
{
- struct iomap_page *iop = to_iomap_page(page);
- struct inode *inode = page->mapping->host;
- unsigned first = off >> inode->i_blkbits;
- unsigned last = (off + len - 1) >> inode->i_blkbits;
- bool uptodate = true;
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&iop->uptodate_lock, flags);
- for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
- if (i >= first && i <= last)
- set_bit(i, iop->uptodate);
- else if (!test_bit(i, iop->uptodate))
- uptodate = false;
- }
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
- if (uptodate)
- SetPageUptodate(page);
- spin_unlock_irqrestore(&iop->uptodate_lock, flags);
+ return srcmap->type != IOMAP_MAPPED ||
+ (srcmap->flags & IOMAP_F_NEW) ||
+ pos >= i_size_read(iter->inode);
}
-static void
-iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+/**
+ * iomap_read_inline_data - copy inline data into the page cache
+ * @iter: iteration structure
+ * @folio: folio to copy to
+ *
+ * Copy the inline data in @iter into @folio and zero out the rest of the folio.
+ * Only a single IOMAP_INLINE extent is allowed at the end of each file.
+ * Returns zero for success to complete the read, or the usual negative errno.
+ */
+static int iomap_read_inline_data(const struct iomap_iter *iter,
+ struct folio *folio)
{
- if (PageError(page))
- return;
+ const struct iomap *iomap = iomap_iter_srcmap(iter);
+ size_t size = i_size_read(iter->inode) - iomap->offset;
+ size_t offset = offset_in_folio(folio, iomap->offset);
- if (page_has_private(page))
- iomap_iop_set_range_uptodate(page, off, len);
- else
- SetPageUptodate(page);
-}
+ if (WARN_ON_ONCE(!iomap->inline_data))
+ return -EIO;
-static void
-iomap_read_finish(struct iomap_page *iop, struct page *page)
-{
- if (!iop || atomic_dec_and_test(&iop->read_count))
- unlock_page(page);
+ if (folio_test_uptodate(folio))
+ return 0;
+
+ if (WARN_ON_ONCE(size > iomap->length))
+ return -EIO;
+ if (offset > 0)
+ ifs_alloc(iter->inode, folio, iter->flags);
+
+ folio_fill_tail(folio, offset, iomap->inline_data, size);
+ iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
+ return 0;
}
-static void
-iomap_read_page_end_io(struct bio_vec *bvec, int error)
+void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
+ int error)
{
- struct page *page = bvec->bv_page;
- struct iomap_page *iop = to_iomap_page(page);
-
- if (unlikely(error)) {
- ClearPageUptodate(page);
- SetPageError(page);
- } else {
- iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+ struct iomap_folio_state *ifs = folio->private;
+ bool uptodate = !error;
+ bool finished = true;
+
+ if (ifs) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ if (!error)
+ uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
+ ifs->read_bytes_pending -= len;
+ finished = !ifs->read_bytes_pending;
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
}
- iomap_read_finish(iop, page);
+ if (finished)
+ folio_end_read(folio, uptodate);
}
+EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
-static void
-iomap_read_end_io(struct bio *bio)
+static void iomap_read_init(struct folio *folio)
{
- int error = blk_status_to_errno(bio->bi_status);
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct iomap_folio_state *ifs = folio->private;
- bio_for_each_segment_all(bvec, bio, iter_all)
- iomap_read_page_end_io(bvec, error);
- bio_put(bio);
-}
+ if (ifs) {
+ size_t len = folio_size(folio);
-struct iomap_readpage_ctx {
- struct page *cur_page;
- bool cur_page_in_bio;
- struct bio *bio;
- struct readahead_control *rac;
-};
+ /*
+ * ifs->read_bytes_pending is used to track how many bytes are
+ * read in asynchronously by the IO helper. We need to track
+ * this so that we can know when the IO helper has finished
+ * reading in all the necessary ranges of the folio and can end
+ * the read.
+ *
+ * Increase ->read_bytes_pending by the folio size to start, and
+ * add a +1 bias. We'll subtract the bias and any uptodate /
+ * zeroed ranges that did not require IO in iomap_read_end()
+ * after we're done processing the folio.
+ *
+ * We do this because otherwise, we would have to increment
+ * ifs->read_bytes_pending every time a range in the folio needs
+ * to be read in, which can get expensive since the spinlock
+ * needs to be held whenever modifying ifs->read_bytes_pending.
+ *
+ * We add the bias to ensure the read has not been ended on the
+ * folio when iomap_read_end() is called, even if the IO helper
+ * has already finished reading in the entire folio.
+ */
+ spin_lock_irq(&ifs->state_lock);
+ WARN_ON_ONCE(ifs->read_bytes_pending != 0);
+ ifs->read_bytes_pending = len + 1;
+ spin_unlock_irq(&ifs->state_lock);
+ }
+}
-static void
-iomap_read_inline_data(struct inode *inode, struct page *page,
- struct iomap *iomap)
+/*
+ * This ends IO if no bytes were submitted to an IO helper.
+ *
+ * Otherwise, this calibrates ifs->read_bytes_pending to represent only the
+ * submitted bytes (see comment in iomap_read_init()). If all bytes submitted
+ * have already been completed by the IO helper, then this will end the read.
+ * Else the IO helper will end the read after all submitted ranges have been
+ * read.
+ */
+static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
{
- size_t size = i_size_read(inode);
- void *addr;
+ struct iomap_folio_state *ifs = folio->private;
- if (PageUptodate(page))
- return;
+ if (ifs) {
+ bool end_read, uptodate;
- BUG_ON(page->index);
- BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
+ spin_lock_irq(&ifs->state_lock);
+ if (!ifs->read_bytes_pending) {
+ WARN_ON_ONCE(bytes_submitted);
+ spin_unlock_irq(&ifs->state_lock);
+ folio_unlock(folio);
+ return;
+ }
- addr = kmap_atomic(page);
- memcpy(addr, iomap->inline_data, size);
- memset(addr + size, 0, PAGE_SIZE - size);
- kunmap_atomic(addr);
- SetPageUptodate(page);
-}
+ /*
+ * Subtract any bytes that were initially accounted to
+ * read_bytes_pending but skipped for IO. The +1 accounts for
+ * the bias we added in iomap_read_init().
+ */
+ ifs->read_bytes_pending -=
+ (folio_size(folio) + 1 - bytes_submitted);
-static inline bool iomap_block_needs_zeroing(struct inode *inode,
- struct iomap *iomap, loff_t pos)
-{
- return iomap->type != IOMAP_MAPPED ||
- (iomap->flags & IOMAP_F_NEW) ||
- pos >= i_size_read(inode);
+ /*
+ * If !ifs->read_bytes_pending, this means all pending reads by
+ * the IO helper have already completed, which means we need to
+ * end the folio read here. If ifs->read_bytes_pending != 0,
+ * the IO helper will end the folio read.
+ */
+ end_read = !ifs->read_bytes_pending;
+ if (end_read)
+ uptodate = ifs_is_fully_uptodate(folio, ifs);
+ spin_unlock_irq(&ifs->state_lock);
+ if (end_read)
+ folio_end_read(folio, uptodate);
+ } else if (!bytes_submitted) {
+ /*
+ * If there were no bytes submitted, this means we are
+ * responsible for unlocking the folio here, since no IO helper
+ * has taken ownership of it. If there were bytes submitted,
+ * then the IO helper will end the read via
+ * iomap_finish_folio_read().
+ */
+ folio_unlock(folio);
+ }
}
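
A numeric illustration of the bias scheme shared by iomap_read_init(), iomap_finish_folio_read() and iomap_read_end(), with assumed numbers (16KiB folio, 8KiB of it actually submitted to the IO helper):

/*
 * Worked example (assumed: folio_size = 16384, two 4096-byte ranges
 * submitted for IO, the remaining 8192 bytes uptodate or zeroed in place):
 *
 *   iomap_read_init():    read_bytes_pending = 16384 + 1 = 16385
 *   each IO completion:   read_bytes_pending -= 4096   (total -8192)
 *   iomap_read_end():     read_bytes_pending -= 16385 - 8192 = 8193
 *
 * Whichever side drops the counter to zero last - the final
 * iomap_finish_folio_read() or iomap_read_end() - ends the folio read.
 */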
-static loff_t
-iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap, struct iomap *srcmap)
+static int iomap_read_folio_iter(struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t *bytes_submitted)
{
- struct iomap_readpage_ctx *ctx = data;
- struct page *page = ctx->cur_page;
- struct iomap_page *iop = iomap_page_create(inode, page);
- bool same_page = false, is_contig = false;
- loff_t orig_pos = pos;
- unsigned poff, plen;
- sector_t sector;
+ const struct iomap *iomap = &iter->iomap;
+ loff_t pos = iter->pos;
+ loff_t length = iomap_length(iter);
+ struct folio *folio = ctx->cur_folio;
+ size_t poff, plen;
+ loff_t pos_diff;
+ int ret;
if (iomap->type == IOMAP_INLINE) {
- WARN_ON_ONCE(pos);
- iomap_read_inline_data(inode, page, iomap);
- return PAGE_SIZE;
- }
-
- /* zero post-eof blocks as the page may be mapped */
- iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
- if (plen == 0)
- goto done;
-
- if (iomap_block_needs_zeroing(inode, iomap, pos)) {
- zero_user(page, poff, plen);
- iomap_set_range_uptodate(page, poff, plen);
- goto done;
+ ret = iomap_read_inline_data(iter, folio);
+ if (ret)
+ return ret;
+ return iomap_iter_advance(iter, length);
}
- ctx->cur_page_in_bio = true;
+ ifs_alloc(iter->inode, folio, iter->flags);
- /*
- * Try to merge into a previous segment if we can.
- */
- sector = iomap_sector(iomap, pos);
- if (ctx->bio && bio_end_sector(ctx->bio) == sector)
- is_contig = true;
-
- if (is_contig &&
- __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
- if (!same_page && iop)
- atomic_inc(&iop->read_count);
- goto done;
- }
+ length = min_t(loff_t, length,
+ folio_size(folio) - offset_in_folio(folio, pos));
+ while (length) {
+ iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff,
+ &plen);
- /*
- * If we start a new segment we need to increase the read count, and we
- * need to do so before submitting any previous full bio to make sure
- * that we don't prematurely unlock the page.
- */
- if (iop)
- atomic_inc(&iop->read_count);
+ pos_diff = pos - iter->pos;
+ if (WARN_ON_ONCE(pos_diff + plen > length))
+ return -EIO;
- if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
- gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
- gfp_t orig_gfp = gfp;
- int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ret = iomap_iter_advance(iter, pos_diff);
+ if (ret)
+ return ret;
- if (ctx->bio)
- submit_bio(ctx->bio);
+ if (plen == 0)
+ return 0;
+
+ /* zero post-eof blocks as the page may be mapped */
+ if (iomap_block_needs_zeroing(iter, pos)) {
+ folio_zero_range(folio, poff, plen);
+ iomap_set_range_uptodate(folio, poff, plen);
+ } else {
+ if (!*bytes_submitted)
+ iomap_read_init(folio);
+ ret = ctx->ops->read_folio_range(iter, ctx, plen);
+ if (ret)
+ return ret;
+ *bytes_submitted += plen;
+ }
- if (ctx->rac) /* same as readahead_gfp_mask */
- gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
- /*
- * If the bio_alloc fails, try it again for a single page to
- * avoid having to deal with partial page reads. This emulates
- * what do_mpage_readpage does.
- */
- if (!ctx->bio)
- ctx->bio = bio_alloc(orig_gfp, 1);
- ctx->bio->bi_opf = REQ_OP_READ;
- if (ctx->rac)
- ctx->bio->bi_opf |= REQ_RAHEAD;
- ctx->bio->bi_iter.bi_sector = sector;
- bio_set_dev(ctx->bio, iomap->bdev);
- ctx->bio->bi_end_io = iomap_read_end_io;
+ ret = iomap_iter_advance(iter, plen);
+ if (ret)
+ return ret;
+ length -= pos_diff + plen;
+ pos = iter->pos;
}
-
- bio_add_page(ctx->bio, page, plen, poff);
-done:
- /*
- * Move the caller beyond our range so that it keeps making progress.
- * For that we have to include any leading non-uptodate ranges, but
- * we can skip trailing ones as they will be handled in the next
- * iteration.
- */
- return pos - orig_pos + plen;
+ return 0;
}
-int
-iomap_readpage(struct page *page, const struct iomap_ops *ops)
+void iomap_read_folio(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx)
{
- struct iomap_readpage_ctx ctx = { .cur_page = page };
- struct inode *inode = page->mapping->host;
- unsigned poff;
- loff_t ret;
-
- trace_iomap_readpage(page->mapping->host, 1);
-
- for (poff = 0; poff < PAGE_SIZE; poff += ret) {
- ret = iomap_apply(inode, page_offset(page) + poff,
- PAGE_SIZE - poff, 0, ops, &ctx,
- iomap_readpage_actor);
- if (ret <= 0) {
- WARN_ON_ONCE(ret == 0);
- SetPageError(page);
- break;
- }
- }
+ struct folio *folio = ctx->cur_folio;
+ struct iomap_iter iter = {
+ .inode = folio->mapping->host,
+ .pos = folio_pos(folio),
+ .len = folio_size(folio),
+ };
+ size_t bytes_submitted = 0;
+ int ret;
- if (ctx.bio) {
- submit_bio(ctx.bio);
- WARN_ON_ONCE(!ctx.cur_page_in_bio);
- } else {
- WARN_ON_ONCE(ctx.cur_page_in_bio);
- unlock_page(page);
- }
+ trace_iomap_readpage(iter.inode, 1);
- /*
- * Just like mpage_readahead and block_read_full_page we always
- * return 0 and just mark the page as PageError on errors. This
- * should be cleaned up all through the stack eventually.
- */
- return 0;
+ while ((ret = iomap_iter(&iter, ops)) > 0)
+ iter.status = iomap_read_folio_iter(&iter, ctx,
+ &bytes_submitted);
+
+ if (ctx->ops->submit_read)
+ ctx->ops->submit_read(ctx);
+
+ iomap_read_end(folio, bytes_submitted);
}
-EXPORT_SYMBOL_GPL(iomap_readpage);
+EXPORT_SYMBOL_GPL(iomap_read_folio);
-static loff_t
-iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap, struct iomap *srcmap)
+static int iomap_readahead_iter(struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t *cur_bytes_submitted)
{
- struct iomap_readpage_ctx *ctx = data;
- loff_t done, ret;
-
- for (done = 0; done < length; done += ret) {
- if (ctx->cur_page && offset_in_page(pos + done) == 0) {
- if (!ctx->cur_page_in_bio)
- unlock_page(ctx->cur_page);
- put_page(ctx->cur_page);
- ctx->cur_page = NULL;
+ int ret;
+
+ while (iomap_length(iter)) {
+ if (ctx->cur_folio &&
+ offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
+ iomap_read_end(ctx->cur_folio, *cur_bytes_submitted);
+ ctx->cur_folio = NULL;
}
- if (!ctx->cur_page) {
- ctx->cur_page = readahead_page(ctx->rac);
- ctx->cur_page_in_bio = false;
+ if (!ctx->cur_folio) {
+ ctx->cur_folio = readahead_folio(ctx->rac);
+ if (WARN_ON_ONCE(!ctx->cur_folio))
+ return -EINVAL;
+ *cur_bytes_submitted = 0;
}
- ret = iomap_readpage_actor(inode, pos + done, length - done,
- ctx, iomap, srcmap);
+ ret = iomap_read_folio_iter(iter, ctx, cur_bytes_submitted);
+ if (ret)
+ return ret;
}
- return done;
+ return 0;
}
/**
* iomap_readahead - Attempt to read pages from a file.
- * @rac: Describes the pages to be read.
* @ops: The operations vector for the filesystem.
+ * @ctx: The ctx used for issuing readahead.
*
* This function is for filesystems to call to implement their readahead
* address_space operation.
@@ -409,133 +619,126 @@ iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
* function is called with memalloc_nofs set, so allocations will not cause
* the filesystem to be reentered.
*/
-void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
+void iomap_readahead(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx)
{
- struct inode *inode = rac->mapping->host;
- loff_t pos = readahead_pos(rac);
- loff_t length = readahead_length(rac);
- struct iomap_readpage_ctx ctx = {
- .rac = rac,
+ struct readahead_control *rac = ctx->rac;
+ struct iomap_iter iter = {
+ .inode = rac->mapping->host,
+ .pos = readahead_pos(rac),
+ .len = readahead_length(rac),
};
+ size_t cur_bytes_submitted;
- trace_iomap_readahead(inode, readahead_count(rac));
+ trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
- while (length > 0) {
- loff_t ret = iomap_apply(inode, pos, length, 0, ops,
- &ctx, iomap_readahead_actor);
- if (ret <= 0) {
- WARN_ON_ONCE(ret == 0);
- break;
- }
- pos += ret;
- length -= ret;
- }
+ while (iomap_iter(&iter, ops) > 0)
+ iter.status = iomap_readahead_iter(&iter, ctx,
+ &cur_bytes_submitted);
- if (ctx.bio)
- submit_bio(ctx.bio);
- if (ctx.cur_page) {
- if (!ctx.cur_page_in_bio)
- unlock_page(ctx.cur_page);
- put_page(ctx.cur_page);
- }
+ if (ctx->ops->submit_read)
+ ctx->ops->submit_read(ctx);
+
+ if (ctx->cur_folio)
+ iomap_read_end(ctx->cur_folio, cur_bytes_submitted);
}
EXPORT_SYMBOL_GPL(iomap_readahead);
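
For context, a filesystem reaches iomap_read_folio() and iomap_readahead() through its address_space_operations. The sketch below is hypothetical: myfs_read_ops stands for an ops table supplying the read_folio_range()/submit_read() callbacks used above, and myfs_iomap_ops for the usual ->iomap_begin/->iomap_end table; only the ctx fields visible in this diff are relied on.

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	struct iomap_read_folio_ctx ctx = {
		.cur_folio	= folio,
		.ops		= &myfs_read_ops,
	};

	iomap_read_folio(&myfs_iomap_ops, &ctx);
	return 0;
}

static void myfs_readahead(struct readahead_control *rac)
{
	struct iomap_read_folio_ctx ctx = {
		.rac	= rac,
		.ops	= &myfs_read_ops,
	};

	iomap_readahead(&myfs_iomap_ops, &ctx);
}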
/*
- * iomap_is_partially_uptodate checks whether blocks within a page are
+ * iomap_is_partially_uptodate checks whether blocks within a folio are
* uptodate or not.
*
- * Returns true if all blocks which correspond to a file portion
- * we want to read within the page are uptodate.
+ * Returns true if all blocks which correspond to the specified part
+ * of the folio are uptodate.
*/
-int
-iomap_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count)
+bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
- struct iomap_page *iop = to_iomap_page(page);
- struct inode *inode = page->mapping->host;
- unsigned len, first, last;
- unsigned i;
+ struct iomap_folio_state *ifs = folio->private;
+ struct inode *inode = folio->mapping->host;
+ unsigned first, last;
- /* Limit range to one page */
- len = min_t(unsigned, PAGE_SIZE - from, count);
+ if (!ifs)
+ return false;
- /* First and last blocks in range within page */
- first = from >> inode->i_blkbits;
- last = (from + len - 1) >> inode->i_blkbits;
+ /* Caller's range may extend past the end of this folio */
+ count = min(folio_size(folio) - from, count);
- if (iop) {
- for (i = first; i <= last; i++)
- if (!test_bit(i, iop->uptodate))
- return 0;
- return 1;
- }
+ /* First and last blocks in range within folio */
+ first = from >> inode->i_blkbits;
+ last = (from + count - 1) >> inode->i_blkbits;
- return 0;
+ return ifs_next_nonuptodate_block(folio, first, last) > last;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
-int
-iomap_releasepage(struct page *page, gfp_t gfp_mask)
+/**
+ * iomap_get_folio - get a folio reference for writing
+ * @iter: iteration structure
+ * @pos: start offset of write
+ * @len: Suggested size of folio to create.
+ *
+ * Returns a locked reference to the folio at @pos, or an error pointer if the
+ * folio could not be obtained.
+ */
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
+{
+ fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
+
+ if (iter->flags & IOMAP_NOWAIT)
+ fgp |= FGP_NOWAIT;
+ if (iter->flags & IOMAP_DONTCACHE)
+ fgp |= FGP_DONTCACHE;
+ fgp |= fgf_set_order(len);
+
+ return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+ fgp, mapping_gfp_mask(iter->inode->i_mapping));
+}
+EXPORT_SYMBOL_GPL(iomap_get_folio);
+
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
- trace_iomap_releasepage(page->mapping->host, page_offset(page),
- PAGE_SIZE);
+ trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
+ folio_size(folio));
/*
- * mm accommodates an old ext3 case where clean pages might not have had
- * the dirty bit cleared. Thus, it can send actual dirty pages to
- * ->releasepage() via shrink_active_list(), skip those here.
+ * If the folio is dirty, we refuse to release our metadata because
+ * it may be partially dirty. Once we track per-block dirty state,
+ * we can release the metadata if every block is dirty.
*/
- if (PageDirty(page) || PageWriteback(page))
- return 0;
- iomap_page_release(page);
- return 1;
+ if (folio_test_dirty(folio))
+ return false;
+ ifs_free(folio);
+ return true;
}
-EXPORT_SYMBOL_GPL(iomap_releasepage);
+EXPORT_SYMBOL_GPL(iomap_release_folio);
-void
-iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
- trace_iomap_invalidatepage(page->mapping->host, offset, len);
+ trace_iomap_invalidate_folio(folio->mapping->host,
+ folio_pos(folio) + offset, len);
/*
- * If we are invalidating the entire page, clear the dirty state from it
- * and release it to avoid unnecessary buildup of the LRU.
+ * If we're invalidating the entire folio, clear the dirty state
+ * from it and release it to avoid unnecessary buildup of the LRU.
*/
- if (offset == 0 && len == PAGE_SIZE) {
- WARN_ON_ONCE(PageWriteback(page));
- cancel_dirty_page(page);
- iomap_page_release(page);
+ if (offset == 0 && len == folio_size(folio)) {
+ WARN_ON_ONCE(folio_test_writeback(folio));
+ folio_cancel_dirty(folio);
+ ifs_free(folio);
}
}
-EXPORT_SYMBOL_GPL(iomap_invalidatepage);
+EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
-#ifdef CONFIG_MIGRATION
-int
-iomap_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode)
+bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
- int ret;
-
- ret = migrate_page_move_mapping(mapping, newpage, page, 0);
- if (ret != MIGRATEPAGE_SUCCESS)
- return ret;
-
- if (page_has_private(page))
- attach_page_private(newpage, detach_page_private(page));
+ struct inode *inode = mapping->host;
+ size_t len = folio_size(folio);
- if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
- else
- migrate_page_states(newpage, page);
- return MIGRATEPAGE_SUCCESS;
+ ifs_alloc(inode, folio, 0);
+ iomap_set_range_dirty(folio, 0, len);
+ return filemap_dirty_folio(mapping, folio);
}
-EXPORT_SYMBOL_GPL(iomap_migrate_page);
-#endif /* CONFIG_MIGRATION */
-
-enum {
- IOMAP_WRITE_F_UNSHARE = (1 << 0),
-};
+EXPORT_SYMBOL_GPL(iomap_dirty_folio);
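
Building on the hypothetical myfs_read_folio()/myfs_readahead() sketched earlier, the exported folio-state helpers above are intended to be wired straight into the filesystem's aops; a sketch (the myfs_-prefixed names are assumptions):

static const struct address_space_operations myfs_aops = {
	.read_folio		= myfs_read_folio,
	.readahead		= myfs_readahead,
	.dirty_folio		= iomap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.migrate_folio		= filemap_migrate_folio,
};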
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
@@ -547,462 +750,875 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
* write started inside the existing inode size.
*/
if (pos + len > i_size)
- truncate_pagecache_range(inode, max(pos, i_size), pos + len);
+ truncate_pagecache_range(inode, max(pos, i_size),
+ pos + len - 1);
}
-static int
-iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
- unsigned plen, struct iomap *iomap)
+static int __iomap_write_begin(const struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t len,
+ struct folio *folio)
{
- struct bio_vec bvec;
- struct bio bio;
-
- bio_init(&bio, &bvec, 1);
- bio.bi_opf = REQ_OP_READ;
- bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
- bio_set_dev(&bio, iomap->bdev);
- __bio_add_page(&bio, page, plen, poff);
- return submit_bio_wait(&bio);
-}
+ struct iomap_folio_state *ifs;
+ loff_t pos = iter->pos;
+ loff_t block_size = i_blocksize(iter->inode);
+ loff_t block_start = round_down(pos, block_size);
+ loff_t block_end = round_up(pos + len, block_size);
+ unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
+ size_t from = offset_in_folio(folio, pos), to = from + len;
+ size_t poff, plen;
-static int
-__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
- struct page *page, struct iomap *srcmap)
-{
- struct iomap_page *iop = iomap_page_create(inode, page);
- loff_t block_size = i_blocksize(inode);
- loff_t block_start = pos & ~(block_size - 1);
- loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
- unsigned from = offset_in_page(pos), to = from + len, poff, plen;
- int status;
+ /*
+	 * If the write or zeroing completely overlaps the current folio, then the
+	 * entire folio will be dirtied, so there is no need for
+ * per-block state tracking structures to be attached to this folio.
+ * For the unshare case, we must read in the ondisk contents because we
+ * are not changing pagecache contents.
+ */
+ if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
+ pos + len >= folio_next_pos(folio))
+ return 0;
+
+ ifs = ifs_alloc(iter->inode, folio, iter->flags);
+ if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
+ return -EAGAIN;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return 0;
do {
- iomap_adjust_read_range(inode, iop, &block_start,
+ iomap_adjust_read_range(iter->inode, folio, &block_start,
block_end - block_start, &poff, &plen);
if (plen == 0)
break;
- if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
- (from <= poff || from >= poff + plen) &&
- (to <= poff || to >= poff + plen))
+ /*
+ * If the read range will be entirely overwritten by the write,
+ * we can skip having to zero/read it in.
+ */
+ if (!(iter->flags & IOMAP_UNSHARE) && from <= poff &&
+ to >= poff + plen)
continue;
- if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
- if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
+ if (iomap_block_needs_zeroing(iter, block_start)) {
+ if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
return -EIO;
- zero_user_segments(page, poff, from, to, poff + plen);
- iomap_set_range_uptodate(page, poff, plen);
- continue;
+ folio_zero_segments(folio, poff, from, to, poff + plen);
+ } else {
+ int status;
+
+ if (iter->flags & IOMAP_NOWAIT)
+ return -EAGAIN;
+
+ if (write_ops && write_ops->read_folio_range)
+ status = write_ops->read_folio_range(iter,
+ folio, block_start, plen);
+ else
+ status = iomap_bio_read_folio_range_sync(iter,
+ folio, block_start, plen);
+ if (status)
+ return status;
}
-
- status = iomap_read_page_sync(block_start, page, poff, plen,
- srcmap);
- if (status)
- return status;
+ iomap_set_range_uptodate(folio, poff, plen);
} while ((block_start += plen) < block_end);
return 0;
}
-static int
-iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
+static struct folio *__iomap_get_folio(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t len)
{
- const struct iomap_page_ops *page_ops = iomap->page_ops;
- struct page *page;
+ loff_t pos = iter->pos;
+
+ if (!mapping_large_folio_support(iter->inode->i_mapping))
+ len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
+
+ if (iter->fbatch) {
+ struct folio *folio = folio_batch_next(iter->fbatch);
+
+ if (!folio)
+ return NULL;
+
+ /*
+ * The folio mapping generally shouldn't have changed based on
+ * fs locks, but be consistent with filemap lookup and retry
+ * the iter if it does.
+ */
+ folio_lock(folio);
+ if (unlikely(folio->mapping != iter->inode->i_mapping)) {
+ iter->iomap.flags |= IOMAP_F_STALE;
+ folio_unlock(folio);
+ return NULL;
+ }
+
+ folio_get(folio);
+ return folio;
+ }
+
+ if (write_ops && write_ops->get_folio)
+ return write_ops->get_folio(iter, pos, len);
+ return iomap_get_folio(iter, pos, len);
+}
+
+static void __iomap_put_folio(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t ret,
+ struct folio *folio)
+{
+ loff_t pos = iter->pos;
+
+ if (write_ops && write_ops->put_folio) {
+ write_ops->put_folio(iter->inode, pos, ret, folio);
+ } else {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+}
+
+/* trim pos and bytes to within a given folio */
+static loff_t iomap_trim_folio_range(struct iomap_iter *iter,
+ struct folio *folio, size_t *offset, u64 *bytes)
+{
+ loff_t pos = iter->pos;
+ size_t fsize = folio_size(folio);
+
+ WARN_ON_ONCE(pos < folio_pos(folio));
+ WARN_ON_ONCE(pos >= folio_pos(folio) + fsize);
+
+ *offset = offset_in_folio(folio, pos);
+ *bytes = min(*bytes, fsize - *offset);
+
+ return pos;
+}
+
+static int iomap_write_begin_inline(const struct iomap_iter *iter,
+ struct folio *folio)
+{
+ /* needs more work for the tailpacking case; disable for now */
+ if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
+ return -EIO;
+ return iomap_read_inline_data(iter, folio);
+}
+
+/*
+ * Grab and prepare a folio for write based on iter state. Returns the folio,
+ * offset, and length. Callers can optionally pass a max length *plen,
+ * otherwise init to zero.
+ */
+static int iomap_write_begin(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, struct folio **foliop,
+ size_t *poffset, u64 *plen)
+{
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ loff_t pos;
+ u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
+ struct folio *folio;
int status = 0;
- BUG_ON(pos + len > iomap->offset + iomap->length);
- if (srcmap != iomap)
- BUG_ON(pos + len > srcmap->offset + srcmap->length);
+ len = min_not_zero(len, *plen);
+ *foliop = NULL;
+ *plen = 0;
if (fatal_signal_pending(current))
return -EINTR;
- if (page_ops && page_ops->page_prepare) {
- status = page_ops->page_prepare(inode, pos, len, iomap);
- if (status)
- return status;
+ folio = __iomap_get_folio(iter, write_ops, len);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ /*
+ * No folio means we're done with a batch. We still have range to
+ * process so return and let the caller iterate and refill the batch.
+ */
+ if (!folio) {
+ WARN_ON_ONCE(!iter->fbatch);
+ return 0;
}
- page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
- AOP_FLAG_NOFS);
- if (!page) {
- status = -ENOMEM;
- goto out_no_page;
+ /*
+ * Now we have a locked folio, before we do anything with it we need to
+ * check that the iomap we have cached is not stale. The inode extent
+ * mapping can change due to concurrent IO in flight (e.g.
+ * IOMAP_UNWRITTEN state can change and memory reclaim could have
+ * reclaimed a previously partially written page at this index after IO
+ * completion before this write reaches this file offset) and hence we
+ * could do the wrong thing here (zero a page range incorrectly or fail
+ * to zero) and corrupt data.
+ */
+ if (write_ops && write_ops->iomap_valid) {
+ bool iomap_valid = write_ops->iomap_valid(iter->inode,
+ &iter->iomap);
+ if (!iomap_valid) {
+ iter->iomap.flags |= IOMAP_F_STALE;
+ status = 0;
+ goto out_unlock;
+ }
}
+ /*
+ * The folios in a batch may not be contiguous. If we've skipped
+ * forward, advance the iter to the pos of the current folio. If the
+ * folio starts beyond the end of the mapping, it may have been trimmed
+ * since the lookup for whatever reason. Return a NULL folio to
+ * terminate the op.
+ */
+ if (folio_pos(folio) > iter->pos) {
+ len = min_t(u64, folio_pos(folio) - iter->pos,
+ iomap_length(iter));
+ status = iomap_iter_advance(iter, len);
+ len = iomap_length(iter);
+ if (status || !len)
+ goto out_unlock;
+ }
+
+ pos = iomap_trim_folio_range(iter, folio, poffset, &len);
+
if (srcmap->type == IOMAP_INLINE)
- iomap_read_inline_data(inode, page, srcmap);
- else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
- status = __block_write_begin_int(page, pos, len, NULL, srcmap);
+ status = iomap_write_begin_inline(iter, folio);
+ else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
+ status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
else
- status = __iomap_write_begin(inode, pos, len, flags, page,
- srcmap);
+ status = __iomap_write_begin(iter, write_ops, len, folio);
if (unlikely(status))
goto out_unlock;
- *pagep = page;
+ *foliop = folio;
+ *plen = len;
return 0;
out_unlock:
- unlock_page(page);
- put_page(page);
- iomap_write_failed(inode, pos, len);
-
-out_no_page:
- if (page_ops && page_ops->page_done)
- page_ops->page_done(inode, pos, 0, NULL, iomap);
+ __iomap_put_folio(iter, write_ops, 0, folio);
return status;
}
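
The revalidation above only works if the filesystem can tell whether the mapping it handed out in ->iomap_begin is still current once the folio is locked. A hypothetical sketch of such a ->iomap_valid hook, assuming the filesystem stamps a per-inode sequence counter into iomap->validity_cookie when building the mapping (the myfs_/MYFS_I/map_seq names are invented):

static bool myfs_iomap_valid(struct inode *inode, const struct iomap *iomap)
{
	/* Did the extent map change between ->iomap_begin and folio lock? */
	return iomap->validity_cookie == READ_ONCE(MYFS_I(inode)->map_seq);
}

static const struct iomap_write_ops myfs_write_ops = {
	.iomap_valid	= myfs_iomap_valid,
};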
-int
-iomap_set_page_dirty(struct page *page)
-{
- struct address_space *mapping = page_mapping(page);
- int newly_dirty;
-
- if (unlikely(!mapping))
- return !TestSetPageDirty(page);
-
- /*
- * Lock out page->mem_cgroup migration to keep PageDirty
- * synchronized with per-memcg dirty page counters.
- */
- lock_page_memcg(page);
- newly_dirty = !TestSetPageDirty(page);
- if (newly_dirty)
- __set_page_dirty(page, mapping, 0);
- unlock_page_memcg(page);
-
- if (newly_dirty)
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- return newly_dirty;
-}
-EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
-
-static int
-__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page)
+static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+ size_t copied, struct folio *folio)
{
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
/*
* The blocks that were entirely written will now be uptodate, so we
- * don't have to worry about a readpage reading them and overwriting a
- * partial write. However if we have encountered a short write and only
+ * don't have to worry about a read_folio reading them and overwriting a
+ * partial write. However, if we've encountered a short write and only
* partially written into a block, it will not be marked uptodate, so a
- * readpage might come in and destroy our partial write.
+ * read_folio might come in and destroy our partial write.
*
- * Do the simplest thing, and just treat any short write to a non
- * uptodate page as a zero-length write, and force the caller to redo
- * the whole thing.
+ * Do the simplest thing and just treat any short write to a
+ * non-uptodate page as a zero-length write, and force the caller to
+ * redo the whole thing.
*/
- if (unlikely(copied < len && !PageUptodate(page)))
- return 0;
- iomap_set_range_uptodate(page, offset_in_page(pos), len);
- iomap_set_page_dirty(page);
- return copied;
+ if (unlikely(copied < len && !folio_test_uptodate(folio)))
+ return false;
+ iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
+ iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
+ filemap_dirty_folio(inode->i_mapping, folio);
+ return true;
}
-static int
-iomap_write_end_inline(struct inode *inode, struct page *page,
- struct iomap *iomap, loff_t pos, unsigned copied)
+static bool iomap_write_end_inline(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t copied)
{
+ const struct iomap *iomap = &iter->iomap;
void *addr;
- WARN_ON_ONCE(!PageUptodate(page));
- BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
+ WARN_ON_ONCE(!folio_test_uptodate(folio));
+ BUG_ON(!iomap_inline_data_valid(iomap));
+
+ if (WARN_ON_ONCE(!iomap->inline_data))
+ return false;
- addr = kmap_atomic(page);
- memcpy(iomap->inline_data + pos, addr + pos, copied);
- kunmap_atomic(addr);
+ flush_dcache_folio(folio);
+ addr = kmap_local_folio(folio, pos);
+ memcpy(iomap_inline_data(iomap, pos), addr, copied);
+ kunmap_local(addr);
- mark_inode_dirty(inode);
- return copied;
+ mark_inode_dirty(iter->inode);
+ return true;
}
-static int
-iomap_write_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied,
- struct page *page, struct iomap *iomap, struct iomap *srcmap)
+/*
+ * Returns true if all copied bytes have been written to the pagecache,
+ * otherwise return false.
+ */
+static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
+ struct folio *folio)
{
- const struct iomap_page_ops *page_ops = iomap->page_ops;
- loff_t old_size = inode->i_size;
- int ret;
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ loff_t pos = iter->pos;
- if (srcmap->type == IOMAP_INLINE) {
- ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
- } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
- ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
- page, NULL);
- } else {
- ret = __iomap_write_end(inode, pos, len, copied, page);
- }
+ if (srcmap->type == IOMAP_INLINE)
+ return iomap_write_end_inline(iter, folio, pos, copied);
- /*
- * Update the in-memory inode size after copying the data into the page
- * cache. It's up to the file system to write the updated size to disk,
- * preferably after I/O completion so that no stale data is exposed.
- */
- if (pos + ret > old_size) {
- i_size_write(inode, pos + ret);
- iomap->flags |= IOMAP_F_SIZE_CHANGED;
- }
- unlock_page(page);
+ if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+ size_t bh_written;
- if (old_size < pos)
- pagecache_isize_extended(inode, old_size, pos);
- if (page_ops && page_ops->page_done)
- page_ops->page_done(inode, pos, ret, page, iomap);
- put_page(page);
+ bh_written = block_write_end(pos, len, copied, folio);
+ WARN_ON_ONCE(bh_written != copied && bh_written != 0);
+ return bh_written == copied;
+ }
- if (ret < len)
- iomap_write_failed(inode, pos, len);
- return ret;
+ return __iomap_write_end(iter->inode, pos, len, copied, folio);
}
-static loff_t
-iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap, struct iomap *srcmap)
+static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
+ const struct iomap_write_ops *write_ops)
{
- struct iov_iter *i = data;
- long status = 0;
- ssize_t written = 0;
+ ssize_t total_written = 0;
+ int status = 0;
+ struct address_space *mapping = iter->inode->i_mapping;
+ size_t chunk = mapping_max_folio_size(mapping);
+ unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
do {
- struct page *page;
- unsigned long offset; /* Offset into pagecache page */
- unsigned long bytes; /* Bytes to write to page */
+ struct folio *folio;
+ loff_t old_size;
+ size_t offset; /* Offset into folio */
+ u64 bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
+ u64 written; /* Bytes have been written */
+ loff_t pos;
+
+ bytes = iov_iter_count(i);
+retry:
+ offset = iter->pos & (chunk - 1);
+ bytes = min(chunk - offset, bytes);
+ status = balance_dirty_pages_ratelimited_flags(mapping,
+ bdp_flags);
+ if (unlikely(status))
+ break;
- offset = offset_in_page(pos);
- bytes = min_t(unsigned long, PAGE_SIZE - offset,
- iov_iter_count(i));
-again:
- if (bytes > length)
- bytes = length;
+ if (bytes > iomap_length(iter))
+ bytes = iomap_length(iter);
/*
- * Bring in the user page that we will copy from _first_.
+ * Bring in the user page that we'll copy from _first_.
* Otherwise there's a nasty deadlock on copying from the
* same page as we're writing to, without it being marked
* up-to-date.
*
- * Not only is this an optimisation, but it is also required
- * to check that the address is actually valid, when atomic
- * usercopies are used, below.
+ * For async buffered writes the assumption is that the user
+ * page has already been faulted in. This can be optimized by
+ * faulting the user page.
*/
- if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+ if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
status = -EFAULT;
break;
}
- status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
- srcmap);
- if (unlikely(status))
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
+ if (unlikely(status)) {
+ iomap_write_failed(iter->inode, iter->pos, bytes);
+ break;
+ }
+ if (iter->iomap.flags & IOMAP_F_STALE)
break;
- if (mapping_writably_mapped(inode->i_mapping))
- flush_dcache_page(page);
+ pos = iter->pos;
- copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+ if (mapping_writably_mapped(mapping))
+ flush_dcache_folio(folio);
- flush_dcache_page(page);
+ copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
+ written = iomap_write_end(iter, bytes, copied, folio) ?
+ copied : 0;
- status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
- srcmap);
- if (unlikely(status < 0))
- break;
- copied = status;
+ /*
+ * Update the in-memory inode size after copying the data into
+ * the page cache. It's up to the file system to write the
+ * updated size to disk, preferably after I/O completion so that
+ * no stale data is exposed. Only once that's done can we
+ * unlock and release the folio.
+ */
+ old_size = iter->inode->i_size;
+ if (pos + written > old_size) {
+ i_size_write(iter->inode, pos + written);
+ iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
+ }
+ __iomap_put_folio(iter, write_ops, written, folio);
- cond_resched();
+ if (old_size < pos)
+ pagecache_isize_extended(iter->inode, old_size, pos);
- iov_iter_advance(i, copied);
- if (unlikely(copied == 0)) {
+ cond_resched();
+ if (unlikely(written == 0)) {
/*
- * If we were unable to copy any data at all, we must
- * fall back to a single segment length write.
- *
- * If we didn't fallback here, we could livelock
- * because not all segments in the iov can be copied at
- * once without a pagefault.
+ * A short copy made iomap_write_end() reject the
+ * thing entirely. Might be memory poisoning
+ * halfway through, might be a race with munmap,
+ * might be severe memory pressure.
*/
- bytes = min_t(unsigned long, PAGE_SIZE - offset,
- iov_iter_single_seg_count(i));
- goto again;
+ iomap_write_failed(iter->inode, pos, bytes);
+ iov_iter_revert(i, copied);
+
+ if (chunk > PAGE_SIZE)
+ chunk /= 2;
+ if (copied) {
+ bytes = copied;
+ goto retry;
+ }
+ } else {
+ total_written += written;
+ iomap_iter_advance(iter, written);
}
- pos += copied;
- written += copied;
- length -= copied;
-
- balance_dirty_pages_ratelimited(inode->i_mapping);
- } while (iov_iter_count(i) && length);
+ } while (iov_iter_count(i) && iomap_length(iter));
- return written ? written : status;
+ return total_written ? 0 : status;
}
ssize_t
-iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops)
+iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
- struct inode *inode = iocb->ki_filp->f_mapping->host;
- loff_t pos = iocb->ki_pos, ret = 0, written = 0;
+ struct iomap_iter iter = {
+ .inode = iocb->ki_filp->f_mapping->host,
+ .pos = iocb->ki_pos,
+ .len = iov_iter_count(i),
+ .flags = IOMAP_WRITE,
+ .private = private,
+ };
+ ssize_t ret;
+
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ iter.flags |= IOMAP_NOWAIT;
+ if (iocb->ki_flags & IOCB_DONTCACHE)
+ iter.flags |= IOMAP_DONTCACHE;
+
+ while ((ret = iomap_iter(&iter, ops)) > 0)
+ iter.status = iomap_write_iter(&iter, i, write_ops);
+
+ if (unlikely(iter.pos == iocb->ki_pos))
+ return ret;
+ ret = iter.pos - iocb->ki_pos;
+ iocb->ki_pos = iter.pos;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
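
A hypothetical example of how a filesystem's ->write_iter could drive the loop above, passing NULL for the optional write_ops and private arguments (the myfs_ names are assumptions; locking details and post-write syncing are elided):

static ssize_t myfs_file_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from,
				&myfs_buffered_write_iomap_ops, NULL, NULL);
	inode_unlock(inode);
	return ret;
}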
+
+static void iomap_write_delalloc_ifs_punch(struct inode *inode,
+ struct folio *folio, loff_t start_byte, loff_t end_byte,
+ struct iomap *iomap, iomap_punch_t punch)
+{
+ unsigned int first_blk, last_blk;
+ loff_t last_byte;
+ u8 blkbits = inode->i_blkbits;
+ struct iomap_folio_state *ifs;
+
+ /*
+ * When we have per-block dirty tracking, there can be
+ * blocks within a folio which are marked uptodate
+ * but not dirty. In that case it is necessary to punch
+ * out such blocks to avoid leaking any delalloc blocks.
+ */
+ ifs = folio->private;
+ if (!ifs)
+ return;
+
+ last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
+ first_blk = offset_in_folio(folio, start_byte) >> blkbits;
+ last_blk = offset_in_folio(folio, last_byte) >> blkbits;
+ while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
+ <= last_blk) {
+ punch(inode, folio_pos(folio) + (first_blk << blkbits),
+ 1 << blkbits, iomap);
+ first_blk++;
+ }
+}
+
+static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
+ loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
+ struct iomap *iomap, iomap_punch_t punch)
+{
+ if (!folio_test_dirty(folio))
+ return;
+
+ /* if dirty, punch up to offset */
+ if (start_byte > *punch_start_byte) {
+ punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
+ iomap);
+ }
+
+ /* Punch non-dirty blocks within folio */
+ iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
+ iomap, punch);
- while (iov_iter_count(iter)) {
- ret = iomap_apply(inode, pos, iov_iter_count(iter),
- IOMAP_WRITE, ops, iter, iomap_write_actor);
- if (ret <= 0)
+ /*
+ * Make sure the next punch start is correctly bound to
+ * the end of this data range, not the end of the folio.
+ */
+ *punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
+}
+
+/*
+ * Scan the data range passed to us for dirty page cache folios. If we find a
+ * dirty folio, punch out the preceding range and update the offset from which
+ * the next punch will start from.
+ *
+ * We can punch out storage reservations under clean pages because they either
+ * contain data that has been written back - in which case the delalloc punch
+ * over that range is a no-op - or they were instantiated by read faults, in which case they
+ * contain zeroes and we can remove the delalloc backing range and any new
+ * writes to those pages will do the normal hole filling operation...
+ *
+ * This makes the logic simple: we only need to keep the delalloc extents
+ * over the dirty ranges of the page cache.
+ *
+ * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
+ * simplify range iterations.
+ */
+static void iomap_write_delalloc_scan(struct inode *inode,
+ loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
+ struct iomap *iomap, iomap_punch_t punch)
+{
+ while (start_byte < end_byte) {
+ struct folio *folio;
+
+ /* grab locked page */
+ folio = filemap_lock_folio(inode->i_mapping,
+ start_byte >> PAGE_SHIFT);
+ if (IS_ERR(folio)) {
+ start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
+ PAGE_SIZE;
+ continue;
+ }
+
+ iomap_write_delalloc_punch(inode, folio, punch_start_byte,
+ start_byte, end_byte, iomap, punch);
+
+ /* move offset to start of next folio in range */
+ start_byte = folio_next_pos(folio);
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+}
+
+/*
+ * When a short write occurs, the filesystem might need to use ->iomap_end
+ * to remove space reservations created in ->iomap_begin.
+ *
+ * For filesystems that use delayed allocation, there can be dirty pages over
+ * the delalloc extent outside the range of a short write but still within the
+ * delalloc extent allocated for this iomap if the write raced with page
+ * faults.
+ *
+ * Punch out all the delalloc blocks in the range given except for those that
+ * have dirty data still pending in the page cache - those are going to be
+ * written and so must still retain the delalloc backing for writeback.
+ *
+ * The punch() callback *must* only punch delalloc extents in the range passed
+ * to it. It must skip over all other types of extents in the range and leave
+ * them completely unchanged. It must do this punch atomically with respect to
+ * other extent modifications.
+ *
+ * The punch() callback may be called with a folio locked to prevent writeback
+ * extent allocation racing at the edge of the range we are currently punching.
+ * The locked folio may or may not cover the range being punched, so it is not
+ * safe for the punch() callback to lock folios itself.
+ *
+ * Lock order is:
+ *
+ * inode->i_rwsem (shared or exclusive)
+ * inode->i_mapping->invalidate_lock (exclusive)
+ * folio_lock()
+ * ->punch
+ * internal filesystem allocation lock
+ *
+ * As we are scanning the page cache for data, we don't need to reimplement the
+ * wheel - mapping_seek_hole_data() does exactly what we need to identify the
+ * start and end of data ranges correctly even for sub-folio block sizes. This
+ * byte range based iteration is especially convenient because it means we
+ * don't have to care about variable size folios, nor where the start or end of
+ * the data range lies within a folio, whether they lie within the same folio,
+ * or even whether there are multiple discontiguous data ranges within the
+ * folio.
+ *
+ * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
+ * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
+ * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
+ * date. A write page fault can then mark it dirty. If we then fail a write()
+ * beyond EOF into that up to date cached range, we allocate a delalloc block
+ * beyond EOF and then have to punch it out. Because the range is up to date,
+ * mapping_seek_hole_data() will return it, and we will skip the punch because
+ * the folio is dirty. This is incorrect - we always need to punch out delalloc
+ * beyond EOF in this case as writeback will never write back and convert that
+ * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
+ * resulting in always punching out the range from the EOF to the end of the
+ * range the iomap spans.
+ *
+ * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because
+ * they match the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
+ * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
+ * returns the end of the data range (data_end). Using closed intervals would
+ * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
+ * the code to subtle off-by-one bugs....
+ */
+void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+ loff_t end_byte, unsigned flags, struct iomap *iomap,
+ iomap_punch_t punch)
+{
+ loff_t punch_start_byte = start_byte;
+ loff_t scan_end_byte = min(i_size_read(inode), end_byte);
+
+ /*
+ * The caller must hold invalidate_lock to avoid races with page faults
+ * re-instantiating folios and dirtying them via ->page_mkwrite whilst
+ * we walk the cache and perform delalloc extent removal. Failing to do
+ * this can leave dirty pages with no space reservation in the cache.
+ */
+ lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);
+
+ while (start_byte < scan_end_byte) {
+ loff_t data_end;
+
+ start_byte = mapping_seek_hole_data(inode->i_mapping,
+ start_byte, scan_end_byte, SEEK_DATA);
+ /*
+ * If there is no more data to scan, all that is left is to
+ * punch out the remaining range.
+ *
+ * Note that mapping_seek_hole_data is only supposed to return
+ * either an offset or -ENXIO, so WARN on any other error as
+ * that would be an API change without updating the callers.
+ */
+ if (start_byte == -ENXIO || start_byte == scan_end_byte)
break;
- pos += ret;
- written += ret;
+ if (WARN_ON_ONCE(start_byte < 0))
+ return;
+ WARN_ON_ONCE(start_byte < punch_start_byte);
+ WARN_ON_ONCE(start_byte > scan_end_byte);
+
+ /*
+ * We find the end of this contiguous cached data range by
+ * seeking from start_byte to the beginning of the next hole.
+ */
+ data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
+ scan_end_byte, SEEK_HOLE);
+ if (WARN_ON_ONCE(data_end < 0))
+ return;
+
+ /*
+ * If we race with post-direct I/O invalidation of the page cache,
+ * there might be no data left at start_byte.
+ */
+ if (data_end == start_byte)
+ continue;
+
+ WARN_ON_ONCE(data_end < start_byte);
+ WARN_ON_ONCE(data_end > scan_end_byte);
+
+ iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
+ data_end, iomap, punch);
+
+ /* The next data search starts at the end of this one. */
+ start_byte = data_end;
}
- return written ? written : ret;
+ if (punch_start_byte < end_byte)
+ punch(inode, punch_start_byte, end_byte - punch_start_byte,
+ iomap);
}
-EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
+EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
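As a rough illustration of the calling convention documented above, a hedged sketch of a delalloc filesystem's ->iomap_end for buffered writes; my_fs_remove_delalloc_blocks and the exact start rounding are assumptions, and a real filesystem would typically align the released range to its block size.

static void my_fs_punch_delalloc(struct inode *inode, loff_t offset,
		loff_t length, struct iomap *iomap)
{
	/* must only remove delalloc extents inside [offset, offset + length) */
	my_fs_remove_delalloc_blocks(inode, offset, length);
}

static int my_fs_buffered_write_iomap_end(struct inode *inode, loff_t pos,
		loff_t length, ssize_t written, unsigned flags,
		struct iomap *iomap)
{
	if (iomap->type != IOMAP_DELALLOC || written == length)
		return 0;

	/* hold invalidate_lock exclusively, as required above */
	filemap_invalidate_lock(inode->i_mapping);
	iomap_write_delalloc_release(inode, pos + written, pos + length,
			flags, iomap, my_fs_punch_delalloc);
	filemap_invalidate_unlock(inode->i_mapping);
	return 0;
}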
-static loff_t
-iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap, struct iomap *srcmap)
+static int iomap_unshare_iter(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops)
{
- long status = 0;
- loff_t written = 0;
+ struct iomap *iomap = &iter->iomap;
+ u64 bytes = iomap_length(iter);
+ int status;
- /* don't bother with blocks that are not shared to start with */
- if (!(iomap->flags & IOMAP_F_SHARED))
- return length;
- /* don't bother with holes or unwritten extents */
- if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
- return length;
+ if (!iomap_want_unshare_iter(iter))
+ return iomap_iter_advance(iter, bytes);
do {
- unsigned long offset = offset_in_page(pos);
- unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
- struct page *page;
+ struct folio *folio;
+ size_t offset;
+ bool ret;
- status = iomap_write_begin(inode, pos, bytes,
- IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
+ bytes = min_t(u64, SIZE_MAX, bytes);
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
if (unlikely(status))
return status;
+ if (iomap->flags & IOMAP_F_STALE)
+ break;
- status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
- srcmap);
- if (unlikely(status <= 0)) {
- if (WARN_ON_ONCE(status == 0))
- return -EIO;
- return status;
- }
+ ret = iomap_write_end(iter, bytes, bytes, folio);
+ __iomap_put_folio(iter, write_ops, bytes, folio);
+ if (WARN_ON_ONCE(!ret))
+ return -EIO;
cond_resched();
- pos += status;
- written += status;
- length -= status;
+ balance_dirty_pages_ratelimited(iter->inode->i_mapping);
- balance_dirty_pages_ratelimited(inode->i_mapping);
- } while (length);
+ status = iomap_iter_advance(iter, bytes);
+ if (status)
+ break;
+ } while ((bytes = iomap_length(iter)) > 0);
- return written;
+ return status;
}
int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops)
{
- loff_t ret;
+ struct iomap_iter iter = {
+ .inode = inode,
+ .pos = pos,
+ .flags = IOMAP_WRITE | IOMAP_UNSHARE,
+ };
+ loff_t size = i_size_read(inode);
+ int ret;
- while (len) {
- ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
- iomap_unshare_actor);
- if (ret <= 0)
- return ret;
- pos += ret;
- len -= ret;
- }
+ if (pos < 0 || pos >= size)
+ return 0;
- return 0;
+ iter.len = min(len, size - pos);
+ while ((ret = iomap_iter(&iter, ops)) > 0)
+ iter.status = iomap_unshare_iter(&iter, write_ops);
+ return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
-static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
- unsigned bytes, struct iomap *iomap, struct iomap *srcmap)
+/*
+ * Flush the remaining range of the iter and mark the current mapping stale.
+ * This is used when zero range sees an unwritten mapping that may have had
+ * dirty pagecache over it.
+ */
+static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
{
- struct page *page;
- int status;
-
- status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
- if (status)
- return status;
+ struct address_space *mapping = i->inode->i_mapping;
+ loff_t end = i->pos + i->len - 1;
- zero_user(page, offset, bytes);
- mark_page_accessed(page);
-
- return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
+ i->iomap.flags |= IOMAP_F_STALE;
+ return filemap_write_and_wait_range(mapping, i->pos, end);
}
-static loff_t
-iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
- void *data, struct iomap *iomap, struct iomap *srcmap)
+static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
+ const struct iomap_write_ops *write_ops)
{
- bool *did_zero = data;
- loff_t written = 0;
+ u64 bytes = iomap_length(iter);
int status;
- /* already zeroed? we're done. */
- if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
- return count;
-
do {
- unsigned offset, bytes;
+ struct folio *folio;
+ size_t offset;
+ bool ret;
- offset = offset_in_page(pos);
- bytes = min_t(loff_t, PAGE_SIZE - offset, count);
-
- if (IS_DAX(inode))
- status = dax_iomap_zero(pos, offset, bytes, iomap);
- else
- status = iomap_zero(inode, pos, offset, bytes, iomap,
- srcmap);
- if (status < 0)
+ bytes = min_t(u64, SIZE_MAX, bytes);
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
+ if (status)
return status;
+ if (iter->iomap.flags & IOMAP_F_STALE)
+ break;
+
+ /* a NULL folio means we're done with a folio batch */
+ if (!folio) {
+ status = iomap_iter_advance_full(iter);
+ break;
+ }
+
+ /* warn about zeroing folios beyond eof that won't write back */
+ WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);
+
+ trace_iomap_zero_iter(iter->inode, folio_pos(folio) + offset,
+ bytes);
+
+ folio_zero_range(folio, offset, bytes);
+ folio_mark_accessed(folio);
+
+ ret = iomap_write_end(iter, bytes, bytes, folio);
+ __iomap_put_folio(iter, write_ops, bytes, folio);
+ if (WARN_ON_ONCE(!ret))
+ return -EIO;
+
+ status = iomap_iter_advance(iter, bytes);
+ if (status)
+ break;
+ } while ((bytes = iomap_length(iter)) > 0);
- pos += bytes;
- count -= bytes;
- written += bytes;
- if (did_zero)
- *did_zero = true;
- } while (count > 0);
+ if (did_zero)
+ *did_zero = true;
+ return status;
+}
- return written;
+loff_t
+iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset, loff_t length)
+{
+ struct address_space *mapping = iter->inode->i_mapping;
+ pgoff_t start = offset >> PAGE_SHIFT;
+ pgoff_t end = (offset + length - 1) >> PAGE_SHIFT;
+
+ iter->fbatch = kmalloc(sizeof(struct folio_batch), GFP_KERNEL);
+ if (!iter->fbatch)
+ return offset + length;
+ folio_batch_init(iter->fbatch);
+
+ filemap_get_folios_dirty(mapping, &start, end, iter->fbatch);
+ return (start << PAGE_SHIFT);
}
+EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
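A hedged sketch of how a caller that already has an iomap_iter for an IOMAP_ZERO request over an unwritten extent might use the helper above: gather the dirty folios into iter->fbatch and use the returned offset to bound the range it reports, so iomap_zero_iter() only touches dirty pagecache. The function name below and the clamping policy are assumptions.

static loff_t my_fs_trim_zero_range(struct iomap_iter *iter, loff_t pos,
		loff_t length)
{
	loff_t batch_end;

	/*
	 * Gather the dirty folios in [pos, pos + length) into iter->fbatch.
	 * The return value is the byte offset just past the last folio
	 * gathered, or pos + length if the batch could not be allocated.
	 */
	batch_end = iomap_fill_dirty_folios(iter, pos, length);
	return min_t(loff_t, pos + length, batch_end);
}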
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
- loff_t ret;
+ struct iomap_iter iter = {
+ .inode = inode,
+ .pos = pos,
+ .len = len,
+ .flags = IOMAP_ZERO,
+ .private = private,
+ };
+ struct address_space *mapping = inode->i_mapping;
+ int ret;
+ bool range_dirty;
- while (len > 0) {
- ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
- ops, did_zero, iomap_zero_range_actor);
- if (ret <= 0)
- return ret;
+ /*
+ * To avoid an unconditional flush, check pagecache state and only flush
+ * if dirty and the fs returns a mapping that might convert on
+ * writeback.
+ */
+ range_dirty = filemap_range_needs_writeback(mapping, iter.pos,
+ iter.pos + iter.len - 1);
+ while ((ret = iomap_iter(&iter, ops)) > 0) {
+ const struct iomap *srcmap = iomap_iter_srcmap(&iter);
+
+ if (WARN_ON_ONCE(iter.fbatch &&
+ srcmap->type != IOMAP_UNWRITTEN))
+ return -EIO;
+
+ if (!iter.fbatch &&
+ (srcmap->type == IOMAP_HOLE ||
+ srcmap->type == IOMAP_UNWRITTEN)) {
+ s64 status;
+
+ if (range_dirty) {
+ range_dirty = false;
+ status = iomap_zero_iter_flush_and_stale(&iter);
+ } else {
+ status = iomap_iter_advance_full(&iter);
+ }
+ iter.status = status;
+ continue;
+ }
- pos += ret;
- len -= ret;
+ iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
}
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
unsigned int blocksize = i_blocksize(inode);
unsigned int off = pos & (blocksize - 1);
@@ -1010,609 +1626,294 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
/* Block boundary? Nothing to do */
if (!off)
return 0;
- return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
+ return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
+ write_ops, private);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
-static loff_t
-iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap, struct iomap *srcmap)
+static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
+ struct folio *folio)
{
- struct page *page = data;
+ loff_t length = iomap_length(iter);
int ret;
- if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
- ret = __block_write_begin_int(page, pos, length, NULL, iomap);
+ if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
+ ret = __block_write_begin_int(folio, iter->pos, length, NULL,
+ &iter->iomap);
if (ret)
return ret;
- block_commit_write(page, 0, length);
+ block_commit_write(folio, 0, length);
} else {
- WARN_ON_ONCE(!PageUptodate(page));
- iomap_page_create(inode, page);
- set_page_dirty(page);
+ WARN_ON_ONCE(!folio_test_uptodate(folio));
+ folio_mark_dirty(folio);
}
- return length;
+ return iomap_iter_advance(iter, length);
}
-vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
+ void *private)
{
- struct page *page = vmf->page;
- struct inode *inode = file_inode(vmf->vma->vm_file);
- unsigned long length;
- loff_t offset;
+ struct iomap_iter iter = {
+ .inode = file_inode(vmf->vma->vm_file),
+ .flags = IOMAP_WRITE | IOMAP_FAULT,
+ .private = private,
+ };
+ struct folio *folio = page_folio(vmf->page);
ssize_t ret;
- lock_page(page);
- ret = page_mkwrite_check_truncate(page, inode);
+ folio_lock(folio);
+ ret = folio_mkwrite_check_truncate(folio, iter.inode);
if (ret < 0)
goto out_unlock;
- length = ret;
-
- offset = page_offset(page);
- while (length > 0) {
- ret = iomap_apply(inode, offset, length,
- IOMAP_WRITE | IOMAP_FAULT, ops, page,
- iomap_page_mkwrite_actor);
- if (unlikely(ret <= 0))
- goto out_unlock;
- offset += ret;
- length -= ret;
- }
+ iter.pos = folio_pos(folio);
+ iter.len = ret;
+ while ((ret = iomap_iter(&iter, ops)) > 0)
+ iter.status = iomap_folio_mkwrite_iter(&iter, folio);
- wait_for_stable_page(page);
+ if (ret < 0)
+ goto out_unlock;
+ folio_wait_stable(folio);
return VM_FAULT_LOCKED;
out_unlock:
- unlock_page(page);
- return block_page_mkwrite_return(ret);
+ folio_unlock(folio);
+ return vmf_fs_error(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
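A minimal sketch of wiring iomap_page_mkwrite() into a filesystem's ->page_mkwrite handler with the new private argument; my_fs_iomap_ops is an assumption and real filesystems may also need to reserve space here.

static vm_fault_t my_fs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &my_fs_iomap_ops, NULL);
	sb_end_pagefault(inode->i_sb);
	return ret;
}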
-static void
-iomap_finish_page_writeback(struct inode *inode, struct page *page,
- int error)
-{
- struct iomap_page *iop = to_iomap_page(page);
-
- if (error) {
- SetPageError(page);
- mapping_set_error(inode->i_mapping, -EIO);
- }
-
- WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
- WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);
-
- if (!iop || atomic_dec_and_test(&iop->write_count))
- end_page_writeback(page);
-}
-
-/*
- * We're now finished for good with this ioend structure. Update the page
- * state, release holds on bios, and finally free up memory. Do not use the
- * ioend after this.
- */
-static void
-iomap_finish_ioend(struct iomap_ioend *ioend, int error)
+static void iomap_writeback_init(struct inode *inode, struct folio *folio)
{
- struct inode *inode = ioend->io_inode;
- struct bio *bio = &ioend->io_inline_bio;
- struct bio *last = ioend->io_bio, *next;
- u64 start = bio->bi_iter.bi_sector;
- loff_t offset = ioend->io_offset;
- bool quiet = bio_flagged(bio, BIO_QUIET);
-
- for (bio = &ioend->io_inline_bio; bio; bio = next) {
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct iomap_folio_state *ifs = folio->private;
+ WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+ if (ifs) {
+ WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
/*
- * For the last bio, bi_private points to the ioend, so we
- * need to explicitly end the iteration here.
+ * Set this to the folio size. After processing the folio for
+ * writeback in iomap_writeback_folio(), we'll subtract any
+ * ranges not written back.
+ *
+ * We do this because otherwise, we would have to atomically
+ * increment ifs->write_bytes_pending every time a range in the
+ * folio needs to be written back.
*/
- if (bio == last)
- next = NULL;
- else
- next = bio->bi_private;
-
- /* walk each page on bio, ending page IO on them */
- bio_for_each_segment_all(bv, bio, iter_all)
- iomap_finish_page_writeback(inode, bv->bv_page, error);
- bio_put(bio);
- }
- /* The ioend has been freed by bio_put() */
-
- if (unlikely(error && !quiet)) {
- printk_ratelimited(KERN_ERR
-"%s: writeback error on inode %lu, offset %lld, sector %llu",
- inode->i_sb->s_id, inode->i_ino, offset, start);
+ atomic_set(&ifs->write_bytes_pending, folio_size(folio));
}
}
-void
-iomap_finish_ioends(struct iomap_ioend *ioend, int error)
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+ size_t len)
{
- struct list_head tmp;
+ struct iomap_folio_state *ifs = folio->private;
- list_replace_init(&ioend->io_list, &tmp);
- iomap_finish_ioend(ioend, error);
+ WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+ WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
- while (!list_empty(&tmp)) {
- ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
- list_del_init(&ioend->io_list);
- iomap_finish_ioend(ioend, error);
- }
+ if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
+ folio_end_writeback(folio);
}
-EXPORT_SYMBOL_GPL(iomap_finish_ioends);
+EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
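For illustration only, a hedged sketch of a write bio completion handler that drops the per-folio pending byte count via the helper above; the ioend plumbing for this series lives elsewhere, so the function below is an assumption rather than the patch's own completion path.

static void my_fs_writeback_bio_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		if (bio->bi_status)
			mapping_set_error(fi.folio->mapping,
					blk_status_to_errno(bio->bi_status));
		/* ends folio writeback once write_bytes_pending hits zero */
		iomap_finish_folio_write(fi.folio->mapping->host, fi.folio,
				fi.length);
	}
	bio_put(bio);
}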
-/*
- * We can merge two adjacent ioends if they have the same set of work to do.
- */
-static bool
-iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
+static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
+ size_t *bytes_submitted)
{
- if (ioend->io_bio->bi_status != next->io_bio->bi_status)
- return false;
- if ((ioend->io_flags & IOMAP_F_SHARED) ^
- (next->io_flags & IOMAP_F_SHARED))
- return false;
- if ((ioend->io_type == IOMAP_UNWRITTEN) ^
- (next->io_type == IOMAP_UNWRITTEN))
- return false;
- if (ioend->io_offset + ioend->io_size != next->io_offset)
- return false;
- return true;
-}
-
-void
-iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends,
- void (*merge_private)(struct iomap_ioend *ioend,
- struct iomap_ioend *next))
-{
- struct iomap_ioend *next;
-
- INIT_LIST_HEAD(&ioend->io_list);
+ do {
+ ssize_t ret;
- while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
- io_list))) {
- if (!iomap_ioend_can_merge(ioend, next))
- break;
- list_move_tail(&next->io_list, &ioend->io_list);
- ioend->io_size += next->io_size;
- if (next->io_private && merge_private)
- merge_private(ioend, next);
- }
-}
-EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
+ ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos);
+ if (WARN_ON_ONCE(ret == 0 || ret > rlen))
+ return -EIO;
+ if (ret < 0)
+ return ret;
+ rlen -= ret;
+ pos += ret;
-static int
-iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b)
-{
- struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
- struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
+ /*
+ * Holes are not written back by ->writeback_range, so track
+ * whether we handled anything that is not a hole here.
+ */
+ if (wpc->iomap.type != IOMAP_HOLE)
+ *bytes_submitted += ret;
+ } while (rlen);
- if (ia->io_offset < ib->io_offset)
- return -1;
- if (ia->io_offset > ib->io_offset)
- return 1;
return 0;
}
-void
-iomap_sort_ioends(struct list_head *ioend_list)
-{
- list_sort(NULL, ioend_list, iomap_ioend_compare);
-}
-EXPORT_SYMBOL_GPL(iomap_sort_ioends);
-
-static void iomap_writepage_end_bio(struct bio *bio)
-{
- struct iomap_ioend *ioend = bio->bi_private;
-
- iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
-}
-
/*
- * Submit the final bio for an ioend.
+ * Check interaction of the folio with the file end.
*
- * If @error is non-zero, it means that we have a situation where some part of
- * the submission process has failed after we have marked paged for writeback
- * and unlocked them. In this situation, we need to fail the bio instead of
- * submitting it. This typically only happens on a filesystem shutdown.
+ * If the folio is entirely beyond i_size, return false. If it straddles
+ * i_size, adjust end_pos and zero all data beyond i_size.
*/
-static int
-iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
- int error)
+static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode,
+ u64 *end_pos)
{
- ioend->io_bio->bi_private = ioend;
- ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
+ u64 isize = i_size_read(inode);
+
+ if (*end_pos > isize) {
+ size_t poff = offset_in_folio(folio, isize);
+ pgoff_t end_index = isize >> PAGE_SHIFT;
- if (wpc->ops->prepare_ioend)
- error = wpc->ops->prepare_ioend(ioend, error);
- if (error) {
/*
- * If we are failing the IO now, just mark the ioend with an
- * error and finish it. This will run IO completion immediately
- * as there is only one reference to the ioend at this point in
- * time.
+ * If the folio is entirely outside of i_size, skip it.
+ *
+ * This can happen due to a truncate operation that is in
+ * progress and in that case truncate will finish it off once
+ * we've dropped the folio lock.
+ *
+ * Note that the pgoff_t used for end_index is an unsigned long.
+ * If the given offset is greater than 16TB on a 32-bit system,
+ * then if we checked if the folio is fully outside i_size with
+ * "if (folio->index >= end_index + 1)", "end_index + 1" would
+ * overflow and evaluate to 0. Hence this folio would be
+ * redirtied and written out repeatedly, which would result in
+ * an infinite loop; the user program performing this operation
+ * would hang. Instead, we can detect this situation by
+ * checking if the folio is totally beyond i_size or if its
+ * offset is just equal to the EOF.
*/
- ioend->io_bio->bi_status = errno_to_blk_status(error);
- bio_endio(ioend->io_bio);
- return error;
- }
+ if (folio->index > end_index ||
+ (folio->index == end_index && poff == 0))
+ return false;
- submit_bio(ioend->io_bio);
- return 0;
-}
-
-static struct iomap_ioend *
-iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
- loff_t offset, sector_t sector, struct writeback_control *wbc)
-{
- struct iomap_ioend *ioend;
- struct bio *bio;
-
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
- bio_set_dev(bio, wpc->iomap.bdev);
- bio->bi_iter.bi_sector = sector;
- bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
- bio->bi_write_hint = inode->i_write_hint;
- wbc_init_bio(wbc, bio);
-
- ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
- INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_type = wpc->iomap.type;
- ioend->io_flags = wpc->iomap.flags;
- ioend->io_inode = inode;
- ioend->io_size = 0;
- ioend->io_offset = offset;
- ioend->io_private = NULL;
- ioend->io_bio = bio;
- return ioend;
-}
-
-/*
- * Allocate a new bio, and chain the old bio to the new one.
- *
- * Note that we have to do perform the chaining in this unintuitive order
- * so that the bi_private linkage is set up in the right direction for the
- * traversal in iomap_finish_ioend().
- */
-static struct bio *
-iomap_chain_bio(struct bio *prev)
-{
- struct bio *new;
-
- new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
- bio_copy_dev(new, prev);/* also copies over blkcg information */
- new->bi_iter.bi_sector = bio_end_sector(prev);
- new->bi_opf = prev->bi_opf;
- new->bi_write_hint = prev->bi_write_hint;
-
- bio_chain(prev, new);
- bio_get(prev); /* for iomap_finish_ioend */
- submit_bio(prev);
- return new;
-}
+ /*
+ * The folio straddles i_size.
+ *
+ * It must be zeroed out on each and every writepage invocation
+ * because it may be mmapped:
+ *
+ * A file is mapped in multiples of the page size. For a
+ * file that is not a multiple of the page size, the
+ * remaining memory is zeroed when mapped, and writes to that
+ * region are not written out to the file.
+ *
+ * Also adjust the end_pos to the end of file and skip writeback
+ * for all blocks entirely beyond i_size.
+ */
+ folio_zero_segment(folio, poff, folio_size(folio));
+ *end_pos = isize;
+ }
-static bool
-iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
- sector_t sector)
-{
- if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
- (wpc->ioend->io_flags & IOMAP_F_SHARED))
- return false;
- if (wpc->iomap.type != wpc->ioend->io_type)
- return false;
- if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
- return false;
- if (sector != bio_end_sector(wpc->ioend->io_bio))
- return false;
return true;
}
-/*
- * Test to see if we have an existing ioend structure that we could append to
- * first, otherwise finish off the current ioend and start another.
- */
-static void
-iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
- struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct list_head *iolist)
+int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio)
{
- sector_t sector = iomap_sector(&wpc->iomap, offset);
- unsigned len = i_blocksize(inode);
- unsigned poff = offset & (PAGE_SIZE - 1);
- bool merged, same_page = false;
-
- if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
- if (wpc->ioend)
- list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
- }
-
- merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
- &same_page);
- if (iop && !same_page)
- atomic_inc(&iop->write_count);
+ struct iomap_folio_state *ifs = folio->private;
+ struct inode *inode = wpc->inode;
+ u64 pos = folio_pos(folio);
+ u64 end_pos = pos + folio_size(folio);
+ u64 end_aligned = 0;
+ size_t bytes_submitted = 0;
+ int error = 0;
+ u32 rlen;
+
+ WARN_ON_ONCE(!folio_test_locked(folio));
+ WARN_ON_ONCE(folio_test_dirty(folio));
+ WARN_ON_ONCE(folio_test_writeback(folio));
+
+ trace_iomap_writeback_folio(inode, pos, folio_size(folio));
+
+ if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
+ return 0;
+ WARN_ON_ONCE(end_pos <= pos);
- if (!merged) {
- if (bio_full(wpc->ioend->io_bio, len)) {
- wpc->ioend->io_bio =
- iomap_chain_bio(wpc->ioend->io_bio);
+ if (i_blocks_per_folio(inode, folio) > 1) {
+ if (!ifs) {
+ ifs = ifs_alloc(inode, folio, 0);
+ iomap_set_range_dirty(folio, 0, end_pos - pos);
}
- bio_add_page(wpc->ioend->io_bio, page, len, poff);
- }
-
- wpc->ioend->io_size += len;
- wbc_account_cgroup_owner(wbc, page, len);
-}
-/*
- * We implement an immediate ioend submission policy here to avoid needing to
- * chain multiple ioends and hence nest mempool allocations which can violate
- * forward progress guarantees we need to provide. The current ioend we are
- * adding blocks to is cached on the writepage context, and if the new block
- * does not append to the cached ioend it will create a new ioend and cache that
- * instead.
- *
- * If a new ioend is created and cached, the old ioend is returned and queued
- * locally for submission once the entire page is processed or an error has been
- * detected. While ioends are submitted immediately after they are completed,
- * batching optimisations are provided by higher level block plugging.
- *
- * At the end of a writeback pass, there will be a cached ioend remaining on the
- * writepage context that the caller will need to submit.
- */
-static int
-iomap_writepage_map(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct inode *inode,
- struct page *page, u64 end_offset)
-{
- struct iomap_page *iop = to_iomap_page(page);
- struct iomap_ioend *ioend, *next;
- unsigned len = i_blocksize(inode);
- u64 file_offset; /* file offset of page */
- int error = 0, count = 0, i;
- LIST_HEAD(submit_list);
-
- WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
- WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
+ iomap_writeback_init(inode, folio);
+ }
/*
- * Walk through the page to find areas to write back. If we run off the
- * end of the current map or find the current map invalid, grab a new
- * one.
+ * Set the writeback bit ASAP, as the I/O completion for the single
+ * block per folio case can happen as soon as we submit the bio.
*/
- for (i = 0, file_offset = page_offset(page);
- i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
- i++, file_offset += len) {
- if (iop && !test_bit(i, iop->uptodate))
- continue;
+ folio_start_writeback(folio);
- error = wpc->ops->map_blocks(wpc, inode, file_offset);
+ /*
+ * Walk through the folio to find dirty areas to write back.
+ */
+ end_aligned = round_up(end_pos, i_blocksize(inode));
+ while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
+ error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos,
+ &bytes_submitted);
if (error)
break;
- if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
- continue;
- if (wpc->iomap.type == IOMAP_HOLE)
- continue;
- iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
- &submit_list);
- count++;
+ pos += rlen;
}
- WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
- WARN_ON_ONCE(!PageLocked(page));
- WARN_ON_ONCE(PageWriteback(page));
+ if (bytes_submitted)
+ wpc->nr_folios++;
/*
- * We cannot cancel the ioend directly here on error. We may have
- * already set other pages under writeback and hence we have to run I/O
- * completion to mark the error state of the pages under writeback
- * appropriately.
+ * We can have dirty bits set past the end of file in the page_mkwrite path
+ * while mapping the last partial folio. Hence it's better to clear
+ * all the dirty bits in the folio here.
*/
- if (unlikely(error)) {
- if (!count) {
- /*
- * If the current page hasn't been added to ioend, it
- * won't be affected by I/O completions and we must
- * discard and unlock it right here.
- */
- if (wpc->ops->discard_page)
- wpc->ops->discard_page(page);
- ClearPageUptodate(page);
- unlock_page(page);
- goto done;
- }
-
- /*
- * If the page was not fully cleaned, we need to ensure that the
- * higher layers come back to it correctly. That means we need
- * to keep the page dirty, and for WB_SYNC_ALL writeback we need
- * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
- * so another attempt to write this page in this writeback sweep
- * will be made.
- */
- set_page_writeback_keepwrite(page);
- } else {
- clear_page_dirty_for_io(page);
- set_page_writeback(page);
- }
-
- unlock_page(page);
+ iomap_clear_range_dirty(folio, 0, folio_size(folio));
/*
- * Preserve the original error if there was one, otherwise catch
- * submission errors here and propagate into subsequent ioend
- * submissions.
+ * Usually the writeback bit is cleared by the I/O completion handler.
+ * But we may end up either not actually writing any blocks, or (when
+ * there are multiple blocks in a folio) all I/O might have finished
+ * already at this point. In that case we need to clear the writeback
+ * bit ourselves right after unlocking the page.
*/
- list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
- int error2;
-
- list_del_init(&ioend->io_list);
- error2 = iomap_submit_ioend(wpc, ioend, error);
- if (error2 && !error)
- error = error2;
+ if (ifs) {
+ /*
+ * Subtract any bytes that were initially accounted to
+ * write_bytes_pending but skipped for writeback.
+ */
+ size_t bytes_not_submitted = folio_size(folio) -
+ bytes_submitted;
+
+ if (bytes_not_submitted)
+ iomap_finish_folio_write(inode, folio,
+ bytes_not_submitted);
+ } else if (!bytes_submitted) {
+ folio_end_writeback(folio);
}
- /*
- * We can end up here with no error and nothing to write only if we race
- * with a partial page truncate on a sub-page block sized filesystem.
- */
- if (!count)
- end_page_writeback(page);
-done:
- mapping_set_error(page->mapping, error);
+ mapping_set_error(inode->i_mapping, error);
return error;
}
+EXPORT_SYMBOL_GPL(iomap_writeback_folio);
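A worked example of the write_bytes_pending accounting described above (an illustration, not part of the patch): a 64k folio with 4k blocks where a single 8k range is dirty.

/*
 *   iomap_writeback_init():  write_bytes_pending = 65536 (folio_size)
 *   one 8k range submitted:  bytes_submitted     = 8192
 *   iomap_finish_folio_write(inode, folio, 65536 - 8192) drops the count
 *   for the ranges that were never submitted, leaving 8192 pending.
 *   The bio completion later calls iomap_finish_folio_write() for the
 *   remaining 8192 bytes, which ends writeback on the folio.
 */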
-/*
- * Write out a dirty page.
- *
- * For delalloc space on the page we need to allocate space and flush it.
- * For unwritten space on the page we need to start the conversion to
- * regular allocated space.
- */
-static int
-iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
+int
+iomap_writepages(struct iomap_writepage_ctx *wpc)
{
- struct iomap_writepage_ctx *wpc = data;
- struct inode *inode = page->mapping->host;
- pgoff_t end_index;
- u64 end_offset;
- loff_t offset;
-
- trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
+ struct address_space *mapping = wpc->inode->i_mapping;
+ struct folio *folio = NULL;
+ int error;
/*
- * Refuse to write the page out if we are called from reclaim context.
- *
- * This avoids stack overflows when called from deeply used stacks in
- * random callers for direct reclaim or memcg reclaim. We explicitly
- * allow reclaim from kswapd as the stack usage there is relatively low.
- *
- * This should never happen except in the case of a VM regression so
- * warn about it.
+ * Writeback from reclaim context should never happen except in the case
+ * of a VM regression so warn about it and refuse to write the data.
*/
- if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
+ if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
PF_MEMALLOC))
- goto redirty;
+ return -EIO;
- /*
- * Given that we do not allow direct reclaim to call us, we should
- * never be called in a recursive filesystem reclaim context.
- */
- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
- goto redirty;
+ while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
+ error = iomap_writeback_folio(wpc, folio);
+ folio_unlock(folio);
+ }
/*
- * Is this page beyond the end of the file?
+ * If @error is non-zero, it means that we have a situation where some
+ * part of the submission process has failed after we've marked pages
+ * for writeback.
*
- * The page index is less than the end_index, adjust the end_offset
- * to the highest offset that this page should represent.
- * -----------------------------------------------------
- * | file mapping | <EOF> |
- * -----------------------------------------------------
- * | Page ... | Page N-2 | Page N-1 | Page N | |
- * ^--------------------------------^----------|--------
- * | desired writeback range | see else |
- * ---------------------------------^------------------|
+ * We cannot cancel the writeback directly in that case, so always call
+ * ->writeback_submit to run the I/O completion handler to clear the
+ * writeback bit and let the file system process the errors.
*/
- offset = i_size_read(inode);
- end_index = offset >> PAGE_SHIFT;
- if (page->index < end_index)
- end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
- else {
- /*
- * Check whether the page to write out is beyond or straddles
- * i_size or not.
- * -------------------------------------------------------
- * | file mapping | <EOF> |
- * -------------------------------------------------------
- * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
- * ^--------------------------------^-----------|---------
- * | | Straddles |
- * ---------------------------------^-----------|--------|
- */
- unsigned offset_into_page = offset & (PAGE_SIZE - 1);
-
- /*
- * Skip the page if it is fully outside i_size, e.g. due to a
- * truncate operation that is in progress. We must redirty the
- * page so that reclaim stops reclaiming it. Otherwise
- * iomap_vm_releasepage() is called on it and gets confused.
- *
- * Note that the end_index is unsigned long, it would overflow
- * if the given offset is greater than 16TB on 32-bit system
- * and if we do check the page is fully outside i_size or not
- * via "if (page->index >= end_index + 1)" as "end_index + 1"
- * will be evaluated to 0. Hence this page will be redirtied
- * and be written out repeatedly which would result in an
- * infinite loop, the user program that perform this operation
- * will hang. Instead, we can verify this situation by checking
- * if the page to write is totally beyond the i_size or if it's
- * offset is just equal to the EOF.
- */
- if (page->index > end_index ||
- (page->index == end_index && offset_into_page == 0))
- goto redirty;
-
- /*
- * The page straddles i_size. It must be zeroed out on each
- * and every writepage invocation because it may be mmapped.
- * "A file is mapped in multiples of the page size. For a file
- * that is not a multiple of the page size, the remaining
- * memory is zeroed when mapped, and writes to that region are
- * not written out to the file."
- */
- zero_user_segment(page, offset_into_page, PAGE_SIZE);
-
- /* Adjust the end_offset to the end of file */
- end_offset = offset;
- }
-
- return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
-
-redirty:
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
-}
-
-int
-iomap_writepage(struct page *page, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops)
-{
- int ret;
-
- wpc->ops = ops;
- ret = iomap_do_writepage(page, wbc, wpc);
- if (!wpc->ioend)
- return ret;
- return iomap_submit_ioend(wpc, wpc->ioend, ret);
-}
-EXPORT_SYMBOL_GPL(iomap_writepage);
-
-int
-iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops)
-{
- int ret;
-
- wpc->ops = ops;
- ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
- if (!wpc->ioend)
- return ret;
- return iomap_submit_ioend(wpc, wpc->ioend, ret);
+ if (wpc->wb_ctx)
+ return wpc->ops->writeback_submit(wpc, error);
+ return error;
}
EXPORT_SYMBOL_GPL(iomap_writepages);
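Finally, a hedged sketch of a ->writepages implementation for the single-argument iomap_writepages() above; only the wpc fields dereferenced in this hunk (inode, wbc, ops) are filled in, and my_fs_writeback_ops is an assumption.

static int my_fs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode	= mapping->host,
		.wbc	= wbc,
		.ops	= &my_fs_writeback_ops,
	};

	return iomap_writepages(&wpc);
}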
-
-static int __init iomap_init(void)
-{
- return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
- offsetof(struct iomap_ioend, io_inline_bio),
- BIOSET_NEED_BVECS);
-}
-fs_initcall(iomap_init);