-rw-r--r--  Documentation/core-api/kernel-api.rst  |   1
-rw-r--r--  block/bio.c                            |  22
-rw-r--r--  fs/buffer.c                            |  23
-rw-r--r--  fs/internal.h                          |   2
-rw-r--r--  fs/iomap/buffered-io.c                 | 551
-rw-r--r--  fs/xfs/xfs_aops.c                      |  24
-rw-r--r--  fs/xfs/xfs_icache.c                    |   2
-rw-r--r--  include/linux/bio.h                    |  56
-rw-r--r--  include/linux/iomap.h                  |   3
9 files changed, 389 insertions(+), 295 deletions(-)
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 19f501d58f5d..d6b3f94b9f1f 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -279,6 +279,7 @@ Accounting Framework
Block Devices
=============
+.. kernel-doc:: include/linux/bio.h
.. kernel-doc:: block/blk-core.c
:export:
diff --git a/block/bio.c b/block/bio.c
index 6fadc977cd7f..0d400ba2dbd1 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1035,6 +1035,28 @@ int bio_add_page(struct bio *bio, struct page *page,
}
EXPORT_SYMBOL(bio_add_page);
+/**
+ * bio_add_folio - Attempt to add part of a folio to a bio.
+ * @bio: BIO to add to.
+ * @folio: Folio to add.
+ * @len: How many bytes from the folio to add.
+ * @off: First byte in this folio to add.
+ *
+ * Filesystems that use folios can call this function instead of calling
+ * bio_add_page() for each page in the folio. If @off is bigger than
+ * PAGE_SIZE, this function can create a bio_vec that starts in a page
+ * after the bv_page. BIOs do not support folios that are 4GiB or larger.
+ *
+ * Return: Whether the addition was successful.
+ */
+bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
+ size_t off)
+{
+ if (len > UINT_MAX || off > UINT_MAX)
+ return false;
+ return bio_add_page(bio, &folio->page, len, off) > 0;
+}
+
void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
struct bvec_iter_all iter_all;
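
(For orientation, not part of the patch: a minimal caller sketch of the new
bio_add_folio() helper. The example_* name, the device, and the sector are
assumptions; a real read would also set bi_end_io, omitted here for brevity.)

	static void example_read_folio(struct block_device *bdev,
				       struct folio *folio, sector_t sector)
	{
		/* One vector is enough: the whole folio is one segment. */
		struct bio *bio = bio_alloc(GFP_KERNEL, 1);

		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = sector;
		bio->bi_opf = REQ_OP_READ;
		/* Fails only if the bio is full or the folio is 4GiB+. */
		if (!bio_add_folio(bio, folio, folio_size(folio), 0)) {
			bio_put(bio);
			return;
		}
		submit_bio(bio);
	}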
diff --git a/fs/buffer.c b/fs/buffer.c
index 46bc589b7a03..8e112b6bd371 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1969,34 +1969,34 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
}
}
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block, const struct iomap *iomap)
{
unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
unsigned blocksize, bbits;
struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
BUG_ON(from > PAGE_SIZE);
BUG_ON(to > PAGE_SIZE);
BUG_ON(from > to);
- head = create_page_buffers(page, inode, 0);
+ head = create_page_buffers(&folio->page, inode, 0);
blocksize = head->b_size;
bbits = block_size_bits(blocksize);
- block = (sector_t)page->index << (PAGE_SHIFT - bbits);
+ block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
for(bh = head, block_start = 0; bh != head || !block_start;
block++, block_start=block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
}
@@ -2016,20 +2016,20 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
if (buffer_new(bh)) {
clean_bdev_bh_alias(bh);
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
clear_buffer_new(bh);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
- zero_user_segments(page,
+ folio_zero_segments(folio,
to, block_end,
block_start, from);
continue;
}
}
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
continue;
@@ -2050,14 +2050,15 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
err = -EIO;
}
if (unlikely(err))
- page_zero_new_buffers(page, from, to);
+ page_zero_new_buffers(&folio->page, from, to);
return err;
}
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
- return __block_write_begin_int(page, pos, len, get_block, NULL);
+ return __block_write_begin_int(page_folio(page), pos, len, get_block,
+ NULL);
}
EXPORT_SYMBOL(__block_write_begin);
diff --git a/fs/internal.h b/fs/internal.h
index 7979ff8d168c..8590c973c2f4 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -37,7 +37,7 @@ static inline int emergency_thaw_bdev(struct super_block *sb)
/*
* buffer.c
*/
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block, const struct iomap *iomap);
/*
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 71a36ae120ee..c6b3a148e898 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -22,8 +22,8 @@
#include "../internal.h"
/*
- * Structure allocated for each page or THP when block size < page size
- * to track sub-page uptodate status and I/O completions.
+ * Structure allocated for each folio when block size < folio size
+ * to track sub-folio uptodate status and I/O completions.
*/
struct iomap_page {
atomic_t read_bytes_pending;
@@ -32,27 +32,20 @@ struct iomap_page {
unsigned long uptodate[];
};
-static inline struct iomap_page *to_iomap_page(struct page *page)
+static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
- /*
- * per-block data is stored in the head page. Callers should
- * not be dealing with tail pages, and if they are, they can
- * call thp_head() first.
- */
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
-
- if (page_has_private(page))
- return (struct iomap_page *)page_private(page);
+ if (folio_test_private(folio))
+ return folio_get_private(folio);
return NULL;
}
static struct bio_set iomap_ioend_bioset;
static struct iomap_page *
-iomap_page_create(struct inode *inode, struct page *page)
+iomap_page_create(struct inode *inode, struct folio *folio)
{
- struct iomap_page *iop = to_iomap_page(page);
- unsigned int nr_blocks = i_blocks_per_page(inode, page);
+ struct iomap_page *iop = to_iomap_page(folio);
+ unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
if (iop || nr_blocks <= 1)
return iop;
@@ -60,40 +53,40 @@ iomap_page_create(struct inode *inode, struct page *page)
iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
GFP_NOFS | __GFP_NOFAIL);
spin_lock_init(&iop->uptodate_lock);
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
bitmap_fill(iop->uptodate, nr_blocks);
- attach_page_private(page, iop);
+ folio_attach_private(folio, iop);
return iop;
}
-static void
-iomap_page_release(struct page *page)
+static void iomap_page_release(struct folio *folio)
{
- struct iomap_page *iop = detach_page_private(page);
- unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
+ struct iomap_page *iop = folio_detach_private(folio);
+ struct inode *inode = folio->mapping->host;
+ unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
if (!iop)
return;
WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
- PageUptodate(page));
+ folio_test_uptodate(folio));
kfree(iop);
}
/*
- * Calculate the range inside the page that we actually need to read.
+ * Calculate the range inside the folio that we actually need to read.
*/
-static void
-iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
- loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
+static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
+ struct iomap_page *iop = to_iomap_page(folio);
loff_t orig_pos = *pos;
loff_t isize = i_size_read(inode);
unsigned block_bits = inode->i_blkbits;
unsigned block_size = (1 << block_bits);
- unsigned poff = offset_in_page(*pos);
- unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+ size_t poff = offset_in_folio(folio, *pos);
+ size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
unsigned first = poff >> block_bits;
unsigned last = (poff + plen - 1) >> block_bits;
@@ -131,7 +124,7 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
* page cache for blocks that are entirely outside of i_size.
*/
if (orig_pos <= isize && orig_pos + length > isize) {
- unsigned end = offset_in_page(isize - 1) >> block_bits;
+ unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
if (first <= end && last > end)
plen -= (last - end) * block_size;
@@ -141,66 +134,62 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
*lenp = plen;
}
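
(An illustrative worked example, values assumed, of why *offp/*lenp widen from
unsigned to size_t: the old page math aliases once a folio spans several
pages. Take a 16KiB folio covering file positions 32KiB..48KiB:)

	/*
	 * pos = 40960 (40KiB):
	 *   offset_in_folio(folio, pos) == 8192  -- poff within the folio
	 *   offset_in_page(pos)         ==    0  -- the old math would wrap
	 *                                           back to a page boundary
	 *                                           and mis-scope the
	 *                                           sub-folio uptodate bits
	 */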
-static void
-iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static void iomap_iop_set_range_uptodate(struct folio *folio,
+ struct iomap_page *iop, size_t off, size_t len)
{
- struct iomap_page *iop = to_iomap_page(page);
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
unsigned first = off >> inode->i_blkbits;
unsigned last = (off + len - 1) >> inode->i_blkbits;
unsigned long flags;
spin_lock_irqsave(&iop->uptodate_lock, flags);
bitmap_set(iop->uptodate, first, last - first + 1);
- if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
- SetPageUptodate(page);
+ if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
+ folio_mark_uptodate(folio);
spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}
-static void
-iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static void iomap_set_range_uptodate(struct folio *folio,
+ struct iomap_page *iop, size_t off, size_t len)
{
- if (PageError(page))
+ if (folio_test_error(folio))
return;
- if (page_has_private(page))
- iomap_iop_set_range_uptodate(page, off, len);
+ if (iop)
+ iomap_iop_set_range_uptodate(folio, iop, off, len);
else
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
-static void
-iomap_read_page_end_io(struct bio_vec *bvec, int error)
+static void iomap_finish_folio_read(struct folio *folio, size_t offset,
+ size_t len, int error)
{
- struct page *page = bvec->bv_page;
- struct iomap_page *iop = to_iomap_page(page);
+ struct iomap_page *iop = to_iomap_page(folio);
if (unlikely(error)) {
- ClearPageUptodate(page);
- SetPageError(page);
+ folio_clear_uptodate(folio);
+ folio_set_error(folio);
} else {
- iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+ iomap_set_range_uptodate(folio, iop, offset, len);
}
- if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
- unlock_page(page);
+ if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
+ folio_unlock(folio);
}
-static void
-iomap_read_end_io(struct bio *bio)
+static void iomap_read_end_io(struct bio *bio)
{
int error = blk_status_to_errno(bio->bi_status);
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
- bio_for_each_segment_all(bvec, bio, iter_all)
- iomap_read_page_end_io(bvec, error);
+ bio_for_each_folio_all(fi, bio)
+ iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
bio_put(bio);
}
struct iomap_readpage_ctx {
- struct page *cur_page;
- bool cur_page_in_bio;
+ struct folio *cur_folio;
+ bool cur_folio_in_bio;
struct bio *bio;
struct readahead_control *rac;
};
@@ -208,21 +197,23 @@ struct iomap_readpage_ctx {
/**
* iomap_read_inline_data - copy inline data into the page cache
* @iter: iteration structure
- * @page: page to copy to
+ * @folio: folio to copy to
*
- * Copy the inline data in @iter into @page and zero out the rest of the page.
+ * Copy the inline data in @iter into @folio and zero out the rest of the folio.
* Only a single IOMAP_INLINE extent is allowed at the end of each file.
* Returns zero for success to complete the read, or the usual negative errno.
*/
static int iomap_read_inline_data(const struct iomap_iter *iter,
- struct page *page)
+ struct folio *folio)
{
+ struct iomap_page *iop;
const struct iomap *iomap = iomap_iter_srcmap(iter);
size_t size = i_size_read(iter->inode) - iomap->offset;
size_t poff = offset_in_page(iomap->offset);
+ size_t offset = offset_in_folio(folio, iomap->offset);
void *addr;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return 0;
if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
@@ -232,14 +223,16 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
return -EIO;
if (WARN_ON_ONCE(size > iomap->length))
return -EIO;
- if (poff > 0)
- iomap_page_create(iter->inode, page);
+ if (offset > 0)
+ iop = iomap_page_create(iter->inode, folio);
+ else
+ iop = to_iomap_page(folio);
- addr = kmap_local_page(page) + poff;
+ addr = kmap_local_folio(folio, offset);
memcpy(addr, iomap->inline_data, size);
memset(addr + size, 0, PAGE_SIZE - poff - size);
kunmap_local(addr);
- iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
+ iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
return 0;
}
@@ -259,36 +252,36 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
const struct iomap *iomap = &iter->iomap;
loff_t pos = iter->pos + offset;
loff_t length = iomap_length(iter) - offset;
- struct page *page = ctx->cur_page;
+ struct folio *folio = ctx->cur_folio;
struct iomap_page *iop;
loff_t orig_pos = pos;
- unsigned poff, plen;
+ size_t poff, plen;
sector_t sector;
if (iomap->type == IOMAP_INLINE)
- return iomap_read_inline_data(iter, page);
+ return iomap_read_inline_data(iter, folio);
/* zero post-eof blocks as the page may be mapped */
- iop = iomap_page_create(iter->inode, page);
- iomap_adjust_read_range(iter->inode, iop, &pos, length, &poff, &plen);
+ iop = iomap_page_create(iter->inode, folio);
+ iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
if (plen == 0)
goto done;
if (iomap_block_needs_zeroing(iter, pos)) {
- zero_user(page, poff, plen);
- iomap_set_range_uptodate(page, poff, plen);
+ folio_zero_range(folio, poff, plen);
+ iomap_set_range_uptodate(folio, iop, poff, plen);
goto done;
}
- ctx->cur_page_in_bio = true;
+ ctx->cur_folio_in_bio = true;
if (iop)
atomic_add(plen, &iop->read_bytes_pending);
sector = iomap_sector(iomap, pos);
if (!ctx->bio ||
bio_end_sector(ctx->bio) != sector ||
- bio_add_page(ctx->bio, page, plen, poff) != plen) {
- gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+ !bio_add_folio(ctx->bio, folio, plen, poff)) {
+ gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
gfp_t orig_gfp = gfp;
unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -311,8 +304,9 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
ctx->bio->bi_iter.bi_sector = sector;
bio_set_dev(ctx->bio, iomap->bdev);
ctx->bio->bi_end_io = iomap_read_end_io;
- __bio_add_page(ctx->bio, page, plen, poff);
+ bio_add_folio(ctx->bio, folio, plen, poff);
}
+
done:
/*
* Move the caller beyond our range so that it keeps making progress.
@@ -326,30 +320,31 @@ done:
int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
+ struct folio *folio = page_folio(page);
struct iomap_iter iter = {
- .inode = page->mapping->host,
- .pos = page_offset(page),
- .len = PAGE_SIZE,
+ .inode = folio->mapping->host,
+ .pos = folio_pos(folio),
+ .len = folio_size(folio),
};
struct iomap_readpage_ctx ctx = {
- .cur_page = page,
+ .cur_folio = folio,
};
int ret;
- trace_iomap_readpage(page->mapping->host, 1);
+ trace_iomap_readpage(iter.inode, 1);
while ((ret = iomap_iter(&iter, ops)) > 0)
iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
if (ret < 0)
- SetPageError(page);
+ folio_set_error(folio);
if (ctx.bio) {
submit_bio(ctx.bio);
- WARN_ON_ONCE(!ctx.cur_page_in_bio);
+ WARN_ON_ONCE(!ctx.cur_folio_in_bio);
} else {
- WARN_ON_ONCE(ctx.cur_page_in_bio);
- unlock_page(page);
+ WARN_ON_ONCE(ctx.cur_folio_in_bio);
+ folio_unlock(folio);
}
/*
@@ -368,15 +363,15 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
loff_t done, ret;
for (done = 0; done < length; done += ret) {
- if (ctx->cur_page && offset_in_page(iter->pos + done) == 0) {
- if (!ctx->cur_page_in_bio)
- unlock_page(ctx->cur_page);
- put_page(ctx->cur_page);
- ctx->cur_page = NULL;
+ if (ctx->cur_folio &&
+ offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
+ if (!ctx->cur_folio_in_bio)
+ folio_unlock(ctx->cur_folio);
+ ctx->cur_folio = NULL;
}
- if (!ctx->cur_page) {
- ctx->cur_page = readahead_page(ctx->rac);
- ctx->cur_page_in_bio = false;
+ if (!ctx->cur_folio) {
+ ctx->cur_folio = readahead_folio(ctx->rac);
+ ctx->cur_folio_in_bio = false;
}
ret = iomap_readpage_iter(iter, ctx, done);
if (ret <= 0)
@@ -419,10 +414,9 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
if (ctx.bio)
submit_bio(ctx.bio);
- if (ctx.cur_page) {
- if (!ctx.cur_page_in_bio)
- unlock_page(ctx.cur_page);
- put_page(ctx.cur_page);
+ if (ctx.cur_folio) {
+ if (!ctx.cur_folio_in_bio)
+ folio_unlock(ctx.cur_folio);
}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
@@ -438,7 +432,8 @@ int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count)
{
- struct iomap_page *iop = to_iomap_page(page);
+ struct folio *folio = page_folio(page);
+ struct iomap_page *iop = to_iomap_page(folio);
struct inode *inode = page->mapping->host;
unsigned len, first, last;
unsigned i;
@@ -464,36 +459,49 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
- trace_iomap_releasepage(page->mapping->host, page_offset(page),
- PAGE_SIZE);
+ struct folio *folio = page_folio(page);
+
+ trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
+ folio_size(folio));
/*
* mm accommodates an old ext3 case where clean pages might not have had
* the dirty bit cleared. Thus, it can send actual dirty pages to
* ->releasepage() via shrink_active_list(); skip those here.
*/
- if (PageDirty(page) || PageWriteback(page))
+ if (folio_test_dirty(folio) || folio_test_writeback(folio))
return 0;
- iomap_page_release(page);
+ iomap_page_release(folio);
return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);
-void
-iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
- trace_iomap_invalidatepage(page->mapping->host, offset, len);
+ trace_iomap_invalidatepage(folio->mapping->host, offset, len);
/*
- * If we're invalidating the entire page, clear the dirty state from it
- * and release it to avoid unnecessary buildup of the LRU.
+ * If we're invalidating the entire folio, clear the dirty state
+ * from it and release it to avoid unnecessary buildup of the LRU.
*/
- if (offset == 0 && len == PAGE_SIZE) {
- WARN_ON_ONCE(PageWriteback(page));
- cancel_dirty_page(page);
- iomap_page_release(page);
+ if (offset == 0 && len == folio_size(folio)) {
+ WARN_ON_ONCE(folio_test_writeback(folio));
+ folio_cancel_dirty(folio);
+ iomap_page_release(folio);
+ } else if (folio_test_large(folio)) {
+ /* Must release the iop so the page can be split */
+ WARN_ON_ONCE(!folio_test_uptodate(folio) &&
+ folio_test_dirty(folio));
+ iomap_page_release(folio);
}
}
+EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
+
+void iomap_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int len)
+{
+ iomap_invalidate_folio(page_folio(page), offset, len);
+}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);
#ifdef CONFIG_MIGRATION
@@ -501,19 +509,21 @@ int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page, enum migrate_mode mode)
{
+ struct folio *folio = page_folio(page);
+ struct folio *newfolio = page_folio(newpage);
int ret;
- ret = migrate_page_move_mapping(mapping, newpage, page, 0);
+ ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
if (ret != MIGRATEPAGE_SUCCESS)
return ret;
- if (page_has_private(page))
- attach_page_private(newpage, detach_page_private(page));
+ if (folio_test_private(folio))
+ folio_attach_private(newfolio, folio_detach_private(folio));
if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
+ folio_migrate_copy(newfolio, folio);
else
- migrate_page_states(newpage, page);
+ folio_migrate_flags(newfolio, folio);
return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
@@ -532,9 +542,8 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
-static int
-iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
- unsigned plen, const struct iomap *iomap)
+static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
+ size_t poff, size_t plen, const struct iomap *iomap)
{
struct bio_vec bvec;
struct bio bio;
@@ -543,26 +552,27 @@ iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
bio.bi_opf = REQ_OP_READ;
bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
bio_set_dev(&bio, iomap->bdev);
- __bio_add_page(&bio, page, plen, poff);
+ bio_add_folio(&bio, folio, plen, poff);
return submit_bio_wait(&bio);
}
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
- unsigned len, struct page *page)
+ size_t len, struct folio *folio)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
- struct iomap_page *iop = iomap_page_create(iter->inode, page);
+ struct iomap_page *iop = iomap_page_create(iter->inode, folio);
loff_t block_size = i_blocksize(iter->inode);
loff_t block_start = round_down(pos, block_size);
loff_t block_end = round_up(pos + len, block_size);
- unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+ size_t from = offset_in_folio(folio, pos), to = from + len;
+ size_t poff, plen;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return 0;
- ClearPageError(page);
+ folio_clear_error(folio);
do {
- iomap_adjust_read_range(iter->inode, iop, &block_start,
+ iomap_adjust_read_range(iter->inode, folio, &block_start,
block_end - block_start, &poff, &plen);
if (plen == 0)
break;
@@ -575,34 +585,35 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
if (iomap_block_needs_zeroing(iter, block_start)) {
if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
return -EIO;
- zero_user_segments(page, poff, from, to, poff + plen);
+ folio_zero_segments(folio, poff, from, to, poff + plen);
} else {
- int status = iomap_read_page_sync(block_start, page,
+ int status = iomap_read_folio_sync(block_start, folio,
poff, plen, srcmap);
if (status)
return status;
}
- iomap_set_range_uptodate(page, poff, plen);
+ iomap_set_range_uptodate(folio, iop, poff, plen);
} while ((block_start += plen) < block_end);
return 0;
}
static int iomap_write_begin_inline(const struct iomap_iter *iter,
- struct page *page)
+ struct folio *folio)
{
/* needs more work for the tailpacking case; disable for now */
if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
return -EIO;
- return iomap_read_inline_data(iter, page);
+ return iomap_read_inline_data(iter, folio);
}
static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
- unsigned len, struct page **pagep)
+ size_t len, struct folio **foliop)
{
const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
- struct page *page;
+ struct folio *folio;
+ unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
int status = 0;
BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
@@ -612,35 +623,40 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
if (fatal_signal_pending(current))
return -EINTR;
+ if (!mapping_large_folio_support(iter->inode->i_mapping))
+ len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
+
if (page_ops && page_ops->page_prepare) {
status = page_ops->page_prepare(iter->inode, pos, len);
if (status)
return status;
}
- page = grab_cache_page_write_begin(iter->inode->i_mapping,
- pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
- if (!page) {
+ folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+ fgp, mapping_gfp_mask(iter->inode->i_mapping));
+ if (!folio) {
status = -ENOMEM;
goto out_no_page;
}
+ if (pos + len > folio_pos(folio) + folio_size(folio))
+ len = folio_pos(folio) + folio_size(folio) - pos;
if (srcmap->type == IOMAP_INLINE)
- status = iomap_write_begin_inline(iter, page);
+ status = iomap_write_begin_inline(iter, folio);
else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
- status = __block_write_begin_int(page, pos, len, NULL, srcmap);
+ status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
else
- status = __iomap_write_begin(iter, pos, len, page);
+ status = __iomap_write_begin(iter, pos, len, folio);
if (unlikely(status))
goto out_unlock;
- *pagep = page;
+ *foliop = folio;
return 0;
out_unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
iomap_write_failed(iter->inode, pos, len);
out_no_page:
@@ -650,9 +666,10 @@ out_no_page:
}
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
- size_t copied, struct page *page)
+ size_t copied, struct folio *folio)
{
- flush_dcache_page(page);
+ struct iomap_page *iop = to_iomap_page(folio);
+ flush_dcache_folio(folio);
/*
* The blocks that were entirely written will now be uptodate, so we
@@ -665,24 +682,24 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
* non-uptodate page as a zero-length write, and force the caller to
* redo the whole thing.
*/
- if (unlikely(copied < len && !PageUptodate(page)))
+ if (unlikely(copied < len && !folio_test_uptodate(folio)))
return 0;
- iomap_set_range_uptodate(page, offset_in_page(pos), len);
- __set_page_dirty_nobuffers(page);
+ iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
+ filemap_dirty_folio(inode->i_mapping, folio);
return copied;
}
static size_t iomap_write_end_inline(const struct iomap_iter *iter,
- struct page *page, loff_t pos, size_t copied)
+ struct folio *folio, loff_t pos, size_t copied)
{
const struct iomap *iomap = &iter->iomap;
void *addr;
- WARN_ON_ONCE(!PageUptodate(page));
+ WARN_ON_ONCE(!folio_test_uptodate(folio));
BUG_ON(!iomap_inline_data_valid(iomap));
- flush_dcache_page(page);
- addr = kmap_local_page(page) + pos;
+ flush_dcache_folio(folio);
+ addr = kmap_local_folio(folio, pos);
memcpy(iomap_inline_data(iomap, pos), addr, copied);
kunmap_local(addr);
@@ -692,7 +709,7 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
/* Returns the number of bytes copied. May be 0. Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
- size_t copied, struct page *page)
+ size_t copied, struct folio *folio)
{
const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
@@ -700,12 +717,12 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t ret;
if (srcmap->type == IOMAP_INLINE) {
- ret = iomap_write_end_inline(iter, page, pos, copied);
+ ret = iomap_write_end_inline(iter, folio, pos, copied);
} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
- copied, page, NULL);
+ copied, &folio->page, NULL);
} else {
- ret = __iomap_write_end(iter->inode, pos, len, copied, page);
+ ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
}
/*
@@ -717,13 +734,13 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
i_size_write(iter->inode, pos + ret);
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
}
- unlock_page(page);
+ folio_unlock(folio);
if (old_size < pos)
pagecache_isize_extended(iter->inode, old_size, pos);
if (page_ops && page_ops->page_done)
- page_ops->page_done(iter->inode, pos, ret, page);
- put_page(page);
+ page_ops->page_done(iter->inode, pos, ret, &folio->page);
+ folio_put(folio);
if (ret < len)
iomap_write_failed(iter->inode, pos, len);
@@ -738,6 +755,7 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
long status = 0;
do {
+ struct folio *folio;
struct page *page;
unsigned long offset; /* Offset into pagecache page */
unsigned long bytes; /* Bytes to write to page */
@@ -761,16 +779,17 @@ again:
break;
}
- status = iomap_write_begin(iter, pos, bytes, &page);
+ status = iomap_write_begin(iter, pos, bytes, &folio);
if (unlikely(status))
break;
+ page = folio_file_page(folio, pos >> PAGE_SHIFT);
if (mapping_writably_mapped(iter->inode->i_mapping))
flush_dcache_page(page);
copied = copy_page_from_iter_atomic(page, offset, bytes, i);
- status = iomap_write_end(iter, pos, bytes, copied, page);
+ status = iomap_write_end(iter, pos, bytes, copied, folio);
if (unlikely(copied != status))
iov_iter_revert(i, copied - status);
@@ -836,13 +855,13 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
do {
unsigned long offset = offset_in_page(pos);
unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
- struct page *page;
+ struct folio *folio;
- status = iomap_write_begin(iter, pos, bytes, &page);
+ status = iomap_write_begin(iter, pos, bytes, &folio);
if (unlikely(status))
return status;
- status = iomap_write_end(iter, pos, bytes, bytes, page);
+ status = iomap_write_end(iter, pos, bytes, bytes, folio);
if (WARN_ON_ONCE(status == 0))
return -EIO;
@@ -876,23 +895,6 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
-static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
-{
- struct page *page;
- int status;
- unsigned offset = offset_in_page(pos);
- unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
-
- status = iomap_write_begin(iter, pos, bytes, &page);
- if (status)
- return status;
-
- zero_user(page, offset, bytes);
- mark_page_accessed(page);
-
- return iomap_write_end(iter, pos, bytes, bytes, page);
-}
-
static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
struct iomap *iomap = &iter->iomap;
@@ -906,14 +908,34 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
return length;
do {
- s64 bytes;
+ struct folio *folio;
+ int status;
+ size_t offset;
+ size_t bytes = min_t(u64, SIZE_MAX, length);
+
+ if (IS_DAX(iter->inode)) {
+ s64 tmp = dax_iomap_zero(pos, bytes, iomap);
+ if (tmp < 0)
+ return tmp;
+ bytes = tmp;
+ goto good;
+ }
- if (IS_DAX(iter->inode))
- bytes = dax_iomap_zero(pos, length, iomap);
- else
- bytes = __iomap_zero_iter(iter, pos, length);
- if (bytes < 0)
- return bytes;
+ status = iomap_write_begin(iter, pos, bytes, &folio);
+ if (status)
+ return status;
+
+ offset = offset_in_folio(folio, pos);
+ if (bytes > folio_size(folio) - offset)
+ bytes = folio_size(folio) - offset;
+
+ folio_zero_range(folio, offset, bytes);
+ folio_mark_accessed(folio);
+
+ bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+good:
+ if (WARN_ON_ONCE(bytes == 0))
+ return -EIO;
pos += bytes;
length -= bytes;
@@ -957,21 +979,21 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
-static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
- struct page *page)
+static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
+ struct folio *folio)
{
loff_t length = iomap_length(iter);
int ret;
if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
- ret = __block_write_begin_int(page, iter->pos, length, NULL,
+ ret = __block_write_begin_int(folio, iter->pos, length, NULL,
&iter->iomap);
if (ret)
return ret;
- block_commit_write(page, 0, length);
+ block_commit_write(&folio->page, 0, length);
} else {
- WARN_ON_ONCE(!PageUptodate(page));
- set_page_dirty(page);
+ WARN_ON_ONCE(!folio_test_uptodate(folio));
+ folio_mark_dirty(folio);
}
return length;
@@ -983,44 +1005,43 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
.inode = file_inode(vmf->vma->vm_file),
.flags = IOMAP_WRITE | IOMAP_FAULT,
};
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
ssize_t ret;
- lock_page(page);
- ret = page_mkwrite_check_truncate(page, iter.inode);
+ folio_lock(folio);
+ ret = folio_mkwrite_check_truncate(folio, iter.inode);
if (ret < 0)
goto out_unlock;
- iter.pos = page_offset(page);
+ iter.pos = folio_pos(folio);
iter.len = ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_page_mkwrite_iter(&iter, page);
+ iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
if (ret < 0)
goto out_unlock;
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
return VM_FAULT_LOCKED;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
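
(A hedged sketch of how a filesystem reaches this entry point from its fault
path; the example_* names are assumptions, and XFS does the equivalent from
its ->page_mkwrite handler:)

	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
	{
		/* example_iomap_ops is a hypothetical struct iomap_ops */
		return iomap_page_mkwrite(vmf, &example_iomap_ops);
	}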
-static void
-iomap_finish_page_writeback(struct inode *inode, struct page *page,
- int error, unsigned int len)
+static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+ size_t len, int error)
{
- struct iomap_page *iop = to_iomap_page(page);
+ struct iomap_page *iop = to_iomap_page(folio);
if (error) {
- SetPageError(page);
+ folio_set_error(folio);
mapping_set_error(inode->i_mapping, error);
}
- WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
+ WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
- end_page_writeback(page);
+ folio_end_writeback(folio);
}
/*
@@ -1039,8 +1060,7 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
bool quiet = bio_flagged(bio, BIO_QUIET);
for (bio = &ioend->io_inline_bio; bio; bio = next) {
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
/*
* For the last bio, bi_private points to the ioend, so we
@@ -1051,10 +1071,10 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
else
next = bio->bi_private;
- /* walk each page on bio, ending page IO on them */
- bio_for_each_segment_all(bv, bio, iter_all)
- iomap_finish_page_writeback(inode, bv->bv_page, error,
- bv->bv_len);
+ /* walk all folios in bio, ending page IO on them */
+ bio_for_each_folio_all(fi, bio)
+ iomap_finish_folio_write(inode, fi.folio, fi.length,
+ error);
bio_put(bio);
}
/* The ioend has been freed by bio_put() */
@@ -1249,29 +1269,29 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
* first; otherwise finish off the current ioend and start another.
*/
static void
-iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
+iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct list_head *iolist)
{
- sector_t sector = iomap_sector(&wpc->iomap, offset);
+ sector_t sector = iomap_sector(&wpc->iomap, pos);
unsigned len = i_blocksize(inode);
- unsigned poff = offset & (PAGE_SIZE - 1);
+ size_t poff = offset_in_folio(folio, pos);
- if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
+ if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
if (wpc->ioend)
list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
+ wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
}
- if (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len) {
+ if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
- __bio_add_page(wpc->ioend->io_bio, page, len, poff);
+ bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
}
if (iop)
atomic_add(len, &iop->write_bytes_pending);
wpc->ioend->io_size += len;
- wbc_account_cgroup_owner(wbc, page, len);
+ wbc_account_cgroup_owner(wbc, &folio->page, len);
}
/*
@@ -1293,44 +1313,43 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct inode *inode,
- struct page *page, u64 end_offset)
+ struct folio *folio, u64 end_pos)
{
- struct iomap_page *iop = iomap_page_create(inode, page);
+ struct iomap_page *iop = iomap_page_create(inode, folio);
struct iomap_ioend *ioend, *next;
unsigned len = i_blocksize(inode);
- u64 file_offset; /* file offset of page */
+ unsigned nblocks = i_blocks_per_folio(inode, folio);
+ u64 pos = folio_pos(folio);
int error = 0, count = 0, i;
LIST_HEAD(submit_list);
WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
/*
- * Walk through the page to find areas to write back. If we run off the
- * end of the current map or find the current map invalid, grab a new
- * one.
+ * Walk through the folio to find areas to write back. If we
+ * run off the end of the current map or find the current map
+ * invalid, grab a new one.
*/
- for (i = 0, file_offset = page_offset(page);
- i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
- i++, file_offset += len) {
+ for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
if (iop && !test_bit(i, iop->uptodate))
continue;
- error = wpc->ops->map_blocks(wpc, inode, file_offset);
+ error = wpc->ops->map_blocks(wpc, inode, pos);
if (error)
break;
if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
continue;
if (wpc->iomap.type == IOMAP_HOLE)
continue;
- iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
+ iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
&submit_list);
count++;
}
WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
- WARN_ON_ONCE(!PageLocked(page));
- WARN_ON_ONCE(PageWriteback(page));
- WARN_ON_ONCE(PageDirty(page));
+ WARN_ON_ONCE(!folio_test_locked(folio));
+ WARN_ON_ONCE(folio_test_writeback(folio));
+ WARN_ON_ONCE(folio_test_dirty(folio));
/*
* We cannot cancel the ioend directly here on error. We may have
@@ -1345,17 +1364,17 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* won't be affected by I/O completion and we must unlock it
* now.
*/
- if (wpc->ops->discard_page)
- wpc->ops->discard_page(page, file_offset);
+ if (wpc->ops->discard_folio)
+ wpc->ops->discard_folio(folio, pos);
if (!count) {
- ClearPageUptodate(page);
- unlock_page(page);
+ folio_clear_uptodate(folio);
+ folio_unlock(folio);
goto done;
}
}
- set_page_writeback(page);
- unlock_page(page);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
/*
* Preserve the original error if there was one; catch
@@ -1376,9 +1395,9 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* with a partial page truncate on a sub-page block sized filesystem.
*/
if (!count)
- end_page_writeback(page);
+ folio_end_writeback(folio);
done:
- mapping_set_error(page->mapping, error);
+ mapping_set_error(folio->mapping, error);
return error;
}
@@ -1392,16 +1411,15 @@ done:
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
{
+ struct folio *folio = page_folio(page);
struct iomap_writepage_ctx *wpc = data;
- struct inode *inode = page->mapping->host;
- pgoff_t end_index;
- u64 end_offset;
- loff_t offset;
+ struct inode *inode = folio->mapping->host;
+ u64 end_pos, isize;
- trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
+ trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
/*
- * Refuse to write the page out if we're called from reclaim context.
+ * Refuse to write the folio out if we're called from reclaim context.
*
* This avoids stack overflows when called from deeply used stacks in
* random callers for direct reclaim or memcg reclaim. We explicitly
@@ -1415,10 +1433,10 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
goto redirty;
/*
- * Is this page beyond the end of the file?
+ * Is this folio beyond the end of the file?
*
- * The page index is less than the end_index, adjust the end_offset
- * to the highest offset that this page should represent.
+ * The folio index is less than the end_index, adjust the end_pos
+ * to the highest offset that this folio should represent.
* -----------------------------------------------------
* | file mapping | <EOF> |
* -----------------------------------------------------
@@ -1427,11 +1445,9 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
* | desired writeback range | see else |
* ---------------------------------^------------------|
*/
- offset = i_size_read(inode);
- end_index = offset >> PAGE_SHIFT;
- if (page->index < end_index)
- end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
- else {
+ isize = i_size_read(inode);
+ end_pos = folio_pos(folio) + folio_size(folio);
+ if (end_pos > isize) {
/*
* Check whether the page to write out is beyond or straddles
* i_size or not.
@@ -1443,7 +1459,8 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
* | | Straddles |
* ---------------------------------^-----------|--------|
*/
- unsigned offset_into_page = offset & (PAGE_SIZE - 1);
+ size_t poff = offset_in_folio(folio, isize);
+ pgoff_t end_index = isize >> PAGE_SHIFT;
/*
* Skip the page if it's fully outside i_size, e.g. due to a
@@ -1462,8 +1479,8 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
* checking if the page is totally beyond i_size or if its
* offset is just equal to the EOF.
*/
- if (page->index > end_index ||
- (page->index == end_index && offset_into_page == 0))
+ if (folio->index > end_index ||
+ (folio->index == end_index && poff == 0))
goto redirty;
/*
@@ -1474,17 +1491,15 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
* memory is zeroed when mapped, and writes to that region are
* not written out to the file."
*/
- zero_user_segment(page, offset_into_page, PAGE_SIZE);
-
- /* Adjust the end_offset to the end of file */
- end_offset = offset;
+ folio_zero_segment(folio, poff, folio_size(folio));
+ end_pos = isize;
}
- return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
+ return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
redirty:
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
return 0;
}
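
(Taken together, the exported entry points converted above are what a
filesystem plugs into its address_space_operations. A minimal sketch under
stated assumptions — example_iomap_ops and the example_* wrappers are
hypothetical; writeback still goes through iomap_writepage()/
iomap_writepages() with an iomap_writeback_ops table such as the
xfs_writeback_ops below:)

	#include <linux/fs.h>
	#include <linux/iomap.h>

	static int example_readpage(struct file *unused, struct page *page)
	{
		return iomap_readpage(page, &example_iomap_ops);
	}

	static void example_readahead(struct readahead_control *rac)
	{
		iomap_readahead(rac, &example_iomap_ops);
	}

	static const struct address_space_operations example_aops = {
		.readpage		= example_readpage,
		.readahead		= example_readahead,
		.releasepage		= iomap_releasepage,
		.invalidatepage		= iomap_invalidatepage,
		.is_partially_uptodate	= iomap_is_partially_uptodate,
		.migratepage		= iomap_migrate_page, /* CONFIG_MIGRATION */
	};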
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index c8c15c3c3147..4098a9875c5b 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -437,37 +437,37 @@ xfs_prepare_ioend(
* see a ENOSPC in writeback).
*/
static void
-xfs_discard_page(
- struct page *page,
- loff_t fileoff)
+xfs_discard_folio(
+ struct folio *folio,
+ loff_t pos)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- unsigned int pageoff = offset_in_page(fileoff);
- xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, fileoff);
- xfs_fileoff_t pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff);
+ size_t offset = offset_in_folio(folio, pos);
+ xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, pos);
+ xfs_fileoff_t pageoff_fsb = XFS_B_TO_FSBT(mp, offset);
int error;
if (xfs_is_shutdown(mp))
goto out_invalidate;
xfs_alert_ratelimited(mp,
- "page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
- page, ip->i_ino, fileoff);
+ "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
+ folio, ip->i_ino, pos);
error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
- i_blocks_per_page(inode, page) - pageoff_fsb);
+ i_blocks_per_folio(inode, folio) - pageoff_fsb);
if (error && !xfs_is_shutdown(mp))
xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
- iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
+ iomap_invalidate_folio(folio, offset, folio_size(folio) - offset);
}
static const struct iomap_writeback_ops xfs_writeback_ops = {
.map_blocks = xfs_map_blocks,
.prepare_ioend = xfs_prepare_ioend,
- .discard_page = xfs_discard_page,
+ .discard_folio = xfs_discard_folio,
};
STATIC int
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index d019c98eb839..2e718728986f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -87,6 +87,7 @@ xfs_inode_alloc(
/* VFS doesn't initialise i_mode or i_state! */
VFS_I(ip)->i_mode = 0;
VFS_I(ip)->i_state = 0;
+ mapping_set_large_folios(VFS_I(ip)->i_mapping);
XFS_STATS_INC(mp, vn_active);
ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -320,6 +321,7 @@ xfs_reinit_inode(
inode->i_rdev = dev;
inode->i_uid = uid;
inode->i_gid = gid;
+ mapping_set_large_folios(inode->i_mapping);
return error;
}
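
(For context, a condensed sketch of the opt-in pattern these two hunks add —
the function name is assumed. The flag must be set before the first page
cache insertion, which is why it lives in inode (re)initialisation rather
than in the I/O paths:)

	static void example_setup_mapping(struct inode *inode)
	{
		/* From here on this mapping may receive multi-page folios. */
		mapping_set_large_folios(inode->i_mapping);
	}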
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0a41efe02208..117d7f248ac9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -166,7 +166,7 @@ static inline void bio_advance(struct bio *bio, unsigned int nbytes)
*/
#define bio_for_each_bvec_all(bvl, bio, i) \
for (i = 0, bvl = bio_first_bvec_all(bio); \
- i < (bio)->bi_vcnt; i++, bvl++) \
+ i < (bio)->bi_vcnt; i++, bvl++)
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
@@ -260,6 +260,57 @@ static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
+/**
+ * struct folio_iter - State for iterating all folios in a bio.
+ * @folio: The current folio we're iterating. NULL after the last folio.
+ * @offset: The byte offset within the current folio.
+ * @length: The number of bytes in this iteration (will not cross folio
+ * boundary).
+ */
+struct folio_iter {
+ struct folio *folio;
+ size_t offset;
+ size_t length;
+ /* private: for use by the iterator */
+ size_t _seg_count;
+ int _i;
+};
+
+static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+ int i)
+{
+ struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
+
+ fi->folio = page_folio(bvec->bv_page);
+ fi->offset = bvec->bv_offset +
+ PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
+ fi->_seg_count = bvec->bv_len;
+ fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+ fi->_i = i;
+}
+
+static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
+{
+ fi->_seg_count -= fi->length;
+ if (fi->_seg_count) {
+ fi->folio = folio_next(fi->folio);
+ fi->offset = 0;
+ fi->length = min(folio_size(fi->folio), fi->_seg_count);
+ } else if (fi->_i + 1 < bio->bi_vcnt) {
+ bio_first_folio(fi, bio, fi->_i + 1);
+ } else {
+ fi->folio = NULL;
+ }
+}
+
+/**
+ * bio_for_each_folio_all - Iterate over each folio in a bio.
+ * @fi: struct folio_iter which is updated for each folio.
+ * @bio: struct bio to iterate over.
+ */
+#define bio_for_each_folio_all(fi, bio) \
+ for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -409,7 +460,8 @@ extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);
-extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
+bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
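
(A hedged usage sketch for the new iterator — illustrative only, and it
assumes each folio appears at most once in the bio, as in the iomap
writeback path that motivates it:)

	static void example_write_end_io(struct bio *bio)
	{
		struct folio_iter fi;

		/* fi.offset/fi.length never cross a folio boundary */
		bio_for_each_folio_all(fi, bio)
			folio_end_writeback(fi.folio);
		bio_put(bio);
	}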
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 6d1b08d0ae93..5ef5088dbbd8 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -225,6 +225,7 @@ void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
int iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count);
int iomap_releasepage(struct page *page, gfp_t gfp_mask);
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
void iomap_invalidatepage(struct page *page, unsigned int offset,
unsigned int len);
#ifdef CONFIG_MIGRATION
@@ -284,7 +285,7 @@ struct iomap_writeback_ops {
* Optional, allows the file system to discard state on a page where
* we failed to submit any I/O.
*/
- void (*discard_page)(struct page *page, loff_t fileoff);
+ void (*discard_folio)(struct folio *folio, loff_t pos);
};
struct iomap_writepage_ctx {