author	Matthew Wilcox (Oracle) <willy@infradead.org>	2021-11-05 14:24:09 -0400
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2021-12-18 00:06:07 -0500
commit	a25def1fe56858efa40a8490e875da4a711487f8 (patch)
tree	e29401aa007c42efa8962aef16637ab4c5f81109 /fs/iomap
parent	d454ab82bc7f4aa7af9f539d5cf9a1e237cdcbc2 (diff)
iomap: Convert __iomap_zero_iter to use a folio
The zero iterator can work in folio-sized chunks instead of page-sized chunks.  This will save a lot of page cache lookups if the file is cached in large folios.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
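To make the "fewer page cache lookups" claim concrete, here is a minimal userspace sketch (not kernel code; the 2 MiB folio size and the EXAMPLE_* names are assumptions for illustration) of how many write_begin/write_end round trips are needed to zero a cached range in page-sized versus folio-sized chunks:

/*
 * Minimal userspace sketch (hypothetical sizes, not kernel code): with
 * 4 KiB pages, zeroing a cached 2 MiB range takes 512 round trips;
 * backed by a single 2 MiB folio it takes one.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096UL			/* assumed page size */
#define EXAMPLE_FOLIO_SIZE	(2UL * 1024 * 1024)	/* assumed large-folio size */

/* Round up: one write_begin()/write_end() cycle per unit-sized chunk. */
static unsigned long chunks(unsigned long length, unsigned long unit)
{
	return (length + unit - 1) / unit;
}

int main(void)
{
	unsigned long length = 2UL * 1024 * 1024;	/* bytes to zero */

	printf("page-sized chunks:  %lu iterations\n",
	       chunks(length, EXAMPLE_PAGE_SIZE));
	printf("folio-sized chunks: %lu iterations\n",
	       chunks(length, EXAMPLE_FOLIO_SIZE));
	return 0;
}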
Diffstat (limited to 'fs/iomap')
-rw-r--r--	fs/iomap/buffered-io.c	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index b1ded5204d1c..47cf558244f4 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -893,19 +893,23 @@ EXPORT_SYMBOL_GPL(iomap_file_unshare);
 
 static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
 {
+	struct folio *folio;
 	struct page *page;
 	int status;
-	unsigned offset = offset_in_page(pos);
+	size_t offset;
 	unsigned bytes = min_t(u64, UINT_MAX, length);
 
 	status = iomap_write_begin(iter, pos, bytes, &page);
 	if (status)
 		return status;
-	if (bytes > PAGE_SIZE - offset)
-		bytes = PAGE_SIZE - offset;
+	folio = page_folio(page);
+
+	offset = offset_in_folio(folio, pos);
+	if (bytes > folio_size(folio) - offset)
+		bytes = folio_size(folio) - offset;
 
-	zero_user(page, offset, bytes);
-	mark_page_accessed(page);
+	folio_zero_range(folio, offset, bytes);
+	folio_mark_accessed(folio);
 
 	return iomap_write_end(iter, pos, bytes, bytes, page);
 }
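The helper returns the number of bytes it zeroed, and its caller advances by that amount and calls it again until the range is done; with folio-sized chunks each pass covers a whole folio rather than a single page. The loop below is a hedged sketch of that caller pattern (zero_range_sketch is a hypothetical name, not the kernel's actual iomap_zero_iter):

/*
 * Hypothetical caller sketch, not the kernel's actual iterator: it keeps
 * calling the helper and advances by however many bytes each call zeroed,
 * so a range backed by large folios completes in far fewer iterations.
 */
static loff_t zero_range_sketch(struct iomap_iter *iter, loff_t pos, u64 length)
{
	loff_t written = 0;

	do {
		s64 bytes = __iomap_zero_iter(iter, pos, length);

		if (bytes < 0)
			return bytes;	/* propagate the error from write_begin */
		pos += bytes;
		length -= bytes;
		written += bytes;
	} while (length > 0);

	return written;
}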