author     Matthew Wilcox (Oracle) <willy@infradead.org>   2021-04-27 22:56:17 -0400
committer  Matthew Wilcox (Oracle) <willy@infradead.org>   2021-12-16 15:49:51 -0500
commit     95c4cd053a1d7c4f1e171ec31d2fb8a8f5c87efe (patch)
tree       c4024f38e0fe70a69aa5a075713396e273f7a6ac /fs/iomap
parent     d1bd0b4ebfe0521964e6937195bd2f76866660c7 (diff)
iomap: Convert to_iomap_page to take a folio
The big comment about only using a head page can go away now that it
takes a folio argument.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'fs/iomap')
-rw-r--r--  fs/iomap/buffered-io.c  32
1 file changed, 15 insertions(+), 17 deletions(-)
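A note on why the conversion is safe: a folio by definition never refers to a tail page, so the old head-page comment and the VM_BUG_ON_PGFLAGS(PageTail(page), page) check have no equivalent in the folio version, and the lookup reduces to a test of the private pointer. The standalone snippet below is only a simplified mock of that reduced lookup; the struct folio and struct iomap_page definitions here are illustrative stand-ins, not the kernel's types.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel structures -- illustration only. */
struct iomap_page {
        unsigned int read_bytes_pending;
};

struct folio {
        bool has_private;       /* models folio_test_private() */
        void *private;          /* models folio_get_private() */
};

/*
 * Mirrors the shape of the converted helper: no tail-page sanity
 * check is needed because a folio is never a tail page, so the
 * lookup is just "is there private data attached?".
 */
static struct iomap_page *to_iomap_page(struct folio *folio)
{
        if (folio->has_private)
                return folio->private;
        return NULL;
}

int main(void)
{
        struct iomap_page iop = { .read_bytes_pending = 0 };
        struct folio folio = { .has_private = true, .private = &iop };

        printf("iop attached: %s\n", to_iomap_page(&folio) ? "yes" : "no");
        return 0;
}

Callers that still hold a struct page, as in the hunks below, first obtain the owning folio with page_folio() before calling the helper.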
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index ecb65167715b..101d5b0754e9 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -22,8 +22,8 @@
 #include "../internal.h"
 
 /*
- * Structure allocated for each page or THP when block size < page size
- * to track sub-page uptodate status and I/O completions.
+ * Structure allocated for each folio when block size < folio size
+ * to track sub-folio uptodate status and I/O completions.
  */
 struct iomap_page {
         atomic_t read_bytes_pending;
@@ -32,17 +32,10 @@ struct iomap_page {
         unsigned long uptodate[];
 };
 
-static inline struct iomap_page *to_iomap_page(struct page *page)
+static inline struct iomap_page *to_iomap_page(struct folio *folio)
 {
-        /*
-         * per-block data is stored in the head page. Callers should
-         * not be dealing with tail pages, and if they are, they can
-         * call thp_head() first.
-         */
-        VM_BUG_ON_PGFLAGS(PageTail(page), page);
-
-        if (page_has_private(page))
-                return (struct iomap_page *)page_private(page);
+        if (folio_test_private(folio))
+                return folio_get_private(folio);
         return NULL;
 }
 
@@ -51,7 +44,8 @@ static struct bio_set iomap_ioend_bioset;
 static struct iomap_page *
 iomap_page_create(struct inode *inode, struct page *page)
 {
-        struct iomap_page *iop = to_iomap_page(page);
+        struct folio *folio = page_folio(page);
+        struct iomap_page *iop = to_iomap_page(folio);
         unsigned int nr_blocks = i_blocks_per_page(inode, page);
 
         if (iop || nr_blocks <= 1)
@@ -144,7 +138,8 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
 static void
 iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 {
-        struct iomap_page *iop = to_iomap_page(page);
+        struct folio *folio = page_folio(page);
+        struct iomap_page *iop = to_iomap_page(folio);
         struct inode *inode = page->mapping->host;
         unsigned first = off >> inode->i_blkbits;
         unsigned last = (off + len - 1) >> inode->i_blkbits;
@@ -173,7 +168,8 @@ static void
 iomap_read_page_end_io(struct bio_vec *bvec, int error)
 {
         struct page *page = bvec->bv_page;
-        struct iomap_page *iop = to_iomap_page(page);
+        struct folio *folio = page_folio(page);
+        struct iomap_page *iop = to_iomap_page(folio);
 
         if (unlikely(error)) {
                 ClearPageUptodate(page);
@@ -438,7 +434,8 @@ int
 iomap_is_partially_uptodate(struct page *page, unsigned long from,
                 unsigned long count)
 {
-        struct iomap_page *iop = to_iomap_page(page);
+        struct folio *folio = page_folio(page);
+        struct iomap_page *iop = to_iomap_page(folio);
         struct inode *inode = page->mapping->host;
         unsigned len, first, last;
         unsigned i;
@@ -1012,7 +1009,8 @@ static void
 iomap_finish_page_writeback(struct inode *inode, struct page *page,
                 int error, unsigned int len)
 {
-        struct iomap_page *iop = to_iomap_page(page);
+        struct folio *folio = page_folio(page);
+        struct iomap_page *iop = to_iomap_page(folio);
 
         if (error) {
                 SetPageError(page);