author		Christian Brauner <brauner@kernel.org>	2024-08-19 13:40:11 +0200
committer	Christian Brauner <brauner@kernel.org>	2024-08-19 13:40:11 +0200
commit		424f8cb7449220e1d479eeba233c8da32c20caf5 (patch)
tree		6cf5a7b3e0b4507be2ab6c8d6f6fe7f51051cf94
parent		3e673d651c3749e7447f0ccccfebb9cea58d8bf2 (diff)
parent		2da4c51a66cd633d9fc8feb3e98eaa5d9924fd45 (diff)
Merge patch series "Finish converting jffs2 to folios"
Matthew Wilcox (Oracle) <willy@infradead.org> says:

This patch series applies on top of fs-next. After applying these two
patches, there are no more references to 'struct page' in jffs2. I
obviously haven't tested it at all beyond compilation.

* patches from https://lore.kernel.org/r/20240814195915.249871-1-willy@infradead.org:
  jffs2: Use a folio in jffs2_garbage_collect_dnode()
  jffs2: Convert jffs2_do_readpage_nolock to take a folio

Link: https://lore.kernel.org/r/20240814195915.249871-1-willy@infradead.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
-rw-r--r--	fs/jffs2/file.c	24
-rw-r--r--	fs/jffs2/gc.c	25
2 files changed, 23 insertions(+), 26 deletions(-)
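For readers less familiar with the folio kmap helpers, here is a minimal sketch of the conversion pattern this series applies. The function and the backing-store reader below are illustrative stand-ins (the real code uses jffs2_do_readpage_nolock() and jffs2_read_inode_range()), not part of the patch itself:

	#include <linux/fs.h>
	#include <linux/highmem.h>
	#include <linux/pagemap.h>

	/* Hypothetical filler showing the kmap(page)/SetPageUptodate() ->
	 * kmap_local_folio()/folio_mark_uptodate() conversion pattern.
	 * example_read_backing_store() is a stand-in for a filesystem's
	 * real read routine (jffs2_read_inode_range() in this patch). */
	static int example_fill_folio(struct inode *inode, struct folio *folio)
	{
		void *kaddr;
		int ret;

		BUG_ON(!folio_test_locked(folio));	/* was PageLocked(pg) */

		/* kmap(pg) becomes a short-lived local mapping of the folio */
		kaddr = kmap_local_folio(folio, 0);
		ret = example_read_backing_store(inode, kaddr, folio_pos(folio),
						 folio_size(folio));
		kunmap_local(kaddr);			/* was kunmap(pg) */

		if (!ret)
			folio_mark_uptodate(folio);	/* was SetPageUptodate(pg) */
		flush_dcache_folio(folio);		/* was flush_dcache_page(pg) */

		return ret;
	}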
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index ada572c466f8..13c18ccc13b0 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -77,29 +77,27 @@ const struct address_space_operations jffs2_file_address_operations =
.write_end = jffs2_write_end,
};
-static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
+static int jffs2_do_readpage_nolock(struct inode *inode, struct folio *folio)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- unsigned char *pg_buf;
+ unsigned char *kaddr;
int ret;
jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
- __func__, inode->i_ino, pg->index << PAGE_SHIFT);
+ __func__, inode->i_ino, folio->index << PAGE_SHIFT);
- BUG_ON(!PageLocked(pg));
+ BUG_ON(!folio_test_locked(folio));
- pg_buf = kmap(pg);
- /* FIXME: Can kmap fail? */
-
- ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
+ kaddr = kmap_local_folio(folio, 0);
+ ret = jffs2_read_inode_range(c, f, kaddr, folio->index << PAGE_SHIFT,
PAGE_SIZE);
+ kunmap_local(kaddr);
if (!ret)
- SetPageUptodate(pg);
+ folio_mark_uptodate(folio);
- flush_dcache_page(pg);
- kunmap(pg);
+ flush_dcache_folio(folio);
jffs2_dbg(2, "readpage finished\n");
return ret;
@@ -107,7 +105,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
int __jffs2_read_folio(struct file *file, struct folio *folio)
{
- int ret = jffs2_do_readpage_nolock(folio->mapping->host, &folio->page);
+ int ret = jffs2_do_readpage_nolock(folio->mapping->host, folio);
folio_unlock(folio);
return ret;
}
@@ -221,7 +219,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
*/
if (!folio_test_uptodate(folio)) {
mutex_lock(&f->sem);
- ret = jffs2_do_readpage_nolock(inode, &folio->page);
+ ret = jffs2_do_readpage_nolock(inode, folio);
mutex_unlock(&f->sem);
if (ret) {
folio_unlock(folio);
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 5c6602f3c189..822949d0eb00 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -1171,7 +1171,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
uint32_t alloclen, offset, orig_end, orig_start;
int ret = 0;
unsigned char *comprbuf = NULL, *writebuf;
- struct page *page;
+ struct folio *folio;
unsigned char *pg_ptr;
memset(&ri, 0, sizeof(ri));
@@ -1317,25 +1317,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
BUG_ON(start > orig_start);
}
- /* The rules state that we must obtain the page lock *before* f->sem, so
+ /* The rules state that we must obtain the folio lock *before* f->sem, so
* drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
* actually going to *change* so we're safe; we only allow reading.
*
* It is important to note that jffs2_write_begin() will ensure that its
- * page is marked Uptodate before allocating space. That means that if we
- * end up here trying to GC the *same* page that jffs2_write_begin() is
- * trying to write out, read_cache_page() will not deadlock. */
+ * folio is marked uptodate before allocating space. That means that if we
+ * end up here trying to GC the *same* folio that jffs2_write_begin() is
+ * trying to write out, read_cache_folio() will not deadlock. */
mutex_unlock(&f->sem);
- page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
+ folio = read_cache_folio(inode->i_mapping, start >> PAGE_SHIFT,
__jffs2_read_folio, NULL);
- if (IS_ERR(page)) {
- pr_warn("read_cache_page() returned error: %ld\n",
- PTR_ERR(page));
+ if (IS_ERR(folio)) {
+ pr_warn("read_cache_folio() returned error: %ld\n",
+ PTR_ERR(folio));
mutex_lock(&f->sem);
- return PTR_ERR(page);
+ return PTR_ERR(folio);
}
- pg_ptr = kmap(page);
+ pg_ptr = kmap_local_folio(folio, 0);
mutex_lock(&f->sem);
offset = start;
@@ -1400,7 +1400,6 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
}
}
- kunmap(page);
- put_page(page);
+ folio_release_kmap(folio, pg_ptr);
return ret;
}
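The gc.c side pairs read_cache_folio() with folio_release_kmap(), as the hunk above shows. Roughly, with error handling trimmed and the surrounding names illustrative:

	#include <linux/highmem.h>
	#include <linux/pagemap.h>

	/* Illustrative sketch of the GC-side access pattern after the
	 * conversion; not the actual jffs2_garbage_collect_dnode() code. */
	static int example_peek_gc_data(struct address_space *mapping, loff_t start)
	{
		struct folio *folio;
		unsigned char *kaddr;

		/* Returns an uptodate, referenced (but unlocked) folio. */
		folio = read_cache_folio(mapping, start >> PAGE_SHIFT,
					 __jffs2_read_folio, NULL);
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		kaddr = kmap_local_folio(folio, 0);
		/* ... read the node data through kaddr here ... */

		/* folio_release_kmap() is kunmap_local() + folio_put(),
		 * replacing the old kunmap(page); put_page(page); pair. */
		folio_release_kmap(folio, kaddr);
		return 0;
	}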