author     Chao Yu <yuchao0@huawei.com>        2020-09-17 09:18:21 +0800
committer  Gao Xiang <hsiangkao@redhat.com>    2020-09-18 22:17:44 +0800
commit     e3f78d5e7e6b0825f4e646f74b0e469b023e5df4
tree       3d66c497febb984af3aac57c19a6b2f14f98535f /fs/erofs
parent     d578b46db69d125a654f509bdc9091d84e924dc8
erofs: remove unneeded parameter
After commit 0615090c5044 ("erofs: convert compressed files from
readpages to readahead"), add_to_page_cache_lru() was moved into mm
code, so in the call path below no page is ever cached into the
@pagepool list or grabbed from it:

- z_erofs_readpage
  - z_erofs_do_read_page
    - preload_compressed_pages
    - erofs_allocpage

Let's get rid of the unneeded @pagepool parameter.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Link: https://lore.kernel.org/r/20200917011821.22767-1-yuchao0@huawei.com
Reviewed-by: Gao Xiang <hsiangkao@redhat.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
Diffstat (limited to 'fs/erofs')
-rw-r--r--  fs/erofs/zdata.c  15
1 file changed, 6 insertions(+), 9 deletions(-)
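Note on why dropping the parameter is behavior-neutral: erofs_allocpage() only
differs from a plain alloc_page() when the @pagepool list it is handed actually
contains reusable free pages, and on the paths touched by this patch that list
stays empty. For illustration only (not part of this patch), a simplified
sketch of the helper, based on fs/erofs/utils.c around this series, looks
roughly like:

    /* Simplified sketch of erofs_allocpage(); details may differ slightly
     * from the exact tree this patch applies to. */
    #include <linux/list.h>
    #include <linux/mm.h>

    struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
    {
            struct page *page;

            if (!list_empty(pool)) {
                    /* reuse a free page already collected into the pool */
                    page = lru_to_page(pool);
                    list_del(&page->lru);
            } else {
                    /* empty pool: fall back to a fresh allocation */
                    page = alloc_page(gfp);
            }
            return page;
    }

Since the readpage/readahead paths never populate the pool, the list_empty()
branch is the only one ever taken, so calling alloc_page() directly in the
staging-page fallback is equivalent and the parameter can be dropped from the
whole call chain.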
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 6c939def00f9..ac6cb73df192 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -153,8 +153,7 @@ static DEFINE_MUTEX(z_pagemap_global_lock);
 
 static void preload_compressed_pages(struct z_erofs_collector *clt,
                                      struct address_space *mc,
-                                     enum z_erofs_cache_alloctype type,
-                                     struct list_head *pagepool)
+                                     enum z_erofs_cache_alloctype type)
 {
         const struct z_erofs_pcluster *pcl = clt->pcl;
         const unsigned int clusterpages = BIT(pcl->clusterbits);
@@ -562,8 +561,7 @@ static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
 }
 
 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
-                                struct page *page,
-                                struct list_head *pagepool)
+                                struct page *page)
 {
         struct inode *const inode = fe->inode;
         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
@@ -620,8 +618,7 @@ restart_now:
         else
                 cache_strategy = DONTALLOC;
 
-        preload_compressed_pages(clt, MNGD_MAPPING(sbi),
-                                 cache_strategy, pagepool);
+        preload_compressed_pages(clt, MNGD_MAPPING(sbi), cache_strategy);
 
 hitted:
         /*
@@ -653,7 +650,7 @@ retry:
         /* should allocate an additional staging page for pagevec */
         if (err == -EAGAIN) {
                 struct page *const newpage =
-                                erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
+                                alloc_page(GFP_NOFS | __GFP_NOFAIL);
 
                 newpage->mapping = Z_EROFS_MAPPING_STAGING;
                 err = z_erofs_attach_page(clt, newpage,
@@ -1282,7 +1279,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
 
         f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
 
-        err = z_erofs_do_read_page(&f, page, &pagepool);
+        err = z_erofs_do_read_page(&f, page);
         (void)z_erofs_collector_end(&f.clt);
 
         /* if some compressed cluster ready, need submit them anyway */
@@ -1341,7 +1338,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
                 /* traversal in reverse order */
                 head = (void *)page_private(page);
 
-                err = z_erofs_do_read_page(&f, page, &pagepool);
+                err = z_erofs_do_read_page(&f, page);
                 if (err)
                         erofs_err(inode->i_sb,
                                   "readahead error at page %lu @ nid %llu",