Diffstat (limited to 'fs/f2fs')
-rw-r--r--  fs/f2fs/acl.c           |   33
-rw-r--r--  fs/f2fs/acl.h           |   10
-rw-r--r--  fs/f2fs/checkpoint.c    |  248
-rw-r--r--  fs/f2fs/compress.c      |  278
-rw-r--r--  fs/f2fs/data.c          |  413
-rw-r--r--  fs/f2fs/debug.c         |   21
-rw-r--r--  fs/f2fs/dir.c           |  243
-rw-r--r--  fs/f2fs/extent_cache.c  |   16
-rw-r--r--  fs/f2fs/f2fs.h          |  452
-rw-r--r--  fs/f2fs/file.c          |  344
-rw-r--r--  fs/f2fs/gc.c            |  191
-rw-r--r--  fs/f2fs/gc.h            |    5
-rw-r--r--  fs/f2fs/inline.c        |  318
-rw-r--r--  fs/f2fs/inode.c         |  171
-rw-r--r--  fs/f2fs/namei.c         |  143
-rw-r--r--  fs/f2fs/node.c          |  804
-rw-r--r--  fs/f2fs/node.h          |   83
-rw-r--r--  fs/f2fs/recovery.c      |  236
-rw-r--r--  fs/f2fs/segment.c       |  279
-rw-r--r--  fs/f2fs/segment.h       |  189
-rw-r--r--  fs/f2fs/shrinker.c      |   13
-rw-r--r--  fs/f2fs/super.c         | 2275
-rw-r--r--  fs/f2fs/sysfs.c         |   89
-rw-r--r--  fs/f2fs/xattr.c         |  116
-rw-r--r--  fs/f2fs/xattr.h         |   24
25 files changed, 3863 insertions, 3131 deletions
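The hunks below repeatedly apply one conversion: struct page based helpers are
replaced by struct folio equivalents (f2fs_grab_meta_page -> f2fs_grab_meta_folio,
f2fs_get_meta_page -> f2fs_get_meta_folio, f2fs_put_page -> f2fs_folio_put, and so
on). Two details recur in almost every caller: the folio helpers report failure
with ERR_PTR()/IS_ERR() rather than NULL, and f2fs_folio_put() takes a bool unlock
flag where f2fs_put_page() took 0/1. A minimal before/after sketch of the caller
pattern, for orientation only (this is not a hunk from the commit):

	/* before: page-based caller; NULL on failure, 0/1 unlock flag */
	struct page *page = f2fs_grab_cache_page(mapping, index, false);

	if (!page)
		return -ENOMEM;
	f2fs_wait_on_page_writeback(page, META, true, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	f2fs_put_page(page, 1);			/* put and unlock */

	/* after: folio-based caller; ERR_PTR() on failure, bool unlock flag */
	struct folio *folio = f2fs_grab_cache_folio(mapping, index, false);

	if (IS_ERR(folio))
		return PTR_ERR(folio);
	f2fs_folio_wait_writeback(folio, META, true, true);
	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	f2fs_folio_put(folio, true);		/* put and unlock */

The same error-contract change explains why many hunks replace "if (!page)" checks
with "if (IS_ERR(folio))" even where the surrounding logic is otherwise untouched.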
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index 1fbc0607363b..d4d7f329d23f 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -166,7 +166,7 @@ fail: } static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type, - struct page *dpage) + struct folio *dfolio) { int name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT; void *value = NULL; @@ -176,13 +176,13 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type, if (type == ACL_TYPE_ACCESS) name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; - retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage); + retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dfolio); if (retval > 0) { value = f2fs_kmalloc(F2FS_I_SB(inode), retval, GFP_F2FS_ZERO); if (!value) return ERR_PTR(-ENOMEM); retval = f2fs_getxattr(inode, name_index, "", value, - retval, dpage); + retval, dfolio); } if (retval > 0) @@ -227,7 +227,7 @@ static int f2fs_acl_update_mode(struct mnt_idmap *idmap, static int __f2fs_set_acl(struct mnt_idmap *idmap, struct inode *inode, int type, - struct posix_acl *acl, struct page *ipage) + struct posix_acl *acl, struct folio *ifolio) { int name_index; void *value = NULL; @@ -238,9 +238,8 @@ static int __f2fs_set_acl(struct mnt_idmap *idmap, switch (type) { case ACL_TYPE_ACCESS: name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; - if (acl && !ipage) { - error = f2fs_acl_update_mode(idmap, inode, - &mode, &acl); + if (acl && !ifolio) { + error = f2fs_acl_update_mode(idmap, inode, &mode, &acl); if (error) return error; set_acl_inode(inode, mode); @@ -265,7 +264,7 @@ static int __f2fs_set_acl(struct mnt_idmap *idmap, } } - error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0); + error = f2fs_setxattr(inode, name_index, "", value, size, ifolio, 0); kfree(value); if (!error) @@ -360,7 +359,7 @@ static int f2fs_acl_create_masq(struct posix_acl *acl, umode_t *mode_p) static int f2fs_acl_create(struct inode *dir, umode_t *mode, struct posix_acl **default_acl, struct posix_acl **acl, - struct page *dpage) + struct folio *dfolio) { struct posix_acl *p; struct posix_acl *clone; @@ -372,7 +371,7 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode, if (S_ISLNK(*mode) || !IS_POSIXACL(dir)) return 0; - p = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dpage); + p = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dfolio); if (!p || p == ERR_PTR(-EOPNOTSUPP)) { *mode &= ~current_umask(); return 0; @@ -409,29 +408,29 @@ release_acl: return ret; } -int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage, - struct page *dpage) +int f2fs_init_acl(struct inode *inode, struct inode *dir, struct folio *ifolio, + struct folio *dfolio) { struct posix_acl *default_acl = NULL, *acl = NULL; int error; - error = f2fs_acl_create(dir, &inode->i_mode, &default_acl, &acl, dpage); + error = f2fs_acl_create(dir, &inode->i_mode, &default_acl, &acl, dfolio); if (error) return error; f2fs_mark_inode_dirty_sync(inode, true); if (default_acl) { - error = __f2fs_set_acl(NULL, inode, ACL_TYPE_DEFAULT, default_acl, - ipage); + error = __f2fs_set_acl(NULL, inode, ACL_TYPE_DEFAULT, + default_acl, ifolio); posix_acl_release(default_acl); } else { inode->i_default_acl = NULL; } if (acl) { if (!error) - error = __f2fs_set_acl(NULL, inode, ACL_TYPE_ACCESS, acl, - ipage); + error = __f2fs_set_acl(NULL, inode, ACL_TYPE_ACCESS, + acl, ifolio); posix_acl_release(acl); } else { inode->i_acl = NULL; diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h index 94ebfbfbdc6f..20e87e63c089 100644 --- a/fs/f2fs/acl.h +++ b/fs/f2fs/acl.h @@ -33,17 +33,17 @@ struct 
f2fs_acl_header { #ifdef CONFIG_F2FS_FS_POSIX_ACL -extern struct posix_acl *f2fs_get_acl(struct inode *, int, bool); -extern int f2fs_set_acl(struct mnt_idmap *, struct dentry *, +struct posix_acl *f2fs_get_acl(struct inode *, int, bool); +int f2fs_set_acl(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); -extern int f2fs_init_acl(struct inode *, struct inode *, struct page *, - struct page *); +int f2fs_init_acl(struct inode *, struct inode *, struct folio *ifolio, + struct folio *dfolio); #else #define f2fs_get_acl NULL #define f2fs_set_acl NULL static inline int f2fs_init_acl(struct inode *inode, struct inode *dir, - struct page *ipage, struct page *dpage) + struct folio *ifolio, struct folio *dfolio) { return 0; } diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index cf77987d0698..db3831f7f2f5 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -29,7 +29,7 @@ struct kmem_cache *f2fs_inode_entry_slab; void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, unsigned char reason) { - f2fs_build_fault_attr(sbi, 0, 0); + f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL); if (!end_io) f2fs_flush_merged_writes(sbi); f2fs_handle_critical_error(sbi, reason); @@ -38,23 +38,23 @@ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, /* * We guarantee no failure on the returned page. */ -struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) +struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index) { struct address_space *mapping = META_MAPPING(sbi); - struct page *page; + struct folio *folio; repeat: - page = f2fs_grab_cache_page(mapping, index, false); - if (!page) { + folio = f2fs_grab_cache_folio(mapping, index, false); + if (IS_ERR(folio)) { cond_resched(); goto repeat; } - f2fs_wait_on_page_writeback(page, META, true, true); - if (!PageUptodate(page)) - SetPageUptodate(page); - return page; + f2fs_folio_wait_writeback(folio, META, true, true); + if (!folio_test_uptodate(folio)) + folio_mark_uptodate(folio); + return folio; } -static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, +static struct folio *__get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index, bool is_meta) { struct address_space *mapping = META_MAPPING(sbi); @@ -82,7 +82,7 @@ repeat: if (folio_test_uptodate(folio)) goto out; - fio.page = &folio->page; + fio.folio = folio; err = f2fs_submit_page_bio(&fio); if (err) { @@ -93,7 +93,7 @@ repeat: f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, F2FS_BLKSIZE); folio_lock(folio); - if (unlikely(folio->mapping != mapping)) { + if (unlikely(!is_meta_folio(folio))) { f2fs_folio_put(folio, true); goto repeat; } @@ -104,34 +104,34 @@ repeat: return ERR_PTR(-EIO); } out: - return &folio->page; + return folio; } -struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) +struct folio *f2fs_get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index) { - return __get_meta_page(sbi, index, true); + return __get_meta_folio(sbi, index, true); } -struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index) +struct folio *f2fs_get_meta_folio_retry(struct f2fs_sb_info *sbi, pgoff_t index) { - struct page *page; + struct folio *folio; int count = 0; retry: - page = __get_meta_page(sbi, index, true); - if (IS_ERR(page)) { - if (PTR_ERR(page) == -EIO && + folio = __get_meta_folio(sbi, index, true); + if (IS_ERR(folio)) { + if (PTR_ERR(folio) == -EIO && ++count <= DEFAULT_RETRY_IO_COUNT) goto retry; f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_META_PAGE); } - return 
page; + return folio; } /* for POR only */ -struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index) +struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index) { - return __get_meta_page(sbi, index, false); + return __get_meta_folio(sbi, index, false); } static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, @@ -252,7 +252,6 @@ bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi, int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type, bool sync) { - struct page *page; block_t blkno = start; struct f2fs_io_info fio = { .sbi = sbi, @@ -271,6 +270,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, blk_start_plug(&plug); for (; nrpages-- > 0; blkno++) { + struct folio *folio; if (!f2fs_is_valid_blkaddr(sbi, blkno, type)) goto out; @@ -300,18 +300,18 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, BUG(); } - page = f2fs_grab_cache_page(META_MAPPING(sbi), + folio = f2fs_grab_cache_folio(META_MAPPING(sbi), fio.new_blkaddr, false); - if (!page) + if (IS_ERR(folio)) continue; - if (PageUptodate(page)) { - f2fs_put_page(page, 1); + if (folio_test_uptodate(folio)) { + f2fs_folio_put(folio, true); continue; } - fio.page = page; + fio.folio = folio; err = f2fs_submit_page_bio(&fio); - f2fs_put_page(page, err ? 1 : 0); + f2fs_folio_put(folio, err ? true : false); if (!err) f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, @@ -325,27 +325,26 @@ out: void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index, unsigned int ra_blocks) { - struct page *page; + struct folio *folio; bool readahead = false; if (ra_blocks == RECOVERY_MIN_RA_BLOCKS) return; - page = find_get_page(META_MAPPING(sbi), index); - if (!page || !PageUptodate(page)) + folio = filemap_get_folio(META_MAPPING(sbi), index); + if (IS_ERR(folio) || !folio_test_uptodate(folio)) readahead = true; - f2fs_put_page(page, 0); + f2fs_folio_put(folio, false); if (readahead) f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true); } -static int __f2fs_write_meta_page(struct page *page, +static bool __f2fs_write_meta_folio(struct folio *folio, struct writeback_control *wbc, enum iostat_type io_type) { - struct f2fs_sb_info *sbi = F2FS_P_SB(page); - struct folio *folio = page_folio(page); + struct f2fs_sb_info *sbi = F2FS_F_SB(folio); trace_f2fs_writepage(folio, META); @@ -354,31 +353,26 @@ static int __f2fs_write_meta_page(struct page *page, folio_clear_uptodate(folio); dec_page_count(sbi, F2FS_DIRTY_META); folio_unlock(folio); - return 0; + return true; } goto redirty_out; } if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; - if (wbc->for_reclaim && folio->index < GET_SUM_BLOCK(sbi, 0)) - goto redirty_out; f2fs_do_write_meta_page(sbi, folio, io_type); dec_page_count(sbi, F2FS_DIRTY_META); - if (wbc->for_reclaim) - f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META); - folio_unlock(folio); if (unlikely(f2fs_cp_error(sbi))) f2fs_submit_merged_write(sbi, META); - return 0; + return true; redirty_out: - redirty_page_for_writepage(wbc, page); - return AOP_WRITEPAGE_ACTIVATE; + folio_redirty_for_writepage(wbc, folio); + return false; } static int f2fs_write_meta_pages(struct address_space *mapping, @@ -421,9 +415,7 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, struct folio_batch fbatch; long nwritten = 0; int nr_folios; - struct writeback_control wbc = { - .for_reclaim = 0, - }; + struct writeback_control wbc = {}; struct blk_plug plug; 
folio_batch_init(&fbatch); @@ -447,7 +439,7 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, folio_lock(folio); - if (unlikely(folio->mapping != mapping)) { + if (unlikely(!is_meta_folio(folio))) { continue_unlock: folio_unlock(folio); continue; @@ -457,13 +449,12 @@ continue_unlock: goto continue_unlock; } - f2fs_wait_on_page_writeback(&folio->page, META, - true, true); + f2fs_folio_wait_writeback(folio, META, true, true); if (!folio_clear_dirty_for_io(folio)) goto continue_unlock; - if (__f2fs_write_meta_page(&folio->page, &wbc, + if (!__f2fs_write_meta_folio(folio, &wbc, io_type)) { folio_unlock(folio); break; @@ -494,7 +485,7 @@ static bool f2fs_dirty_meta_folio(struct address_space *mapping, folio_mark_uptodate(folio); if (filemap_dirty_folio(mapping, folio)) { inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META); - set_page_private_reference(&folio->page); + folio_set_f2fs_reference(folio); return true; } return false; @@ -513,6 +504,7 @@ static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, { struct inode_management *im = &sbi->im[type]; struct ino_entry *e = NULL, *new = NULL; + int ret; if (type == FLUSH_INO) { rcu_read_lock(); @@ -525,7 +517,8 @@ retry: new = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS, true, NULL); - radix_tree_preload(GFP_NOFS | __GFP_NOFAIL); + ret = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL); + f2fs_bug_on(sbi, ret); spin_lock(&im->ino_lock); e = radix_tree_lookup(&im->ino_root, ino); @@ -750,26 +743,26 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi) f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true); for (i = 0; i < orphan_blocks; i++) { - struct page *page; + struct folio *folio; struct f2fs_orphan_block *orphan_blk; - page = f2fs_get_meta_page(sbi, start_blk + i); - if (IS_ERR(page)) { - err = PTR_ERR(page); + folio = f2fs_get_meta_folio(sbi, start_blk + i); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); goto out; } - orphan_blk = (struct f2fs_orphan_block *)page_address(page); + orphan_blk = folio_address(folio); for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) { nid_t ino = le32_to_cpu(orphan_blk->ino[j]); err = recover_orphan_inode(sbi, ino); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); goto out; } } - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); } /* clear Orphan Flag */ clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG); @@ -786,7 +779,7 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) unsigned int nentries = 0; unsigned short index = 1; unsigned short orphan_blocks; - struct page *page = NULL; + struct folio *folio = NULL; struct ino_entry *orphan = NULL; struct inode_management *im = &sbi->im[ORPHAN_INO]; @@ -801,10 +794,9 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) /* loop for each orphan inode entry and write them in journal block */ list_for_each_entry(orphan, head, list) { - if (!page) { - page = f2fs_grab_meta_page(sbi, start_blk++); - orphan_blk = - (struct f2fs_orphan_block *)page_address(page); + if (!folio) { + folio = f2fs_grab_meta_folio(sbi, start_blk++); + orphan_blk = folio_address(folio); memset(orphan_blk, 0, sizeof(*orphan_blk)); } @@ -819,62 +811,61 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) orphan_blk->blk_addr = cpu_to_le16(index); orphan_blk->blk_count = cpu_to_le16(orphan_blocks); orphan_blk->entry_count = cpu_to_le32(nentries); - set_page_dirty(page); - f2fs_put_page(page, 1); + folio_mark_dirty(folio); + 
f2fs_folio_put(folio, true); index++; nentries = 0; - page = NULL; + folio = NULL; } } - if (page) { + if (folio) { orphan_blk->blk_addr = cpu_to_le16(index); orphan_blk->blk_count = cpu_to_le16(orphan_blocks); orphan_blk->entry_count = cpu_to_le32(nentries); - set_page_dirty(page); - f2fs_put_page(page, 1); + folio_mark_dirty(folio); + f2fs_folio_put(folio, true); } } -static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi, - struct f2fs_checkpoint *ckpt) +static __u32 f2fs_checkpoint_chksum(struct f2fs_checkpoint *ckpt) { unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset); __u32 chksum; - chksum = f2fs_crc32(sbi, ckpt, chksum_ofs); + chksum = f2fs_crc32(ckpt, chksum_ofs); if (chksum_ofs < CP_CHKSUM_OFFSET) { chksum_ofs += sizeof(chksum); - chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs, - F2FS_BLKSIZE - chksum_ofs); + chksum = f2fs_chksum(chksum, (__u8 *)ckpt + chksum_ofs, + F2FS_BLKSIZE - chksum_ofs); } return chksum; } static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr, - struct f2fs_checkpoint **cp_block, struct page **cp_page, + struct f2fs_checkpoint **cp_block, struct folio **cp_folio, unsigned long long *version) { size_t crc_offset = 0; __u32 crc; - *cp_page = f2fs_get_meta_page(sbi, cp_addr); - if (IS_ERR(*cp_page)) - return PTR_ERR(*cp_page); + *cp_folio = f2fs_get_meta_folio(sbi, cp_addr); + if (IS_ERR(*cp_folio)) + return PTR_ERR(*cp_folio); - *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page); + *cp_block = folio_address(*cp_folio); crc_offset = le32_to_cpu((*cp_block)->checksum_offset); if (crc_offset < CP_MIN_CHKSUM_OFFSET || crc_offset > CP_CHKSUM_OFFSET) { - f2fs_put_page(*cp_page, 1); + f2fs_folio_put(*cp_folio, true); f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset); return -EINVAL; } - crc = f2fs_checkpoint_chksum(sbi, *cp_block); + crc = f2fs_checkpoint_chksum(*cp_block); if (crc != cur_cp_crc(*cp_block)) { - f2fs_put_page(*cp_page, 1); + f2fs_folio_put(*cp_folio, true); f2fs_warn(sbi, "invalid crc value"); return -EINVAL; } @@ -883,17 +874,17 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr, return 0; } -static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, +static struct folio *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr, unsigned long long *version) { - struct page *cp_page_1 = NULL, *cp_page_2 = NULL; + struct folio *cp_folio_1 = NULL, *cp_folio_2 = NULL; struct f2fs_checkpoint *cp_block = NULL; unsigned long long cur_version = 0, pre_version = 0; unsigned int cp_blocks; int err; err = get_checkpoint_version(sbi, cp_addr, &cp_block, - &cp_page_1, version); + &cp_folio_1, version); if (err) return NULL; @@ -908,19 +899,19 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, cp_addr += cp_blocks - 1; err = get_checkpoint_version(sbi, cp_addr, &cp_block, - &cp_page_2, version); + &cp_folio_2, version); if (err) goto invalid_cp; cur_version = *version; if (cur_version == pre_version) { *version = cur_version; - f2fs_put_page(cp_page_2, 1); - return cp_page_1; + f2fs_folio_put(cp_folio_2, true); + return cp_folio_1; } - f2fs_put_page(cp_page_2, 1); + f2fs_folio_put(cp_folio_2, true); invalid_cp: - f2fs_put_page(cp_page_1, 1); + f2fs_folio_put(cp_folio_1, true); return NULL; } @@ -928,7 +919,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) { struct f2fs_checkpoint *cp_block; struct f2fs_super_block *fsb = sbi->raw_super; - struct page *cp1, *cp2, *cur_page; + struct folio *cp1, *cp2, *cur_folio; unsigned long 
blk_size = sbi->blocksize; unsigned long long cp1_version = 0, cp2_version = 0; unsigned long long cp_start_blk_no; @@ -955,22 +946,22 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) if (cp1 && cp2) { if (ver_after(cp2_version, cp1_version)) - cur_page = cp2; + cur_folio = cp2; else - cur_page = cp1; + cur_folio = cp1; } else if (cp1) { - cur_page = cp1; + cur_folio = cp1; } else if (cp2) { - cur_page = cp2; + cur_folio = cp2; } else { err = -EFSCORRUPTED; goto fail_no_cp; } - cp_block = (struct f2fs_checkpoint *)page_address(cur_page); + cp_block = folio_address(cur_folio); memcpy(sbi->ckpt, cp_block, blk_size); - if (cur_page == cp1) + if (cur_folio == cp1) sbi->cur_cp_pack = 1; else sbi->cur_cp_pack = 2; @@ -985,30 +976,30 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) goto done; cp_blk_no = le32_to_cpu(fsb->cp_blkaddr); - if (cur_page == cp2) + if (cur_folio == cp2) cp_blk_no += BIT(le32_to_cpu(fsb->log_blocks_per_seg)); for (i = 1; i < cp_blks; i++) { void *sit_bitmap_ptr; unsigned char *ckpt = (unsigned char *)sbi->ckpt; - cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i); - if (IS_ERR(cur_page)) { - err = PTR_ERR(cur_page); + cur_folio = f2fs_get_meta_folio(sbi, cp_blk_no + i); + if (IS_ERR(cur_folio)) { + err = PTR_ERR(cur_folio); goto free_fail_no_cp; } - sit_bitmap_ptr = page_address(cur_page); + sit_bitmap_ptr = folio_address(cur_folio); memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size); - f2fs_put_page(cur_page, 1); + f2fs_folio_put(cur_folio, true); } done: - f2fs_put_page(cp1, 1); - f2fs_put_page(cp2, 1); + f2fs_folio_put(cp1, true); + f2fs_folio_put(cp2, true); return 0; free_fail_no_cp: - f2fs_put_page(cp1, 1); - f2fs_put_page(cp2, 1); + f2fs_folio_put(cp1, true); + f2fs_folio_put(cp2, true); fail_no_cp: kvfree(sbi->ckpt); return err; @@ -1054,7 +1045,7 @@ void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio) inode_inc_dirty_pages(inode); spin_unlock(&sbi->inode_lock[type]); - set_page_private_reference(&folio->page); + folio_set_f2fs_reference(folio); } void f2fs_remove_dirty_inode(struct inode *inode) @@ -1218,7 +1209,6 @@ static int block_operations(struct f2fs_sb_info *sbi) struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, - .for_reclaim = 0, }; int err = 0, cnt = 0; @@ -1402,35 +1392,31 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) static void commit_checkpoint(struct f2fs_sb_info *sbi, void *src, block_t blk_addr) { - struct writeback_control wbc = { - .for_reclaim = 0, - }; + struct writeback_control wbc = {}; /* - * filemap_get_folios_tag and lock_page again will take + * filemap_get_folios_tag and folio_lock again will take * some extra time. Therefore, f2fs_update_meta_pages and * f2fs_sync_meta_pages are combined in this function. 
*/ - struct page *page = f2fs_grab_meta_page(sbi, blk_addr); - int err; + struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr); - f2fs_wait_on_page_writeback(page, META, true, true); + memcpy(folio_address(folio), src, PAGE_SIZE); - memcpy(page_address(page), src, PAGE_SIZE); - - set_page_dirty(page); - if (unlikely(!clear_page_dirty_for_io(page))) + folio_mark_dirty(folio); + if (unlikely(!folio_clear_dirty_for_io(folio))) f2fs_bug_on(sbi, 1); /* writeout cp pack 2 page */ - err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO); - if (unlikely(err && f2fs_cp_error(sbi))) { - f2fs_put_page(page, 1); - return; + if (unlikely(!__f2fs_write_meta_folio(folio, &wbc, FS_CP_META_IO))) { + if (f2fs_cp_error(sbi)) { + f2fs_folio_put(folio, true); + return; + } + f2fs_bug_on(sbi, true); } - f2fs_bug_on(sbi, err); - f2fs_put_page(page, 0); + f2fs_folio_put(folio, false); /* submit checkpoint (with barrier if NOBARRIER is not set) */ f2fs_submit_merged_write(sbi, META_FLUSH); @@ -1520,7 +1506,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); - crc32 = f2fs_checkpoint_chksum(sbi, ckpt); + crc32 = f2fs_checkpoint_chksum(ckpt); *((__le32 *)((unsigned char *)ckpt + le32_to_cpu(ckpt->checksum_offset))) = cpu_to_le32(crc32); diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 9b94810675c1..5c1f47e45dab 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -23,20 +23,18 @@ static struct kmem_cache *cic_entry_slab; static struct kmem_cache *dic_entry_slab; -static void *page_array_alloc(struct inode *inode, int nr) +static void *page_array_alloc(struct f2fs_sb_info *sbi, int nr) { - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int size = sizeof(struct page *) * nr; if (likely(size <= sbi->page_array_slab_size)) return f2fs_kmem_cache_alloc(sbi->page_array_slab, - GFP_F2FS_ZERO, false, F2FS_I_SB(inode)); + GFP_F2FS_ZERO, false, sbi); return f2fs_kzalloc(sbi, size, GFP_NOFS); } -static void page_array_free(struct inode *inode, void *pages, int nr) +static void page_array_free(struct f2fs_sb_info *sbi, void *pages, int nr) { - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int size = sizeof(struct page *) * nr; if (!pages) @@ -73,17 +71,15 @@ static pgoff_t start_idx_of_cluster(struct compress_ctx *cc) return cc->cluster_idx << cc->log_cluster_size; } -bool f2fs_is_compressed_page(struct page *page) +bool f2fs_is_compressed_page(struct folio *folio) { - if (!PagePrivate(page)) - return false; - if (!page_private(page)) + if (!folio->private) return false; - if (page_private_nonpointer(page)) + if (folio_test_f2fs_nonpointer(folio)) return false; - f2fs_bug_on(F2FS_M_SB(page->mapping), - *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC); + f2fs_bug_on(F2FS_F_SB(folio), + *((u32 *)folio->private) != F2FS_COMPRESSED_PAGE_MAGIC); return true; } @@ -137,9 +133,11 @@ static void f2fs_put_rpages_wbc(struct compress_ctx *cc, } } -struct page *f2fs_compress_control_page(struct page *page) +struct folio *f2fs_compress_control_folio(struct folio *folio) { - return ((struct compress_io_ctx *)page_private(page))->rpages[0]; + struct compress_io_ctx *ctx = folio->private; + + return page_folio(ctx->rpages[0]); } int f2fs_init_compress_ctx(struct compress_ctx *cc) @@ -147,13 +145,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc) if (cc->rpages) return 0; - cc->rpages = page_array_alloc(cc->inode, cc->cluster_size); + cc->rpages = 
page_array_alloc(F2FS_I_SB(cc->inode), cc->cluster_size); return cc->rpages ? 0 : -ENOMEM; } void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse) { - page_array_free(cc->inode, cc->rpages, cc->cluster_size); + page_array_free(F2FS_I_SB(cc->inode), cc->rpages, cc->cluster_size); cc->rpages = NULL; cc->nr_rpages = 0; cc->nr_cpages = 0; @@ -178,8 +176,8 @@ void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio) #ifdef CONFIG_F2FS_FS_LZO static int lzo_init_compress_ctx(struct compress_ctx *cc) { - cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), - LZO1X_MEM_COMPRESS, GFP_NOFS); + cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode), + LZO1X_MEM_COMPRESS); if (!cc->private) return -ENOMEM; @@ -189,7 +187,7 @@ static int lzo_init_compress_ctx(struct compress_ctx *cc) static void lzo_destroy_compress_ctx(struct compress_ctx *cc) { - kvfree(cc->private); + vfree(cc->private); cc->private = NULL; } @@ -214,13 +212,13 @@ static int lzo_decompress_pages(struct decompress_io_ctx *dic) ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen, dic->rbuf, &dic->rlen); if (ret != LZO_E_OK) { - f2fs_err_ratelimited(F2FS_I_SB(dic->inode), + f2fs_err_ratelimited(dic->sbi, "lzo decompress failed, ret:%d", ret); return -EIO; } if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) { - f2fs_err_ratelimited(F2FS_I_SB(dic->inode), + f2fs_err_ratelimited(dic->sbi, "lzo invalid rlen:%zu, expected:%lu", dic->rlen, PAGE_SIZE << dic->log_cluster_size); return -EIO; @@ -246,7 +244,7 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc) size = LZ4HC_MEM_COMPRESS; #endif - cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS); + cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode), size); if (!cc->private) return -ENOMEM; @@ -261,7 +259,7 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc) static void lz4_destroy_compress_ctx(struct compress_ctx *cc) { - kvfree(cc->private); + vfree(cc->private); cc->private = NULL; } @@ -294,13 +292,13 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic) ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf, dic->clen, dic->rlen); if (ret < 0) { - f2fs_err_ratelimited(F2FS_I_SB(dic->inode), + f2fs_err_ratelimited(dic->sbi, "lz4 decompress failed, ret:%d", ret); return -EIO; } if (ret != PAGE_SIZE << dic->log_cluster_size) { - f2fs_err_ratelimited(F2FS_I_SB(dic->inode), + f2fs_err_ratelimited(dic->sbi, "lz4 invalid ret:%d, expected:%lu", ret, PAGE_SIZE << dic->log_cluster_size); return -EIO; @@ -342,8 +340,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc) params = zstd_get_params(level, cc->rlen); workspace_size = zstd_cstream_workspace_bound(¶ms.cParams); - workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode), - workspace_size, GFP_NOFS); + workspace = f2fs_vmalloc(F2FS_I_SB(cc->inode), workspace_size); if (!workspace) return -ENOMEM; @@ -351,7 +348,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc) if (!stream) { f2fs_err_ratelimited(F2FS_I_SB(cc->inode), "%s zstd_init_cstream failed", __func__); - kvfree(workspace); + vfree(workspace); return -EIO; } @@ -364,7 +361,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc) static void zstd_destroy_compress_ctx(struct compress_ctx *cc) { - kvfree(cc->private); + vfree(cc->private); cc->private = NULL; cc->private2 = NULL; } @@ -423,16 +420,15 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic) workspace_size = zstd_dstream_workspace_bound(max_window_size); - workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode), - workspace_size, 
GFP_NOFS); + workspace = f2fs_vmalloc(dic->sbi, workspace_size); if (!workspace) return -ENOMEM; stream = zstd_init_dstream(max_window_size, workspace, workspace_size); if (!stream) { - f2fs_err_ratelimited(F2FS_I_SB(dic->inode), + f2fs_err_ratelimited(dic->sbi, "%s zstd_init_dstream failed", __func__); - kvfree(workspace); + vfree(workspace); return -EIO; } @@ -444,7 +440,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic) static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic) { - kvfree(dic->private); + vfree(dic->private); dic->private = NULL; dic->private2 = NULL; } @@ -466,14 +462,14 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic) ret = zstd_decompress_stream(stream, &outbuf, &inbuf); if (zstd_is_error(ret)) { - f2fs_err_ratelimited(F2FS_I_SB(dic->inode), + f2fs_err_ratelimited(dic->sbi, "%s zstd_decompress_stream failed, ret: %d", __func__, zstd_get_error_code(ret)); return -EIO; } if (dic->rlen != outbuf.pos) { - f2fs_err_ratelimited(F2FS_I_SB(dic->inode), + f2fs_err_ratelimited(dic->sbi, "%s ZSTD invalid rlen:%zu, expected:%lu", __func__, dic->rlen, PAGE_SIZE << dic->log_cluster_size); @@ -593,11 +589,14 @@ static struct page *f2fs_compress_alloc_page(void) static void f2fs_compress_free_page(struct page *page) { + struct folio *folio; + if (!page) return; - detach_page_private(page); - page->mapping = NULL; - unlock_page(page); + folio = page_folio(page); + folio_detach_private(folio); + folio->mapping = NULL; + folio_unlock(folio); mempool_free(page, compress_page_pool); } @@ -619,6 +618,7 @@ static void *f2fs_vmap(struct page **pages, unsigned int count) static int f2fs_compress_pages(struct compress_ctx *cc) { + struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode); struct f2fs_inode_info *fi = F2FS_I(cc->inode); const struct f2fs_compress_ops *cops = f2fs_cops[fi->i_compress_algorithm]; @@ -639,7 +639,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc) cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE); cc->valid_nr_cpages = cc->nr_cpages; - cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages); + cc->cpages = page_array_alloc(sbi, cc->nr_cpages); if (!cc->cpages) { ret = -ENOMEM; goto destroy_compress_ctx; @@ -674,8 +674,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc) cc->cbuf->clen = cpu_to_le32(cc->clen); if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM)) - chksum = f2fs_crc32(F2FS_I_SB(cc->inode), - cc->cbuf->cdata, cc->clen); + chksum = f2fs_crc32(cc->cbuf->cdata, cc->clen); cc->cbuf->chksum = cpu_to_le32(chksum); for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++) @@ -714,7 +713,7 @@ out_free_cpages: if (cc->cpages[i]) f2fs_compress_free_page(cc->cpages[i]); } - page_array_free(cc->inode, cc->cpages, cc->nr_cpages); + page_array_free(sbi, cc->cpages, cc->nr_cpages); cc->cpages = NULL; destroy_compress_ctx: if (cops->destroy_compress_ctx) @@ -732,7 +731,7 @@ static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic, void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task) { - struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); + struct f2fs_sb_info *sbi = dic->sbi; struct f2fs_inode_info *fi = F2FS_I(dic->inode); const struct f2fs_compress_ops *cops = f2fs_cops[fi->i_compress_algorithm]; @@ -771,7 +770,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task) if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) { u32 provided = le32_to_cpu(dic->cbuf->chksum); - u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen); + u32 calculated = 
f2fs_crc32(dic->cbuf->cdata, dic->clen); if (provided != calculated) { if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) { @@ -794,25 +793,27 @@ out_end_io: f2fs_decompress_end_io(dic, ret, in_task); } +static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, + struct folio *folio, nid_t ino, block_t blkaddr); + /* * This is called when a page of a compressed cluster has been read from disk * (or failed to be read from disk). It checks whether this page was the last * page being waited on in the cluster, and if so, it decompresses the cluster * (or in the case of a failure, cleans up without actually decompressing). */ -void f2fs_end_read_compressed_page(struct page *page, bool failed, +void f2fs_end_read_compressed_page(struct folio *folio, bool failed, block_t blkaddr, bool in_task) { - struct decompress_io_ctx *dic = - (struct decompress_io_ctx *)page_private(page); - struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); + struct decompress_io_ctx *dic = folio->private; + struct f2fs_sb_info *sbi = dic->sbi; dec_page_count(sbi, F2FS_RD_DATA); if (failed) WRITE_ONCE(dic->failed, true); else if (blkaddr && in_task) - f2fs_cache_compressed_page(sbi, page, + f2fs_cache_compressed_page(sbi, folio, dic->inode->i_ino, blkaddr); if (atomic_dec_and_test(&dic->remaining_pages)) @@ -909,7 +910,7 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) } for (i = 1, count = 1; i < cluster_size; i++, count++) { - block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, + block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node + i); /* [COMPR_ADDR, ..., COMPR_ADDR] */ @@ -950,7 +951,7 @@ static int __f2fs_get_cluster_blocks(struct inode *inode, int count, i; for (i = 0, count = 0; i < cluster_size; i++) { - block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, + block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node + i); if (__is_valid_data_blkaddr(blkaddr)) @@ -1090,7 +1091,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, { struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode); struct address_space *mapping = cc->inode->i_mapping; - struct page *page; + struct folio *folio; sector_t last_block_in_bio; fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT; pgoff_t start_idx = start_idx_of_cluster(cc); @@ -1105,19 +1106,19 @@ retry: if (ret) return ret; - /* keep page reference to avoid page reclaim */ + /* keep folio reference to avoid page reclaim */ for (i = 0; i < cc->cluster_size; i++) { - page = f2fs_pagecache_get_page(mapping, start_idx + i, - fgp_flag, GFP_NOFS); - if (!page) { - ret = -ENOMEM; + folio = f2fs_filemap_get_folio(mapping, start_idx + i, + fgp_flag, GFP_NOFS); + if (IS_ERR(folio)) { + ret = PTR_ERR(folio); goto unlock_pages; } - if (PageUptodate(page)) - f2fs_put_page(page, 1); + if (folio_test_uptodate(folio)) + f2fs_folio_put(folio, true); else - f2fs_compress_ctx_add_page(cc, page_folio(page)); + f2fs_compress_ctx_add_page(cc, folio); } if (!f2fs_cluster_is_empty(cc)) { @@ -1140,17 +1141,17 @@ retry: for (i = 0; i < cc->cluster_size; i++) { f2fs_bug_on(sbi, cc->rpages[i]); - page = find_lock_page(mapping, start_idx + i); - if (!page) { - /* page can be truncated */ + folio = filemap_lock_folio(mapping, start_idx + i); + if (IS_ERR(folio)) { + /* folio could be truncated */ goto release_and_retry; } - f2fs_wait_on_page_writeback(page, DATA, true, true); - f2fs_compress_ctx_add_page(cc, page_folio(page)); + f2fs_folio_wait_writeback(folio, DATA, true, true); + f2fs_compress_ctx_add_page(cc, folio); - if 
(!PageUptodate(page)) { - f2fs_handle_page_eio(sbi, page_folio(page), DATA); + if (!folio_test_uptodate(folio)) { + f2fs_handle_page_eio(sbi, folio, DATA); release_and_retry: f2fs_put_rpages(cc); f2fs_unlock_rpages(cc, i + 1); @@ -1317,7 +1318,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, goto out_unlock_op; for (i = 0; i < cc->cluster_size; i++) { - if (data_blkaddr(dn.inode, dn.node_page, + if (data_blkaddr(dn.inode, dn.node_folio, dn.ofs_in_node + i) == NULL_ADDR) goto out_put_dnode; } @@ -1338,7 +1339,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, cic->magic = F2FS_COMPRESSED_PAGE_MAGIC; cic->inode = inode; atomic_set(&cic->pending_pages, cc->valid_nr_cpages); - cic->rpages = page_array_alloc(cc->inode, cc->cluster_size); + cic->rpages = page_array_alloc(sbi, cc->cluster_size); if (!cic->rpages) goto out_put_cic; @@ -1349,7 +1350,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, page_folio(cc->rpages[i + 1])->index, cic); fio.compressed_page = cc->cpages[i]; - fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page, + fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_folio, dn.ofs_in_node + i + 1); /* wait for GCed page writeback via META_MAPPING */ @@ -1418,7 +1419,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, (*submitted)++; unlock_continue: inode_dec_dirty_pages(cc->inode); - unlock_page(fio.page); + folio_unlock(fio.folio); } if (fio.compr_blocks) @@ -1440,13 +1441,13 @@ unlock_continue: spin_unlock(&fi->i_size_lock); f2fs_put_rpages(cc); - page_array_free(cc->inode, cc->cpages, cc->nr_cpages); + page_array_free(sbi, cc->cpages, cc->nr_cpages); cc->cpages = NULL; f2fs_destroy_compress_ctx(cc, false); return 0; out_destroy_crypt: - page_array_free(cc->inode, cic->rpages, cc->cluster_size); + page_array_free(sbi, cic->rpages, cc->cluster_size); for (--i; i >= 0; i--) { if (!cc->cpages[i]) @@ -1467,21 +1468,21 @@ out_free: f2fs_compress_free_page(cc->cpages[i]); cc->cpages[i] = NULL; } - page_array_free(cc->inode, cc->cpages, cc->nr_cpages); + page_array_free(sbi, cc->cpages, cc->nr_cpages); cc->cpages = NULL; return -EAGAIN; } -void f2fs_compress_write_end_io(struct bio *bio, struct page *page) +void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio) { + struct page *page = &folio->page; struct f2fs_sb_info *sbi = bio->bi_private; - struct compress_io_ctx *cic = - (struct compress_io_ctx *)page_private(page); - enum count_type type = WB_DATA_TYPE(page, - f2fs_is_compressed_page(page)); + struct compress_io_ctx *cic = folio->private; + enum count_type type = WB_DATA_TYPE(folio, + f2fs_is_compressed_page(folio)); int i; - if (unlikely(bio->bi_status)) + if (unlikely(bio->bi_status != BLK_STS_OK)) mapping_set_error(cic->inode->i_mapping, -EIO); f2fs_compress_free_page(page); @@ -1497,7 +1498,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page) end_page_writeback(cic->rpages[i]); } - page_array_free(cic->inode, cic->rpages, cic->nr_rpages); + page_array_free(sbi, cic->rpages, cic->nr_rpages); kmem_cache_free(cic_entry_slab, cic); } @@ -1529,37 +1530,38 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc, f2fs_lock_op(sbi); for (i = 0; i < cc->cluster_size; i++) { + struct folio *folio; + if (!cc->rpages[i]) continue; + folio = page_folio(cc->rpages[i]); retry_write: - lock_page(cc->rpages[i]); + folio_lock(folio); - if (cc->rpages[i]->mapping != mapping) { + if (folio->mapping != mapping) { continue_unlock: - unlock_page(cc->rpages[i]); + folio_unlock(folio); 
continue; } - if (!PageDirty(cc->rpages[i])) + if (!folio_test_dirty(folio)) goto continue_unlock; - if (folio_test_writeback(page_folio(cc->rpages[i]))) { + if (folio_test_writeback(folio)) { if (wbc->sync_mode == WB_SYNC_NONE) goto continue_unlock; - f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true); + f2fs_folio_wait_writeback(folio, DATA, true, true); } - if (!clear_page_dirty_for_io(cc->rpages[i])) + if (!folio_clear_dirty_for_io(folio)) goto continue_unlock; submitted = 0; - ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]), - &submitted, + ret = f2fs_write_single_data_page(folio, &submitted, NULL, NULL, wbc, io_type, compr_blocks, false); if (ret) { - if (ret == AOP_WRITEPAGE_ACTIVATE) { - unlock_page(cc->rpages[i]); + if (ret == 1) { ret = 0; } else if (ret == -EAGAIN) { ret = 0; @@ -1630,14 +1632,13 @@ static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi, static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, bool pre_alloc) { - const struct f2fs_compress_ops *cops = - f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm]; + const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm]; int i; - if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc)) + if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc)) return 0; - dic->tpages = page_array_alloc(dic->inode, dic->cluster_size); + dic->tpages = page_array_alloc(dic->sbi, dic->cluster_size); if (!dic->tpages) return -ENOMEM; @@ -1667,10 +1668,9 @@ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic, bool bypass_destroy_callback, bool pre_alloc) { - const struct f2fs_compress_ops *cops = - f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm]; + const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm]; - if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc)) + if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc)) return; if (!bypass_destroy_callback && cops->destroy_decompress_ctx) @@ -1697,7 +1697,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) if (!dic) return ERR_PTR(-ENOMEM); - dic->rpages = page_array_alloc(cc->inode, cc->cluster_size); + dic->rpages = page_array_alloc(sbi, cc->cluster_size); if (!dic->rpages) { kmem_cache_free(dic_entry_slab, dic); return ERR_PTR(-ENOMEM); @@ -1705,6 +1705,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) dic->magic = F2FS_COMPRESSED_PAGE_MAGIC; dic->inode = cc->inode; + dic->sbi = sbi; + dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm; atomic_set(&dic->remaining_pages, cc->nr_cpages); dic->cluster_idx = cc->cluster_idx; dic->cluster_size = cc->cluster_size; @@ -1718,7 +1720,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) dic->rpages[i] = cc->rpages[i]; dic->nr_rpages = cc->cluster_size; - dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages); + dic->cpages = page_array_alloc(sbi, dic->nr_cpages); if (!dic->cpages) { ret = -ENOMEM; goto out_free; @@ -1748,6 +1750,8 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic, bool bypass_destroy_callback) { int i; + /* use sbi in dic to avoid UFA of dic->inode*/ + struct f2fs_sb_info *sbi = dic->sbi; f2fs_release_decomp_mem(dic, bypass_destroy_callback, true); @@ -1759,7 +1763,7 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic, continue; f2fs_compress_free_page(dic->tpages[i]); } - page_array_free(dic->inode, dic->tpages, dic->cluster_size); + page_array_free(sbi, dic->tpages, 
dic->cluster_size); } if (dic->cpages) { @@ -1768,10 +1772,10 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic, continue; f2fs_compress_free_page(dic->cpages[i]); } - page_array_free(dic->inode, dic->cpages, dic->nr_cpages); + page_array_free(sbi, dic->cpages, dic->nr_cpages); } - page_array_free(dic->inode, dic->rpages, dic->nr_rpages); + page_array_free(sbi, dic->rpages, dic->nr_rpages); kmem_cache_free(dic_entry_slab, dic); } @@ -1790,8 +1794,7 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task) f2fs_free_dic(dic, false); } else { INIT_WORK(&dic->free_work, f2fs_late_free_dic); - queue_work(F2FS_I_SB(dic->inode)->post_read_wq, - &dic->free_work); + queue_work(dic->sbi->post_read_wq, &dic->free_work); } } } @@ -1862,14 +1865,13 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, } /* - * Put a reference to a compressed page's decompress_io_ctx. + * Put a reference to a compressed folio's decompress_io_ctx. * - * This is called when the page is no longer needed and can be freed. + * This is called when the folio is no longer needed and can be freed. */ -void f2fs_put_page_dic(struct page *page, bool in_task) +void f2fs_put_folio_dic(struct folio *folio, bool in_task) { - struct decompress_io_ctx *dic = - (struct decompress_io_ctx *)page_private(page); + struct decompress_io_ctx *dic = folio->private; f2fs_put_dic(dic, in_task); } @@ -1881,14 +1883,14 @@ void f2fs_put_page_dic(struct page *page, bool in_task) unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn, unsigned int ofs_in_node) { - bool compressed = data_blkaddr(dn->inode, dn->node_page, + bool compressed = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node) == COMPRESS_ADDR; int i = compressed ? 1 : 0; - block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page, + block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node + i); for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) { - block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, + block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node + i); if (!__is_valid_data_blkaddr(blkaddr)) @@ -1919,10 +1921,10 @@ void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi, invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1); } -void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, - nid_t ino, block_t blkaddr) +static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, + struct folio *folio, nid_t ino, block_t blkaddr) { - struct page *cpage; + struct folio *cfolio; int ret; if (!test_opt(sbi, COMPRESS_CACHE)) @@ -1934,49 +1936,49 @@ void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE)) return; - cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr); - if (cpage) { - f2fs_put_page(cpage, 0); + cfolio = filemap_get_folio(COMPRESS_MAPPING(sbi), blkaddr); + if (!IS_ERR(cfolio)) { + f2fs_folio_put(cfolio, false); return; } - cpage = alloc_page(__GFP_NOWARN | __GFP_IO); - if (!cpage) + cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0); + if (!cfolio) return; - ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi), + ret = filemap_add_folio(COMPRESS_MAPPING(sbi), cfolio, blkaddr, GFP_NOFS); if (ret) { - f2fs_put_page(cpage, 0); + f2fs_folio_put(cfolio, false); return; } - set_page_private_data(cpage, ino); + folio_set_f2fs_data(cfolio, ino); - memcpy(page_address(cpage), page_address(page), PAGE_SIZE); - SetPageUptodate(cpage); 
- f2fs_put_page(cpage, 1); + memcpy(folio_address(cfolio), folio_address(folio), PAGE_SIZE); + folio_mark_uptodate(cfolio); + f2fs_folio_put(cfolio, true); } -bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, +bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio, block_t blkaddr) { - struct page *cpage; + struct folio *cfolio; bool hitted = false; if (!test_opt(sbi, COMPRESS_CACHE)) return false; - cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi), + cfolio = f2fs_filemap_get_folio(COMPRESS_MAPPING(sbi), blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS); - if (cpage) { - if (PageUptodate(cpage)) { + if (!IS_ERR(cfolio)) { + if (folio_test_uptodate(cfolio)) { atomic_inc(&sbi->compress_page_hit); - memcpy(page_address(page), - page_address(cpage), PAGE_SIZE); + memcpy(folio_address(folio), + folio_address(cfolio), folio_size(folio)); hitted = true; } - f2fs_put_page(cpage, 1); + f2fs_folio_put(cfolio, true); } return hitted; @@ -2010,7 +2012,7 @@ void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino) continue; } - if (ino != get_page_private_data(&folio->page)) { + if (ino != folio_get_f2fs_data(folio)) { folio_unlock(folio); continue; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 54f89f0ee69b..7961e0ddfca3 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -47,14 +47,14 @@ void f2fs_destroy_bioset(void) bioset_exit(&f2fs_bioset); } -bool f2fs_is_cp_guaranteed(struct page *page) +bool f2fs_is_cp_guaranteed(const struct folio *folio) { - struct address_space *mapping = page->mapping; + struct address_space *mapping = folio->mapping; struct inode *inode; struct f2fs_sb_info *sbi; - if (!mapping) - return false; + if (fscrypt_is_bounce_folio(folio)) + return folio_test_f2fs_gcing(fscrypt_pagecache_folio(folio)); inode = mapping->host; sbi = F2FS_I_SB(inode); @@ -65,7 +65,7 @@ bool f2fs_is_cp_guaranteed(struct page *page) return true; if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) || - page_private_gcing(page)) + folio_test_f2fs_gcing(folio)) return true; return false; } @@ -142,16 +142,16 @@ static void f2fs_finish_read_bio(struct bio *bio, bool in_task) bio_for_each_folio_all(fi, bio) { struct folio *folio = fi.folio; - if (f2fs_is_compressed_page(&folio->page)) { + if (f2fs_is_compressed_page(folio)) { if (ctx && !ctx->decompression_attempted) - f2fs_end_read_compressed_page(&folio->page, true, 0, + f2fs_end_read_compressed_page(folio, true, 0, in_task); - f2fs_put_page_dic(&folio->page, in_task); + f2fs_put_folio_dic(folio, in_task); continue; } dec_page_count(F2FS_F_SB(folio), __read_io_type(folio)); - folio_end_read(folio, bio->bi_status == 0); + folio_end_read(folio, bio->bi_status == BLK_STS_OK); } if (ctx) @@ -181,14 +181,13 @@ static void f2fs_verify_bio(struct work_struct *work) * as those were handled separately by f2fs_end_read_compressed_page(). 
*/ if (may_have_compressed_pages) { - struct bio_vec *bv; - struct bvec_iter_all iter_all; + struct folio_iter fi; - bio_for_each_segment_all(bv, bio, iter_all) { - struct page *page = bv->bv_page; + bio_for_each_folio_all(fi, bio) { + struct folio *folio = fi.folio; - if (!f2fs_is_compressed_page(page) && - !fsverity_verify_page(page)) { + if (!f2fs_is_compressed_page(folio) && + !fsverity_verify_page(&folio->page)) { bio->bi_status = BLK_STS_IOERR; break; } @@ -233,16 +232,15 @@ static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task) static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx, bool in_task) { - struct bio_vec *bv; - struct bvec_iter_all iter_all; + struct folio_iter fi; bool all_compressed = true; block_t blkaddr = ctx->fs_blkaddr; - bio_for_each_segment_all(bv, ctx->bio, iter_all) { - struct page *page = bv->bv_page; + bio_for_each_folio_all(fi, ctx->bio) { + struct folio *folio = fi.folio; - if (f2fs_is_compressed_page(page)) - f2fs_end_read_compressed_page(page, false, blkaddr, + if (f2fs_is_compressed_page(folio)) + f2fs_end_read_compressed_page(folio, false, blkaddr, in_task); else all_compressed = false; @@ -280,9 +278,9 @@ static void f2fs_post_read_work(struct work_struct *work) static void f2fs_read_end_io(struct bio *bio) { - struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio)); + struct f2fs_sb_info *sbi = F2FS_F_SB(bio_first_folio_all(bio)); struct bio_post_read_ctx *ctx; - bool intask = in_task(); + bool intask = in_task() && !irqs_disabled(); iostat_update_and_unbind_ctx(bio); ctx = bio->bi_private; @@ -290,7 +288,7 @@ static void f2fs_read_end_io(struct bio *bio) if (time_to_inject(sbi, FAULT_READ_IO)) bio->bi_status = BLK_STS_IOERR; - if (bio->bi_status) { + if (bio->bi_status != BLK_STS_OK) { f2fs_finish_read_bio(bio, intask); return; } @@ -339,28 +337,28 @@ static void f2fs_write_end_io(struct bio *bio) } #ifdef CONFIG_F2FS_FS_COMPRESSION - if (f2fs_is_compressed_page(&folio->page)) { - f2fs_compress_write_end_io(bio, &folio->page); + if (f2fs_is_compressed_page(folio)) { + f2fs_compress_write_end_io(bio, folio); continue; } #endif - type = WB_DATA_TYPE(&folio->page, false); + type = WB_DATA_TYPE(folio, false); - if (unlikely(bio->bi_status)) { + if (unlikely(bio->bi_status != BLK_STS_OK)) { mapping_set_error(folio->mapping, -EIO); if (type == F2FS_WB_CP_DATA) f2fs_stop_checkpoint(sbi, true, STOP_CP_REASON_WRITE_FAIL); } - f2fs_bug_on(sbi, folio->mapping == NODE_MAPPING(sbi) && - folio->index != nid_of_node(&folio->page)); + f2fs_bug_on(sbi, is_node_folio(folio) && + folio->index != nid_of_node(folio)); dec_page_count(sbi, type); if (f2fs_in_warm_node_list(sbi, folio)) - f2fs_del_fsync_node_entry(sbi, &folio->page); - clear_page_private_gcing(&folio->page); + f2fs_del_fsync_node_entry(sbi, folio); + folio_clear_f2fs_gcing(folio); folio_end_writeback(folio); } if (!get_pages(sbi, F2FS_WB_CP_DATA) && @@ -419,7 +417,6 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr) static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio) { unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0); - struct folio *fio_folio = page_folio(fio->page); unsigned int fua_flag, meta_flag, io_flag; blk_opf_t op_flags = 0; @@ -447,7 +444,7 @@ static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio) op_flags |= REQ_FUA; if (fio->type == DATA && - F2FS_I(fio_folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE) + F2FS_I(fio->folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE) op_flags |= REQ_PRIO; return op_flags; @@ 
-546,34 +543,33 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) } static bool __has_merged_page(struct bio *bio, struct inode *inode, - struct page *page, nid_t ino) + struct folio *folio, nid_t ino) { - struct bio_vec *bvec; - struct bvec_iter_all iter_all; + struct folio_iter fi; if (!bio) return false; - if (!inode && !page && !ino) + if (!inode && !folio && !ino) return true; - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *target = bvec->bv_page; + bio_for_each_folio_all(fi, bio) { + struct folio *target = fi.folio; - if (fscrypt_is_bounce_page(target)) { - target = fscrypt_pagecache_page(target); + if (fscrypt_is_bounce_folio(target)) { + target = fscrypt_pagecache_folio(target); if (IS_ERR(target)) continue; } if (f2fs_is_compressed_page(target)) { - target = f2fs_compress_control_page(target); + target = f2fs_compress_control_folio(target); if (IS_ERR(target)) continue; } if (inode && inode == target->mapping->host) return true; - if (page && page == target) + if (folio && folio == target) return true; if (ino && ino == ino_of_node(target)) return true; @@ -642,7 +638,7 @@ unlock_out: } static void __submit_merged_write_cond(struct f2fs_sb_info *sbi, - struct inode *inode, struct page *page, + struct inode *inode, struct folio *folio, nid_t ino, enum page_type type, bool force) { enum temp_type temp; @@ -654,7 +650,7 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi, struct f2fs_bio_info *io = sbi->write_io[btype] + temp; f2fs_down_read(&io->io_rwsem); - ret = __has_merged_page(io->bio, inode, page, ino); + ret = __has_merged_page(io->bio, inode, folio, ino); f2fs_up_read(&io->io_rwsem); } if (ret) @@ -672,10 +668,10 @@ void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type) } void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, - struct inode *inode, struct page *page, + struct inode *inode, struct folio *folio, nid_t ino, enum page_type type) { - __submit_merged_write_cond(sbi, inode, page, ino, type, false); + __submit_merged_write_cond(sbi, inode, folio, ino, type, false); } void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi) @@ -692,7 +688,7 @@ void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi) int f2fs_submit_page_bio(struct f2fs_io_info *fio) { struct bio *bio; - struct folio *fio_folio = page_folio(fio->page); + struct folio *fio_folio = fio->folio; struct folio *data_folio = fio->encrypted_page ? page_folio(fio->encrypted_page) : fio_folio; @@ -714,7 +710,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE); inc_page_count(fio->sbi, is_read_io(fio->op) ? 
- __read_io_type(data_folio) : WB_DATA_TYPE(fio->page, false)); + __read_io_type(data_folio) : WB_DATA_TYPE(fio->folio, false)); if (is_read_io(bio_op(bio))) f2fs_submit_read_bio(fio->sbi, bio, fio->type); @@ -780,6 +776,7 @@ static void del_bio_entry(struct bio_entry *be) static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, struct page *page) { + struct folio *fio_folio = fio->folio; struct f2fs_sb_info *sbi = fio->sbi; enum temp_type temp; bool found = false; @@ -801,8 +798,8 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, *fio->last_block, fio->new_blkaddr)); if (f2fs_crypt_mergeable_bio(*bio, - fio->page->mapping->host, - page_folio(fio->page)->index, fio) && + fio_folio->mapping->host, + fio_folio->index, fio) && bio_add_page(*bio, page, PAGE_SIZE, 0) == PAGE_SIZE) { ret = 0; @@ -826,13 +823,13 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, } void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, - struct bio **bio, struct page *page) + struct bio **bio, struct folio *folio) { enum temp_type temp; bool found = false; struct bio *target = bio ? *bio : NULL; - f2fs_bug_on(sbi, !target && !page); + f2fs_bug_on(sbi, !target && !folio); for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) { struct f2fs_bio_info *io = sbi->write_io[DATA] + temp; @@ -848,7 +845,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, found = (target == be->bio); else found = __has_merged_page(be->bio, NULL, - page, 0); + folio, 0); if (found) break; } @@ -865,7 +862,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, found = (target == be->bio); else found = __has_merged_page(be->bio, NULL, - page, 0); + folio, 0); if (found) { target = be->bio; del_bio_entry(be); @@ -886,15 +883,15 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, int f2fs_merge_page_bio(struct f2fs_io_info *fio) { struct bio *bio = *fio->bio; - struct page *page = fio->encrypted_page ? - fio->encrypted_page : fio->page; - struct folio *folio = page_folio(fio->page); + struct folio *data_folio = fio->encrypted_page ? + page_folio(fio->encrypted_page) : fio->folio; + struct folio *folio = fio->folio; if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, __is_meta_io(fio) ? 
META_GENERIC : DATA_GENERIC)) return -EFSCORRUPTED; - trace_f2fs_submit_folio_bio(page_folio(page), fio); + trace_f2fs_submit_folio_bio(data_folio, fio); if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, fio->new_blkaddr)) @@ -905,16 +902,16 @@ alloc_new: f2fs_set_bio_crypt_ctx(bio, folio->mapping->host, folio->index, fio, GFP_NOIO); - add_bio_entry(fio->sbi, bio, page, fio->temp); + add_bio_entry(fio->sbi, bio, &data_folio->page, fio->temp); } else { - if (add_ipu_page(fio, &bio, page)) + if (add_ipu_page(fio, &bio, &data_folio->page)) goto alloc_new; } if (fio->io_wbc) wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio)); - inc_page_count(fio->sbi, WB_DATA_TYPE(page, false)); + inc_page_count(fio->sbi, WB_DATA_TYPE(data_folio, false)); *fio->last_block = fio->new_blkaddr; *fio->bio = bio; @@ -949,7 +946,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio) struct f2fs_sb_info *sbi = fio->sbi; enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp; - struct page *bio_page; + struct folio *bio_folio; enum count_type type; f2fs_bug_on(sbi, is_read_io(fio->op)); @@ -980,44 +977,44 @@ next: verify_fio_blkaddr(fio); if (fio->encrypted_page) - bio_page = fio->encrypted_page; + bio_folio = page_folio(fio->encrypted_page); else if (fio->compressed_page) - bio_page = fio->compressed_page; + bio_folio = page_folio(fio->compressed_page); else - bio_page = fio->page; + bio_folio = fio->folio; /* set submitted = true as a return value */ fio->submitted = 1; - type = WB_DATA_TYPE(bio_page, fio->compressed_page); + type = WB_DATA_TYPE(bio_folio, fio->compressed_page); inc_page_count(sbi, type); if (io->bio && (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, fio->new_blkaddr) || - !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host, - page_folio(bio_page)->index, fio))) + !f2fs_crypt_mergeable_bio(io->bio, fio_inode(fio), + bio_folio->index, fio))) __submit_merged_bio(io); alloc_new: if (io->bio == NULL) { io->bio = __bio_alloc(fio, BIO_MAX_VECS); - f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host, - page_folio(bio_page)->index, fio, GFP_NOIO); + f2fs_set_bio_crypt_ctx(io->bio, fio_inode(fio), + bio_folio->index, fio, GFP_NOIO); io->fio = *fio; } - if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) { + if (!bio_add_folio(io->bio, bio_folio, folio_size(bio_folio), 0)) { __submit_merged_bio(io); goto alloc_new; } if (fio->io_wbc) - wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page), - PAGE_SIZE); + wbc_account_cgroup_owner(fio->io_wbc, fio->folio, + folio_size(fio->folio)); io->last_block_in_bio = fio->new_blkaddr; - trace_f2fs_submit_folio_write(page_folio(fio->page), fio); + trace_f2fs_submit_folio_write(fio->folio, fio); #ifdef CONFIG_BLK_DEV_ZONED if (f2fs_sb_has_blkzoned(sbi) && btype < META && is_end_zone_blkaddr(sbi, fio->new_blkaddr)) { @@ -1116,7 +1113,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct folio *folio, static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr) { - __le32 *addr = get_dnode_addr(dn->inode, dn->node_page); + __le32 *addr = get_dnode_addr(dn->inode, dn->node_folio); dn->data_blkaddr = blkaddr; addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr); @@ -1125,14 +1122,14 @@ static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr) /* * Lock ordering for the change of data block address: * ->data_page - * ->node_page + * ->node_folio * update block addresses in the node page */ void 
f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr) { - f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); + f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true); __set_data_blkaddr(dn, blkaddr); - if (set_page_dirty(dn->node_page)) + if (folio_mark_dirty(dn->node_folio)) dn->node_changed = true; } @@ -1160,7 +1157,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count) trace_f2fs_reserve_new_blocks(dn->inode, dn->nid, dn->ofs_in_node, count); - f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); + f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true); for (; count > 0; dn->ofs_in_node++) { block_t blkaddr = f2fs_data_blkaddr(dn); @@ -1171,7 +1168,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count) } } - if (set_page_dirty(dn->node_page)) + if (folio_mark_dirty(dn->node_folio)) dn->node_changed = true; return 0; } @@ -1189,7 +1186,7 @@ int f2fs_reserve_new_block(struct dnode_of_data *dn) int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) { - bool need_put = dn->inode_page ? false : true; + bool need_put = dn->inode_folio ? false : true; int err; err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE); @@ -1257,7 +1254,7 @@ got_it: * A new dentry page is allocated but not able to be written, since its * new inode page couldn't be allocated due to -ENOSPC. * In such the case, its blkaddr can be remained as NEW_ADDR. - * see, f2fs_add_link -> f2fs_get_new_data_page -> + * see, f2fs_add_link -> f2fs_get_new_data_folio -> * f2fs_init_inode_metadata. */ if (dn.data_blkaddr == NEW_ADDR) { @@ -1338,57 +1335,57 @@ struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index, * * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and * f2fs_unlock_op(). - * Note that, ipage is set only by make_empty_dir, and if any error occur, - * ipage should be released by this function. + * Note that, ifolio is set only by make_empty_dir, and if any error occur, + * ifolio should be released by this function. */ -struct page *f2fs_get_new_data_page(struct inode *inode, - struct page *ipage, pgoff_t index, bool new_i_size) +struct folio *f2fs_get_new_data_folio(struct inode *inode, + struct folio *ifolio, pgoff_t index, bool new_i_size) { struct address_space *mapping = inode->i_mapping; - struct page *page; + struct folio *folio; struct dnode_of_data dn; int err; - page = f2fs_grab_cache_page(mapping, index, true); - if (!page) { + folio = f2fs_grab_cache_folio(mapping, index, true); + if (IS_ERR(folio)) { /* - * before exiting, we should make sure ipage will be released + * before exiting, we should make sure ifolio will be released * if any error occur. 
*/ - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); return ERR_PTR(-ENOMEM); } - set_new_dnode(&dn, inode, ipage, NULL, 0); + set_new_dnode(&dn, inode, ifolio, NULL, 0); err = f2fs_reserve_block(&dn, index); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); return ERR_PTR(err); } - if (!ipage) + if (!ifolio) f2fs_put_dnode(&dn); - if (PageUptodate(page)) + if (folio_test_uptodate(folio)) goto got_it; if (dn.data_blkaddr == NEW_ADDR) { - zero_user_segment(page, 0, PAGE_SIZE); - if (!PageUptodate(page)) - SetPageUptodate(page); + folio_zero_segment(folio, 0, folio_size(folio)); + if (!folio_test_uptodate(folio)) + folio_mark_uptodate(folio); } else { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); - /* if ipage exists, blkaddr should be NEW_ADDR */ - f2fs_bug_on(F2FS_I_SB(inode), ipage); - page = f2fs_get_lock_data_page(inode, index, true); - if (IS_ERR(page)) - return page; + /* if ifolio exists, blkaddr should be NEW_ADDR */ + f2fs_bug_on(F2FS_I_SB(inode), ifolio); + folio = f2fs_get_lock_data_folio(inode, index, true); + if (IS_ERR(folio)) + return folio; } got_it: if (new_i_size && i_size_read(inode) < ((loff_t)(index + 1) << PAGE_SHIFT)) f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT)); - return page; + return folio; } static int __allocate_data_block(struct dnode_of_data *dn, int seg_type) @@ -1553,10 +1550,14 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag) unsigned int start_pgofs; int bidx = 0; bool is_hole; + bool lfs_dio_write; if (!maxblocks) return 0; + lfs_dio_write = (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) && + map->m_may_create); + if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag)) goto out; @@ -1572,8 +1573,11 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag) end = pgofs + maxblocks; next_dnode: - if (map->m_may_create) + if (map->m_may_create) { + if (f2fs_lfs_mode(sbi)) + f2fs_balance_fs(sbi, true); f2fs_map_lock(sbi, flag); + } /* When reading holes, we need its node page */ set_new_dnode(&dn, inode, NULL, NULL, 0); @@ -1589,7 +1593,7 @@ next_dnode: start_pgofs = pgofs; prealloc = 0; last_ofs_in_node = ofs_in_node = dn.ofs_in_node; - end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); next_block: blkaddr = f2fs_data_blkaddr(&dn); @@ -1603,7 +1607,7 @@ next_block: /* use out-place-update for direct IO under LFS mode */ if (map->m_may_create && (is_hole || (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) && - !f2fs_is_pinned_file(inode)))) { + !f2fs_is_pinned_file(inode) && map->m_last_pblk != blkaddr))) { if (unlikely(f2fs_cp_error(sbi))) { err = -EIO; goto sync_out; @@ -1687,10 +1691,15 @@ next_block: if (map->m_multidev_dio) map->m_bdev = FDEV(bidx).bdev; + + if (lfs_dio_write) + map->m_last_pblk = NULL_ADDR; } else if (map_is_mergeable(sbi, map, blkaddr, flag, bidx, ofs)) { ofs++; map->m_len++; } else { + if (lfs_dio_write && !f2fs_is_pinned_file(inode)) + map->m_last_pblk = blkaddr; goto sync_out; } @@ -1715,14 +1724,6 @@ skip: dn.ofs_in_node = end_offset; } - if (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) && - map->m_may_create) { - /* the next block to be allocated may not be contiguous. 
*/ - if (GET_SEGOFF_FROM_SEG0(sbi, blkaddr) % BLKS_PER_SEC(sbi) == - CAP_BLKS_PER_SEC(sbi) - 1) - goto sync_out; - } - if (pgofs >= end) goto sync_out; else if (dn.ofs_in_node < end_offset) @@ -1825,7 +1826,6 @@ static int f2fs_xattr_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct page *page; struct node_info ni; __u64 phys = 0, len; __u32 flags; @@ -1834,15 +1834,15 @@ static int f2fs_xattr_fiemap(struct inode *inode, if (f2fs_has_inline_xattr(inode)) { int offset; + struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), + inode->i_ino, false); - page = f2fs_grab_cache_page(NODE_MAPPING(sbi), - inode->i_ino, false); - if (!page) - return -ENOMEM; + if (IS_ERR(folio)) + return PTR_ERR(folio); err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); return err; } @@ -1854,7 +1854,7 @@ static int f2fs_xattr_fiemap(struct inode *inode, phys += offset; len = inline_xattr_size(inode); - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED; @@ -1868,20 +1868,22 @@ static int f2fs_xattr_fiemap(struct inode *inode, } if (xnid) { - page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false); - if (!page) - return -ENOMEM; + struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), + xnid, false); + + if (IS_ERR(folio)) + return PTR_ERR(folio); err = f2fs_get_node_info(sbi, xnid, &ni, false); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); return err; } phys = F2FS_BLK_TO_BYTES(ni.blk_addr); len = inode->i_sb->s_blocksize; - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); flags = FIEMAP_EXTENT_LAST; } @@ -2077,7 +2079,7 @@ static int f2fs_read_single_page(struct inode *inode, struct folio *folio, sector_t last_block; sector_t last_block_in_file; sector_t block_nr; - pgoff_t index = folio_index(folio); + pgoff_t index = folio->index; int ret = 0; block_in_file = (sector_t)index; @@ -2245,7 +2247,7 @@ skip_reading_dnode: for (i = 1; i < cc->cluster_size; i++) { block_t blkaddr; - blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page, + blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio, dn.ofs_in_node + i) : ei.blk + i - 1; @@ -2279,14 +2281,13 @@ skip_reading_dnode: block_t blkaddr; struct bio_post_read_ctx *ctx; - blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page, + blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio, dn.ofs_in_node + i + 1) : ei.blk + i; f2fs_wait_on_block_writeback(inode, blkaddr); - if (f2fs_load_compressed_page(sbi, folio_page(folio, 0), - blkaddr)) { + if (f2fs_load_compressed_folio(sbi, folio, blkaddr)) { if (atomic_dec_and_test(&dic->remaining_pages)) { f2fs_decompress_cluster(dic, true); break; @@ -2303,7 +2304,7 @@ submit_and_realloc: } if (!bio) { - bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages, + bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages - i, f2fs_ra_op_flags(rac), folio->index, for_write); if (IS_ERR(bio)) { @@ -2376,6 +2377,14 @@ static int f2fs_mpage_readpages(struct inode *inode, unsigned max_nr_pages = nr_pages; int ret = 0; +#ifdef CONFIG_F2FS_FS_COMPRESSION + if (f2fs_compressed_file(inode)) { + index = rac ? 
readahead_index(rac) : folio->index; + max_nr_pages = round_up(index + nr_pages, cc.cluster_size) - + round_down(index, cc.cluster_size); + } +#endif + map.m_pblk = 0; map.m_lblk = 0; map.m_len = 0; @@ -2392,7 +2401,7 @@ static int f2fs_mpage_readpages(struct inode *inode, } #ifdef CONFIG_F2FS_FS_COMPRESSION - index = folio_index(folio); + index = folio->index; if (!f2fs_compressed_file(inode)) goto read_single_page; @@ -2501,8 +2510,9 @@ static void f2fs_readahead(struct readahead_control *rac) int f2fs_encrypt_one_page(struct f2fs_io_info *fio) { - struct inode *inode = fio->page->mapping->host; - struct page *mpage, *page; + struct inode *inode = fio_inode(fio); + struct folio *mfolio; + struct page *page; gfp_t gfp_flags = GFP_NOFS; if (!f2fs_encrypted_file(inode)) @@ -2527,12 +2537,12 @@ retry_encrypt: return PTR_ERR(fio->encrypted_page); } - mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr); - if (mpage) { - if (PageUptodate(mpage)) - memcpy(page_address(mpage), + mfolio = filemap_lock_folio(META_MAPPING(fio->sbi), fio->old_blkaddr); + if (!IS_ERR(mfolio)) { + if (folio_test_uptodate(mfolio)) + memcpy(folio_address(mfolio), page_address(fio->encrypted_page), PAGE_SIZE); - f2fs_put_page(mpage, 1); + f2fs_folio_put(mfolio, true); } return 0; } @@ -2631,7 +2641,7 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio) static inline bool need_inplace_update(struct f2fs_io_info *fio) { - struct inode *inode = fio->page->mapping->host; + struct inode *inode = fio_inode(fio); if (f2fs_should_update_outplace(inode, fio)) return false; @@ -2641,7 +2651,7 @@ static inline bool need_inplace_update(struct f2fs_io_info *fio) int f2fs_do_write_data_page(struct f2fs_io_info *fio) { - struct folio *folio = page_folio(fio->page); + struct folio *folio = fio->folio; struct inode *inode = folio->mapping->host; struct dnode_of_data dn; struct node_info ni; @@ -2651,7 +2661,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio) /* Use COW inode to make dnode_of_data for atomic write */ atomic_commit = f2fs_is_atomic_file(inode) && - page_private_atomic(folio_page(folio, 0)); + folio_test_f2fs_atomic(folio); if (atomic_commit) set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0); else @@ -2682,7 +2692,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio) /* This page is already truncated */ if (fio->old_blkaddr == NULL_ADDR) { folio_clear_uptodate(folio); - clear_page_private_gcing(folio_page(folio, 0)); + folio_clear_f2fs_gcing(folio); goto out_writepage; } got_it: @@ -2752,7 +2762,7 @@ got_it: trace_f2fs_do_write_data_page(folio, OPU); set_inode_flag(inode, FI_APPEND_WRITE); if (atomic_commit) - clear_page_private_atomic(folio_page(folio, 0)); + folio_clear_f2fs_atomic(folio); out_writepage: f2fs_put_dnode(&dn); out: @@ -2770,7 +2780,6 @@ int f2fs_write_single_data_page(struct folio *folio, int *submitted, bool allow_balance) { struct inode *inode = folio->mapping->host; - struct page *page = folio_page(folio, 0); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); loff_t i_size = i_size_read(inode); const pgoff_t end_index = ((unsigned long long)i_size) @@ -2787,7 +2796,7 @@ int f2fs_write_single_data_page(struct folio *folio, int *submitted, .op = REQ_OP_WRITE, .op_flags = wbc_to_write_flags(wbc), .old_blkaddr = NULL_ADDR, - .page = page, + .folio = folio, .encrypted_page = NULL, .submitted = 0, .compr_blocks = compr_blocks, @@ -2855,13 +2864,7 @@ write: goto done; } - if (!wbc->for_reclaim) - need_balance_fs = true; - else if 
(has_not_enough_free_secs(sbi, 0, 0)) - goto redirty_out; - else - set_inode_flag(inode, FI_HOT_DATA); - + need_balance_fs = true; err = -EAGAIN; if (f2fs_has_inline_data(inode)) { err = f2fs_write_inline_data(inode, folio); @@ -2895,14 +2898,7 @@ out: inode_dec_dirty_pages(inode); if (err) { folio_clear_uptodate(folio); - clear_page_private_gcing(page); - } - - if (wbc->for_reclaim) { - f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA); - clear_inode_flag(inode, FI_HOT_DATA); - f2fs_remove_dirty_inode(inode); - submitted = NULL; + folio_clear_f2fs_gcing(folio); } folio_unlock(folio); if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && @@ -2929,9 +2925,9 @@ redirty_out: * file_write_and_wait_range() will see EIO error, which is critical * to return value of fsync() followed by atomic_write failure to user. */ - if (!err || wbc->for_reclaim) - return AOP_WRITEPAGE_ACTIVATE; folio_unlock(folio); + if (!err) + return 1; return err; } @@ -3128,7 +3124,7 @@ continue_unlock: if (folio_test_writeback(folio)) { if (wbc->sync_mode == WB_SYNC_NONE) goto continue_unlock; - f2fs_wait_on_page_writeback(&folio->page, DATA, true, true); + f2fs_folio_wait_writeback(folio, DATA, true, true); } if (!folio_clear_dirty_for_io(folio)) @@ -3145,8 +3141,6 @@ continue_unlock: ret = f2fs_write_single_data_page(folio, &submitted, &bio, &last_block, wbc, io_type, 0, true); - if (ret == AOP_WRITEPAGE_ACTIVATE) - folio_unlock(folio); #ifdef CONFIG_F2FS_FS_COMPRESSION result: #endif @@ -3158,7 +3152,7 @@ result: * keep nr_to_write, since vfs uses this to * get # of written pages. */ - if (ret == AOP_WRITEPAGE_ACTIVATE) { + if (ret == 1) { ret = 0; goto next; } else if (ret == -EAGAIN) { @@ -3352,7 +3346,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi, struct inode *inode = folio->mapping->host; pgoff_t index = folio->index; struct dnode_of_data dn; - struct page *ipage; + struct folio *ifolio; bool locked = false; int flag = F2FS_GET_BLOCK_PRE_AIO; int err = 0; @@ -3377,23 +3371,23 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi, restart: /* check inline_data */ - ipage = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(ipage)) { - err = PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(ifolio)) { + err = PTR_ERR(ifolio); goto unlock_out; } - set_new_dnode(&dn, inode, ipage, ipage, 0); + set_new_dnode(&dn, inode, ifolio, ifolio, 0); if (f2fs_has_inline_data(inode)) { if (pos + len <= MAX_INLINE_DATA(inode)) { - f2fs_do_read_inline_data(folio, ipage); + f2fs_do_read_inline_data(folio, ifolio); set_inode_flag(inode, FI_DATA_EXIST); if (inode->i_nlink) - set_page_private_inline(ipage); + folio_set_f2fs_inline(ifolio); goto out; } - err = f2fs_convert_inline_page(&dn, folio_page(folio, 0)); + err = f2fs_convert_inline_folio(&dn, folio); if (err || dn.data_blkaddr != NULL_ADDR) goto out; } @@ -3437,14 +3431,14 @@ static int __find_data_block(struct inode *inode, pgoff_t index, block_t *blk_addr) { struct dnode_of_data dn; - struct page *ipage; + struct folio *ifolio; int err = 0; - ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino); - if (IS_ERR(ipage)) - return PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino); + if (IS_ERR(ifolio)) + return PTR_ERR(ifolio); - set_new_dnode(&dn, inode, ipage, ipage, 0); + set_new_dnode(&dn, inode, ifolio, ifolio, 0); if (!f2fs_lookup_read_extent_cache_block(inode, index, &dn.data_blkaddr)) { @@ -3465,17 +3459,17 @@ static int __reserve_data_block(struct inode *inode, pgoff_t index, { 
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; - struct page *ipage; + struct folio *ifolio; int err = 0; f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO); - ipage = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(ipage)) { - err = PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(ifolio)) { + err = PTR_ERR(ifolio); goto unlock_out; } - set_new_dnode(&dn, inode, ipage, ipage, 0); + set_new_dnode(&dn, inode, ifolio, ifolio, 0); if (!f2fs_lookup_read_extent_cache_block(dn.inode, index, &dn.data_blkaddr)) @@ -3533,8 +3527,10 @@ reserve_block: return 0; } -static int f2fs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, struct folio **foliop, void **fsdata) +static int f2fs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, struct folio **foliop, + void **fsdata) { struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -3623,7 +3619,7 @@ repeat: } } - f2fs_wait_on_page_writeback(&folio->page, DATA, false, true); + f2fs_folio_wait_writeback(folio, DATA, false, true); if (len == folio_size(folio) || folio_test_uptodate(folio)) return 0; @@ -3670,7 +3666,7 @@ fail: return err; } -static int f2fs_write_end(struct file *file, +static int f2fs_write_end(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) @@ -3710,7 +3706,7 @@ static int f2fs_write_end(struct file *file, folio_mark_dirty(folio); if (f2fs_is_atomic_file(inode)) - set_page_private_atomic(folio_page(folio, 0)); + folio_set_f2fs_atomic(folio); if (pos + copied > i_size_read(inode) && !f2fs_verity_in_progress(inode)) { @@ -3745,7 +3741,7 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length) f2fs_remove_dirty_inode(inode); } } - clear_page_private_all(&folio->page); + folio_detach_private(folio); } bool f2fs_release_folio(struct folio *folio, gfp_t wait) @@ -3754,7 +3750,7 @@ bool f2fs_release_folio(struct folio *folio, gfp_t wait) if (folio_test_dirty(folio)) return false; - clear_page_private_all(&folio->page); + folio_detach_private(folio); return true; } @@ -3878,18 +3874,18 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk, set_inode_flag(inode, FI_SKIP_WRITES); for (blkofs = 0; blkofs <= blkofs_end; blkofs++) { - struct page *page; + struct folio *folio; unsigned int blkidx = secidx * blk_per_sec + blkofs; - page = f2fs_get_lock_data_page(inode, blkidx, true); - if (IS_ERR(page)) { + folio = f2fs_get_lock_data_folio(inode, blkidx, true); + if (IS_ERR(folio)) { f2fs_up_write(&sbi->pin_sem); - ret = PTR_ERR(page); + ret = PTR_ERR(folio); goto done; } - set_page_dirty(page); - f2fs_put_page(page, 1); + folio_mark_dirty(folio); + f2fs_folio_put(folio, true); } clear_inode_flag(inode, FI_SKIP_WRITES); @@ -3966,7 +3962,7 @@ retry: if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec || nr_pblocks % blks_per_sec || - !f2fs_valid_pinned_area(sbi, pblock)) { + f2fs_is_sequential_zone_area(sbi, pblock)) { bool last_extent = false; not_aligned++; @@ -4172,7 +4168,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, unsigned int flags, struct iomap *iomap, struct iomap *srcmap) { - struct f2fs_map_blocks map = {}; + struct f2fs_map_blocks map = { NULL, }; pgoff_t next_pgofs = 0; int err; @@ -4181,6 +4177,10 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, map.m_next_pgofs = 
&next_pgofs; map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode), inode->i_write_hint); + if (flags & IOMAP_WRITE && iomap->private) { + map.m_last_pblk = (unsigned long)iomap->private; + iomap->private = NULL; + } /* * If the blocks being overwritten are already allocated, @@ -4219,6 +4219,9 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, iomap->flags |= IOMAP_F_MERGED; iomap->bdev = map.m_bdev; iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk); + + if (flags & IOMAP_WRITE && map.m_last_pblk) + iomap->private = (void *)map.m_last_pblk; } else { if (flags & IOMAP_WRITE) return -ENOTBLK; diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 16c2dfb4f595..43a83bbd3bc5 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -21,7 +21,7 @@ #include "gc.h" static LIST_HEAD(f2fs_stat_list); -static DEFINE_RAW_SPINLOCK(f2fs_stat_lock); +static DEFINE_SPINLOCK(f2fs_stat_lock); #ifdef CONFIG_DEBUG_FS static struct dentry *f2fs_debugfs_root; #endif @@ -91,7 +91,7 @@ static void update_multidevice_stats(struct f2fs_sb_info *sbi) seg_blks = get_seg_entry(sbi, j)->valid_blocks; /* update segment stats */ - if (IS_CURSEG(sbi, j)) + if (is_curseg(sbi, j)) dev_stats[i].devstats[0][DEVSTAT_INUSE]++; else if (seg_blks == BLKS_PER_SEG(sbi)) dev_stats[i].devstats[0][DEVSTAT_FULL]++; @@ -109,7 +109,7 @@ static void update_multidevice_stats(struct f2fs_sb_info *sbi) sec_blks = get_sec_entry(sbi, j)->valid_blocks; /* update section stats */ - if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, j))) + if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, j))) dev_stats[i].devstats[1][DEVSTAT_INUSE]++; else if (sec_blks == BLKS_PER_SEC(sbi)) dev_stats[i].devstats[1][DEVSTAT_FULL]++; @@ -439,9 +439,8 @@ static int stat_show(struct seq_file *s, void *v) { struct f2fs_stat_info *si; int i = 0, j = 0; - unsigned long flags; - raw_spin_lock_irqsave(&f2fs_stat_lock, flags); + spin_lock(&f2fs_stat_lock); list_for_each_entry(si, &f2fs_stat_list, stat_list) { struct f2fs_sb_info *sbi = si->sbi; @@ -753,7 +752,7 @@ static int stat_show(struct seq_file *s, void *v) seq_printf(s, " - paged : %llu KB\n", si->page_mem >> 10); } - raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags); + spin_unlock(&f2fs_stat_lock); return 0; } @@ -765,7 +764,6 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi) struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_stat_info *si; struct f2fs_dev_stats *dev_stats; - unsigned long flags; int i; si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL); @@ -817,9 +815,9 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi) atomic_set(&sbi->max_aw_cnt, 0); - raw_spin_lock_irqsave(&f2fs_stat_lock, flags); + spin_lock(&f2fs_stat_lock); list_add_tail(&si->stat_list, &f2fs_stat_list); - raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags); + spin_unlock(&f2fs_stat_lock); return 0; } @@ -827,11 +825,10 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi) void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { struct f2fs_stat_info *si = F2FS_STAT(sbi); - unsigned long flags; - raw_spin_lock_irqsave(&f2fs_stat_lock, flags); + spin_lock(&f2fs_stat_lock); list_del(&si->stat_list); - raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags); + spin_unlock(&f2fs_stat_lock); kfree(si->dev_stats); kfree(si); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 5a63ff0df03b..fffd7749d6d1 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -173,7 +173,7 @@ static unsigned long dir_block_index(unsigned int level, } static struct f2fs_dir_entry *find_in_block(struct inode *dir, - struct page *dentry_page, + struct 
folio *dentry_folio, const struct f2fs_filename *fname, int *max_slots, bool use_hash) @@ -181,7 +181,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, struct f2fs_dentry_block *dentry_blk; struct f2fs_dentry_ptr d; - dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page); + dentry_blk = folio_address(dentry_folio); make_dentry_ptr_block(dir, &d, dentry_blk); return f2fs_find_target_dentry(&d, fname, max_slots, use_hash); @@ -260,13 +260,12 @@ found: static struct f2fs_dir_entry *find_in_level(struct inode *dir, unsigned int level, const struct f2fs_filename *fname, - struct page **res_page, + struct folio **res_folio, bool use_hash) { int s = GET_DENTRY_SLOTS(fname->disk_name.len); unsigned int nbucket, nblock; unsigned int bidx, end_block, bucket_no; - struct page *dentry_page; struct f2fs_dir_entry *de = NULL; pgoff_t next_pgofs; bool room = false; @@ -284,31 +283,32 @@ start_find_bucket: while (bidx < end_block) { /* no need to allocate new dentry pages to all the indices */ - dentry_page = f2fs_find_data_page(dir, bidx, &next_pgofs); - if (IS_ERR(dentry_page)) { - if (PTR_ERR(dentry_page) == -ENOENT) { + struct folio *dentry_folio; + dentry_folio = f2fs_find_data_folio(dir, bidx, &next_pgofs); + if (IS_ERR(dentry_folio)) { + if (PTR_ERR(dentry_folio) == -ENOENT) { room = true; bidx = next_pgofs; continue; } else { - *res_page = dentry_page; + *res_folio = dentry_folio; break; } } - de = find_in_block(dir, dentry_page, fname, &max_slots, use_hash); + de = find_in_block(dir, dentry_folio, fname, &max_slots, use_hash); if (IS_ERR(de)) { - *res_page = ERR_CAST(de); + *res_folio = ERR_CAST(de); de = NULL; break; } else if (de) { - *res_page = dentry_page; + *res_folio = dentry_folio; break; } if (max_slots >= s) room = true; - f2fs_put_page(dentry_page, 0); + f2fs_folio_put(dentry_folio, false); bidx++; } @@ -329,7 +329,7 @@ start_find_bucket: struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, const struct f2fs_filename *fname, - struct page **res_page) + struct folio **res_folio) { unsigned long npages = dir_blocks(dir); struct f2fs_dir_entry *de = NULL; @@ -337,13 +337,13 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, unsigned int level; bool use_hash = true; - *res_page = NULL; + *res_folio = NULL; #if IS_ENABLED(CONFIG_UNICODE) start_find_entry: #endif if (f2fs_has_inline_dentry(dir)) { - de = f2fs_find_in_inline_dir(dir, fname, res_page, use_hash); + de = f2fs_find_in_inline_dir(dir, fname, res_folio, use_hash); goto out; } @@ -359,14 +359,15 @@ start_find_entry: } for (level = 0; level < max_depth; level++) { - de = find_in_level(dir, level, fname, res_page, use_hash); - if (de || IS_ERR(*res_page)) + de = find_in_level(dir, level, fname, res_folio, use_hash); + if (de || IS_ERR(*res_folio)) break; } out: #if IS_ENABLED(CONFIG_UNICODE) - if (IS_CASEFOLDED(dir) && !de && use_hash) { + if (!sb_no_casefold_compat_fallback(dir->i_sb) && + IS_CASEFOLDED(dir) && !de && use_hash) { use_hash = false; goto start_find_entry; } @@ -384,7 +385,7 @@ out: * Entry is guaranteed to be valid. 
*/ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, - const struct qstr *child, struct page **res_page) + const struct qstr *child, struct folio **res_folio) { struct f2fs_dir_entry *de = NULL; struct f2fs_filename fname; @@ -393,67 +394,67 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, err = f2fs_setup_filename(dir, child, 1, &fname); if (err) { if (err == -ENOENT) - *res_page = NULL; + *res_folio = NULL; else - *res_page = ERR_PTR(err); + *res_folio = ERR_PTR(err); return NULL; } - de = __f2fs_find_entry(dir, &fname, res_page); + de = __f2fs_find_entry(dir, &fname, res_folio); f2fs_free_filename(&fname); return de; } -struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p) +struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct folio **f) { - return f2fs_find_entry(dir, &dotdot_name, p); + return f2fs_find_entry(dir, &dotdot_name, f); } ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, - struct page **page) + struct folio **folio) { ino_t res = 0; struct f2fs_dir_entry *de; - de = f2fs_find_entry(dir, qstr, page); + de = f2fs_find_entry(dir, qstr, folio); if (de) { res = le32_to_cpu(de->ino); - f2fs_put_page(*page, 0); + f2fs_folio_put(*folio, false); } return res; } void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, - struct page *page, struct inode *inode) + struct folio *folio, struct inode *inode) { enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA; - lock_page(page); - f2fs_wait_on_page_writeback(page, type, true, true); + folio_lock(folio); + f2fs_folio_wait_writeback(folio, type, true, true); de->ino = cpu_to_le32(inode->i_ino); de->file_type = fs_umode_to_ftype(inode->i_mode); - set_page_dirty(page); + folio_mark_dirty(folio); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); f2fs_mark_inode_dirty_sync(dir, false); - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); } static void init_dent_inode(struct inode *dir, struct inode *inode, const struct f2fs_filename *fname, - struct page *ipage) + struct folio *ifolio) { struct f2fs_inode *ri; if (!fname) /* tmpfile case? */ return; - f2fs_wait_on_page_writeback(ipage, NODE, true, true); + f2fs_folio_wait_writeback(ifolio, NODE, true, true); - /* copy name info. to this inode page */ - ri = F2FS_INODE(ipage); + /* copy name info. 
to this inode folio */ + ri = F2FS_INODE(ifolio); ri->i_namelen = cpu_to_le32(fname->disk_name.len); memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len); if (IS_ENCRYPTED(dir)) { @@ -474,7 +475,7 @@ static void init_dent_inode(struct inode *dir, struct inode *inode, file_lost_pino(inode); } } - set_page_dirty(ipage); + folio_mark_dirty(ifolio); } void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, @@ -491,72 +492,73 @@ void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, } static int make_empty_dir(struct inode *inode, - struct inode *parent, struct page *page) + struct inode *parent, struct folio *folio) { - struct page *dentry_page; + struct folio *dentry_folio; struct f2fs_dentry_block *dentry_blk; struct f2fs_dentry_ptr d; if (f2fs_has_inline_dentry(inode)) - return f2fs_make_empty_inline_dir(inode, parent, page); + return f2fs_make_empty_inline_dir(inode, parent, folio); - dentry_page = f2fs_get_new_data_page(inode, page, 0, true); - if (IS_ERR(dentry_page)) - return PTR_ERR(dentry_page); + dentry_folio = f2fs_get_new_data_folio(inode, folio, 0, true); + if (IS_ERR(dentry_folio)) + return PTR_ERR(dentry_folio); - dentry_blk = page_address(dentry_page); + dentry_blk = folio_address(dentry_folio); make_dentry_ptr_block(NULL, &d, dentry_blk); f2fs_do_make_empty_dir(inode, parent, &d); - set_page_dirty(dentry_page); - f2fs_put_page(dentry_page, 1); + folio_mark_dirty(dentry_folio); + f2fs_folio_put(dentry_folio, true); return 0; } -struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, - const struct f2fs_filename *fname, struct page *dpage) +struct folio *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, + const struct f2fs_filename *fname, struct folio *dfolio) { - struct page *page; + struct folio *folio; int err; if (is_inode_flag_set(inode, FI_NEW_INODE)) { - page = f2fs_new_inode_page(inode); - if (IS_ERR(page)) - return page; + folio = f2fs_new_inode_folio(inode); + if (IS_ERR(folio)) + return folio; if (S_ISDIR(inode->i_mode)) { /* in order to handle error case */ - get_page(page); - err = make_empty_dir(inode, dir, page); + folio_get(folio); + err = make_empty_dir(inode, dir, folio); if (err) { - lock_page(page); + folio_lock(folio); goto put_error; } - put_page(page); + folio_put(folio); } - err = f2fs_init_acl(inode, dir, page, dpage); + err = f2fs_init_acl(inode, dir, folio, dfolio); if (err) goto put_error; err = f2fs_init_security(inode, dir, - fname ? fname->usr_fname : NULL, page); + fname ? fname->usr_fname : NULL, + folio); if (err) goto put_error; if (IS_ENCRYPTED(inode)) { - err = fscrypt_set_context(inode, page); + err = fscrypt_set_context(inode, folio); if (err) goto put_error; } } else { - page = f2fs_get_inode_page(F2FS_I_SB(dir), inode->i_ino); - if (IS_ERR(page)) - return page; + folio = f2fs_get_inode_folio(F2FS_I_SB(dir), inode->i_ino); + if (IS_ERR(folio)) + return folio; } - init_dent_inode(dir, inode, fname, page); + init_dent_inode(dir, inode, fname, folio); /* * This file should be checkpointed during fsync. 
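A pattern that repeats throughout these hunks: the page-based helpers being removed (f2fs_grab_cache_page, f2fs_get_new_data_page, f2fs_init_inode_metadata taking struct page) signalled failure by returning NULL, while their folio replacements return an ERR_PTR-encoded errno, so every caller changes from "if (!page)" to "if (IS_ERR(folio))" and propagates PTR_ERR(). Below is a minimal, self-contained userspace sketch of that convention, mirroring the ERR_PTR/IS_ERR/PTR_ERR helpers from include/linux/err.h; the demo_folio type and demo_grab_folio function are made-up illustration names, not f2fs API.

/*
 * Sketch of the ERR_PTR convention the folio helpers use: an errno is
 * encoded in the pointer value instead of returning NULL.  These three
 * helpers mirror include/linux/err.h; everything named "demo_*" is a
 * hypothetical stand-in for illustration only.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* The top 4095 pointer values are reserved for errno codes. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct demo_folio { int index; };

/* Folio-style helper: failure comes back as ERR_PTR(-ENOMEM), never NULL. */
static struct demo_folio *demo_grab_folio(int index, int fail)
{
	static struct demo_folio f;

	if (fail)
		return ERR_PTR(-ENOMEM);
	f.index = index;
	return &f;
}

int main(void)
{
	struct demo_folio *folio = demo_grab_folio(0, 1);

	/* Caller pattern after the conversion: test IS_ERR, not NULL. */
	if (IS_ERR(folio)) {
		printf("grab failed: %ld\n", PTR_ERR(folio));
		return 1;
	}
	printf("got folio index %d\n", folio->index);
	return 0;
}

This is why hunks such as the f2fs_get_new_data_folio and f2fs_xattr_fiemap conversions above can drop the -ENOMEM literal at the call site and simply return PTR_ERR(folio): the allocation helper now carries the precise error code itself.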
@@ -573,12 +575,12 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, f2fs_remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino); f2fs_i_links_write(inode, true); } - return page; + return folio; put_error: clear_nlink(inode); - f2fs_update_inode(inode, page); - f2fs_put_page(page, 1); + f2fs_update_inode(inode, folio); + f2fs_folio_put(folio, true); return ERR_PTR(err); } @@ -620,14 +622,14 @@ next: goto next; } -bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, +bool f2fs_has_enough_room(struct inode *dir, struct folio *ifolio, const struct f2fs_filename *fname) { struct f2fs_dentry_ptr d; unsigned int bit_pos; int slots = GET_DENTRY_SLOTS(fname->disk_name.len); - make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage)); + make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ifolio)); bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max); @@ -664,10 +666,10 @@ int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, unsigned int current_depth; unsigned long bidx, block; unsigned int nbucket, nblock; - struct page *dentry_page = NULL; + struct folio *dentry_folio = NULL; struct f2fs_dentry_block *dentry_blk = NULL; struct f2fs_dentry_ptr d; - struct page *page = NULL; + struct folio *folio = NULL; int slots, err = 0; level = 0; @@ -697,30 +699,30 @@ start: (le32_to_cpu(fname->hash) % nbucket)); for (block = bidx; block <= (bidx + nblock - 1); block++) { - dentry_page = f2fs_get_new_data_page(dir, NULL, block, true); - if (IS_ERR(dentry_page)) - return PTR_ERR(dentry_page); + dentry_folio = f2fs_get_new_data_folio(dir, NULL, block, true); + if (IS_ERR(dentry_folio)) + return PTR_ERR(dentry_folio); - dentry_blk = page_address(dentry_page); + dentry_blk = folio_address(dentry_folio); bit_pos = f2fs_room_for_filename(&dentry_blk->dentry_bitmap, slots, NR_DENTRY_IN_BLOCK); if (bit_pos < NR_DENTRY_IN_BLOCK) goto add_dentry; - f2fs_put_page(dentry_page, 1); + f2fs_folio_put(dentry_folio, true); } /* Move to next level to find the empty slot for new dentry */ ++level; goto start; add_dentry: - f2fs_wait_on_page_writeback(dentry_page, DATA, true, true); + f2fs_folio_wait_writeback(dentry_folio, DATA, true, true); if (inode) { f2fs_down_write(&F2FS_I(inode)->i_sem); - page = f2fs_init_inode_metadata(inode, dir, fname, NULL); - if (IS_ERR(page)) { - err = PTR_ERR(page); + folio = f2fs_init_inode_metadata(inode, dir, fname, NULL); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); goto fail; } } @@ -729,16 +731,16 @@ add_dentry: f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash, bit_pos); - set_page_dirty(dentry_page); + folio_mark_dirty(dentry_folio); if (inode) { f2fs_i_pino_write(inode, dir->i_ino); /* synchronize inode page's data from inode cache */ if (is_inode_flag_set(inode, FI_NEW_INODE)) - f2fs_update_inode(inode, page); + f2fs_update_inode(inode, folio); - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); } f2fs_update_parent_metadata(dir, inode, current_depth); @@ -746,7 +748,7 @@ fail: if (inode) f2fs_up_write(&F2FS_I(inode)->i_sem); - f2fs_put_page(dentry_page, 1); + f2fs_folio_put(dentry_folio, true); return err; } @@ -780,7 +782,7 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name, struct inode *inode, nid_t ino, umode_t mode) { struct f2fs_filename fname; - struct page *page = NULL; + struct folio *folio = NULL; struct f2fs_dir_entry *de = NULL; int err; @@ -796,14 +798,14 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name, * consistency more. 
*/ if (current != F2FS_I(dir)->task) { - de = __f2fs_find_entry(dir, &fname, &page); + de = __f2fs_find_entry(dir, &fname, &folio); F2FS_I(dir)->task = NULL; } if (de) { - f2fs_put_page(page, 0); + f2fs_folio_put(folio, false); err = -EEXIST; - } else if (IS_ERR(page)) { - err = PTR_ERR(page); + } else if (IS_ERR(folio)) { + err = PTR_ERR(folio); } else { err = f2fs_add_dentry(dir, &fname, inode, ino, mode); } @@ -814,16 +816,16 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name, int f2fs_do_tmpfile(struct inode *inode, struct inode *dir, struct f2fs_filename *fname) { - struct page *page; + struct folio *folio; int err = 0; f2fs_down_write(&F2FS_I(inode)->i_sem); - page = f2fs_init_inode_metadata(inode, dir, fname, NULL); - if (IS_ERR(page)) { - err = PTR_ERR(page); + folio = f2fs_init_inode_metadata(inode, dir, fname, NULL); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); goto fail; } - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); clear_inode_flag(inode, FI_NEW_INODE); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); @@ -859,13 +861,13 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode) * It only removes the dentry from the dentry page, corresponding name * entry in name page does not need to be touched during deletion. */ -void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, +void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct folio *folio, struct inode *dir, struct inode *inode) { - struct f2fs_dentry_block *dentry_blk; + struct f2fs_dentry_block *dentry_blk; unsigned int bit_pos; int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len)); - pgoff_t index = page_folio(page)->index; + pgoff_t index = folio->index; int i; f2fs_update_time(F2FS_I_SB(dir), REQ_TIME); @@ -874,12 +876,12 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, f2fs_add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO); if (f2fs_has_inline_dentry(dir)) - return f2fs_delete_inline_entry(dentry, page, dir, inode); + return f2fs_delete_inline_entry(dentry, folio, dir, inode); - lock_page(page); - f2fs_wait_on_page_writeback(page, DATA, true, true); + folio_lock(folio); + f2fs_folio_wait_writeback(folio, DATA, true, true); - dentry_blk = page_address(page); + dentry_blk = folio_address(folio); bit_pos = dentry - dentry_blk->dentry; for (i = 0; i < slots; i++) __clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); @@ -888,19 +890,19 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, NR_DENTRY_IN_BLOCK, 0); - set_page_dirty(page); + folio_mark_dirty(folio); if (bit_pos == NR_DENTRY_IN_BLOCK && !f2fs_truncate_hole(dir, index, index + 1)) { - f2fs_clear_page_cache_dirty_tag(page_folio(page)); - clear_page_dirty_for_io(page); - ClearPageUptodate(page); - clear_page_private_all(page); + f2fs_clear_page_cache_dirty_tag(folio); + folio_clear_dirty_for_io(folio); + folio_clear_uptodate(folio); + folio_detach_private(folio); inode_dec_dirty_pages(dir); f2fs_remove_dirty_inode(dir); } - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); f2fs_mark_inode_dirty_sync(dir, false); @@ -912,7 +914,6 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, bool f2fs_empty_dir(struct inode *dir) { unsigned long bidx = 0; - struct page *dentry_page; unsigned int bit_pos; struct f2fs_dentry_block *dentry_blk; unsigned long nblock = dir_blocks(dir); @@ -922,10 +923,11 @@ bool 
f2fs_empty_dir(struct inode *dir) while (bidx < nblock) { pgoff_t next_pgofs; + struct folio *dentry_folio; - dentry_page = f2fs_find_data_page(dir, bidx, &next_pgofs); - if (IS_ERR(dentry_page)) { - if (PTR_ERR(dentry_page) == -ENOENT) { + dentry_folio = f2fs_find_data_folio(dir, bidx, &next_pgofs); + if (IS_ERR(dentry_folio)) { + if (PTR_ERR(dentry_folio) == -ENOENT) { bidx = next_pgofs; continue; } else { @@ -933,7 +935,7 @@ bool f2fs_empty_dir(struct inode *dir) } } - dentry_blk = page_address(dentry_page); + dentry_blk = folio_address(dentry_folio); if (bidx == 0) bit_pos = 2; else @@ -942,7 +944,7 @@ bool f2fs_empty_dir(struct inode *dir) NR_DENTRY_IN_BLOCK, bit_pos); - f2fs_put_page(dentry_page, 0); + f2fs_folio_put(dentry_folio, false); if (bit_pos < NR_DENTRY_IN_BLOCK) return false; @@ -1041,7 +1043,6 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) struct inode *inode = file_inode(file); unsigned long npages = dir_blocks(inode); struct f2fs_dentry_block *dentry_blk = NULL; - struct page *dentry_page = NULL; struct file_ra_state *ra = &file->f_ra; loff_t start_pos = ctx->pos; unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK); @@ -1065,6 +1066,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) } for (; n < npages; ctx->pos = n * NR_DENTRY_IN_BLOCK) { + struct folio *dentry_folio; pgoff_t next_pgofs; /* allow readdir() to be interrupted */ @@ -1079,9 +1081,9 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) page_cache_sync_readahead(inode->i_mapping, ra, file, n, min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES)); - dentry_page = f2fs_find_data_page(inode, n, &next_pgofs); - if (IS_ERR(dentry_page)) { - err = PTR_ERR(dentry_page); + dentry_folio = f2fs_find_data_folio(inode, n, &next_pgofs); + if (IS_ERR(dentry_folio)) { + err = PTR_ERR(dentry_folio); if (err == -ENOENT) { err = 0; n = next_pgofs; @@ -1091,18 +1093,15 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) } } - dentry_blk = page_address(dentry_page); + dentry_blk = folio_address(dentry_folio); make_dentry_ptr_block(inode, &d, dentry_blk); err = f2fs_fill_dentries(ctx, &d, n * NR_DENTRY_IN_BLOCK, &fstr); - if (err) { - f2fs_put_page(dentry_page, 0); + f2fs_folio_put(dentry_folio, false); + if (err) break; - } - - f2fs_put_page(dentry_page, 0); n++; } diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index 347b3b647834..199c1e7a83ef 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -19,10 +19,10 @@ #include "node.h" #include <trace/events/f2fs.h> -bool sanity_check_extent_cache(struct inode *inode, struct page *ipage) +bool sanity_check_extent_cache(struct inode *inode, struct folio *ifolio) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext; + struct f2fs_extent *i_ext = &F2FS_INODE(ifolio)->i_ext; struct extent_info ei; int devi; @@ -407,21 +407,21 @@ static void __drop_largest_extent(struct extent_tree *et, } } -void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage) +void f2fs_init_read_extent_tree(struct inode *inode, struct folio *ifolio) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct extent_tree_info *eti = &sbi->extent_tree[EX_READ]; - struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext; + struct f2fs_extent *i_ext = &F2FS_INODE(ifolio)->i_ext; struct extent_tree *et; struct extent_node *en; - struct extent_info ei; + struct extent_info ei = {0}; if (!__may_extent_tree(inode, EX_READ)) { /* drop largest read extent */ 
if (i_ext->len) { - f2fs_wait_on_page_writeback(ipage, NODE, true, true); + f2fs_folio_wait_writeback(ifolio, NODE, true, true); i_ext->len = 0; - set_page_dirty(ipage); + folio_mark_dirty(ifolio); } set_inode_flag(inode, FI_NO_EXTENT); return; @@ -934,7 +934,7 @@ static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type typ if (!__may_extent_tree(dn->inode, type)) return; - ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) + + ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_folio), dn->inode) + dn->ofs_in_node; ei.len = 1; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index f1576dc6ec67..46be7560548c 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -63,16 +63,25 @@ enum { FAULT_BLKADDR_CONSISTENCE, FAULT_NO_SEGMENT, FAULT_INCONSISTENT_FOOTER, + FAULT_TIMEOUT, + FAULT_VMALLOC, FAULT_MAX, }; -#ifdef CONFIG_F2FS_FAULT_INJECTION -#define F2FS_ALL_FAULT_TYPE (GENMASK(FAULT_MAX - 1, 0)) +/* indicate which option to update */ +enum fault_option { + FAULT_RATE = 1, /* only update fault rate */ + FAULT_TYPE = 2, /* only update fault type */ + FAULT_ALL = 4, /* reset all fault injection options/stats */ +}; +#ifdef CONFIG_F2FS_FAULT_INJECTION struct f2fs_fault_info { atomic_t inject_ops; int inject_rate; unsigned int inject_type; + /* Used to account total count of injection for each type */ + unsigned int inject_count[FAULT_MAX]; }; extern const char *f2fs_fault_name[FAULT_MAX]; @@ -317,7 +326,7 @@ struct inode_entry { struct fsync_node_entry { struct list_head list; /* list head */ - struct page *page; /* warm node page pointer */ + struct folio *folio; /* warm node folio pointer */ unsigned int seq_id; /* sequence id */ }; @@ -377,7 +386,7 @@ struct discard_cmd { struct rb_node rb_node; /* rb node located in rb-tree */ struct discard_info di; /* discard info */ struct list_head list; /* command list */ - struct completion wait; /* compleation */ + struct completion wait; /* completion */ struct block_device *bdev; /* bdev */ unsigned short ref; /* reference count */ unsigned char state; /* state */ @@ -606,6 +615,9 @@ enum { /* congestion wait timeout value, default: 20ms */ #define DEFAULT_IO_TIMEOUT (msecs_to_jiffies(20)) +/* timeout value injected, default: 1000ms */ +#define DEFAULT_FAULT_TIMEOUT (msecs_to_jiffies(1000)) + /* maximum retry quota flush count */ #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8 @@ -720,6 +732,7 @@ struct f2fs_map_blocks { block_t m_lblk; unsigned int m_len; unsigned int m_flags; + unsigned long m_last_pblk; /* last allocated block, only used for DIO in LFS mode */ pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */ pgoff_t *m_next_extent; /* point to next possible extent */ int m_seg_type; @@ -821,6 +834,7 @@ enum { FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */ FI_ATOMIC_REPLACE, /* indicate atomic replace */ FI_OPENED_FILE, /* indicate file has been opened */ + FI_DONATE_FINISHED, /* indicate page donation of file has been finished */ FI_MAX, /* max flag, never be used */ }; @@ -862,6 +876,7 @@ struct f2fs_inode_info { /* linked in global inode list for cache donation */ struct list_head gdonate_list; pgoff_t donate_start, donate_end; /* inclusive */ + atomic_t open_count; /* # of open files */ struct task_struct *atomic_write_task; /* store atomic write task */ struct extent_tree *extent_tree[NR_EXTENT_CACHES]; @@ -994,11 +1009,11 @@ struct f2fs_nm_info { */ struct dnode_of_data { struct inode *inode; /* vfs inode pointer */ - struct page *inode_page; /* its inode page, NULL is possible */ - 
struct page *node_page; /* cached direct node page */ + struct folio *inode_folio; /* its inode folio, NULL is possible */ + struct folio *node_folio; /* cached direct node folio */ nid_t nid; /* node id of the direct node block */ unsigned int ofs_in_node; /* data offset in the node page */ - bool inode_page_locked; /* inode page is locked or not */ + bool inode_folio_locked; /* inode folio is locked or not */ bool node_changed; /* is node block changed */ char cur_level; /* level of hole node page */ char max_level; /* level of current page located */ @@ -1006,12 +1021,12 @@ struct dnode_of_data { }; static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, - struct page *ipage, struct page *npage, nid_t nid) + struct folio *ifolio, struct folio *nfolio, nid_t nid) { memset(dn, 0, sizeof(*dn)); dn->inode = inode; - dn->inode_page = ipage; - dn->node_page = npage; + dn->inode_folio = ifolio; + dn->node_folio = nfolio; dn->nid = nid; } @@ -1110,8 +1125,8 @@ struct f2fs_sm_info { * f2fs monitors the number of several block types such as on-writeback, * dirty dentry blocks, dirty node blocks, and dirty meta blocks. */ -#define WB_DATA_TYPE(p, f) \ - (f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA) +#define WB_DATA_TYPE(folio, f) \ + (f || f2fs_is_cp_guaranteed(folio) ? F2FS_WB_CP_DATA : F2FS_WB_DATA) enum count_type { F2FS_DIRTY_DENTS, F2FS_DIRTY_DATA, @@ -1227,7 +1242,10 @@ struct f2fs_io_info { blk_opf_t op_flags; /* req_flag_bits */ block_t new_blkaddr; /* new block address to be written */ block_t old_blkaddr; /* old block address before Cow */ - struct page *page; /* page to be written */ + union { + struct page *page; /* page to be written */ + struct folio *folio; + }; struct page *encrypted_page; /* encrypted page */ struct page *compressed_page; /* compressed page */ struct list_head list; /* serialize IOs */ @@ -1273,7 +1291,7 @@ struct f2fs_bio_info { struct f2fs_dev_info { struct file *bdev_file; struct block_device *bdev; - char path[MAX_PATH_LEN]; + char path[MAX_PATH_LEN + 1]; unsigned int total_segments; block_t start_blk; block_t end_blk; @@ -1414,7 +1432,7 @@ enum { enum { MEMORY_MODE_NORMAL, /* memory mode for normal devices */ - MEMORY_MODE_LOW, /* memory mode for low memry devices */ + MEMORY_MODE_LOW, /* memory mode for low memory devices */ }; enum errors_option { @@ -1478,7 +1496,7 @@ enum compress_flag { #define COMPRESS_DATA_RESERVED_SIZE 4 struct compress_data { __le32 clen; /* compressed data size */ - __le32 chksum; /* compressed data chksum */ + __le32 chksum; /* compressed data checksum */ __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */ u8 cdata[]; /* compressed data */ }; @@ -1523,6 +1541,7 @@ struct compress_io_ctx { struct decompress_io_ctx { u32 magic; /* magic number to indicate page is compressed */ struct inode *inode; /* inode the context belong to */ + struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ pgoff_t cluster_idx; /* cluster index number */ unsigned int cluster_size; /* page count in cluster */ unsigned int log_cluster_size; /* log of cluster size */ @@ -1563,6 +1582,7 @@ struct decompress_io_ctx { bool failed; /* IO error occurred before decompression? */ bool need_verity; /* need fs-verity verification after decompression? 
*/ + unsigned char compress_algorithm; /* backup algorithm type */ void *private; /* payload buffer for specified decompression algorithm */ void *private2; /* extra payload buffer */ struct work_struct verity_work; /* work to verify the decompressed pages */ @@ -1711,6 +1731,9 @@ struct f2fs_sb_info { /* for skip statistic */ unsigned long long skipped_gc_rwsem; /* FG_GC only */ + /* free sections reserved for pinned file */ + unsigned int reserved_pin_section; + /* threshold for gc trials on pinned files */ unsigned short gc_pin_file_threshold; struct f2fs_rwsem pin_sem; @@ -1780,7 +1803,7 @@ struct f2fs_sb_info { unsigned int dirty_device; /* for checkpoint data flush */ spinlock_t dev_lock; /* protect dirty_device */ bool aligned_blksize; /* all devices has the same logical blksize */ - unsigned int first_zoned_segno; /* first zoned segno */ + unsigned int first_seq_zone_segno; /* first segno in sequential zone */ /* For write statistics */ u64 sectors_written_start; @@ -1902,6 +1925,7 @@ static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type, atomic_inc(&ffi->inject_ops); if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { atomic_set(&ffi->inject_ops, 0); + ffi->inject_count[type]++; f2fs_info_ratelimited(sbi, "inject %s in %s of %pS", f2fs_fault_name[type], func, parent_func); return true; @@ -1963,28 +1987,20 @@ static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, /* * Inline functions */ -static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, - const void *address, unsigned int length) +static inline u32 __f2fs_crc32(u32 crc, const void *address, + unsigned int length) { return crc32(crc, address, length); } -static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, - unsigned int length) -{ - return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); -} - -static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, - void *buf, size_t buf_size) +static inline u32 f2fs_crc32(const void *address, unsigned int length) { - return f2fs_crc32(sbi, buf, buf_size) == blk_crc; + return __f2fs_crc32(F2FS_SUPER_MAGIC, address, length); } -static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, - const void *address, unsigned int length) +static inline u32 f2fs_chksum(u32 crc, const void *address, unsigned int length) { - return __f2fs_crc32(sbi, crc, address, length); + return __f2fs_crc32(crc, address, length); } static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) @@ -2007,16 +2023,11 @@ static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) return F2FS_I_SB(mapping->host); } -static inline struct f2fs_sb_info *F2FS_F_SB(struct folio *folio) +static inline struct f2fs_sb_info *F2FS_F_SB(const struct folio *folio) { return F2FS_M_SB(folio->mapping); } -static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) -{ - return F2FS_F_SB(page_folio(page)); -} - static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) { return (struct f2fs_super_block *)(sbi->raw_super); @@ -2037,14 +2048,14 @@ static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) return (struct f2fs_checkpoint *)(sbi->ckpt); } -static inline struct f2fs_node *F2FS_NODE(const struct page *page) +static inline struct f2fs_node *F2FS_NODE(const struct folio *folio) { - return (struct f2fs_node *)page_address(page); + return (struct f2fs_node *)folio_address(folio); } -static inline struct f2fs_inode *F2FS_INODE(struct page *page) +static inline struct 
f2fs_inode *F2FS_INODE(const struct folio *folio) { - return &((struct f2fs_node *)page_address(page))->i; + return &((struct f2fs_node *)folio_address(folio))->i; } static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) @@ -2082,6 +2093,16 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) return sbi->node_inode->i_mapping; } +static inline bool is_meta_folio(struct folio *folio) +{ + return folio->mapping == META_MAPPING(F2FS_F_SB(folio)); +} + +static inline bool is_node_folio(struct folio *folio) +{ + return folio->mapping == NODE_MAPPING(F2FS_F_SB(folio)); +} + static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) { return test_bit(type, &sbi->s_flag); @@ -2437,6 +2458,13 @@ release_quota: } #define PAGE_PRIVATE_GET_FUNC(name, flagname) \ +static inline bool folio_test_f2fs_##name(const struct folio *folio) \ +{ \ + unsigned long priv = (unsigned long)folio->private; \ + unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) | \ + (1UL << PAGE_PRIVATE_##flagname); \ + return (priv & v) == v; \ +} \ static inline bool page_private_##name(struct page *page) \ { \ return PagePrivate(page) && \ @@ -2445,6 +2473,17 @@ static inline bool page_private_##name(struct page *page) \ } #define PAGE_PRIVATE_SET_FUNC(name, flagname) \ +static inline void folio_set_f2fs_##name(struct folio *folio) \ +{ \ + unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) | \ + (1UL << PAGE_PRIVATE_##flagname); \ + if (!folio->private) \ + folio_attach_private(folio, (void *)v); \ + else { \ + v |= (unsigned long)folio->private; \ + folio->private = (void *)v; \ + } \ +} \ static inline void set_page_private_##name(struct page *page) \ { \ if (!PagePrivate(page)) \ @@ -2454,6 +2493,16 @@ static inline void set_page_private_##name(struct page *page) \ } #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \ +static inline void folio_clear_f2fs_##name(struct folio *folio) \ +{ \ + unsigned long v = (unsigned long)folio->private; \ + \ + v &= ~(1UL << PAGE_PRIVATE_##flagname); \ + if (v == (1UL << PAGE_PRIVATE_NOT_POINTER)) \ + folio_detach_private(folio); \ + else \ + folio->private = (void *)v; \ +} \ static inline void clear_page_private_##name(struct page *page) \ { \ clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ @@ -2476,39 +2525,23 @@ PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE); PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION); PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE); -static inline unsigned long get_page_private_data(struct page *page) +static inline unsigned long folio_get_f2fs_data(struct folio *folio) { - unsigned long data = page_private(page); + unsigned long data = (unsigned long)folio->private; if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data)) return 0; return data >> PAGE_PRIVATE_MAX; } -static inline void set_page_private_data(struct page *page, unsigned long data) +static inline void folio_set_f2fs_data(struct folio *folio, unsigned long data) { - if (!PagePrivate(page)) - attach_page_private(page, (void *)0); - set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); - page_private(page) |= data << PAGE_PRIVATE_MAX; -} + data = (1UL << PAGE_PRIVATE_NOT_POINTER) | (data << PAGE_PRIVATE_MAX); -static inline void clear_page_private_data(struct page *page) -{ - page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0); - if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) - detach_page_private(page); -} - -static inline void clear_page_private_all(struct page *page) -{ - clear_page_private_data(page); - 
clear_page_private_reference(page); - clear_page_private_gcing(page); - clear_page_private_inline(page); - clear_page_private_atomic(page); - - f2fs_bug_on(F2FS_P_SB(page), page_private(page)); + if (!folio_test_private(folio)) + folio_attach_private(folio, (void *)data); + else + folio->private = (void *)((unsigned long)folio->private | data); } static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, @@ -2518,8 +2551,14 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; spin_lock(&sbi->stat_lock); - f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); - sbi->total_valid_block_count -= (block_t)count; + if (unlikely(sbi->total_valid_block_count < count)) { + f2fs_warn(sbi, "Inconsistent total_valid_block_count:%u, ino:%lu, count:%u", + sbi->total_valid_block_count, inode->i_ino, count); + sbi->total_valid_block_count = 0; + set_sbi_flag(sbi, SBI_NEED_FSCK); + } else { + sbi->total_valid_block_count -= count; + } if (sbi->reserved_blocks && sbi->current_reserved_blocks < sbi->reserved_blocks) sbi->current_reserved_blocks = min(sbi->reserved_blocks, @@ -2849,14 +2888,14 @@ static inline struct folio *f2fs_grab_cache_folio(struct address_space *mapping, return folio; } -static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, - pgoff_t index, bool for_write) +static inline struct folio *f2fs_filemap_get_folio( + struct address_space *mapping, pgoff_t index, + fgf_t fgp_flags, gfp_t gfp_mask) { - struct folio *folio = f2fs_grab_cache_folio(mapping, index, for_write); + if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) + return ERR_PTR(-ENOMEM); - if (IS_ERR(folio)) - return NULL; - return &folio->page; + return __filemap_get_folio(mapping, index, fgp_flags, gfp_mask); } static inline struct page *f2fs_pagecache_get_page( @@ -2871,7 +2910,7 @@ static inline struct page *f2fs_pagecache_get_page( static inline void f2fs_folio_put(struct folio *folio, bool unlock) { - if (!folio) + if (IS_ERR_OR_NULL(folio)) return; if (unlock) { @@ -2890,12 +2929,12 @@ static inline void f2fs_put_page(struct page *page, int unlock) static inline void f2fs_put_dnode(struct dnode_of_data *dn) { - if (dn->node_page) - f2fs_put_page(dn->node_page, 1); - if (dn->inode_page && dn->node_page != dn->inode_page) - f2fs_put_page(dn->inode_page, 0); - dn->node_page = NULL; - dn->inode_page = NULL; + if (dn->node_folio) + f2fs_folio_put(dn->node_folio, true); + if (dn->inode_folio && dn->node_folio != dn->inode_folio) + f2fs_folio_put(dn->inode_folio, false); + dn->node_folio = NULL; + dn->inode_folio = NULL; } static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, @@ -2989,9 +3028,9 @@ static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) -static inline bool IS_INODE(struct page *page) +static inline bool IS_INODE(const struct folio *folio) { - struct f2fs_node *p = F2FS_NODE(page); + struct f2fs_node *p = F2FS_NODE(folio); return RAW_IS_INODE(p); } @@ -3009,31 +3048,31 @@ static inline __le32 *blkaddr_in_node(struct f2fs_node *node) static inline int f2fs_has_extra_attr(struct inode *inode); static inline unsigned int get_dnode_base(struct inode *inode, - struct page *node_page) + struct folio *node_folio) { - if (!IS_INODE(node_page)) + if (!IS_INODE(node_folio)) return 0; return inode ? 
get_extra_isize(inode) : - offset_in_addr(&F2FS_NODE(node_page)->i); + offset_in_addr(&F2FS_NODE(node_folio)->i); } static inline __le32 *get_dnode_addr(struct inode *inode, - struct page *node_page) + struct folio *node_folio) { - return blkaddr_in_node(F2FS_NODE(node_page)) + - get_dnode_base(inode, node_page); + return blkaddr_in_node(F2FS_NODE(node_folio)) + + get_dnode_base(inode, node_folio); } static inline block_t data_blkaddr(struct inode *inode, - struct page *node_page, unsigned int offset) + struct folio *node_folio, unsigned int offset) { - return le32_to_cpu(*(get_dnode_addr(inode, node_page) + offset)); + return le32_to_cpu(*(get_dnode_addr(inode, node_folio) + offset)); } static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) { - return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); + return data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node); } static inline int f2fs_test_bit(unsigned int nr, char *addr) @@ -3344,9 +3383,10 @@ static inline unsigned int addrs_per_page(struct inode *inode, return addrs; } -static inline void *inline_xattr_addr(struct inode *inode, struct page *page) +static inline +void *inline_xattr_addr(struct inode *inode, const struct folio *folio) { - struct f2fs_inode *ri = F2FS_INODE(page); + struct f2fs_inode *ri = F2FS_INODE(folio); return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - get_inline_xattr_addrs(inode)]); @@ -3361,7 +3401,7 @@ static inline int inline_xattr_size(struct inode *inode) /* * Notice: check inline_data flag without inode page lock is unsafe. - * It could change at any time by f2fs_convert_inline_page(). + * It could change at any time by f2fs_convert_inline_folio(). */ static inline int f2fs_has_inline_data(struct inode *inode) { @@ -3393,9 +3433,9 @@ static inline bool f2fs_is_cow_file(struct inode *inode) return is_inode_flag_set(inode, FI_COW_FILE); } -static inline void *inline_data_addr(struct inode *inode, struct page *page) +static inline void *inline_data_addr(struct inode *inode, struct folio *folio) { - __le32 *addr = get_dnode_addr(inode, page); + __le32 *addr = get_dnode_addr(inode, folio); return (void *)(addr + DEF_INLINE_RESERVED_SIZE); } @@ -3521,6 +3561,14 @@ static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); } +static inline void *f2fs_vmalloc(struct f2fs_sb_info *sbi, size_t size) +{ + if (time_to_inject(sbi, FAULT_VMALLOC)) + return NULL; + + return vmalloc(size); +} + static inline int get_extra_isize(struct inode *inode) { return F2FS_I(inode)->i_extra_isize / sizeof(__le32); @@ -3585,9 +3633,9 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, bool readonly, bool need_lock); int f2fs_precache_extents(struct inode *inode); -int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int f2fs_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int f2fs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); @@ -3597,14 +3645,15 @@ int f2fs_pin_file_control(struct inode *inode, bool inc); * inode.c */ void f2fs_set_inode_flags(struct inode *inode); -bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page 
*page); -void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); +bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct folio *folio); +void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct folio *folio); struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); -void f2fs_update_inode(struct inode *inode, struct page *node_page); +void f2fs_update_inode(struct inode *inode, struct folio *node_folio); void f2fs_update_inode_page(struct inode *inode); int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); +void f2fs_remove_donate_inode(struct inode *inode); void f2fs_evict_inode(struct inode *inode); void f2fs_handle_failed_inode(struct inode *inode); @@ -3648,23 +3697,22 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, unsigned int start_pos, struct fscrypt_str *fstr); void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, struct f2fs_dentry_ptr *d); -struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, - const struct f2fs_filename *fname, struct page *dpage); +struct folio *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, + const struct f2fs_filename *fname, struct folio *dfolio); void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, unsigned int current_depth); int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); void f2fs_drop_nlink(struct inode *dir, struct inode *inode); struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, - const struct f2fs_filename *fname, - struct page **res_page); + const struct f2fs_filename *fname, struct folio **res_folio); struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, - const struct qstr *child, struct page **res_page); -struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); + const struct qstr *child, struct folio **res_folio); +struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct folio **f); ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, - struct page **page); + struct folio **folio); void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, - struct page *page, struct inode *inode); -bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, + struct folio *folio, struct inode *inode); +bool f2fs_has_enough_room(struct inode *dir, struct folio *ifolio, const struct f2fs_filename *fname); void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, const struct fscrypt_str *name, f2fs_hash_t name_hash, @@ -3675,7 +3723,7 @@ int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); int f2fs_do_add_link(struct inode *dir, const struct qstr *name, struct inode *inode, nid_t ino, umode_t mode); -void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, +void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct folio *folio, struct inode *dir, struct inode *inode); int f2fs_do_tmpfile(struct inode *inode, struct inode *dir, struct f2fs_filename *fname); @@ -3719,10 +3767,9 @@ struct node_info; int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); -bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, - const struct folio *folio); +bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, 
struct folio *folio); void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); -void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); +void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio); void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); @@ -3736,15 +3783,13 @@ int f2fs_truncate_xattr_node(struct inode *inode); int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, unsigned int seq_id); int f2fs_remove_inode_page(struct inode *inode); -struct page *f2fs_new_inode_page(struct inode *inode); -struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); +struct folio *f2fs_new_inode_folio(struct inode *inode); +struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs); void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); -struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); +struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid); struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino); -struct page *f2fs_get_inode_page(struct f2fs_sb_info *sbi, pgoff_t ino); -struct page *f2fs_get_xnode_page(struct f2fs_sb_info *sbi, pgoff_t xnid); -struct page *f2fs_get_node_page_ra(struct page *parent, int start); -int f2fs_move_node_page(struct page *node_page, int gc_type); +struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid); +int f2fs_move_node_folio(struct folio *node_folio, int gc_type); void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, struct writeback_control *wbc, bool atomic, @@ -3757,9 +3802,9 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); -int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); -int f2fs_recover_xattr_data(struct inode *inode, struct page *page); -int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); +int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio); +int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio); +int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio); int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, unsigned int segno, struct f2fs_summary_block *sum); int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); @@ -3807,7 +3852,7 @@ int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc); -struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); +struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno); void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr); void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio, @@ -3826,7 +3871,7 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, bool recover_newaddr); enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi, enum log_type seg_type); -int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, +int 
f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct folio *folio, block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type, struct f2fs_io_info *fio); @@ -3858,6 +3903,11 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi, unsigned int segno); +static inline struct inode *fio_inode(struct f2fs_io_info *fio) +{ + return fio->folio->mapping->host; +} + #define DEF_FRAGMENT_SIZE 4 #define MIN_FRAGMENT_SIZE 1 #define MAX_FRAGMENT_SIZE 512 @@ -3874,10 +3924,10 @@ static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi) void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, unsigned char reason); void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi); -struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); -struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); -struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); -struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); +struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index); +struct folio *f2fs_get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index); +struct folio *f2fs_get_meta_folio_retry(struct f2fs_sb_info *sbi, pgoff_t index); +struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index); bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type); bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi, @@ -3922,7 +3972,7 @@ void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi); */ int __init f2fs_init_bioset(void); void f2fs_destroy_bioset(void); -bool f2fs_is_cp_guaranteed(struct page *page); +bool f2fs_is_cp_guaranteed(const struct folio *folio); int f2fs_init_bio_entry_cache(void); void f2fs_destroy_bio_entry_cache(void); void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio, @@ -3930,10 +3980,10 @@ void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio, int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi); void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type); void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, - struct inode *inode, struct page *page, + struct inode *inode, struct folio *folio, nid_t ino, enum page_type type); void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, - struct bio **bio, struct page *page); + struct bio **bio, struct folio *folio); void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); int f2fs_submit_page_bio(struct f2fs_io_info *fio); int f2fs_merge_page_bio(struct f2fs_io_info *fio); @@ -3953,8 +4003,8 @@ struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index, pgoff_t *next_pgofs); struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index, bool for_write); -struct page *f2fs_get_new_data_page(struct inode *inode, - struct page *ipage, pgoff_t index, bool new_i_size); +struct folio *f2fs_get_new_data_folio(struct inode *inode, + struct folio *ifolio, pgoff_t index, bool new_i_size); int f2fs_do_write_data_page(struct f2fs_io_info *fio); int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag); int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, @@ -3978,22 +4028,6 @@ int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi); void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi); extern const struct iomap_ops f2fs_iomap_ops; -static inline struct page *f2fs_find_data_page(struct inode *inode, - pgoff_t 
index, pgoff_t *next_pgofs) -{ - struct folio *folio = f2fs_find_data_folio(inode, index, next_pgofs); - - return &folio->page; -} - -static inline struct page *f2fs_get_lock_data_page(struct inode *inode, - pgoff_t index, bool for_write) -{ - struct folio *folio = f2fs_get_lock_data_folio(inode, index, for_write); - - return &folio->page; -} - /* * gc.c */ @@ -4288,28 +4322,26 @@ extern struct kmem_cache *f2fs_inode_entry_slab; * inline.c */ bool f2fs_may_inline_data(struct inode *inode); -bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage); +bool f2fs_sanity_check_inline_data(struct inode *inode, struct folio *ifolio); bool f2fs_may_inline_dentry(struct inode *inode); -void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage); -void f2fs_truncate_inline_inode(struct inode *inode, - struct page *ipage, u64 from); +void f2fs_do_read_inline_data(struct folio *folio, struct folio *ifolio); +void f2fs_truncate_inline_inode(struct inode *inode, struct folio *ifolio, + u64 from); int f2fs_read_inline_data(struct inode *inode, struct folio *folio); -int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); +int f2fs_convert_inline_folio(struct dnode_of_data *dn, struct folio *folio); int f2fs_convert_inline_inode(struct inode *inode); int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); int f2fs_write_inline_data(struct inode *inode, struct folio *folio); -int f2fs_recover_inline_data(struct inode *inode, struct page *npage); +int f2fs_recover_inline_data(struct inode *inode, struct folio *nfolio); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, - const struct f2fs_filename *fname, - struct page **res_page, - bool use_hash); + const struct f2fs_filename *fname, struct folio **res_folio, + bool use_hash); int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, - struct page *ipage); + struct folio *ifolio); int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, - struct page *page, struct inode *dir, - struct inode *inode); + struct folio *folio, struct inode *dir, struct inode *inode); bool f2fs_empty_inline_dir(struct inode *dir); int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, struct fscrypt_str *fstr); @@ -4332,7 +4364,7 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); /* * extent_cache.c */ -bool sanity_check_extent_cache(struct inode *inode, struct page *ipage); +bool sanity_check_extent_cache(struct inode *inode, struct folio *ifolio); void f2fs_init_extent_tree(struct inode *inode); void f2fs_drop_extent_tree(struct inode *inode); void f2fs_destroy_extent_node(struct inode *inode); @@ -4342,7 +4374,7 @@ int __init f2fs_create_extent_cache(void); void f2fs_destroy_extent_cache(void); /* read extent cache ops */ -void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage); +void f2fs_init_read_extent_tree(struct inode *inode, struct folio *ifolio); bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs, struct extent_info *ei); bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index, @@ -4422,20 +4454,20 @@ enum cluster_check_type { CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */ CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */ }; -bool f2fs_is_compressed_page(struct page *page); -struct page *f2fs_compress_control_page(struct page *page); +bool 
f2fs_is_compressed_page(struct folio *folio); +struct folio *f2fs_compress_control_folio(struct folio *folio); int f2fs_prepare_compress_overwrite(struct inode *inode, struct page **pagep, pgoff_t index, void **fsdata); bool f2fs_compress_write_end(struct inode *inode, void *fsdata, pgoff_t index, unsigned copied); int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock); -void f2fs_compress_write_end_io(struct bio *bio, struct page *page); +void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio); bool f2fs_is_compress_backend_ready(struct inode *inode); bool f2fs_is_compress_level_valid(int alg, int lvl); int __init f2fs_init_compress_mempool(void); void f2fs_destroy_compress_mempool(void); void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task); -void f2fs_end_read_compressed_page(struct page *page, bool failed, +void f2fs_end_read_compressed_page(struct folio *folio, bool failed, block_t blkaddr, bool in_task); bool f2fs_cluster_is_empty(struct compress_ctx *cc); bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); @@ -4458,7 +4490,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, bool in_task); -void f2fs_put_page_dic(struct page *page, bool in_task); +void f2fs_put_folio_dic(struct folio *folio, bool in_task); unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn, unsigned int ofs_in_node); int f2fs_init_compress_ctx(struct compress_ctx *cc); @@ -4473,9 +4505,7 @@ void f2fs_destroy_compress_cache(void); struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi); void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi, block_t blkaddr, unsigned int len); -void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, - nid_t ino, block_t blkaddr); -bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, +bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio, block_t blkaddr); void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino); #define inc_compr_inode_stat(inode) \ @@ -4491,7 +4521,7 @@ void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino); sbi->compr_saved_block += diff; \ } while (0) #else -static inline bool f2fs_is_compressed_page(struct page *page) { return false; } +static inline bool f2fs_is_compressed_page(struct folio *folio) { return false; } static inline bool f2fs_is_compress_backend_ready(struct inode *inode) { if (!f2fs_compressed_file(inode)) @@ -4500,7 +4530,7 @@ static inline bool f2fs_is_compress_backend_ready(struct inode *inode) return false; } static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; } -static inline struct page *f2fs_compress_control_page(struct page *page) +static inline struct folio *f2fs_compress_control_folio(struct folio *folio) { WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); @@ -4509,12 +4539,12 @@ static inline int __init f2fs_init_compress_mempool(void) { return 0; } static inline void f2fs_destroy_compress_mempool(void) { } static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task) { } -static inline void f2fs_end_read_compressed_page(struct page *page, +static inline void f2fs_end_read_compressed_page(struct folio *folio, bool failed, block_t blkaddr, bool in_task) { WARN_ON_ONCE(1); } -static inline void 
f2fs_put_page_dic(struct page *page, bool in_task) +static inline void f2fs_put_folio_dic(struct folio *folio, bool in_task) { WARN_ON_ONCE(1); } @@ -4529,10 +4559,8 @@ static inline int __init f2fs_init_compress_cache(void) { return 0; } static inline void f2fs_destroy_compress_cache(void) { } static inline void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi, block_t blkaddr, unsigned int len) { } -static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, - struct page *page, nid_t ino, block_t blkaddr) { } -static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, - struct page *page, block_t blkaddr) { return false; } +static inline bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, + struct folio *folio, block_t blkaddr) { return false; } static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino) { } #define inc_compr_inode_stat(inode) do { } while (0) @@ -4622,12 +4650,16 @@ F2FS_FEATURE_FUNCS(readonly, RO); F2FS_FEATURE_FUNCS(device_alias, DEVICE_ALIAS); #ifdef CONFIG_BLK_DEV_ZONED -static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, - block_t blkaddr) +static inline bool f2fs_zone_is_seq(struct f2fs_sb_info *sbi, int devi, + unsigned int zone) { - unsigned int zno = blkaddr / sbi->blocks_per_blkz; + return test_bit(zone, FDEV(devi).blkz_seq); +} - return test_bit(zno, FDEV(devi).blkz_seq); +static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, + block_t blkaddr) +{ + return f2fs_zone_is_seq(sbi, devi, blkaddr / sbi->blocks_per_blkz); } #endif @@ -4699,15 +4731,31 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi) return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS; } -static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi, +static inline bool f2fs_is_sequential_zone_area(struct f2fs_sb_info *sbi, block_t blkaddr) { if (f2fs_sb_has_blkzoned(sbi)) { +#ifdef CONFIG_BLK_DEV_ZONED int devi = f2fs_target_device_index(sbi, blkaddr); - return !bdev_is_zoned(FDEV(devi).bdev); + if (!bdev_is_zoned(FDEV(devi).bdev)) + return false; + + if (f2fs_is_multi_device(sbi)) { + if (blkaddr < FDEV(devi).start_blk || + blkaddr > FDEV(devi).end_blk) { + f2fs_err(sbi, "Invalid block %x", blkaddr); + return false; + } + blkaddr -= FDEV(devi).start_blk; + } + + return f2fs_blkz_is_seq(sbi, devi, blkaddr); +#else + return false; +#endif } - return true; + return false; } static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi) @@ -4762,10 +4810,11 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) #ifdef CONFIG_F2FS_FAULT_INJECTION extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate, - unsigned long type); + unsigned long type, enum fault_option fo); #else static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, - unsigned long rate, unsigned long type) + unsigned long rate, unsigned long type, + enum fault_option fo) { return 0; } @@ -4795,6 +4844,19 @@ static inline void f2fs_io_schedule_timeout(long timeout) io_schedule_timeout(timeout); } +static inline void f2fs_io_schedule_timeout_killable(long timeout) +{ + while (timeout) { + if (fatal_signal_pending(current)) + return; + set_current_state(TASK_UNINTERRUPTIBLE); + io_schedule_timeout(DEFAULT_IO_TIMEOUT); + if (timeout <= DEFAULT_IO_TIMEOUT) + return; + timeout -= DEFAULT_IO_TIMEOUT; + } +} + static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, struct folio *folio, enum page_type type) { @@ -4824,13 +4886,13 @@ static inline void 
f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi, int i = 0; do { - struct page *page; + struct folio *folio; - page = find_get_page(META_MAPPING(sbi), blkaddr + i); - if (page) { - if (folio_test_writeback(page_folio(page))) + folio = filemap_get_folio(META_MAPPING(sbi), blkaddr + i); + if (!IS_ERR(folio)) { + if (folio_test_writeback(folio)) need_submit = true; - f2fs_put_page(page, 0); + f2fs_folio_put(folio, false); } } while (++i < cnt && !need_submit); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index abbcbb5865a3..42faaed6a02d 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -35,6 +35,17 @@ #include <trace/events/f2fs.h> #include <uapi/linux/f2fs.h> +static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size) +{ + loff_t old_size = i_size_read(inode); + + if (old_size >= new_size) + return; + + /* zero or drop pages only in range of [old_size, new_size] */ + truncate_pagecache(inode, old_size); +} + static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) { struct inode *inode = file_inode(vmf->vma->vm_file); @@ -103,8 +114,13 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); + filemap_invalidate_lock(inode->i_mapping); + f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT); + filemap_invalidate_unlock(inode->i_mapping); + file_update_time(vmf->vma->vm_file); filemap_invalidate_lock_shared(inode->i_mapping); + folio_lock(folio); if (unlikely(folio->mapping != inode->i_mapping || folio_pos(folio) > i_size_read(inode) || @@ -131,7 +147,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) goto out_sem; } - f2fs_wait_on_page_writeback(folio_page(folio, 0), DATA, false, true); + f2fs_folio_wait_writeback(folio, DATA, false, true); /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); @@ -226,12 +242,13 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino) { - struct page *i = find_get_page(NODE_MAPPING(sbi), ino); + struct folio *i = filemap_get_folio(NODE_MAPPING(sbi), ino); bool ret = false; /* But we need to avoid that there are some inode updates */ - if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino)) + if ((!IS_ERR(i) && folio_test_dirty(i)) || + f2fs_need_inode_block_update(sbi, ino)) ret = true; - f2fs_put_page(i, 0); + f2fs_folio_put(i, false); return ret; } @@ -260,7 +277,6 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, - .for_reclaim = 0, }; unsigned int seq_id = 0; @@ -403,7 +419,7 @@ static bool __found_offset(struct address_space *mapping, bool compressed_cluster = false; if (f2fs_compressed_file(inode)) { - block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page, + block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio, ALIGN_DOWN(dn->ofs_in_node, F2FS_I(inode)->i_cluster_size)); compressed_cluster = first_blkaddr == COMPRESS_ADDR; @@ -473,7 +489,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) } } - end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); /* find data/hole in dnode block */ for (; dn.ofs_in_node < end_offset; @@ -532,8 +548,9 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence) return -EINVAL; } -static int f2fs_file_mmap(struct file *file, struct vm_area_struct 
*vma) +static int f2fs_file_mmap_prepare(struct vm_area_desc *desc) { + struct file *file = desc->file; struct inode *inode = file_inode(file); if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) @@ -543,7 +560,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) return -EOPNOTSUPP; file_accessed(file); - vma->vm_ops = &f2fs_file_vm_ops; + desc->vm_ops = &f2fs_file_vm_ops; f2fs_down_read(&F2FS_I(inode)->i_sem); set_inode_flag(inode, FI_MMAP_FILE); @@ -554,19 +571,21 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) static int finish_preallocate_blocks(struct inode *inode) { - int ret; + int ret = 0; + bool opened; - inode_lock(inode); - if (is_inode_flag_set(inode, FI_OPENED_FILE)) { - inode_unlock(inode); + f2fs_down_read(&F2FS_I(inode)->i_sem); + opened = is_inode_flag_set(inode, FI_OPENED_FILE); + f2fs_up_read(&F2FS_I(inode)->i_sem); + if (opened) return 0; - } - if (!file_should_truncate(inode)) { - set_inode_flag(inode, FI_OPENED_FILE); - inode_unlock(inode); - return 0; - } + inode_lock(inode); + if (is_inode_flag_set(inode, FI_OPENED_FILE)) + goto out_unlock; + + if (!file_should_truncate(inode)) + goto out_update; f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); @@ -576,16 +595,17 @@ static int finish_preallocate_blocks(struct inode *inode) filemap_invalidate_unlock(inode->i_mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - - if (!ret) - set_inode_flag(inode, FI_OPENED_FILE); - - inode_unlock(inode); if (ret) - return ret; + goto out_unlock; file_dont_truncate(inode); - return 0; +out_update: + f2fs_down_write(&F2FS_I(inode)->i_sem); + set_inode_flag(inode, FI_OPENED_FILE); + f2fs_up_write(&F2FS_I(inode)->i_sem); +out_unlock: + inode_unlock(inode); + return ret; } static int f2fs_file_open(struct inode *inode, struct file *filp) @@ -609,7 +629,10 @@ static int f2fs_file_open(struct inode *inode, struct file *filp) if (err) return err; - return finish_preallocate_blocks(inode); + err = finish_preallocate_blocks(inode); + if (!err) + atomic_inc(&F2FS_I(inode)->open_count); + return err; } void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) @@ -624,7 +647,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) block_t blkstart; int blklen = 0; - addr = get_dnode_addr(dn->inode, dn->node_page) + ofs; + addr = get_dnode_addr(dn->inode, dn->node_folio) + ofs; blkstart = le32_to_cpu(*addr); /* Assumption: truncation starts with cluster */ @@ -688,7 +711,7 @@ next: * once we invalidate valid blkaddr in range [ofs, ofs + count], * we will invalidate all blkaddr in the whole range. 
*/ - fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), + fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_folio), dn->inode) + ofs; f2fs_update_read_extent_cache_range(dn, fofs, 0, len); f2fs_update_age_extent_cache_range(dn, fofs, len); @@ -743,7 +766,7 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) struct dnode_of_data dn; pgoff_t free_from; int count = 0, err = 0; - struct page *ipage; + struct folio *ifolio; bool truncate_page = false; trace_f2fs_truncate_blocks_enter(inode, from); @@ -761,9 +784,9 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) if (lock) f2fs_lock_op(sbi); - ipage = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(ipage)) { - err = PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(ifolio)) { + err = PTR_ERR(ifolio); goto out; } @@ -776,18 +799,18 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) dec_valid_block_count(sbi, inode, ei.len); f2fs_update_time(sbi, REQ_TIME); - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); goto out; } if (f2fs_has_inline_data(inode)) { - f2fs_truncate_inline_inode(inode, ipage, from); - f2fs_put_page(ipage, 1); + f2fs_truncate_inline_inode(inode, ifolio, from); + f2fs_folio_put(ifolio, true); truncate_page = true; goto out; } - set_new_dnode(&dn, inode, ipage, NULL, 0); + set_new_dnode(&dn, inode, ifolio, NULL, 0); err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA); if (err) { if (err == -ENOENT) @@ -795,12 +818,12 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) goto out; } - count = ADDRS_PER_PAGE(dn.node_page, inode); + count = ADDRS_PER_PAGE(dn.node_folio, inode); count -= dn.ofs_in_node; f2fs_bug_on(sbi, count < 0); - if (dn.ofs_in_node || IS_INODE(dn.node_page)) { + if (dn.ofs_in_node || IS_INODE(dn.node_folio)) { f2fs_truncate_data_blocks_range(&dn, count); free_from += count; } @@ -1023,11 +1046,24 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, { struct inode *inode = d_inode(dentry); struct f2fs_inode_info *fi = F2FS_I(inode); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int err; - if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + if (unlikely(f2fs_cp_error(sbi))) return -EIO; + err = setattr_prepare(idmap, dentry, attr); + if (err) + return err; + + err = fscrypt_prepare_setattr(dentry, attr); + if (err) + return err; + + err = fsverity_prepare_setattr(dentry, attr); + if (err) + return err; + if (unlikely(IS_IMMUTABLE(inode))) return -EPERM; @@ -1044,20 +1080,19 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, !IS_ALIGNED(attr->ia_size, F2FS_BLK_TO_BYTES(fi->i_cluster_size))) return -EINVAL; + /* + * To prevent scattered pin block generation, we don't allow + * smaller/equal size unaligned truncation for pinned file. + * We only support overwrite IO to pinned file, so don't + * care about larger size truncation. 
+ */ + if (f2fs_is_pinned_file(inode) && + attr->ia_size <= i_size_read(inode) && + !IS_ALIGNED(attr->ia_size, + F2FS_BLK_TO_BYTES(CAP_BLKS_PER_SEC(sbi)))) + return -EINVAL; } - err = setattr_prepare(idmap, dentry, attr); - if (err) - return err; - - err = fscrypt_prepare_setattr(dentry, attr); - if (err) - return err; - - err = fsverity_prepare_setattr(dentry, attr); - if (err) - return err; - if (is_quota_modification(idmap, inode, attr)) { err = f2fs_dquot_initialize(inode); if (err) @@ -1065,12 +1100,11 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, } if (i_uid_needs_update(idmap, attr, inode) || i_gid_needs_update(idmap, attr, inode)) { - f2fs_lock_op(F2FS_I_SB(inode)); + f2fs_lock_op(sbi); err = dquot_transfer(idmap, inode, attr); if (err) { - set_sbi_flag(F2FS_I_SB(inode), - SBI_QUOTA_NEED_REPAIR); - f2fs_unlock_op(F2FS_I_SB(inode)); + set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); + f2fs_unlock_op(sbi); return err; } /* @@ -1080,7 +1114,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, i_uid_update(idmap, attr, inode); i_gid_update(idmap, attr, inode); f2fs_mark_inode_dirty_sync(inode, true); - f2fs_unlock_op(F2FS_I_SB(inode)); + f2fs_unlock_op(sbi); } if (attr->ia_valid & ATTR_SIZE) { @@ -1106,6 +1140,8 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, f2fs_down_write(&fi->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); + if (attr->ia_size > old_size) + f2fs_zero_post_eof_page(inode, attr->ia_size); truncate_setsize(inode, attr->ia_size); if (attr->ia_size <= old_size) @@ -1141,7 +1177,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, f2fs_mark_inode_dirty_sync(inode, true); /* inode change will produce dirty node pages flushed by checkpoint */ - f2fs_balance_fs(F2FS_I_SB(inode), true); + f2fs_balance_fs(sbi, true); return err; } @@ -1161,7 +1197,7 @@ static int fill_zero(struct inode *inode, pgoff_t index, loff_t start, loff_t len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct page *page; + struct folio *folio; if (!len) return 0; @@ -1169,16 +1205,16 @@ static int fill_zero(struct inode *inode, pgoff_t index, f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); - page = f2fs_get_new_data_page(inode, NULL, index, false); + folio = f2fs_get_new_data_folio(inode, NULL, index, false); f2fs_unlock_op(sbi); - if (IS_ERR(page)) - return PTR_ERR(page); + if (IS_ERR(folio)) + return PTR_ERR(folio); - f2fs_wait_on_page_writeback(page, DATA, true, true); - zero_user(page, start, len); - set_page_dirty(page); - f2fs_put_page(page, 1); + f2fs_folio_wait_writeback(folio, DATA, true, true); + folio_zero_range(folio, start, len); + folio_mark_dirty(folio); + f2fs_folio_put(folio, true); return 0; } @@ -1201,7 +1237,7 @@ int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) return err; } - end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); count = min(end_offset - dn.ofs_in_node, pg_end - pg_start); f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset); @@ -1224,6 +1260,10 @@ static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len) if (ret) return ret; + filemap_invalidate_lock(inode->i_mapping); + f2fs_zero_post_eof_page(inode, offset + len); + filemap_invalidate_unlock(inode->i_mapping); + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; @@ -1296,7 +1336,7 @@ next_dnode: goto next; } - done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, 
inode) - + done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, inode) - dn.ofs_in_node, len); for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) { *blkaddr = f2fs_data_blkaddr(&dn); @@ -1385,7 +1425,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, } ilen = min((pgoff_t) - ADDRS_PER_PAGE(dn.node_page, dst_inode) - + ADDRS_PER_PAGE(dn.node_folio, dst_inode) - dn.ofs_in_node, len - i); do { dn.data_blkaddr = f2fs_data_blkaddr(&dn); @@ -1410,26 +1450,26 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, f2fs_put_dnode(&dn); } else { - struct page *psrc, *pdst; + struct folio *fsrc, *fdst; - psrc = f2fs_get_lock_data_page(src_inode, + fsrc = f2fs_get_lock_data_folio(src_inode, src + i, true); - if (IS_ERR(psrc)) - return PTR_ERR(psrc); - pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i, + if (IS_ERR(fsrc)) + return PTR_ERR(fsrc); + fdst = f2fs_get_new_data_folio(dst_inode, NULL, dst + i, true); - if (IS_ERR(pdst)) { - f2fs_put_page(psrc, 1); - return PTR_ERR(pdst); + if (IS_ERR(fdst)) { + f2fs_folio_put(fsrc, true); + return PTR_ERR(fdst); } - f2fs_wait_on_page_writeback(pdst, DATA, true, true); + f2fs_folio_wait_writeback(fdst, DATA, true, true); - memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE); - set_page_dirty(pdst); - set_page_private_gcing(pdst); - f2fs_put_page(pdst, 1); - f2fs_put_page(psrc, 1); + memcpy_folio(fdst, 0, fsrc, 0, PAGE_SIZE); + folio_mark_dirty(fdst); + folio_set_f2fs_gcing(fdst); + f2fs_folio_put(fdst, true); + f2fs_folio_put(fsrc, true); ret = f2fs_truncate_hole(src_inode, src + i, src + i + 1); @@ -1507,6 +1547,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); + f2fs_zero_post_eof_page(inode, offset + len); + f2fs_lock_op(sbi); f2fs_drop_extent_tree(inode); truncate_pagecache(inode, offset); @@ -1628,6 +1670,10 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, if (ret) return ret; + filemap_invalidate_lock(mapping); + f2fs_zero_post_eof_page(inode, offset + len); + filemap_invalidate_unlock(mapping); + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; @@ -1675,7 +1721,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, goto out; } - end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); end = min(pg_end, end_offset - dn.ofs_in_node + index); ret = f2fs_do_zero_range(&dn, index, end); @@ -1759,6 +1805,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) /* avoid gc operation during block exchange */ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(mapping); + + f2fs_zero_post_eof_page(inode, offset + len); truncate_pagecache(inode, offset); while (!ret && idx > pg_start) { @@ -1816,6 +1864,10 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset, if (err) return err; + filemap_invalidate_lock(inode->i_mapping); + f2fs_zero_post_eof_page(inode, offset + len); + filemap_invalidate_unlock(inode->i_mapping); + f2fs_balance_fs(sbi, true); pg_start = ((unsigned long long)offset) >> PAGE_SHIFT; @@ -1850,9 +1902,8 @@ next_alloc: } } - if (has_not_enough_free_secs(sbi, 0, f2fs_sb_has_blkzoned(sbi) ? 
- ZONED_PIN_SEC_REQUIRED_COUNT : - GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) { + if (has_not_enough_free_secs(sbi, 0, + sbi->reserved_pin_section)) { f2fs_down_write(&sbi->gc_lock); stat_inc_gc_call_count(sbi, FOREGROUND); err = f2fs_gc(sbi, &gc_control); @@ -1990,6 +2041,9 @@ out: static int f2fs_release_file(struct inode *inode, struct file *filp) { + if (atomic_dec_and_test(&F2FS_I(inode)->open_count)) + f2fs_remove_donate_inode(inode); + /* * f2fs_release_file is called at every close calls. So we should * not drop any inmemory pages by close called by other process. @@ -2464,19 +2518,20 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) return ret; } -static void f2fs_keep_noreuse_range(struct inode *inode, +static int f2fs_keep_noreuse_range(struct inode *inode, loff_t offset, loff_t len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); u64 max_bytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode)); u64 start, end; + int ret = 0; if (!S_ISREG(inode->i_mode)) - return; + return 0; if (offset >= max_bytes || len > max_bytes || (offset + len) > max_bytes) - return; + return 0; start = offset >> PAGE_SHIFT; end = DIV_ROUND_UP(offset + len, PAGE_SIZE); @@ -2484,7 +2539,7 @@ static void f2fs_keep_noreuse_range(struct inode *inode, inode_lock(inode); if (f2fs_is_atomic_file(inode)) { inode_unlock(inode); - return; + return 0; } spin_lock(&sbi->inode_lock[DONATE_INODE]); @@ -2493,7 +2548,12 @@ static void f2fs_keep_noreuse_range(struct inode *inode, if (!list_empty(&F2FS_I(inode)->gdonate_list)) { list_del_init(&F2FS_I(inode)->gdonate_list); sbi->donate_files--; - } + if (is_inode_flag_set(inode, FI_DONATE_FINISHED)) + ret = -EALREADY; + else + set_inode_flag(inode, FI_DONATE_FINISHED); + } else + ret = -ENOENT; } else { if (list_empty(&F2FS_I(inode)->gdonate_list)) { list_add_tail(&F2FS_I(inode)->gdonate_list, @@ -2505,9 +2565,12 @@ static void f2fs_keep_noreuse_range(struct inode *inode, } F2FS_I(inode)->donate_start = start; F2FS_I(inode)->donate_end = end - 1; + clear_inode_flag(inode, FI_DONATE_FINISHED); } spin_unlock(&sbi->inode_lock[DONATE_INODE]); inode_unlock(inode); + + return ret; } static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) @@ -2920,19 +2983,19 @@ do_map: idx = map.m_lblk; while (idx < map.m_lblk + map.m_len && cnt < BLKS_PER_SEG(sbi)) { - struct page *page; + struct folio *folio; - page = f2fs_get_lock_data_page(inode, idx, true); - if (IS_ERR(page)) { - err = PTR_ERR(page); + folio = f2fs_get_lock_data_folio(inode, idx, true); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); goto clear_out; } - f2fs_wait_on_page_writeback(page, DATA, true, true); + f2fs_folio_wait_writeback(folio, DATA, true, true); - set_page_dirty(page); - set_page_private_gcing(page); - f2fs_put_page(page, 1); + folio_mark_dirty(folio); + folio_set_f2fs_gcing(folio); + f2fs_folio_put(folio, true); idx++; cnt++; @@ -3344,7 +3407,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid) } #endif -int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int f2fs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct f2fs_inode_info *fi = F2FS_I(inode); @@ -3368,7 +3431,7 @@ int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa) } int f2fs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL; @@ -3711,7 +3774,7 @@ 
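An aside on the POSIX_FADV_NOREUSE rework in the hunks above: f2fs_keep_noreuse_range() is converted from a void helper into one that returns an int (-EALREADY once a donated range has already been reclaimed, -ENOENT when dropping a range that was never registered), and a later hunk in this file makes f2fs_file_fadvise() propagate that result. A minimal userspace sketch of the visible effect; the mount path is hypothetical and only the standard posix_fadvise(2) API is assumed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/cache.bin", O_RDONLY);	/* hypothetical path */
	int err;

	if (fd < 0)
		return 1;

	/* Register [0, 1 MiB) as no-reuse; f2fs tracks it on the donate list. */
	err = posix_fadvise(fd, 0, 1 << 20, POSIX_FADV_NOREUSE);
	if (err)	/* posix_fadvise() returns a positive errno value */
		fprintf(stderr, "POSIX_FADV_NOREUSE: %d\n", err);

	close(fd);
	return err ? 1 : 0;
}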
static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count) int i; for (i = 0; i < count; i++) { - blkaddr = data_blkaddr(dn->inode, dn->node_page, + blkaddr = data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node + i); if (!__is_valid_data_blkaddr(blkaddr)) @@ -3829,7 +3892,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) break; } - end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); count = round_up(count, fi->i_cluster_size); @@ -3880,7 +3943,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count, int i; for (i = 0; i < count; i++) { - blkaddr = data_blkaddr(dn->inode, dn->node_page, + blkaddr = data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node + i); if (!__is_valid_data_blkaddr(blkaddr)) @@ -3897,7 +3960,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count, int ret; for (i = 0; i < cluster_size; i++) { - blkaddr = data_blkaddr(dn->inode, dn->node_page, + blkaddr = data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node + i); if (i == 0) { @@ -4007,7 +4070,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) break; } - end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); count = round_up(count, fi->i_cluster_size); @@ -4171,7 +4234,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg) goto out; } - end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); count = min(end_offset - dn.ofs_in_node, pg_end - index); for (i = 0; i < count; i++, index++, dn.ofs_in_node++) { struct block_device *cur_bdev; @@ -4343,34 +4406,36 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len) { DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx); struct address_space *mapping = inode->i_mapping; - struct page *page; + struct folio *folio; pgoff_t redirty_idx = page_idx; - int i, page_len = 0, ret = 0; + int page_len = 0, ret = 0; page_cache_ra_unbounded(&ractl, len, 0); - for (i = 0; i < len; i++, page_idx++) { - page = read_cache_page(mapping, page_idx, NULL, NULL); - if (IS_ERR(page)) { - ret = PTR_ERR(page); + do { + folio = read_cache_folio(mapping, page_idx, NULL, NULL); + if (IS_ERR(folio)) { + ret = PTR_ERR(folio); break; } - page_len++; - } + page_len += folio_nr_pages(folio) - (page_idx - folio->index); + page_idx = folio_next_index(folio); + } while (page_len < len); - for (i = 0; i < page_len; i++, redirty_idx++) { - page = find_lock_page(mapping, redirty_idx); + do { + folio = filemap_lock_folio(mapping, redirty_idx); - /* It will never fail, when page has pinned above */ - f2fs_bug_on(F2FS_I_SB(inode), !page); + /* It will never fail, when folio has pinned above */ + f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(folio)); - f2fs_wait_on_page_writeback(page, DATA, true, true); + f2fs_folio_wait_writeback(folio, DATA, true, true); - set_page_dirty(page); - set_page_private_gcing(page); - f2fs_put_page(page, 1); - f2fs_put_page(page, 0); - } + folio_mark_dirty(folio); + folio_set_f2fs_gcing(folio); + redirty_idx = folio_next_index(folio); + folio_unlock(folio); + folio_put_refs(folio, 2); + } while (redirty_idx < page_idx); return ret; } @@ -4776,6 +4841,7 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) struct inode 
*inode = file_inode(iocb->ki_filp); const loff_t pos = iocb->ki_pos; ssize_t ret; + bool dio; if (!f2fs_is_compress_backend_ready(inode)) return -EOPNOTSUPP; @@ -4784,12 +4850,15 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos, iov_iter_count(to), READ); + dio = f2fs_should_use_dio(inode, iocb, to); + /* In LFS mode, if there is inflight dio, wait for its completion */ if (f2fs_lfs_mode(F2FS_I_SB(inode)) && - get_pages(F2FS_I_SB(inode), F2FS_DIO_WRITE)) + get_pages(F2FS_I_SB(inode), F2FS_DIO_WRITE) && + (!f2fs_is_pinned_file(inode) || !dio)) inode_dio_wait(inode); - if (f2fs_should_use_dio(inode, iocb, to)) { + if (dio) { ret = f2fs_dio_read_iter(iocb, to); } else { ret = filemap_read(iocb, to, 0); @@ -4797,8 +4866,7 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) f2fs_update_iostat(F2FS_I_SB(inode), inode, APP_BUFFERED_READ_IO, ret); } - if (trace_f2fs_dataread_end_enabled()) - trace_f2fs_dataread_end(inode, pos, ret); + trace_f2fs_dataread_end(inode, pos, ret); return ret; } @@ -4821,8 +4889,7 @@ static ssize_t f2fs_file_splice_read(struct file *in, loff_t *ppos, f2fs_update_iostat(F2FS_I_SB(inode), inode, APP_BUFFERED_READ_IO, ret); - if (trace_f2fs_dataread_end_enabled()) - trace_f2fs_dataread_end(inode, pos, ret); + trace_f2fs_dataread_end(inode, pos, ret); return ret; } @@ -4846,6 +4913,10 @@ static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from) err = file_modified(file); if (err) return err; + + filemap_invalidate_lock(inode->i_mapping); + f2fs_zero_post_eof_page(inode, iocb->ki_pos + iov_iter_count(from)); + filemap_invalidate_unlock(inode->i_mapping); return count; } @@ -5163,8 +5234,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) f2fs_dio_write_iter(iocb, from, &may_need_sync) : f2fs_buffered_write_iter(iocb, from); - if (trace_f2fs_datawrite_end_enabled()) - trace_f2fs_datawrite_end(inode, orig_pos, ret); + trace_f2fs_datawrite_end(inode, orig_pos, ret); } /* Don't leave any preallocated blocks around past i_size. */ @@ -5236,8 +5306,8 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len, f2fs_compressed_file(inode))) f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino); else if (advice == POSIX_FADV_NOREUSE) - f2fs_keep_noreuse_range(inode, offset, len); - return 0; + err = f2fs_keep_noreuse_range(inode, offset, len); + return err; } #ifdef CONFIG_COMPAT @@ -5362,7 +5432,7 @@ const struct file_operations f2fs_file_operations = { .iopoll = iocb_bio_iopoll, .open = f2fs_file_open, .release = f2fs_release_file, - .mmap = f2fs_file_mmap, + .mmap_prepare = f2fs_file_mmap_prepare, .flush = f2fs_file_flush, .fsync = f2fs_sync_file, .fallocate = f2fs_fallocate, diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 2b8f9239bede..098e9f71421e 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -141,10 +141,10 @@ do_gc: FOREGROUND : BACKGROUND); sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) || - gc_control.one_time; + (gc_control.one_time && gc_th->boost_gc_greedy); /* foreground GC was been triggered via f2fs_balance_fs() */ - if (foreground) + if (foreground && !f2fs_sb_has_blkzoned(sbi)) sync_mode = false; gc_control.init_gc_type = sync_mode ? 
FG_GC : BG_GC; @@ -197,6 +197,8 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi) gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME; gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO; + gc_th->boost_gc_multiple = BOOST_GC_MULTIPLE; + gc_th->boost_gc_greedy = GC_GREEDY; if (f2fs_sb_has_blkzoned(sbi)) { gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED; @@ -278,12 +280,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type, { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); - if (p->alloc_mode == SSR) { - p->gc_mode = GC_GREEDY; - p->dirty_bitmap = dirty_i->dirty_segmap[type]; - p->max_search = dirty_i->nr_dirty[type]; - p->ofs_unit = 1; - } else if (p->alloc_mode == AT_SSR) { + if (p->alloc_mode == SSR || p->alloc_mode == AT_SSR) { p->gc_mode = GC_GREEDY; p->dirty_bitmap = dirty_i->dirty_segmap[type]; p->max_search = dirty_i->nr_dirty[type]; @@ -389,14 +386,15 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) } static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, - unsigned int segno, struct victim_sel_policy *p) + unsigned int segno, struct victim_sel_policy *p, + unsigned int valid_thresh_ratio) { if (p->alloc_mode == SSR) return get_seg_entry(sbi, segno)->ckpt_valid_blocks; - if (p->one_time_gc && (get_valid_blocks(sbi, segno, true) >= - CAP_BLKS_PER_SEC(sbi) * sbi->gc_thread->valid_thresh_ratio / - 100)) + if (p->one_time_gc && (valid_thresh_ratio < 100) && + (get_valid_blocks(sbi, segno, true) >= + CAP_BLKS_PER_SEC(sbi) * valid_thresh_ratio / 100)) return UINT_MAX; /* alloc_mode == LFS */ @@ -777,6 +775,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, unsigned int secno, last_victim; unsigned int last_segment; unsigned int nsearched; + unsigned int valid_thresh_ratio = 100; bool is_atgc; int ret = 0; @@ -786,7 +785,11 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, p.alloc_mode = alloc_mode; p.age = age; p.age_threshold = sbi->am.age_threshold; - p.one_time_gc = one_time; + if (one_time) { + p.one_time_gc = one_time; + if (has_enough_free_secs(sbi, 0, NR_PERSISTENT_LOG)) + valid_thresh_ratio = sbi->gc_thread->valid_thresh_ratio; + } retry: select_policy(sbi, gc_type, type, &p); @@ -912,7 +915,7 @@ retry: goto next; } - cost = get_gc_cost(sbi, segno, &p); + cost = get_gc_cost(sbi, segno, &p, valid_thresh_ratio); if (p.min_cost > cost) { p.min_segno = segno; @@ -1045,7 +1048,7 @@ next_step: for (off = 0; off < usable_blks_in_seg; off++, entry++) { nid_t nid = le32_to_cpu(entry->nid); - struct page *node_page; + struct folio *node_folio; struct node_info ni; int err; @@ -1068,27 +1071,27 @@ next_step: } /* phase == 2 */ - node_page = f2fs_get_node_page(sbi, nid); - if (IS_ERR(node_page)) + node_folio = f2fs_get_node_folio(sbi, nid); + if (IS_ERR(node_folio)) continue; - /* block may become invalid during f2fs_get_node_page */ + /* block may become invalid during f2fs_get_node_folio */ if (check_valid_map(sbi, segno, off) == 0) { - f2fs_put_page(node_page, 1); + f2fs_folio_put(node_folio, true); continue; } if (f2fs_get_node_info(sbi, nid, &ni, false)) { - f2fs_put_page(node_page, 1); + f2fs_folio_put(node_folio, true); continue; } if (ni.blk_addr != start_addr + off) { - f2fs_put_page(node_page, 1); + f2fs_folio_put(node_folio, true); continue; } - err = f2fs_move_node_page(node_page, gc_type); + err = f2fs_move_node_folio(node_folio, gc_type); if (!err && gc_type == FG_GC) submitted++; stat_inc_node_blk_count(sbi, 1, gc_type); @@ -1134,7 +1137,7 @@ 
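For the victim-selection changes above: get_gc_cost() now takes a caller-supplied valid_thresh_ratio, and f2fs_get_victim() only lowers it from 100 for one-time GC when enough free sections remain. A standalone sketch of just that filtering arithmetic (not the kernel function itself; the greedy LFS case is simplified and the cost-benefit mode is left out):

#include <limits.h>

/*
 * A section whose valid-block count is already at or above
 * valid_thresh_ratio percent of its capacity is made unselectable
 * for one-time GC by giving it the worst possible cost.
 */
static unsigned int one_time_gc_cost(unsigned int valid_blocks,
				     unsigned int cap_blks_per_sec,
				     unsigned int valid_thresh_ratio)
{
	if (valid_thresh_ratio < 100 &&
	    valid_blocks >= cap_blks_per_sec * valid_thresh_ratio / 100)
		return UINT_MAX;	/* too full: not worth migrating */

	return valid_blocks;		/* greedy: fewer valid blocks wins */
}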
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode) static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, struct node_info *dni, block_t blkaddr, unsigned int *nofs) { - struct page *node_page; + struct folio *node_folio; nid_t nid; unsigned int ofs_in_node, max_addrs, base; block_t source_blkaddr; @@ -1142,12 +1145,12 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, nid = le32_to_cpu(sum->nid); ofs_in_node = le16_to_cpu(sum->ofs_in_node); - node_page = f2fs_get_node_page(sbi, nid); - if (IS_ERR(node_page)) + node_folio = f2fs_get_node_folio(sbi, nid); + if (IS_ERR(node_folio)) return false; if (f2fs_get_node_info(sbi, nid, dni, false)) { - f2fs_put_page(node_page, 1); + f2fs_folio_put(node_folio, true); return false; } @@ -1158,12 +1161,12 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, } if (f2fs_check_nid_range(sbi, dni->ino)) { - f2fs_put_page(node_page, 1); + f2fs_folio_put(node_folio, true); return false; } - if (IS_INODE(node_page)) { - base = offset_in_addr(F2FS_INODE(node_page)); + if (IS_INODE(node_folio)) { + base = offset_in_addr(F2FS_INODE(node_folio)); max_addrs = DEF_ADDRS_PER_INODE; } else { base = 0; @@ -1173,13 +1176,13 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, if (base + ofs_in_node >= max_addrs) { f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u", base, ofs_in_node, max_addrs, dni->ino, dni->nid); - f2fs_put_page(node_page, 1); + f2fs_folio_put(node_folio, true); return false; } - *nofs = ofs_of_node(node_page); - source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node); - f2fs_put_page(node_page, 1); + *nofs = ofs_of_node(node_folio); + source_blkaddr = data_blkaddr(NULL, node_folio, ofs_in_node); + f2fs_folio_put(node_folio, true); if (source_blkaddr != blkaddr) { #ifdef CONFIG_F2FS_CHECK_FS @@ -1205,7 +1208,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index) struct address_space *mapping = f2fs_is_cow_file(inode) ? F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping; struct dnode_of_data dn; - struct page *page; + struct folio *folio; struct f2fs_io_info fio = { .sbi = sbi, .ino = inode->i_ino, @@ -1218,16 +1221,16 @@ static int ra_data_block(struct inode *inode, pgoff_t index) }; int err; - page = f2fs_grab_cache_page(mapping, index, true); - if (!page) - return -ENOMEM; + folio = f2fs_grab_cache_folio(mapping, index, true); + if (IS_ERR(folio)) + return PTR_ERR(folio); if (f2fs_lookup_read_extent_cache_block(inode, index, &dn.data_blkaddr)) { if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, DATA_GENERIC_ENHANCE_READ))) { err = -EFSCORRUPTED; - goto put_page; + goto put_folio; } goto got_it; } @@ -1235,28 +1238,28 @@ static int ra_data_block(struct inode *inode, pgoff_t index) set_new_dnode(&dn, inode, NULL, NULL, 0); err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); if (err) - goto put_page; + goto put_folio; f2fs_put_dnode(&dn); if (!__is_valid_data_blkaddr(dn.data_blkaddr)) { err = -ENOENT; - goto put_page; + goto put_folio; } if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, DATA_GENERIC_ENHANCE))) { err = -EFSCORRUPTED; - goto put_page; + goto put_folio; } got_it: - /* read page */ - fio.page = page; + /* read folio */ + fio.folio = folio; fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr; /* * don't cache encrypted data into meta inode until previous dirty * data has been written back to avoid racing between GC and flush.
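* (the raw ciphertext is read back through the meta inode's mapping below; if a dirty plaintext folio were still in flight, GC could snapshot stale data)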
*/ - f2fs_wait_on_page_writeback(page, DATA, true, true); + f2fs_folio_wait_writeback(folio, DATA, true, true); f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); @@ -1265,14 +1268,14 @@ got_it: FGP_LOCK | FGP_CREAT, GFP_NOFS); if (!fio.encrypted_page) { err = -ENOMEM; - goto put_page; + goto put_folio; } err = f2fs_submit_page_bio(&fio); if (err) goto put_encrypted_page; f2fs_put_page(fio.encrypted_page, 0); - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE); f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE); @@ -1280,8 +1283,8 @@ got_it: return 0; put_encrypted_page: f2fs_put_page(fio.encrypted_page, 1); -put_page: - f2fs_put_page(page, 1); +put_folio: + f2fs_folio_put(folio, true); return err; } @@ -1307,7 +1310,7 @@ static int move_data_block(struct inode *inode, block_t bidx, struct dnode_of_data dn; struct f2fs_summary sum; struct node_info ni; - struct page *page, *mpage; + struct folio *folio, *mfolio; block_t newaddr; int err = 0; bool lfs_mode = f2fs_lfs_mode(fio.sbi); @@ -1316,9 +1319,9 @@ static int move_data_block(struct inode *inode, block_t bidx, CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA; /* do not read out */ - page = f2fs_grab_cache_page(mapping, bidx, false); - if (!page) - return -ENOMEM; + folio = f2fs_grab_cache_folio(mapping, bidx, false); + if (IS_ERR(folio)) + return PTR_ERR(folio); if (!check_valid_map(F2FS_I_SB(inode), segno, off)) { err = -ENOENT; @@ -1335,7 +1338,7 @@ static int move_data_block(struct inode *inode, block_t bidx, goto out; if (unlikely(dn.data_blkaddr == NULL_ADDR)) { - ClearPageUptodate(page); + folio_clear_uptodate(folio); err = -ENOENT; goto put_out; } @@ -1344,7 +1347,7 @@ static int move_data_block(struct inode *inode, block_t bidx, * don't cache encrypted data into meta inode until previous dirty * data has been written back to avoid racing between GC and flush.
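* (same ordering rule as in ra_data_block() above: flush the plaintext first, then copy the ciphertext out of the meta mapping)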
*/ - f2fs_wait_on_page_writeback(page, DATA, true, true); + f2fs_folio_wait_writeback(folio, DATA, true, true); f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); @@ -1353,26 +1356,26 @@ goto put_out; /* read page */ - fio.page = page; + fio.folio = folio; fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr; if (lfs_mode) f2fs_down_write(&fio.sbi->io_order_lock); - mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), + mfolio = f2fs_grab_cache_folio(META_MAPPING(fio.sbi), fio.old_blkaddr, false); - if (!mpage) { - err = -ENOMEM; + if (IS_ERR(mfolio)) { + err = PTR_ERR(mfolio); goto up_out; } - fio.encrypted_page = mpage; + fio.encrypted_page = folio_file_page(mfolio, fio.old_blkaddr); - /* read source block in mpage */ - if (!PageUptodate(mpage)) { + /* read source block in mfolio */ + if (!folio_test_uptodate(mfolio)) { err = f2fs_submit_page_bio(&fio); if (err) { - f2fs_put_page(mpage, 1); + f2fs_folio_put(mfolio, true); goto up_out; } @@ -1381,11 +1384,11 @@ f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE); - lock_page(mpage); - if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || - !PageUptodate(mpage))) { + folio_lock(mfolio); + if (unlikely(!is_meta_folio(mfolio) || + !folio_test_uptodate(mfolio))) { err = -EIO; - f2fs_put_page(mpage, 1); + f2fs_folio_put(mfolio, true); goto up_out; } } @@ -1396,7 +1399,7 @@ err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, &sum, type, NULL); if (err) { - f2fs_put_page(mpage, 1); + f2fs_folio_put(mfolio, true); /* filesystem should shut down, no need to recover the block */ goto up_out; } @@ -1405,15 +1408,15 @@ newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); if (!fio.encrypted_page) { err = -ENOMEM; - f2fs_put_page(mpage, 1); + f2fs_folio_put(mfolio, true); goto recover_block; } /* write target block */ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true); memcpy(page_address(fio.encrypted_page), - page_address(mpage), PAGE_SIZE); - f2fs_put_page(mpage, 1); + folio_address(mfolio), PAGE_SIZE); + f2fs_folio_put(mfolio, true); f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1); @@ -1444,7 +1447,7 @@ up_out: put_out: f2fs_put_dnode(&dn); out: - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); return err; } @@ -1473,7 +1476,7 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type, goto out; } folio_mark_dirty(folio); - set_page_private_gcing(&folio->page); + folio_set_f2fs_gcing(folio); } else { struct f2fs_io_info fio = { .sbi = F2FS_I_SB(inode), @@ -1483,7 +1486,7 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type, .op = REQ_OP_WRITE, .op_flags = REQ_SYNC, .old_blkaddr = NULL_ADDR, - .page = &folio->page, + .folio = folio, .encrypted_page = NULL, .need_lock = LOCK_REQ, .io_type = FS_GC_DATA_IO, @@ -1499,11 +1502,11 @@ retry: f2fs_remove_dirty_inode(inode); } - set_page_private_gcing(&folio->page); + folio_set_f2fs_gcing(folio); err = f2fs_do_write_data_page(&fio); if (err) { - clear_page_private_gcing(&folio->page); + folio_clear_f2fs_gcing(folio); if (err == -ENOMEM) { memalloc_retry_wait(GFP_NOFS); goto retry; @@ -1718,8 +1721,6 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, struct gc_inode_list *gc_list, int gc_type, bool force_migrate, bool one_time) { - struct page *sum_page; - struct
f2fs_summary_block *sum; struct blk_plug plug; unsigned int segno = start_segno; unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi); @@ -1751,7 +1752,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, !has_enough_free_blocks(sbi, sbi->gc_thread->boost_zoned_gc_percent)) window_granularity *= - BOOST_GC_MULTIPLE; + sbi->gc_thread->boost_gc_multiple; end_segno = start_segno + window_granularity; } @@ -1769,40 +1770,40 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, /* reference all summary pages */ while (segno < end_segno) { - sum_page = f2fs_get_sum_page(sbi, segno++); - if (IS_ERR(sum_page)) { - int err = PTR_ERR(sum_page); + struct folio *sum_folio = f2fs_get_sum_folio(sbi, segno++); + if (IS_ERR(sum_folio)) { + int err = PTR_ERR(sum_folio); end_segno = segno - 1; for (segno = start_segno; segno < end_segno; segno++) { - sum_page = find_get_page(META_MAPPING(sbi), + sum_folio = filemap_get_folio(META_MAPPING(sbi), GET_SUM_BLOCK(sbi, segno)); - f2fs_put_page(sum_page, 0); - f2fs_put_page(sum_page, 0); + folio_put_refs(sum_folio, 2); } return err; } - unlock_page(sum_page); + folio_unlock(sum_folio); } blk_start_plug(&plug); for (segno = start_segno; segno < end_segno; segno++) { + struct f2fs_summary_block *sum; /* find segment summary of victim */ - sum_page = find_get_page(META_MAPPING(sbi), + struct folio *sum_folio = filemap_get_folio(META_MAPPING(sbi), GET_SUM_BLOCK(sbi, segno)); - f2fs_put_page(sum_page, 0); if (get_valid_blocks(sbi, segno, false) == 0) goto freed; if (gc_type == BG_GC && __is_large_section(sbi) && migrated >= sbi->migration_granularity) goto skip; - if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi))) + if (!folio_test_uptodate(sum_folio) || + unlikely(f2fs_cp_error(sbi))) goto skip; - sum = page_address(sum_page); + sum = folio_address(sum_folio); if (type != GET_SUM_TYPE((&sum->footer))) { f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT", segno, type, GET_SUM_TYPE((&sum->footer))); @@ -1840,7 +1841,7 @@ freed: (segno + 1 < sec_end_segno) ? segno + 1 : NULL_SEGNO; skip: - f2fs_put_page(sum_page, 0); + folio_put_refs(sum_folio, 2); } if (submitted) @@ -1893,6 +1894,7 @@ gc_more: /* Let's run FG_GC if we don't have enough space.
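* Clearing one_time below also drops the relaxed valid-block threshold that one-shot background passes use for victim selection (see f2fs_get_victim()).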
*/ if (has_not_enough_free_secs(sbi, 0, 0)) { gc_type = FG_GC; + gc_control->one_time = false; /* * For example, if there are many prefree_segments below given @@ -2066,6 +2068,9 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi, .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), }; + if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno))) + continue; + do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false); put_gc_inode(&gc_list); @@ -2271,12 +2276,12 @@ out_drop_write: if (err) return err; - err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE); + err = freeze_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL); if (err) return err; if (f2fs_readonly(sbi->sb)) { - err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE); + err = thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL); if (err) return err; return -EROFS; @@ -2333,6 +2338,6 @@ recover_out: out_err: f2fs_up_write(&sbi->cp_global_sem); f2fs_up_write(&sbi->gc_lock); - thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE); + thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL); return err; } diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index 5c1eaf55e127..24e8b1c27acc 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -68,6 +68,8 @@ struct f2fs_gc_kthread { unsigned int no_zoned_gc_percent; unsigned int boost_zoned_gc_percent; unsigned int valid_thresh_ratio; + unsigned int boost_gc_multiple; + unsigned int boost_gc_greedy; }; struct gc_inode_list { @@ -194,6 +196,7 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi) static inline bool need_to_boost_gc(struct f2fs_sb_info *sbi) { if (f2fs_sb_has_blkzoned(sbi)) - return !has_enough_free_blocks(sbi, LIMIT_BOOST_ZONED_GC); + return !has_enough_free_blocks(sbi, + sbi->gc_thread->boost_zoned_gc_percent); return has_enough_invalid_blocks(sbi); } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index ad92e9008781..58ac831ef704 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -33,9 +33,9 @@ bool f2fs_may_inline_data(struct inode *inode) return !f2fs_post_read_required(inode); } -static bool inode_has_blocks(struct inode *inode, struct page *ipage) +static bool inode_has_blocks(struct inode *inode, struct folio *ifolio) { - struct f2fs_inode *ri = F2FS_INODE(ipage); + struct f2fs_inode *ri = F2FS_INODE(ifolio); int i; if (F2FS_HAS_BLOCKS(inode)) @@ -48,12 +48,12 @@ static bool inode_has_blocks(struct inode *inode, struct page *ipage) return false; } -bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage) +bool f2fs_sanity_check_inline_data(struct inode *inode, struct folio *ifolio) { if (!f2fs_has_inline_data(inode)) return false; - if (inode_has_blocks(inode, ipage)) + if (inode_has_blocks(inode, ifolio)) return false; if (!support_inline_data(inode)) @@ -79,37 +79,37 @@ bool f2fs_may_inline_dentry(struct inode *inode) return true; } -void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage) +void f2fs_do_read_inline_data(struct folio *folio, struct folio *ifolio) { struct inode *inode = folio->mapping->host; if (folio_test_uptodate(folio)) return; - f2fs_bug_on(F2FS_I_SB(inode), folio_index(folio)); + f2fs_bug_on(F2FS_I_SB(inode), folio->index); folio_zero_segment(folio, MAX_INLINE_DATA(inode), folio_size(folio)); /* Copy the whole inline data block */ - memcpy_to_folio(folio, 0, inline_data_addr(inode, ipage), + memcpy_to_folio(folio, 0, inline_data_addr(inode, ifolio), MAX_INLINE_DATA(inode)); if (!folio_test_uptodate(folio)) folio_mark_uptodate(folio); } -void f2fs_truncate_inline_inode(struct inode *inode, - struct page *ipage, u64 from) +void 
f2fs_truncate_inline_inode(struct inode *inode, struct folio *ifolio, + u64 from) { void *addr; if (from >= MAX_INLINE_DATA(inode)) return; - addr = inline_data_addr(inode, ipage); + addr = inline_data_addr(inode, ifolio); - f2fs_wait_on_page_writeback(ipage, NODE, true, true); + f2fs_folio_wait_writeback(ifolio, NODE, true, true); memset(addr + from, 0, MAX_INLINE_DATA(inode) - from); - set_page_dirty(ipage); + folio_mark_dirty(ifolio); if (from == 0) clear_inode_flag(inode, FI_DATA_EXIST); @@ -117,32 +117,32 @@ void f2fs_truncate_inline_inode(struct inode *inode, int f2fs_read_inline_data(struct inode *inode, struct folio *folio) { - struct page *ipage; + struct folio *ifolio; - ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino); - if (IS_ERR(ipage)) { + ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino); + if (IS_ERR(ifolio)) { folio_unlock(folio); - return PTR_ERR(ipage); + return PTR_ERR(ifolio); } if (!f2fs_has_inline_data(inode)) { - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); return -EAGAIN; } - if (folio_index(folio)) + if (folio->index) folio_zero_segment(folio, 0, folio_size(folio)); else - f2fs_do_read_inline_data(folio, ipage); + f2fs_do_read_inline_data(folio, ifolio); if (!folio_test_uptodate(folio)) folio_mark_uptodate(folio); - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); folio_unlock(folio); return 0; } -int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) +int f2fs_convert_inline_folio(struct dnode_of_data *dn, struct folio *folio) { struct f2fs_io_info fio = { .sbi = F2FS_I_SB(dn->inode), @@ -150,7 +150,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) .type = DATA, .op = REQ_OP_WRITE, .op_flags = REQ_SYNC | REQ_PRIO, - .page = page, + .folio = folio, .encrypted_page = NULL, .io_type = FS_DATA_IO, }; @@ -182,20 +182,20 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) return -EFSCORRUPTED; } - f2fs_bug_on(F2FS_P_SB(page), folio_test_writeback(page_folio(page))); + f2fs_bug_on(F2FS_F_SB(folio), folio_test_writeback(folio)); - f2fs_do_read_inline_data(page_folio(page), dn->inode_page); - set_page_dirty(page); + f2fs_do_read_inline_data(folio, dn->inode_folio); + folio_mark_dirty(folio); /* clear dirty state */ - dirty = clear_page_dirty_for_io(page); + dirty = folio_clear_dirty_for_io(folio); /* write data page to try to make data consistent */ - set_page_writeback(page); + folio_start_writeback(folio); fio.old_blkaddr = dn->data_blkaddr; set_inode_flag(dn->inode, FI_HOT_DATA); f2fs_outplace_write_data(dn, &fio); - f2fs_wait_on_page_writeback(page, DATA, true, true); + f2fs_folio_wait_writeback(folio, DATA, true, true); if (dirty) { inode_dec_dirty_pages(dn->inode); f2fs_remove_dirty_inode(dn->inode); @@ -205,8 +205,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) set_inode_flag(dn->inode, FI_APPEND_WRITE); /* clear inline data and flag after data writeback */ - f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0); - clear_page_private_inline(dn->inode_page); + f2fs_truncate_inline_inode(dn->inode, dn->inode_folio, 0); + folio_clear_f2fs_inline(dn->inode_folio); clear_out: stat_dec_inline_inode(dn->inode); clear_inode_flag(dn->inode, FI_INLINE_DATA); @@ -218,7 +218,7 @@ int f2fs_convert_inline_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; - struct page *ipage, *page; + struct folio *ifolio, *folio; int err = 0; if (f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb)) 
@@ -231,28 +231,28 @@ int f2fs_convert_inline_inode(struct inode *inode) if (err) return err; - page = f2fs_grab_cache_page(inode->i_mapping, 0, false); - if (!page) - return -ENOMEM; + folio = f2fs_grab_cache_folio(inode->i_mapping, 0, false); + if (IS_ERR(folio)) + return PTR_ERR(folio); f2fs_lock_op(sbi); - ipage = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(ipage)) { - err = PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(ifolio)) { + err = PTR_ERR(ifolio); goto out; } - set_new_dnode(&dn, inode, ipage, ipage, 0); + set_new_dnode(&dn, inode, ifolio, ifolio, 0); if (f2fs_has_inline_data(inode)) - err = f2fs_convert_inline_page(&dn, page); + err = f2fs_convert_inline_folio(&dn, folio); f2fs_put_dnode(&dn); out: f2fs_unlock_op(sbi); - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); if (!err) f2fs_balance_fs(sbi, dn.node_changed); @@ -263,40 +263,39 @@ out: int f2fs_write_inline_data(struct inode *inode, struct folio *folio) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct page *ipage; + struct folio *ifolio; - ipage = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(ipage)) - return PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(ifolio)) + return PTR_ERR(ifolio); if (!f2fs_has_inline_data(inode)) { - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); return -EAGAIN; } f2fs_bug_on(F2FS_I_SB(inode), folio->index); - f2fs_wait_on_page_writeback(ipage, NODE, true, true); - memcpy_from_folio(inline_data_addr(inode, ipage), + f2fs_folio_wait_writeback(ifolio, NODE, true, true); + memcpy_from_folio(inline_data_addr(inode, ifolio), folio, 0, MAX_INLINE_DATA(inode)); - set_page_dirty(ipage); + folio_mark_dirty(ifolio); f2fs_clear_page_cache_dirty_tag(folio); set_inode_flag(inode, FI_APPEND_WRITE); set_inode_flag(inode, FI_DATA_EXIST); - clear_page_private_inline(ipage); - f2fs_put_page(ipage, 1); + folio_clear_f2fs_inline(ifolio); + f2fs_folio_put(ifolio, true); return 0; } -int f2fs_recover_inline_data(struct inode *inode, struct page *npage) +int f2fs_recover_inline_data(struct inode *inode, struct folio *nfolio) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode *ri = NULL; void *src_addr, *dst_addr; - struct page *ipage; /* * The inline_data recovery policy is as follows.
@@ -306,38 +305,39 @@ int f2fs_recover_inline_data(struct inode *inode, struct page *npage) * x o -> remove data blocks, and then recover inline_data * x x -> recover data blocks */ - if (IS_INODE(npage)) - ri = F2FS_INODE(npage); + if (IS_INODE(nfolio)) + ri = F2FS_INODE(nfolio); if (f2fs_has_inline_data(inode) && ri && (ri->i_inline & F2FS_INLINE_DATA)) { + struct folio *ifolio; process_inline: - ipage = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(ipage)) - return PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(ifolio)) + return PTR_ERR(ifolio); - f2fs_wait_on_page_writeback(ipage, NODE, true, true); + f2fs_folio_wait_writeback(ifolio, NODE, true, true); - src_addr = inline_data_addr(inode, npage); - dst_addr = inline_data_addr(inode, ipage); + src_addr = inline_data_addr(inode, nfolio); + dst_addr = inline_data_addr(inode, ifolio); memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode)); set_inode_flag(inode, FI_INLINE_DATA); set_inode_flag(inode, FI_DATA_EXIST); - set_page_dirty(ipage); - f2fs_put_page(ipage, 1); + folio_mark_dirty(ifolio); + f2fs_folio_put(ifolio, true); return 1; } if (f2fs_has_inline_data(inode)) { - ipage = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(ipage)) - return PTR_ERR(ipage); - f2fs_truncate_inline_inode(inode, ipage, 0); + struct folio *ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(ifolio)) + return PTR_ERR(ifolio); + f2fs_truncate_inline_inode(inode, ifolio, 0); stat_dec_inline_inode(inode); clear_inode_flag(inode, FI_INLINE_DATA); - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) { int ret; @@ -352,50 +352,50 @@ process_inline: struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, const struct f2fs_filename *fname, - struct page **res_page, + struct folio **res_folio, bool use_hash) { struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); struct f2fs_dir_entry *de; struct f2fs_dentry_ptr d; - struct page *ipage; + struct folio *ifolio; void *inline_dentry; - ipage = f2fs_get_inode_page(sbi, dir->i_ino); - if (IS_ERR(ipage)) { - *res_page = ipage; + ifolio = f2fs_get_inode_folio(sbi, dir->i_ino); + if (IS_ERR(ifolio)) { + *res_folio = ifolio; return NULL; } - inline_dentry = inline_data_addr(dir, ipage); + inline_dentry = inline_data_addr(dir, ifolio); make_dentry_ptr_inline(dir, &d, inline_dentry); de = f2fs_find_target_dentry(&d, fname, NULL, use_hash); - unlock_page(ipage); + folio_unlock(ifolio); if (IS_ERR(de)) { - *res_page = ERR_CAST(de); + *res_folio = ERR_CAST(de); de = NULL; } if (de) - *res_page = ipage; + *res_folio = ifolio; else - f2fs_put_page(ipage, 0); + f2fs_folio_put(ifolio, false); return de; } int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, - struct page *ipage) + struct folio *ifolio) { struct f2fs_dentry_ptr d; void *inline_dentry; - inline_dentry = inline_data_addr(inode, ipage); + inline_dentry = inline_data_addr(inode, ifolio); make_dentry_ptr_inline(inode, &d, inline_dentry); f2fs_do_make_empty_dir(inode, parent, &d); - set_page_dirty(ipage); + folio_mark_dirty(ifolio); /* update i_size to MAX_INLINE_DATA */ if (i_size_read(inode) < MAX_INLINE_DATA(inode)) @@ -407,39 +407,39 @@ int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, * NOTE: ifolio is grabbed by the caller, but if any error occurs, we should * release ifolio in this function.
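* (do_convert_inline_dir() takes this path only when i_dir_level is zero; otherwise the entries are re-added one by one via f2fs_move_rehashed_dirents())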
*/ -static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, +static int f2fs_move_inline_dirents(struct inode *dir, struct folio *ifolio, void *inline_dentry) { - struct page *page; + struct folio *folio; struct dnode_of_data dn; struct f2fs_dentry_block *dentry_blk; struct f2fs_dentry_ptr src, dst; int err; - page = f2fs_grab_cache_page(dir->i_mapping, 0, true); - if (!page) { - f2fs_put_page(ipage, 1); - return -ENOMEM; + folio = f2fs_grab_cache_folio(dir->i_mapping, 0, true); + if (IS_ERR(folio)) { + f2fs_folio_put(ifolio, true); + return PTR_ERR(folio); } - set_new_dnode(&dn, dir, ipage, NULL, 0); + set_new_dnode(&dn, dir, ifolio, NULL, 0); err = f2fs_reserve_block(&dn, 0); if (err) goto out; if (unlikely(dn.data_blkaddr != NEW_ADDR)) { f2fs_put_dnode(&dn); - set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK); - f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.", + set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK); + f2fs_warn(F2FS_F_SB(folio), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.", __func__, dir->i_ino, dn.data_blkaddr); - f2fs_handle_error(F2FS_P_SB(page), ERROR_INVALID_BLKADDR); + f2fs_handle_error(F2FS_F_SB(folio), ERROR_INVALID_BLKADDR); err = -EFSCORRUPTED; goto out; } - f2fs_wait_on_page_writeback(page, DATA, true, true); + f2fs_folio_wait_writeback(folio, DATA, true, true); - dentry_blk = page_address(page); + dentry_blk = folio_address(folio); /* * Start by zeroing the full block, to ensure that all unused space is @@ -455,12 +455,12 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max); memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN); - if (!PageUptodate(page)) - SetPageUptodate(page); - set_page_dirty(page); + if (!folio_test_uptodate(folio)) + folio_mark_uptodate(folio); + folio_mark_dirty(folio); /* clear inline dir and flag after data writeback */ - f2fs_truncate_inline_inode(dir, ipage, 0); + f2fs_truncate_inline_inode(dir, ifolio, 0); stat_dec_inline_dir(dir); clear_inode_flag(dir, FI_INLINE_DENTRY); @@ -477,7 +477,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, if (i_size_read(dir) < PAGE_SIZE) f2fs_i_size_write(dir, PAGE_SIZE); out: - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); return err; } @@ -533,7 +533,7 @@ punch_dentry_pages: return err; } -static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage, +static int f2fs_move_rehashed_dirents(struct inode *dir, struct folio *ifolio, void *inline_dentry) { void *backup_dentry; @@ -542,20 +542,20 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage, backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir), MAX_INLINE_DATA(dir), GFP_F2FS_ZERO); if (!backup_dentry) { - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); return -ENOMEM; } memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir)); - f2fs_truncate_inline_inode(dir, ipage, 0); + f2fs_truncate_inline_inode(dir, ifolio, 0); - unlock_page(ipage); + folio_unlock(ifolio); err = f2fs_add_inline_entries(dir, backup_dentry); if (err) goto recover; - lock_page(ipage); + folio_lock(ifolio); stat_dec_inline_dir(dir); clear_inode_flag(dir, FI_INLINE_DENTRY); @@ -571,31 +571,31 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage, kfree(backup_dentry); return 0; recover: - lock_page(ipage); - f2fs_wait_on_page_writeback(ipage, NODE, true, true); + folio_lock(ifolio); + 
f2fs_folio_wait_writeback(ifolio, NODE, true, true); memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir)); f2fs_i_depth_write(dir, 0); f2fs_i_size_write(dir, MAX_INLINE_DATA(dir)); - set_page_dirty(ipage); - f2fs_put_page(ipage, 1); + folio_mark_dirty(ifolio); + f2fs_folio_put(ifolio, true); kfree(backup_dentry); return err; } -static int do_convert_inline_dir(struct inode *dir, struct page *ipage, +static int do_convert_inline_dir(struct inode *dir, struct folio *ifolio, void *inline_dentry) { if (!F2FS_I(dir)->i_dir_level) - return f2fs_move_inline_dirents(dir, ipage, inline_dentry); + return f2fs_move_inline_dirents(dir, ifolio, inline_dentry); else - return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry); + return f2fs_move_rehashed_dirents(dir, ifolio, inline_dentry); } int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); - struct page *ipage; + struct folio *ifolio; struct f2fs_filename fname; void *inline_dentry = NULL; int err = 0; @@ -609,22 +609,22 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry) if (err) goto out; - ipage = f2fs_get_inode_page(sbi, dir->i_ino); - if (IS_ERR(ipage)) { - err = PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(sbi, dir->i_ino); + if (IS_ERR(ifolio)) { + err = PTR_ERR(ifolio); goto out_fname; } - if (f2fs_has_enough_room(dir, ipage, &fname)) { - f2fs_put_page(ipage, 1); + if (f2fs_has_enough_room(dir, ifolio, &fname)) { + f2fs_folio_put(ifolio, true); goto out_fname; } - inline_dentry = inline_data_addr(dir, ipage); + inline_dentry = inline_data_addr(dir, ifolio); - err = do_convert_inline_dir(dir, ipage, inline_dentry); + err = do_convert_inline_dir(dir, ifolio, inline_dentry); if (!err) - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); out_fname: f2fs_free_filename(&fname); out: @@ -636,24 +636,24 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); - struct page *ipage; + struct folio *ifolio; unsigned int bit_pos; void *inline_dentry = NULL; struct f2fs_dentry_ptr d; int slots = GET_DENTRY_SLOTS(fname->disk_name.len); - struct page *page = NULL; + struct folio *folio = NULL; int err = 0; - ipage = f2fs_get_inode_page(sbi, dir->i_ino); - if (IS_ERR(ipage)) - return PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(sbi, dir->i_ino); + if (IS_ERR(ifolio)) + return PTR_ERR(ifolio); - inline_dentry = inline_data_addr(dir, ipage); + inline_dentry = inline_data_addr(dir, ifolio); make_dentry_ptr_inline(dir, &d, inline_dentry); bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max); if (bit_pos >= d.max) { - err = do_convert_inline_dir(dir, ipage, inline_dentry); + err = do_convert_inline_dir(dir, ifolio, inline_dentry); if (err) return err; err = -EAGAIN; @@ -663,19 +663,19 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, if (inode) { f2fs_down_write_nested(&F2FS_I(inode)->i_sem, SINGLE_DEPTH_NESTING); - page = f2fs_init_inode_metadata(inode, dir, fname, ipage); - if (IS_ERR(page)) { - err = PTR_ERR(page); + folio = f2fs_init_inode_metadata(inode, dir, fname, ifolio); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); goto fail; } } - f2fs_wait_on_page_writeback(ipage, NODE, true, true); + f2fs_folio_wait_writeback(ifolio, NODE, true, true); f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash, bit_pos); - set_page_dirty(ipage); + folio_mark_dirty(ifolio); /* we don't need to
mark_inode_dirty now */ if (inode) { @@ -683,9 +683,9 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, /* synchronize inode page's data from inode cache */ if (is_inode_flag_set(inode, FI_NEW_INODE)) - f2fs_update_inode(inode, page); + f2fs_update_inode(inode, folio); - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); } f2fs_update_parent_metadata(dir, inode, 0); @@ -693,12 +693,12 @@ fail: if (inode) f2fs_up_write(&F2FS_I(inode)->i_sem); out: - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); return err; } -void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, - struct inode *dir, struct inode *inode) +void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, + struct folio *folio, struct inode *dir, struct inode *inode) { struct f2fs_dentry_ptr d; void *inline_dentry; @@ -706,18 +706,18 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, unsigned int bit_pos; int i; - lock_page(page); - f2fs_wait_on_page_writeback(page, NODE, true, true); + folio_lock(folio); + f2fs_folio_wait_writeback(folio, NODE, true, true); - inline_dentry = inline_data_addr(dir, page); + inline_dentry = inline_data_addr(dir, folio); make_dentry_ptr_inline(dir, &d, inline_dentry); bit_pos = dentry - d.dentry; for (i = 0; i < slots; i++) __clear_bit_le(bit_pos + i, d.bitmap); - set_page_dirty(page); - f2fs_put_page(page, 1); + folio_mark_dirty(folio); + f2fs_folio_put(folio, true); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); f2fs_mark_inode_dirty_sync(dir, false); @@ -729,21 +729,21 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, bool f2fs_empty_inline_dir(struct inode *dir) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); - struct page *ipage; + struct folio *ifolio; unsigned int bit_pos = 2; void *inline_dentry; struct f2fs_dentry_ptr d; - ipage = f2fs_get_inode_page(sbi, dir->i_ino); - if (IS_ERR(ipage)) + ifolio = f2fs_get_inode_folio(sbi, dir->i_ino); + if (IS_ERR(ifolio)) return false; - inline_dentry = inline_data_addr(dir, ipage); + inline_dentry = inline_data_addr(dir, ifolio); make_dentry_ptr_inline(dir, &d, inline_dentry); bit_pos = find_next_bit_le(d.bitmap, d.max, bit_pos); - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); if (bit_pos < d.max) return false; @@ -755,7 +755,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, struct fscrypt_str *fstr) { struct inode *inode = file_inode(file); - struct page *ipage = NULL; + struct folio *ifolio = NULL; struct f2fs_dentry_ptr d; void *inline_dentry = NULL; int err; @@ -765,17 +765,17 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, if (ctx->pos == d.max) return 0; - ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino); - if (IS_ERR(ipage)) - return PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino); + if (IS_ERR(ifolio)) + return PTR_ERR(ifolio); /* * f2fs_readdir is protected by inode->i_rwsem, so it is safe to access * the inode folio without its lock held. */ - unlock_page(ipage); + folio_unlock(ifolio); - inline_dentry = inline_data_addr(inode, ipage); + inline_dentry = inline_data_addr(inode, ifolio); make_dentry_ptr_inline(inode, &d, inline_dentry); @@ -783,7 +783,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, if (!err) ctx->pos = d.max; - f2fs_put_page(ipage, 0); + f2fs_folio_put(ifolio, false); return err < 0 ?
err : 0; } @@ -794,12 +794,12 @@ int f2fs_inline_data_fiemap(struct inode *inode, __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED | FIEMAP_EXTENT_LAST; struct node_info ni; - struct page *ipage; + struct folio *ifolio; int err = 0; - ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino); - if (IS_ERR(ipage)) - return PTR_ERR(ipage); + ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino); + if (IS_ERR(ifolio)) + return PTR_ERR(ifolio); if ((S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) && !f2fs_has_inline_data(inode)) { @@ -824,11 +824,11 @@ int f2fs_inline_data_fiemap(struct inode *inode, goto out; byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits; - byteaddr += (char *)inline_data_addr(inode, ipage) - - (char *)F2FS_INODE(ipage); + byteaddr += (char *)inline_data_addr(inode, ifolio) - + (char *)F2FS_INODE(ifolio); err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags); trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err); out: - f2fs_put_page(ipage, 1); + f2fs_folio_put(ifolio, true); return err; } diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 83f862578fc8..8c4eafe9ffac 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -34,7 +34,9 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync) if (f2fs_inode_dirtied(inode, sync)) return; - if (f2fs_is_atomic_file(inode)) + /* only an atomic file w/ FI_ATOMIC_COMMITTED can be marked vfs dirty */ + if (f2fs_is_atomic_file(inode) && + !is_inode_flag_set(inode, FI_ATOMIC_COMMITTED)) return; mark_inode_dirty_sync(inode); @@ -66,9 +68,9 @@ void f2fs_set_inode_flags(struct inode *inode) S_ENCRYPTED|S_VERITY|S_CASEFOLD); } -static void __get_inode_rdev(struct inode *inode, struct page *node_page) +static void __get_inode_rdev(struct inode *inode, struct folio *node_folio) { - __le32 *addr = get_dnode_addr(inode, node_page); + __le32 *addr = get_dnode_addr(inode, node_folio); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { @@ -79,9 +81,9 @@ static void __get_inode_rdev(struct inode *inode, struct page *node_page) } } -static void __set_inode_rdev(struct inode *inode, struct page *node_page) +static void __set_inode_rdev(struct inode *inode, struct folio *node_folio) { - __le32 *addr = get_dnode_addr(inode, node_page); + __le32 *addr = get_dnode_addr(inode, node_folio); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { if (old_valid_dev(inode->i_rdev)) { @@ -95,33 +97,34 @@ static void __set_inode_rdev(struct inode *inode, struct page *node_page) } } -static void __recover_inline_status(struct inode *inode, struct page *ipage) +static void __recover_inline_status(struct inode *inode, struct folio *ifolio) { - void *inline_data = inline_data_addr(inode, ipage); + void *inline_data = inline_data_addr(inode, ifolio); __le32 *start = inline_data; __le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32); while (start < end) { if (*start++) { - f2fs_wait_on_page_writeback(ipage, NODE, true, true); + f2fs_folio_wait_writeback(ifolio, NODE, true, true); set_inode_flag(inode, FI_DATA_EXIST); - set_raw_inline(inode, F2FS_INODE(ipage)); - set_page_dirty(ipage); + set_raw_inline(inode, F2FS_INODE(ifolio)); + folio_mark_dirty(ifolio); return; } } return; } -static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page) +static +bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct folio *folio) { - struct f2fs_inode *ri = &F2FS_NODE(page)->i; + struct f2fs_inode *ri =
&F2FS_NODE(folio)->i; if (!f2fs_sb_has_inode_chksum(sbi)) return false; - if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR)) + if (!IS_INODE(folio) || !(ri->i_inline & F2FS_EXTRA_ATTR)) return false; if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize), @@ -131,9 +134,9 @@ static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page return true; } -static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page) +static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct folio *folio) { - struct f2fs_node *node = F2FS_NODE(page); + struct f2fs_node *node = F2FS_NODE(folio); struct f2fs_inode *ri = &node->i; __le32 ino = node->footer.ino; __le32 gen = ri->i_generation; @@ -142,19 +145,18 @@ static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page) unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum); unsigned int cs_size = sizeof(dummy_cs); - chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino, - sizeof(ino)); - chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen)); + chksum = f2fs_chksum(sbi->s_chksum_seed, (__u8 *)&ino, sizeof(ino)); + chksum_seed = f2fs_chksum(chksum, (__u8 *)&gen, sizeof(gen)); - chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset); - chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size); + chksum = f2fs_chksum(chksum_seed, (__u8 *)ri, offset); + chksum = f2fs_chksum(chksum, (__u8 *)&dummy_cs, cs_size); offset += cs_size; - chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset, - F2FS_BLKSIZE - offset); + chksum = f2fs_chksum(chksum, (__u8 *)ri + offset, + F2FS_BLKSIZE - offset); return chksum; } -bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page) +bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct folio *folio) { struct f2fs_inode *ri; __u32 provided, calculated; @@ -163,34 +165,34 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page) return true; #ifdef CONFIG_F2FS_CHECK_FS - if (!f2fs_enable_inode_chksum(sbi, page)) + if (!f2fs_enable_inode_chksum(sbi, folio)) #else - if (!f2fs_enable_inode_chksum(sbi, page) || - PageDirty(page) || - folio_test_writeback(page_folio(page))) + if (!f2fs_enable_inode_chksum(sbi, folio) || + folio_test_dirty(folio) || + folio_test_writeback(folio)) #endif return true; - ri = &F2FS_NODE(page)->i; + ri = &F2FS_NODE(folio)->i; provided = le32_to_cpu(ri->i_inode_checksum); - calculated = f2fs_inode_chksum(sbi, page); + calculated = f2fs_inode_chksum(sbi, folio); if (provided != calculated) f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. 
%x", - page_folio(page)->index, ino_of_node(page), + folio->index, ino_of_node(folio), provided, calculated); return provided == calculated; } -void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page) +void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct folio *folio) { - struct f2fs_inode *ri = &F2FS_NODE(page)->i; + struct f2fs_inode *ri = &F2FS_NODE(folio)->i; - if (!f2fs_enable_inode_chksum(sbi, page)) + if (!f2fs_enable_inode_chksum(sbi, folio)) return; - ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page)); + ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, folio)); } static bool sanity_check_compress_inode(struct inode *inode, @@ -265,24 +267,30 @@ err_level: return false; } -static bool sanity_check_inode(struct inode *inode, struct page *node_page) +static bool sanity_check_inode(struct inode *inode, struct folio *node_folio) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); - struct f2fs_inode *ri = F2FS_INODE(node_page); + struct f2fs_inode *ri = F2FS_INODE(node_folio); unsigned long long iblocks; - iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks); + iblocks = le64_to_cpu(F2FS_INODE(node_folio)->i_blocks); if (!iblocks) { f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.", __func__, inode->i_ino, iblocks); return false; } - if (ino_of_node(node_page) != nid_of_node(node_page)) { + if (ino_of_node(node_folio) != nid_of_node(node_folio)) { f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.", __func__, inode->i_ino, - ino_of_node(node_page), nid_of_node(node_page)); + ino_of_node(node_folio), nid_of_node(node_folio)); + return false; + } + + if (ino_of_node(node_folio) == fi->i_xattr_nid) { + f2fs_warn(sbi, "%s: corrupted inode i_ino=%lx, xnid=%x, run fsck to fix.", + __func__, inode->i_ino, fi->i_xattr_nid); return false; } @@ -347,7 +355,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page) } } - if (f2fs_sanity_check_inline_data(inode, node_page)) { + if (f2fs_sanity_check_inline_data(inode, node_folio)) { f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix", __func__, inode->i_ino, inode->i_mode); return false; @@ -400,7 +408,7 @@ static int do_read_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); - struct page *node_page; + struct folio *node_folio; struct f2fs_inode *ri; projid_t i_projid; @@ -408,11 +416,11 @@ static int do_read_inode(struct inode *inode) if (f2fs_check_nid_range(sbi, inode->i_ino)) return -EINVAL; - node_page = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(node_page)) - return PTR_ERR(node_page); + node_folio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(node_folio)) + return PTR_ERR(node_folio); - ri = F2FS_INODE(node_page); + ri = F2FS_INODE(node_folio); inode->i_mode = le16_to_cpu(ri->i_mode); i_uid_write(inode, le32_to_cpu(ri->i_uid)); @@ -462,8 +470,8 @@ static int do_read_inode(struct inode *inode) fi->i_inline_xattr_size = 0; } - if (!sanity_check_inode(inode, node_page)) { - f2fs_put_page(node_page, 1); + if (!sanity_check_inode(inode, node_folio)) { + f2fs_folio_put(node_folio, true); set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE); return -EFSCORRUPTED; @@ -471,17 +479,17 @@ static int do_read_inode(struct inode *inode) /* check data exist */ if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode)) - 
__recover_inline_status(inode, node_page); + __recover_inline_status(inode, node_folio); /* try to recover cold bit for non-dir inode */ - if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) { - f2fs_wait_on_page_writeback(node_page, NODE, true, true); - set_cold_node(node_page, false); - set_page_dirty(node_page); + if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_folio)) { + f2fs_folio_wait_writeback(node_folio, NODE, true, true); + set_cold_node(node_folio, false); + folio_mark_dirty(node_folio); } /* get rdev by using inline_info */ - __get_inode_rdev(inode, node_page); + __get_inode_rdev(inode, node_folio); if (!f2fs_need_inode_block_update(sbi, inode->i_ino)) fi->last_disk_size = inode->i_size; @@ -524,17 +532,17 @@ static int do_read_inode(struct inode *inode) init_idisk_time(inode); - if (!sanity_check_extent_cache(inode, node_page)) { - f2fs_put_page(node_page, 1); + if (!sanity_check_extent_cache(inode, node_folio)) { + f2fs_folio_put(node_folio, true); f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE); return -EFSCORRUPTED; } /* Need all the flag bits */ - f2fs_init_read_extent_tree(inode, node_page); + f2fs_init_read_extent_tree(inode, node_folio); f2fs_init_age_extent_tree(inode); - f2fs_put_page(node_page, 1); + f2fs_folio_put(node_folio, true); stat_inc_inline_xattr(inode); stat_inc_inline_inode(inode); @@ -651,18 +659,18 @@ retry: return inode; } -void f2fs_update_inode(struct inode *inode, struct page *node_page) +void f2fs_update_inode(struct inode *inode, struct folio *node_folio) { struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_inode *ri; struct extent_tree *et = fi->extent_tree[EX_READ]; - f2fs_wait_on_page_writeback(node_page, NODE, true, true); - set_page_dirty(node_page); + f2fs_folio_wait_writeback(node_folio, NODE, true, true); + folio_mark_dirty(node_folio); f2fs_inode_synced(inode); - ri = F2FS_INODE(node_page); + ri = F2FS_INODE(node_folio); ri->i_mode = cpu_to_le16(inode->i_mode); ri->i_advise = fi->i_advise; @@ -737,27 +745,27 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page) } } - __set_inode_rdev(inode, node_page); + __set_inode_rdev(inode, node_folio); /* deleted inode */ if (inode->i_nlink == 0) - clear_page_private_inline(node_page); + folio_clear_f2fs_inline(node_folio); init_idisk_time(inode); #ifdef CONFIG_F2FS_CHECK_FS - f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page); + f2fs_inode_chksum_set(F2FS_I_SB(inode), node_folio); #endif } void f2fs_update_inode_page(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct page *node_page; + struct folio *node_folio; int count = 0; retry: - node_page = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(node_page)) { - int err = PTR_ERR(node_page); + node_folio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(node_folio)) { + int err = PTR_ERR(node_folio); /* The node block was truncated. 
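* (an -ENOENT from f2fs_get_inode_folio() means there is no on-disk inode block left to update, so there is nothing more to do)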
*/ if (err == -ENOENT) @@ -772,8 +780,8 @@ stop_checkpoint: f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE); return; } - f2fs_update_inode(inode, node_page); - f2fs_put_page(node_page, 1); + f2fs_update_inode(inode, node_folio); + f2fs_folio_put(node_folio, true); } int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) @@ -813,7 +821,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) return 0; } -static void f2fs_remove_donate_inode(struct inode *inode) +void f2fs_remove_donate_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -926,6 +934,19 @@ retry: f2fs_update_inode_page(inode); if (dquot_initialize_needed(inode)) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); + + /* + * If both f2fs_truncate() and f2fs_update_inode_page() failed + * due to a fuzzed/corrupted inode, call f2fs_inode_synced() to + * avoid triggering a later f2fs_bug_on(). + */ + if (is_inode_flag_set(inode, FI_DIRTY_INODE)) { + f2fs_warn(sbi, + "f2fs_evict_inode: inode is dirty, ino:%lu", + inode->i_ino); + f2fs_inode_synced(inode); + set_sbi_flag(sbi, SBI_NEED_FSCK); + } } if (freeze_protected) sb_end_intwrite(inode->i_sb); @@ -942,8 +963,12 @@ no_delete: if (likely(!f2fs_cp_error(sbi) && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))) f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE)); - else - f2fs_inode_synced(inode); + + /* + * In any case, the inode must be removed from the sbi->inode_list[DIRTY_META] + * list to avoid a UAF in f2fs_sync_inode_meta() during checkpoint. + */ + f2fs_inode_synced(inode); /* for the case f2fs_new_inode() was failed, .i_ino is zero, skip it */ if (inode->i_ino) diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 8f8b9b843bdf..b882771e4699 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -414,7 +414,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir, if (is_inode_flag_set(dir, FI_PROJ_INHERIT) && (!projid_eq(F2FS_I(dir)->i_projid, - F2FS_I(old_dentry->d_inode)->i_projid))) + F2FS_I(inode)->i_projid))) return -EXDEV; err = f2fs_dquot_initialize(dir); @@ -447,12 +447,12 @@ out: struct dentry *f2fs_get_parent(struct dentry *child) { - struct page *page; - unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot_name, &page); + struct folio *folio; + unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot_name, &folio); if (!ino) { - if (IS_ERR(page)) - return ERR_CAST(page); + if (IS_ERR(folio)) + return ERR_CAST(folio); return ERR_PTR(-ENOENT); } return d_obtain_alias(f2fs_iget(child->d_sb, ino)); @@ -463,7 +463,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, { struct inode *inode = NULL; struct f2fs_dir_entry *de; - struct page *page; + struct folio *folio; struct dentry *new; nid_t ino = -1; int err = 0; @@ -481,12 +481,12 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, goto out_splice; if (err) goto out; - de = __f2fs_find_entry(dir, &fname, &page); + de = __f2fs_find_entry(dir, &fname, &folio); f2fs_free_filename(&fname); if (!de) { - if (IS_ERR(page)) { - err = PTR_ERR(page); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); goto out; } err = -ENOENT; @@ -494,7 +494,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, } ino = le32_to_cpu(de->ino); - f2fs_put_page(page, 0); + f2fs_folio_put(folio, false); inode = f2fs_iget(dir->i_sb, ino); if (IS_ERR(inode)) { @@ -545,7 +545,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode = d_inode(dentry); struct f2fs_dir_entry *de; - struct page *page; + struct folio *folio; int err; trace_f2fs_unlink_enter(dir, dentry); @@ -562,10 +562,19 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) if (err) goto fail; - de = f2fs_find_entry(dir, &dentry->d_name, &page); + de = f2fs_find_entry(dir, &dentry->d_name, &folio); if (!de) { - if (IS_ERR(page)) - err = PTR_ERR(page); + if (IS_ERR(folio)) + err = PTR_ERR(folio); + goto fail; + } + + if (unlikely(inode->i_nlink == 0)) { + f2fs_warn(F2FS_I_SB(inode), "%s: inode (ino=%lx) has zero i_nlink", + __func__, inode->i_ino); + err = -EFSCORRUPTED; + set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); + f2fs_folio_put(folio, false); goto fail; } @@ -575,10 +584,10 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) err = f2fs_acquire_orphan_inode(sbi); if (err) { f2fs_unlock_op(sbi); - f2fs_put_page(page, 0); + f2fs_folio_put(folio, false); goto fail; } - f2fs_delete_entry(de, page, dir, inode); + f2fs_delete_entry(de, folio, dir, inode); f2fs_unlock_op(sbi); /* VFS negative dentries are incompatible with Encoding and @@ -899,8 +908,8 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); struct inode *whiteout = NULL; - struct page *old_dir_page = NULL; - struct page *old_page, *new_page = NULL; + struct folio *old_dir_folio = NULL; + struct folio *old_folio, *new_folio = NULL; struct f2fs_dir_entry *old_dir_entry = NULL; struct f2fs_dir_entry *old_entry; struct f2fs_dir_entry *new_entry; @@ -914,7 +923,7 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && (!projid_eq(F2FS_I(new_dir)->i_projid, - F2FS_I(old_dentry->d_inode)->i_projid))) + F2FS_I(old_inode)->i_projid))) return -EXDEV; /* @@ -959,18 +968,18 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, } err = -ENOENT; - old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); + old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_folio); if (!old_entry) { - if (IS_ERR(old_page)) - err = PTR_ERR(old_page); + if (IS_ERR(old_folio)) + err = PTR_ERR(old_folio); goto out; } if (old_is_dir && old_dir != new_dir) { - old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page); + old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_folio); if (!old_dir_entry) { - if (IS_ERR(old_dir_page)) - err = PTR_ERR(old_dir_page); + if (IS_ERR(old_dir_folio)) + err = PTR_ERR(old_dir_folio); goto out_old; } } @@ -983,10 +992,10 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, err = -ENOENT; new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, - &new_page); + &new_folio); if (!new_entry) { - if (IS_ERR(new_page)) - err = PTR_ERR(new_page); + if (IS_ERR(new_folio)) + err = PTR_ERR(new_folio); goto out_dir; } @@ -998,8 +1007,8 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, if (err) goto put_out_dir; - f2fs_set_link(new_dir, new_entry, new_page, old_inode); - new_page = NULL; + f2fs_set_link(new_dir, new_entry, new_folio, old_inode); + new_folio = NULL; inode_set_ctime_current(new_inode); f2fs_down_write(&F2FS_I(new_inode)->i_sem); @@ -1038,8 +1047,8 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, inode_set_ctime_current(old_inode); f2fs_mark_inode_dirty_sync(old_inode, false); - f2fs_delete_entry(old_entry, old_page, old_dir, NULL); - old_page = 
NULL; + f2fs_delete_entry(old_entry, old_folio, old_dir, NULL); + old_folio = NULL; if (whiteout) { set_inode_flag(whiteout, FI_INC_LINK); @@ -1055,7 +1064,7 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, } if (old_dir_entry) - f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir); + f2fs_set_link(old_inode, old_dir_entry, old_dir_folio, new_dir); if (old_is_dir) f2fs_i_links_write(old_dir, false); @@ -1076,12 +1085,12 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, put_out_dir: f2fs_unlock_op(sbi); - f2fs_put_page(new_page, 0); + f2fs_folio_put(new_folio, false); out_dir: if (old_dir_entry) - f2fs_put_page(old_dir_page, 0); + f2fs_folio_put(old_dir_folio, false); out_old: - f2fs_put_page(old_page, 0); + f2fs_folio_put(old_folio, false); out: iput(whiteout); return err; @@ -1093,8 +1102,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir); struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); - struct page *old_dir_page, *new_dir_page; - struct page *old_page, *new_page; + struct folio *old_dir_folio, *new_dir_folio; + struct folio *old_folio, *new_folio; struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL; struct f2fs_dir_entry *old_entry, *new_entry; int old_nlink = 0, new_nlink = 0; @@ -1107,10 +1116,10 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && !projid_eq(F2FS_I(new_dir)->i_projid, - F2FS_I(old_dentry->d_inode)->i_projid)) || - (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && + F2FS_I(old_inode)->i_projid)) || + (is_inode_flag_set(old_dir, FI_PROJ_INHERIT) && !projid_eq(F2FS_I(old_dir)->i_projid, - F2FS_I(new_dentry->d_inode)->i_projid))) + F2FS_I(new_inode)->i_projid))) return -EXDEV; err = f2fs_dquot_initialize(old_dir); @@ -1122,17 +1131,17 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, goto out; err = -ENOENT; - old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); + old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_folio); if (!old_entry) { - if (IS_ERR(old_page)) - err = PTR_ERR(old_page); + if (IS_ERR(old_folio)) + err = PTR_ERR(old_folio); goto out; } - new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page); + new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_folio); if (!new_entry) { - if (IS_ERR(new_page)) - err = PTR_ERR(new_page); + if (IS_ERR(new_folio)) + err = PTR_ERR(new_folio); goto out_old; } @@ -1140,20 +1149,20 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, if (old_dir != new_dir) { if (S_ISDIR(old_inode->i_mode)) { old_dir_entry = f2fs_parent_dir(old_inode, - &old_dir_page); + &old_dir_folio); if (!old_dir_entry) { - if (IS_ERR(old_dir_page)) - err = PTR_ERR(old_dir_page); + if (IS_ERR(old_dir_folio)) + err = PTR_ERR(old_dir_folio); goto out_new; } } if (S_ISDIR(new_inode->i_mode)) { new_dir_entry = f2fs_parent_dir(new_inode, - &new_dir_page); + &new_dir_folio); if (!new_dir_entry) { - if (IS_ERR(new_dir_page)) - err = PTR_ERR(new_dir_page); + if (IS_ERR(new_dir_folio)) + err = PTR_ERR(new_dir_folio); goto out_old_dir; } } @@ -1180,14 +1189,14 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, /* update ".." 
@@ -1122,17 +1131,17 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 		goto out;
 
 	err = -ENOENT;
-	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_folio);
 	if (!old_entry) {
-		if (IS_ERR(old_page))
-			err = PTR_ERR(old_page);
+		if (IS_ERR(old_folio))
+			err = PTR_ERR(old_folio);
 		goto out;
 	}
 
-	new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
+	new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_folio);
 	if (!new_entry) {
-		if (IS_ERR(new_page))
-			err = PTR_ERR(new_page);
+		if (IS_ERR(new_folio))
+			err = PTR_ERR(new_folio);
 		goto out_old;
 	}
@@ -1140,20 +1149,20 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (old_dir != new_dir) {
 		if (S_ISDIR(old_inode->i_mode)) {
 			old_dir_entry = f2fs_parent_dir(old_inode,
-							&old_dir_page);
+							&old_dir_folio);
 			if (!old_dir_entry) {
-				if (IS_ERR(old_dir_page))
-					err = PTR_ERR(old_dir_page);
+				if (IS_ERR(old_dir_folio))
+					err = PTR_ERR(old_dir_folio);
 				goto out_new;
 			}
 		}
 
 		if (S_ISDIR(new_inode->i_mode)) {
 			new_dir_entry = f2fs_parent_dir(new_inode,
-							&new_dir_page);
+							&new_dir_folio);
 			if (!new_dir_entry) {
-				if (IS_ERR(new_dir_page))
-					err = PTR_ERR(new_dir_page);
+				if (IS_ERR(new_dir_folio))
+					err = PTR_ERR(new_dir_folio);
 				goto out_old_dir;
 			}
 		}
@@ -1180,14 +1189,14 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 	/* update ".." directory entry info of old dentry */
 	if (old_dir_entry)
-		f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir);
+		f2fs_set_link(old_inode, old_dir_entry, old_dir_folio, new_dir);
 
 	/* update ".." directory entry info of new dentry */
 	if (new_dir_entry)
-		f2fs_set_link(new_inode, new_dir_entry, new_dir_page, old_dir);
+		f2fs_set_link(new_inode, new_dir_entry, new_dir_folio, old_dir);
 
 	/* update directory entry info of old dir inode */
-	f2fs_set_link(old_dir, old_entry, old_page, new_inode);
+	f2fs_set_link(old_dir, old_entry, old_folio, new_inode);
 
 	f2fs_down_write(&F2FS_I(old_inode)->i_sem);
 	if (!old_dir_entry)
@@ -1206,7 +1215,7 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 	f2fs_mark_inode_dirty_sync(old_dir, false);
 
 	/* update directory entry info of new dir inode */
-	f2fs_set_link(new_dir, new_entry, new_page, old_inode);
+	f2fs_set_link(new_dir, new_entry, new_folio, old_inode);
 
 	f2fs_down_write(&F2FS_I(new_inode)->i_sem);
 	if (!new_dir_entry)
@@ -1238,16 +1247,16 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 	return 0;
 out_new_dir:
 	if (new_dir_entry) {
-		f2fs_put_page(new_dir_page, 0);
+		f2fs_folio_put(new_dir_folio, 0);
 	}
 out_old_dir:
 	if (old_dir_entry) {
-		f2fs_put_page(old_dir_page, 0);
+		f2fs_folio_put(old_dir_folio, 0);
 	}
 out_new:
-	f2fs_put_page(new_page, 0);
+	f2fs_folio_put(new_folio, false);
 out_old:
-	f2fs_put_page(old_page, 0);
+	f2fs_folio_put(old_folio, false);
 out:
 	return err;
 }
@@ -1289,19 +1298,19 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
 					   struct inode *inode,
 					   struct delayed_call *done)
 {
-	struct page *page;
+	struct folio *folio;
 	const char *target;
 
 	if (!dentry)
 		return ERR_PTR(-ECHILD);
 
-	page = read_mapping_page(inode->i_mapping, 0, NULL);
-	if (IS_ERR(page))
-		return ERR_CAST(page);
+	folio = read_mapping_folio(inode->i_mapping, 0, NULL);
+	if (IS_ERR(folio))
+		return ERR_CAST(folio);
 
-	target = fscrypt_get_symlink(inode, page_address(page),
+	target = fscrypt_get_symlink(inode, folio_address(folio),
 				     inode->i_sb->s_blocksize, done);
-	put_page(page);
+	folio_put(folio);
 
 	return target;
 }
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 5f15c224bf78..27743b93e186 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -120,25 +120,25 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
 	return res;
 }
 
-static void clear_node_page_dirty(struct page *page)
+static void clear_node_folio_dirty(struct folio *folio)
 {
-	if (PageDirty(page)) {
-		f2fs_clear_page_cache_dirty_tag(page_folio(page));
-		clear_page_dirty_for_io(page);
-		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
+	if (folio_test_dirty(folio)) {
+		f2fs_clear_page_cache_dirty_tag(folio);
+		folio_clear_dirty_for_io(folio);
+		dec_page_count(F2FS_F_SB(folio), F2FS_DIRTY_NODES);
 	}
-	ClearPageUptodate(page);
+	folio_clear_uptodate(folio);
 }
 
-static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
 {
-	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
+	return f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid));
 }
 
-static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+static struct folio *get_next_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
 {
-	struct page *src_page;
-	struct page *dst_page;
+	struct folio *src_folio;
+	struct folio *dst_folio;
 	pgoff_t dst_off;
 	void *src_addr;
 	void *dst_addr;
@@ -147,21 +147,21 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
 
 	/* get current nat block page with lock */
-	src_page = get_current_nat_page(sbi, nid);
-	if (IS_ERR(src_page))
-		return src_page;
-	dst_page = f2fs_grab_meta_page(sbi, dst_off);
-	f2fs_bug_on(sbi, PageDirty(src_page));
-
-	src_addr = page_address(src_page);
-	dst_addr = page_address(dst_page);
+	src_folio = get_current_nat_folio(sbi, nid);
+	if (IS_ERR(src_folio))
+		return src_folio;
+	dst_folio = f2fs_grab_meta_folio(sbi, dst_off);
+	f2fs_bug_on(sbi, folio_test_dirty(src_folio));
+
+	src_addr = folio_address(src_folio);
+	dst_addr = folio_address(dst_folio);
 	memcpy(dst_addr, src_addr, PAGE_SIZE);
 
-	set_page_dirty(dst_page);
-	f2fs_put_page(src_page, 1);
+	folio_mark_dirty(dst_folio);
+	f2fs_folio_put(src_folio, true);
 
 	set_to_next_nat(nm_i, nid);
 
-	return dst_page;
+	return dst_folio;
 }
 
 static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
@@ -185,7 +185,7 @@ static void __free_nat_entry(struct nat_entry *e)
 
 /* must be locked by nat_tree_lock */
 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
-	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
+	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail, bool init_dirty)
 {
 	if (no_fail)
 		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
@@ -195,6 +195,12 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
 	if (raw_ne)
 		node_info_from_raw_nat(&ne->ni, raw_ne);
 
+	if (init_dirty) {
+		INIT_LIST_HEAD(&ne->list);
+		nm_i->nat_cnt[TOTAL_NAT]++;
+		return ne;
+	}
+
 	spin_lock(&nm_i->nat_list_lock);
 	list_add_tail(&ne->list, &nm_i->nat_entries);
 	spin_unlock(&nm_i->nat_list_lock);
@@ -204,14 +210,17 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
 	return ne;
 }
 
-static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
+static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n, bool for_dirty)
 {
 	struct nat_entry *ne;
 
 	ne = radix_tree_lookup(&nm_i->nat_root, n);
 
-	/* for recent accessed nat entry, move it to tail of lru list */
-	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
+	/*
+	 * for recent accessed nat entry which will not be dirtied soon
+	 * later, move it to tail of lru list.
+	 */
+	if (ne && !get_nat_flag(ne, IS_DIRTY) && !for_dirty) {
 		spin_lock(&nm_i->nat_list_lock);
 		if (!list_empty(&ne->list))
 			list_move_tail(&ne->list, &nm_i->nat_entries);
@@ -256,7 +265,7 @@ static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
 }
 
 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
-						struct nat_entry *ne)
+						struct nat_entry *ne, bool init_dirty)
 {
 	struct nat_entry_set *head;
 	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
@@ -279,7 +288,8 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 		goto refresh_list;
 
 	nm_i->nat_cnt[DIRTY_NAT]++;
-	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
+	if (!init_dirty)
+		nm_i->nat_cnt[RECLAIMABLE_NAT]--;
 	set_nat_flag(ne, IS_DIRTY, true);
 refresh_list:
 	spin_lock(&nm_i->nat_list_lock);
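Taken together, the new for_dirty and init_dirty flags let a NAT entry that is about to be dirtied bypass the clean-entry LRU entirely. A sketch of the counter bookkeeping this preserves, assuming the usual invariant that TOTAL_NAT splits into DIRTY_NAT plus RECLAIMABLE_NAT (the flow below is a reading of the hunks, not verbatim source):

/*
 * normal path:  __init_nat_entry(..., init_dirty=false)
 *                   -> TOTAL_NAT++, entry placed on the clean LRU
 *               __set_nat_cache_dirty(..., init_dirty=false)
 *                   -> DIRTY_NAT++, RECLAIMABLE_NAT--
 *
 * init_dirty:   __init_nat_entry(..., init_dirty=true)
 *                   -> TOTAL_NAT++ only, entry never touches the LRU
 *               __set_nat_cache_dirty(..., init_dirty=true)
 *                   -> DIRTY_NAT++ only, since the entry was never
 *                      counted as reclaimable in the first place
 */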
@@ -310,10 +320,9 @@ static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
 							start, nr);
 }
 
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, const struct folio *folio)
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio)
 {
-	return NODE_MAPPING(sbi) == folio->mapping &&
-			IS_DNODE(&folio->page) && is_cold_node(&folio->page);
+	return is_node_folio(folio) && IS_DNODE(folio) && is_cold_node(folio);
 }
 
 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
@@ -325,7 +334,7 @@ void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
 }
 
 static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
-							struct page *page)
+							struct folio *folio)
 {
 	struct fsync_node_entry *fn;
 	unsigned long flags;
@@ -334,8 +343,8 @@ static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
 	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
 					GFP_NOFS, true, NULL);
 
-	get_page(page);
-	fn->page = page;
+	folio_get(folio);
+	fn->folio = folio;
 	INIT_LIST_HEAD(&fn->list);
 
 	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
@@ -348,19 +357,19 @@ static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
 	return seq_id;
 }
 
-void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio)
 {
 	struct fsync_node_entry *fn;
 	unsigned long flags;
 
 	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
 	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
-		if (fn->page == page) {
+		if (fn->folio == folio) {
 			list_del(&fn->list);
 			sbi->fsync_node_num--;
 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 			kmem_cache_free(fsync_node_entry_slab, fn);
-			put_page(page);
+			folio_put(folio);
 			return;
 		}
 	}
@@ -384,7 +393,7 @@ int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
 	bool need = false;
 
 	f2fs_down_read(&nm_i->nat_tree_lock);
-	e = __lookup_nat_cache(nm_i, nid);
+	e = __lookup_nat_cache(nm_i, nid, false);
 	if (e) {
 		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
 				!get_nat_flag(e, HAS_FSYNCED_INODE))
@@ -401,7 +410,7 @@ bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
 	bool is_cp = true;
 
 	f2fs_down_read(&nm_i->nat_tree_lock);
-	e = __lookup_nat_cache(nm_i, nid);
+	e = __lookup_nat_cache(nm_i, nid, false);
 	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
 		is_cp = false;
 	f2fs_up_read(&nm_i->nat_tree_lock);
@@ -415,7 +424,7 @@ bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
 	bool need_update = true;
 
 	f2fs_down_read(&nm_i->nat_tree_lock);
-	e = __lookup_nat_cache(nm_i, ino);
+	e = __lookup_nat_cache(nm_i, ino, false);
 	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
 			(get_nat_flag(e, IS_CHECKPOINTED) ||
 			 get_nat_flag(e, HAS_FSYNCED_INODE)))
@@ -440,9 +449,9 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
 		return;
 
 	f2fs_down_write(&nm_i->nat_tree_lock);
-	e = __lookup_nat_cache(nm_i, nid);
+	e = __lookup_nat_cache(nm_i, nid, false);
 	if (!e)
-		e = __init_nat_entry(nm_i, new, ne, false);
+		e = __init_nat_entry(nm_i, new, ne, false, false);
 	else
 		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
 				nat_get_blkaddr(e) !=
@@ -459,11 +468,13 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *e;
 	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
+	bool init_dirty = false;
 
 	f2fs_down_write(&nm_i->nat_tree_lock);
-	e = __lookup_nat_cache(nm_i, ni->nid);
+	e = __lookup_nat_cache(nm_i, ni->nid, true);
 	if (!e) {
-		e = __init_nat_entry(nm_i, new, NULL, true);
+		init_dirty = true;
+		e = __init_nat_entry(nm_i, new, NULL, true, true);
 		copy_node_info(&e->ni, ni);
 		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
 	} else if (new_blkaddr == NEW_ADDR) {
@@ -499,11 +510,11 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 	nat_set_blkaddr(e, new_blkaddr);
 	if (!__is_valid_data_blkaddr(new_blkaddr))
 		set_nat_flag(e, IS_CHECKPOINTED, false);
-	__set_nat_cache_dirty(nm_i, e);
+	__set_nat_cache_dirty(nm_i, e, init_dirty);
 
 	/* update fsync_mark if its inode nat entry is still alive */
 	if (ni->nid != ni->ino)
-		e = __lookup_nat_cache(nm_i, ni->ino);
+		e = __lookup_nat_cache(nm_i, ni->ino, false);
 	if (e) {
 		if (fsync_done && ni->nid == ni->ino)
 			set_nat_flag(e, HAS_FSYNCED_INODE, true);
@@ -551,24 +562,28 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
 	struct f2fs_journal *journal = curseg->journal;
 	nid_t start_nid = START_NID(nid);
 	struct f2fs_nat_block *nat_blk;
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct f2fs_nat_entry ne;
 	struct nat_entry *e;
 	pgoff_t index;
-	block_t blkaddr;
 	int i;
+	bool need_cache = true;
 
 	ni->flag = 0;
 	ni->nid = nid;
 retry:
 	/* Check nat cache */
 	f2fs_down_read(&nm_i->nat_tree_lock);
-	e = __lookup_nat_cache(nm_i, nid);
+	e = __lookup_nat_cache(nm_i, nid, false);
 	if (e) {
 		ni->ino = nat_get_ino(e);
 		ni->blk_addr = nat_get_blkaddr(e);
 		ni->version = nat_get_version(e);
 		f2fs_up_read(&nm_i->nat_tree_lock);
+		if (IS_ENABLED(CONFIG_F2FS_CHECK_FS)) {
+			need_cache = false;
+			goto sanity_check;
+		}
 		return 0;
 	}
@@ -594,38 +609,47 @@ retry:
 	up_read(&curseg->journal_rwsem);
 	if (i >= 0) {
 		f2fs_up_read(&nm_i->nat_tree_lock);
-		goto cache;
+		goto sanity_check;
 	}
 
 	/* Fill node_info from nat page */
 	index = current_nat_addr(sbi, nid);
 	f2fs_up_read(&nm_i->nat_tree_lock);
 
-	page = f2fs_get_meta_page(sbi, index);
-	if (IS_ERR(page))
-		return PTR_ERR(page);
+	folio = f2fs_get_meta_folio(sbi, index);
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);
 
-	nat_blk = (struct f2fs_nat_block *)page_address(page);
+	nat_blk = folio_address(folio);
 	ne = nat_blk->entries[nid - start_nid];
 	node_info_from_raw_nat(ni, &ne);
-	f2fs_put_page(page, 1);
-cache:
-	blkaddr = le32_to_cpu(ne.block_addr);
-	if (__is_valid_data_blkaddr(blkaddr) &&
-		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
-		return -EFAULT;
+	f2fs_folio_put(folio, true);
+sanity_check:
+	if (__is_valid_data_blkaddr(ni->blk_addr) &&
+		!f2fs_is_valid_blkaddr(sbi, ni->blk_addr,
+					DATA_GENERIC_ENHANCE)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_err_ratelimited(sbi,
+			"f2fs_get_node_info of %pS: inconsistent nat entry, "
+			"ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
+			__builtin_return_address(0),
+			ni->ino, ni->nid, ni->blk_addr, ni->version, ni->flag);
+		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
+		return -EFSCORRUPTED;
+	}
 
 	/* cache nat entry */
-	cache_nat_entry(sbi, nid, &ne);
+	if (need_cache)
+		cache_nat_entry(sbi, nid, &ne);
 	return 0;
 }
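f2fs_get_node_info() now funnels every lookup source through a single sanity check and only caches entries it actually had to read. A condensed view of the function's tail (fragment reusing the variables declared in the hunk above; not a drop-in replacement):

sanity_check:
	if (__is_valid_data_blkaddr(ni->blk_addr) &&
	    !f2fs_is_valid_blkaddr(sbi, ni->blk_addr, DATA_GENERIC_ENHANCE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);	/* inconsistent NAT entry */
		return -EFSCORRUPTED;
	}
	if (need_cache)		/* false on a CONFIG_F2FS_CHECK_FS cache hit */
		cache_nat_entry(sbi, nid, &ne);
	return 0;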
 /*
  * readahead MAX_RA_NODE number of node pages.
  */
-static void f2fs_ra_node_pages(struct page *parent, int start, int n)
+static void f2fs_ra_node_pages(struct folio *parent, int start, int n)
 {
-	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+	struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
 	struct blk_plug plug;
 	int i, end;
 	nid_t nid;
@@ -754,6 +778,8 @@ got:
 	return level;
 }
 
+static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start);
+
 /*
  * Caller should call f2fs_put_dnode(dn).
  * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
@@ -762,8 +788,8 @@ got:
 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
-	struct page *npage[4];
-	struct page *parent = NULL;
+	struct folio *nfolio[4];
+	struct folio *parent = NULL;
 	int offset[4];
 	unsigned int noffset[4];
 	nid_t nids[4];
@@ -775,31 +801,42 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 		return level;
 
 	nids[0] = dn->inode->i_ino;
-	npage[0] = dn->inode_page;
 
-	if (!npage[0]) {
-		npage[0] = f2fs_get_inode_page(sbi, nids[0]);
-		if (IS_ERR(npage[0]))
-			return PTR_ERR(npage[0]);
+	if (!dn->inode_folio) {
+		nfolio[0] = f2fs_get_inode_folio(sbi, nids[0]);
+		if (IS_ERR(nfolio[0]))
+			return PTR_ERR(nfolio[0]);
+	} else {
+		nfolio[0] = dn->inode_folio;
 	}
 
 	/* if inline_data is set, should not report any block indices */
 	if (f2fs_has_inline_data(dn->inode) && index) {
 		err = -ENOENT;
-		f2fs_put_page(npage[0], 1);
+		f2fs_folio_put(nfolio[0], true);
 		goto release_out;
 	}
 
-	parent = npage[0];
+	parent = nfolio[0];
 	if (level != 0)
 		nids[1] = get_nid(parent, offset[0], true);
-	dn->inode_page = npage[0];
-	dn->inode_page_locked = true;
+	dn->inode_folio = nfolio[0];
+	dn->inode_folio_locked = true;
 
 	/* get indirect or direct nodes */
 	for (i = 1; i <= level; i++) {
 		bool done = false;
 
+		if (nids[i] && nids[i] == dn->inode->i_ino) {
+			err = -EFSCORRUPTED;
+			f2fs_err_ratelimited(sbi,
+				"inode mapping table is corrupted, run fsck to fix it, "
+				"ino:%lu, nid:%u, level:%d, offset:%d",
+				dn->inode->i_ino, nids[i], level, offset[level]);
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			goto release_pages;
+		}
+
 		if (!nids[i] && mode == ALLOC_NODE) {
 			/* alloc new node */
 			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
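The new in-loop check rejects a node chain that points back at the owning inode, which previously could send the walk through the inode block as if it were an indirect node. The invariant, restated as a hypothetical predicate (only f2fs types appear; the helper itself does not exist in the tree):

static bool f2fs_valid_child_nid(const struct dnode_of_data *dn, nid_t nid)
{
	/*
	 * a nid reached while walking an inode's node tree must never be
	 * the inode itself; that would make the mapping table cyclic
	 */
	return !nid || nid != dn->inode->i_ino;
}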
@@ -808,10 +845,10 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 			}
 
 			dn->nid = nids[i];
-			npage[i] = f2fs_new_node_page(dn, noffset[i]);
-			if (IS_ERR(npage[i])) {
+			nfolio[i] = f2fs_new_node_folio(dn, noffset[i]);
+			if (IS_ERR(nfolio[i])) {
 				f2fs_alloc_nid_failed(sbi, nids[i]);
-				err = PTR_ERR(npage[i]);
+				err = PTR_ERR(nfolio[i]);
 				goto release_pages;
 			}
@@ -819,36 +856,36 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 			f2fs_alloc_nid_done(sbi, nids[i]);
 			done = true;
 		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
-			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
-			if (IS_ERR(npage[i])) {
-				err = PTR_ERR(npage[i]);
+			nfolio[i] = f2fs_get_node_folio_ra(parent, offset[i - 1]);
+			if (IS_ERR(nfolio[i])) {
+				err = PTR_ERR(nfolio[i]);
 				goto release_pages;
 			}
 			done = true;
 		}
 		if (i == 1) {
-			dn->inode_page_locked = false;
-			unlock_page(parent);
+			dn->inode_folio_locked = false;
+			folio_unlock(parent);
 		} else {
-			f2fs_put_page(parent, 1);
+			f2fs_folio_put(parent, true);
 		}
 
 		if (!done) {
-			npage[i] = f2fs_get_node_page(sbi, nids[i]);
-			if (IS_ERR(npage[i])) {
-				err = PTR_ERR(npage[i]);
-				f2fs_put_page(npage[0], 0);
+			nfolio[i] = f2fs_get_node_folio(sbi, nids[i]);
+			if (IS_ERR(nfolio[i])) {
+				err = PTR_ERR(nfolio[i]);
+				f2fs_folio_put(nfolio[0], false);
 				goto release_out;
 			}
 		}
 		if (i < level) {
-			parent = npage[i];
+			parent = nfolio[i];
 			nids[i + 1] = get_nid(parent, offset[i], false);
 		}
 	}
 	dn->nid = nids[level];
 	dn->ofs_in_node = offset[level];
-	dn->node_page = npage[level];
+	dn->node_folio = nfolio[level];
 	dn->data_blkaddr = f2fs_data_blkaddr(dn);
 
 	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
@@ -869,9 +906,9 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 		if (!c_len)
 			goto out;
 
-		blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
+		blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node);
 		if (blkaddr == COMPRESS_ADDR)
-			blkaddr = data_blkaddr(dn->inode, dn->node_page,
+			blkaddr = data_blkaddr(dn->inode, dn->node_folio,
 						ofs_in_node + 1);
 
 		f2fs_update_read_extent_tree_range_compressed(dn->inode,
@@ -881,12 +918,12 @@ out:
 	return 0;
 
 release_pages:
-	f2fs_put_page(parent, 1);
+	f2fs_folio_put(parent, true);
 	if (i > 1)
-		f2fs_put_page(npage[0], 0);
+		f2fs_folio_put(nfolio[0], false);
 release_out:
-	dn->inode_page = NULL;
-	dn->node_page = NULL;
+	dn->inode_folio = NULL;
+	dn->node_folio = NULL;
 	if (err == -ENOENT) {
 		dn->cur_level = i;
 		dn->max_level = level;
@@ -927,16 +964,16 @@ static int truncate_node(struct dnode_of_data *dn)
 		f2fs_inode_synced(dn->inode);
 	}
 
-	clear_node_page_dirty(dn->node_page);
+	clear_node_folio_dirty(dn->node_folio);
 	set_sbi_flag(sbi, SBI_IS_DIRTY);
 
-	index = page_folio(dn->node_page)->index;
-	f2fs_put_page(dn->node_page, 1);
+	index = dn->node_folio->index;
+	f2fs_folio_put(dn->node_folio, true);
 
 	invalidate_mapping_pages(NODE_MAPPING(sbi), index, index);
 
-	dn->node_page = NULL;
+	dn->node_folio = NULL;
 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
 
 	return 0;
@@ -945,35 +982,35 @@ static int truncate_node(struct dnode_of_data *dn)
 static int truncate_dnode(struct dnode_of_data *dn)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
-	struct page *page;
+	struct folio *folio;
 	int err;
 
 	if (dn->nid == 0)
 		return 1;
 
 	/* get direct node */
-	page = f2fs_get_node_page(sbi, dn->nid);
-	if (PTR_ERR(page) == -ENOENT)
+	folio = f2fs_get_node_folio(sbi, dn->nid);
+	if (PTR_ERR(folio) == -ENOENT)
 		return 1;
-	else if (IS_ERR(page))
-		return PTR_ERR(page);
+	else if (IS_ERR(folio))
+		return PTR_ERR(folio);
 
-	if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
+	if (IS_INODE(folio) || ino_of_node(folio) != dn->inode->i_ino) {
 		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
-				dn->inode->i_ino, dn->nid, ino_of_node(page));
+				dn->inode->i_ino, dn->nid, ino_of_node(folio));
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
-		f2fs_put_page(page, 1);
+		f2fs_folio_put(folio, true);
 		return -EFSCORRUPTED;
 	}
 
 	/* Make dnode_of_data for parameter */
-	dn->node_page = page;
+	dn->node_folio = folio;
 	dn->ofs_in_node = 0;
 	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
 	err = truncate_node(dn);
 	if (err) {
-		f2fs_put_page(page, 1);
+		f2fs_folio_put(folio, true);
 		return err;
 	}
@@ -984,7 +1021,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
 						int ofs, int depth)
 {
 	struct dnode_of_data rdn = *dn;
-	struct page *page;
+	struct folio *folio;
 	struct f2fs_node *rn;
 	nid_t child_nid;
 	unsigned int child_nofs;
@@ -996,15 +1033,15 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
 
 	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
 
-	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
-	if (IS_ERR(page)) {
-		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
-		return PTR_ERR(page);
+	folio = f2fs_get_node_folio(F2FS_I_SB(dn->inode), dn->nid);
+	if (IS_ERR(folio)) {
+		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(folio));
+		return PTR_ERR(folio);
 	}
 
-	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
+	f2fs_ra_node_pages(folio, ofs, NIDS_PER_BLOCK);
 
-	rn = F2FS_NODE(page);
+	rn = F2FS_NODE(folio);
 	if (depth < 3) {
 		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
 			child_nid = le32_to_cpu(rn->in.nid[i]);
@@ -1014,7 +1051,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
 			ret = truncate_dnode(&rdn);
 			if (ret < 0)
 				goto out_err;
-			if (set_nid(page, i, 0, false))
+			if (set_nid(folio, i, 0, false))
 				dn->node_changed = true;
 		}
 	} else {
@@ -1028,7 +1065,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
 			rdn.nid = child_nid;
 			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
 			if (ret == (NIDS_PER_BLOCK + 1)) {
-				if (set_nid(page, i, 0, false))
+				if (set_nid(folio, i, 0, false))
 					dn->node_changed = true;
 				child_nofs += ret;
 			} else if (ret < 0 && ret != -ENOENT) {
@@ -1040,19 +1077,19 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
 
 	if (!ofs) {
 		/* remove current indirect node */
-		dn->node_page = page;
+		dn->node_folio = folio;
 		ret = truncate_node(dn);
 		if (ret)
 			goto out_err;
 		freed++;
 	} else {
-		f2fs_put_page(page, 1);
+		f2fs_folio_put(folio, true);
 	}
 	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
 	return freed;
 
 out_err:
-	f2fs_put_page(page, 1);
+	f2fs_folio_put(folio, true);
 	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
 	return ret;
 }
@@ -1060,59 +1097,59 @@ out_err:
 static int truncate_partial_nodes(struct dnode_of_data *dn,
 			struct f2fs_inode *ri, int *offset, int depth)
 {
-	struct page *pages[2];
+	struct folio *folios[2];
 	nid_t nid[3];
 	nid_t child_nid;
 	int err = 0;
 	int i;
 	int idx = depth - 2;
 
-	nid[0] = get_nid(dn->inode_page, offset[0], true);
+	nid[0] = get_nid(dn->inode_folio, offset[0], true);
 	if (!nid[0])
 		return 0;
 
 	/* get indirect nodes in the path */
 	for (i = 0; i < idx + 1; i++) {
 		/* reference count'll be increased */
-		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
-		if (IS_ERR(pages[i])) {
-			err = PTR_ERR(pages[i]);
+		folios[i] = f2fs_get_node_folio(F2FS_I_SB(dn->inode), nid[i]);
+		if (IS_ERR(folios[i])) {
+			err = PTR_ERR(folios[i]);
 			idx = i - 1;
 			goto fail;
 		}
-		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
+		nid[i + 1] = get_nid(folios[i], offset[i + 1], false);
 	}
 
-	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
+	f2fs_ra_node_pages(folios[idx], offset[idx + 1], NIDS_PER_BLOCK);
 
 	/* free direct nodes linked to a partial indirect node */
 	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
-		child_nid = get_nid(pages[idx], i, false);
+		child_nid = get_nid(folios[idx], i, false);
 		if (!child_nid)
 			continue;
 		dn->nid = child_nid;
 		err = truncate_dnode(dn);
 		if (err < 0)
 			goto fail;
-		if (set_nid(pages[idx], i, 0, false))
+		if (set_nid(folios[idx], i, 0, false))
 			dn->node_changed = true;
 	}
 
 	if (offset[idx + 1] == 0) {
-		dn->node_page = pages[idx];
+		dn->node_folio = folios[idx];
 		dn->nid = nid[idx];
 		err = truncate_node(dn);
 		if (err)
 			goto fail;
 	} else {
-		f2fs_put_page(pages[idx], 1);
+		f2fs_folio_put(folios[idx], true);
 	}
 	offset[idx]++;
 	offset[idx + 1] = 0;
 	idx--;
 fail:
 	for (i = idx; i >= 0; i--)
-		f2fs_put_page(pages[i], 1);
+		f2fs_folio_put(folios[i], true);
 
 	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
 
@@ -1153,10 +1190,10 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
 		return PTR_ERR(folio);
 	}
 
-	set_new_dnode(&dn, inode, &folio->page, NULL, 0);
+	set_new_dnode(&dn, inode, folio, NULL, 0);
 	folio_unlock(folio);
 
-	ri = F2FS_INODE(&folio->page);
+	ri = F2FS_INODE(folio);
 	switch (level) {
 	case 0:
 	case 1:
@@ -1185,7 +1222,7 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
 
 skip_partial:
 	while (cont) {
-		dn.nid = get_nid(&folio->page, offset[0], true);
+		dn.nid = get_nid(folio, offset[0], true);
 		switch (offset[0]) {
 		case NODE_DIR1_BLOCK:
 		case NODE_DIR2_BLOCK:
@@ -1217,10 +1254,10 @@ skip_partial:
 		}
 		if (err < 0)
 			goto fail;
-		if (offset[1] == 0 && get_nid(&folio->page, offset[0], true)) {
+		if (offset[1] == 0 && get_nid(folio, offset[0], true)) {
 			folio_lock(folio);
-			BUG_ON(folio->mapping != NODE_MAPPING(sbi));
-			set_nid(&folio->page, offset[0], 0, true);
+			BUG_ON(!is_node_folio(folio));
+			set_nid(folio, offset[0], 0, true);
 			folio_unlock(folio);
 		}
 		offset[1] = 0;
@@ -1239,20 +1276,20 @@ int f2fs_truncate_xattr_node(struct inode *inode)
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	nid_t nid = F2FS_I(inode)->i_xattr_nid;
 	struct dnode_of_data dn;
-	struct page *npage;
+	struct folio *nfolio;
 	int err;
 
 	if (!nid)
 		return 0;
 
-	npage = f2fs_get_xnode_page(sbi, nid);
-	if (IS_ERR(npage))
-		return PTR_ERR(npage);
+	nfolio = f2fs_get_xnode_folio(sbi, nid);
+	if (IS_ERR(nfolio))
+		return PTR_ERR(nfolio);
 
-	set_new_dnode(&dn, inode, NULL, npage, nid);
+	set_new_dnode(&dn, inode, NULL, nfolio, nid);
 	err = truncate_node(&dn);
 	if (err) {
-		f2fs_put_page(npage, 1);
+		f2fs_folio_put(nfolio, true);
 		return err;
 	}
@@ -1309,30 +1346,30 @@ int f2fs_remove_inode_page(struct inode *inode)
 	return 0;
 }
 
-struct page *f2fs_new_inode_page(struct inode *inode)
+struct folio *f2fs_new_inode_folio(struct inode *inode)
 {
 	struct dnode_of_data dn;
 
 	/* allocate inode page for new inode */
 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
 
-	/* caller should f2fs_put_page(page, 1); */
-	return f2fs_new_node_page(&dn, 0);
+	/* caller should f2fs_folio_put(folio, true); */
+	return f2fs_new_node_folio(&dn, 0);
 }
 
-struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct node_info new_ni;
-	struct page *page;
+	struct folio *folio;
 	int err;
 
 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
 		return ERR_PTR(-EPERM);
 
-	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
-	if (!page)
-		return ERR_PTR(-ENOMEM);
+	folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), dn->nid, false);
+	if (IS_ERR(folio))
+		return folio;
 
 	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
 		goto fail;
@@ -1348,7 +1385,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
 		dec_valid_node_count(sbi, dn->inode, !ofs);
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_warn_ratelimited(sbi,
-			"f2fs_new_node_page: inconsistent nat entry, "
+			"f2fs_new_node_folio: inconsistent nat entry, "
 			"ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
 			new_ni.ino, new_ni.nid, new_ni.blk_addr,
 			new_ni.version, new_ni.flag);
@@ -1363,12 +1400,12 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
 	new_ni.version = 0;
 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 
-	f2fs_wait_on_page_writeback(page, NODE, true, true);
-	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
-	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
-	if (!PageUptodate(page))
-		SetPageUptodate(page);
-	if (set_page_dirty(page))
+	f2fs_folio_wait_writeback(folio, NODE, true, true);
+	fill_node_footer(folio, dn->nid, dn->inode->i_ino, ofs, true);
+	set_cold_node(folio, S_ISDIR(dn->inode->i_mode));
+	if (!folio_test_uptodate(folio))
+		folio_mark_uptodate(folio);
+	if (folio_mark_dirty(folio))
 		dn->node_changed = true;
 
 	if (f2fs_has_xattr_block(ofs))
@@ -1376,35 +1413,34 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
 	if (ofs == 0)
 		inc_valid_inode_count(sbi);
-	return page;
+	return folio;
 
 fail:
-	clear_node_page_dirty(page);
-	f2fs_put_page(page, 1);
+	clear_node_folio_dirty(folio);
+	f2fs_folio_put(folio, true);
 	return ERR_PTR(err);
 }
 
 /*
  * Caller should do after getting the following values.
- * 0: f2fs_put_page(page, 0)
- * LOCKED_PAGE or error: f2fs_put_page(page, 1)
+ * 0: f2fs_folio_put(folio, false)
+ * LOCKED_PAGE or error: f2fs_folio_put(folio, true)
  */
-static int read_node_page(struct page *page, blk_opf_t op_flags)
+static int read_node_folio(struct folio *folio, blk_opf_t op_flags)
 {
-	struct folio *folio = page_folio(page);
-	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+	struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
 	struct node_info ni;
 	struct f2fs_io_info fio = {
 		.sbi = sbi,
 		.type = NODE,
 		.op = REQ_OP_READ,
 		.op_flags = op_flags,
-		.page = page,
+		.folio = folio,
 		.encrypted_page = NULL,
 	};
 	int err;
 
 	if (folio_test_uptodate(folio)) {
-		if (!f2fs_inode_chksum_verify(sbi, page)) {
+		if (!f2fs_inode_chksum_verify(sbi, folio)) {
 			folio_clear_uptodate(folio);
 			return -EFSBADCRC;
 		}
@@ -1436,7 +1472,7 @@ static int read_node_page(struct page *page, blk_opf_t op_flags)
  */
 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 {
-	struct page *apage;
+	struct folio *afolio;
 	int err;
 
 	if (!nid)
@@ -1444,32 +1480,32 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 	if (f2fs_check_nid_range(sbi, nid))
 		return;
 
-	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
-	if (apage)
+	afolio = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
+	if (afolio)
 		return;
 
-	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
-	if (!apage)
+	afolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
+	if (IS_ERR(afolio))
 		return;
 
-	err = read_node_page(apage, REQ_RAHEAD);
-	f2fs_put_page(apage, err ? 1 : 0);
+	err = read_node_folio(afolio, REQ_RAHEAD);
+	f2fs_folio_put(afolio, err ? true : false);
 }
 
 static int sanity_check_node_footer(struct f2fs_sb_info *sbi,
-					struct page *page, pgoff_t nid,
+					struct folio *folio, pgoff_t nid,
 					enum node_type ntype)
 {
-	if (unlikely(nid != nid_of_node(page) ||
-		(ntype == NODE_TYPE_INODE && !IS_INODE(page)) ||
+	if (unlikely(nid != nid_of_node(folio) ||
+		(ntype == NODE_TYPE_INODE && !IS_INODE(folio)) ||
 		(ntype == NODE_TYPE_XATTR &&
-		!f2fs_has_xattr_block(ofs_of_node(page))) ||
+		!f2fs_has_xattr_block(ofs_of_node(folio))) ||
 		time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))) {
 		f2fs_warn(sbi, "inconsistent node block, node_type:%d, nid:%lu, "
 			  "node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
-			  ntype, nid, nid_of_node(page), ino_of_node(page),
-			  ofs_of_node(page), cpver_of_node(page),
-			  next_blkaddr_of_node(page));
+			  ntype, nid, nid_of_node(folio), ino_of_node(folio),
+			  ofs_of_node(folio), cpver_of_node(folio),
+			  next_blkaddr_of_node(folio));
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
 		return -EFSCORRUPTED;
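sanity_check_node_footer() is now type-aware: the caller states which kind of node it expects and the folio's footer must agree. The matching rules in isolation, as a hypothetical predicate (error logging and fault injection stripped; all accessors are the ones used in the hunk):

static bool node_footer_matches(const struct folio *folio, pgoff_t nid,
				enum node_type ntype)
{
	if (nid != nid_of_node(folio))
		return false;		/* wrong node id */
	if (ntype == NODE_TYPE_INODE && !IS_INODE(folio))
		return false;		/* expected an inode block */
	if (ntype == NODE_TYPE_XATTR &&
	    !f2fs_has_xattr_block(ofs_of_node(folio)))
		return false;		/* expected an xattr block */
	return true;
}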
@@ -1478,8 +1514,7 @@ static int sanity_check_node_footer(struct f2fs_sb_info *sbi,
 }
 
 static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
-					struct page *parent, int start,
-					enum node_type ntype)
+					struct folio *parent, int start, enum node_type ntype)
 {
 	struct folio *folio;
 	int err;
@@ -1493,20 +1528,18 @@ repeat:
 	if (IS_ERR(folio))
 		return folio;
 
-	err = read_node_page(&folio->page, 0);
-	if (err < 0) {
+	err = read_node_folio(folio, 0);
+	if (err < 0)
 		goto out_put_err;
-	} else if (err == LOCKED_PAGE) {
-		err = 0;
+	if (err == LOCKED_PAGE)
 		goto page_hit;
-	}
 
 	if (parent)
 		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
 
 	folio_lock(folio);
 
-	if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
+	if (unlikely(!is_node_folio(folio))) {
 		f2fs_folio_put(folio, true);
 		goto repeat;
 	}
@@ -1516,30 +1549,27 @@ repeat:
 		goto out_err;
 	}
 
-	if (!f2fs_inode_chksum_verify(sbi, &folio->page)) {
+	if (!f2fs_inode_chksum_verify(sbi, folio)) {
 		err = -EFSBADCRC;
 		goto out_err;
 	}
 page_hit:
-	err = sanity_check_node_footer(sbi, &folio->page, nid, ntype);
+	err = sanity_check_node_footer(sbi, folio, nid, ntype);
 	if (!err)
 		return folio;
 out_err:
 	folio_clear_uptodate(folio);
 out_put_err:
-	/* ENOENT comes from read_node_page which is not an error. */
+	/* ENOENT comes from read_node_folio which is not an error. */
 	if (err != -ENOENT)
 		f2fs_handle_page_eio(sbi, folio, NODE);
 	f2fs_folio_put(folio, true);
 	return ERR_PTR(err);
 }
 
-struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid)
 {
-	struct folio *folio = __get_node_folio(sbi, nid, NULL, 0,
-						NODE_TYPE_REGULAR);
-
-	return &folio->page;
+	return __get_node_folio(sbi, nid, NULL, 0, NODE_TYPE_REGULAR);
 }
 
 struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino)
@@ -1547,35 +1577,23 @@ struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino)
 	return __get_node_folio(sbi, ino, NULL, 0, NODE_TYPE_INODE);
 }
 
-struct page *f2fs_get_inode_page(struct f2fs_sb_info *sbi, pgoff_t ino)
+struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid)
 {
-	struct folio *folio = f2fs_get_inode_folio(sbi, ino);
-
-	return &folio->page;
+	return __get_node_folio(sbi, xnid, NULL, 0, NODE_TYPE_XATTR);
 }
 
-struct page *f2fs_get_xnode_page(struct f2fs_sb_info *sbi, pgoff_t xnid)
+static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start)
 {
-	struct folio *folio = __get_node_folio(sbi, xnid, NULL, 0,
-						NODE_TYPE_XATTR);
-
-	return &folio->page;
-}
-
-struct page *f2fs_get_node_page_ra(struct page *parent, int start)
-{
-	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+	struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
 	nid_t nid = get_nid(parent, start, false);
-	struct folio *folio = __get_node_folio(sbi, nid, parent, start,
-						NODE_TYPE_REGULAR);
 
-	return &folio->page;
+	return __get_node_folio(sbi, nid, parent, start, NODE_TYPE_REGULAR);
 }
 
 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
 {
 	struct inode *inode;
-	struct page *page;
+	struct folio *folio;
 	int ret;
 
 	/* should flush inline_data before evict_inode */
@@ -1583,27 +1601,27 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
 	if (!inode)
 		return;
 
-	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
+	folio = f2fs_filemap_get_folio(inode->i_mapping, 0,
 					FGP_LOCK|FGP_NOWAIT, 0);
-	if (!page)
+	if (IS_ERR(folio))
 		goto iput_out;
 
-	if (!PageUptodate(page))
-		goto page_out;
+	if (!folio_test_uptodate(folio))
+		goto folio_out;
 
-	if (!PageDirty(page))
-		goto page_out;
+	if (!folio_test_dirty(folio))
+		goto folio_out;
 
-	if (!clear_page_dirty_for_io(page))
-		goto page_out;
+	if (!folio_clear_dirty_for_io(folio))
+		goto folio_out;
 
-	ret = f2fs_write_inline_data(inode, page_folio(page));
+	ret = f2fs_write_inline_data(inode, folio);
 	inode_dec_dirty_pages(inode);
 	f2fs_remove_dirty_inode(inode);
 	if (ret)
-		set_page_dirty(page);
-page_out:
-	f2fs_put_page(page, 1);
+		folio_mark_dirty(folio);
+folio_out:
+	f2fs_folio_put(folio, true);
 iput_out:
 	iput(inode);
 }
@@ -1632,19 +1650,19 @@ static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
 			return ERR_PTR(-EIO);
 		}
 
-		if (!IS_DNODE(&folio->page) || !is_cold_node(&folio->page))
+		if (!IS_DNODE(folio) || !is_cold_node(folio))
 			continue;
-		if (ino_of_node(&folio->page) != ino)
+		if (ino_of_node(folio) != ino)
 			continue;
 
 		folio_lock(folio);
 
-		if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
+		if (unlikely(!is_node_folio(folio))) {
 continue_unlock:
 			folio_unlock(folio);
 			continue;
 		}
-		if (ino_of_node(&folio->page) != ino)
+		if (ino_of_node(folio) != ino)
 			goto continue_unlock;
 
 		if (!folio_test_dirty(folio)) {
@@ -1665,21 +1683,20 @@ continue_unlock:
 	return last_folio;
 }
 
-static int __write_node_page(struct page *page, bool atomic, bool *submitted,
+static bool __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
 				struct writeback_control *wbc, bool do_balance,
 				enum iostat_type io_type, unsigned int *seq_id)
 {
-	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
-	struct folio *folio = page_folio(page);
+	struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
 	nid_t nid;
 	struct node_info ni;
 	struct f2fs_io_info fio = {
 		.sbi = sbi,
-		.ino = ino_of_node(page),
+		.ino = ino_of_node(folio),
 		.type = NODE,
 		.op = REQ_OP_WRITE,
 		.op_flags = wbc_to_write_flags(wbc),
-		.page = page,
+		.folio = folio,
 		.encrypted_page = NULL,
 		.submitted = 0,
 		.io_type = io_type,
@@ -1696,7 +1713,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 		folio_clear_uptodate(folio);
 		dec_page_count(sbi, F2FS_DIRTY_NODES);
 		folio_unlock(folio);
-		return 0;
+		return true;
 	}
 
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
@@ -1704,22 +1721,17 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 
 	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
 			wbc->sync_mode == WB_SYNC_NONE &&
-			IS_DNODE(page) && is_cold_node(page))
+			IS_DNODE(folio) && is_cold_node(folio))
 		goto redirty_out;
 
 	/* get old block addr of this node page */
-	nid = nid_of_node(page);
+	nid = nid_of_node(folio);
 	f2fs_bug_on(sbi, folio->index != nid);
 
 	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
 		goto redirty_out;
 
-	if (wbc->for_reclaim) {
-		if (!f2fs_down_read_trylock(&sbi->node_write))
-			goto redirty_out;
-	} else {
-		f2fs_down_read(&sbi->node_write);
-	}
+	f2fs_down_read(&sbi->node_write);
 
 	/* This page is already truncated */
 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
@@ -1727,7 +1739,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 		dec_page_count(sbi, F2FS_DIRTY_NODES);
 		f2fs_up_read(&sbi->node_write);
 		folio_unlock(folio);
-		return 0;
+		return true;
 	}
 
 	if (__is_valid_data_blkaddr(ni.blk_addr) &&
@@ -1742,7 +1754,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 
 	/* should add to global list before clearing PAGECACHE status */
 	if (f2fs_in_warm_node_list(sbi, folio)) {
-		seq = f2fs_add_fsync_node_entry(sbi, page);
+		seq = f2fs_add_fsync_node_entry(sbi, folio);
 		if (seq_id)
 			*seq_id = seq;
 	}
@@ -1751,15 +1763,10 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 
 	fio.old_blkaddr = ni.blk_addr;
 	f2fs_do_write_node_page(nid, &fio);
-	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
+	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(folio));
 	dec_page_count(sbi, F2FS_DIRTY_NODES);
 	f2fs_up_read(&sbi->node_write);
 
-	if (wbc->for_reclaim) {
-		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
-		submitted = NULL;
-	}
-
 	folio_unlock(folio);
 
 	if (unlikely(f2fs_cp_error(sbi))) {
@@ -1771,14 +1778,15 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 	if (do_balance)
 		f2fs_balance_fs(sbi, false);
-	return 0;
+	return true;
 
 redirty_out:
 	folio_redirty_for_writepage(wbc, folio);
-	return AOP_WRITEPAGE_ACTIVATE;
+	folio_unlock(folio);
+	return false;
 }
 
-int f2fs_move_node_page(struct page *node_page, int gc_type)
+int f2fs_move_node_folio(struct folio *node_folio, int gc_type)
 {
 	int err = 0;
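__write_node_folio() now returns a bool instead of 0/AOP_WRITEPAGE_ACTIVATE and always leaves the folio unlocked, including on the redirty path, so callers simply translate false into their own error code. The caller-side pattern, matching f2fs_move_node_folio() (fragment; assumes the wbc set up in the next hunk):

	if (!folio_clear_dirty_for_io(node_folio)) {
		err = -EAGAIN;		/* someone else already wrote it */
		goto out_page;
	}
	/* false means the folio was redirtied and already unlocked */
	if (!__write_node_folio(node_folio, false, NULL,
				&wbc, false, FS_GC_NODE_IO, NULL))
		err = -EAGAIN;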
@@ -1786,33 +1794,30 @@ int f2fs_move_node_page(struct page *node_page, int gc_type)
 		struct writeback_control wbc = {
 			.sync_mode = WB_SYNC_ALL,
 			.nr_to_write = 1,
-			.for_reclaim = 0,
 		};
 
-		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
+		f2fs_folio_wait_writeback(node_folio, NODE, true, true);
 
-		set_page_dirty(node_page);
+		folio_mark_dirty(node_folio);
 
-		if (!clear_page_dirty_for_io(node_page)) {
+		if (!folio_clear_dirty_for_io(node_folio)) {
 			err = -EAGAIN;
 			goto out_page;
 		}
 
-		if (__write_node_page(node_page, false, NULL,
-					&wbc, false, FS_GC_NODE_IO, NULL)) {
+		if (!__write_node_folio(node_folio, false, NULL,
+					&wbc, false, FS_GC_NODE_IO, NULL))
 			err = -EAGAIN;
-			unlock_page(node_page);
-		}
 		goto release_page;
 	} else {
 		/* set page dirty and write it */
-		if (!folio_test_writeback(page_folio(node_page)))
-			set_page_dirty(node_page);
+		if (!folio_test_writeback(node_folio))
+			folio_mark_dirty(node_folio);
 	}
 out_page:
-	unlock_page(node_page);
+	folio_unlock(node_folio);
 release_page:
-	f2fs_put_page(node_page, 0);
+	f2fs_folio_put(node_folio, false);
 	return err;
 }
@@ -1854,19 +1859,19 @@ retry:
 			goto out;
 		}
 
-		if (!IS_DNODE(&folio->page) || !is_cold_node(&folio->page))
+		if (!IS_DNODE(folio) || !is_cold_node(folio))
 			continue;
-		if (ino_of_node(&folio->page) != ino)
+		if (ino_of_node(folio) != ino)
 			continue;
 
 		folio_lock(folio);
 
-		if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
+		if (unlikely(!is_node_folio(folio))) {
 continue_unlock:
 			folio_unlock(folio);
 			continue;
 		}
-		if (ino_of_node(&folio->page) != ino)
+		if (ino_of_node(folio) != ino)
 			goto continue_unlock;
 
 		if (!folio_test_dirty(folio) && folio != last_folio) {
@@ -1876,17 +1881,17 @@ continue_unlock:
 
 		f2fs_folio_wait_writeback(folio, NODE, true, true);
 
-		set_fsync_mark(&folio->page, 0);
-		set_dentry_mark(&folio->page, 0);
+		set_fsync_mark(folio, 0);
+		set_dentry_mark(folio, 0);
 
 		if (!atomic || folio == last_folio) {
-			set_fsync_mark(&folio->page, 1);
+			set_fsync_mark(folio, 1);
 			percpu_counter_inc(&sbi->rf_node_block_count);
-			if (IS_INODE(&folio->page)) {
+			if (IS_INODE(folio)) {
 				if (is_inode_flag_set(inode, FI_DIRTY_INODE))
-					f2fs_update_inode(inode, &folio->page);
-				set_dentry_mark(&folio->page,
+					f2fs_update_inode(inode, folio);
+				set_dentry_mark(folio,
 						f2fs_need_dentry_mark(sbi, ino));
 			}
 			/* may be written by other thread */
@@ -1897,31 +1902,29 @@ continue_unlock:
 		if (!folio_clear_dirty_for_io(folio))
 			goto continue_unlock;
 
-		ret = __write_node_page(&folio->page, atomic &&
+		if (!__write_node_folio(folio, atomic &&
 					folio == last_folio,
 					&submitted, wbc, true,
-					FS_NODE_IO, seq_id);
-		if (ret) {
-			folio_unlock(folio);
+					FS_NODE_IO, seq_id)) {
 			f2fs_folio_put(last_folio, false);
-			break;
-		} else if (submitted) {
-			nwritten++;
+			folio_batch_release(&fbatch);
+			ret = -EIO;
+			goto out;
 		}
+		if (submitted)
+			nwritten++;
 
 		if (folio == last_folio) {
 			f2fs_folio_put(folio, false);
+			folio_batch_release(&fbatch);
 			marked = true;
-			break;
+			goto out;
 		}
 	}
 	folio_batch_release(&fbatch);
 	cond_resched();
-
-	if (ret || marked)
-		break;
 }
-	if (!ret && atomic && !marked) {
+	if (atomic && !marked) {
 		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
 			   ino, last_folio->index);
 		folio_lock(last_folio);
@@ -1933,7 +1936,7 @@ continue_unlock:
 out:
 	if (nwritten)
 		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
-	return ret ? -EIO : 0;
+	return ret;
 }
 
 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
@@ -1964,13 +1967,13 @@ static bool flush_dirty_inode(struct folio *folio)
 {
 	struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
 	struct inode *inode;
-	nid_t ino = ino_of_node(&folio->page);
+	nid_t ino = ino_of_node(folio);
 
 	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
 	if (!inode)
 		return false;
 
-	f2fs_update_inode(inode, &folio->page);
+	f2fs_update_inode(inode, folio);
 	folio_unlock(folio);
 
 	iput(inode);
@@ -1993,21 +1996,21 @@ void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
 		for (i = 0; i < nr_folios; i++) {
 			struct folio *folio = fbatch.folios[i];
 
-			if (!IS_INODE(&folio->page))
+			if (!IS_INODE(folio))
 				continue;
 
 			folio_lock(folio);
 
-			if (unlikely(folio->mapping != NODE_MAPPING(sbi)))
+			if (unlikely(!is_node_folio(folio)))
 				goto unlock;
 			if (!folio_test_dirty(folio))
 				goto unlock;
 
 			/* flush inline_data, if it's async context. */
-			if (page_private_inline(&folio->page)) {
-				clear_page_private_inline(&folio->page);
+			if (folio_test_f2fs_inline(folio)) {
+				folio_clear_f2fs_inline(folio);
 				folio_unlock(folio);
-				flush_inline_data(sbi, ino_of_node(&folio->page));
+				flush_inline_data(sbi, ino_of_node(folio));
 				continue;
 			}
 unlock:
@@ -2056,13 +2059,13 @@ next_step:
 			 * 1. dentry dnodes
 			 * 2. file dnodes
 			 */
-			if (step == 0 && IS_DNODE(&folio->page))
+			if (step == 0 && IS_DNODE(folio))
 				continue;
-			if (step == 1 && (!IS_DNODE(&folio->page) ||
-						is_cold_node(&folio->page)))
+			if (step == 1 && (!IS_DNODE(folio) ||
+						is_cold_node(folio)))
 				continue;
-			if (step == 2 && (!IS_DNODE(&folio->page) ||
-						!is_cold_node(&folio->page)))
+			if (step == 2 && (!IS_DNODE(folio) ||
+						!is_cold_node(folio)))
 				continue;
 lock_node:
 			if (wbc->sync_mode == WB_SYNC_ALL)
@@ -2070,7 +2073,7 @@ lock_node:
 			else if (!folio_trylock(folio))
 				continue;
 
-			if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
+			if (unlikely(!is_node_folio(folio))) {
 continue_unlock:
 				folio_unlock(folio);
 				continue;
@@ -2086,15 +2089,15 @@ continue_unlock:
 				goto write_node;
 
 			/* flush inline_data */
-			if (page_private_inline(&folio->page)) {
-				clear_page_private_inline(&folio->page);
+			if (folio_test_f2fs_inline(folio)) {
+				folio_clear_f2fs_inline(folio);
 				folio_unlock(folio);
-				flush_inline_data(sbi, ino_of_node(&folio->page));
+				flush_inline_data(sbi, ino_of_node(folio));
 				goto lock_node;
 			}
 
 			/* flush dirty inode */
-			if (IS_INODE(&folio->page) && flush_dirty_inode(folio))
+			if (IS_INODE(folio) && flush_dirty_inode(folio))
 				goto lock_node;
 write_node:
 			f2fs_folio_wait_writeback(folio, NODE, true, true);
@@ -2102,14 +2105,16 @@ write_node:
 			if (!folio_clear_dirty_for_io(folio))
 				goto continue_unlock;
 
-			set_fsync_mark(&folio->page, 0);
-			set_dentry_mark(&folio->page, 0);
+			set_fsync_mark(folio, 0);
+			set_dentry_mark(folio, 0);
 
-			ret = __write_node_page(&folio->page, false, &submitted,
-						wbc, do_balance, io_type, NULL);
-			if (ret)
-				folio_unlock(folio);
-			else if (submitted)
+			if (!__write_node_folio(folio, false, &submitted,
+						wbc, do_balance, io_type, NULL)) {
+				folio_batch_release(&fbatch);
+				ret = -EIO;
+				goto out;
+			}
+			if (submitted)
 				nwritten++;
 
 			if (--wbc->nr_to_write == 0)
@@ -2144,12 +2149,13 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
 						unsigned int seq_id)
 {
 	struct fsync_node_entry *fn;
-	struct page *page;
 	struct list_head *head = &sbi->fsync_node_list;
 	unsigned long flags;
 	unsigned int cur_seq_id = 0;
 
 	while (seq_id && cur_seq_id < seq_id) {
+		struct folio *folio;
+
 		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
 		if (list_empty(head)) {
 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
@@ -2161,13 +2167,13 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
 			break;
 		}
 		cur_seq_id = fn->seq_id;
-		page = fn->page;
-		get_page(page);
+		folio = fn->folio;
+		folio_get(folio);
 		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 
-		f2fs_wait_on_page_writeback(page, NODE, true, false);
+		f2fs_folio_wait_writeback(folio, NODE, true, false);
 
-		put_page(page);
+		folio_put(folio);
 	}
 
 	return filemap_check_errors(NODE_MAPPING(sbi));
@@ -2227,12 +2233,12 @@ static bool f2fs_dirty_node_folio(struct address_space *mapping,
 	if (!folio_test_uptodate(folio))
 		folio_mark_uptodate(folio);
 #ifdef CONFIG_F2FS_CHECK_FS
-	if (IS_INODE(&folio->page))
-		f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
+	if (IS_INODE(folio))
+		f2fs_inode_chksum_set(F2FS_M_SB(mapping), folio);
 #endif
 	if (filemap_dirty_folio(mapping, folio)) {
 		inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
-		set_page_private_reference(&folio->page);
+		folio_set_f2fs_reference(folio);
 		return true;
 	}
 	return false;
@@ -2334,7 +2340,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i, *e;
 	struct nat_entry *ne;
-	int err = -EINVAL;
+	int err;
 	bool ret = false;
 
 	/* 0 nid should not be used */
@@ -2348,7 +2354,10 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
 	i->nid = nid;
 	i->state = FREE_NID;
 
-	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+	err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+	f2fs_bug_on(sbi, err);
+
+	err = -EINVAL;
 
 	spin_lock(&nm_i->nid_list_lock);
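With __GFP_NOFAIL the preload cannot fail at runtime, but its int return should still be consumed rather than silently dropped; the rewrite asserts it and only afterwards reuses err as the -EINVAL default for the checks that follow. The pattern in isolation (fragment from the hunk above, assuming radix_tree_preload() keeps returning an int status):

	err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
	f2fs_bug_on(sbi, err);	/* with __GFP_NOFAIL this must be 0 */

	err = -EINVAL;		/* default result for the lookups below */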
@@ -2367,14 +2376,14 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
 	 *   - __lookup_nat_cache
 	 *  - f2fs_add_link
 	 *   - f2fs_init_inode_metadata
-	 *    - f2fs_new_inode_page
-	 *     - f2fs_new_node_page
+	 *    - f2fs_new_inode_folio
+	 *     - f2fs_new_node_folio
 	 *      - set_node_addr
 	 *  - f2fs_alloc_nid_done
 	 *   - __remove_nid_from_list(PREALLOC_NID)
 	 *   - __insert_nid_to_list(FREE_NID)
 	 */
-	ne = __lookup_nat_cache(nm_i, nid);
+	ne = __lookup_nat_cache(nm_i, nid, false);
 	if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
 			nat_get_blkaddr(ne) != NULL_ADDR))
 		goto err_out;
@@ -2421,10 +2430,9 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
 }
 
 static int scan_nat_page(struct f2fs_sb_info *sbi,
-					struct page *nat_page, nid_t start_nid)
+					struct f2fs_nat_block *nat_blk, nid_t start_nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
-	struct f2fs_nat_block *nat_blk = page_address(nat_page);
 	block_t blk_addr;
 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
 	int i;
@@ -2544,13 +2552,14 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
 	while (1) {
 		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
 						nm_i->nat_block_bitmap)) {
-			struct page *page = get_current_nat_page(sbi, nid);
+			struct folio *folio = get_current_nat_folio(sbi, nid);
 
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
+			if (IS_ERR(folio)) {
+				ret = PTR_ERR(folio);
 			} else {
-				ret = scan_nat_page(sbi, page, nid);
-				f2fs_put_page(page, 1);
+				ret = scan_nat_page(sbi, folio_address(folio),
+							nid);
+				f2fs_folio_put(folio, true);
 			}
 
 			if (ret) {
@@ -2726,18 +2735,18 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
 	return nr - nr_shrink;
 }
 
-int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
+int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio)
 {
 	void *src_addr, *dst_addr;
 	size_t inline_size;
-	struct page *ipage;
+	struct folio *ifolio;
 	struct f2fs_inode *ri;
 
-	ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino);
-	if (IS_ERR(ipage))
-		return PTR_ERR(ipage);
+	ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+	if (IS_ERR(ifolio))
+		return PTR_ERR(ifolio);
 
-	ri = F2FS_INODE(page);
+	ri = F2FS_INODE(folio);
 	if (ri->i_inline & F2FS_INLINE_XATTR) {
 		if (!f2fs_has_inline_xattr(inode)) {
 			set_inode_flag(inode, FI_INLINE_XATTR);
@@ -2751,26 +2760,26 @@ int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
 		goto update_inode;
 	}
 
-	dst_addr = inline_xattr_addr(inode, ipage);
-	src_addr = inline_xattr_addr(inode, page);
+	dst_addr = inline_xattr_addr(inode, ifolio);
+	src_addr = inline_xattr_addr(inode, folio);
 	inline_size = inline_xattr_size(inode);
 
-	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+	f2fs_folio_wait_writeback(ifolio, NODE, true, true);
 	memcpy(dst_addr, src_addr, inline_size);
 update_inode:
-	f2fs_update_inode(inode, ipage);
-	f2fs_put_page(ipage, 1);
+	f2fs_update_inode(inode, ifolio);
+	f2fs_folio_put(ifolio, true);
 	return 0;
 }
 
-int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
+int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
 	nid_t new_xnid;
 	struct dnode_of_data dn;
 	struct node_info ni;
-	struct page *xpage;
+	struct folio *xfolio;
 	int err;
 
 	if (!prev_xnid)
@@ -2791,32 +2800,32 @@ recover_xnid:
 		return -ENOSPC;
 
 	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
-	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
-	if (IS_ERR(xpage)) {
+	xfolio = f2fs_new_node_folio(&dn, XATTR_NODE_OFFSET);
+	if (IS_ERR(xfolio)) {
 		f2fs_alloc_nid_failed(sbi, new_xnid);
-		return PTR_ERR(xpage);
+		return PTR_ERR(xfolio);
 	}
 
 	f2fs_alloc_nid_done(sbi, new_xnid);
 	f2fs_update_inode_page(inode);
 
 	/* 3: update and set xattr node page dirty */
-	if (page) {
-		memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
+	if (folio) {
+		memcpy(F2FS_NODE(xfolio), F2FS_NODE(folio),
 				VALID_XATTR_BLOCK_SIZE);
-		set_page_dirty(xpage);
+		folio_mark_dirty(xfolio);
 	}
-	f2fs_put_page(xpage, 1);
+	f2fs_folio_put(xfolio, true);
 
 	return 0;
 }
 
-int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
+int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio)
 {
 	struct f2fs_inode *src, *dst;
-	nid_t ino = ino_of_node(page);
+	nid_t ino = ino_of_node(folio);
 	struct node_info old_ni, new_ni;
-	struct page *ipage;
+	struct folio *ifolio;
 	int err;
 
 	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
@@ -2826,8 +2835,8 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
 		return -EINVAL;
 retry:
-	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
-	if (!ipage) {
+	ifolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), ino, false);
+	if (IS_ERR(ifolio)) {
 		memalloc_retry_wait(GFP_NOFS);
 		goto retry;
 	}
@@ -2835,13 +2844,13 @@ retry:
 	/* Should not use this inode from free nid list */
 	remove_free_nid(sbi, ino);
 
-	if (!PageUptodate(ipage))
-		SetPageUptodate(ipage);
-	fill_node_footer(ipage, ino, ino, 0, true);
-	set_cold_node(ipage, false);
+	if (!folio_test_uptodate(ifolio))
+		folio_mark_uptodate(ifolio);
+	fill_node_footer(ifolio, ino, ino, 0, true);
+	set_cold_node(ifolio, false);
 
-	src = F2FS_INODE(page);
-	dst = F2FS_INODE(ipage);
+	src = F2FS_INODE(folio);
+	dst = F2FS_INODE(ifolio);
 
 	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
 	dst->i_size = 0;
@@ -2877,8 +2886,8 @@ retry:
 		WARN_ON(1);
 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 	inc_valid_inode_count(sbi);
-	set_page_dirty(ipage);
-	f2fs_put_page(ipage, 1);
+	folio_mark_dirty(ifolio);
+	f2fs_folio_put(ifolio, true);
 
 	return 0;
 }
@@ -2902,17 +2911,17 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
 		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
 
 		for (idx = addr; idx < addr + nrpages; idx++) {
-			struct page *page = f2fs_get_tmp_page(sbi, idx);
+			struct folio *folio = f2fs_get_tmp_folio(sbi, idx);
 
-			if (IS_ERR(page))
-				return PTR_ERR(page);
+			if (IS_ERR(folio))
+				return PTR_ERR(folio);
 
-			rn = F2FS_NODE(page);
+			rn = F2FS_NODE(folio);
 			sum_entry->nid = rn->footer.nid;
 			sum_entry->version = 0;
 			sum_entry->ofs_in_node = 0;
 			sum_entry++;
-			f2fs_put_page(page, 1);
+			f2fs_folio_put(folio, true);
 		}
 
 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
@@ -2927,6 +2936,7 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
 	struct f2fs_journal *journal = curseg->journal;
 	int i;
+	bool init_dirty;
 
 	down_write(&curseg->journal_rwsem);
 	for (i = 0; i < nats_in_cursum(journal); i++) {
@@ -2937,12 +2947,15 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
 		if (f2fs_check_nid_range(sbi, nid))
 			continue;
 
+		init_dirty = false;
+
 		raw_ne = nat_in_journal(journal, i);
 
-		ne = __lookup_nat_cache(nm_i, nid);
+		ne = __lookup_nat_cache(nm_i, nid, true);
 		if (!ne) {
+			init_dirty = true;
 			ne = __alloc_nat_entry(sbi, nid, true);
-			__init_nat_entry(nm_i, ne, &raw_ne, true);
+			__init_nat_entry(nm_i, ne, &raw_ne, true, true);
 		}
 
 		/*
@@ -2957,7 +2970,7 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
 			spin_unlock(&nm_i->nid_list_lock);
 		}
 
-		__set_nat_cache_dirty(nm_i, ne);
+		__set_nat_cache_dirty(nm_i, ne, init_dirty);
 	}
 	update_nats_in_cursum(journal, -i);
 	up_write(&curseg->journal_rwsem);
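Draining the NAT journal applies the same rule as set_node_addr(): an entry materialized straight into the dirty set was never on the clean LRU, so __set_nat_cache_dirty() must not decrement the reclaimable count for it. The per-entry flow, condensed from the hunk above (fragment; journal locking and the free-nid bookkeeping elided):

	init_dirty = false;
	ne = __lookup_nat_cache(nm_i, nid, true);	/* don't touch the LRU */
	if (!ne) {
		init_dirty = true;			/* born dirty, off-LRU */
		ne = __alloc_nat_entry(sbi, nid, true);
		__init_nat_entry(nm_i, ne, &raw_ne, true, true);
	}
	__set_nat_cache_dirty(nm_i, ne, init_dirty);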
@@ -2982,11 +2995,10 @@ add_out:
 }
 
 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
-						struct page *page)
+						const struct f2fs_nat_block *nat_blk)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
-	struct f2fs_nat_block *nat_blk = page_address(page);
 	int valid = 0;
 	int i = 0;
@@ -3023,7 +3035,7 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 	bool to_journal = true;
 	struct f2fs_nat_block *nat_blk;
 	struct nat_entry *ne, *cur;
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 
 	/*
 	 * there are two steps to flush nat entries:
@@ -3037,11 +3049,11 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 	if (to_journal) {
 		down_write(&curseg->journal_rwsem);
 	} else {
-		page = get_next_nat_page(sbi, start_nid);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+		folio = get_next_nat_folio(sbi, start_nid);
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);
 
-		nat_blk = page_address(page);
+		nat_blk = folio_address(folio);
 		f2fs_bug_on(sbi, !nat_blk);
 	}
@@ -3077,8 +3089,8 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 	if (to_journal) {
 		up_write(&curseg->journal_rwsem);
 	} else {
-		__update_nat_bits(sbi, start_nid, page);
-		f2fs_put_page(page, 1);
+		__update_nat_bits(sbi, start_nid, nat_blk);
+		f2fs_folio_put(folio, true);
 	}
 
 	/* Allow dirty nats by node block allocation in write_begin */
@@ -3173,15 +3185,15 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
 	nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
 						nm_i->nat_bits_blocks;
 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
-		struct page *page;
+		struct folio *folio;
 
-		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+		folio = f2fs_get_meta_folio(sbi, nat_bits_addr++);
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);
 
 		memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
-					page_address(page), F2FS_BLKSIZE);
-		f2fs_put_page(page, 1);
+					folio_address(folio), F2FS_BLKSIZE);
+		f2fs_folio_put(folio, true);
 	}
 
 	cp_ver |= (cur_cp_crc(ckpt) << 32);
@@ -3418,10 +3430,10 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
 	}
 
 	kvfree(nm_i->free_nid_count);
-	kvfree(nm_i->nat_bitmap);
+	kfree(nm_i->nat_bitmap);
 	kvfree(nm_i->nat_bits);
 #ifdef CONFIG_F2FS_CHECK_FS
-	kvfree(nm_i->nat_bitmap_mir);
+	kfree(nm_i->nat_bitmap_mir);
 #endif
 	sbi->nm_info = NULL;
 	kfree(nm_i);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 103a437e6425..030390543b54 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -31,7 +31,7 @@
 /* control total # of nats */
 #define DEF_NAT_CACHE_THRESHOLD			100000
 
-/* control total # of node writes used for roll-fowrad recovery */
+/* control total # of node writes used for roll-forward recovery */
 #define DEF_RF_NODE_BLOCKS			0
 
 /* vector size for gang look-up from nat cache that consists of radix tree */
@@ -243,41 +243,41 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
 #endif
 }
 
-static inline nid_t ino_of_node(struct page *node_page)
+static inline nid_t ino_of_node(const struct folio *node_folio)
 {
-	struct f2fs_node *rn = F2FS_NODE(node_page);
+	struct f2fs_node *rn = F2FS_NODE(node_folio);
 
 	return le32_to_cpu(rn->footer.ino);
 }
 
-static inline nid_t nid_of_node(struct page *node_page)
+static inline nid_t nid_of_node(const struct folio *node_folio)
 {
-	struct f2fs_node *rn = F2FS_NODE(node_page);
+	struct f2fs_node *rn = F2FS_NODE(node_folio);
 
 	return le32_to_cpu(rn->footer.nid);
 }
 
-static inline unsigned int ofs_of_node(const struct page *node_page)
+static inline unsigned int ofs_of_node(const struct folio *node_folio)
 {
-	struct f2fs_node *rn = F2FS_NODE(node_page);
+	struct f2fs_node *rn = F2FS_NODE(node_folio);
 	unsigned flag = le32_to_cpu(rn->footer.flag);
 
 	return flag >> OFFSET_BIT_SHIFT;
 }
 
-static inline __u64 cpver_of_node(struct page *node_page)
+static inline __u64 cpver_of_node(const struct folio *node_folio)
 {
-	struct f2fs_node *rn = F2FS_NODE(node_page);
+	struct f2fs_node *rn = F2FS_NODE(node_folio);
 
 	return le64_to_cpu(rn->footer.cp_ver);
 }
 
-static inline block_t next_blkaddr_of_node(struct page *node_page)
+static inline block_t next_blkaddr_of_node(const struct folio *node_folio)
 {
-	struct f2fs_node *rn = F2FS_NODE(node_page);
+	struct f2fs_node *rn = F2FS_NODE(node_folio);
 
 	return le32_to_cpu(rn->footer.next_blkaddr);
 }
 
-static inline void fill_node_footer(struct page *page, nid_t nid,
+static inline void fill_node_footer(const struct folio *folio, nid_t nid,
 				nid_t ino, unsigned int ofs, bool reset)
 {
-	struct f2fs_node *rn = F2FS_NODE(page);
+	struct f2fs_node *rn = F2FS_NODE(folio);
 	unsigned int old_flag = 0;
 
 	if (reset)
@@ -293,17 +293,18 @@ static inline void fill_node_footer(struct page *page, nid_t nid,
 			(old_flag & OFFSET_BIT_MASK));
 }
 
-static inline void copy_node_footer(struct page *dst, struct page *src)
+static inline void copy_node_footer(const struct folio *dst,
+		const struct folio *src)
 {
 	struct f2fs_node *src_rn = F2FS_NODE(src);
 	struct f2fs_node *dst_rn = F2FS_NODE(dst);
 
 	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
 }
 
-static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
+static inline void fill_node_footer_blkaddr(struct folio *folio, block_t blkaddr)
 {
-	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
-	struct f2fs_node *rn = F2FS_NODE(page);
+	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_F_SB(folio));
+	struct f2fs_node *rn = F2FS_NODE(folio);
 	__u64 cp_ver = cur_cp_version(ckpt);
 
 	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
@@ -313,19 +314,19 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
 	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
 }
 
-static inline bool is_recoverable_dnode(struct page *page)
+static inline bool is_recoverable_dnode(const struct folio *folio)
 {
-	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_F_SB(folio));
 	__u64 cp_ver = cur_cp_version(ckpt);
 
 	/* Don't care crc part, if fsck.f2fs sets it. */
 	if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
-		return (cp_ver << 32) == (cpver_of_node(page) << 32);
+		return (cp_ver << 32) == (cpver_of_node(folio) << 32);
 
 	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
 		cp_ver |= (cur_cp_crc(ckpt) << 32);
 
-	return cp_ver == cpver_of_node(page);
+	return cp_ver == cpver_of_node(folio);
 }
 
 /*
@@ -349,9 +350,9 @@ static inline bool is_recoverable_dnode(struct page *page)
  *    `- indirect node ((6 + 2N) + (N - 1)(N + 1))
  *        `- direct node
  */
-static inline bool IS_DNODE(const struct page *node_page)
+static inline bool IS_DNODE(const struct folio *node_folio)
 {
-	unsigned int ofs = ofs_of_node(node_page);
+	unsigned int ofs = ofs_of_node(node_folio);
 
 	if (f2fs_has_xattr_block(ofs))
 		return true;
@@ -367,22 +368,22 @@ static inline bool IS_DNODE(const struct page *node_page)
 	return true;
 }
 
-static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
+static inline int set_nid(struct folio *folio, int off, nid_t nid, bool i)
 {
-	struct f2fs_node *rn = F2FS_NODE(p);
+	struct f2fs_node *rn = F2FS_NODE(folio);
 
-	f2fs_wait_on_page_writeback(p, NODE, true, true);
+	f2fs_folio_wait_writeback(folio, NODE, true, true);
 
 	if (i)
 		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
 	else
 		rn->in.nid[off] = cpu_to_le32(nid);
-	return set_page_dirty(p);
+	return folio_mark_dirty(folio);
 }
 
-static inline nid_t get_nid(struct page *p, int off, bool i)
+static inline nid_t get_nid(const struct folio *folio, int off, bool i)
 {
-	struct f2fs_node *rn = F2FS_NODE(p);
+	struct f2fs_node *rn = F2FS_NODE(folio);
 
 	if (i)
 		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
@@ -396,19 +397,19 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
  *  - Mark cold data pages in page cache
  */
 
-static inline int is_node(const struct page *page, int type)
+static inline int is_node(const struct folio *folio, int type)
 {
-	struct f2fs_node *rn = F2FS_NODE(page);
+	struct f2fs_node *rn = F2FS_NODE(folio);
 
 	return le32_to_cpu(rn->footer.flag) & BIT(type);
 }
 
-#define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
-#define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
-#define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)
+#define is_cold_node(folio)	is_node(folio, COLD_BIT_SHIFT)
+#define is_fsync_dnode(folio)	is_node(folio, FSYNC_BIT_SHIFT)
+#define is_dent_dnode(folio)	is_node(folio, DENT_BIT_SHIFT)
 
-static inline void set_cold_node(struct page *page, bool is_dir)
+static inline void set_cold_node(const struct folio *folio, bool is_dir)
 {
-	struct f2fs_node *rn = F2FS_NODE(page);
+	struct f2fs_node *rn = F2FS_NODE(folio);
 	unsigned int flag = le32_to_cpu(rn->footer.flag);
 
 	if (is_dir)
int type) +static inline void set_mark(struct folio *folio, int mark, int type) { - struct f2fs_node *rn = F2FS_NODE(page); + struct f2fs_node *rn = F2FS_NODE(folio); unsigned int flag = le32_to_cpu(rn->footer.flag); if (mark) flag |= BIT(type); @@ -429,8 +430,8 @@ static inline void set_mark(struct page *page, int mark, int type) rn->footer.flag = cpu_to_le32(flag); #ifdef CONFIG_F2FS_CHECK_FS - f2fs_inode_chksum_set(F2FS_P_SB(page), page); + f2fs_inode_chksum_set(F2FS_F_SB(folio), folio); #endif } -#define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT) -#define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT) +#define set_dentry_mark(folio, mark) set_mark(folio, mark, DENT_BIT_SHIFT) +#define set_fsync_mark(folio, mark) set_mark(folio, mark, FSYNC_BIT_SHIFT) diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 69a2027e3ebc..4cb3a91801b4 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -157,15 +157,15 @@ static int init_recovered_filename(const struct inode *dir, return 0; } -static int recover_dentry(struct inode *inode, struct page *ipage, +static int recover_dentry(struct inode *inode, struct folio *ifolio, struct list_head *dir_list) { - struct f2fs_inode *raw_inode = F2FS_INODE(ipage); + struct f2fs_inode *raw_inode = F2FS_INODE(ifolio); nid_t pino = le32_to_cpu(raw_inode->i_pino); struct f2fs_dir_entry *de; struct f2fs_filename fname; struct qstr usr_fname; - struct page *page; + struct folio *folio; struct inode *dir, *einode; struct fsync_inode_entry *entry; int err = 0; @@ -187,7 +187,7 @@ static int recover_dentry(struct inode *inode, struct page *ipage, if (err) goto out; retry: - de = __f2fs_find_entry(dir, &fname, &page); + de = __f2fs_find_entry(dir, &fname, &folio); if (de && inode->i_ino == le32_to_cpu(de->ino)) goto out_put; @@ -212,11 +212,11 @@ retry: iput(einode); goto out_put; } - f2fs_delete_entry(de, page, dir, einode); + f2fs_delete_entry(de, folio, dir, einode); iput(einode); goto retry; - } else if (IS_ERR(page)) { - err = PTR_ERR(page); + } else if (IS_ERR(folio)) { + err = PTR_ERR(folio); } else { err = f2fs_add_dentry(dir, &fname, inode, inode->i_ino, inode->i_mode); @@ -226,21 +226,21 @@ retry: goto out; out_put: - f2fs_put_page(page, 0); + f2fs_folio_put(folio, false); out: if (file_enc_name(inode)) name = "<encrypted>"; else name = raw_inode->i_name; f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d", - __func__, ino_of_node(ipage), name, + __func__, ino_of_node(ifolio), name, IS_ERR(dir) ? 
0 : dir->i_ino, err); return err; } -static int recover_quota_data(struct inode *inode, struct page *page) +static int recover_quota_data(struct inode *inode, struct folio *folio) { - struct f2fs_inode *raw = F2FS_INODE(page); + struct f2fs_inode *raw = F2FS_INODE(folio); struct iattr attr; uid_t i_uid = le32_to_cpu(raw->i_uid); gid_t i_gid = le32_to_cpu(raw->i_gid); @@ -277,16 +277,16 @@ static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri) clear_inode_flag(inode, FI_DATA_EXIST); } -static int recover_inode(struct inode *inode, struct page *page) +static int recover_inode(struct inode *inode, struct folio *folio) { - struct f2fs_inode *raw = F2FS_INODE(page); + struct f2fs_inode *raw = F2FS_INODE(folio); struct f2fs_inode_info *fi = F2FS_I(inode); char *name; int err; inode->i_mode = le16_to_cpu(raw->i_mode); - err = recover_quota_data(inode, page); + err = recover_quota_data(inode, folio); if (err) return err; @@ -333,10 +333,10 @@ static int recover_inode(struct inode *inode, struct page *page) if (file_enc_name(inode)) name = "<encrypted>"; else - name = F2FS_INODE(page)->i_name; + name = F2FS_INODE(folio)->i_name; f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x", - ino_of_node(page), name, raw->i_inline); + ino_of_node(folio), name, raw->i_inline); return 0; } @@ -358,33 +358,34 @@ static int sanity_check_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr, block_t *blkaddr_fast, bool *is_detecting) { unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS; - struct page *page = NULL; int i; if (!*is_detecting) return 0; for (i = 0; i < 2; i++) { + struct folio *folio; + if (!f2fs_is_valid_blkaddr(sbi, *blkaddr_fast, META_POR)) { *is_detecting = false; return 0; } - page = f2fs_get_tmp_page(sbi, *blkaddr_fast); - if (IS_ERR(page)) - return PTR_ERR(page); + folio = f2fs_get_tmp_folio(sbi, *blkaddr_fast); + if (IS_ERR(folio)) + return PTR_ERR(folio); - if (!is_recoverable_dnode(page)) { - f2fs_put_page(page, 1); + if (!is_recoverable_dnode(folio)) { + f2fs_folio_put(folio, true); *is_detecting = false; return 0; } ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, *blkaddr_fast, - next_blkaddr_of_node(page)); + next_blkaddr_of_node(folio)); - *blkaddr_fast = next_blkaddr_of_node(page); - f2fs_put_page(page, 1); + *blkaddr_fast = next_blkaddr_of_node(folio); + f2fs_folio_put(folio, true); f2fs_ra_meta_pages_cond(sbi, *blkaddr_fast, ra_blocks); } @@ -401,7 +402,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, bool check_only) { struct curseg_info *curseg; - struct page *page = NULL; block_t blkaddr, blkaddr_fast; bool is_detecting = true; int err = 0; @@ -413,33 +413,35 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, while (1) { struct fsync_inode_entry *entry; + struct folio *folio; if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR)) return 0; - page = f2fs_get_tmp_page(sbi, blkaddr); - if (IS_ERR(page)) { - err = PTR_ERR(page); + folio = f2fs_get_tmp_folio(sbi, blkaddr); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); break; } - if (!is_recoverable_dnode(page)) { - f2fs_put_page(page, 1); + if (!is_recoverable_dnode(folio)) { + f2fs_folio_put(folio, true); break; } - if (!is_fsync_dnode(page)) + if (!is_fsync_dnode(folio)) goto next; - entry = get_fsync_inode(head, ino_of_node(page)); + entry = get_fsync_inode(head, ino_of_node(folio)); if (!entry) { bool quota_inode = false; if (!check_only && - IS_INODE(page) && is_dent_dnode(page)) { - err = f2fs_recover_inode_page(sbi, 
page); + IS_INODE(folio) && + is_dent_dnode(folio)) { + err = f2fs_recover_inode_page(sbi, folio); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); break; } quota_inode = true; @@ -449,24 +451,24 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, * CP | dnode(F) | inode(DF) * For this case, we should not give up now. */ - entry = add_fsync_inode(sbi, head, ino_of_node(page), + entry = add_fsync_inode(sbi, head, ino_of_node(folio), quota_inode); if (IS_ERR(entry)) { err = PTR_ERR(entry); if (err == -ENOENT) goto next; - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); break; } } entry->blkaddr = blkaddr; - if (IS_INODE(page) && is_dent_dnode(page)) + if (IS_INODE(folio) && is_dent_dnode(folio)) entry->last_dentry = blkaddr; next: /* check next segment */ - blkaddr = next_blkaddr_of_node(page); - f2fs_put_page(page, 1); + blkaddr = next_blkaddr_of_node(folio); + f2fs_folio_put(folio, true); err = sanity_check_node_chain(sbi, blkaddr, &blkaddr_fast, &is_detecting); @@ -492,7 +494,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); struct f2fs_summary_block *sum_node; struct f2fs_summary sum; - struct page *sum_page, *node_page; + struct folio *sum_folio, *node_folio; struct dnode_of_data tdn = *dn; nid_t ino, nid; struct inode *inode; @@ -514,18 +516,18 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, } } - sum_page = f2fs_get_sum_page(sbi, segno); - if (IS_ERR(sum_page)) - return PTR_ERR(sum_page); - sum_node = (struct f2fs_summary_block *)page_address(sum_page); + sum_folio = f2fs_get_sum_folio(sbi, segno); + if (IS_ERR(sum_folio)) + return PTR_ERR(sum_folio); + sum_node = folio_address(sum_folio); sum = sum_node->entries[blkoff]; - f2fs_put_page(sum_page, 1); + f2fs_folio_put(sum_folio, true); got_it: /* Use the locked dnode page and inode */ nid = le32_to_cpu(sum.nid); ofs_in_node = le16_to_cpu(sum.ofs_in_node); - max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode); + max_addrs = ADDRS_PER_PAGE(dn->node_folio, dn->inode); if (ofs_in_node >= max_addrs) { f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u", ofs_in_node, dn->inode->i_ino, nid, max_addrs); @@ -535,9 +537,9 @@ got_it: if (dn->inode->i_ino == nid) { tdn.nid = nid; - if (!dn->inode_page_locked) - lock_page(dn->inode_page); - tdn.node_page = dn->inode_page; + if (!dn->inode_folio_locked) + folio_lock(dn->inode_folio); + tdn.node_folio = dn->inode_folio; tdn.ofs_in_node = ofs_in_node; goto truncate_out; } else if (dn->nid == nid) { @@ -546,13 +548,13 @@ got_it: } /* Get the node page */ - node_page = f2fs_get_node_page(sbi, nid); - if (IS_ERR(node_page)) - return PTR_ERR(node_page); + node_folio = f2fs_get_node_folio(sbi, nid); + if (IS_ERR(node_folio)) + return PTR_ERR(node_folio); - offset = ofs_of_node(node_page); - ino = ino_of_node(node_page); - f2fs_put_page(node_page, 1); + offset = ofs_of_node(node_folio); + ino = ino_of_node(node_folio); + f2fs_folio_put(node_folio, true); if (ino != dn->inode->i_ino) { int ret; @@ -578,8 +580,8 @@ got_it: * if inode page is locked, unlock temporarily, but its reference * count keeps alive. 
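/*
 * Illustrative, self-contained sketch — not part of the patch above. The
 * conversion in these recovery.c hunks is mechanical: getters return a
 * struct folio, f2fs_put_page(page, 1) becomes f2fs_folio_put(folio, true)
 * and f2fs_put_page(page, 0) becomes f2fs_folio_put(folio, false), where the
 * boolean says whether to unlock before dropping the reference. The stub
 * types below are stand-ins so the fragment compiles on its own; they are
 * not the real kernel definitions.
 */
struct folio { int locked; int refs; };

static void folio_unlock(struct folio *f) { f->locked = 0; }	/* models kernel folio_unlock() */
static void folio_put(struct folio *f)    { f->refs--; }	/* models dropping the refcount */

static void f2fs_folio_put(struct folio *folio, int unlock)
{
	if (!folio)
		return;
	if (unlock)
		folio_unlock(folio);	/* f2fs_put_page(page, 1) used to call unlock_page() */
	folio_put(folio);
}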
*/ - if (ino == dn->inode->i_ino && dn->inode_page_locked) - unlock_page(dn->inode_page); + if (ino == dn->inode->i_ino && dn->inode_folio_locked) + folio_unlock(dn->inode_folio); set_new_dnode(&tdn, inode, NULL, NULL, 0); if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE)) @@ -592,15 +594,15 @@ got_it: out: if (ino != dn->inode->i_ino) iput(inode); - else if (dn->inode_page_locked) - lock_page(dn->inode_page); + else if (dn->inode_folio_locked) + folio_lock(dn->inode_folio); return 0; truncate_out: if (f2fs_data_blkaddr(&tdn) == blkaddr) f2fs_truncate_data_blocks_range(&tdn, 1); - if (dn->inode->i_ino == nid && !dn->inode_page_locked) - unlock_page(dn->inode_page); + if (dn->inode->i_ino == nid && !dn->inode_folio_locked) + folio_unlock(dn->inode_folio); return 0; } @@ -618,27 +620,27 @@ static int f2fs_reserve_new_block_retry(struct dnode_of_data *dn) } static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, - struct page *page) + struct folio *folio) { struct dnode_of_data dn; struct node_info ni; - unsigned int start, end; + unsigned int start = 0, end = 0, index; int err = 0, recovered = 0; /* step 1: recover xattr */ - if (IS_INODE(page)) { - err = f2fs_recover_inline_xattr(inode, page); + if (IS_INODE(folio)) { + err = f2fs_recover_inline_xattr(inode, folio); if (err) goto out; - } else if (f2fs_has_xattr_block(ofs_of_node(page))) { - err = f2fs_recover_xattr_data(inode, page); + } else if (f2fs_has_xattr_block(ofs_of_node(folio))) { + err = f2fs_recover_xattr_data(inode, folio); if (!err) recovered++; goto out; } /* step 2: recover inline data */ - err = f2fs_recover_inline_data(inode, page); + err = f2fs_recover_inline_data(inode, folio); if (err) { if (err == 1) err = 0; @@ -646,8 +648,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, } /* step 3: recover data indices */ - start = f2fs_start_bidx_of_node(ofs_of_node(page), inode); - end = start + ADDRS_PER_PAGE(page, inode); + start = f2fs_start_bidx_of_node(ofs_of_node(folio), inode); + end = start + ADDRS_PER_PAGE(folio, inode); set_new_dnode(&dn, inode, NULL, NULL, 0); retry_dn: @@ -660,28 +662,28 @@ retry_dn: goto out; } - f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true); + f2fs_folio_wait_writeback(dn.node_folio, NODE, true, true); err = f2fs_get_node_info(sbi, dn.nid, &ni, false); if (err) goto err; - f2fs_bug_on(sbi, ni.ino != ino_of_node(page)); + f2fs_bug_on(sbi, ni.ino != ino_of_node(folio)); - if (ofs_of_node(dn.node_page) != ofs_of_node(page)) { + if (ofs_of_node(dn.node_folio) != ofs_of_node(folio)) { f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", - inode->i_ino, ofs_of_node(dn.node_page), - ofs_of_node(page)); + inode->i_ino, ofs_of_node(dn.node_folio), + ofs_of_node(folio)); err = -EFSCORRUPTED; f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER); goto err; } - for (; start < end; start++, dn.ofs_in_node++) { + for (index = start; index < end; index++, dn.ofs_in_node++) { block_t src, dest; src = f2fs_data_blkaddr(&dn); - dest = data_blkaddr(dn.inode, page, dn.ofs_in_node); + dest = data_blkaddr(dn.inode, folio, dn.ofs_in_node); if (__is_valid_data_blkaddr(src) && !f2fs_is_valid_blkaddr(sbi, src, META_POR)) { @@ -706,9 +708,9 @@ retry_dn: } if (!file_keep_isize(inode) && - (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT))) + (i_size_read(inode) <= ((loff_t)index << PAGE_SHIFT))) f2fs_i_size_write(inode, - (loff_t)(start + 1) << PAGE_SHIFT); + (loff_t)(index + 1) << PAGE_SHIFT); /* * dest is reserved block, invalidate src block @@ -756,16 
+758,18 @@ retry_prev: } } - copy_node_footer(dn.node_page, page); - fill_node_footer(dn.node_page, dn.nid, ni.ino, - ofs_of_node(page), false); - set_page_dirty(dn.node_page); + copy_node_footer(dn.node_folio, folio); + fill_node_footer(dn.node_folio, dn.nid, ni.ino, + ofs_of_node(folio), false); + folio_mark_dirty(dn.node_folio); err: f2fs_put_dnode(&dn); out: - f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d", - inode->i_ino, file_keep_isize(inode) ? "keep" : "recover", - recovered, err); + f2fs_notice(sbi, "recover_data: ino = %lx, nid = %x (i_size: %s), " + "range (%u, %u), recovered = %d, err = %d", + inode->i_ino, nid_of_node(folio), + file_keep_isize(inode) ? "keep" : "recover", + start, end, recovered, err); return err; } @@ -773,10 +777,17 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list, struct list_head *tmp_inode_list, struct list_head *dir_list) { struct curseg_info *curseg; - struct page *page = NULL; int err = 0; block_t blkaddr; unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS; + unsigned int recoverable_dnode = 0; + unsigned int fsynced_dnode = 0; + unsigned int total_dnode = 0; + unsigned int recovered_inode = 0; + unsigned int recovered_dentry = 0; + unsigned int recovered_dnode = 0; + + f2fs_notice(sbi, "do_recover_data: start to recover dnode"); /* get node pages in the current segment */ curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); @@ -784,63 +795,75 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list, while (1) { struct fsync_inode_entry *entry; + struct folio *folio; if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR)) break; - page = f2fs_get_tmp_page(sbi, blkaddr); - if (IS_ERR(page)) { - err = PTR_ERR(page); + folio = f2fs_get_tmp_folio(sbi, blkaddr); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); break; } - if (!is_recoverable_dnode(page)) { - f2fs_put_page(page, 1); + if (!is_recoverable_dnode(folio)) { + f2fs_folio_put(folio, true); break; } + recoverable_dnode++; - entry = get_fsync_inode(inode_list, ino_of_node(page)); + entry = get_fsync_inode(inode_list, ino_of_node(folio)); if (!entry) goto next; + fsynced_dnode++; /* * inode(x) | CP | inode(x) | dnode(F) * In this case, we can lose the latest inode(x). * So, call recover_inode for the inode update. 
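/*
 * Standalone model — not part of the patch — of the roll-forward walk that
 * the new recover_data() counters above instrument: fsynced node blocks are
 * chained through footer.next_blkaddr, and the walk stops at the first
 * non-recoverable dnode. Types, names, and bounds here are stand-ins for
 * illustration, not f2fs's real ones.
 */
#include <stdbool.h>
#include <stdio.h>

struct node_blk { unsigned int next; bool recoverable; bool fsynced; };

static void walk_node_chain(const struct node_blk *blk, unsigned int nblks,
			    unsigned int blkaddr)
{
	unsigned int recoverable = 0, fsynced = 0, total = 0;

	while (blkaddr < nblks) {		/* models f2fs_is_valid_blkaddr() */
		const struct node_blk *b = &blk[blkaddr];

		if (!b->recoverable)		/* models is_recoverable_dnode() */
			break;
		recoverable++;
		if (b->fsynced)			/* models a get_fsync_inode() hit */
			fsynced++;
		blkaddr = b->next;		/* models next_blkaddr_of_node() */
		total++;
	}
	printf("recoverable: %u, fsynced: %u, total: %u\n",
	       recoverable, fsynced, total);
}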
*/ - if (IS_INODE(page)) { - err = recover_inode(entry->inode, page); + if (IS_INODE(folio)) { + err = recover_inode(entry->inode, folio); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); break; } + recovered_inode++; } if (entry->last_dentry == blkaddr) { - err = recover_dentry(entry->inode, page, dir_list); + err = recover_dentry(entry->inode, folio, dir_list); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); break; } + recovered_dentry++; } - err = do_recover_data(sbi, entry->inode, page); + err = do_recover_data(sbi, entry->inode, folio); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); break; } + recovered_dnode++; if (entry->blkaddr == blkaddr) list_move_tail(&entry->list, tmp_inode_list); next: ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr, - next_blkaddr_of_node(page)); + next_blkaddr_of_node(folio)); /* check next segment */ - blkaddr = next_blkaddr_of_node(page); - f2fs_put_page(page, 1); + blkaddr = next_blkaddr_of_node(folio); + f2fs_folio_put(folio, true); f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks); + total_dnode++; } if (!err) err = f2fs_allocate_new_segments(sbi); + + f2fs_notice(sbi, "do_recover_data: dnode: (recoverable: %u, fsynced: %u, " + "total: %u), recovered: (inode: %u, dentry: %u, dnode: %u), err: %d", + recoverable_dnode, fsynced_dnode, total_dnode, recovered_inode, + recovered_dentry, recovered_dnode, err); return err; } @@ -853,6 +876,9 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) unsigned long s_flags = sbi->sb->s_flags; bool need_writecp = false; + f2fs_notice(sbi, "f2fs_recover_fsync_data: recover fsync data, " + "check_only: %d", check_only); + if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) f2fs_info(sbi, "recover fsync data on readonly fs"); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 396ef71f41e3..cc82d42ef14c 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -334,7 +334,7 @@ static int __f2fs_commit_atomic_write(struct inode *inode) goto next; } - blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode), + blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, cow_inode), len); index = off; for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) { @@ -371,12 +371,21 @@ next: } out: + if (time_to_inject(sbi, FAULT_TIMEOUT)) + f2fs_io_schedule_timeout_killable(DEFAULT_FAULT_TIMEOUT); + if (ret) { sbi->revoked_atomic_block += fi->atomic_write_cnt; } else { sbi->committed_atomic_block += fi->atomic_write_cnt; set_inode_flag(inode, FI_ATOMIC_COMMITTED); + + /* + * inode may have no FI_ATOMIC_DIRTIED flag due to no write + * before commit. + */ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { + /* clear atomic dirty status and set vfs dirty status */ clear_inode_flag(inode, FI_ATOMIC_DIRTIED); f2fs_mark_inode_dirty_sync(inode, true); } @@ -424,7 +433,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) if (need && excess_cached_nats(sbi)) f2fs_balance_fs_bg(sbi, false); - if (!f2fs_is_checkpoint_ready(sbi)) + if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) return; /* @@ -446,7 +455,8 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) } else { struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO, - .init_gc_type = BG_GC, + .init_gc_type = f2fs_sb_has_blkzoned(sbi) ? 
+ FG_GC : BG_GC, .no_bg_gc = true, .should_migrate_blocks = false, .err_gc_skipped = false, @@ -763,7 +773,7 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); /* need not be added */ - if (IS_CURSEG(sbi, segno)) + if (is_curseg(sbi, segno)) return; if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) @@ -790,7 +800,7 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, !valid_blocks) || valid_blocks == CAP_BLKS_PER_SEC(sbi)); - if (!IS_CURSEC(sbi, secno)) + if (!is_cursec(sbi, secno)) set_bit(secno, dirty_i->dirty_secmap); } } @@ -829,7 +839,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, return; } - if (!IS_CURSEC(sbi, secno)) + if (!is_cursec(sbi, secno)) set_bit(secno, dirty_i->dirty_secmap); } } @@ -846,7 +856,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) unsigned short valid_blocks, ckpt_valid_blocks; unsigned int usable_blocks; - if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) + if (segno == NULL_SEGNO || is_curseg(sbi, segno)) return; usable_blocks = f2fs_usable_blks_in_seg(sbi, segno); @@ -879,7 +889,7 @@ void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi) for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { if (get_valid_blocks(sbi, segno, false)) continue; - if (IS_CURSEG(sbi, segno)) + if (is_curseg(sbi, segno)) continue; __locate_dirty_segment(sbi, segno, PRE); __remove_dirty_segment(sbi, segno, DIRTY); @@ -2098,7 +2108,7 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc, if (!force) { if (!f2fs_realtime_discard_enable(sbi) || (!se->valid_blocks && - !IS_CURSEG(sbi, cpc->trim_start)) || + !is_curseg(sbi, cpc->trim_start)) || SM_I(sbi)->dcc_info->nr_discards >= SM_I(sbi)->dcc_info->max_discards) return false; @@ -2226,7 +2236,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, next: secno = GET_SEC_FROM_SEG(sbi, start); start_segno = GET_SEG_FROM_SEC(sbi, secno); - if (!IS_CURSEC(sbi, secno) && + if (!is_cursec(sbi, secno) && !get_valid_blocks(sbi, start, true)) f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno), BLKS_PER_SEC(sbi)); @@ -2438,7 +2448,7 @@ static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr, * that the consecutive input blocks belong to the same segment. */ static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se, - block_t blkaddr, unsigned int offset, int del) + unsigned int segno, block_t blkaddr, unsigned int offset, int del) { bool exist; #ifdef CONFIG_F2FS_CHECK_FS @@ -2483,15 +2493,21 @@ static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_ent f2fs_test_and_clear_bit(offset + i, se->discard_map)) sbi->discard_blks++; - if (!f2fs_test_bit(offset + i, se->ckpt_valid_map)) + if (!f2fs_test_bit(offset + i, se->ckpt_valid_map)) { se->ckpt_valid_blocks -= 1; + if (__is_large_section(sbi)) + get_sec_entry(sbi, segno)->ckpt_valid_blocks -= 1; + } } + if (__is_large_section(sbi)) + sanity_check_valid_blocks(sbi, segno); + return del; } static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se, - block_t blkaddr, unsigned int offset, int del) + unsigned int segno, block_t blkaddr, unsigned int offset, int del) { bool exist; #ifdef CONFIG_F2FS_CHECK_FS @@ -2524,12 +2540,21 @@ static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry * or newly invalidated. 
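/*
 * Standalone model — not part of the patch — of the per-section
 * ckpt_valid_blocks mirror added above: the release/alloc paths update the
 * section counter alongside the per-segment one, so get_ckpt_valid_blocks()
 * becomes a constant-time read instead of a sum over SEGS_PER_SEC segment
 * entries (see the segment.h hunk later in this diff). The constant and
 * "model_" types are illustrative stand-ins.
 */
#define MODEL_SEGS_PER_SEC 4	/* stand-in for SEGS_PER_SEC(sbi) */

struct model_seg_entry { unsigned int ckpt_valid_blocks; };
struct model_sec_entry { unsigned int ckpt_valid_blocks; };

static void model_release_block(struct model_seg_entry *se,
				struct model_sec_entry *sec)
{
	se->ckpt_valid_blocks -= 1;
	sec->ckpt_valid_blocks -= 1;	/* keep the section mirror in sync */
}

/* Recompute from scratch; the patch's sanity_check_valid_blocks() compares
 * such a recount against the mirror under CONFIG_F2FS_CHECK_FS. */
static unsigned int model_recount(const struct model_seg_entry *seg)
{
	unsigned int i, blocks = 0;

	for (i = 0; i < MODEL_SEGS_PER_SEC; i++)
		blocks += seg[i].ckpt_valid_blocks;
	return blocks;
}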
*/ if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { - if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) + if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) { se->ckpt_valid_blocks++; + if (__is_large_section(sbi)) + get_sec_entry(sbi, segno)->ckpt_valid_blocks++; + } } - if (!f2fs_test_bit(offset, se->ckpt_valid_map)) + if (!f2fs_test_bit(offset, se->ckpt_valid_map)) { se->ckpt_valid_blocks += del; + if (__is_large_section(sbi)) + get_sec_entry(sbi, segno)->ckpt_valid_blocks += del; + } + + if (__is_large_section(sbi)) + sanity_check_valid_blocks(sbi, segno); return del; } @@ -2560,9 +2585,9 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) /* Update valid block bitmap */ if (del > 0) { - del = update_sit_entry_for_alloc(sbi, se, blkaddr, offset, del); + del = update_sit_entry_for_alloc(sbi, se, segno, blkaddr, offset, del); } else { - del = update_sit_entry_for_release(sbi, se, blkaddr, offset, del); + del = update_sit_entry_for_release(sbi, se, segno, blkaddr, offset, del); } __mark_sit_entry_dirty(sbi, segno); @@ -2675,23 +2700,23 @@ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) } /* - * Caller should put this summary page + * Caller should put this summary folio */ -struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) +struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno) { if (unlikely(f2fs_cp_error(sbi))) return ERR_PTR(-EIO); - return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno)); + return f2fs_get_meta_folio_retry(sbi, GET_SUM_BLOCK(sbi, segno)); } void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr) { - struct page *page = f2fs_grab_meta_page(sbi, blk_addr); + struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr); - memcpy(page_address(page), src, PAGE_SIZE); - set_page_dirty(page); - f2fs_put_page(page, 1); + memcpy(folio_address(folio), src, PAGE_SIZE); + folio_mark_dirty(folio); + f2fs_folio_put(folio, true); } static void write_sum_page(struct f2fs_sb_info *sbi, @@ -2704,11 +2729,11 @@ static void write_current_sum_page(struct f2fs_sb_info *sbi, int type, block_t blk_addr) { struct curseg_info *curseg = CURSEG_I(sbi, type); - struct page *page = f2fs_grab_meta_page(sbi, blk_addr); + struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr); struct f2fs_summary_block *src = curseg->sum_blk; struct f2fs_summary_block *dst; - dst = (struct f2fs_summary_block *)page_address(page); + dst = folio_address(folio); memset(dst, 0, PAGE_SIZE); mutex_lock(&curseg->curseg_mutex); @@ -2722,8 +2747,8 @@ static void write_current_sum_page(struct f2fs_sb_info *sbi, mutex_unlock(&curseg->curseg_mutex); - set_page_dirty(page); - f2fs_put_page(page, 1); + folio_mark_dirty(folio); + f2fs_folio_put(folio, true); } static int is_next_segment_free(struct f2fs_sb_info *sbi, @@ -2777,7 +2802,7 @@ static int get_new_segment(struct f2fs_sb_info *sbi, if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning) segno = 0; else - segno = max(sbi->first_zoned_segno, *newseg); + segno = max(sbi->first_seq_zone_segno, *newseg); hint = GET_SEC_FROM_SEG(sbi, segno); } #endif @@ -2789,7 +2814,7 @@ find_other_zone: if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) { /* Write only to sequential zones */ if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) { - hint = GET_SEC_FROM_SEG(sbi, sbi->first_zoned_segno); + hint = GET_SEC_FROM_SEG(sbi, sbi->first_seq_zone_segno); secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), 
hint); } else secno = find_first_zero_bit(free_i->free_secmap, @@ -2836,11 +2861,15 @@ find_other_zone: } got_it: /* set it as dirty segment in free segmap */ - f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); + if (test_bit(segno, free_i->free_segmap)) { + ret = -EFSCORRUPTED; + f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_CORRUPTED_FREE_BITMAP); + goto out_unlock; + } - /* no free section in conventional zone */ + /* no free section in conventional device or conventional zone */ if (new_sec && pinning && - !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) { + f2fs_is_sequential_zone_area(sbi, START_BLOCK(sbi, segno))) { ret = -EAGAIN; goto out_unlock; } @@ -2997,7 +3026,7 @@ static int change_curseg(struct f2fs_sb_info *sbi, int type) struct curseg_info *curseg = CURSEG_I(sbi, type); unsigned int new_segno = curseg->next_segno; struct f2fs_summary_block *sum_node; - struct page *sum_page; + struct folio *sum_folio; if (curseg->inited) write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno)); @@ -3013,15 +3042,15 @@ static int change_curseg(struct f2fs_sb_info *sbi, int type) curseg->alloc_type = SSR; curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0); - sum_page = f2fs_get_sum_page(sbi, new_segno); - if (IS_ERR(sum_page)) { + sum_folio = f2fs_get_sum_folio(sbi, new_segno); + if (IS_ERR(sum_folio)) { /* GC won't be able to use stale summary pages by cp_error */ memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE); - return PTR_ERR(sum_page); + return PTR_ERR(sum_folio); } - sum_node = (struct f2fs_summary_block *)page_address(sum_page); + sum_node = folio_address(sum_folio); memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); - f2fs_put_page(sum_page, 1); + f2fs_folio_put(sum_folio, true); return 0; } @@ -3311,7 +3340,7 @@ retry: if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) { f2fs_down_write(&sbi->gc_lock); - err = f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), + err = f2fs_gc_range(sbi, 0, sbi->first_seq_zone_segno - 1, true, ZONED_PIN_SEC_REQUIRED_COUNT); f2fs_up_write(&sbi->gc_lock); @@ -3584,14 +3613,14 @@ static int __get_segment_type_2(struct f2fs_io_info *fio) static int __get_segment_type_4(struct f2fs_io_info *fio) { if (fio->type == DATA) { - struct inode *inode = fio->page->mapping->host; + struct inode *inode = fio_inode(fio); if (S_ISDIR(inode->i_mode)) return CURSEG_HOT_DATA; else return CURSEG_COLD_DATA; } else { - if (IS_DNODE(fio->page) && is_cold_node(fio->page)) + if (IS_DNODE(fio->folio) && is_cold_node(fio->folio)) return CURSEG_WARM_NODE; else return CURSEG_COLD_NODE; @@ -3618,7 +3647,7 @@ static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs) static int __get_segment_type_6(struct f2fs_io_info *fio) { if (fio->type == DATA) { - struct inode *inode = fio->page->mapping->host; + struct inode *inode = fio_inode(fio); int type; if (is_inode_flag_set(inode, FI_ALIGNED_WRITE)) @@ -3637,8 +3666,7 @@ static int __get_segment_type_6(struct f2fs_io_info *fio) if (file_is_cold(inode) || f2fs_need_compress_data(inode)) return CURSEG_COLD_DATA; - type = __get_age_segment_type(inode, - page_folio(fio->page)->index); + type = __get_age_segment_type(inode, fio->folio->index); if (type != NO_CHECK_TYPE) return type; @@ -3649,8 +3677,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio) return f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode), inode->i_write_hint); } else { - if (IS_DNODE(fio->page)) - return is_cold_node(fio->page) ? 
CURSEG_WARM_NODE : + if (IS_DNODE(fio->folio)) + return is_cold_node(fio->folio) ? CURSEG_WARM_NODE : CURSEG_HOT_NODE; return CURSEG_COLD_NODE; } @@ -3718,7 +3746,7 @@ static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi, get_random_u32_inclusive(1, sbi->max_fragment_hole); } -int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, +int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct folio *folio, block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type, struct f2fs_io_info *fio) @@ -3822,10 +3850,10 @@ skip_new_segment: up_write(&sit_i->sentry_lock); - if (page && IS_NODESEG(curseg->seg_type)) { - fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); + if (folio && IS_NODESEG(curseg->seg_type)) { + fill_node_footer_blkaddr(folio, NEXT_FREE_BLKADDR(sbi, curseg)); - f2fs_inode_chksum_set(sbi, page); + f2fs_inode_chksum_set(sbi, folio); } if (fio) { @@ -3903,7 +3931,7 @@ static int log_type_to_seg_type(enum log_type type) static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) { - struct folio *folio = page_folio(fio->page); + struct folio *folio = fio->folio; enum log_type type = __get_segment_type(fio); int seg_type = log_type_to_seg_type(type); bool keep_order = (f2fs_lfs_mode(fio->sbi) && @@ -3912,15 +3940,21 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) if (keep_order) f2fs_down_read(&fio->sbi->io_order_lock); - if (f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, + if (f2fs_allocate_data_block(fio->sbi, folio, fio->old_blkaddr, &fio->new_blkaddr, sum, type, fio)) { if (fscrypt_inode_uses_fs_layer_crypto(folio->mapping->host)) fscrypt_finalize_bounce_page(&fio->encrypted_page); folio_end_writeback(folio); if (f2fs_in_warm_node_list(fio->sbi, folio)) - f2fs_del_fsync_node_entry(fio->sbi, fio->page); + f2fs_del_fsync_node_entry(fio->sbi, folio); + f2fs_bug_on(fio->sbi, !is_set_ckpt_flags(fio->sbi, + CP_ERROR_FLAG)); goto out; } + + f2fs_bug_on(fio->sbi, !f2fs_is_valid_blkaddr_raw(fio->sbi, + fio->new_blkaddr, DATA_GENERIC_ENHANCE)); + if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr, 1); @@ -3944,7 +3978,7 @@ void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio, .op_flags = REQ_SYNC | REQ_META | REQ_PRIO, .old_blkaddr = folio->index, .new_blkaddr = folio->index, - .page = folio_page(folio, 0), + .folio = folio, .encrypted_page = NULL, .in_list = 0, }; @@ -4023,7 +4057,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio) if (!err) { f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1); - f2fs_update_iostat(fio->sbi, fio->page->mapping->host, + f2fs_update_iostat(fio->sbi, fio_inode(fio), fio->io_type, F2FS_BLKSIZE); } @@ -4072,14 +4106,14 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, if (!recover_curseg) { /* for recovery flow */ - if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { + if (se->valid_blocks == 0 && !is_curseg(sbi, segno)) { if (old_blkaddr == NULL_ADDR) type = CURSEG_COLD_DATA; else type = CURSEG_WARM_DATA; } } else { - if (IS_CURSEG(sbi, segno)) { + if (is_curseg(sbi, segno)) { /* se->type is volatile as SSR allocation */ type = __f2fs_get_curseg(sbi, segno); f2fs_bug_on(sbi, type == NO_CHECK_TYPE); @@ -4163,9 +4197,9 @@ void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type, struct f2fs_sb_info *sbi = F2FS_F_SB(folio); /* submit cached LFS IO */ - 
f2fs_submit_merged_write_cond(sbi, NULL, &folio->page, 0, type); + f2fs_submit_merged_write_cond(sbi, NULL, folio, 0, type); /* submit cached IPU IO */ - f2fs_submit_merged_ipu_write(sbi, NULL, &folio->page); + f2fs_submit_merged_ipu_write(sbi, NULL, folio); if (ordered) { folio_wait_writeback(folio); f2fs_bug_on(sbi, locked && folio_test_writeback(folio)); @@ -4178,7 +4212,7 @@ void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type, void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct page *cpage; + struct folio *cfolio; if (!f2fs_meta_inode_gc_required(inode)) return; @@ -4186,10 +4220,10 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr) if (!__is_valid_data_blkaddr(blkaddr)) return; - cpage = find_lock_page(META_MAPPING(sbi), blkaddr); - if (cpage) { - f2fs_wait_on_page_writeback(cpage, DATA, true, true); - f2fs_put_page(cpage, 1); + cfolio = filemap_lock_folio(META_MAPPING(sbi), blkaddr); + if (!IS_ERR(cfolio)) { + f2fs_folio_wait_writeback(cfolio, DATA, true, true); + f2fs_folio_put(cfolio, true); } } @@ -4213,16 +4247,16 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi) struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct curseg_info *seg_i; unsigned char *kaddr; - struct page *page; + struct folio *folio; block_t start; int i, j, offset; start = start_sum_block(sbi); - page = f2fs_get_meta_page(sbi, start++); - if (IS_ERR(page)) - return PTR_ERR(page); - kaddr = (unsigned char *)page_address(page); + folio = f2fs_get_meta_folio(sbi, start++); + if (IS_ERR(folio)) + return PTR_ERR(folio); + kaddr = folio_address(folio); /* Step 1: restore nat cache */ seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); @@ -4259,17 +4293,16 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi) SUM_FOOTER_SIZE) continue; - f2fs_put_page(page, 1); - page = NULL; + f2fs_folio_put(folio, true); - page = f2fs_get_meta_page(sbi, start++); - if (IS_ERR(page)) - return PTR_ERR(page); - kaddr = (unsigned char *)page_address(page); + folio = f2fs_get_meta_folio(sbi, start++); + if (IS_ERR(folio)) + return PTR_ERR(folio); + kaddr = folio_address(folio); offset = 0; } } - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); return 0; } @@ -4278,7 +4311,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_summary_block *sum; struct curseg_info *curseg; - struct page *new; + struct folio *new; unsigned short blk_off; unsigned int segno = 0; block_t blk_addr = 0; @@ -4305,10 +4338,10 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) blk_addr = GET_SUM_BLOCK(sbi, segno); } - new = f2fs_get_meta_page(sbi, blk_addr); + new = f2fs_get_meta_folio(sbi, blk_addr); if (IS_ERR(new)) return PTR_ERR(new); - sum = (struct f2fs_summary_block *)page_address(new); + sum = folio_address(new); if (IS_NODESEG(type)) { if (__exist_node_summaries(sbi)) { @@ -4343,7 +4376,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) curseg->next_blkoff = blk_off; mutex_unlock(&curseg->curseg_mutex); out: - f2fs_put_page(new, 1); + f2fs_folio_put(new, true); return err; } @@ -4392,15 +4425,15 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi) static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) { - struct page *page; + struct folio *folio; unsigned char *kaddr; struct f2fs_summary *summary; struct curseg_info *seg_i; int written_size = 0; int i, j; - 
page = f2fs_grab_meta_page(sbi, blkaddr++); - kaddr = (unsigned char *)page_address(page); + folio = f2fs_grab_meta_folio(sbi, blkaddr++); + kaddr = folio_address(folio); memset(kaddr, 0, PAGE_SIZE); /* Step 1: write nat cache */ @@ -4417,9 +4450,9 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { seg_i = CURSEG_I(sbi, i); for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) { - if (!page) { - page = f2fs_grab_meta_page(sbi, blkaddr++); - kaddr = (unsigned char *)page_address(page); + if (!folio) { + folio = f2fs_grab_meta_folio(sbi, blkaddr++); + kaddr = folio_address(folio); memset(kaddr, 0, PAGE_SIZE); written_size = 0; } @@ -4431,14 +4464,14 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) SUM_FOOTER_SIZE) continue; - set_page_dirty(page); - f2fs_put_page(page, 1); - page = NULL; + folio_mark_dirty(folio); + f2fs_folio_put(folio, true); + folio = NULL; } } - if (page) { - set_page_dirty(page); - f2fs_put_page(page, 1); + if (folio) { + folio_mark_dirty(folio); + f2fs_folio_put(folio, true); } } @@ -4491,29 +4524,29 @@ int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, return -1; } -static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, +static struct folio *get_current_sit_folio(struct f2fs_sb_info *sbi, unsigned int segno) { - return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno)); + return f2fs_get_meta_folio(sbi, current_sit_addr(sbi, segno)); } -static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, +static struct folio *get_next_sit_folio(struct f2fs_sb_info *sbi, unsigned int start) { struct sit_info *sit_i = SIT_I(sbi); - struct page *page; + struct folio *folio; pgoff_t src_off, dst_off; src_off = current_sit_addr(sbi, start); dst_off = next_sit_addr(sbi, src_off); - page = f2fs_grab_meta_page(sbi, dst_off); - seg_info_to_sit_page(sbi, page, start); + folio = f2fs_grab_meta_folio(sbi, dst_off); + seg_info_to_sit_folio(sbi, folio, start); - set_page_dirty(page); + folio_mark_dirty(folio); set_to_next_sit(sit_i, start); - return page; + return folio; } static struct sit_entry_set *grab_sit_entry_set(void) @@ -4643,7 +4676,7 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) * #2, flush sit entries to sit page. 
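/*
 * Standalone model — not part of the patch — of the SIT ping-pong that
 * get_next_sit_folio() above relies on: each SIT block has two on-disk
 * copies, a flush writes the inactive copy (next_sit_addr), and
 * set_to_next_sit() flips a bitmap bit so that copy becomes current.
 * Addresses and array sizes here are illustrative stand-ins.
 */
#include <stdbool.h>

#define MODEL_SIT_BLOCKS 8

static bool sit_toggle[MODEL_SIT_BLOCKS];		/* models sit_i->sit_bitmap */
static const unsigned int sit_base[2] = { 1000, 1008 };	/* the two SIT copies */

static unsigned int model_current_sit_addr(unsigned int blkno)
{
	return sit_base[sit_toggle[blkno]] + blkno;
}

static unsigned int model_next_sit_addr(unsigned int blkno)
{
	return sit_base[!sit_toggle[blkno]] + blkno;	/* copy a flush writes to */
}

static void model_set_to_next_sit(unsigned int blkno)
{
	sit_toggle[blkno] = !sit_toggle[blkno];		/* make the fresh copy current */
}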
*/ list_for_each_entry_safe(ses, tmp, head, set_list) { - struct page *page = NULL; + struct folio *folio = NULL; struct f2fs_sit_block *raw_sit = NULL; unsigned int start_segno = ses->start_segno; unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK, @@ -4657,8 +4690,8 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) if (to_journal) { down_write(&curseg->journal_rwsem); } else { - page = get_next_sit_page(sbi, start_segno); - raw_sit = page_address(page); + folio = get_next_sit_folio(sbi, start_segno); + raw_sit = folio_address(folio); } /* flush dirty sit entries in region of current sit set */ @@ -4696,6 +4729,12 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) &raw_sit->entries[sit_offset]); } + /* update ckpt_valid_block */ + if (__is_large_section(sbi)) { + set_ckpt_valid_blocks(sbi, segno); + sanity_check_valid_blocks(sbi, segno); + } + __clear_bit(segno, bitmap); sit_i->dirty_sentries--; ses->entry_cnt--; @@ -4704,7 +4743,7 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) if (to_journal) up_write(&curseg->journal_rwsem); else - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); f2fs_bug_on(sbi, ses->entry_cnt); release_sit_entry_set(ses); @@ -4916,15 +4955,15 @@ static int build_sit_entries(struct f2fs_sb_info *sbi) for (; start < end && start < MAIN_SEGS(sbi); start++) { struct f2fs_sit_block *sit_blk; - struct page *page; + struct folio *folio; se = &sit_i->sentries[start]; - page = get_current_sit_page(sbi, start); - if (IS_ERR(page)) - return PTR_ERR(page); - sit_blk = (struct f2fs_sit_block *)page_address(page); + folio = get_current_sit_folio(sbi, start); + if (IS_ERR(folio)) + return PTR_ERR(folio); + sit_blk = folio_address(folio); sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); err = check_block_count(sbi, start, &sit); if (err) @@ -5017,6 +5056,16 @@ init_discard_map_done: } up_read(&curseg->journal_rwsem); + /* update ckpt_valid_block */ + if (__is_large_section(sbi)) { + unsigned int segno; + + for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) { + set_ckpt_valid_blocks(sbi, segno); + sanity_check_valid_blocks(sbi, segno); + } + } + if (err) return err; @@ -5100,7 +5149,7 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi) if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi)) continue; - if (IS_CURSEC(sbi, secno)) + if (is_cursec(sbi, secno)) continue; set_bit(secno, dirty_i->dirty_secmap); } @@ -5236,7 +5285,7 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi, * Get # of valid block of the zone. 
*/ valid_block_cnt = get_valid_blocks(sbi, zone_segno, true); - if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) { + if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) { f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]", zone_segno, valid_block_cnt, blk_zone_cond_str(zone->cond)); @@ -5763,9 +5812,9 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi) kvfree(sit_i->dirty_sentries_bitmap); SM_I(sbi)->sit_info = NULL; - kvfree(sit_i->sit_bitmap); + kfree(sit_i->sit_bitmap); #ifdef CONFIG_F2FS_CHECK_FS - kvfree(sit_i->sit_bitmap_mir); + kfree(sit_i->sit_bitmap_mir); kvfree(sit_i->invalid_segmap); #endif kfree(sit_i); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 0465dc00b349..5e2ee5c686b1 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -34,34 +34,6 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi, f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG); } -#define IS_CURSEG(sbi, seg) \ - (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \ - ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \ - ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \ - ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \ - ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \ - ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) || \ - ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) || \ - ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno)) - -#define IS_CURSEC(sbi, secno) \ - (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \ - SEGS_PER_SEC(sbi)) || \ - ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \ - SEGS_PER_SEC(sbi)) || \ - ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \ - SEGS_PER_SEC(sbi)) || \ - ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \ - SEGS_PER_SEC(sbi)) || \ - ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \ - SEGS_PER_SEC(sbi)) || \ - ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \ - SEGS_PER_SEC(sbi)) || \ - ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \ - SEGS_PER_SEC(sbi)) || \ - ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \ - SEGS_PER_SEC(sbi))) - #define MAIN_BLKADDR(sbi) \ (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \ le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr)) @@ -102,6 +74,8 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi, #define CAP_SEGS_PER_SEC(sbi) \ (SEGS_PER_SEC(sbi) - \ BLKS_TO_SEGS(sbi, (sbi)->unusable_blocks_per_sec)) +#define GET_START_SEG_FROM_SEC(sbi, segno) \ + (rounddown(segno, SEGS_PER_SEC(sbi))) #define GET_SEC_FROM_SEG(sbi, segno) \ (((segno) == -1) ? 
-1 : (segno) / SEGS_PER_SEC(sbi)) #define GET_SEG_FROM_SEC(sbi, secno) \ @@ -209,6 +183,7 @@ struct seg_entry { struct sec_entry { unsigned int valid_blocks; /* # of valid blocks in a section */ + unsigned int ckpt_valid_blocks; /* # of valid blocks last cp in a section */ }; #define MAX_SKIP_GC_COUNT 16 @@ -315,6 +290,28 @@ static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type) return (struct curseg_info *)(SM_I(sbi)->curseg_array + type); } +static inline bool is_curseg(struct f2fs_sb_info *sbi, unsigned int segno) +{ + int i; + + for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) { + if (segno == CURSEG_I(sbi, i)->segno) + return true; + } + return false; +} + +static inline bool is_cursec(struct f2fs_sb_info *sbi, unsigned int secno) +{ + int i; + + for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) { + if (secno == GET_SEC_FROM_SEG(sbi, CURSEG_I(sbi, i)->segno)) + return true; + } + return false; +} + static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi, unsigned int segno) { @@ -345,22 +342,57 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi, static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi, unsigned int segno, bool use_section) { - if (use_section && __is_large_section(sbi)) { - unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); - unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); - unsigned int blocks = 0; - int i; + if (use_section && __is_large_section(sbi)) + return get_sec_entry(sbi, segno)->ckpt_valid_blocks; + else + return get_seg_entry(sbi, segno)->ckpt_valid_blocks; +} - for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) { - struct seg_entry *se = get_seg_entry(sbi, start_segno); +static inline void set_ckpt_valid_blocks(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); + unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); + unsigned int blocks = 0; + int i; - blocks += se->ckpt_valid_blocks; - } - return blocks; + for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) { + struct seg_entry *se = get_seg_entry(sbi, start_segno); + + blocks += se->ckpt_valid_blocks; } - return get_seg_entry(sbi, segno)->ckpt_valid_blocks; + get_sec_entry(sbi, segno)->ckpt_valid_blocks = blocks; } +#ifdef CONFIG_F2FS_CHECK_FS +static inline void sanity_check_valid_blocks(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); + unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); + unsigned int blocks = 0; + int i; + + for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) { + struct seg_entry *se = get_seg_entry(sbi, start_segno); + + blocks += se->ckpt_valid_blocks; + } + + if (blocks != get_sec_entry(sbi, segno)->ckpt_valid_blocks) { + f2fs_err(sbi, + "Inconsistent ckpt valid blocks: " + "seg entry(%d) vs sec entry(%d) at secno %d", + blocks, get_sec_entry(sbi, segno)->ckpt_valid_blocks, secno); + f2fs_bug_on(sbi, 1); + } +} +#else +static inline void sanity_check_valid_blocks(struct f2fs_sb_info *sbi, + unsigned int segno) +{ +} +#endif static inline void seg_info_from_raw_sit(struct seg_entry *se, struct f2fs_sit_entry *rs) { @@ -385,8 +417,8 @@ static inline void __seg_info_to_raw_sit(struct seg_entry *se, rs->mtime = cpu_to_le64(se->mtime); } -static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi, - struct page *page, unsigned int start) +static inline void seg_info_to_sit_folio(struct f2fs_sb_info *sbi, + struct folio *folio, unsigned int start) { struct f2fs_sit_block 
*raw_sit; struct seg_entry *se; @@ -395,7 +427,7 @@ static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi, (unsigned long)MAIN_SEGS(sbi)); int i; - raw_sit = (struct f2fs_sit_block *)page_address(page); + raw_sit = folio_address(folio); memset(raw_sit, 0, PAGE_SIZE); for (i = 0; i < end - start; i++) { rs = &raw_sit->entries[i]; @@ -429,7 +461,6 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); unsigned int next; - unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi); spin_lock(&free_i->segmap_lock); clear_bit(segno, free_i->free_segmap); @@ -437,7 +468,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) next = find_next_bit(free_i->free_segmap, start_segno + SEGS_PER_SEC(sbi), start_segno); - if (next >= start_segno + usable_segs) { + if (next >= start_segno + f2fs_usable_segs_in_sec(sbi)) { clear_bit(secno, free_i->free_secmap); free_i->free_sections++; } @@ -463,22 +494,36 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi, unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); unsigned int next; - unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi); + bool ret; spin_lock(&free_i->segmap_lock); - if (test_and_clear_bit(segno, free_i->free_segmap)) { - free_i->free_segments++; - - if (!inmem && IS_CURSEC(sbi, secno)) - goto skip_free; - next = find_next_bit(free_i->free_segmap, - start_segno + SEGS_PER_SEC(sbi), start_segno); - if (next >= start_segno + usable_segs) { - if (test_and_clear_bit(secno, free_i->free_secmap)) - free_i->free_sections++; - } - } -skip_free: + ret = test_and_clear_bit(segno, free_i->free_segmap); + if (!ret) + goto unlock_out; + + free_i->free_segments++; + + if (!inmem && is_cursec(sbi, secno)) + goto unlock_out; + + /* check large section */ + next = find_next_bit(free_i->free_segmap, + start_segno + SEGS_PER_SEC(sbi), start_segno); + if (next < start_segno + f2fs_usable_segs_in_sec(sbi)) + goto unlock_out; + + ret = test_and_clear_bit(secno, free_i->free_secmap); + if (!ret) + goto unlock_out; + + free_i->free_sections++; + + if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[BG_GC]) == secno) + sbi->next_victim_seg[BG_GC] = NULL_SEGNO; + if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[FG_GC]) == secno) + sbi->next_victim_seg[FG_GC] = NULL_SEGNO; + +unlock_out: spin_unlock(&free_i->segmap_lock); } @@ -569,8 +614,14 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi, if (unlikely(segno == NULL_SEGNO)) return false; - left_blocks = CAP_BLKS_PER_SEC(sbi) - - get_ckpt_valid_blocks(sbi, segno, true); + if (f2fs_lfs_mode(sbi) && __is_large_section(sbi)) { + left_blocks = CAP_BLKS_PER_SEC(sbi) - + SEGS_TO_BLKS(sbi, (segno - GET_START_SEG_FROM_SEC(sbi, segno))) - + CURSEG_I(sbi, i)->next_blkoff; + } else { + left_blocks = CAP_BLKS_PER_SEC(sbi) - + get_ckpt_valid_blocks(sbi, segno, true); + } blocks = i <= CURSEG_COLD_DATA ? 
data_blocks : node_blocks; if (blocks > left_blocks) @@ -583,8 +634,15 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi, if (unlikely(segno == NULL_SEGNO)) return false; - left_blocks = CAP_BLKS_PER_SEC(sbi) - - get_ckpt_valid_blocks(sbi, segno, true); + if (f2fs_lfs_mode(sbi) && __is_large_section(sbi)) { + left_blocks = CAP_BLKS_PER_SEC(sbi) - + SEGS_TO_BLKS(sbi, (segno - GET_START_SEG_FROM_SEC(sbi, segno))) - + CURSEG_I(sbi, CURSEG_HOT_DATA)->next_blkoff; + } else { + left_blocks = CAP_BLKS_PER_SEC(sbi) - + get_ckpt_valid_blocks(sbi, segno, true); + } + if (dent_blocks > left_blocks) return false; return true; @@ -610,8 +668,7 @@ static inline void __get_secs_required(struct f2fs_sb_info *sbi, unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi); unsigned int data_blocks = 0; - if (f2fs_lfs_mode(sbi) && - unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { + if (f2fs_lfs_mode(sbi)) { total_data_blocks = get_pages(sbi, F2FS_DIRTY_DATA); data_secs = total_data_blocks / CAP_BLKS_PER_SEC(sbi); data_blocks = total_data_blocks % CAP_BLKS_PER_SEC(sbi); @@ -620,7 +677,7 @@ static inline void __get_secs_required(struct f2fs_sb_info *sbi, if (lower_p) *lower_p = node_secs + dent_secs + data_secs; if (upper_p) - *upper_p = node_secs + dent_secs + + *upper_p = node_secs + dent_secs + data_secs + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0) + (data_blocks ? 1 : 0); if (curseg_p) @@ -922,7 +979,7 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type) static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno) { - if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno)) + if (is_cursec(sbi, secno) || (sbi->cur_victim_sec == secno)) return true; return false; } diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c index 9c8d3aee89af..b88babcf6ab4 100644 --- a/fs/f2fs/shrinker.c +++ b/fs/f2fs/shrinker.c @@ -184,10 +184,17 @@ static unsigned int do_reclaim_caches(struct f2fs_sb_info *sbi, if (!inode) continue; - len = fi->donate_end - fi->donate_start + 1; - npages = npages < len ? 0 : npages - len; - invalidate_inode_pages2_range(inode->i_mapping, + inode_lock(inode); + if (!is_inode_flag_set(inode, FI_DONATE_FINISHED)) { + len = fi->donate_end - fi->donate_start + 1; + npages = npages < len ? 
0 : npages - len; + + invalidate_inode_pages2_range(inode->i_mapping, fi->donate_start, fi->donate_end); + set_inode_flag(inode, FI_DONATE_FINISHED); + } + inode_unlock(inode); + iput(inode); cond_resched(); } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index f087b2b71c89..e16c4e2830c2 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -27,6 +27,8 @@ #include <linux/part_stat.h> #include <linux/zstd.h> #include <linux/lz4.h> +#include <linux/ctype.h> +#include <linux/fs_parser.h> #include "f2fs.h" #include "node.h" @@ -47,6 +49,7 @@ const char *f2fs_fault_name[FAULT_MAX] = { [FAULT_KVMALLOC] = "kvmalloc", [FAULT_PAGE_ALLOC] = "page alloc", [FAULT_PAGE_GET] = "page get", + [FAULT_ALLOC_BIO] = "alloc bio(obsolete)", [FAULT_ALLOC_NID] = "alloc nid", [FAULT_ORPHAN] = "orphan", [FAULT_BLOCK] = "no more block", @@ -64,32 +67,35 @@ const char *f2fs_fault_name[FAULT_MAX] = { [FAULT_BLKADDR_CONSISTENCE] = "inconsistent blkaddr", [FAULT_NO_SEGMENT] = "no free segment", [FAULT_INCONSISTENT_FOOTER] = "inconsistent footer", + [FAULT_TIMEOUT] = "timeout", + [FAULT_VMALLOC] = "vmalloc", }; int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate, - unsigned long type) + unsigned long type, enum fault_option fo) { struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; - if (rate) { + if (fo & FAULT_ALL) { + memset(ffi, 0, sizeof(struct f2fs_fault_info)); + return 0; + } + + if (fo & FAULT_RATE) { if (rate > INT_MAX) return -EINVAL; atomic_set(&ffi->inject_ops, 0); ffi->inject_rate = (int)rate; + f2fs_info(sbi, "build fault injection rate: %lu", rate); } - if (type) { + if (fo & FAULT_TYPE) { if (type >= BIT(FAULT_MAX)) return -EINVAL; ffi->inject_type = (unsigned int)type; + f2fs_info(sbi, "build fault injection type: 0x%lx", type); } - if (!rate && !type) - memset(ffi, 0, sizeof(struct f2fs_fault_info)); - else - f2fs_info(sbi, - "build fault injection attr: rate: %lu, type: 0x%lx", - rate, type); return 0; } #endif @@ -121,29 +127,20 @@ enum { Opt_disable_roll_forward, Opt_norecovery, Opt_discard, - Opt_nodiscard, Opt_noheap, Opt_heap, Opt_user_xattr, - Opt_nouser_xattr, Opt_acl, - Opt_noacl, Opt_active_logs, Opt_disable_ext_identify, Opt_inline_xattr, - Opt_noinline_xattr, Opt_inline_xattr_size, Opt_inline_data, Opt_inline_dentry, - Opt_noinline_dentry, Opt_flush_merge, - Opt_noflush_merge, Opt_barrier, - Opt_nobarrier, Opt_fastboot, Opt_extent_cache, - Opt_noextent_cache, - Opt_noinline_data, Opt_data_flush, Opt_reserve_root, Opt_resgid, @@ -152,21 +149,13 @@ enum { Opt_fault_injection, Opt_fault_type, Opt_lazytime, - Opt_nolazytime, Opt_quota, - Opt_noquota, Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_usrjquota, Opt_grpjquota, Opt_prjjquota, - Opt_offusrjquota, - Opt_offgrpjquota, - Opt_offprjjquota, - Opt_jqfmt_vfsold, - Opt_jqfmt_vfsv0, - Opt_jqfmt_vfsv1, Opt_alloc, Opt_fsync, Opt_test_dummy_encryption, @@ -176,107 +165,209 @@ enum { Opt_checkpoint_disable_cap_perc, Opt_checkpoint_enable, Opt_checkpoint_merge, - Opt_nocheckpoint_merge, Opt_compress_algorithm, Opt_compress_log_size, - Opt_compress_extension, Opt_nocompress_extension, + Opt_compress_extension, Opt_compress_chksum, Opt_compress_mode, Opt_compress_cache, Opt_atgc, Opt_gc_merge, - Opt_nogc_merge, Opt_discard_unit, Opt_memory_mode, Opt_age_extent_cache, Opt_errors, Opt_nat_bits, + Opt_jqfmt, + Opt_checkpoint, Opt_err, }; -static match_table_t f2fs_tokens = { - {Opt_gc_background, "background_gc=%s"}, - {Opt_disable_roll_forward, "disable_roll_forward"}, - {Opt_norecovery, "norecovery"}, - {Opt_discard, 
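/*
 * Standalone model — not part of the patch — of how the constant_table
 * arrays introduced just below are consumed. super.c now includes
 * <linux/fs_parser.h>, whose parser resolves an enum-valued mount option
 * string to its integer value (the kernel exposes this as
 * lookup_constant()). The table contents and enum values here are
 * illustrative stand-ins.
 */
#include <string.h>

struct model_constant_table { const char *name; int value; };

static const struct model_constant_table model_background_gc[] = {
	{ "on",   1 },	/* stands in for BGGC_MODE_ON */
	{ "off",  0 },	/* stands in for BGGC_MODE_OFF */
	{ "sync", 2 },	/* stands in for BGGC_MODE_SYNC */
	{}		/* empty terminator, as in the tables below */
};

static int model_lookup_constant(const struct model_constant_table *tbl,
				 const char *name, int not_found)
{
	for (; tbl->name; tbl++)
		if (!strcmp(tbl->name, name))
			return tbl->value;
	return not_found;
}

/* e.g. model_lookup_constant(model_background_gc, "sync", -1) returns 2 */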
"discard"}, - {Opt_nodiscard, "nodiscard"}, - {Opt_noheap, "no_heap"}, - {Opt_heap, "heap"}, - {Opt_user_xattr, "user_xattr"}, - {Opt_nouser_xattr, "nouser_xattr"}, - {Opt_acl, "acl"}, - {Opt_noacl, "noacl"}, - {Opt_active_logs, "active_logs=%u"}, - {Opt_disable_ext_identify, "disable_ext_identify"}, - {Opt_inline_xattr, "inline_xattr"}, - {Opt_noinline_xattr, "noinline_xattr"}, - {Opt_inline_xattr_size, "inline_xattr_size=%u"}, - {Opt_inline_data, "inline_data"}, - {Opt_inline_dentry, "inline_dentry"}, - {Opt_noinline_dentry, "noinline_dentry"}, - {Opt_flush_merge, "flush_merge"}, - {Opt_noflush_merge, "noflush_merge"}, - {Opt_barrier, "barrier"}, - {Opt_nobarrier, "nobarrier"}, - {Opt_fastboot, "fastboot"}, - {Opt_extent_cache, "extent_cache"}, - {Opt_noextent_cache, "noextent_cache"}, - {Opt_noinline_data, "noinline_data"}, - {Opt_data_flush, "data_flush"}, - {Opt_reserve_root, "reserve_root=%u"}, - {Opt_resgid, "resgid=%u"}, - {Opt_resuid, "resuid=%u"}, - {Opt_mode, "mode=%s"}, - {Opt_fault_injection, "fault_injection=%u"}, - {Opt_fault_type, "fault_type=%u"}, - {Opt_lazytime, "lazytime"}, - {Opt_nolazytime, "nolazytime"}, - {Opt_quota, "quota"}, - {Opt_noquota, "noquota"}, - {Opt_usrquota, "usrquota"}, - {Opt_grpquota, "grpquota"}, - {Opt_prjquota, "prjquota"}, - {Opt_usrjquota, "usrjquota=%s"}, - {Opt_grpjquota, "grpjquota=%s"}, - {Opt_prjjquota, "prjjquota=%s"}, - {Opt_offusrjquota, "usrjquota="}, - {Opt_offgrpjquota, "grpjquota="}, - {Opt_offprjjquota, "prjjquota="}, - {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, - {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, - {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, - {Opt_alloc, "alloc_mode=%s"}, - {Opt_fsync, "fsync_mode=%s"}, - {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, - {Opt_test_dummy_encryption, "test_dummy_encryption"}, - {Opt_inlinecrypt, "inlinecrypt"}, - {Opt_checkpoint_disable, "checkpoint=disable"}, - {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"}, - {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"}, - {Opt_checkpoint_enable, "checkpoint=enable"}, - {Opt_checkpoint_merge, "checkpoint_merge"}, - {Opt_nocheckpoint_merge, "nocheckpoint_merge"}, - {Opt_compress_algorithm, "compress_algorithm=%s"}, - {Opt_compress_log_size, "compress_log_size=%u"}, - {Opt_compress_extension, "compress_extension=%s"}, - {Opt_nocompress_extension, "nocompress_extension=%s"}, - {Opt_compress_chksum, "compress_chksum"}, - {Opt_compress_mode, "compress_mode=%s"}, - {Opt_compress_cache, "compress_cache"}, - {Opt_atgc, "atgc"}, - {Opt_gc_merge, "gc_merge"}, - {Opt_nogc_merge, "nogc_merge"}, - {Opt_discard_unit, "discard_unit=%s"}, - {Opt_memory_mode, "memory=%s"}, - {Opt_age_extent_cache, "age_extent_cache"}, - {Opt_errors, "errors=%s"}, - {Opt_nat_bits, "nat_bits"}, +static const struct constant_table f2fs_param_background_gc[] = { + {"on", BGGC_MODE_ON}, + {"off", BGGC_MODE_OFF}, + {"sync", BGGC_MODE_SYNC}, + {} +}; + +static const struct constant_table f2fs_param_mode[] = { + {"adaptive", FS_MODE_ADAPTIVE}, + {"lfs", FS_MODE_LFS}, + {"fragment:segment", FS_MODE_FRAGMENT_SEG}, + {"fragment:block", FS_MODE_FRAGMENT_BLK}, + {} +}; + +static const struct constant_table f2fs_param_jqfmt[] = { + {"vfsold", QFMT_VFS_OLD}, + {"vfsv0", QFMT_VFS_V0}, + {"vfsv1", QFMT_VFS_V1}, + {} +}; + +static const struct constant_table f2fs_param_alloc_mode[] = { + {"default", ALLOC_MODE_DEFAULT}, + {"reuse", ALLOC_MODE_REUSE}, + {} +}; +static const struct constant_table f2fs_param_fsync_mode[] = { + {"posix", FSYNC_MODE_POSIX}, + {"strict", FSYNC_MODE_STRICT}, + {"nobarrier", 
FSYNC_MODE_NOBARRIER}, + {} +}; + +static const struct constant_table f2fs_param_compress_mode[] = { + {"fs", COMPR_MODE_FS}, + {"user", COMPR_MODE_USER}, + {} +}; + +static const struct constant_table f2fs_param_discard_unit[] = { + {"block", DISCARD_UNIT_BLOCK}, + {"segment", DISCARD_UNIT_SEGMENT}, + {"section", DISCARD_UNIT_SECTION}, + {} +}; + +static const struct constant_table f2fs_param_memory_mode[] = { + {"normal", MEMORY_MODE_NORMAL}, + {"low", MEMORY_MODE_LOW}, + {} +}; + +static const struct constant_table f2fs_param_errors[] = { + {"remount-ro", MOUNT_ERRORS_READONLY}, + {"continue", MOUNT_ERRORS_CONTINUE}, + {"panic", MOUNT_ERRORS_PANIC}, + {} +}; + +static const struct fs_parameter_spec f2fs_param_specs[] = { + fsparam_enum("background_gc", Opt_gc_background, f2fs_param_background_gc), + fsparam_flag("disable_roll_forward", Opt_disable_roll_forward), + fsparam_flag("norecovery", Opt_norecovery), + fsparam_flag_no("discard", Opt_discard), + fsparam_flag("no_heap", Opt_noheap), + fsparam_flag("heap", Opt_heap), + fsparam_flag_no("user_xattr", Opt_user_xattr), + fsparam_flag_no("acl", Opt_acl), + fsparam_s32("active_logs", Opt_active_logs), + fsparam_flag("disable_ext_identify", Opt_disable_ext_identify), + fsparam_flag_no("inline_xattr", Opt_inline_xattr), + fsparam_s32("inline_xattr_size", Opt_inline_xattr_size), + fsparam_flag_no("inline_data", Opt_inline_data), + fsparam_flag_no("inline_dentry", Opt_inline_dentry), + fsparam_flag_no("flush_merge", Opt_flush_merge), + fsparam_flag_no("barrier", Opt_barrier), + fsparam_flag("fastboot", Opt_fastboot), + fsparam_flag_no("extent_cache", Opt_extent_cache), + fsparam_flag("data_flush", Opt_data_flush), + fsparam_u32("reserve_root", Opt_reserve_root), + fsparam_gid("resgid", Opt_resgid), + fsparam_uid("resuid", Opt_resuid), + fsparam_enum("mode", Opt_mode, f2fs_param_mode), + fsparam_s32("fault_injection", Opt_fault_injection), + fsparam_u32("fault_type", Opt_fault_type), + fsparam_flag_no("lazytime", Opt_lazytime), + fsparam_flag_no("quota", Opt_quota), + fsparam_flag("usrquota", Opt_usrquota), + fsparam_flag("grpquota", Opt_grpquota), + fsparam_flag("prjquota", Opt_prjquota), + fsparam_string_empty("usrjquota", Opt_usrjquota), + fsparam_string_empty("grpjquota", Opt_grpjquota), + fsparam_string_empty("prjjquota", Opt_prjjquota), + fsparam_flag("nat_bits", Opt_nat_bits), + fsparam_enum("jqfmt", Opt_jqfmt, f2fs_param_jqfmt), + fsparam_enum("alloc_mode", Opt_alloc, f2fs_param_alloc_mode), + fsparam_enum("fsync_mode", Opt_fsync, f2fs_param_fsync_mode), + fsparam_string("test_dummy_encryption", Opt_test_dummy_encryption), + fsparam_flag("test_dummy_encryption", Opt_test_dummy_encryption), + fsparam_flag("inlinecrypt", Opt_inlinecrypt), + fsparam_string("checkpoint", Opt_checkpoint), + fsparam_flag_no("checkpoint_merge", Opt_checkpoint_merge), + fsparam_string("compress_algorithm", Opt_compress_algorithm), + fsparam_u32("compress_log_size", Opt_compress_log_size), + fsparam_string("compress_extension", Opt_compress_extension), + fsparam_string("nocompress_extension", Opt_nocompress_extension), + fsparam_flag("compress_chksum", Opt_compress_chksum), + fsparam_enum("compress_mode", Opt_compress_mode, f2fs_param_compress_mode), + fsparam_flag("compress_cache", Opt_compress_cache), + fsparam_flag("atgc", Opt_atgc), + fsparam_flag_no("gc_merge", Opt_gc_merge), + fsparam_enum("discard_unit", Opt_discard_unit, f2fs_param_discard_unit), + fsparam_enum("memory", Opt_memory_mode, f2fs_param_memory_mode), + fsparam_flag("age_extent_cache", 
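/*
 * Note (standard fsparam_flag_no semantics, for illustration): each
 * fsparam_flag_no("foo", Opt_foo) entry above accepts both "foo" and
 * "nofoo" and reports the form used via result.negated, replacing the
 * old paired Opt_foo/Opt_nofoo tokens:
 *
 *	mount -o gc_merge	-> Opt_gc_merge, result.negated == false
 *	mount -o nogc_merge	-> Opt_gc_merge, result.negated == true
 */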
Opt_age_extent_cache), + fsparam_enum("errors", Opt_errors, f2fs_param_errors), + {} +}; + +/* Resort to a match_table for this interestingly formatted option */ +static match_table_t f2fs_checkpoint_tokens = { + {Opt_checkpoint_disable, "disable"}, + {Opt_checkpoint_disable_cap, "disable:%u"}, + {Opt_checkpoint_disable_cap_perc, "disable:%u%%"}, + {Opt_checkpoint_enable, "enable"}, {Opt_err, NULL}, }; +#define F2FS_SPEC_background_gc (1 << 0) +#define F2FS_SPEC_inline_xattr_size (1 << 1) +#define F2FS_SPEC_active_logs (1 << 2) +#define F2FS_SPEC_reserve_root (1 << 3) +#define F2FS_SPEC_resgid (1 << 4) +#define F2FS_SPEC_resuid (1 << 5) +#define F2FS_SPEC_mode (1 << 6) +#define F2FS_SPEC_fault_injection (1 << 7) +#define F2FS_SPEC_fault_type (1 << 8) +#define F2FS_SPEC_jqfmt (1 << 9) +#define F2FS_SPEC_alloc_mode (1 << 10) +#define F2FS_SPEC_fsync_mode (1 << 11) +#define F2FS_SPEC_checkpoint_disable_cap (1 << 12) +#define F2FS_SPEC_checkpoint_disable_cap_perc (1 << 13) +#define F2FS_SPEC_compress_level (1 << 14) +#define F2FS_SPEC_compress_algorithm (1 << 15) +#define F2FS_SPEC_compress_log_size (1 << 16) +#define F2FS_SPEC_compress_extension (1 << 17) +#define F2FS_SPEC_nocompress_extension (1 << 18) +#define F2FS_SPEC_compress_chksum (1 << 19) +#define F2FS_SPEC_compress_mode (1 << 20) +#define F2FS_SPEC_discard_unit (1 << 21) +#define F2FS_SPEC_memory_mode (1 << 22) +#define F2FS_SPEC_errors (1 << 23) + +struct f2fs_fs_context { + struct f2fs_mount_info info; + unsigned int opt_mask; /* Bits changed */ + unsigned int spec_mask; + unsigned short qname_mask; +}; + +#define F2FS_CTX_INFO(ctx) ((ctx)->info) + +static inline void ctx_set_opt(struct f2fs_fs_context *ctx, + unsigned int flag) +{ + ctx->info.opt |= flag; + ctx->opt_mask |= flag; +} + +static inline void ctx_clear_opt(struct f2fs_fs_context *ctx, + unsigned int flag) +{ + ctx->info.opt &= ~flag; + ctx->opt_mask |= flag; +} + +static inline bool ctx_test_opt(struct f2fs_fs_context *ctx, + unsigned int flag) +{ + return ctx->info.opt & flag; +} + void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, - const char *fmt, ...) + const char *fmt, ...) { struct va_format vaf; va_list args; @@ -288,11 +379,19 @@ void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, vaf.fmt = printk_skip_level(fmt); vaf.va = &args; if (limit_rate) - printk_ratelimited("%c%cF2FS-fs (%s): %pV\n", - KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); + if (sbi) + printk_ratelimited("%c%cF2FS-fs (%s): %pV\n", + KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); + else + printk_ratelimited("%c%cF2FS-fs: %pV\n", + KERN_SOH_ASCII, level, &vaf); else - printk("%c%cF2FS-fs (%s): %pV\n", - KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); + if (sbi) + printk("%c%cF2FS-fs (%s): %pV\n", + KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); + else + printk("%c%cF2FS-fs: %pV\n", + KERN_SOH_ASCII, level, &vaf); va_end(args); } @@ -386,159 +485,90 @@ static void init_once(void *foo) #ifdef CONFIG_QUOTA static const char * const quotatypes[] = INITQFNAMES; #define QTYPE2NAME(t) (quotatypes[t]) -static int f2fs_set_qf_name(struct f2fs_sb_info *sbi, int qtype, - substring_t *args) +/* + * Note the name of the specified quota file. 
+ */ +static int f2fs_note_qf_name(struct fs_context *fc, int qtype, + struct fs_parameter *param) { - struct super_block *sb = sbi->sb; + struct f2fs_fs_context *ctx = fc->fs_private; char *qname; - int ret = -EINVAL; - if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) { - f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); + if (param->size < 1) { + f2fs_err(NULL, "Missing quota name"); return -EINVAL; } - if (f2fs_sb_has_quota_ino(sbi)) { - f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name"); + if (strchr(param->string, '/')) { + f2fs_err(NULL, "quotafile must be on filesystem root"); + return -EINVAL; + } + if (ctx->info.s_qf_names[qtype]) { + if (strcmp(ctx->info.s_qf_names[qtype], param->string) != 0) { + f2fs_err(NULL, "Quota file already specified"); + return -EINVAL; + } return 0; } - qname = match_strdup(args); + qname = kmemdup_nul(param->string, param->size, GFP_KERNEL); if (!qname) { - f2fs_err(sbi, "Not enough memory for storing quotafile name"); + f2fs_err(NULL, "Not enough memory for storing quotafile name"); return -ENOMEM; } - if (F2FS_OPTION(sbi).s_qf_names[qtype]) { - if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0) - ret = 0; - else - f2fs_err(sbi, "%s quota file already specified", - QTYPE2NAME(qtype)); - goto errout; - } - if (strchr(qname, '/')) { - f2fs_err(sbi, "quotafile must be on filesystem root"); - goto errout; - } - F2FS_OPTION(sbi).s_qf_names[qtype] = qname; - set_opt(sbi, QUOTA); + F2FS_CTX_INFO(ctx).s_qf_names[qtype] = qname; + ctx->qname_mask |= 1 << qtype; return 0; -errout: - kfree(qname); - return ret; } -static int f2fs_clear_qf_name(struct f2fs_sb_info *sbi, int qtype) +/* + * Clear the name of the specified quota file. + */ +static int f2fs_unnote_qf_name(struct fs_context *fc, int qtype) { - struct super_block *sb = sbi->sb; + struct f2fs_fs_context *ctx = fc->fs_private; - if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) { - f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); - return -EINVAL; - } - kfree(F2FS_OPTION(sbi).s_qf_names[qtype]); - F2FS_OPTION(sbi).s_qf_names[qtype] = NULL; + kfree(ctx->info.s_qf_names[qtype]); + ctx->info.s_qf_names[qtype] = NULL; + ctx->qname_mask |= 1 << qtype; return 0; } -static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) +static void f2fs_unnote_qf_name_all(struct fs_context *fc) { - /* - * We do the test below only for project quotas. 'usrquota' and - * 'grpquota' mount options are allowed even without quota feature - * to support legacy quotas in quota files. - */ - if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) { - f2fs_err(sbi, "Project quota feature not enabled. 
Cannot enable project quota enforcement."); - return -1; - } - if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || - F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || - F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) { - if (test_opt(sbi, USRQUOTA) && - F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) - clear_opt(sbi, USRQUOTA); - - if (test_opt(sbi, GRPQUOTA) && - F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) - clear_opt(sbi, GRPQUOTA); - - if (test_opt(sbi, PRJQUOTA) && - F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) - clear_opt(sbi, PRJQUOTA); - - if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || - test_opt(sbi, PRJQUOTA)) { - f2fs_err(sbi, "old and new quota format mixing"); - return -1; - } - - if (!F2FS_OPTION(sbi).s_jquota_fmt) { - f2fs_err(sbi, "journaled quota format not specified"); - return -1; - } - } + int i; - if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) { - f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt"); - F2FS_OPTION(sbi).s_jquota_fmt = 0; - } - return 0; + for (i = 0; i < MAXQUOTAS; i++) + f2fs_unnote_qf_name(fc, i); } #endif -static int f2fs_set_test_dummy_encryption(struct f2fs_sb_info *sbi, - const char *opt, - const substring_t *arg, - bool is_remount) +static int f2fs_parse_test_dummy_encryption(const struct fs_parameter *param, + struct f2fs_fs_context *ctx) { - struct fs_parameter param = { - .type = fs_value_is_string, - .string = arg->from ? arg->from : "", - }; - struct fscrypt_dummy_policy *policy = - &F2FS_OPTION(sbi).dummy_enc_policy; int err; if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) { - f2fs_warn(sbi, "test_dummy_encryption option not supported"); + f2fs_warn(NULL, "test_dummy_encryption option not supported"); return -EINVAL; } - - if (!f2fs_sb_has_encrypt(sbi)) { - f2fs_err(sbi, "Encrypt feature is off"); - return -EINVAL; - } - - /* - * This mount option is just for testing, and it's not worthwhile to - * implement the extra complexity (e.g. RCU protection) that would be - * needed to allow it to be set or changed during remount. We do allow - * it to be specified during remount, but only if there is no change. 
- */ - if (is_remount && !fscrypt_is_dummy_policy_set(policy)) { - f2fs_warn(sbi, "Can't set test_dummy_encryption on remount"); - return -EINVAL; - } - - err = fscrypt_parse_test_dummy_encryption(¶m, policy); + err = fscrypt_parse_test_dummy_encryption(param, + &ctx->info.dummy_enc_policy); if (err) { - if (err == -EEXIST) - f2fs_warn(sbi, - "Can't change test_dummy_encryption on remount"); - else if (err == -EINVAL) - f2fs_warn(sbi, "Value of option \"%s\" is unrecognized", - opt); + if (err == -EINVAL) + f2fs_warn(NULL, "Value of option \"%s\" is unrecognized", + param->key); + else if (err == -EEXIST) + f2fs_warn(NULL, "Conflicting test_dummy_encryption options"); else - f2fs_warn(sbi, "Error processing option \"%s\" [%d]", - opt, err); + f2fs_warn(NULL, "Error processing option \"%s\" [%d]", + param->key, err); return -EINVAL; } - f2fs_warn(sbi, "Test dummy encryption mode enabled"); return 0; } #ifdef CONFIG_F2FS_FS_COMPRESSION -static bool is_compress_extension_exist(struct f2fs_sb_info *sbi, +static bool is_compress_extension_exist(struct f2fs_mount_info *info, const char *new_ext, bool is_ext) { unsigned char (*ext)[F2FS_EXTENSION_LEN]; @@ -546,11 +576,11 @@ static bool is_compress_extension_exist(struct f2fs_sb_info *sbi, int i; if (is_ext) { - ext = F2FS_OPTION(sbi).extensions; - ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; + ext = info->extensions; + ext_cnt = info->compress_ext_cnt; } else { - ext = F2FS_OPTION(sbi).noextensions; - ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; + ext = info->noextensions; + ext_cnt = info->nocompress_ext_cnt; } for (i = 0; i < ext_cnt; i++) { @@ -568,28 +598,28 @@ static bool is_compress_extension_exist(struct f2fs_sb_info *sbi, * extension will be treated as special cases and will not be compressed. * 3. Don't allow the non-compress extension specifies all files. 
*/ -static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi) +static int f2fs_test_compress_extension(unsigned char (*noext)[F2FS_EXTENSION_LEN], + int noext_cnt, + unsigned char (*ext)[F2FS_EXTENSION_LEN], + int ext_cnt) { - unsigned char (*ext)[F2FS_EXTENSION_LEN]; - unsigned char (*noext)[F2FS_EXTENSION_LEN]; - int ext_cnt, noext_cnt, index = 0, no_index = 0; - - ext = F2FS_OPTION(sbi).extensions; - ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; - noext = F2FS_OPTION(sbi).noextensions; - noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; + int index = 0, no_index = 0; if (!noext_cnt) return 0; for (no_index = 0; no_index < noext_cnt; no_index++) { + if (strlen(noext[no_index]) == 0) + continue; if (!strcasecmp("*", noext[no_index])) { - f2fs_info(sbi, "Don't allow the nocompress extension specifies all files"); + f2fs_info(NULL, "Don't allow the nocompress extension specifies all files"); return -EINVAL; } for (index = 0; index < ext_cnt; index++) { + if (strlen(ext[index]) == 0) + continue; if (!strcasecmp(ext[index], noext[no_index])) { - f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension", + f2fs_info(NULL, "Don't allow the same extension %s appear in both compress and nocompress extension", ext[index]); return -EINVAL; } @@ -599,58 +629,62 @@ static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi) } #ifdef CONFIG_F2FS_FS_LZ4 -static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str) +static int f2fs_set_lz4hc_level(struct f2fs_fs_context *ctx, const char *str) { #ifdef CONFIG_F2FS_FS_LZ4HC unsigned int level; if (strlen(str) == 3) { - F2FS_OPTION(sbi).compress_level = 0; + F2FS_CTX_INFO(ctx).compress_level = 0; + ctx->spec_mask |= F2FS_SPEC_compress_level; return 0; } str += 3; if (str[0] != ':') { - f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>"); + f2fs_info(NULL, "wrong format, e.g. <alg_name>:<compr_level>"); return -EINVAL; } if (kstrtouint(str + 1, 10, &level)) return -EINVAL; if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) { - f2fs_info(sbi, "invalid lz4hc compress level: %d", level); + f2fs_info(NULL, "invalid lz4hc compress level: %d", level); return -EINVAL; } - F2FS_OPTION(sbi).compress_level = level; + F2FS_CTX_INFO(ctx).compress_level = level; + ctx->spec_mask |= F2FS_SPEC_compress_level; return 0; #else if (strlen(str) == 3) { - F2FS_OPTION(sbi).compress_level = 0; + F2FS_CTX_INFO(ctx).compress_level = 0; + ctx->spec_mask |= F2FS_SPEC_compress_level; return 0; } - f2fs_info(sbi, "kernel doesn't support lz4hc compression"); + f2fs_info(NULL, "kernel doesn't support lz4hc compression"); return -EINVAL; #endif } #endif #ifdef CONFIG_F2FS_FS_ZSTD -static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str) +static int f2fs_set_zstd_level(struct f2fs_fs_context *ctx, const char *str) { int level; int len = 4; if (strlen(str) == len) { - F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL; + F2FS_CTX_INFO(ctx).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL; + ctx->spec_mask |= F2FS_SPEC_compress_level; return 0; } str += len; if (str[0] != ':') { - f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>"); + f2fs_info(NULL, "wrong format, e.g. 
<alg_name>:<compr_level>"); return -EINVAL; } if (kstrtoint(str + 1, 10, &level)) @@ -658,686 +692,750 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str) /* f2fs does not support negative compress level now */ if (level < 0) { - f2fs_info(sbi, "do not support negative compress level: %d", level); + f2fs_info(NULL, "do not support negative compress level: %d", level); return -ERANGE; } if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) { - f2fs_info(sbi, "invalid zstd compress level: %d", level); + f2fs_info(NULL, "invalid zstd compress level: %d", level); return -EINVAL; } - F2FS_OPTION(sbi).compress_level = level; + F2FS_CTX_INFO(ctx).compress_level = level; + ctx->spec_mask |= F2FS_SPEC_compress_level; return 0; } #endif #endif -static int parse_options(struct f2fs_sb_info *sbi, char *options, bool is_remount) +static int f2fs_parse_param(struct fs_context *fc, struct fs_parameter *param) { - substring_t args[MAX_OPT_ARGS]; + struct f2fs_fs_context *ctx = fc->fs_private; #ifdef CONFIG_F2FS_FS_COMPRESSION unsigned char (*ext)[F2FS_EXTENSION_LEN]; unsigned char (*noext)[F2FS_EXTENSION_LEN]; int ext_cnt, noext_cnt; + char *name; #endif - char *p, *name; - int arg = 0; - kuid_t uid; - kgid_t gid; - int ret; - - if (!options) - return 0; - - while ((p = strsep(&options, ",")) != NULL) { - int token; - - if (!*p) - continue; - /* - * Initialize args struct so we know whether arg was - * found; some options take optional arguments. - */ - args[0].to = args[0].from = NULL; - token = match_token(p, f2fs_tokens, args); + substring_t args[MAX_OPT_ARGS]; + struct fs_parse_result result; + int token, ret, arg; - switch (token) { - case Opt_gc_background: - name = match_strdup(&args[0]); + token = fs_parse(fc, f2fs_param_specs, param, &result); + if (token < 0) + return token; - if (!name) - return -ENOMEM; - if (!strcmp(name, "on")) { - F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; - } else if (!strcmp(name, "off")) { - if (f2fs_sb_has_blkzoned(sbi)) { - f2fs_warn(sbi, "zoned devices need bggc"); - kfree(name); - return -EINVAL; - } - F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF; - } else if (!strcmp(name, "sync")) { - F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC; - } else { - kfree(name); - return -EINVAL; - } - kfree(name); - break; - case Opt_disable_roll_forward: - set_opt(sbi, DISABLE_ROLL_FORWARD); - break; - case Opt_norecovery: - /* requires ro mount, checked in f2fs_default_check */ - set_opt(sbi, NORECOVERY); - break; - case Opt_discard: - if (!f2fs_hw_support_discard(sbi)) { - f2fs_warn(sbi, "device does not support discard"); - break; - } - set_opt(sbi, DISCARD); - break; - case Opt_nodiscard: - if (f2fs_hw_should_discard(sbi)) { - f2fs_warn(sbi, "discard is required for zoned block devices"); - return -EINVAL; - } - clear_opt(sbi, DISCARD); - break; - case Opt_noheap: - case Opt_heap: - f2fs_warn(sbi, "heap/no_heap options were deprecated"); - break; + switch (token) { + case Opt_gc_background: + F2FS_CTX_INFO(ctx).bggc_mode = result.uint_32; + ctx->spec_mask |= F2FS_SPEC_background_gc; + break; + case Opt_disable_roll_forward: + ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_ROLL_FORWARD); + break; + case Opt_norecovery: + /* requires ro mount, checked in f2fs_validate_options */ + ctx_set_opt(ctx, F2FS_MOUNT_NORECOVERY); + break; + case Opt_discard: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD); + else + ctx_set_opt(ctx, F2FS_MOUNT_DISCARD); + break; + case Opt_noheap: + case Opt_heap: + f2fs_warn(NULL, "heap/no_heap options were deprecated"); + break; 
#ifdef CONFIG_F2FS_FS_XATTR - case Opt_user_xattr: - set_opt(sbi, XATTR_USER); - break; - case Opt_nouser_xattr: - clear_opt(sbi, XATTR_USER); - break; - case Opt_inline_xattr: - set_opt(sbi, INLINE_XATTR); - break; - case Opt_noinline_xattr: - clear_opt(sbi, INLINE_XATTR); - break; - case Opt_inline_xattr_size: - if (args->from && match_int(args, &arg)) - return -EINVAL; - set_opt(sbi, INLINE_XATTR_SIZE); - F2FS_OPTION(sbi).inline_xattr_size = arg; - break; + case Opt_user_xattr: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_XATTR_USER); + else + ctx_set_opt(ctx, F2FS_MOUNT_XATTR_USER); + break; + case Opt_inline_xattr: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_XATTR); + else + ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR); + break; + case Opt_inline_xattr_size: + if (result.int_32 < MIN_INLINE_XATTR_SIZE || + result.int_32 > MAX_INLINE_XATTR_SIZE) { + f2fs_err(NULL, "inline xattr size is out of range: %u ~ %u", + (u32)MIN_INLINE_XATTR_SIZE, (u32)MAX_INLINE_XATTR_SIZE); + return -EINVAL; + } + ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE); + F2FS_CTX_INFO(ctx).inline_xattr_size = result.int_32; + ctx->spec_mask |= F2FS_SPEC_inline_xattr_size; + break; #else - case Opt_user_xattr: - case Opt_nouser_xattr: - case Opt_inline_xattr: - case Opt_noinline_xattr: - case Opt_inline_xattr_size: - f2fs_info(sbi, "xattr options not supported"); - break; + case Opt_user_xattr: + case Opt_inline_xattr: + case Opt_inline_xattr_size: + f2fs_info(NULL, "%s options not supported", param->key); + break; #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL - case Opt_acl: - set_opt(sbi, POSIX_ACL); - break; - case Opt_noacl: - clear_opt(sbi, POSIX_ACL); - break; + case Opt_acl: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_POSIX_ACL); + else + ctx_set_opt(ctx, F2FS_MOUNT_POSIX_ACL); + break; #else - case Opt_acl: - case Opt_noacl: - f2fs_info(sbi, "acl options not supported"); - break; + case Opt_acl: + f2fs_info(NULL, "%s options not supported", param->key); + break; #endif - case Opt_active_logs: - if (args->from && match_int(args, &arg)) - return -EINVAL; - if (arg != 2 && arg != 4 && - arg != NR_CURSEG_PERSIST_TYPE) - return -EINVAL; - F2FS_OPTION(sbi).active_logs = arg; - break; - case Opt_disable_ext_identify: - set_opt(sbi, DISABLE_EXT_IDENTIFY); - break; - case Opt_inline_data: - set_opt(sbi, INLINE_DATA); - break; - case Opt_inline_dentry: - set_opt(sbi, INLINE_DENTRY); - break; - case Opt_noinline_dentry: - clear_opt(sbi, INLINE_DENTRY); - break; - case Opt_flush_merge: - set_opt(sbi, FLUSH_MERGE); - break; - case Opt_noflush_merge: - clear_opt(sbi, FLUSH_MERGE); - break; - case Opt_nobarrier: - set_opt(sbi, NOBARRIER); - break; - case Opt_barrier: - clear_opt(sbi, NOBARRIER); - break; - case Opt_fastboot: - set_opt(sbi, FASTBOOT); - break; - case Opt_extent_cache: - set_opt(sbi, READ_EXTENT_CACHE); - break; - case Opt_noextent_cache: - if (f2fs_sb_has_device_alias(sbi)) { - f2fs_err(sbi, "device aliasing requires extent cache"); - return -EINVAL; - } - clear_opt(sbi, READ_EXTENT_CACHE); - break; - case Opt_noinline_data: - clear_opt(sbi, INLINE_DATA); - break; - case Opt_data_flush: - set_opt(sbi, DATA_FLUSH); - break; - case Opt_reserve_root: - if (args->from && match_int(args, &arg)) - return -EINVAL; - if (test_opt(sbi, RESERVE_ROOT)) { - f2fs_info(sbi, "Preserve previous reserve_root=%u", - F2FS_OPTION(sbi).root_reserved_blocks); - } else { - F2FS_OPTION(sbi).root_reserved_blocks = arg; - set_opt(sbi, RESERVE_ROOT); - } - break; - case Opt_resuid: - if (args->from && 
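/*
 * Behaviour note (sketch, hypothetical mount line): the
 * MIN/MAX_INLINE_XATTR_SIZE range check now runs at parse time, in the
 * Opt_inline_xattr_size case above, rather than in the old
 * f2fs_default_check(), so a bad value fails before any option is
 * applied:
 *
 *	mount -o inline_xattr,inline_xattr_size=999999 ...
 *	// -> -EINVAL: "inline xattr size is out of range: %u ~ %u"
 */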
match_int(args, &arg)) - return -EINVAL; - uid = make_kuid(current_user_ns(), arg); - if (!uid_valid(uid)) { - f2fs_err(sbi, "Invalid uid value %d", arg); - return -EINVAL; - } - F2FS_OPTION(sbi).s_resuid = uid; - break; - case Opt_resgid: - if (args->from && match_int(args, &arg)) - return -EINVAL; - gid = make_kgid(current_user_ns(), arg); - if (!gid_valid(gid)) { - f2fs_err(sbi, "Invalid gid value %d", arg); - return -EINVAL; - } - F2FS_OPTION(sbi).s_resgid = gid; - break; - case Opt_mode: - name = match_strdup(&args[0]); - - if (!name) - return -ENOMEM; - if (!strcmp(name, "adaptive")) { - F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; - } else if (!strcmp(name, "lfs")) { - F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; - } else if (!strcmp(name, "fragment:segment")) { - F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG; - } else if (!strcmp(name, "fragment:block")) { - F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK; - } else { - kfree(name); - return -EINVAL; - } - kfree(name); - break; + case Opt_active_logs: + if (result.int_32 != 2 && result.int_32 != 4 && + result.int_32 != NR_CURSEG_PERSIST_TYPE) + return -EINVAL; + ctx->spec_mask |= F2FS_SPEC_active_logs; + F2FS_CTX_INFO(ctx).active_logs = result.int_32; + break; + case Opt_disable_ext_identify: + ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_EXT_IDENTIFY); + break; + case Opt_inline_data: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DATA); + else + ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DATA); + break; + case Opt_inline_dentry: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DENTRY); + else + ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DENTRY); + break; + case Opt_flush_merge: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_FLUSH_MERGE); + else + ctx_set_opt(ctx, F2FS_MOUNT_FLUSH_MERGE); + break; + case Opt_barrier: + if (result.negated) + ctx_set_opt(ctx, F2FS_MOUNT_NOBARRIER); + else + ctx_clear_opt(ctx, F2FS_MOUNT_NOBARRIER); + break; + case Opt_fastboot: + ctx_set_opt(ctx, F2FS_MOUNT_FASTBOOT); + break; + case Opt_extent_cache: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE); + else + ctx_set_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE); + break; + case Opt_data_flush: + ctx_set_opt(ctx, F2FS_MOUNT_DATA_FLUSH); + break; + case Opt_reserve_root: + ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_ROOT); + F2FS_CTX_INFO(ctx).root_reserved_blocks = result.uint_32; + ctx->spec_mask |= F2FS_SPEC_reserve_root; + break; + case Opt_resuid: + F2FS_CTX_INFO(ctx).s_resuid = result.uid; + ctx->spec_mask |= F2FS_SPEC_resuid; + break; + case Opt_resgid: + F2FS_CTX_INFO(ctx).s_resgid = result.gid; + ctx->spec_mask |= F2FS_SPEC_resgid; + break; + case Opt_mode: + F2FS_CTX_INFO(ctx).fs_mode = result.uint_32; + ctx->spec_mask |= F2FS_SPEC_mode; + break; #ifdef CONFIG_F2FS_FAULT_INJECTION - case Opt_fault_injection: - if (args->from && match_int(args, &arg)) - return -EINVAL; - if (f2fs_build_fault_attr(sbi, arg, - F2FS_ALL_FAULT_TYPE)) - return -EINVAL; - set_opt(sbi, FAULT_INJECTION); - break; + case Opt_fault_injection: + F2FS_CTX_INFO(ctx).fault_info.inject_rate = result.int_32; + ctx->spec_mask |= F2FS_SPEC_fault_injection; + ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION); + break; - case Opt_fault_type: - if (args->from && match_int(args, &arg)) - return -EINVAL; - if (f2fs_build_fault_attr(sbi, 0, arg)) - return -EINVAL; - set_opt(sbi, FAULT_INJECTION); - break; + case Opt_fault_type: + if (result.uint_32 > BIT(FAULT_MAX)) + return -EINVAL; + F2FS_CTX_INFO(ctx).fault_info.inject_type = result.uint_32; + ctx->spec_mask |= 
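/*
 * Sketch of the presence-tracking convention used throughout
 * f2fs_parse_param(): each value-carrying option sets an F2FS_SPEC_*
 * bit so the apply phase can tell "user passed fault_type=0" apart
 * from "user never passed fault_type". On a hypothetical
 * "-o remount,fault_injection=1", only F2FS_SPEC_fault_injection is
 * set, so the stored inject_type is left untouched by
 * f2fs_apply_options().
 */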
F2FS_SPEC_fault_type; + ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION); + break; #else - case Opt_fault_injection: - case Opt_fault_type: - f2fs_info(sbi, "fault injection options not supported"); - break; + case Opt_fault_injection: + case Opt_fault_type: + f2fs_info(NULL, "%s options not supported", param->key); + break; #endif - case Opt_lazytime: - set_opt(sbi, LAZYTIME); - break; - case Opt_nolazytime: - clear_opt(sbi, LAZYTIME); - break; + case Opt_lazytime: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_LAZYTIME); + else + ctx_set_opt(ctx, F2FS_MOUNT_LAZYTIME); + break; #ifdef CONFIG_QUOTA - case Opt_quota: - case Opt_usrquota: - set_opt(sbi, USRQUOTA); - break; - case Opt_grpquota: - set_opt(sbi, GRPQUOTA); - break; - case Opt_prjquota: - set_opt(sbi, PRJQUOTA); - break; - case Opt_usrjquota: - ret = f2fs_set_qf_name(sbi, USRQUOTA, &args[0]); - if (ret) - return ret; - break; - case Opt_grpjquota: - ret = f2fs_set_qf_name(sbi, GRPQUOTA, &args[0]); - if (ret) - return ret; - break; - case Opt_prjjquota: - ret = f2fs_set_qf_name(sbi, PRJQUOTA, &args[0]); - if (ret) - return ret; - break; - case Opt_offusrjquota: - ret = f2fs_clear_qf_name(sbi, USRQUOTA); - if (ret) - return ret; - break; - case Opt_offgrpjquota: - ret = f2fs_clear_qf_name(sbi, GRPQUOTA); - if (ret) - return ret; - break; - case Opt_offprjjquota: - ret = f2fs_clear_qf_name(sbi, PRJQUOTA); - if (ret) - return ret; - break; - case Opt_jqfmt_vfsold: - F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD; - break; - case Opt_jqfmt_vfsv0: - F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0; - break; - case Opt_jqfmt_vfsv1: - F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1; - break; - case Opt_noquota: - clear_opt(sbi, QUOTA); - clear_opt(sbi, USRQUOTA); - clear_opt(sbi, GRPQUOTA); - clear_opt(sbi, PRJQUOTA); - break; + case Opt_quota: + if (result.negated) { + ctx_clear_opt(ctx, F2FS_MOUNT_QUOTA); + ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA); + ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA); + ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA); + } else + ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA); + break; + case Opt_usrquota: + ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA); + break; + case Opt_grpquota: + ctx_set_opt(ctx, F2FS_MOUNT_GRPQUOTA); + break; + case Opt_prjquota: + ctx_set_opt(ctx, F2FS_MOUNT_PRJQUOTA); + break; + case Opt_usrjquota: + if (!*param->string) + ret = f2fs_unnote_qf_name(fc, USRQUOTA); + else + ret = f2fs_note_qf_name(fc, USRQUOTA, param); + if (ret) + return ret; + break; + case Opt_grpjquota: + if (!*param->string) + ret = f2fs_unnote_qf_name(fc, GRPQUOTA); + else + ret = f2fs_note_qf_name(fc, GRPQUOTA, param); + if (ret) + return ret; + break; + case Opt_prjjquota: + if (!*param->string) + ret = f2fs_unnote_qf_name(fc, PRJQUOTA); + else + ret = f2fs_note_qf_name(fc, PRJQUOTA, param); + if (ret) + return ret; + break; + case Opt_jqfmt: + F2FS_CTX_INFO(ctx).s_jquota_fmt = result.int_32; + ctx->spec_mask |= F2FS_SPEC_jqfmt; + break; #else - case Opt_quota: - case Opt_usrquota: - case Opt_grpquota: - case Opt_prjquota: - case Opt_usrjquota: - case Opt_grpjquota: - case Opt_prjjquota: - case Opt_offusrjquota: - case Opt_offgrpjquota: - case Opt_offprjjquota: - case Opt_jqfmt_vfsold: - case Opt_jqfmt_vfsv0: - case Opt_jqfmt_vfsv1: - case Opt_noquota: - f2fs_info(sbi, "quota operations not supported"); - break; + case Opt_quota: + case Opt_usrquota: + case Opt_grpquota: + case Opt_prjquota: + case Opt_usrjquota: + case Opt_grpjquota: + case Opt_prjjquota: + f2fs_info(NULL, "quota operations not supported"); + break; #endif - case Opt_alloc: - name 
= match_strdup(&args[0]); - if (!name) - return -ENOMEM; - - if (!strcmp(name, "default")) { - F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; - } else if (!strcmp(name, "reuse")) { - F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; - } else { - kfree(name); - return -EINVAL; - } - kfree(name); - break; - case Opt_fsync: - name = match_strdup(&args[0]); - if (!name) - return -ENOMEM; - if (!strcmp(name, "posix")) { - F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; - } else if (!strcmp(name, "strict")) { - F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT; - } else if (!strcmp(name, "nobarrier")) { - F2FS_OPTION(sbi).fsync_mode = - FSYNC_MODE_NOBARRIER; - } else { - kfree(name); - return -EINVAL; - } - kfree(name); - break; - case Opt_test_dummy_encryption: - ret = f2fs_set_test_dummy_encryption(sbi, p, &args[0], - is_remount); - if (ret) - return ret; - break; - case Opt_inlinecrypt: + case Opt_alloc: + F2FS_CTX_INFO(ctx).alloc_mode = result.uint_32; + ctx->spec_mask |= F2FS_SPEC_alloc_mode; + break; + case Opt_fsync: + F2FS_CTX_INFO(ctx).fsync_mode = result.uint_32; + ctx->spec_mask |= F2FS_SPEC_fsync_mode; + break; + case Opt_test_dummy_encryption: + ret = f2fs_parse_test_dummy_encryption(param, ctx); + if (ret) + return ret; + break; + case Opt_inlinecrypt: #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT - set_opt(sbi, INLINECRYPT); + ctx_set_opt(ctx, F2FS_MOUNT_INLINECRYPT); #else - f2fs_info(sbi, "inline encryption not supported"); + f2fs_info(NULL, "inline encryption not supported"); #endif - break; + break; + case Opt_checkpoint: + /* + * Initialize args struct so we know whether arg was + * found; some options take optional arguments. + */ + args[0].from = args[0].to = NULL; + arg = 0; + + /* revert to match_table for checkpoint= options */ + token = match_token(param->string, f2fs_checkpoint_tokens, args); + switch (token) { case Opt_checkpoint_disable_cap_perc: if (args->from && match_int(args, &arg)) return -EINVAL; if (arg < 0 || arg > 100) return -EINVAL; - F2FS_OPTION(sbi).unusable_cap_perc = arg; - set_opt(sbi, DISABLE_CHECKPOINT); + F2FS_CTX_INFO(ctx).unusable_cap_perc = arg; + ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc; + ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT); break; case Opt_checkpoint_disable_cap: if (args->from && match_int(args, &arg)) return -EINVAL; - F2FS_OPTION(sbi).unusable_cap = arg; - set_opt(sbi, DISABLE_CHECKPOINT); + F2FS_CTX_INFO(ctx).unusable_cap = arg; + ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap; + ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT); break; case Opt_checkpoint_disable: - set_opt(sbi, DISABLE_CHECKPOINT); + ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT); break; case Opt_checkpoint_enable: - clear_opt(sbi, DISABLE_CHECKPOINT); - break; - case Opt_checkpoint_merge: - set_opt(sbi, MERGE_CHECKPOINT); - break; - case Opt_nocheckpoint_merge: - clear_opt(sbi, MERGE_CHECKPOINT); + ctx_clear_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT); break; + default: + return -EINVAL; + } + break; + case Opt_checkpoint_merge: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT); + else + ctx_set_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT); + break; #ifdef CONFIG_F2FS_FS_COMPRESSION - case Opt_compress_algorithm: - if (!f2fs_sb_has_compression(sbi)) { - f2fs_info(sbi, "Image doesn't support compression"); - break; - } - name = match_strdup(&args[0]); - if (!name) - return -ENOMEM; - if (!strcmp(name, "lzo")) { + case Opt_compress_algorithm: + name = param->string; + if (!strcmp(name, "lzo")) { #ifdef CONFIG_F2FS_FS_LZO - 
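/*
 * Format reminder (derived from f2fs_set_lz4hc_level() and
 * f2fs_set_zstd_level() above): the algorithm string may carry an
 * optional ":<compr_level>" suffix:
 *
 *	compress_algorithm=lz4		// plain lz4, level 0
 *	compress_algorithm=lz4:6	// lz4hc level 6 (CONFIG_F2FS_FS_LZ4HC)
 *	compress_algorithm=zstd		// zstd, F2FS_ZSTD_DEFAULT_CLEVEL
 *	compress_algorithm=zstd:3	// zstd level 3
 */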
F2FS_OPTION(sbi).compress_level = 0; - F2FS_OPTION(sbi).compress_algorithm = - COMPRESS_LZO; + F2FS_CTX_INFO(ctx).compress_level = 0; + F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZO; + ctx->spec_mask |= F2FS_SPEC_compress_level; + ctx->spec_mask |= F2FS_SPEC_compress_algorithm; #else - f2fs_info(sbi, "kernel doesn't support lzo compression"); + f2fs_info(NULL, "kernel doesn't support lzo compression"); #endif - } else if (!strncmp(name, "lz4", 3)) { + } else if (!strncmp(name, "lz4", 3)) { #ifdef CONFIG_F2FS_FS_LZ4 - ret = f2fs_set_lz4hc_level(sbi, name); - if (ret) { - kfree(name); - return -EINVAL; - } - F2FS_OPTION(sbi).compress_algorithm = - COMPRESS_LZ4; + ret = f2fs_set_lz4hc_level(ctx, name); + if (ret) + return -EINVAL; + F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZ4; + ctx->spec_mask |= F2FS_SPEC_compress_algorithm; #else - f2fs_info(sbi, "kernel doesn't support lz4 compression"); + f2fs_info(NULL, "kernel doesn't support lz4 compression"); #endif - } else if (!strncmp(name, "zstd", 4)) { + } else if (!strncmp(name, "zstd", 4)) { #ifdef CONFIG_F2FS_FS_ZSTD - ret = f2fs_set_zstd_level(sbi, name); - if (ret) { - kfree(name); - return -EINVAL; - } - F2FS_OPTION(sbi).compress_algorithm = - COMPRESS_ZSTD; + ret = f2fs_set_zstd_level(ctx, name); + if (ret) + return -EINVAL; + F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_ZSTD; + ctx->spec_mask |= F2FS_SPEC_compress_algorithm; #else - f2fs_info(sbi, "kernel doesn't support zstd compression"); + f2fs_info(NULL, "kernel doesn't support zstd compression"); #endif - } else if (!strcmp(name, "lzo-rle")) { + } else if (!strcmp(name, "lzo-rle")) { #ifdef CONFIG_F2FS_FS_LZORLE - F2FS_OPTION(sbi).compress_level = 0; - F2FS_OPTION(sbi).compress_algorithm = - COMPRESS_LZORLE; + F2FS_CTX_INFO(ctx).compress_level = 0; + F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZORLE; + ctx->spec_mask |= F2FS_SPEC_compress_level; + ctx->spec_mask |= F2FS_SPEC_compress_algorithm; #else - f2fs_info(sbi, "kernel doesn't support lzorle compression"); + f2fs_info(NULL, "kernel doesn't support lzorle compression"); #endif - } else { - kfree(name); - return -EINVAL; - } - kfree(name); + } else + return -EINVAL; + break; + case Opt_compress_log_size: + if (result.uint_32 < MIN_COMPRESS_LOG_SIZE || + result.uint_32 > MAX_COMPRESS_LOG_SIZE) { + f2fs_err(NULL, + "Compress cluster log size is out of range"); + return -EINVAL; + } + F2FS_CTX_INFO(ctx).compress_log_size = result.uint_32; + ctx->spec_mask |= F2FS_SPEC_compress_log_size; + break; + case Opt_compress_extension: + name = param->string; + ext = F2FS_CTX_INFO(ctx).extensions; + ext_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt; + + if (strlen(name) >= F2FS_EXTENSION_LEN || + ext_cnt >= COMPRESS_EXT_NUM) { + f2fs_err(NULL, "invalid extension length/number"); + return -EINVAL; + } + + if (is_compress_extension_exist(&ctx->info, name, true)) break; - case Opt_compress_log_size: - if (!f2fs_sb_has_compression(sbi)) { - f2fs_info(sbi, "Image doesn't support compression"); - break; - } - if (args->from && match_int(args, &arg)) - return -EINVAL; - if (arg < MIN_COMPRESS_LOG_SIZE || - arg > MAX_COMPRESS_LOG_SIZE) { - f2fs_err(sbi, - "Compress cluster log size is out of range"); - return -EINVAL; - } - F2FS_OPTION(sbi).compress_log_size = arg; + + ret = strscpy(ext[ext_cnt], name, F2FS_EXTENSION_LEN); + if (ret < 0) + return ret; + F2FS_CTX_INFO(ctx).compress_ext_cnt++; + ctx->spec_mask |= F2FS_SPEC_compress_extension; + break; + case Opt_nocompress_extension: + name = param->string; + noext = 
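/*
 * Invariants enforced on both extension tables (sketch, matching the
 * Opt_compress_extension case above and the Opt_nocompress_extension
 * case below): the name must leave room for a NUL within
 * F2FS_EXTENSION_LEN, the table holds at most COMPRESS_EXT_NUM entries,
 * and duplicates are silently skipped:
 *
 *	if (strlen(name) >= F2FS_EXTENSION_LEN || cnt >= COMPRESS_EXT_NUM)
 *		return -EINVAL;
 *	if (is_compress_extension_exist(&ctx->info, name, is_ext))
 *		return 0;	// already recorded, nothing to do
 */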
F2FS_CTX_INFO(ctx).noextensions; + noext_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt; + + if (strlen(name) >= F2FS_EXTENSION_LEN || + noext_cnt >= COMPRESS_EXT_NUM) { + f2fs_err(NULL, "invalid extension length/number"); + return -EINVAL; + } + + if (is_compress_extension_exist(&ctx->info, name, false)) break; - case Opt_compress_extension: - if (!f2fs_sb_has_compression(sbi)) { - f2fs_info(sbi, "Image doesn't support compression"); - break; - } - name = match_strdup(&args[0]); - if (!name) - return -ENOMEM; - ext = F2FS_OPTION(sbi).extensions; - ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; + ret = strscpy(noext[noext_cnt], name, F2FS_EXTENSION_LEN); + if (ret < 0) + return ret; + F2FS_CTX_INFO(ctx).nocompress_ext_cnt++; + ctx->spec_mask |= F2FS_SPEC_nocompress_extension; + break; + case Opt_compress_chksum: + F2FS_CTX_INFO(ctx).compress_chksum = true; + ctx->spec_mask |= F2FS_SPEC_compress_chksum; + break; + case Opt_compress_mode: + F2FS_CTX_INFO(ctx).compress_mode = result.uint_32; + ctx->spec_mask |= F2FS_SPEC_compress_mode; + break; + case Opt_compress_cache: + ctx_set_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE); + break; +#else + case Opt_compress_algorithm: + case Opt_compress_log_size: + case Opt_compress_extension: + case Opt_nocompress_extension: + case Opt_compress_chksum: + case Opt_compress_mode: + case Opt_compress_cache: + f2fs_info(NULL, "compression options not supported"); + break; +#endif + case Opt_atgc: + ctx_set_opt(ctx, F2FS_MOUNT_ATGC); + break; + case Opt_gc_merge: + if (result.negated) + ctx_clear_opt(ctx, F2FS_MOUNT_GC_MERGE); + else + ctx_set_opt(ctx, F2FS_MOUNT_GC_MERGE); + break; + case Opt_discard_unit: + F2FS_CTX_INFO(ctx).discard_unit = result.uint_32; + ctx->spec_mask |= F2FS_SPEC_discard_unit; + break; + case Opt_memory_mode: + F2FS_CTX_INFO(ctx).memory_mode = result.uint_32; + ctx->spec_mask |= F2FS_SPEC_memory_mode; + break; + case Opt_age_extent_cache: + ctx_set_opt(ctx, F2FS_MOUNT_AGE_EXTENT_CACHE); + break; + case Opt_errors: + F2FS_CTX_INFO(ctx).errors = result.uint_32; + ctx->spec_mask |= F2FS_SPEC_errors; + break; + case Opt_nat_bits: + ctx_set_opt(ctx, F2FS_MOUNT_NAT_BITS); + break; + } + return 0; +} - if (strlen(name) >= F2FS_EXTENSION_LEN || - ext_cnt >= COMPRESS_EXT_NUM) { - f2fs_err(sbi, - "invalid extension length/number"); - kfree(name); - return -EINVAL; - } +/* + * Check quota settings consistency. + */ +static int f2fs_check_quota_consistency(struct fs_context *fc, + struct super_block *sb) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + #ifdef CONFIG_QUOTA + struct f2fs_fs_context *ctx = fc->fs_private; + bool quota_feature = f2fs_sb_has_quota_ino(sbi); + bool quota_turnon = sb_any_quota_loaded(sb); + char *old_qname, *new_qname; + bool usr_qf_name, grp_qf_name, prj_qf_name, usrquota, grpquota, prjquota; + int i; - if (is_compress_extension_exist(sbi, name, true)) { - kfree(name); - break; - } + /* + * We do the test below only for project quotas. 'usrquota' and + * 'grpquota' mount options are allowed even without quota feature + * to support legacy quotas in quota files. + */ + if (ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA) && + !f2fs_sb_has_project_quota(sbi)) { + f2fs_err(sbi, "Project quota feature not enabled. 
Cannot enable project quota enforcement."); + return -EINVAL; + } - ret = strscpy(ext[ext_cnt], name); - if (ret < 0) { - kfree(name); - return ret; - } - F2FS_OPTION(sbi).compress_ext_cnt++; - kfree(name); - break; - case Opt_nocompress_extension: - if (!f2fs_sb_has_compression(sbi)) { - f2fs_info(sbi, "Image doesn't support compression"); - break; - } - name = match_strdup(&args[0]); - if (!name) - return -ENOMEM; + if (ctx->qname_mask) { + for (i = 0; i < MAXQUOTAS; i++) { + if (!(ctx->qname_mask & (1 << i))) + continue; - noext = F2FS_OPTION(sbi).noextensions; - noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; + old_qname = F2FS_OPTION(sbi).s_qf_names[i]; + new_qname = F2FS_CTX_INFO(ctx).s_qf_names[i]; + if (quota_turnon && + !!old_qname != !!new_qname) + goto err_jquota_change; - if (strlen(name) >= F2FS_EXTENSION_LEN || - noext_cnt >= COMPRESS_EXT_NUM) { - f2fs_err(sbi, - "invalid extension length/number"); - kfree(name); - return -EINVAL; + if (old_qname) { + if (strcmp(old_qname, new_qname) == 0) { + ctx->qname_mask &= ~(1 << i); + continue; + } + goto err_jquota_specified; } - if (is_compress_extension_exist(sbi, name, false)) { - kfree(name); - break; + if (quota_feature) { + f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name"); + ctx->qname_mask &= ~(1 << i); + kfree(F2FS_CTX_INFO(ctx).s_qf_names[i]); + F2FS_CTX_INFO(ctx).s_qf_names[i] = NULL; } + } + } + + /* Make sure we don't mix old and new quota format */ + usr_qf_name = F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || + F2FS_CTX_INFO(ctx).s_qf_names[USRQUOTA]; + grp_qf_name = F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || + F2FS_CTX_INFO(ctx).s_qf_names[GRPQUOTA]; + prj_qf_name = F2FS_OPTION(sbi).s_qf_names[PRJQUOTA] || + F2FS_CTX_INFO(ctx).s_qf_names[PRJQUOTA]; + usrquota = test_opt(sbi, USRQUOTA) || + ctx_test_opt(ctx, F2FS_MOUNT_USRQUOTA); + grpquota = test_opt(sbi, GRPQUOTA) || + ctx_test_opt(ctx, F2FS_MOUNT_GRPQUOTA); + prjquota = test_opt(sbi, PRJQUOTA) || + ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA); + + if (usr_qf_name) { + ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA); + usrquota = false; + } + if (grp_qf_name) { + ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA); + grpquota = false; + } + if (prj_qf_name) { + ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA); + prjquota = false; + } + if (usr_qf_name || grp_qf_name || prj_qf_name) { + if (grpquota || usrquota || prjquota) { + f2fs_err(sbi, "old and new quota format mixing"); + return -EINVAL; + } + if (!(ctx->spec_mask & F2FS_SPEC_jqfmt || + F2FS_OPTION(sbi).s_jquota_fmt)) { + f2fs_err(sbi, "journaled quota format not specified"); + return -EINVAL; + } + } + return 0; + +err_jquota_change: + f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); + return -EINVAL; +err_jquota_specified: + f2fs_err(sbi, "%s quota file already specified", + QTYPE2NAME(i)); + return -EINVAL; - ret = strscpy(noext[noext_cnt], name); - if (ret < 0) { - kfree(name); - return ret; - } - F2FS_OPTION(sbi).nocompress_ext_cnt++; - kfree(name); - break; - case Opt_compress_chksum: - if (!f2fs_sb_has_compression(sbi)) { - f2fs_info(sbi, "Image doesn't support compression"); - break; - } - F2FS_OPTION(sbi).compress_chksum = true; - break; - case Opt_compress_mode: - if (!f2fs_sb_has_compression(sbi)) { - f2fs_info(sbi, "Image doesn't support compression"); - break; - } - name = match_strdup(&args[0]); - if (!name) - return -ENOMEM; - if (!strcmp(name, "fs")) { - F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS; - } else if (!strcmp(name, "user")) { - F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER; - } else 
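/*
 * Worked scenarios for the consistency rules above (hypothetical mount
 * option strings):
 *
 *	usrjquota=aquota.user,usrquota,jqfmt=vfsv0
 *		-> ok; USRQUOTA is quietly dropped in favour of the
 *		   journaled quota file
 *	usrjquota=aquota.user,grpquota
 *		-> -EINVAL, "old and new quota format mixing"
 *	usrjquota=aquota.user (no jqfmt given or previously stored)
 *		-> -EINVAL, "journaled quota format not specified"
 */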
{ - kfree(name); - return -EINVAL; - } - kfree(name); - break; - case Opt_compress_cache: - if (!f2fs_sb_has_compression(sbi)) { - f2fs_info(sbi, "Image doesn't support compression"); - break; - } - set_opt(sbi, COMPRESS_CACHE); - break; #else - case Opt_compress_algorithm: - case Opt_compress_log_size: - case Opt_compress_extension: - case Opt_nocompress_extension: - case Opt_compress_chksum: - case Opt_compress_mode: - case Opt_compress_cache: - f2fs_info(sbi, "compression options not supported"); - break; + if (f2fs_readonly(sbi->sb)) + return 0; + if (f2fs_sb_has_quota_ino(sbi)) { + f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA"); + return -EINVAL; + } + if (f2fs_sb_has_project_quota(sbi)) { + f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA"); + return -EINVAL; + } + + return 0; #endif - case Opt_atgc: - set_opt(sbi, ATGC); - break; - case Opt_gc_merge: - set_opt(sbi, GC_MERGE); - break; - case Opt_nogc_merge: - clear_opt(sbi, GC_MERGE); - break; - case Opt_discard_unit: - name = match_strdup(&args[0]); - if (!name) - return -ENOMEM; - if (!strcmp(name, "block")) { - F2FS_OPTION(sbi).discard_unit = - DISCARD_UNIT_BLOCK; - } else if (!strcmp(name, "segment")) { - F2FS_OPTION(sbi).discard_unit = - DISCARD_UNIT_SEGMENT; - } else if (!strcmp(name, "section")) { - F2FS_OPTION(sbi).discard_unit = - DISCARD_UNIT_SECTION; - } else { - kfree(name); - return -EINVAL; - } - kfree(name); - break; - case Opt_memory_mode: - name = match_strdup(&args[0]); - if (!name) - return -ENOMEM; - if (!strcmp(name, "normal")) { - F2FS_OPTION(sbi).memory_mode = - MEMORY_MODE_NORMAL; - } else if (!strcmp(name, "low")) { - F2FS_OPTION(sbi).memory_mode = - MEMORY_MODE_LOW; - } else { - kfree(name); - return -EINVAL; +} + +static int f2fs_check_test_dummy_encryption(struct fs_context *fc, + struct super_block *sb) +{ + struct f2fs_fs_context *ctx = fc->fs_private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + + if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy)) + return 0; + + if (!f2fs_sb_has_encrypt(sbi)) { + f2fs_err(sbi, "Encrypt feature is off"); + return -EINVAL; + } + + /* + * This mount option is just for testing, and it's not worthwhile to + * implement the extra complexity (e.g. RCU protection) that would be + * needed to allow it to be set or changed during remount. We do allow + * it to be specified during remount, but only if there is no change. 
+ */ + if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { + if (fscrypt_dummy_policies_equal(&F2FS_OPTION(sbi).dummy_enc_policy, + &F2FS_CTX_INFO(ctx).dummy_enc_policy)) + return 0; + f2fs_warn(sbi, "Can't set or change test_dummy_encryption on remount"); + return -EINVAL; + } + return 0; +} + +static inline bool test_compression_spec(unsigned int mask) +{ + return mask & (F2FS_SPEC_compress_algorithm + | F2FS_SPEC_compress_log_size + | F2FS_SPEC_compress_extension + | F2FS_SPEC_nocompress_extension + | F2FS_SPEC_compress_chksum + | F2FS_SPEC_compress_mode); +} + +static inline void clear_compression_spec(struct f2fs_fs_context *ctx) +{ + ctx->spec_mask &= ~(F2FS_SPEC_compress_algorithm + | F2FS_SPEC_compress_log_size + | F2FS_SPEC_compress_extension + | F2FS_SPEC_nocompress_extension + | F2FS_SPEC_compress_chksum + | F2FS_SPEC_compress_mode); +} + +static int f2fs_check_compression(struct fs_context *fc, + struct super_block *sb) +{ +#ifdef CONFIG_F2FS_FS_COMPRESSION + struct f2fs_fs_context *ctx = fc->fs_private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + int i, cnt; + + if (!f2fs_sb_has_compression(sbi)) { + if (test_compression_spec(ctx->spec_mask) || + ctx_test_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE)) + f2fs_info(sbi, "Image doesn't support compression"); + clear_compression_spec(ctx); + ctx->opt_mask &= ~F2FS_MOUNT_COMPRESS_CACHE; + return 0; + } + if (ctx->spec_mask & F2FS_SPEC_compress_extension) { + cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt; + for (i = 0; i < F2FS_CTX_INFO(ctx).compress_ext_cnt; i++) { + if (is_compress_extension_exist(&F2FS_OPTION(sbi), + F2FS_CTX_INFO(ctx).extensions[i], true)) { + F2FS_CTX_INFO(ctx).extensions[i][0] = '\0'; + cnt--; } - kfree(name); - break; - case Opt_age_extent_cache: - set_opt(sbi, AGE_EXTENT_CACHE); - break; - case Opt_errors: - name = match_strdup(&args[0]); - if (!name) - return -ENOMEM; - if (!strcmp(name, "remount-ro")) { - F2FS_OPTION(sbi).errors = - MOUNT_ERRORS_READONLY; - } else if (!strcmp(name, "continue")) { - F2FS_OPTION(sbi).errors = - MOUNT_ERRORS_CONTINUE; - } else if (!strcmp(name, "panic")) { - F2FS_OPTION(sbi).errors = - MOUNT_ERRORS_PANIC; - } else { - kfree(name); - return -EINVAL; + } + if (F2FS_OPTION(sbi).compress_ext_cnt + cnt > COMPRESS_EXT_NUM) { + f2fs_err(sbi, "invalid extension length/number"); + return -EINVAL; + } + } + if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) { + cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt; + for (i = 0; i < F2FS_CTX_INFO(ctx).nocompress_ext_cnt; i++) { + if (is_compress_extension_exist(&F2FS_OPTION(sbi), + F2FS_CTX_INFO(ctx).noextensions[i], false)) { + F2FS_CTX_INFO(ctx).noextensions[i][0] = '\0'; + cnt--; } - kfree(name); - break; - case Opt_nat_bits: - set_opt(sbi, NAT_BITS); - break; - default: - f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value", - p); + } + if (F2FS_OPTION(sbi).nocompress_ext_cnt + cnt > COMPRESS_EXT_NUM) { + f2fs_err(sbi, "invalid noextension length/number"); return -EINVAL; } } + + if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions, + F2FS_CTX_INFO(ctx).nocompress_ext_cnt, + F2FS_CTX_INFO(ctx).extensions, + F2FS_CTX_INFO(ctx).compress_ext_cnt)) { + f2fs_err(sbi, "new noextensions conflicts with new extensions"); + return -EINVAL; + } + if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions, + F2FS_CTX_INFO(ctx).nocompress_ext_cnt, + F2FS_OPTION(sbi).extensions, + F2FS_OPTION(sbi).compress_ext_cnt)) { + f2fs_err(sbi, "new noextensions conflicts with old extensions"); + return -EINVAL; + } + if 
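/*
 * The three f2fs_test_compress_extension() calls around this point
 * cross-check every pairing of the new (ctx) and already-applied (sbi)
 * extension lists; in summary:
 *
 *	new noextensions vs new extensions	-> same name in both rejected
 *	new noextensions vs old extensions	-> rejected
 *	old noextensions vs new extensions	-> rejected
 */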
(f2fs_test_compress_extension(F2FS_OPTION(sbi).noextensions, + F2FS_OPTION(sbi).nocompress_ext_cnt, + F2FS_CTX_INFO(ctx).extensions, + F2FS_CTX_INFO(ctx).compress_ext_cnt)) { + f2fs_err(sbi, "new extensions conflicts with old noextensions"); + return -EINVAL; + } +#endif return 0; } -static int f2fs_default_check(struct f2fs_sb_info *sbi) +static int f2fs_check_opt_consistency(struct fs_context *fc, + struct super_block *sb) { -#ifdef CONFIG_QUOTA - if (f2fs_check_quota_options(sbi)) + struct f2fs_fs_context *ctx = fc->fs_private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + int err; + + if (ctx_test_opt(ctx, F2FS_MOUNT_NORECOVERY) && !f2fs_readonly(sb)) return -EINVAL; -#else - if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) { - f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA"); + + if (f2fs_hw_should_discard(sbi) && + (ctx->opt_mask & F2FS_MOUNT_DISCARD) && + !ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) { + f2fs_warn(sbi, "discard is required for zoned block devices"); return -EINVAL; } - if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) { - f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA"); + + if (!f2fs_hw_support_discard(sbi) && + (ctx->opt_mask & F2FS_MOUNT_DISCARD) && + ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) { + f2fs_warn(sbi, "device does not support discard"); + ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD); + ctx->opt_mask &= ~F2FS_MOUNT_DISCARD; + } + + if (f2fs_sb_has_device_alias(sbi) && + (ctx->opt_mask & F2FS_MOUNT_READ_EXTENT_CACHE) && + !ctx_test_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE)) { + f2fs_err(sbi, "device aliasing requires extent cache"); return -EINVAL; } -#endif + + if (test_opt(sbi, RESERVE_ROOT) && + (ctx->opt_mask & F2FS_MOUNT_RESERVE_ROOT) && + ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_ROOT)) { + f2fs_info(sbi, "Preserve previous reserve_root=%u", + F2FS_OPTION(sbi).root_reserved_blocks); + ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_ROOT); + ctx->opt_mask &= ~F2FS_MOUNT_RESERVE_ROOT; + } + + err = f2fs_check_test_dummy_encryption(fc, sb); + if (err) + return err; + + err = f2fs_check_compression(fc, sb); + if (err) + return err; + + err = f2fs_check_quota_consistency(fc, sb); + if (err) + return err; if (!IS_ENABLED(CONFIG_UNICODE) && f2fs_sb_has_casefold(sbi)) { f2fs_err(sbi, @@ -1351,15 +1449,19 @@ static int f2fs_default_check(struct f2fs_sb_info *sbi) * devices, but mandatory for host-managed zoned block devices. 
*/ if (f2fs_sb_has_blkzoned(sbi)) { + if (F2FS_CTX_INFO(ctx).bggc_mode == BGGC_MODE_OFF) { + f2fs_warn(sbi, "zoned devices need bggc"); + return -EINVAL; + } #ifdef CONFIG_BLK_DEV_ZONED - if (F2FS_OPTION(sbi).discard_unit != - DISCARD_UNIT_SECTION) { + if ((ctx->spec_mask & F2FS_SPEC_discard_unit) && + F2FS_CTX_INFO(ctx).discard_unit != DISCARD_UNIT_SECTION) { f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default"); - F2FS_OPTION(sbi).discard_unit = - DISCARD_UNIT_SECTION; + F2FS_CTX_INFO(ctx).discard_unit = DISCARD_UNIT_SECTION; } - if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) { + if ((ctx->spec_mask & F2FS_SPEC_mode) && + F2FS_CTX_INFO(ctx).fs_mode != FS_MODE_LFS) { f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature"); return -EINVAL; } @@ -1369,43 +1471,25 @@ static int f2fs_default_check(struct f2fs_sb_info *sbi) #endif } -#ifdef CONFIG_F2FS_FS_COMPRESSION - if (f2fs_test_compress_extension(sbi)) { - f2fs_err(sbi, "invalid compress or nocompress extension"); - return -EINVAL; - } -#endif - - if (test_opt(sbi, INLINE_XATTR_SIZE)) { - int min_size, max_size; - + if (ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE)) { if (!f2fs_sb_has_extra_attr(sbi) || !f2fs_sb_has_flexible_inline_xattr(sbi)) { f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off"); return -EINVAL; } - if (!test_opt(sbi, INLINE_XATTR)) { + if (!ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR) && !test_opt(sbi, INLINE_XATTR)) { f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option"); return -EINVAL; } - - min_size = MIN_INLINE_XATTR_SIZE; - max_size = MAX_INLINE_XATTR_SIZE; - - if (F2FS_OPTION(sbi).inline_xattr_size < min_size || - F2FS_OPTION(sbi).inline_xattr_size > max_size) { - f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d", - min_size, max_size); - return -EINVAL; - } } - if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) { + if (ctx_test_opt(ctx, F2FS_MOUNT_ATGC) && + F2FS_CTX_INFO(ctx).fs_mode == FS_MODE_LFS) { f2fs_err(sbi, "LFS is not compatible with ATGC"); return -EINVAL; } - if (f2fs_is_readonly(sbi) && test_opt(sbi, FLUSH_MERGE)) { + if (f2fs_is_readonly(sbi) && ctx_test_opt(ctx, F2FS_MOUNT_FLUSH_MERGE)) { f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode"); return -EINVAL; } @@ -1414,12 +1498,190 @@ static int f2fs_default_check(struct f2fs_sb_info *sbi) f2fs_err(sbi, "Allow to mount readonly mode only"); return -EROFS; } + return 0; +} + +static void f2fs_apply_quota_options(struct fs_context *fc, + struct super_block *sb) +{ +#ifdef CONFIG_QUOTA + struct f2fs_fs_context *ctx = fc->fs_private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + bool quota_feature = f2fs_sb_has_quota_ino(sbi); + char *qname; + int i; + + if (quota_feature) + return; + + for (i = 0; i < MAXQUOTAS; i++) { + if (!(ctx->qname_mask & (1 << i))) + continue; + + qname = F2FS_CTX_INFO(ctx).s_qf_names[i]; + if (qname) { + qname = kstrdup(F2FS_CTX_INFO(ctx).s_qf_names[i], + GFP_KERNEL | __GFP_NOFAIL); + set_opt(sbi, QUOTA); + } + F2FS_OPTION(sbi).s_qf_names[i] = qname; + } + + if (ctx->spec_mask & F2FS_SPEC_jqfmt) + F2FS_OPTION(sbi).s_jquota_fmt = F2FS_CTX_INFO(ctx).s_jquota_fmt; - if (test_opt(sbi, NORECOVERY) && !f2fs_readonly(sbi->sb)) { - f2fs_err(sbi, "norecovery requires readonly mount"); + if (quota_feature && F2FS_OPTION(sbi).s_jquota_fmt) { + f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt"); + F2FS_OPTION(sbi).s_jquota_fmt = 0; + } +#endif +} + +static void f2fs_apply_test_dummy_encryption(struct 
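/*
 * Pipeline overview (informational): under fs_context, option handling
 * is split into three phases, so a failed check leaves the mounted
 * superblock untouched:
 *
 *	f2fs_parse_param()		// fill the f2fs_fs_context only
 *	f2fs_check_opt_consistency()	// validate ctx against the sb
 *	f2fs_apply_options()		// commit ctx -> F2FS_OPTION(sbi)
 */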
fs_context *fc, + struct super_block *sb) +{ + struct f2fs_fs_context *ctx = fc->fs_private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + + if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy) || + /* if already set, it was already verified to be the same */ + fscrypt_is_dummy_policy_set(&F2FS_OPTION(sbi).dummy_enc_policy)) + return; + swap(F2FS_OPTION(sbi).dummy_enc_policy, F2FS_CTX_INFO(ctx).dummy_enc_policy); + f2fs_warn(sbi, "Test dummy encryption mode enabled"); +} + +static void f2fs_apply_compression(struct fs_context *fc, + struct super_block *sb) +{ +#ifdef CONFIG_F2FS_FS_COMPRESSION + struct f2fs_fs_context *ctx = fc->fs_private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + unsigned char (*ctx_ext)[F2FS_EXTENSION_LEN]; + unsigned char (*sbi_ext)[F2FS_EXTENSION_LEN]; + int ctx_cnt, sbi_cnt, i; + + if (ctx->spec_mask & F2FS_SPEC_compress_level) + F2FS_OPTION(sbi).compress_level = + F2FS_CTX_INFO(ctx).compress_level; + if (ctx->spec_mask & F2FS_SPEC_compress_algorithm) + F2FS_OPTION(sbi).compress_algorithm = + F2FS_CTX_INFO(ctx).compress_algorithm; + if (ctx->spec_mask & F2FS_SPEC_compress_log_size) + F2FS_OPTION(sbi).compress_log_size = + F2FS_CTX_INFO(ctx).compress_log_size; + if (ctx->spec_mask & F2FS_SPEC_compress_chksum) + F2FS_OPTION(sbi).compress_chksum = + F2FS_CTX_INFO(ctx).compress_chksum; + if (ctx->spec_mask & F2FS_SPEC_compress_mode) + F2FS_OPTION(sbi).compress_mode = + F2FS_CTX_INFO(ctx).compress_mode; + if (ctx->spec_mask & F2FS_SPEC_compress_extension) { + ctx_ext = F2FS_CTX_INFO(ctx).extensions; + ctx_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt; + sbi_ext = F2FS_OPTION(sbi).extensions; + sbi_cnt = F2FS_OPTION(sbi).compress_ext_cnt; + for (i = 0; i < ctx_cnt; i++) { + if (strlen(ctx_ext[i]) == 0) + continue; + strscpy(sbi_ext[sbi_cnt], ctx_ext[i]); + sbi_cnt++; + } + F2FS_OPTION(sbi).compress_ext_cnt = sbi_cnt; + } + if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) { + ctx_ext = F2FS_CTX_INFO(ctx).noextensions; + ctx_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt; + sbi_ext = F2FS_OPTION(sbi).noextensions; + sbi_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; + for (i = 0; i < ctx_cnt; i++) { + if (strlen(ctx_ext[i]) == 0) + continue; + strscpy(sbi_ext[sbi_cnt], ctx_ext[i]); + sbi_cnt++; + } + F2FS_OPTION(sbi).nocompress_ext_cnt = sbi_cnt; + } +#endif +} + +static void f2fs_apply_options(struct fs_context *fc, struct super_block *sb) +{ + struct f2fs_fs_context *ctx = fc->fs_private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + + F2FS_OPTION(sbi).opt &= ~ctx->opt_mask; + F2FS_OPTION(sbi).opt |= F2FS_CTX_INFO(ctx).opt; + + if (ctx->spec_mask & F2FS_SPEC_background_gc) + F2FS_OPTION(sbi).bggc_mode = F2FS_CTX_INFO(ctx).bggc_mode; + if (ctx->spec_mask & F2FS_SPEC_inline_xattr_size) + F2FS_OPTION(sbi).inline_xattr_size = + F2FS_CTX_INFO(ctx).inline_xattr_size; + if (ctx->spec_mask & F2FS_SPEC_active_logs) + F2FS_OPTION(sbi).active_logs = F2FS_CTX_INFO(ctx).active_logs; + if (ctx->spec_mask & F2FS_SPEC_reserve_root) + F2FS_OPTION(sbi).root_reserved_blocks = + F2FS_CTX_INFO(ctx).root_reserved_blocks; + if (ctx->spec_mask & F2FS_SPEC_resgid) + F2FS_OPTION(sbi).s_resgid = F2FS_CTX_INFO(ctx).s_resgid; + if (ctx->spec_mask & F2FS_SPEC_resuid) + F2FS_OPTION(sbi).s_resuid = F2FS_CTX_INFO(ctx).s_resuid; + if (ctx->spec_mask & F2FS_SPEC_mode) + F2FS_OPTION(sbi).fs_mode = F2FS_CTX_INFO(ctx).fs_mode; +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (ctx->spec_mask & F2FS_SPEC_fault_injection) + (void)f2fs_build_fault_attr(sbi, + F2FS_CTX_INFO(ctx).fault_info.inject_rate, 0, 
FAULT_RATE); + if (ctx->spec_mask & F2FS_SPEC_fault_type) + (void)f2fs_build_fault_attr(sbi, 0, + F2FS_CTX_INFO(ctx).fault_info.inject_type, FAULT_TYPE); +#endif + if (ctx->spec_mask & F2FS_SPEC_alloc_mode) + F2FS_OPTION(sbi).alloc_mode = F2FS_CTX_INFO(ctx).alloc_mode; + if (ctx->spec_mask & F2FS_SPEC_fsync_mode) + F2FS_OPTION(sbi).fsync_mode = F2FS_CTX_INFO(ctx).fsync_mode; + if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap) + F2FS_OPTION(sbi).unusable_cap = F2FS_CTX_INFO(ctx).unusable_cap; + if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap_perc) + F2FS_OPTION(sbi).unusable_cap_perc = + F2FS_CTX_INFO(ctx).unusable_cap_perc; + if (ctx->spec_mask & F2FS_SPEC_discard_unit) + F2FS_OPTION(sbi).discard_unit = F2FS_CTX_INFO(ctx).discard_unit; + if (ctx->spec_mask & F2FS_SPEC_memory_mode) + F2FS_OPTION(sbi).memory_mode = F2FS_CTX_INFO(ctx).memory_mode; + if (ctx->spec_mask & F2FS_SPEC_errors) + F2FS_OPTION(sbi).errors = F2FS_CTX_INFO(ctx).errors; + + f2fs_apply_compression(fc, sb); + f2fs_apply_test_dummy_encryption(fc, sb); + f2fs_apply_quota_options(fc, sb); +} + +static int f2fs_sanity_check_options(struct f2fs_sb_info *sbi, bool remount) +{ + if (f2fs_sb_has_device_alias(sbi) && + !test_opt(sbi, READ_EXTENT_CACHE)) { + f2fs_err(sbi, "device aliasing requires extent cache"); return -EINVAL; } + if (!remount) + return 0; + +#ifdef CONFIG_BLK_DEV_ZONED + if (f2fs_sb_has_blkzoned(sbi) && + sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) { + f2fs_err(sbi, + "zoned: max open zones %u is too small, need at least %u open zones", + sbi->max_open_zones, F2FS_OPTION(sbi).active_logs); + return -EINVAL; + } +#endif + if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) { + f2fs_warn(sbi, "LFS is not compatible with IPU"); + return -EINVAL; + } return 0; } @@ -1439,6 +1701,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) /* Initialize f2fs-specific inode info */ atomic_set(&fi->dirty_pages, 0); atomic_set(&fi->i_compr_blocks, 0); + atomic_set(&fi->open_count, 0); init_f2fs_rwsem(&fi->i_sem); spin_lock_init(&fi->i_size_lock); INIT_LIST_HEAD(&fi->dirty_list); @@ -1531,7 +1794,9 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync) } spin_unlock(&sbi->inode_lock[DIRTY_META]); - if (!ret && f2fs_is_atomic_file(inode)) + /* if atomic write is not committed, set inode w/ atomic dirty */ + if (!ret && f2fs_is_atomic_file(inode) && + !is_inode_flag_set(inode, FI_ATOMIC_COMMITTED)) set_inode_flag(inode, FI_ATOMIC_DIRTIED); return ret; @@ -1713,7 +1978,7 @@ static void f2fs_put_super(struct super_block *sb) destroy_percpu_info(sbi); f2fs_destroy_iostat(sbi); for (i = 0; i < NR_PAGE_TYPE; i++) - kvfree(sbi->write_io[i]); + kfree(sbi->write_io[i]); #if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); #endif @@ -1804,26 +2069,32 @@ static int f2fs_statfs_project(struct super_block *sb, limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit, dquot->dq_dqb.dqb_bhardlimit); - if (limit) - limit >>= sb->s_blocksize_bits; + limit >>= sb->s_blocksize_bits; + + if (limit) { + uint64_t remaining = 0; - if (limit && buf->f_blocks > limit) { curblock = (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; - buf->f_blocks = limit; - buf->f_bfree = buf->f_bavail = - (buf->f_blocks > curblock) ? 
- (buf->f_blocks - curblock) : 0; + if (limit > curblock) + remaining = limit - curblock; + + buf->f_blocks = min(buf->f_blocks, limit); + buf->f_bfree = min(buf->f_bfree, remaining); + buf->f_bavail = min(buf->f_bavail, remaining); } limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, dquot->dq_dqb.dqb_ihardlimit); - if (limit && buf->f_files > limit) { - buf->f_files = limit; - buf->f_ffree = - (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? - (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; + if (limit) { + uint64_t remaining = 0; + + if (limit > dquot->dq_dqb.dqb_curinodes) + remaining = limit - dquot->dq_dqb.dqb_curinodes; + + buf->f_files = min(buf->f_files, limit); + buf->f_ffree = min(buf->f_ffree, remaining); } spin_unlock(&dquot->dq_dqb_lock); @@ -1882,9 +2153,9 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_fsid = u64_to_fsid(id); #ifdef CONFIG_QUOTA - if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) && + if (is_inode_flag_set(d_inode(dentry), FI_PROJ_INHERIT) && sb_has_quota_limits_enabled(sb, PRJQUOTA)) { - f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf); + f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf); } #endif return 0; @@ -2208,7 +2479,7 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount) set_opt(sbi, POSIX_ACL); #endif - f2fs_build_fault_attr(sbi, 0, 0); + f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL); } #ifdef CONFIG_QUOTA @@ -2318,11 +2589,12 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) f2fs_flush_ckpt_thread(sbi); } -static int f2fs_remount(struct super_block *sb, int *flags, char *data) +static int __f2fs_remount(struct fs_context *fc, struct super_block *sb) { struct f2fs_sb_info *sbi = F2FS_SB(sb); struct f2fs_mount_info org_mount_opt; unsigned long old_sb_flags; + unsigned int flags = fc->sb_flags; int err; bool need_restart_gc = false, need_stop_gc = false; bool need_restart_flush = false, need_stop_flush = false; @@ -2368,7 +2640,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) #endif /* recover superblocks we couldn't write due to previous RO mount */ - if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { + if (!(flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { err = f2fs_commit_super(sbi, false); f2fs_info(sbi, "Try to recover all the superblocks, ret: %d", err); @@ -2378,23 +2650,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) default_options(sbi, true); - /* parse mount options */ - err = parse_options(sbi, data, true); + err = f2fs_check_opt_consistency(fc, sb); if (err) goto restore_opts; -#ifdef CONFIG_BLK_DEV_ZONED - if (f2fs_sb_has_blkzoned(sbi) && - sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) { - f2fs_err(sbi, - "zoned: max open zones %u is too small, need at least %u open zones", - sbi->max_open_zones, F2FS_OPTION(sbi).active_logs); - err = -EINVAL; - goto restore_opts; - } -#endif + f2fs_apply_options(fc, sb); - err = f2fs_default_check(sbi); + err = f2fs_sanity_check_options(sbi, true); if (err) goto restore_opts; @@ -2405,20 +2667,20 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) * Previous and new state of filesystem is RO, * so skip checking GC and FLUSH_MERGE conditions. 
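The f2fs_statfs_project() rework above replaces the old "only rewrite when f_blocks > limit" branch with unconditional min() clamping, which also covers the case where the filesystem is smaller than the quota limit but the project has little quota left. A self-contained sketch of the arithmetic (userspace C, made-up numbers):

#include <inttypes.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t f_blocks = 400, f_bfree = 300;	/* filesystem-wide counts */
	uint64_t limit = 500, cur = 450;	/* project quota: 50 blocks left */
	uint64_t remaining = limit > cur ? limit - cur : 0;

	/* the old code skipped clamping entirely here because
	 * f_blocks (400) <= limit (500), so f_bfree stayed at 300,
	 * overreporting what the project could allocate by 250 blocks */
	f_blocks = MIN(f_blocks, limit);
	f_bfree = MIN(f_bfree, remaining);
	printf("blocks=%" PRIu64 " bfree=%" PRIu64 "\n", f_blocks, f_bfree);
	/* prints "blocks=400 bfree=50" */
	return 0;
}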
*/ - if (f2fs_readonly(sb) && (*flags & SB_RDONLY)) + if (f2fs_readonly(sb) && (flags & SB_RDONLY)) goto skip; - if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) { + if (f2fs_dev_is_readonly(sbi) && !(flags & SB_RDONLY)) { err = -EROFS; goto restore_opts; } #ifdef CONFIG_QUOTA - if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) { + if (!f2fs_readonly(sb) && (flags & SB_RDONLY)) { err = dquot_suspend(sb, -1); if (err < 0) goto restore_opts; - } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) { + } else if (f2fs_readonly(sb) && !(flags & SB_RDONLY)) { /* dquot_resume needs RW */ sb->s_flags &= ~SB_RDONLY; if (sb_any_quota_suspended(sb)) { @@ -2430,12 +2692,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) } } #endif - if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) { - err = -EINVAL; - f2fs_warn(sbi, "LFS is not compatible with IPU"); - goto restore_opts; - } - /* disallow enable atgc dynamically */ if (no_atgc == !!test_opt(sbi, ATGC)) { err = -EINVAL; @@ -2474,7 +2730,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { + if ((flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { err = -EINVAL; f2fs_warn(sbi, "disabling checkpoint not compatible with read-only"); goto restore_opts; @@ -2485,7 +2741,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) * or if background_gc = off is passed in mount * option. Also sync the filesystem. */ - if ((*flags & SB_RDONLY) || + if ((flags & SB_RDONLY) || (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF && !test_opt(sbi, GC_MERGE))) { if (sbi->gc_thread) { @@ -2499,7 +2755,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) need_stop_gc = true; } - if (*flags & SB_RDONLY) { + if (flags & SB_RDONLY) { sync_inodes_sb(sb); set_sbi_flag(sbi, SBI_IS_DIRTY); @@ -2512,7 +2768,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) * We stop issue flush thread if FS is mounted as RO * or if flush_merge is not passed in mount option. */ - if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) { + if ((flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) { clear_opt(sbi, FLUSH_MERGE); f2fs_destroy_flush_cmd_control(sbi, false); need_restart_flush = true; @@ -2554,11 +2810,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) * triggered while remount and we need to take care of it before * returning from remount. */ - if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) || + if ((flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) || !test_opt(sbi, MERGE_CHECKPOINT)) { f2fs_stop_ckpt_thread(sbi); } else { - /* Flush if the prevous checkpoint, if exists. */ + /* Flush if the previous checkpoint, if exists. */ f2fs_flush_ckpt_thread(sbi); err = f2fs_start_ckpt_thread(sbi); @@ -2581,7 +2837,7 @@ skip: (test_opt(sbi, POSIX_ACL) ? 
SB_POSIXACL : 0); limit_reserve_root(sbi); - *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); + fc->sb_flags = (flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); sbi->umount_lock_holder = NULL; return 0; @@ -2689,12 +2945,9 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, { struct inode *inode = sb_dqopt(sb)->files[type]; struct address_space *mapping = inode->i_mapping; - block_t blkidx = F2FS_BYTES_TO_BLK(off); - int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; loff_t i_size = i_size_read(inode); - struct page *page; if (off > i_size) return 0; @@ -2703,37 +2956,42 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, len = i_size - off; toread = len; while (toread > 0) { - tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); + struct folio *folio; + size_t offset; + repeat: - page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS); - if (IS_ERR(page)) { - if (PTR_ERR(page) == -ENOMEM) { + folio = mapping_read_folio_gfp(mapping, off >> PAGE_SHIFT, + GFP_NOFS); + if (IS_ERR(folio)) { + if (PTR_ERR(folio) == -ENOMEM) { memalloc_retry_wait(GFP_NOFS); goto repeat; } set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); - return PTR_ERR(page); + return PTR_ERR(folio); } + offset = offset_in_folio(folio, off); + tocopy = min(folio_size(folio) - offset, toread); - lock_page(page); + folio_lock(folio); - if (unlikely(page->mapping != mapping)) { - f2fs_put_page(page, 1); + if (unlikely(folio->mapping != mapping)) { + f2fs_folio_put(folio, true); goto repeat; } - if (unlikely(!PageUptodate(page))) { - f2fs_put_page(page, 1); - set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); - return -EIO; - } - memcpy_from_page(data, page, offset, tocopy); - f2fs_put_page(page, 1); + /* + * should never happen, just leave f2fs_bug_on() here to catch + * any potential bug. 
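The f2fs_quota_read() conversion above drops the fixed-blocksize iteration (blkidx/offset) and instead lets each folio dictate how much to copy, so the loop works unchanged with large folios. A reduced sketch of that bookkeeping in terms of the generic pagecache helpers the new loop uses; the folio locking, the uptodate assertion and the -ENOMEM retry of the real function are omitted here:

static ssize_t read_bytes(struct address_space *mapping, loff_t off,
			  char *data, size_t len)
{
	size_t done = 0;

	while (done < len) {
		struct folio *folio = mapping_read_folio_gfp(mapping,
						off >> PAGE_SHIFT, GFP_NOFS);
		size_t offset, tocopy;

		if (IS_ERR(folio))
			return PTR_ERR(folio);
		/* a large folio may span many pages; copy whatever of it
		 * lies past 'off', then advance by exactly that much */
		offset = offset_in_folio(folio, off);
		tocopy = min(folio_size(folio) - offset, len - done);
		memcpy_from_folio(data + done, folio, offset, tocopy);
		folio_put(folio);
		off += tocopy;
		done += tocopy;
	}
	return done;
}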
+ */ + f2fs_bug_on(F2FS_SB(sb), !folio_test_uptodate(folio)); + + memcpy_from_folio(data, folio, offset, tocopy); + f2fs_folio_put(folio, true); - offset = 0; toread -= tocopy; data += tocopy; - blkidx++; + off += tocopy; } return len; } @@ -3250,7 +3508,6 @@ static const struct super_operations f2fs_sops = { .freeze_fs = f2fs_freeze, .unfreeze_fs = f2fs_unfreeze, .statfs = f2fs_statfs, - .remount_fs = f2fs_remount, .shutdown = f2fs_shutdown, }; @@ -3432,12 +3689,13 @@ static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio, bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS); /* it doesn't need to set crypto context for superblock update */ - bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio_index(folio)); + bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio->index); if (!bio_add_folio(bio, folio, folio_size(folio), 0)) f2fs_bug_on(sbi, 1); ret = submit_bio_wait(bio); + bio_put(bio); folio_end_writeback(folio); return ret; @@ -3558,7 +3816,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, return -EFSCORRUPTED; } crc = le32_to_cpu(raw_super->crc); - if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { + if (crc != f2fs_crc32(raw_super, crc_offset)) { f2fs_info(sbi, "Invalid SB checksum value: %u", crc); return -EFSCORRUPTED; } @@ -3717,6 +3975,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi) block_t user_block_count, valid_user_blocks; block_t avail_node_count, valid_node_count; unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks; + unsigned int sit_blk_cnt; int i, j; total = le32_to_cpu(raw_super->segment_count); @@ -3828,6 +4087,13 @@ skip_cross: return 1; } + sit_blk_cnt = DIV_ROUND_UP(main_segs, SIT_ENTRY_PER_BLOCK); + if (sit_bitmap_size * 8 < sit_blk_cnt) { + f2fs_err(sbi, "Wrong bitmap size: sit: %u, sit_blk_cnt:%u", + sit_bitmap_size, sit_blk_cnt); + return 1; + } + cp_pack_start_sum = __start_sum_addr(sbi); cp_payload = __cp_payload(sbi); if (cp_pack_start_sum < cp_payload + 1 || @@ -4106,7 +4372,7 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) /* we should update superblock crc here */ if (!recover && f2fs_sb_has_sb_chksum(sbi)) { - crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi), + crc = f2fs_crc32(F2FS_RAW_SUPER(sbi), offsetof(struct f2fs_super_block, crc)); F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc); } @@ -4304,14 +4570,35 @@ static void f2fs_record_error_work(struct work_struct *work) f2fs_record_stop_reason(sbi); } -static inline unsigned int get_first_zoned_segno(struct f2fs_sb_info *sbi) +static inline unsigned int get_first_seq_zone_segno(struct f2fs_sb_info *sbi) { +#ifdef CONFIG_BLK_DEV_ZONED + unsigned int zoneno, total_zones; int devi; - for (devi = 0; devi < sbi->s_ndevs; devi++) - if (bdev_is_zoned(FDEV(devi).bdev)) - return GET_SEGNO(sbi, FDEV(devi).start_blk); - return 0; + if (!f2fs_sb_has_blkzoned(sbi)) + return NULL_SEGNO; + + for (devi = 0; devi < sbi->s_ndevs; devi++) { + if (!bdev_is_zoned(FDEV(devi).bdev)) + continue; + + total_zones = GET_ZONE_FROM_SEG(sbi, FDEV(devi).total_segments); + + for (zoneno = 0; zoneno < total_zones; zoneno++) { + unsigned int segs, blks; + + if (!f2fs_zone_is_seq(sbi, devi, zoneno)) + continue; + + segs = GET_SEG_FROM_SEC(sbi, + zoneno * sbi->secs_per_zone); + blks = SEGS_TO_BLKS(sbi, segs); + return GET_SEGNO(sbi, FDEV(devi).start_blk + blks); + } + } +#endif + return NULL_SEGNO; } static int f2fs_scan_devices(struct f2fs_sb_info *sbi) @@ -4348,6 +4635,14 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) #endif for (i = 0; i < max_devices; i++) { + if 
(max_devices == 1) { + FDEV(i).total_segments = + le32_to_cpu(raw_super->segment_count_main); + FDEV(i).start_blk = 0; + FDEV(i).end_blk = FDEV(i).total_segments * + BLKS_PER_SEG(sbi); + } + if (i == 0) FDEV(0).bdev_file = sbi->sb->s_bdev_file; else if (!RDEV(i).path[0]) @@ -4472,14 +4767,14 @@ static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi) sbi->readdir_ra = true; } -static int f2fs_fill_super(struct super_block *sb, void *data, int silent) +static int f2fs_fill_super(struct super_block *sb, struct fs_context *fc) { + struct f2fs_fs_context *ctx = fc->fs_private; struct f2fs_sb_info *sbi; struct f2fs_super_block *raw_super; struct inode *root; int err; bool skip_recovery = false, need_fsck = false; - char *options = NULL; int recovery, i, valid_super_block; struct curseg_info *seg_i; int retry_cnt = 1; @@ -4538,22 +4833,18 @@ try_onemore: /* precompute checksum seed for metadata */ if (f2fs_sb_has_inode_chksum(sbi)) - sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid, - sizeof(raw_super->uuid)); + sbi->s_chksum_seed = f2fs_chksum(~0, raw_super->uuid, + sizeof(raw_super->uuid)); default_options(sbi, false); - /* parse mount options */ - options = kstrdup((const char *)data, GFP_KERNEL); - if (data && !options) { - err = -ENOMEM; - goto free_sb_buf; - } - err = parse_options(sbi, options, false); + err = f2fs_check_opt_consistency(fc, sb); if (err) - goto free_options; + goto free_sb_buf; - err = f2fs_default_check(sbi); + f2fs_apply_options(fc, sb); + + err = f2fs_sanity_check_options(sbi, false); if (err) goto free_options; @@ -4718,7 +5009,11 @@ try_onemore: sbi->sectors_written_start = f2fs_get_sectors_written(sbi); /* get segno of first zoned block device */ - sbi->first_zoned_segno = get_first_zoned_segno(sbi); + sbi->first_seq_zone_segno = get_first_seq_zone_segno(sbi); + + sbi->reserved_pin_section = f2fs_sb_has_blkzoned(sbi) ? 
+ ZONED_PIN_SEC_REQUIRED_COUNT : + GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)); /* Read accumulated write IO statistics if exists */ seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); @@ -4880,7 +5175,6 @@ reset_checkpoint: if (err) goto sync_free_meta; } - kvfree(options); /* recover broken superblock */ if (recovery) { @@ -4963,7 +5257,7 @@ free_iostat: f2fs_destroy_iostat(sbi); free_bio_info: for (i = 0; i < NR_PAGE_TYPE; i++) - kvfree(sbi->write_io[i]); + kfree(sbi->write_io[i]); #if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); @@ -4974,8 +5268,8 @@ free_options: for (i = 0; i < MAXQUOTAS; i++) kfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif - fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy); - kvfree(options); + /* no need to free dummy_enc_policy, we just keep it in ctx when failed */ + swap(F2FS_CTX_INFO(ctx).dummy_enc_policy, F2FS_OPTION(sbi).dummy_enc_policy); free_sb_buf: kfree(raw_super); free_sbi: @@ -4991,12 +5285,39 @@ free_sbi: return err; } -static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) +static int f2fs_get_tree(struct fs_context *fc) { - return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super); + return get_tree_bdev(fc, f2fs_fill_super); } +static int f2fs_reconfigure(struct fs_context *fc) +{ + struct super_block *sb = fc->root->d_sb; + + return __f2fs_remount(fc, sb); +} + +static void f2fs_fc_free(struct fs_context *fc) +{ + struct f2fs_fs_context *ctx = fc->fs_private; + + if (!ctx) + return; + +#ifdef CONFIG_QUOTA + f2fs_unnote_qf_name_all(fc); +#endif + fscrypt_free_dummy_policy(&F2FS_CTX_INFO(ctx).dummy_enc_policy); + kfree(ctx); +} + +static const struct fs_context_operations f2fs_context_ops = { + .parse_param = f2fs_parse_param, + .get_tree = f2fs_get_tree, + .reconfigure = f2fs_reconfigure, + .free = f2fs_fc_free, +}; + static void kill_f2fs_super(struct super_block *sb) { struct f2fs_sb_info *sbi = F2FS_SB(sb); @@ -5038,10 +5359,24 @@ static void kill_f2fs_super(struct super_block *sb) } } +static int f2fs_init_fs_context(struct fs_context *fc) +{ + struct f2fs_fs_context *ctx; + + ctx = kzalloc(sizeof(struct f2fs_fs_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + fc->fs_private = ctx; + fc->ops = &f2fs_context_ops; + + return 0; +} + static struct file_system_type f2fs_fs_type = { .owner = THIS_MODULE, .name = "f2fs", - .mount = f2fs_mount, + .init_fs_context = f2fs_init_fs_context, .kill_sb = kill_f2fs_super, .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, }; diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index c69161366467..f736052dea50 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -274,6 +274,13 @@ static ssize_t encoding_show(struct f2fs_attr *a, return sysfs_emit(buf, "(none)\n"); } +static ssize_t encoding_flags_show(struct f2fs_attr *a, + struct f2fs_sb_info *sbi, char *buf) +{ + return sysfs_emit(buf, "%x\n", + le16_to_cpu(F2FS_RAW_SUPER(sbi)->s_encoding_flags)); +} + static ssize_t mounted_time_sec_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { @@ -494,12 +501,12 @@ out: return ret; #ifdef CONFIG_F2FS_FAULT_INJECTION if (a->struct_type == FAULT_INFO_TYPE) { - if (f2fs_build_fault_attr(sbi, 0, t)) + if (f2fs_build_fault_attr(sbi, 0, t, FAULT_TYPE)) return -EINVAL; return count; } if (a->struct_type == FAULT_INFO_RATE) { - if (f2fs_build_fault_attr(sbi, t, 0)) + if (f2fs_build_fault_attr(sbi, t, 0, FAULT_RATE)) return -EINVAL; return count; } @@ -621,6 +628,27 @@ out: return count; } + if (!strcmp(a->attr.name, 
"gc_no_zoned_gc_percent")) { + if (t > 100) + return -EINVAL; + *ui = (unsigned int)t; + return count; + } + + if (!strcmp(a->attr.name, "gc_boost_zoned_gc_percent")) { + if (t > 100) + return -EINVAL; + *ui = (unsigned int)t; + return count; + } + + if (!strcmp(a->attr.name, "gc_valid_thresh_ratio")) { + if (t > 100) + return -EINVAL; + *ui = (unsigned int)t; + return count; + } + #ifdef CONFIG_F2FS_IOSTAT if (!strcmp(a->attr.name, "iostat_enable")) { sbi->iostat_enable = !!t; @@ -817,6 +845,27 @@ out: return count; } + if (!strcmp(a->attr.name, "reserved_pin_section")) { + if (t > GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi))) + return -EINVAL; + *ui = (unsigned int)t; + return count; + } + + if (!strcmp(a->attr.name, "gc_boost_gc_multiple")) { + if (t < 1 || t > SEGS_PER_SEC(sbi)) + return -EINVAL; + sbi->gc_thread->boost_gc_multiple = (unsigned int)t; + return count; + } + + if (!strcmp(a->attr.name, "gc_boost_gc_greedy")) { + if (t > GC_GREEDY) + return -EINVAL; + sbi->gc_thread->boost_gc_greedy = (unsigned int)t; + return count; + } + *ui = (unsigned int)t; return count; @@ -1043,6 +1092,8 @@ GC_THREAD_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time); GC_THREAD_RW_ATTR(gc_no_zoned_gc_percent, no_zoned_gc_percent); GC_THREAD_RW_ATTR(gc_boost_zoned_gc_percent, boost_zoned_gc_percent); GC_THREAD_RW_ATTR(gc_valid_thresh_ratio, valid_thresh_ratio); +GC_THREAD_RW_ATTR(gc_boost_gc_multiple, boost_gc_multiple); +GC_THREAD_RW_ATTR(gc_boost_gc_greedy, boost_gc_greedy); /* SM_INFO ATTR */ SM_INFO_RW_ATTR(reclaim_segments, rec_prefree_segments); @@ -1123,6 +1174,7 @@ F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec); F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy); #endif F2FS_SBI_GENERAL_RW_ATTR(carve_out); +F2FS_SBI_GENERAL_RW_ATTR(reserved_pin_section); /* STAT_INFO ATTR */ #ifdef CONFIG_F2FS_STAT_FS @@ -1158,6 +1210,7 @@ F2FS_GENERAL_RO_ATTR(features); F2FS_GENERAL_RO_ATTR(current_reserved_blocks); F2FS_GENERAL_RO_ATTR(unusable); F2FS_GENERAL_RO_ATTR(encoding); +F2FS_GENERAL_RO_ATTR(encoding_flags); F2FS_GENERAL_RO_ATTR(mounted_time_sec); F2FS_GENERAL_RO_ATTR(main_blkaddr); F2FS_GENERAL_RO_ATTR(pending_discard); @@ -1199,6 +1252,9 @@ F2FS_FEATURE_RO_ATTR(readonly); F2FS_FEATURE_RO_ATTR(compression); #endif F2FS_FEATURE_RO_ATTR(pin_file); +#ifdef CONFIG_UNICODE +F2FS_FEATURE_RO_ATTR(linear_lookup); +#endif #define ATTR_LIST(name) (&f2fs_attr_##name.attr) static struct attribute *f2fs_attrs[] = { @@ -1209,6 +1265,8 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(gc_no_zoned_gc_percent), ATTR_LIST(gc_boost_zoned_gc_percent), ATTR_LIST(gc_valid_thresh_ratio), + ATTR_LIST(gc_boost_gc_multiple), + ATTR_LIST(gc_boost_gc_greedy), ATTR_LIST(gc_idle), ATTR_LIST(gc_urgent), ATTR_LIST(reclaim_segments), @@ -1270,6 +1328,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(reserved_blocks), ATTR_LIST(current_reserved_blocks), ATTR_LIST(encoding), + ATTR_LIST(encoding_flags), ATTR_LIST(mounted_time_sec), #ifdef CONFIG_F2FS_STAT_FS ATTR_LIST(cp_foreground_calls), @@ -1311,6 +1370,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(last_age_weight), ATTR_LIST(max_read_extent_count), ATTR_LIST(carve_out), + ATTR_LIST(reserved_pin_section), NULL, }; ATTRIBUTE_GROUPS(f2fs); @@ -1347,6 +1407,9 @@ static struct attribute *f2fs_feat_attrs[] = { BASE_ATTR_LIST(compression), #endif BASE_ATTR_LIST(pin_file), +#ifdef CONFIG_UNICODE + BASE_ATTR_LIST(linear_lookup), +#endif NULL, }; ATTRIBUTE_GROUPS(f2fs_feat); @@ -1679,6 +1742,24 @@ static int __maybe_unused disk_map_seq_show(struct seq_file *seq, 
return 0; } +#ifdef CONFIG_F2FS_FAULT_INJECTION +static int __maybe_unused inject_stats_seq_show(struct seq_file *seq, + void *offset) +{ + struct super_block *sb = seq->private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; + int i; + + seq_puts(seq, "fault_type injected_count\n"); + + for (i = 0; i < FAULT_MAX; i++) + seq_printf(seq, "%-24s%-10u\n", f2fs_fault_name[i], + ffi->inject_count[i]); + return 0; +} +#endif + int __init f2fs_init_sysfs(void) { int ret; @@ -1770,6 +1851,10 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi) discard_plist_seq_show, sb); proc_create_single_data("disk_map", 0444, sbi->s_proc, disk_map_seq_show, sb); +#ifdef CONFIG_F2FS_FAULT_INJECTION + proc_create_single_data("inject_stats", 0444, sbi->s_proc, + inject_stats_seq_show, sb); +#endif return 0; put_feature_list_kobj: kobject_put(&sbi->s_feature_list_kobj); diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index c691b35618ad..58632a2b6613 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -136,7 +136,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler, #ifdef CONFIG_F2FS_FS_SECURITY static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array, - void *page) + void *folio) { const struct xattr *xattr; int err = 0; @@ -144,7 +144,7 @@ static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array, for (xattr = xattr_array; xattr->name != NULL; xattr++) { err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY, xattr->name, xattr->value, - xattr->value_len, (struct page *)page, 0); + xattr->value_len, folio, 0); if (err < 0) break; } @@ -152,10 +152,10 @@ static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array, } int f2fs_init_security(struct inode *inode, struct inode *dir, - const struct qstr *qstr, struct page *ipage) + const struct qstr *qstr, struct folio *ifolio) { return security_inode_init_security(inode, dir, qstr, - &f2fs_initxattrs, ipage); + f2fs_initxattrs, ifolio); } #endif @@ -271,25 +271,25 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode, return entry; } -static int read_inline_xattr(struct inode *inode, struct page *ipage, +static int read_inline_xattr(struct inode *inode, struct folio *ifolio, void *txattr_addr) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int inline_size = inline_xattr_size(inode); - struct page *page = NULL; + struct folio *folio = NULL; void *inline_addr; - if (ipage) { - inline_addr = inline_xattr_addr(inode, ipage); + if (ifolio) { + inline_addr = inline_xattr_addr(inode, ifolio); } else { - page = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(page)) - return PTR_ERR(page); + folio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(folio)) + return PTR_ERR(folio); - inline_addr = inline_xattr_addr(inode, page); + inline_addr = inline_xattr_addr(inode, folio); } memcpy(txattr_addr, inline_addr, inline_size); - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); return 0; } @@ -299,22 +299,22 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr) struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t xnid = F2FS_I(inode)->i_xattr_nid; unsigned int inline_size = inline_xattr_size(inode); - struct page *xpage; + struct folio *xfolio; void *xattr_addr; /* The inode already has an extended attribute block. 
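(The xattr.c hunks in this region apply the same fixed page -> folio substitutions used throughout the series, all visible in this diff:
	f2fs_get_inode_page()          -> f2fs_get_inode_folio()
	f2fs_get_xnode_page()          -> f2fs_get_xnode_folio()
	page_address()                 -> folio_address()
	f2fs_put_page(page, 1)         -> f2fs_folio_put(folio, true)
	f2fs_wait_on_page_writeback()  -> f2fs_folio_wait_writeback()
	set_page_dirty()               -> folio_mark_dirty())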
*/ - xpage = f2fs_get_xnode_page(sbi, xnid); - if (IS_ERR(xpage)) - return PTR_ERR(xpage); + xfolio = f2fs_get_xnode_folio(sbi, xnid); + if (IS_ERR(xfolio)) + return PTR_ERR(xfolio); - xattr_addr = page_address(xpage); + xattr_addr = folio_address(xfolio); memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE); - f2fs_put_page(xpage, 1); + f2fs_folio_put(xfolio, true); return 0; } -static int lookup_all_xattrs(struct inode *inode, struct page *ipage, +static int lookup_all_xattrs(struct inode *inode, struct folio *ifolio, unsigned int index, unsigned int len, const char *name, struct f2fs_xattr_entry **xe, void **base_addr, int *base_size, @@ -338,7 +338,7 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage, /* read from inline xattr */ if (inline_size) { - err = read_inline_xattr(inode, ipage, txattr_addr); + err = read_inline_xattr(inode, ifolio, txattr_addr); if (err) goto out; @@ -385,7 +385,7 @@ out: return err; } -static int read_all_xattrs(struct inode *inode, struct page *ipage, +static int read_all_xattrs(struct inode *inode, struct folio *ifolio, void **base_addr) { struct f2fs_xattr_header *header; @@ -402,7 +402,7 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage, /* read from inline xattr */ if (inline_size) { - err = read_inline_xattr(inode, ipage, txattr_addr); + err = read_inline_xattr(inode, ifolio, txattr_addr); if (err) goto fail; } @@ -429,14 +429,14 @@ fail: } static inline int write_all_xattrs(struct inode *inode, __u32 hsize, - void *txattr_addr, struct page *ipage) + void *txattr_addr, struct folio *ifolio) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); size_t inline_size = inline_xattr_size(inode); - struct page *in_page = NULL; + struct folio *in_folio = NULL; void *xattr_addr; void *inline_addr = NULL; - struct page *xpage; + struct folio *xfolio; nid_t new_nid = 0; int err = 0; @@ -446,73 +446,73 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, /* write to inline xattr */ if (inline_size) { - if (ipage) { - inline_addr = inline_xattr_addr(inode, ipage); + if (ifolio) { + inline_addr = inline_xattr_addr(inode, ifolio); } else { - in_page = f2fs_get_inode_page(sbi, inode->i_ino); - if (IS_ERR(in_page)) { + in_folio = f2fs_get_inode_folio(sbi, inode->i_ino); + if (IS_ERR(in_folio)) { f2fs_alloc_nid_failed(sbi, new_nid); - return PTR_ERR(in_page); + return PTR_ERR(in_folio); } - inline_addr = inline_xattr_addr(inode, in_page); + inline_addr = inline_xattr_addr(inode, in_folio); } - f2fs_wait_on_page_writeback(ipage ? ipage : in_page, + f2fs_folio_wait_writeback(ifolio ? ifolio : in_folio, NODE, true, true); /* no need to use xattr node block */ if (hsize <= inline_size) { err = f2fs_truncate_xattr_node(inode); f2fs_alloc_nid_failed(sbi, new_nid); if (err) { - f2fs_put_page(in_page, 1); + f2fs_folio_put(in_folio, true); return err; } memcpy(inline_addr, txattr_addr, inline_size); - set_page_dirty(ipage ? ipage : in_page); + folio_mark_dirty(ifolio ? 
ifolio : in_folio); goto in_page_out; } } /* write to xattr node block */ if (F2FS_I(inode)->i_xattr_nid) { - xpage = f2fs_get_xnode_page(sbi, F2FS_I(inode)->i_xattr_nid); - if (IS_ERR(xpage)) { - err = PTR_ERR(xpage); + xfolio = f2fs_get_xnode_folio(sbi, F2FS_I(inode)->i_xattr_nid); + if (IS_ERR(xfolio)) { + err = PTR_ERR(xfolio); f2fs_alloc_nid_failed(sbi, new_nid); goto in_page_out; } f2fs_bug_on(sbi, new_nid); - f2fs_wait_on_page_writeback(xpage, NODE, true, true); + f2fs_folio_wait_writeback(xfolio, NODE, true, true); } else { struct dnode_of_data dn; set_new_dnode(&dn, inode, NULL, NULL, new_nid); - xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET); - if (IS_ERR(xpage)) { - err = PTR_ERR(xpage); + xfolio = f2fs_new_node_folio(&dn, XATTR_NODE_OFFSET); + if (IS_ERR(xfolio)) { + err = PTR_ERR(xfolio); f2fs_alloc_nid_failed(sbi, new_nid); goto in_page_out; } f2fs_alloc_nid_done(sbi, new_nid); } - xattr_addr = page_address(xpage); + xattr_addr = folio_address(xfolio); if (inline_size) memcpy(inline_addr, txattr_addr, inline_size); memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE); if (inline_size) - set_page_dirty(ipage ? ipage : in_page); - set_page_dirty(xpage); + folio_mark_dirty(ifolio ? ifolio : in_folio); + folio_mark_dirty(xfolio); - f2fs_put_page(xpage, 1); + f2fs_folio_put(xfolio, true); in_page_out: - f2fs_put_page(in_page, 1); + f2fs_folio_put(in_folio, true); return err; } int f2fs_getxattr(struct inode *inode, int index, const char *name, - void *buffer, size_t buffer_size, struct page *ipage) + void *buffer, size_t buffer_size, struct folio *ifolio) { struct f2fs_xattr_entry *entry = NULL; int error; @@ -528,11 +528,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name, if (len > F2FS_NAME_LEN) return -ERANGE; - if (!ipage) + if (!ifolio) f2fs_down_read(&F2FS_I(inode)->i_xattr_sem); - error = lookup_all_xattrs(inode, ipage, index, len, name, + error = lookup_all_xattrs(inode, ifolio, index, len, name, &entry, &base_addr, &base_size, &is_inline); - if (!ipage) + if (!ifolio) f2fs_up_read(&F2FS_I(inode)->i_xattr_sem); if (error) return error; @@ -627,7 +627,7 @@ static bool f2fs_xattr_value_same(struct f2fs_xattr_entry *entry, static int __f2fs_setxattr(struct inode *inode, int index, const char *name, const void *value, size_t size, - struct page *ipage, int flags) + struct folio *ifolio, int flags) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_xattr_entry *here, *last; @@ -651,7 +651,7 @@ static int __f2fs_setxattr(struct inode *inode, int index, if (size > MAX_VALUE_LEN(inode)) return -E2BIG; retry: - error = read_all_xattrs(inode, ipage, &base_addr); + error = read_all_xattrs(inode, ifolio, &base_addr); if (error) return error; @@ -766,7 +766,7 @@ retry: *(u32 *)((u8 *)last + newsize) = 0; } - error = write_all_xattrs(inode, new_hsize, base_addr, ipage); + error = write_all_xattrs(inode, new_hsize, base_addr, ifolio); if (error) goto exit; @@ -800,7 +800,7 @@ exit: int f2fs_setxattr(struct inode *inode, int index, const char *name, const void *value, size_t size, - struct page *ipage, int flags) + struct folio *ifolio, int flags) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int err; @@ -815,14 +815,14 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name, return err; /* this case is only from f2fs_init_inode_metadata */ - if (ipage) + if (ifolio) return __f2fs_setxattr(inode, index, name, value, - size, ipage, flags); + size, ifolio, flags); f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); 
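	/*
	 * Lock order on this slow path: f2fs_lock_op() (the checkpoint
	 * rwsem) is taken before i_xattr_sem.  The ipage -> NULL change in
	 * the call below is deliberate: ifolio is known to be NULL here,
	 * since the ifolio case returned early above, so __f2fs_setxattr()
	 * looks the inode folio up itself.
	 */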
 f2fs_down_write(&F2FS_I(inode)->i_xattr_sem);
- err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
+ err = __f2fs_setxattr(inode, index, name, value, size, NULL, flags);
 f2fs_up_write(&F2FS_I(inode)->i_xattr_sem);
 f2fs_unlock_op(sbi);
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index a005ffdcf717..4fc0b2305fbd 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -127,26 +127,26 @@
 extern const struct xattr_handler f2fs_xattr_security_handler;
 extern const struct xattr_handler * const f2fs_xattr_handlers[];
-extern int f2fs_setxattr(struct inode *, int, const char *,
-		const void *, size_t, struct page *, int);
-extern int f2fs_getxattr(struct inode *, int, const char *, void *,
-		size_t, struct page *);
-extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
-extern int f2fs_init_xattr_caches(struct f2fs_sb_info *);
-extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *);
+int f2fs_setxattr(struct inode *, int, const char *, const void *,
+		size_t, struct folio *, int);
+int f2fs_getxattr(struct inode *, int, const char *, void *,
+		size_t, struct folio *);
+ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
+int f2fs_init_xattr_caches(struct f2fs_sb_info *);
+void f2fs_destroy_xattr_caches(struct f2fs_sb_info *);
 #else
 #define f2fs_xattr_handlers	NULL
 #define f2fs_listxattr		NULL
 static inline int f2fs_setxattr(struct inode *inode, int index,
 		const char *name, const void *value, size_t size,
-		struct page *page, int flags)
+		struct folio *folio, int flags)
 {
 	return -EOPNOTSUPP;
 }
 static inline int f2fs_getxattr(struct inode *inode, int index,
 		const char *name, void *buffer,
-		size_t buffer_size, struct page *dpage)
+		size_t buffer_size, struct folio *dfolio)
 {
 	return -EOPNOTSUPP;
 }
@@ -155,11 +155,11 @@
 static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { }
 #endif
 #ifdef CONFIG_F2FS_FS_SECURITY
-extern int f2fs_init_security(struct inode *, struct inode *,
-		const struct qstr *, struct page *);
+int f2fs_init_security(struct inode *, struct inode *,
+		const struct qstr *, struct folio *);
 #else
 static inline int f2fs_init_security(struct inode *inode, struct inode *dir,
-		const struct qstr *qstr, struct page *ipage)
+		const struct qstr *qstr, struct folio *ifolio)
 {
 	return 0;
 }
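(Taking the super.c hunks together, the conversion to the new mount API splits option handling into three phases with distinct ownership:
	parse:  f2fs_parse_param() fills struct f2fs_fs_context only,
	        recording opt_mask/spec_mask as it goes;
	check:  f2fs_check_opt_consistency() and f2fs_sanity_check_options()
	        validate the context against the live super_block without
	        side effects;
	apply:  f2fs_apply_options() copies the context into F2FS_OPTION(sbi)
	        only after the checks pass.
Parsed options therefore never reach F2FS_OPTION(sbi) unless the checks succeed, unlike the old parse_options() path which wrote into the sbi directly, and .remount_fs is retired in favour of fs_context_operations.reconfigure, wired up via f2fs_init_fs_context().)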