Diffstat (limited to 'fs/f2fs/f2fs.h')
-rw-r--r--	fs/f2fs/f2fs.h	766
1 file changed, 492 insertions(+), 274 deletions(-)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index fced2b7652f4..9333a22b9a01 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -11,7 +11,6 @@
#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
-#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
@@ -25,7 +24,6 @@
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <linux/rw_hint.h>
-#include <crypto/hash.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
@@ -64,16 +62,26 @@ enum {
FAULT_BLKADDR_VALIDITY,
FAULT_BLKADDR_CONSISTENCE,
FAULT_NO_SEGMENT,
+ FAULT_INCONSISTENT_FOOTER,
+ FAULT_TIMEOUT,
+ FAULT_VMALLOC,
FAULT_MAX,
};
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-#define F2FS_ALL_FAULT_TYPE (GENMASK(FAULT_MAX - 1, 0))
+/* indicate which option to update */
+enum fault_option {
+ FAULT_RATE = 1, /* only update fault rate */
+ FAULT_TYPE = 2, /* only update fault type */
+ FAULT_ALL = 4, /* reset all fault injection options/stats */
+};
+#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info {
atomic_t inject_ops;
- unsigned int inject_rate;
+ int inject_rate;
unsigned int inject_type;
+ /* used to account the total injection count for each fault type */
+ unsigned int inject_count[FAULT_MAX];
};
extern const char *f2fs_fault_name[FAULT_MAX];
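
/*
 * Illustrative sketch (not part of this patch): one way a sysfs-style
 * handler could consume the fault_option flags above. The helper name is
 * hypothetical; only the flag semantics come from the comments.
 */
static inline void f2fs_example_update_faults(struct f2fs_fault_info *ffi,
		unsigned int opt, int rate, unsigned int type)
{
	if (opt & FAULT_ALL)
		memset(ffi, 0, sizeof(*ffi));	/* reset options and stats */
	if (opt & FAULT_RATE)
		ffi->inject_rate = rate;
	if (opt & FAULT_TYPE)
		ffi->inject_type = type;
}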
@@ -116,6 +124,13 @@ extern const char *f2fs_fault_name[FAULT_MAX];
#define F2FS_MOUNT_GC_MERGE 0x02000000
#define F2FS_MOUNT_COMPRESS_CACHE 0x04000000
#define F2FS_MOUNT_AGE_EXTENT_CACHE 0x08000000
+#define F2FS_MOUNT_NAT_BITS 0x10000000
+#define F2FS_MOUNT_INLINECRYPT 0x20000000
+/*
+ * Some f2fs environments expect to be able to pass the "lazytime" option
+ * string rather than using the MS_LAZYTIME flag, so this must remain.
+ */
+#define F2FS_MOUNT_LAZYTIME 0x40000000
#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -134,6 +149,12 @@ typedef u32 nid_t;
#define COMPRESS_EXT_NUM 16
+enum blkzone_allocation_policy {
+ BLKZONE_ALLOC_PRIOR_SEQ, /* Prioritize writing to sequential zones */
+ BLKZONE_ALLOC_ONLY_SEQ, /* Only allow writing to sequential zones */
+ BLKZONE_ALLOC_PRIOR_CONV, /* Prioritize writing to conventional zones */
+};
+
/*
* An implementation of an rwsem that is explicitly unfair to readers. This
* prevents priority inversion when a low-priority reader acquires the read lock
@@ -208,6 +229,7 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_CASEFOLD 0x00001000
#define F2FS_FEATURE_COMPRESSION 0x00002000
#define F2FS_FEATURE_RO 0x00004000
+#define F2FS_FEATURE_DEVICE_ALIAS 0x00008000
#define __F2FS_HAS_FEATURE(raw_super, mask) \
((raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -285,6 +307,7 @@ enum {
APPEND_INO, /* for append ino list */
UPDATE_INO, /* for update ino list */
TRANS_DIR_INO, /* for transactions dir ino list */
+ XATTR_DIR_INO, /* for xattr updated dir ino list */
FLUSH_INO, /* for multiple device flushing */
MAX_INO_ENTRY, /* max. list */
};
@@ -303,7 +326,7 @@ struct inode_entry {
struct fsync_node_entry {
struct list_head list; /* list head */
- struct page *page; /* warm node page pointer */
+ struct folio *folio; /* warm node folio pointer */
unsigned int seq_id; /* sequence id */
};
@@ -531,7 +554,7 @@ struct f2fs_filename {
* internal operation where usr_fname is also NULL. In all these cases
* we fall back to treating the name as an opaque byte sequence.
*/
- struct fscrypt_str cf_name;
+ struct qstr cf_name;
#endif
};
@@ -592,6 +615,9 @@ enum {
/* congestion wait timeout value, default: 20ms */
#define DEFAULT_IO_TIMEOUT (msecs_to_jiffies(20))
+/* injected timeout value, default: 1000ms */
+#define DEFAULT_FAULT_TIMEOUT (msecs_to_jiffies(1000))
+
/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8
@@ -628,6 +654,9 @@ enum {
#define DEF_HOT_DATA_AGE_THRESHOLD 262144
#define DEF_WARM_DATA_AGE_THRESHOLD 2621440
+/* default max read extent count per inode */
+#define DEF_MAX_READ_EXTENT_COUNT 10240
+
/* extent cache type */
enum extent_type {
EX_READ,
@@ -765,11 +794,6 @@ enum {
#define DEF_DIR_LEVEL 0
-enum {
- GC_FAILURE_PIN,
- MAX_GC_FAILURE
-};
-
/* used for f2fs_inode_info->flags */
enum {
FI_NEW_INODE, /* indicate newly allocated inode */
@@ -789,7 +813,6 @@ enum {
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
FI_DATA_EXIST, /* indicate data exists */
- FI_INLINE_DOTS, /* indicate inline dot dentries */
FI_SKIP_WRITES, /* should skip data page writeback */
FI_OPU_WRITE, /* used for opu per file */
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
@@ -807,7 +830,10 @@ enum {
FI_ALIGNED_WRITE, /* enable aligned write */
FI_COW_FILE, /* indicate COW file */
FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */
+ FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */
FI_ATOMIC_REPLACE, /* indicate atomic replace */
+ FI_OPENED_FILE, /* indicate file has been opened */
+ FI_DONATE_FINISHED, /* indicate page donation of file has been finished */
FI_MAX, /* max flag, never be used */
};
@@ -816,14 +842,16 @@ struct f2fs_inode_info {
unsigned long i_flags; /* keep an inode flags for ioctl */
unsigned char i_advise; /* use to give file attribute hints */
unsigned char i_dir_level; /* use for dentry level for large dir */
- unsigned int i_current_depth; /* only for directory depth */
- /* for gc failure statistic */
- unsigned int i_gc_failures[MAX_GC_FAILURE];
+ union {
+ unsigned int i_current_depth; /* only for directory depth */
+ unsigned short i_gc_failures; /* for gc failure statistic */
+ };
unsigned int i_pino; /* parent inode number */
umode_t i_acl_mode; /* keep file acl mode temporarily */
/* Use below internally in f2fs*/
unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */
+ unsigned int ioprio_hint; /* hint for IO priority */
struct f2fs_rwsem i_sem; /* protect fi info */
atomic_t dirty_pages; /* # of dirty pages */
f2fs_hash_t chash; /* hash value of given file name */
@@ -843,10 +871,19 @@ struct f2fs_inode_info {
#endif
struct list_head dirty_list; /* dirty list for dirs and files */
struct list_head gdirty_list; /* linked in global dirty list */
+
+ /* linked in global inode list for cache donation */
+ struct list_head gdonate_list;
+ pgoff_t donate_start, donate_end; /* inclusive */
+
struct task_struct *atomic_write_task; /* store atomic write task */
struct extent_tree *extent_tree[NR_EXTENT_CACHES];
/* cached extent_tree entry */
- struct inode *cow_inode; /* copy-on-write inode for atomic write */
+ union {
+ struct inode *cow_inode; /* copy-on-write inode for atomic write */
+ struct inode *atomic_inode;
+ /* point to atomic_inode, available only for cow_inode */
+ };
/* avoid racing between foreground op and gc */
struct f2fs_rwsem i_gc_rwsem[2];
@@ -970,11 +1007,11 @@ struct f2fs_nm_info {
*/
struct dnode_of_data {
struct inode *inode; /* vfs inode pointer */
- struct page *inode_page; /* its inode page, NULL is possible */
- struct page *node_page; /* cached direct node page */
+ struct folio *inode_folio; /* its inode folio, NULL is possible */
+ struct folio *node_folio; /* cached direct node folio */
nid_t nid; /* node id of the direct node block */
unsigned int ofs_in_node; /* data offset in the node page */
- bool inode_page_locked; /* inode page is locked or not */
+ bool inode_folio_locked; /* inode folio is locked or not */
bool node_changed; /* is node block changed */
char cur_level; /* level of hole node page */
char max_level; /* level of current page located */
@@ -982,12 +1019,12 @@ struct dnode_of_data {
};
static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
- struct page *ipage, struct page *npage, nid_t nid)
+ struct folio *ifolio, struct folio *nfolio, nid_t nid)
{
memset(dn, 0, sizeof(*dn));
dn->inode = inode;
- dn->inode_page = ipage;
- dn->node_page = npage;
+ dn->inode_folio = ifolio;
+ dn->node_folio = nfolio;
dn->nid = nid;
}
@@ -1011,7 +1048,7 @@ static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
#define NR_CURSEG_PERSIST_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE (NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
-enum {
+enum log_type {
CURSEG_HOT_DATA = 0, /* directory entry blocks */
CURSEG_WARM_DATA, /* data blocks */
CURSEG_COLD_DATA, /* multimedia or GCed data blocks */
@@ -1056,7 +1093,6 @@ struct f2fs_sm_info {
unsigned int segment_count; /* total # of segments */
unsigned int main_segments; /* # of segments in main area */
unsigned int reserved_segments; /* # of reserved segments */
- unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
unsigned int ovp_segments; /* # of overprovision segments */
/* a threshold to reclaim prefree segments */
@@ -1154,6 +1190,7 @@ enum cp_reason_type {
CP_FASTBOOT_MODE,
CP_SPEC_LOG_NUM,
CP_RECOVER_DIR,
+ CP_XATTR_DIR,
};
enum iostat_type {
@@ -1214,7 +1251,7 @@ struct f2fs_io_info {
unsigned int in_list:1; /* indicate fio is in io_list */
unsigned int is_por:1; /* indicate IO is from recovery or not */
unsigned int encrypted:1; /* indicate file is encrypted */
- unsigned int post_read:1; /* require post read */
+ unsigned int meta_gc:1; /* require meta inode GC */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
struct bio **bio; /* bio for ipu */
@@ -1263,6 +1300,7 @@ enum inode_type {
DIR_INODE, /* for dirty dir inode */
FILE_INODE, /* for dirty regular/symlink inode */
DIRTY_META, /* for all dirtied inode metadata */
+ DONATE_INODE, /* for all inode to donate pages */
NR_INODE_TYPE,
};
@@ -1292,6 +1330,7 @@ struct f2fs_gc_control {
bool no_bg_gc; /* check the space and stop bg_gc */
bool should_migrate_blocks; /* should migrate blocks */
bool err_gc_skipped; /* return EAGAIN if GC skipped */
+ bool one_time; /* require one time GC in one migration unit */
unsigned int nr_free_secs; /* # of free sections to do GC */
};
@@ -1417,7 +1456,8 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr);
* bit 1 PAGE_PRIVATE_ONGOING_MIGRATION
* bit 2 PAGE_PRIVATE_INLINE_INODE
* bit 3 PAGE_PRIVATE_REF_RESOURCE
- * bit 4- f2fs private data
+ * bit 4 PAGE_PRIVATE_ATOMIC_WRITE
+ * bit 5- f2fs private data
*
* Layout B: lowest bit should be 0
* page.private is a wrapped pointer.
@@ -1427,6 +1467,7 @@ enum {
PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
+ PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */
PAGE_PRIVATE_MAX
};
@@ -1557,6 +1598,9 @@ struct f2fs_sb_info {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int blocks_per_blkz; /* F2FS blocks per zone */
+ unsigned int max_open_zones; /* max open zone resources of the zoned device */
+ /* for adjusting the preferred write position of data on zoned UFS */
+ unsigned int blkzone_alloc_policy;
#endif
/* for node-related operations */
@@ -1605,12 +1649,16 @@ struct f2fs_sb_info {
/* for extent tree cache */
struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
atomic64_t allocated_data_blocks; /* for block age extent_cache */
+ unsigned int max_read_extent_count; /* max read extent count per inode */
/* The threshold used for hot and warm data separation */
unsigned int hot_data_age_threshold;
unsigned int warm_data_age_threshold;
unsigned int last_age_weight;
+ /* control donate caches */
+ unsigned int donate_files;
+
/* basic filesystem units */
unsigned int log_sectors_per_block; /* log2 sectors per block */
unsigned int log_blocksize; /* log2 block size */
@@ -1642,6 +1690,7 @@ struct f2fs_sb_info {
unsigned int nquota_files; /* # of quota sysfile */
struct f2fs_rwsem quota_sem; /* blocking cp for flags */
+ struct task_struct *umount_lock_holder; /* s_umount lock holder */
/* # of pages, see count_type */
atomic_t nr_pages[NR_COUNT_TYPE];
@@ -1676,13 +1725,15 @@ struct f2fs_sb_info {
unsigned long long skipped_gc_rwsem; /* FG_GC only */
/* threshold for gc trials on pinned files */
- u64 gc_pin_file_threshold;
+ unsigned short gc_pin_file_threshold;
struct f2fs_rwsem pin_sem;
/* maximum # of trials to find a victim segment for SSR and GC */
unsigned int max_victim_search;
/* migration granularity of garbage collection, unit: segment */
unsigned int migration_granularity;
+ /* migration window granularity of garbage collection, unit: segment */
+ unsigned int migration_window_granularity;
/*
* for stat information.
@@ -1742,14 +1793,12 @@ struct f2fs_sb_info {
unsigned int dirty_device; /* for checkpoint data flush */
spinlock_t dev_lock; /* protect dirty_device */
bool aligned_blksize; /* all devices have the same logical blksize */
+ unsigned int first_seq_zone_segno; /* first segno in sequential zone */
/* For write statistics */
u64 sectors_written_start;
u64 kbytes_written;
- /* Reference to checksum algorithm driver via cryptoapi */
- struct crypto_shash *s_chksum_driver;
-
/* Precomputed FS UUID checksum for seeding other checksums */
__u32 s_chksum_seed;
@@ -1783,6 +1832,9 @@ struct f2fs_sb_info {
u64 committed_atomic_block;
u64 revoked_atomic_block;
+ /* carve out reserved_blocks from total blocks */
+ bool carve_out;
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct kmem_cache *page_array_slab; /* page array entry */
unsigned int page_array_slab_size; /* default page array slab size */
@@ -1863,6 +1915,7 @@ static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
atomic_inc(&ffi->inject_ops);
if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
atomic_set(&ffi->inject_ops, 0);
+ ffi->inject_count[type]++;
f2fs_info_ratelimited(sbi, "inject %s in %s of %pS",
f2fs_fault_name[type], func, parent_func);
return true;
@@ -1924,42 +1977,20 @@ static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
/*
* Inline functions
*/
-static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
- const void *address, unsigned int length)
+static inline u32 __f2fs_crc32(u32 crc, const void *address,
+ unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[4];
- } desc;
- int err;
-
- BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
-
- desc.shash.tfm = sbi->s_chksum_driver;
- *(u32 *)desc.ctx = crc;
-
- err = crypto_shash_update(&desc.shash, address, length);
- BUG_ON(err);
-
- return *(u32 *)desc.ctx;
+ return crc32(crc, address, length);
}
-static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
- unsigned int length)
+static inline u32 f2fs_crc32(const void *address, unsigned int length)
{
- return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
+ return __f2fs_crc32(F2FS_SUPER_MAGIC, address, length);
}
-static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
- void *buf, size_t buf_size)
+static inline u32 f2fs_chksum(u32 crc, const void *address, unsigned int length)
{
- return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
-}
-
-static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
- const void *address, unsigned int length)
-{
- return __f2fs_crc32(sbi, crc, address, length);
+ return __f2fs_crc32(crc, address, length);
}
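
/*
 * Usage sketch (illustrative, not part of this patch): with the
 * crypto_shash driver removed, callers validate a buffer against an
 * on-disk CRC directly, as the old f2fs_crc_valid() did. The helper
 * name below is hypothetical.
 */
static inline bool f2fs_example_crc_valid(__u32 blk_crc, const void *buf,
					  size_t buf_size)
{
	return f2fs_crc32(buf, buf_size) == blk_crc;
}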
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
@@ -1982,9 +2013,14 @@ static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
return F2FS_I_SB(mapping->host);
}
+static inline struct f2fs_sb_info *F2FS_F_SB(struct folio *folio)
+{
+ return F2FS_M_SB(folio->mapping);
+}
+
static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
- return F2FS_M_SB(page_file_mapping(page));
+ return F2FS_F_SB(page_folio(page));
}
static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
@@ -1992,12 +2028,22 @@ static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
return (struct f2fs_super_block *)(sbi->raw_super);
}
+static inline struct f2fs_super_block *F2FS_SUPER_BLOCK(struct folio *folio,
+ pgoff_t index)
+{
+ pgoff_t idx_in_folio = index % (1 << folio_order(folio));
+
+ return (struct f2fs_super_block *)
+ (page_address(folio_page(folio, idx_in_folio)) +
+ F2FS_SUPER_OFFSET);
+}
+
static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
return (struct f2fs_checkpoint *)(sbi->ckpt);
}
-static inline struct f2fs_node *F2FS_NODE(struct page *page)
+static inline struct f2fs_node *F2FS_NODE(const struct page *page)
{
return (struct f2fs_node *)page_address(page);
}
@@ -2042,6 +2088,16 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
return sbi->node_inode->i_mapping;
}
+static inline bool is_meta_folio(struct folio *folio)
+{
+ return folio->mapping == META_MAPPING(F2FS_F_SB(folio));
+}
+
+static inline bool is_node_folio(struct folio *folio)
+{
+ return folio->mapping == NODE_MAPPING(F2FS_F_SB(folio));
+}
+
static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
return test_bit(type, &sbi->s_flag);
@@ -2201,6 +2257,36 @@ static inline void f2fs_up_write(struct f2fs_rwsem *sem)
#endif
}
+static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
+{
+ unsigned long flags;
+ unsigned char *nat_bits;
+
+ /*
+ * Re-enabling nat_bits would require forcing fsck.f2fs via
+ * set_sbi_flag(sbi, SBI_NEED_FSCK), but that is too costly, so
+ * rely on a regular fsck or an unclean shutdown instead.
+ */
+
+ if (lock)
+ spin_lock_irqsave(&sbi->cp_lock, flags);
+ __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
+ nat_bits = NM_I(sbi)->nat_bits;
+ NM_I(sbi)->nat_bits = NULL;
+ if (lock)
+ spin_unlock_irqrestore(&sbi->cp_lock, flags);
+
+ kvfree(nat_bits);
+}
+
+static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
+ struct cp_control *cpc)
+{
+ bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
+
+ return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
+}
+
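/*
 * Illustrative sketch (not part of this patch): possible callers of the
 * two helpers above. The function name and the "corrupted" parameter are
 * hypothetical; the cp_lock note mirrors disable_nat_bits() itself.
 */
static inline void f2fs_example_nat_bits(struct f2fs_sb_info *sbi,
					 struct cp_control *cpc, bool corrupted)
{
	if (corrupted) {
		/* drop the cached bitmap; cp_lock is taken internally */
		disable_nat_bits(sbi, true);
		return;
	}
	if (enabled_nat_bits(sbi, cpc))
		f2fs_info(sbi, "nat_bits is still valid for this checkpoint");
}
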
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
f2fs_down_read(&sbi->cp_rwsem);
@@ -2309,7 +2395,7 @@ static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
struct inode *inode, blkcnt_t *count, bool partial)
{
- blkcnt_t diff = 0, release = 0;
+ long long diff = 0, release = 0;
block_t avail_user_block_count;
int ret;
@@ -2329,26 +2415,27 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
spin_lock(&sbi->stat_lock);
- sbi->total_valid_block_count += (block_t)(*count);
- avail_user_block_count = get_available_block_count(sbi, inode, true);
- if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
+ avail_user_block_count = get_available_block_count(sbi, inode, true);
+ diff = (long long)sbi->total_valid_block_count + *count -
+ avail_user_block_count;
+ if (unlikely(diff > 0)) {
if (!partial) {
spin_unlock(&sbi->stat_lock);
+ release = *count;
goto enospc;
}
-
- diff = sbi->total_valid_block_count - avail_user_block_count;
if (diff > *count)
diff = *count;
*count -= diff;
release = diff;
- sbi->total_valid_block_count -= diff;
if (!*count) {
spin_unlock(&sbi->stat_lock);
goto enospc;
}
}
+ sbi->total_valid_block_count += (block_t)(*count);
+
spin_unlock(&sbi->stat_lock);
if (unlikely(release)) {
@@ -2393,14 +2480,17 @@ static inline void clear_page_private_##name(struct page *page) \
PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
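
/*
 * Usage sketch (illustrative, not part of this patch): the generated
 * helpers for the new ATOMIC_WRITE bit follow the same pattern as the
 * existing inline/gcing ones. The wrapper name below is hypothetical.
 */
static inline void f2fs_example_mark_atomic_write(struct page *page, bool set)
{
	if (set)
		set_page_private_atomic(page);
	else if (page_private_atomic(page))
		clear_page_private_atomic(page);
}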
static inline unsigned long get_page_private_data(struct page *page)
{
@@ -2432,6 +2522,7 @@ static inline void clear_page_private_all(struct page *page)
clear_page_private_reference(page);
clear_page_private_gcing(page);
clear_page_private_inline(page);
+ clear_page_private_atomic(page);
f2fs_bug_on(F2FS_P_SB(page), page_private(page));
}
@@ -2443,8 +2534,14 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
- sbi->total_valid_block_count -= (block_t)count;
+ if (unlikely(sbi->total_valid_block_count < count)) {
+ f2fs_warn(sbi, "Inconsistent total_valid_block_count:%u, ino:%lu, count:%u",
+ sbi->total_valid_block_count, inode->i_ino, count);
+ sbi->total_valid_block_count = 0;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ } else {
+ sbi->total_valid_block_count -= count;
+ }
if (sbi->reserved_blocks &&
sbi->current_reserved_blocks < sbi->reserved_blocks)
sbi->current_reserved_blocks = min(sbi->reserved_blocks,
@@ -2742,33 +2839,46 @@ static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}
-static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
- pgoff_t index, bool for_write)
+static inline struct folio *f2fs_grab_cache_folio(struct address_space *mapping,
+ pgoff_t index, bool for_write)
{
- struct page *page;
+ struct folio *folio;
unsigned int flags;
if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
+ fgf_t fgf_flags;
+
if (!for_write)
- page = find_get_page_flags(mapping, index,
- FGP_LOCK | FGP_ACCESSED);
+ fgf_flags = FGP_LOCK | FGP_ACCESSED;
else
- page = find_lock_page(mapping, index);
- if (page)
- return page;
+ fgf_flags = FGP_LOCK;
+ folio = __filemap_get_folio(mapping, index, fgf_flags, 0);
+ if (!IS_ERR(folio))
+ return folio;
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
if (!for_write)
- return grab_cache_page(mapping, index);
+ return filemap_grab_folio(mapping, index);
flags = memalloc_nofs_save();
- page = grab_cache_page_write_begin(mapping, index);
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
memalloc_nofs_restore(flags);
- return page;
+ return folio;
+}
+
+static inline struct folio *f2fs_filemap_get_folio(
+ struct address_space *mapping, pgoff_t index,
+ fgf_t fgp_flags, gfp_t gfp_mask)
+{
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
+ return ERR_PTR(-ENOMEM);
+
+ return __filemap_get_folio(mapping, index, fgp_flags, gfp_mask);
}
static inline struct page *f2fs_pagecache_get_page(
@@ -2781,26 +2891,33 @@ static inline struct page *f2fs_pagecache_get_page(
return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}
-static inline void f2fs_put_page(struct page *page, int unlock)
+static inline void f2fs_folio_put(struct folio *folio, bool unlock)
{
- if (!page)
+ if (IS_ERR_OR_NULL(folio))
return;
if (unlock) {
- f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
- unlock_page(page);
+ f2fs_bug_on(F2FS_F_SB(folio), !folio_test_locked(folio));
+ folio_unlock(folio);
}
- put_page(page);
+ folio_put(folio);
+}
+
+static inline void f2fs_put_page(struct page *page, int unlock)
+{
+ if (!page)
+ return;
+ f2fs_folio_put(page_folio(page), unlock);
}
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
- if (dn->node_page)
- f2fs_put_page(dn->node_page, 1);
- if (dn->inode_page && dn->node_page != dn->inode_page)
- f2fs_put_page(dn->inode_page, 0);
- dn->node_page = NULL;
- dn->inode_page = NULL;
+ if (dn->node_folio)
+ f2fs_folio_put(dn->node_folio, true);
+ if (dn->inode_folio && dn->node_folio != dn->inode_folio)
+ f2fs_folio_put(dn->inode_folio, false);
+ dn->node_folio = NULL;
+ dn->inode_folio = NULL;
}
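
/*
 * Usage sketch (illustrative, not part of this patch): unlike the old page
 * helpers, f2fs_grab_cache_folio() reports failure as an ERR_PTR, and
 * f2fs_folio_put() accepts such values safely. The function name below is
 * hypothetical.
 */
static inline int f2fs_example_with_folio(struct address_space *mapping,
					  pgoff_t index)
{
	struct folio *folio = f2fs_grab_cache_folio(mapping, index, false);

	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/* ... operate on the locked folio ... */
	f2fs_folio_put(folio, true);	/* unlock and drop the reference */
	return 0;
}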
static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
@@ -2851,13 +2968,26 @@ static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
return false;
}
+static inline bool is_inflight_read_io(struct f2fs_sb_info *sbi)
+{
+ return get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_DIO_READ);
+}
+
static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
+ bool zoned_gc = (type == GC_TIME &&
+ F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_BLKZONED));
+
if (sbi->gc_mode == GC_URGENT_HIGH)
return true;
- if (is_inflight_io(sbi, type))
- return false;
+ if (zoned_gc) {
+ if (is_inflight_read_io(sbi))
+ return false;
+ } else {
+ if (is_inflight_io(sbi, type))
+ return false;
+ }
if (sbi->gc_mode == GC_URGENT_MID)
return true;
@@ -2866,6 +2996,9 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
(type == DISCARD_TIME || type == GC_TIME))
return true;
+ if (zoned_gc)
+ return true;
+
return f2fs_time_over(sbi, type);
}
@@ -2897,31 +3030,32 @@ static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
}
static inline int f2fs_has_extra_attr(struct inode *inode);
-static inline block_t data_blkaddr(struct inode *inode,
- struct page *node_page, unsigned int offset)
+static inline unsigned int get_dnode_base(struct inode *inode,
+ struct page *node_page)
{
- struct f2fs_node *raw_node;
- __le32 *addr_array;
- int base = 0;
- bool is_inode = IS_INODE(node_page);
+ if (!IS_INODE(node_page))
+ return 0;
- raw_node = F2FS_NODE(node_page);
+ return inode ? get_extra_isize(inode) :
+ offset_in_addr(&F2FS_NODE(node_page)->i);
+}
- if (is_inode) {
- if (!inode)
- /* from GC path only */
- base = offset_in_addr(&raw_node->i);
- else if (f2fs_has_extra_attr(inode))
- base = get_extra_isize(inode);
- }
+static inline __le32 *get_dnode_addr(struct inode *inode,
+ struct folio *node_folio)
+{
+ return blkaddr_in_node(F2FS_NODE(&node_folio->page)) +
+ get_dnode_base(inode, &node_folio->page);
+}
- addr_array = blkaddr_in_node(raw_node);
- return le32_to_cpu(addr_array[base + offset]);
+static inline block_t data_blkaddr(struct inode *inode,
+ struct folio *node_folio, unsigned int offset)
+{
+ return le32_to_cpu(*(get_dnode_addr(inode, node_folio) + offset));
}
static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
{
- return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
+ return data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node);
}
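
/*
 * Illustrative note (not part of this patch): get_dnode_base() returns 0
 * for direct node folios; for inode folios it is the extra-attr size, or,
 * on the GC path with a NULL inode, the offset encoded in the raw inode.
 * Reading slot 0 of a dnode is then simply:
 */
static inline block_t f2fs_example_first_data_blkaddr(struct inode *inode,
						      struct folio *node_folio)
{
	return data_blkaddr(inode, node_folio, 0);
}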
static inline int f2fs_test_bit(unsigned int nr, char *addr)
@@ -2998,6 +3132,7 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
#define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
#define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
#define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */
+#define F2FS_DEVICE_ALIAS_FL 0x80000000 /* File for aliasing a device */
#define F2FS_QUOTA_DEFAULT_FL (F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL)
@@ -3013,6 +3148,8 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
/* Flags that are appropriate for non-directories/regular files. */
#define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL)
+#define IS_DEVICE_ALIASING(inode) (F2FS_I(inode)->i_flags & F2FS_DEVICE_ALIAS_FL)
+
static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
if (S_ISDIR(mode))
@@ -3035,10 +3172,8 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
return;
fallthrough;
case FI_DATA_EXIST:
- case FI_INLINE_DOTS:
case FI_PIN_FILE:
case FI_COMPRESS_RELEASED:
- case FI_ATOMIC_COMMITTED:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
@@ -3132,7 +3267,7 @@ static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
static inline void f2fs_i_gc_failures_write(struct inode *inode,
unsigned int count)
{
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
+ F2FS_I(inode)->i_gc_failures = count;
f2fs_mark_inode_dirty_sync(inode, true);
}
@@ -3160,8 +3295,6 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
set_bit(FI_INLINE_DENTRY, fi->flags);
if (ri->i_inline & F2FS_DATA_EXIST)
set_bit(FI_DATA_EXIST, fi->flags);
- if (ri->i_inline & F2FS_INLINE_DOTS)
- set_bit(FI_INLINE_DOTS, fi->flags);
if (ri->i_inline & F2FS_EXTRA_ATTR)
set_bit(FI_EXTRA_ATTR, fi->flags);
if (ri->i_inline & F2FS_PIN_FILE)
@@ -3182,8 +3315,6 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
ri->i_inline |= F2FS_INLINE_DENTRY;
if (is_inode_flag_set(inode, FI_DATA_EXIST))
ri->i_inline |= F2FS_DATA_EXIST;
- if (is_inode_flag_set(inode, FI_INLINE_DOTS))
- ri->i_inline |= F2FS_INLINE_DOTS;
if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
ri->i_inline |= F2FS_EXTRA_ATTR;
if (is_inode_flag_set(inode, FI_PIN_FILE))
@@ -3224,26 +3355,20 @@ static inline bool f2fs_need_compress_data(struct inode *inode)
return false;
}
-static inline unsigned int addrs_per_inode(struct inode *inode)
+static inline unsigned int addrs_per_page(struct inode *inode,
+ bool is_inode)
{
- unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
- get_inline_xattr_addrs(inode);
+ unsigned int addrs = is_inode ? (CUR_ADDRS_PER_INODE(inode) -
+ get_inline_xattr_addrs(inode)) : DEF_ADDRS_PER_BLOCK;
- if (!f2fs_compressed_file(inode))
- return addrs;
- return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
+ if (f2fs_compressed_file(inode))
+ return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
+ return addrs;
}
-static inline unsigned int addrs_per_block(struct inode *inode)
+static inline void *inline_xattr_addr(struct inode *inode, struct folio *folio)
{
- if (!f2fs_compressed_file(inode))
- return DEF_ADDRS_PER_BLOCK;
- return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
-}
-
-static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
-{
- struct f2fs_inode *ri = F2FS_INODE(page);
+ struct f2fs_inode *ri = F2FS_INODE(&folio->page);
return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
get_inline_xattr_addrs(inode)]);
@@ -3258,7 +3383,7 @@ static inline int inline_xattr_size(struct inode *inode)
/*
* Notice: check inline_data flag without inode page lock is unsafe.
- * It could change at any time by f2fs_convert_inline_page().
+ * It could change at any time by f2fs_convert_inline_folio().
*/
static inline int f2fs_has_inline_data(struct inode *inode)
{
@@ -3270,11 +3395,6 @@ static inline int f2fs_exist_data(struct inode *inode)
return is_inode_flag_set(inode, FI_DATA_EXIST);
}
-static inline int f2fs_has_inline_dots(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_INLINE_DOTS);
-}
-
static inline int f2fs_is_mmap_file(struct inode *inode)
{
return is_inode_flag_set(inode, FI_MMAP_FILE);
@@ -3295,11 +3415,9 @@ static inline bool f2fs_is_cow_file(struct inode *inode)
return is_inode_flag_set(inode, FI_COW_FILE);
}
-static inline __le32 *get_dnode_addr(struct inode *inode,
- struct page *node_page);
-static inline void *inline_data_addr(struct inode *inode, struct page *page)
+static inline void *inline_data_addr(struct inode *inode, struct folio *folio)
{
- __le32 *addr = get_dnode_addr(inode, page);
+ __le32 *addr = get_dnode_addr(inode, folio);
return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
}
@@ -3425,6 +3543,14 @@ static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}
+static inline void *f2fs_vmalloc(struct f2fs_sb_info *sbi, size_t size)
+{
+ if (time_to_inject(sbi, FAULT_VMALLOC))
+ return NULL;
+
+ return vmalloc(size);
+}
+
static inline int get_extra_isize(struct inode *inode)
{
return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
@@ -3435,17 +3561,6 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
return F2FS_I(inode)->i_inline_xattr_size;
}
-static inline __le32 *get_dnode_addr(struct inode *inode,
- struct page *node_page)
-{
- int base = 0;
-
- if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
- base = get_extra_isize(inode);
-
- return blkaddr_in_node(F2FS_NODE(node_page)) + base;
-}
-
#define f2fs_get_inode_mode(i) \
((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -3497,6 +3612,8 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
+int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
+ bool readonly, bool need_lock);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct mnt_idmap *idmap,
@@ -3510,12 +3627,12 @@ int f2fs_pin_file_control(struct inode *inode, bool inc);
* inode.c
*/
void f2fs_set_inode_flags(struct inode *inode);
-bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
+bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct folio *folio);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
-void f2fs_update_inode(struct inode *inode, struct page *node_page);
+void f2fs_update_inode(struct inode *inode, struct folio *node_folio);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
@@ -3533,36 +3650,50 @@ int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
/*
* dir.c
*/
+#if IS_ENABLED(CONFIG_UNICODE)
int f2fs_init_casefolded_name(const struct inode *dir,
struct f2fs_filename *fname);
+void f2fs_free_casefolded_name(struct f2fs_filename *fname);
+#else
+static inline int f2fs_init_casefolded_name(const struct inode *dir,
+ struct f2fs_filename *fname)
+{
+ return 0;
+}
+
+static inline void f2fs_free_casefolded_name(struct f2fs_filename *fname)
+{
+}
+#endif /* CONFIG_UNICODE */
+
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
- const struct f2fs_filename *fname, int *max_slots);
+ const struct f2fs_filename *fname, int *max_slots,
+ bool use_hash);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d);
-struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct f2fs_filename *fname, struct page *dpage);
+struct folio *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
+ const struct f2fs_filename *fname, struct folio *dfolio);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
- const struct f2fs_filename *fname,
- struct page **res_page);
+ const struct f2fs_filename *fname, struct folio **res_folio);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- const struct qstr *child, struct page **res_page);
-struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
+ const struct qstr *child, struct folio **res_folio);
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct folio **f);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
- struct page **page);
+ struct folio **folio);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
- struct page *page, struct inode *inode);
-bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
+ struct folio *folio, struct inode *inode);
+bool f2fs_has_enough_room(struct inode *dir, struct folio *ifolio,
const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
const struct fscrypt_str *name, f2fs_hash_t name_hash,
@@ -3573,7 +3704,7 @@ int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode);
-void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
+void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct folio *folio,
struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
struct f2fs_filename *fname);
@@ -3594,12 +3725,11 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_dquot_initialize(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
-int f2fs_quota_sync(struct super_block *sb, int type);
+int f2fs_do_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
-void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
- bool irq_context);
+void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason);
void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
@@ -3618,9 +3748,9 @@ struct node_info;
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
-void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
@@ -3633,14 +3763,14 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
unsigned int seq_id);
-bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
int f2fs_remove_inode_page(struct inode *inode);
-struct page *f2fs_new_inode_page(struct inode *inode);
-struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
+struct folio *f2fs_new_inode_folio(struct inode *inode);
+struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
-struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
-struct page *f2fs_get_node_page_ra(struct page *parent, int start);
-int f2fs_move_node_page(struct page *node_page, int gc_type);
+struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid);
+struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino);
+struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid);
+int f2fs_move_node_folio(struct folio *node_folio, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
@@ -3653,12 +3783,11 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
-int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
+int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum);
-void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
@@ -3677,7 +3806,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
-void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
+ unsigned int len);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
@@ -3692,6 +3822,7 @@ void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
+int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
@@ -3702,10 +3833,10 @@ int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
struct cp_control *cpc);
-struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
+struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
block_t blk_addr);
-void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
@@ -3719,14 +3850,18 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
block_t old_addr, block_t new_addr,
unsigned char version, bool recover_curseg,
bool recover_newaddr);
+enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi,
+ enum log_type seg_type);
int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
block_t old_blkaddr, block_t *new_blkaddr,
struct f2fs_summary *sum, int type,
struct f2fs_io_info *fio);
void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
block_t blkaddr, unsigned int blkcnt);
-void f2fs_wait_on_page_writeback(struct page *page,
- enum page_type type, bool ordered, bool locked);
+void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
+ bool ordered, bool locked);
+#define f2fs_wait_on_page_writeback(page, type, ordered, locked) \
+ f2fs_folio_wait_writeback(page_folio(page), type, ordered, locked)
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
block_t len);
@@ -3735,17 +3870,24 @@ void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
-int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
-int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
+int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
-int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
-unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno);
+int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint);
+enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+ enum page_type type, enum temp_type temp);
+unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno);
+unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
+ unsigned int segno);
+
+static inline struct inode *fio_inode(struct f2fs_io_info *fio)
+{
+ return page_folio(fio->page)->mapping->host;
+}
#define DEF_FRAGMENT_SIZE 4
#define MIN_FRAGMENT_SIZE 1
@@ -3763,10 +3905,10 @@ static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
unsigned char reason);
void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
-struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_get_meta_folio_retry(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type);
bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
@@ -3822,7 +3964,7 @@ void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page,
nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
- struct bio **bio, struct page *page);
+ struct bio **bio, struct folio *folio);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
@@ -3836,14 +3978,14 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
-struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
- blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
-struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
- pgoff_t *next_pgofs);
-struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
+ blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
+struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
+ pgoff_t *next_pgofs);
+struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
bool for_write);
-struct page *f2fs_get_new_data_page(struct inode *inode,
- struct page *ipage, pgoff_t index, bool new_i_size);
+struct folio *f2fs_get_new_data_folio(struct inode *inode,
+ struct folio *ifolio, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -3851,7 +3993,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
-int f2fs_write_single_data_page(struct page *page, int *submitted,
+int f2fs_write_single_data_page(struct folio *folio, int *submitted,
struct bio **bio, sector_t *last_block,
struct writeback_control *wbc,
enum iostat_type io_type,
@@ -3860,7 +4002,7 @@ void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool f2fs_release_folio(struct folio *folio, gfp_t wait);
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
-void f2fs_clear_page_cache_dirty_tag(struct page *page);
+void f2fs_clear_page_cache_dirty_tag(struct folio *folio);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
@@ -3884,7 +4026,7 @@ void f2fs_destroy_garbage_collection_cache(void);
/* victim selection function for cleaning and SSR */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
int gc_type, int type, char alloc_mode,
- unsigned long long age);
+ unsigned long long age, bool one_time);
/*
* recovery.c
@@ -3898,6 +4040,19 @@ void f2fs_destroy_recovery_cache(void);
* debug.c
*/
#ifdef CONFIG_F2FS_STAT_FS
+enum {
+ DEVSTAT_INUSE,
+ DEVSTAT_DIRTY,
+ DEVSTAT_FULL,
+ DEVSTAT_FREE,
+ DEVSTAT_PREFREE,
+ DEVSTAT_MAX,
+};
+
+struct f2fs_dev_stats {
+ unsigned int devstats[2][DEVSTAT_MAX]; /* 0: segs, 1: secs */
+};
+
struct f2fs_stat_info {
struct list_head stat_list;
struct f2fs_sb_info *sbi;
@@ -3918,7 +4073,8 @@ struct f2fs_stat_info {
unsigned long long allocated_data_blocks;
int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
int ndirty_data, ndirty_qdata;
- unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
+ unsigned int ndirty_dirs, ndirty_files, ndirty_all;
+ unsigned int nquota_files, ndonate_files;
int nats, dirty_nats, sits, dirty_sits;
int free_nids, avail_nids, alloc_nids;
int total_count, utilization;
@@ -3961,6 +4117,7 @@ struct f2fs_stat_info {
unsigned int block_count[2];
unsigned int inplace_count;
unsigned long long base_mem, cache_mem, page_mem;
+ struct f2fs_dev_stats *dev_stats;
};
static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
@@ -3970,7 +4127,7 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
#define stat_inc_cp_call_count(sbi, foreground) \
atomic_inc(&sbi->cp_call_count[(foreground)])
-#define stat_inc_cp_count(si) (F2FS_STAT(sbi)->cp_count++)
+#define stat_inc_cp_count(sbi) (F2FS_STAT(sbi)->cp_count++)
#define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
@@ -4146,27 +4303,26 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
* inline.c
*/
bool f2fs_may_inline_data(struct inode *inode);
-bool f2fs_sanity_check_inline_data(struct inode *inode);
+bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage);
bool f2fs_may_inline_dentry(struct inode *inode);
-void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
-void f2fs_truncate_inline_inode(struct inode *inode,
- struct page *ipage, u64 from);
-int f2fs_read_inline_data(struct inode *inode, struct page *page);
-int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
+void f2fs_do_read_inline_data(struct folio *folio, struct folio *ifolio);
+void f2fs_truncate_inline_inode(struct inode *inode, struct folio *ifolio,
+ u64 from);
+int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
+int f2fs_convert_inline_folio(struct dnode_of_data *dn, struct folio *folio);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
-int f2fs_write_inline_data(struct inode *inode, struct page *page);
-int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
+int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
+int f2fs_recover_inline_data(struct inode *inode, struct folio *nfolio);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
- const struct f2fs_filename *fname,
- struct page **res_page);
+ const struct f2fs_filename *fname, struct folio **res_folio,
+ bool use_hash);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
- struct page *ipage);
+ struct folio *ifolio);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
- struct page *page, struct inode *dir,
- struct inode *inode);
+ struct folio *folio, struct inode *dir, struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
struct fscrypt_str *fstr);
@@ -4181,13 +4337,15 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc);
+unsigned int f2fs_donate_files(void);
+void f2fs_reclaim_caches(unsigned int reclaim_caches_kb);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
* extent_cache.c
*/
-bool sanity_check_extent_cache(struct inode *inode);
+bool sanity_check_extent_cache(struct inode *inode, struct page *ipage);
void f2fs_init_extent_tree(struct inode *inode);
void f2fs_drop_extent_tree(struct inode *inode);
void f2fs_destroy_extent_node(struct inode *inode);
@@ -4197,7 +4355,7 @@ int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);
/* read extent cache ops */
-void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
+void f2fs_init_read_extent_tree(struct inode *inode, struct folio *ifolio);
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
struct extent_info *ei);
bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
@@ -4258,12 +4416,27 @@ static inline bool f2fs_post_read_required(struct inode *inode)
f2fs_compressed_file(inode);
}
+static inline bool f2fs_used_in_atomic_write(struct inode *inode)
+{
+ return f2fs_is_atomic_file(inode) || f2fs_is_cow_file(inode);
+}
+
+static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
+{
+ return f2fs_post_read_required(inode) || f2fs_used_in_atomic_write(inode);
+}
+
/*
* compress.c
*/
#ifdef CONFIG_F2FS_FS_COMPRESSION
+enum cluster_check_type {
+ CLUSTER_IS_COMPR, /* check only if compressed cluster */
+ CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
+ CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */
+};
bool f2fs_is_compressed_page(struct page *page);
-struct page *f2fs_compress_control_page(struct page *page);
+struct folio *f2fs_compress_control_folio(struct folio *folio);
int f2fs_prepare_compress_overwrite(struct inode *inode,
struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
@@ -4282,22 +4455,23 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
int index, int nr_pages, bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
-void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio);
int f2fs_write_multi_pages(struct compress_ctx *cc,
int *submitted,
struct writeback_control *wbc,
enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
+bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
pgoff_t fofs, block_t blkaddr,
unsigned int llen, unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
- bool is_readahead, bool for_write);
+ struct readahead_control *rac, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
bool in_task);
-void f2fs_put_page_dic(struct page *page, bool in_task);
+void f2fs_put_folio_dic(struct folio *folio, bool in_task);
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
unsigned int ofs_in_node);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
@@ -4310,10 +4484,11 @@ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
-void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
+void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
nid_t ino, block_t blkaddr);
-bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode) \
@@ -4338,7 +4513,7 @@ static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
return false;
}
static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
-static inline struct page *f2fs_compress_control_page(struct page *page)
+static inline struct folio *f2fs_compress_control_folio(struct folio *folio)
{
WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL);
@@ -4352,7 +4527,7 @@ static inline void f2fs_end_read_compressed_page(struct page *page,
{
WARN_ON_ONCE(1);
}
-static inline void f2fs_put_page_dic(struct page *page, bool in_task)
+static inline void f2fs_put_folio_dic(struct folio *folio, bool in_task)
{
WARN_ON_ONCE(1);
}
@@ -4365,15 +4540,21 @@ static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
-static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
- block_t blkaddr) { }
+static inline void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
struct page *page, nid_t ino, block_t blkaddr) { }
-static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
- struct page *page, block_t blkaddr) { return false; }
+static inline bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi,
+ struct folio *folio, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
nid_t ino) { }
#define inc_compr_inode_stat(inode) do { } while (0)
+static inline int f2fs_is_compressed_cluster(
+ struct inode *inode,
+ pgoff_t index) { return 0; }
+static inline bool f2fs_is_sparse_cluster(
+ struct inode *inode,
+ pgoff_t index) { return true; }
static inline void f2fs_update_read_extent_tree_range_compressed(
struct inode *inode,
pgoff_t fofs, block_t blkaddr,
@@ -4384,22 +4565,18 @@ static inline int set_compress_context(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
- F2FS_I(inode)->i_compress_algorithm =
- F2FS_OPTION(sbi).compress_algorithm;
- F2FS_I(inode)->i_log_cluster_size =
- F2FS_OPTION(sbi).compress_log_size;
- F2FS_I(inode)->i_compress_flag =
- F2FS_OPTION(sbi).compress_chksum ?
- BIT(COMPRESS_CHKSUM) : 0;
- F2FS_I(inode)->i_cluster_size =
- BIT(F2FS_I(inode)->i_log_cluster_size);
- if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
- F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
+ fi->i_compress_algorithm = F2FS_OPTION(sbi).compress_algorithm;
+ fi->i_log_cluster_size = F2FS_OPTION(sbi).compress_log_size;
+ fi->i_compress_flag = F2FS_OPTION(sbi).compress_chksum ?
+ BIT(COMPRESS_CHKSUM) : 0;
+ fi->i_cluster_size = BIT(fi->i_log_cluster_size);
+ if ((fi->i_compress_algorithm == COMPRESS_LZ4 ||
+ fi->i_compress_algorithm == COMPRESS_ZSTD) &&
F2FS_OPTION(sbi).compress_level)
- F2FS_I(inode)->i_compress_level =
- F2FS_OPTION(sbi).compress_level;
- F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
+ fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
+ fi->i_flags |= F2FS_COMPR_FL;
set_inode_flag(inode, FI_COMPRESSED_FILE);
stat_inc_compr_inode(inode);
inc_compr_inode_stat(inode);
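
Editor's aside, not part of the diff: a minimal user-space sketch of the cluster-size arithmetic in the hunk above, assuming BIT() behaves like the kernel's (1UL << n); CHKSUM_FLAG_BIT is an invented placeholder for the real COMPRESS_CHKSUM bit defined elsewhere in f2fs.h.

#include <stdio.h>

#define BIT(nr)		(1UL << (nr))
#define CHKSUM_FLAG_BIT	0	/* placeholder bit number, for illustration only */

int main(void)
{
	unsigned int log_cluster_size = 2;	/* e.g. mount option compress_log_size=2 */
	int chksum_enabled = 1;			/* e.g. mount option compress_chksum */

	/* i_cluster_size = BIT(i_log_cluster_size): log 2 -> 4 blocks per cluster */
	unsigned long cluster_size = BIT(log_cluster_size);
	/* i_compress_flag gets the checksum bit only when the option is set */
	unsigned long compress_flag = chksum_enabled ? BIT(CHKSUM_FLAG_BIT) : 0;

	printf("cluster of %lu blocks, compress flag 0x%lx\n",
	       cluster_size, compress_flag);
	return 0;
}
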
@@ -4414,15 +4591,15 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
- f2fs_down_write(&F2FS_I(inode)->i_sem);
+ f2fs_down_write(&fi->i_sem);
if (!f2fs_compressed_file(inode)) {
- f2fs_up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&fi->i_sem);
return true;
}
if (f2fs_is_mmap_file(inode) ||
(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
- f2fs_up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&fi->i_sem);
return false;
}
@@ -4431,7 +4608,7 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
clear_inode_flag(inode, FI_COMPRESSED_FILE);
f2fs_mark_inode_dirty_sync(inode, true);
- f2fs_up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&fi->i_sem);
return true;
}
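
Editor's aside, not part of the diff: a sketch of the write-lock/check/early-return shape used in f2fs_disable_compressed_file() above, with a POSIX rwlock standing in for fi->i_sem and plain booleans standing in for the inode state.

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static bool compressed = true;	/* stand-in for the "compressed file" flag */
static bool has_blocks;		/* stand-in for the "already has data" check */

/* returns true when the flag could be (or already was) cleared */
static bool disable_compressed(void)
{
	pthread_rwlock_wrlock(&lock);
	if (!compressed) {		/* nothing to do: unlock and succeed */
		pthread_rwlock_unlock(&lock);
		return true;
	}
	if (has_blocks) {		/* too late to disable: unlock and fail */
		pthread_rwlock_unlock(&lock);
		return false;
	}
	compressed = false;		/* actually clear the state under the lock */
	pthread_rwlock_unlock(&lock);
	return true;
}

int main(void)
{
	return disable_compressed() ? 0 : 1;
}
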
@@ -4455,14 +4632,19 @@ F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
+F2FS_FEATURE_FUNCS(device_alias, DEVICE_ALIAS);
#ifdef CONFIG_BLK_DEV_ZONED
-static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
- block_t blkaddr)
+static inline bool f2fs_zone_is_seq(struct f2fs_sb_info *sbi, int devi,
+ unsigned int zone)
{
- unsigned int zno = blkaddr / sbi->blocks_per_blkz;
+ return test_bit(zone, FDEV(devi).blkz_seq);
+}
- return test_bit(zno, FDEV(devi).blkz_seq);
+static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
+ block_t blkaddr)
+{
+ return f2fs_zone_is_seq(sbi, devi, blkaddr / sbi->blocks_per_blkz);
}
#endif
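
Editor's aside, not part of the diff: a minimal sketch of how a zone number is derived from a block address and looked up in a per-device bitmap, mirroring f2fs_zone_is_seq()/f2fs_blkz_is_seq() above; test_bit_simple() and the sample sizes are stand-ins, not the kernel's helpers.

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* simplified stand-in for the kernel's test_bit() */
static bool test_bit_simple(unsigned int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long blkz_seq[2] = { 0x6, 0x0 };	/* zones 1 and 2 are sequential */
	unsigned int blocks_per_blkz = 512;		/* made-up zone size in blocks */
	unsigned int blkaddr = 1000;

	/* zone number = block address / blocks per zone, as in the hunk above */
	unsigned int zone = blkaddr / blocks_per_blkz;	/* -> zone 1 */

	printf("block %u is in zone %u, sequential: %d\n",
	       blkaddr, zone, test_bit_simple(zone, blkz_seq));
	return 0;
}
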
@@ -4534,15 +4716,31 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}
-static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi,
+static inline bool f2fs_is_sequential_zone_area(struct f2fs_sb_info *sbi,
block_t blkaddr)
{
if (f2fs_sb_has_blkzoned(sbi)) {
+#ifdef CONFIG_BLK_DEV_ZONED
int devi = f2fs_target_device_index(sbi, blkaddr);
- return !bdev_is_zoned(FDEV(devi).bdev);
+ if (!bdev_is_zoned(FDEV(devi).bdev))
+ return false;
+
+ if (f2fs_is_multi_device(sbi)) {
+ if (blkaddr < FDEV(devi).start_blk ||
+ blkaddr > FDEV(devi).end_blk) {
+ f2fs_err(sbi, "Invalid block %x", blkaddr);
+ return false;
+ }
+ blkaddr -= FDEV(devi).start_blk;
+ }
+
+ return f2fs_blkz_is_seq(sbi, devi, blkaddr);
+#else
+ return false;
+#endif
}
- return true;
+ return false;
}
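
Editor's aside, not part of the diff: a sketch of the multi-device range check and start_blk translation performed above before the zone lookup; struct dev_range and the sample numbers are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct dev_range {			/* simplified stand-in for FDEV(devi) */
	unsigned int start_blk;
	unsigned int end_blk;
};

/* translate a filesystem block address into a device-relative one */
static bool to_device_relative(const struct dev_range *dev,
			       unsigned int blkaddr, unsigned int *rel)
{
	if (blkaddr < dev->start_blk || blkaddr > dev->end_blk)
		return false;		/* out of range: reject, as the hunk does */
	*rel = blkaddr - dev->start_blk;
	return true;
}

int main(void)
{
	struct dev_range dev1 = { .start_blk = 4096, .end_blk = 8191 };
	unsigned int rel;

	if (to_device_relative(&dev1, 5000, &rel))
		printf("device-relative block %u\n", rel);	/* prints 904 */
	return 0;
}
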
static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
@@ -4596,10 +4794,15 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
}
#ifdef CONFIG_F2FS_FAULT_INJECTION
-extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
- unsigned int type);
+extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
+ unsigned long type, enum fault_option fo);
#else
-#define f2fs_build_fault_attr(sbi, rate, type) do { } while (0)
+static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
+ unsigned long rate, unsigned long type,
+ enum fault_option fo)
+{
+ return 0;
+}
#endif
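
Editor's aside, not part of the diff: the hunk above swaps a do-nothing macro for a static-inline stub so callers can rely on the int return value under either config; below is a generic sketch of that pattern, with MY_FEATURE and my_feature_setup() as invented names.

#include <stdio.h>

/* #define MY_FEATURE 1 */		/* toggle to exercise both branches */

#ifdef MY_FEATURE
int my_feature_setup(unsigned long rate)
{
	printf("feature enabled, rate %lu\n", rate);
	return 0;
}
#else
/* no-op stub: callers still compile and can check the return value */
static inline int my_feature_setup(unsigned long rate)
{
	(void)rate;
	return 0;
}
#endif

int main(void)
{
	return my_feature_setup(10);
}
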
static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
@@ -4626,9 +4829,24 @@ static inline void f2fs_io_schedule_timeout(long timeout)
io_schedule_timeout(timeout);
}
-static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
- enum page_type type)
+static inline void f2fs_io_schedule_timeout_killable(long timeout)
{
+ while (timeout) {
+ if (fatal_signal_pending(current))
+ return;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ if (timeout <= DEFAULT_IO_TIMEOUT)
+ return;
+ timeout -= DEFAULT_IO_TIMEOUT;
+ }
+}
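
Editor's aside, not part of the diff: a user-space sketch of the chunked-wait pattern in f2fs_io_schedule_timeout_killable() above, sleeping one bounded slice at a time and bailing out once a signal has arrived; SLICE_MS stands in for DEFAULT_IO_TIMEOUT and SIGINT for a fatal signal.

#include <signal.h>
#include <unistd.h>

#define SLICE_MS	20	/* placeholder for DEFAULT_IO_TIMEOUT */

static volatile sig_atomic_t got_signal;

static void on_int(int sig) { (void)sig; got_signal = 1; }

/* sleep for roughly total_ms, but return early if a signal arrived */
static void wait_killable(long total_ms)
{
	while (total_ms > 0) {
		if (got_signal)
			return;			/* like fatal_signal_pending() */
		usleep(SLICE_MS * 1000);	/* one bounded slice */
		total_ms -= SLICE_MS;
	}
}

int main(void)
{
	signal(SIGINT, on_int);
	wait_killable(200);
	return 0;
}
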
+
+static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi,
+ struct folio *folio, enum page_type type)
+{
+ pgoff_t ofs = folio->index;
+
if (unlikely(f2fs_cp_error(sbi)))
return;
@@ -4653,13 +4871,13 @@ static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
int i = 0;
do {
- struct page *page;
+ struct folio *folio;
- page = find_get_page(META_MAPPING(sbi), blkaddr + i);
- if (page) {
- if (PageWriteback(page))
+ folio = filemap_get_folio(META_MAPPING(sbi), blkaddr + i);
+ if (!IS_ERR(folio)) {
+ if (folio_test_writeback(folio))
need_submit = true;
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
}
} while (++i < cnt && !need_submit);
@@ -4673,10 +4891,10 @@ static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
}
static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
- block_t blkaddr)
+ block_t blkaddr, unsigned int len)
{
- f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1);
- f2fs_invalidate_compress_page(sbi, blkaddr);
+ f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
+ f2fs_invalidate_compress_pages_range(sbi, blkaddr, len);
}
#define EFSBADCRC EBADMSG /* Bad CRC detected */