Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--  fs/f2fs/data.c | 413
1 file changed, 208 insertions(+), 205 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 54f89f0ee69b..7961e0ddfca3 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -47,14 +47,14 @@ void f2fs_destroy_bioset(void)
bioset_exit(&f2fs_bioset);
}
-bool f2fs_is_cp_guaranteed(struct page *page)
+bool f2fs_is_cp_guaranteed(const struct folio *folio)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct inode *inode;
struct f2fs_sb_info *sbi;
- if (!mapping)
- return false;
+ if (fscrypt_is_bounce_folio(folio))
+ return folio_test_f2fs_gcing(fscrypt_pagecache_folio(folio));
inode = mapping->host;
sbi = F2FS_I_SB(inode);
@@ -65,7 +65,7 @@ bool f2fs_is_cp_guaranteed(struct page *page)
return true;
if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
- page_private_gcing(page))
+ folio_test_f2fs_gcing(folio))
return true;
return false;
}
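A minimal sketch, not from this patch, of the bounce-folio redirection the new check relies on: for fscrypt writes the gcing flag lives on the original pagecache folio, so the helper chases the bounce folio back first. example_data_folio is a hypothetical name; the fscrypt accessors are the ones used in the hunk above.

#include <linux/fscrypt.h>

static struct folio *example_data_folio(struct folio *folio)
{
	/* encrypted writes go through a bounce folio owned by fscrypt */
	if (fscrypt_is_bounce_folio(folio))
		return fscrypt_pagecache_folio(folio);
	return folio;
}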
@@ -142,16 +142,16 @@ static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
bio_for_each_folio_all(fi, bio) {
struct folio *folio = fi.folio;
- if (f2fs_is_compressed_page(&folio->page)) {
+ if (f2fs_is_compressed_page(folio)) {
if (ctx && !ctx->decompression_attempted)
- f2fs_end_read_compressed_page(&folio->page, true, 0,
+ f2fs_end_read_compressed_page(folio, true, 0,
in_task);
- f2fs_put_page_dic(&folio->page, in_task);
+ f2fs_put_folio_dic(folio, in_task);
continue;
}
dec_page_count(F2FS_F_SB(folio), __read_io_type(folio));
- folio_end_read(folio, bio->bi_status == 0);
+ folio_end_read(folio, bio->bi_status == BLK_STS_OK);
}
if (ctx)
@@ -181,14 +181,13 @@ static void f2fs_verify_bio(struct work_struct *work)
* as those were handled separately by f2fs_end_read_compressed_page().
*/
if (may_have_compressed_pages) {
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
- bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
- if (!f2fs_is_compressed_page(page) &&
- !fsverity_verify_page(page)) {
+ if (!f2fs_is_compressed_page(folio) &&
+ !fsverity_verify_page(&folio->page)) {
bio->bi_status = BLK_STS_IOERR;
break;
}
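For reference, a minimal sketch (hypothetical end_io helper, using only generic block and pagemap APIs) of the bvec-to-folio iteration these hunks switch to: bio_for_each_folio_all() walks a completed bio one folio at a time through a struct folio_iter.

#include <linux/bio.h>
#include <linux/pagemap.h>

static void example_read_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;

		/* per-folio completion work: unlock and mark uptodate on success */
		folio_end_read(folio, bio->bi_status == BLK_STS_OK);
	}
	bio_put(bio);
}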
@@ -233,16 +232,15 @@ static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
bool in_task)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
bool all_compressed = true;
block_t blkaddr = ctx->fs_blkaddr;
- bio_for_each_segment_all(bv, ctx->bio, iter_all) {
- struct page *page = bv->bv_page;
+ bio_for_each_folio_all(fi, ctx->bio) {
+ struct folio *folio = fi.folio;
- if (f2fs_is_compressed_page(page))
- f2fs_end_read_compressed_page(page, false, blkaddr,
+ if (f2fs_is_compressed_page(folio))
+ f2fs_end_read_compressed_page(folio, false, blkaddr,
in_task);
else
all_compressed = false;
@@ -280,9 +278,9 @@ static void f2fs_post_read_work(struct work_struct *work)
static void f2fs_read_end_io(struct bio *bio)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
+ struct f2fs_sb_info *sbi = F2FS_F_SB(bio_first_folio_all(bio));
struct bio_post_read_ctx *ctx;
- bool intask = in_task();
+ bool intask = in_task() && !irqs_disabled();
iostat_update_and_unbind_ctx(bio);
ctx = bio->bi_private;
@@ -290,7 +288,7 @@ static void f2fs_read_end_io(struct bio *bio)
if (time_to_inject(sbi, FAULT_READ_IO))
bio->bi_status = BLK_STS_IOERR;
- if (bio->bi_status) {
+ if (bio->bi_status != BLK_STS_OK) {
f2fs_finish_read_bio(bio, intask);
return;
}
@@ -339,28 +337,28 @@ static void f2fs_write_end_io(struct bio *bio)
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (f2fs_is_compressed_page(&folio->page)) {
- f2fs_compress_write_end_io(bio, &folio->page);
+ if (f2fs_is_compressed_page(folio)) {
+ f2fs_compress_write_end_io(bio, folio);
continue;
}
#endif
- type = WB_DATA_TYPE(&folio->page, false);
+ type = WB_DATA_TYPE(folio, false);
- if (unlikely(bio->bi_status)) {
+ if (unlikely(bio->bi_status != BLK_STS_OK)) {
mapping_set_error(folio->mapping, -EIO);
if (type == F2FS_WB_CP_DATA)
f2fs_stop_checkpoint(sbi, true,
STOP_CP_REASON_WRITE_FAIL);
}
- f2fs_bug_on(sbi, folio->mapping == NODE_MAPPING(sbi) &&
- folio->index != nid_of_node(&folio->page));
+ f2fs_bug_on(sbi, is_node_folio(folio) &&
+ folio->index != nid_of_node(folio));
dec_page_count(sbi, type);
if (f2fs_in_warm_node_list(sbi, folio))
- f2fs_del_fsync_node_entry(sbi, &folio->page);
- clear_page_private_gcing(&folio->page);
+ f2fs_del_fsync_node_entry(sbi, folio);
+ folio_clear_f2fs_gcing(folio);
folio_end_writeback(folio);
}
if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
@@ -419,7 +417,6 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
{
unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
- struct folio *fio_folio = page_folio(fio->page);
unsigned int fua_flag, meta_flag, io_flag;
blk_opf_t op_flags = 0;
@@ -447,7 +444,7 @@ static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
op_flags |= REQ_FUA;
if (fio->type == DATA &&
- F2FS_I(fio_folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE)
+ F2FS_I(fio->folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE)
op_flags |= REQ_PRIO;
return op_flags;
@@ -546,34 +543,33 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
}
static bool __has_merged_page(struct bio *bio, struct inode *inode,
- struct page *page, nid_t ino)
+ struct folio *folio, nid_t ino)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
if (!bio)
return false;
- if (!inode && !page && !ino)
+ if (!inode && !folio && !ino)
return true;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *target = bvec->bv_page;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *target = fi.folio;
- if (fscrypt_is_bounce_page(target)) {
- target = fscrypt_pagecache_page(target);
+ if (fscrypt_is_bounce_folio(target)) {
+ target = fscrypt_pagecache_folio(target);
if (IS_ERR(target))
continue;
}
if (f2fs_is_compressed_page(target)) {
- target = f2fs_compress_control_page(target);
+ target = f2fs_compress_control_folio(target);
if (IS_ERR(target))
continue;
}
if (inode && inode == target->mapping->host)
return true;
- if (page && page == target)
+ if (folio && folio == target)
return true;
if (ino && ino == ino_of_node(target))
return true;
@@ -642,7 +638,7 @@ unlock_out:
}
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, struct page *page,
+ struct inode *inode, struct folio *folio,
nid_t ino, enum page_type type, bool force)
{
enum temp_type temp;
@@ -654,7 +650,7 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
f2fs_down_read(&io->io_rwsem);
- ret = __has_merged_page(io->bio, inode, page, ino);
+ ret = __has_merged_page(io->bio, inode, folio, ino);
f2fs_up_read(&io->io_rwsem);
}
if (ret)
@@ -672,10 +668,10 @@ void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
}
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, struct page *page,
+ struct inode *inode, struct folio *folio,
nid_t ino, enum page_type type)
{
- __submit_merged_write_cond(sbi, inode, page, ino, type, false);
+ __submit_merged_write_cond(sbi, inode, folio, ino, type, false);
}
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
@@ -692,7 +688,7 @@ void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
- struct folio *fio_folio = page_folio(fio->page);
+ struct folio *fio_folio = fio->folio;
struct folio *data_folio = fio->encrypted_page ?
page_folio(fio->encrypted_page) : fio_folio;
@@ -714,7 +710,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
- __read_io_type(data_folio) : WB_DATA_TYPE(fio->page, false));
+ __read_io_type(data_folio) : WB_DATA_TYPE(fio->folio, false));
if (is_read_io(bio_op(bio)))
f2fs_submit_read_bio(fio->sbi, bio, fio->type);
@@ -780,6 +776,7 @@ static void del_bio_entry(struct bio_entry *be)
static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
struct page *page)
{
+ struct folio *fio_folio = fio->folio;
struct f2fs_sb_info *sbi = fio->sbi;
enum temp_type temp;
bool found = false;
@@ -801,8 +798,8 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
*fio->last_block,
fio->new_blkaddr));
if (f2fs_crypt_mergeable_bio(*bio,
- fio->page->mapping->host,
- page_folio(fio->page)->index, fio) &&
+ fio_folio->mapping->host,
+ fio_folio->index, fio) &&
bio_add_page(*bio, page, PAGE_SIZE, 0) ==
PAGE_SIZE) {
ret = 0;
@@ -826,13 +823,13 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
}
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
- struct bio **bio, struct page *page)
+ struct bio **bio, struct folio *folio)
{
enum temp_type temp;
bool found = false;
struct bio *target = bio ? *bio : NULL;
- f2fs_bug_on(sbi, !target && !page);
+ f2fs_bug_on(sbi, !target && !folio);
for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
@@ -848,7 +845,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
found = (target == be->bio);
else
found = __has_merged_page(be->bio, NULL,
- page, 0);
+ folio, 0);
if (found)
break;
}
@@ -865,7 +862,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
found = (target == be->bio);
else
found = __has_merged_page(be->bio, NULL,
- page, 0);
+ folio, 0);
if (found) {
target = be->bio;
del_bio_entry(be);
@@ -886,15 +883,15 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio = *fio->bio;
- struct page *page = fio->encrypted_page ?
- fio->encrypted_page : fio->page;
- struct folio *folio = page_folio(fio->page);
+ struct folio *data_folio = fio->encrypted_page ?
+ page_folio(fio->encrypted_page) : fio->folio;
+ struct folio *folio = fio->folio;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
return -EFSCORRUPTED;
- trace_f2fs_submit_folio_bio(page_folio(page), fio);
+ trace_f2fs_submit_folio_bio(data_folio, fio);
if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
fio->new_blkaddr))
@@ -905,16 +902,16 @@ alloc_new:
f2fs_set_bio_crypt_ctx(bio, folio->mapping->host,
folio->index, fio, GFP_NOIO);
- add_bio_entry(fio->sbi, bio, page, fio->temp);
+ add_bio_entry(fio->sbi, bio, &data_folio->page, fio->temp);
} else {
- if (add_ipu_page(fio, &bio, page))
+ if (add_ipu_page(fio, &bio, &data_folio->page))
goto alloc_new;
}
if (fio->io_wbc)
wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio));
- inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
+ inc_page_count(fio->sbi, WB_DATA_TYPE(data_folio, false));
*fio->last_block = fio->new_blkaddr;
*fio->bio = bio;
@@ -949,7 +946,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
struct f2fs_sb_info *sbi = fio->sbi;
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
- struct page *bio_page;
+ struct folio *bio_folio;
enum count_type type;
f2fs_bug_on(sbi, is_read_io(fio->op));
@@ -980,44 +977,44 @@ next:
verify_fio_blkaddr(fio);
if (fio->encrypted_page)
- bio_page = fio->encrypted_page;
+ bio_folio = page_folio(fio->encrypted_page);
else if (fio->compressed_page)
- bio_page = fio->compressed_page;
+ bio_folio = page_folio(fio->compressed_page);
else
- bio_page = fio->page;
+ bio_folio = fio->folio;
/* set submitted = true as a return value */
fio->submitted = 1;
- type = WB_DATA_TYPE(bio_page, fio->compressed_page);
+ type = WB_DATA_TYPE(bio_folio, fio->compressed_page);
inc_page_count(sbi, type);
if (io->bio &&
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
fio->new_blkaddr) ||
- !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
- page_folio(bio_page)->index, fio)))
+ !f2fs_crypt_mergeable_bio(io->bio, fio_inode(fio),
+ bio_folio->index, fio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
- f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
- page_folio(bio_page)->index, fio, GFP_NOIO);
+ f2fs_set_bio_crypt_ctx(io->bio, fio_inode(fio),
+ bio_folio->index, fio, GFP_NOIO);
io->fio = *fio;
}
- if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ if (!bio_add_folio(io->bio, bio_folio, folio_size(bio_folio), 0)) {
__submit_merged_bio(io);
goto alloc_new;
}
if (fio->io_wbc)
- wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
- PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio->folio,
+ folio_size(fio->folio));
io->last_block_in_bio = fio->new_blkaddr;
- trace_f2fs_submit_folio_write(page_folio(fio->page), fio);
+ trace_f2fs_submit_folio_write(fio->folio, fio);
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
@@ -1116,7 +1113,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct folio *folio,
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
+ __le32 *addr = get_dnode_addr(dn->inode, dn->node_folio);
dn->data_blkaddr = blkaddr;
addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
@@ -1125,14 +1122,14 @@ static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
/*
* Lock ordering for the change of data block address:
* ->data_page
- * ->node_page
+ * ->node_folio
* update block addresses in the node page
*/
void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
__set_data_blkaddr(dn, blkaddr);
- if (set_page_dirty(dn->node_page))
+ if (folio_mark_dirty(dn->node_folio))
dn->node_changed = true;
}
@@ -1160,7 +1157,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
dn->ofs_in_node, count);
- f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
for (; count > 0; dn->ofs_in_node++) {
block_t blkaddr = f2fs_data_blkaddr(dn);
@@ -1171,7 +1168,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
}
}
- if (set_page_dirty(dn->node_page))
+ if (folio_mark_dirty(dn->node_folio))
dn->node_changed = true;
return 0;
}
@@ -1189,7 +1186,7 @@ int f2fs_reserve_new_block(struct dnode_of_data *dn)
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
- bool need_put = dn->inode_page ? false : true;
+ bool need_put = dn->inode_folio ? false : true;
int err;
err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
@@ -1257,7 +1254,7 @@ got_it:
* A new dentry page is allocated but not able to be written, since its
* new inode page couldn't be allocated due to -ENOSPC.
* In such the case, its blkaddr can be remained as NEW_ADDR.
- * see, f2fs_add_link -> f2fs_get_new_data_page ->
+ * see, f2fs_add_link -> f2fs_get_new_data_folio ->
* f2fs_init_inode_metadata.
*/
if (dn.data_blkaddr == NEW_ADDR) {
@@ -1338,57 +1335,57 @@ struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
*
* Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
- * Note that, ipage is set only by make_empty_dir, and if any error occur,
- * ipage should be released by this function.
+ * Note that, ifolio is set only by make_empty_dir, and if any error occur,
+ * ifolio should be released by this function.
*/
-struct page *f2fs_get_new_data_page(struct inode *inode,
- struct page *ipage, pgoff_t index, bool new_i_size)
+struct folio *f2fs_get_new_data_folio(struct inode *inode,
+ struct folio *ifolio, pgoff_t index, bool new_i_size)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
struct dnode_of_data dn;
int err;
- page = f2fs_grab_cache_page(mapping, index, true);
- if (!page) {
+ folio = f2fs_grab_cache_folio(mapping, index, true);
+ if (IS_ERR(folio)) {
/*
- * before exiting, we should make sure ipage will be released
+ * before exiting, we should make sure ifolio will be released
* if any error occur.
*/
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return ERR_PTR(-ENOMEM);
}
- set_new_dnode(&dn, inode, ipage, NULL, 0);
+ set_new_dnode(&dn, inode, ifolio, NULL, 0);
err = f2fs_reserve_block(&dn, index);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
- if (!ipage)
+ if (!ifolio)
f2fs_put_dnode(&dn);
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
goto got_it;
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_SIZE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
} else {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
- /* if ipage exists, blkaddr should be NEW_ADDR */
- f2fs_bug_on(F2FS_I_SB(inode), ipage);
- page = f2fs_get_lock_data_page(inode, index, true);
- if (IS_ERR(page))
- return page;
+ /* if ifolio exists, blkaddr should be NEW_ADDR */
+ f2fs_bug_on(F2FS_I_SB(inode), ifolio);
+ folio = f2fs_get_lock_data_folio(inode, index, true);
+ if (IS_ERR(folio))
+ return folio;
}
got_it:
if (new_i_size && i_size_read(inode) <
((loff_t)(index + 1) << PAGE_SHIFT))
f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
- return page;
+ return folio;
}
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
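A minimal sketch (hypothetical caller, using the generic filemap_grab_folio() rather than the f2fs wrapper) of the error convention behind the !page to IS_ERR(folio) changes above: folio lookup helpers report failure with ERR_PTR() instead of NULL.

#include <linux/pagemap.h>

static int example_grab_folio(struct address_space *mapping, pgoff_t index)
{
	/* returns a locked folio or ERR_PTR(); it never returns NULL */
	struct folio *folio = filemap_grab_folio(mapping, index);

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	folio_unlock(folio);
	folio_put(folio);
	return 0;
}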
@@ -1553,10 +1550,14 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
unsigned int start_pgofs;
int bidx = 0;
bool is_hole;
+ bool lfs_dio_write;
if (!maxblocks)
return 0;
+ lfs_dio_write = (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
+ map->m_may_create);
+
if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
goto out;
@@ -1572,8 +1573,11 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
end = pgofs + maxblocks;
next_dnode:
- if (map->m_may_create)
+ if (map->m_may_create) {
+ if (f2fs_lfs_mode(sbi))
+ f2fs_balance_fs(sbi, true);
f2fs_map_lock(sbi, flag);
+ }
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -1589,7 +1593,7 @@ next_dnode:
start_pgofs = pgofs;
prealloc = 0;
last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
next_block:
blkaddr = f2fs_data_blkaddr(&dn);
@@ -1603,7 +1607,7 @@ next_block:
/* use out-place-update for direct IO under LFS mode */
if (map->m_may_create && (is_hole ||
(flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
- !f2fs_is_pinned_file(inode)))) {
+ !f2fs_is_pinned_file(inode) && map->m_last_pblk != blkaddr))) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
goto sync_out;
@@ -1687,10 +1691,15 @@ next_block:
if (map->m_multidev_dio)
map->m_bdev = FDEV(bidx).bdev;
+
+ if (lfs_dio_write)
+ map->m_last_pblk = NULL_ADDR;
} else if (map_is_mergeable(sbi, map, blkaddr, flag, bidx, ofs)) {
ofs++;
map->m_len++;
} else {
+ if (lfs_dio_write && !f2fs_is_pinned_file(inode))
+ map->m_last_pblk = blkaddr;
goto sync_out;
}
@@ -1715,14 +1724,6 @@ skip:
dn.ofs_in_node = end_offset;
}
- if (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
- map->m_may_create) {
- /* the next block to be allocated may not be contiguous. */
- if (GET_SEGOFF_FROM_SEG0(sbi, blkaddr) % BLKS_PER_SEC(sbi) ==
- CAP_BLKS_PER_SEC(sbi) - 1)
- goto sync_out;
- }
-
if (pgofs >= end)
goto sync_out;
else if (dn.ofs_in_node < end_offset)
@@ -1825,7 +1826,6 @@ static int f2fs_xattr_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *page;
struct node_info ni;
__u64 phys = 0, len;
__u32 flags;
@@ -1834,15 +1834,15 @@ static int f2fs_xattr_fiemap(struct inode *inode,
if (f2fs_has_inline_xattr(inode)) {
int offset;
+ struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi),
+ inode->i_ino, false);
- page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
- inode->i_ino, false);
- if (!page)
- return -ENOMEM;
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
@@ -1854,7 +1854,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
phys += offset;
len = inline_xattr_size(inode);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
@@ -1868,20 +1868,22 @@ static int f2fs_xattr_fiemap(struct inode *inode,
}
if (xnid) {
- page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
- if (!page)
- return -ENOMEM;
+ struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi),
+ xnid, false);
+
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
err = f2fs_get_node_info(sbi, xnid, &ni, false);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
len = inode->i_sb->s_blocksize;
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
flags = FIEMAP_EXTENT_LAST;
}
@@ -2077,7 +2079,7 @@ static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
- pgoff_t index = folio_index(folio);
+ pgoff_t index = folio->index;
int ret = 0;
block_in_file = (sector_t)index;
@@ -2245,7 +2247,7 @@ skip_reading_dnode:
for (i = 1; i < cc->cluster_size; i++) {
block_t blkaddr;
- blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
+ blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i) :
ei.blk + i - 1;
@@ -2279,14 +2281,13 @@ skip_reading_dnode:
block_t blkaddr;
struct bio_post_read_ctx *ctx;
- blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
+ blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i + 1) :
ei.blk + i;
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (f2fs_load_compressed_page(sbi, folio_page(folio, 0),
- blkaddr)) {
+ if (f2fs_load_compressed_folio(sbi, folio, blkaddr)) {
if (atomic_dec_and_test(&dic->remaining_pages)) {
f2fs_decompress_cluster(dic, true);
break;
@@ -2303,7 +2304,7 @@ submit_and_realloc:
}
if (!bio) {
- bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
+ bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages - i,
f2fs_ra_op_flags(rac),
folio->index, for_write);
if (IS_ERR(bio)) {
@@ -2376,6 +2377,14 @@ static int f2fs_mpage_readpages(struct inode *inode,
unsigned max_nr_pages = nr_pages;
int ret = 0;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ index = rac ? readahead_index(rac) : folio->index;
+ max_nr_pages = round_up(index + nr_pages, cc.cluster_size) -
+ round_down(index, cc.cluster_size);
+ }
+#endif
+
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
@@ -2392,7 +2401,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
- index = folio_index(folio);
+ index = folio->index;
if (!f2fs_compressed_file(inode))
goto read_single_page;
@@ -2501,8 +2510,9 @@ static void f2fs_readahead(struct readahead_control *rac)
int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
{
- struct inode *inode = fio->page->mapping->host;
- struct page *mpage, *page;
+ struct inode *inode = fio_inode(fio);
+ struct folio *mfolio;
+ struct page *page;
gfp_t gfp_flags = GFP_NOFS;
if (!f2fs_encrypted_file(inode))
@@ -2527,12 +2537,12 @@ retry_encrypt:
return PTR_ERR(fio->encrypted_page);
}
- mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
- if (mpage) {
- if (PageUptodate(mpage))
- memcpy(page_address(mpage),
+ mfolio = filemap_lock_folio(META_MAPPING(fio->sbi), fio->old_blkaddr);
+ if (!IS_ERR(mfolio)) {
+ if (folio_test_uptodate(mfolio))
+ memcpy(folio_address(mfolio),
page_address(fio->encrypted_page), PAGE_SIZE);
- f2fs_put_page(mpage, 1);
+ f2fs_folio_put(mfolio, true);
}
return 0;
}
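Similarly, a sketch (hypothetical helper) of the find_lock_page() to filemap_lock_folio() switch above: the folio variant returns ERR_PTR(-ENOENT) when nothing is cached, so the "found" test becomes !IS_ERR(folio).

#include <linux/pagemap.h>

static bool example_meta_folio_uptodate(struct address_space *mapping,
					pgoff_t index)
{
	struct folio *folio = filemap_lock_folio(mapping, index);
	bool uptodate = false;

	if (!IS_ERR(folio)) {
		uptodate = folio_test_uptodate(folio);
		folio_unlock(folio);
		folio_put(folio);
	}
	return uptodate;
}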
@@ -2631,7 +2641,7 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
- struct inode *inode = fio->page->mapping->host;
+ struct inode *inode = fio_inode(fio);
if (f2fs_should_update_outplace(inode, fio))
return false;
@@ -2641,7 +2651,7 @@ static inline bool need_inplace_update(struct f2fs_io_info *fio)
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
- struct folio *folio = page_folio(fio->page);
+ struct folio *folio = fio->folio;
struct inode *inode = folio->mapping->host;
struct dnode_of_data dn;
struct node_info ni;
@@ -2651,7 +2661,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* Use COW inode to make dnode_of_data for atomic write */
atomic_commit = f2fs_is_atomic_file(inode) &&
- page_private_atomic(folio_page(folio, 0));
+ folio_test_f2fs_atomic(folio);
if (atomic_commit)
set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
else
@@ -2682,7 +2692,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
folio_clear_uptodate(folio);
- clear_page_private_gcing(folio_page(folio, 0));
+ folio_clear_f2fs_gcing(folio);
goto out_writepage;
}
got_it:
@@ -2752,7 +2762,7 @@ got_it:
trace_f2fs_do_write_data_page(folio, OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
if (atomic_commit)
- clear_page_private_atomic(folio_page(folio, 0));
+ folio_clear_f2fs_atomic(folio);
out_writepage:
f2fs_put_dnode(&dn);
out:
@@ -2770,7 +2780,6 @@ int f2fs_write_single_data_page(struct folio *folio, int *submitted,
bool allow_balance)
{
struct inode *inode = folio->mapping->host;
- struct page *page = folio_page(folio, 0);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long)i_size)
@@ -2787,7 +2796,7 @@ int f2fs_write_single_data_page(struct folio *folio, int *submitted,
.op = REQ_OP_WRITE,
.op_flags = wbc_to_write_flags(wbc),
.old_blkaddr = NULL_ADDR,
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
.submitted = 0,
.compr_blocks = compr_blocks,
@@ -2855,13 +2864,7 @@ write:
goto done;
}
- if (!wbc->for_reclaim)
- need_balance_fs = true;
- else if (has_not_enough_free_secs(sbi, 0, 0))
- goto redirty_out;
- else
- set_inode_flag(inode, FI_HOT_DATA);
-
+ need_balance_fs = true;
err = -EAGAIN;
if (f2fs_has_inline_data(inode)) {
err = f2fs_write_inline_data(inode, folio);
@@ -2895,14 +2898,7 @@ out:
inode_dec_dirty_pages(inode);
if (err) {
folio_clear_uptodate(folio);
- clear_page_private_gcing(page);
- }
-
- if (wbc->for_reclaim) {
- f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
- clear_inode_flag(inode, FI_HOT_DATA);
- f2fs_remove_dirty_inode(inode);
- submitted = NULL;
+ folio_clear_f2fs_gcing(folio);
}
folio_unlock(folio);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
@@ -2929,9 +2925,9 @@ redirty_out:
* file_write_and_wait_range() will see EIO error, which is critical
* to return value of fsync() followed by atomic_write failure to user.
*/
- if (!err || wbc->for_reclaim)
- return AOP_WRITEPAGE_ACTIVATE;
folio_unlock(folio);
+ if (!err)
+ return 1;
return err;
}
@@ -3128,7 +3124,7 @@ continue_unlock:
if (folio_test_writeback(folio)) {
if (wbc->sync_mode == WB_SYNC_NONE)
goto continue_unlock;
- f2fs_wait_on_page_writeback(&folio->page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
}
if (!folio_clear_dirty_for_io(folio))
@@ -3145,8 +3141,6 @@ continue_unlock:
ret = f2fs_write_single_data_page(folio,
&submitted, &bio, &last_block,
wbc, io_type, 0, true);
- if (ret == AOP_WRITEPAGE_ACTIVATE)
- folio_unlock(folio);
#ifdef CONFIG_F2FS_FS_COMPRESSION
result:
#endif
@@ -3158,7 +3152,7 @@ result:
* keep nr_to_write, since vfs uses this to
* get # of written pages.
*/
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ if (ret == 1) {
ret = 0;
goto next;
} else if (ret == -EAGAIN) {
@@ -3352,7 +3346,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
struct inode *inode = folio->mapping->host;
pgoff_t index = folio->index;
struct dnode_of_data dn;
- struct page *ipage;
+ struct folio *ifolio;
bool locked = false;
int flag = F2FS_GET_BLOCK_PRE_AIO;
int err = 0;
@@ -3377,23 +3371,23 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
restart:
/* check inline_data */
- ipage = f2fs_get_inode_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
goto unlock_out;
}
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
if (f2fs_has_inline_data(inode)) {
if (pos + len <= MAX_INLINE_DATA(inode)) {
- f2fs_do_read_inline_data(folio, ipage);
+ f2fs_do_read_inline_data(folio, ifolio);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
- set_page_private_inline(ipage);
+ folio_set_f2fs_inline(ifolio);
goto out;
}
- err = f2fs_convert_inline_page(&dn, folio_page(folio, 0));
+ err = f2fs_convert_inline_folio(&dn, folio);
if (err || dn.data_blkaddr != NULL_ADDR)
goto out;
}
@@ -3437,14 +3431,14 @@ static int __find_data_block(struct inode *inode, pgoff_t index,
block_t *blk_addr)
{
struct dnode_of_data dn;
- struct page *ipage;
+ struct folio *ifolio;
int err = 0;
- ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
if (!f2fs_lookup_read_extent_cache_block(inode, index,
&dn.data_blkaddr)) {
@@ -3465,17 +3459,17 @@ static int __reserve_data_block(struct inode *inode, pgoff_t index,
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
- struct page *ipage;
+ struct folio *ifolio;
int err = 0;
f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
- ipage = f2fs_get_inode_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
goto unlock_out;
}
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
&dn.data_blkaddr))
@@ -3533,8 +3527,10 @@ reserve_block:
return 0;
}
-static int f2fs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
+static int f2fs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, struct folio **foliop,
+ void **fsdata)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -3623,7 +3619,7 @@ repeat:
}
}
- f2fs_wait_on_page_writeback(&folio->page, DATA, false, true);
+ f2fs_folio_wait_writeback(folio, DATA, false, true);
if (len == folio_size(folio) || folio_test_uptodate(folio))
return 0;
@@ -3670,7 +3666,7 @@ fail:
return err;
}
-static int f2fs_write_end(struct file *file,
+static int f2fs_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
@@ -3710,7 +3706,7 @@ static int f2fs_write_end(struct file *file,
folio_mark_dirty(folio);
if (f2fs_is_atomic_file(inode))
- set_page_private_atomic(folio_page(folio, 0));
+ folio_set_f2fs_atomic(folio);
if (pos + copied > i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) {
@@ -3745,7 +3741,7 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
f2fs_remove_dirty_inode(inode);
}
}
- clear_page_private_all(&folio->page);
+ folio_detach_private(folio);
}
bool f2fs_release_folio(struct folio *folio, gfp_t wait)
@@ -3754,7 +3750,7 @@ bool f2fs_release_folio(struct folio *folio, gfp_t wait)
if (folio_test_dirty(folio))
return false;
- clear_page_private_all(&folio->page);
+ folio_detach_private(folio);
return true;
}
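As a rough sketch (assumed behaviour of the generic pagemap helper, not f2fs-specific): folio_detach_private() clears the private flag, hands back the old ->private value and drops the reference folio_attach_private() took, which is what lets it stand in for the open-coded clear_page_private_all() here.

#include <linux/pagemap.h>

static void *example_strip_private(struct folio *folio)
{
	/* returns NULL if no private data was attached */
	return folio_detach_private(folio);
}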
@@ -3878,18 +3874,18 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
set_inode_flag(inode, FI_SKIP_WRITES);
for (blkofs = 0; blkofs <= blkofs_end; blkofs++) {
- struct page *page;
+ struct folio *folio;
unsigned int blkidx = secidx * blk_per_sec + blkofs;
- page = f2fs_get_lock_data_page(inode, blkidx, true);
- if (IS_ERR(page)) {
+ folio = f2fs_get_lock_data_folio(inode, blkidx, true);
+ if (IS_ERR(folio)) {
f2fs_up_write(&sbi->pin_sem);
- ret = PTR_ERR(page);
+ ret = PTR_ERR(folio);
goto done;
}
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
clear_inode_flag(inode, FI_SKIP_WRITES);
@@ -3966,7 +3962,7 @@ retry:
if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
nr_pblocks % blks_per_sec ||
- !f2fs_valid_pinned_area(sbi, pblock)) {
+ f2fs_is_sequential_zone_area(sbi, pblock)) {
bool last_extent = false;
not_aligned++;
@@ -4172,7 +4168,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
unsigned int flags, struct iomap *iomap,
struct iomap *srcmap)
{
- struct f2fs_map_blocks map = {};
+ struct f2fs_map_blocks map = { NULL, };
pgoff_t next_pgofs = 0;
int err;
@@ -4181,6 +4177,10 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
map.m_next_pgofs = &next_pgofs;
map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
inode->i_write_hint);
+ if (flags & IOMAP_WRITE && iomap->private) {
+ map.m_last_pblk = (unsigned long)iomap->private;
+ iomap->private = NULL;
+ }
/*
* If the blocks being overwritten are already allocated,
@@ -4219,6 +4219,9 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->flags |= IOMAP_F_MERGED;
iomap->bdev = map.m_bdev;
iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk);
+
+ if (flags & IOMAP_WRITE && map.m_last_pblk)
+ iomap->private = (void *)map.m_last_pblk;
} else {
if (flags & IOMAP_WRITE)
return -ENOTBLK;