author    Chao Yu <yuchao0@huawei.com>    2021-05-12 17:52:58 +0800
committer Jaegeuk Kim <jaegeuk@kernel.org>    2021-06-23 01:09:33 -0700
commit    8f1d49832636d514e949b29ce64370ebebf6d6d2 (patch)
tree      90a5b62a1c6cf0fa7b55df2f93defa84bb6fce50 /fs/f2fs
parent    8939a8489ca64b56f49428b0d882709080a928d4 (diff)
f2fs: compress: remove unneeded preallocation
Since we reserve iblocks for the space saved by compression, there is no need to preallocate blocks when overwriting a compressed cluster. In addition, add a bug_on to detect a wrong reserved iblock count in __f2fs_cluster_blocks().

Bug fix in the original patch by Jaegeuk: if compressed blocks were released on an inode carrying the immutable bit, fewer compressed block addresses may be visible, so fix the wrong BUG_ON condition accordingly.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
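For illustration only, here is a minimal userspace C sketch (not f2fs code) of the invariant that the new f2fs_bug_on() in __f2fs_cluster_blocks() enforces: when counting raw block addresses of a compressed cluster, a fully written cluster must expose cluster_size addresses, unless compressed blocks were released on an immutable inode. The struct cluster type, check_cluster() helper, and plain assert() below are hypothetical stand-ins for the kernel's dnode walk and f2fs_bug_on().

#include <assert.h>
#include <stdbool.h>

/* Hypothetical, simplified stand-in for the on-disk cluster state. */
struct cluster {
	unsigned int cluster_size;   /* blocks per compressed cluster */
	unsigned int valid_blocks;   /* block addresses found while walking the dnode */
	bool immutable;              /* inode has the immutable bit set */
};

/*
 * Mirrors the added check: trigger only when we are NOT counting
 * compressed addresses, the cluster is not fully mapped, and the
 * inode is not immutable (released compressed blocks are legitimate
 * on immutable inodes).
 */
static void check_cluster(const struct cluster *c, bool counting_compressed)
{
	assert(counting_compressed ||
	       c->valid_blocks == c->cluster_size ||
	       c->immutable);
}

int main(void)
{
	struct cluster c = { .cluster_size = 4, .valid_blocks = 4, .immutable = false };

	check_cluster(&c, false);   /* fully mapped cluster: invariant holds */
	return 0;
}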
Diffstat (limited to 'fs/f2fs')
-rw-r--r--  fs/f2fs/compress.c  27
-rw-r--r--  fs/f2fs/file.c       4
2 files changed, 3 insertions, 28 deletions
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 279f79f4fb1c..bec92ff5ee7d 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -926,6 +926,9 @@ static int __f2fs_cluster_blocks(struct inode *inode,
ret++;
}
}
+
+ f2fs_bug_on(F2FS_I_SB(inode),
+ !compr && ret != cluster_size && !IS_IMMUTABLE(inode));
}
fail:
f2fs_put_dnode(&dn);
@@ -984,21 +987,16 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct address_space *mapping = cc->inode->i_mapping;
struct page *page;
- struct dnode_of_data dn;
sector_t last_block_in_bio;
unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
pgoff_t start_idx = start_idx_of_cluster(cc);
int i, ret;
- bool prealloc;
retry:
ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
if (ret <= 0)
return ret;
- /* compressed case */
- prealloc = (ret < cc->cluster_size);
-
ret = f2fs_init_compress_ctx(cc);
if (ret)
return ret;
@@ -1056,25 +1054,6 @@ release_and_retry:
}
}
- if (prealloc) {
- f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
-
- set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
-
- for (i = cc->cluster_size - 1; i > 0; i--) {
- ret = f2fs_get_block(&dn, start_idx + i);
- if (ret) {
- i = cc->cluster_size;
- break;
- }
-
- if (dn.data_blkaddr != NEW_ADDR)
- break;
- }
-
- f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
- }
-
if (likely(!ret)) {
*fsdata = cc->rpages;
*pagep = cc->rpages[offset_in_cluster(cc, index)];
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index a6be76289452..4a8c3128b5a5 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -85,10 +85,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
err = ret;
goto err;
} else if (ret) {
- if (ret < F2FS_I(inode)->i_cluster_size) {
- err = -EAGAIN;
- goto err;
- }
need_alloc = false;
}
}