author    Chao Yu <yuchao0@huawei.com>    2019-05-05 11:40:46 +0800
committer Jaegeuk Kim <jaegeuk@kernel.org>    2019-05-08 21:23:13 -0700
commit    c9c8ed50d94c613fc3f4917c51e9c75d493a312e (patch)
tree      161bfa38f995f3fceae4c60d4d9ba1f85384a7f2 /fs
parent    d764834378a9870ca56e9b2977ea46e9911ec358 (diff)
f2fs: fix to avoid potential race on sbi->unusable_block_count access/update
Use sbi.stat_lock to protect sbi->unusable_block_count access/update, in order to avoid potential race on it.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Diffstat (limited to 'fs')
-rw-r--r--    fs/f2fs/checkpoint.c    4
-rw-r--r--    fs/f2fs/segment.c       5
-rw-r--r--    fs/f2fs/super.c         6
3 files changed, 14 insertions, 1 deletion
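The fix follows one pattern throughout: every update and every read of sbi->unusable_block_count is wrapped in sbi->stat_lock, so the increments in update_sit_entry(), the resets at checkpoint/disable time, and the read in f2fs_statfs() no longer race with each other. A minimal sketch of that pattern is shown below; struct demo_sb_info and the demo_* helpers are hypothetical stand-ins for illustration only, with just the field and lock names mirroring the real f2fs ones.

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_sb_info {
	spinlock_t stat_lock;		/* serializes the block accounting below */
	u64 unusable_block_count;	/* blocks unusable until the next checkpoint */
};

static void demo_init(struct demo_sb_info *sbi)
{
	spin_lock_init(&sbi->stat_lock);
	sbi->unusable_block_count = 0;
}

/* Writer side: bump the counter under stat_lock (cf. update_sit_entry()). */
static void demo_account_unusable(struct demo_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count++;
	spin_unlock(&sbi->stat_lock);
}

/* Reader side: take a snapshot under the same lock (cf. f2fs_statfs()). */
static u64 demo_read_unusable(struct demo_sb_info *sbi)
{
	u64 count;

	spin_lock(&sbi->stat_lock);
	count = sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	return count;
}

Reusing the existing sbi->stat_lock, rather than adding a new lock or an atomic, keeps unusable_block_count consistent with the other block-accounting fields that f2fs already serializes under that spinlock.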
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 805a33088e82..ed70b68b2b38 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1536,7 +1536,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
clear_sbi_flag(sbi, SBI_IS_DIRTY);
clear_sbi_flag(sbi, SBI_NEED_CP);
clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
+
+ spin_lock(&sbi->stat_lock);
sbi->unusable_block_count = 0;
+ spin_unlock(&sbi->stat_lock);
+
__set_cp_next_pack(sbi);

/*
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 8388d2abacb5..8dee063c833f 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2169,8 +2169,11 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
* before, we must track that to know how much space we
* really have.
*/
- if (f2fs_test_bit(offset, se->ckpt_valid_map))
+ if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
+ spin_lock(&sbi->stat_lock);
sbi->unusable_block_count++;
+ spin_unlock(&sbi->stat_lock);
+ }
}

if (f2fs_test_and_clear_bit(offset, se->discard_map))
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index fe075aa12893..7ddf0d3cbece 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1226,10 +1226,13 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_blocks = total_count - start_count;
buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
sbi->current_reserved_blocks;
+
+ spin_lock(&sbi->stat_lock);
if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
buf->f_bfree = 0;
else
buf->f_bfree -= sbi->unusable_block_count;
+ spin_unlock(&sbi->stat_lock);

if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
buf->f_bavail = buf->f_bfree -
@@ -1508,7 +1511,10 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
if (err)
goto out_unlock;

+ spin_lock(&sbi->stat_lock);
sbi->unusable_block_count = 0;
+ spin_unlock(&sbi->stat_lock);
+
out_unlock:
mutex_unlock(&sbi->gc_mutex);
restore_flag: