author		Daeho Jeong <daehojeong@google.com>	2021-01-19 09:00:42 +0900
committer	Jaegeuk Kim <jaegeuk@kernel.org>	2021-02-03 13:03:06 -0800
commit		261eeb9c1585de4515a770b48a3c89672c08ae7f (patch)
tree		7945ba0d2af375ab864302af025ced28dc213e4f /fs/f2fs/checkpoint.c
parent		c8e43d55b1aa05d175daac25d228c7c1c71c7b11 (diff)
f2fs: introduce checkpoint_merge mount option
We've added new mount options, "checkpoint_merge" and "nocheckpoint_merge". "checkpoint_merge" creates a kernel daemon and makes it merge concurrent checkpoint requests as much as possible, eliminating redundant checkpoint issuance. Plus, it removes the sluggishness caused by a slow checkpoint operation when the checkpoint runs in the context of a process in a cgroup with a low I/O budget and CPU shares. To make this work better, we set the default I/O priority of the kernel daemon to "3", one level higher than other kernel threads. The verification result below demonstrates this. The basic idea comes from https://opensource.samsung.com.

[Verification]
Android Pixel device (ARM64, 7GB RAM, 256GB UFS)
Create two I/O cgroups (fg w/ weight 100, bg w/ weight 20)
Set "strict_guarantees" to "1" in BFQ tunables

In "fg" cgroup,
- thread A => trigger 1000 checkpoint operations
  "for i in `seq 1 1000`; do touch test_dir1/file; fsync test_dir1; done"
- thread B => generating async. I/O
  "fio --rw=write --numjobs=1 --bs=128k --runtime=3600 --time_based=1 --filename=test_img --name=test"

In "bg" cgroup,
- thread C => trigger repeated checkpoint operations
  "echo $$ > /dev/blkio/bg/tasks; while true; do touch test_dir2/file; fsync test_dir2; done"

We measured thread A's execution time.

[ w/o patch ] Elapsed Time: Avg. 68 seconds
[ w/  patch ] Elapsed Time: Avg. 48 seconds

Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
[Jaegeuk Kim: fix the return value in f2fs_start_ckpt_thread, reported by Dan]
Signed-off-by: Daeho Jeong <daehojeong@google.com>
Signed-off-by: Sungjong Seo <sj1557.seo@samsung.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
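For context, here is a minimal user-space sketch of enabling the new option via mount(2). The device and mount-point paths are hypothetical, and "nocheckpoint_merge" in the data string would restore the default behavior.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "checkpoint_merge" asks f2fs to spawn the checkpoint merge
	 * daemon (named "f2fs_ckpt-major:minor") for this mount;
	 * equivalent to: mount -o checkpoint_merge /dev/sdb1 /mnt/f2fs
	 */
	if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0, "checkpoint_merge")) {
		perror("mount");
		return 1;
	}
	return 0;
}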
Diffstat (limited to 'fs/f2fs/checkpoint.c')
-rw-r--r--	fs/f2fs/checkpoint.c	177
1 file changed, 177 insertions, 0 deletions
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 8c79ba0566b1..6f6ecba8f920 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -13,12 +13,15 @@
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
+#include <linux/kthread.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
+#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+
static struct kmem_cache *ino_entry_slab;
struct kmem_cache *f2fs_inode_entry_slab;
@@ -1704,3 +1707,177 @@ void f2fs_destroy_checkpoint_caches(void)
kmem_cache_destroy(ino_entry_slab);
kmem_cache_destroy(f2fs_inode_entry_slab);
}
+
+static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
+{
+ struct cp_control cpc = { .reason = CP_SYNC, };
+ int err;
+
+ down_write(&sbi->gc_lock);
+ err = f2fs_write_checkpoint(sbi, &cpc);
+ up_write(&sbi->gc_lock);
+
+ return err;
+}
+
+static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+ struct ckpt_req *req, *next;
+ struct llist_node *dispatch_list;
+ u64 sum_diff = 0, diff, count = 0;
+ int ret;
+
+ dispatch_list = llist_del_all(&cprc->issue_list);
+ if (!dispatch_list)
+ return;
+ dispatch_list = llist_reverse_order(dispatch_list);
+
+ ret = __write_checkpoint_sync(sbi);
+ atomic_inc(&cprc->issued_ckpt);
+
+ llist_for_each_entry_safe(req, next, dispatch_list, llnode) {
+ diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time);
+ req->ret = ret;
+ complete(&req->wait);
+
+ sum_diff += diff;
+ count++;
+ }
+ atomic_sub(count, &cprc->queued_ckpt);
+ atomic_add(count, &cprc->total_ckpt);
+
+ spin_lock(&cprc->stat_lock);
+ cprc->cur_time = (unsigned int)div64_u64(sum_diff, count);
+ if (cprc->peak_time < cprc->cur_time)
+ cprc->peak_time = cprc->cur_time;
+ spin_unlock(&cprc->stat_lock);
+}
+
+static int issue_checkpoint_thread(void *data)
+{
+ struct f2fs_sb_info *sbi = data;
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+ wait_queue_head_t *q = &cprc->ckpt_wait_queue;
+repeat:
+ if (kthread_should_stop())
+ return 0;
+
+ sb_start_intwrite(sbi->sb);
+
+ if (!llist_empty(&cprc->issue_list))
+ __checkpoint_and_complete_reqs(sbi);
+
+ sb_end_intwrite(sbi->sb);
+
+ wait_event_interruptible(*q,
+ kthread_should_stop() || !llist_empty(&cprc->issue_list));
+ goto repeat;
+}
+
+static void flush_remained_ckpt_reqs(struct f2fs_sb_info *sbi,
+ struct ckpt_req *wait_req)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+
+ if (!llist_empty(&cprc->issue_list)) {
+ __checkpoint_and_complete_reqs(sbi);
+ } else {
+ /* already dispatched by issue_checkpoint_thread */
+ if (wait_req)
+ wait_for_completion(&wait_req->wait);
+ }
+}
+
+static void init_ckpt_req(struct ckpt_req *req)
+{
+ memset(req, 0, sizeof(struct ckpt_req));
+
+ init_completion(&req->wait);
+ req->queue_time = ktime_get();
+}
+
+int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+ struct ckpt_req req;
+ struct cp_control cpc;
+
+ cpc.reason = __get_cp_reason(sbi);
+ if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
+ int ret;
+
+ down_write(&sbi->gc_lock);
+ ret = f2fs_write_checkpoint(sbi, &cpc);
+ up_write(&sbi->gc_lock);
+
+ return ret;
+ }
+
+ if (!cprc->f2fs_issue_ckpt)
+ return __write_checkpoint_sync(sbi);
+
+ init_ckpt_req(&req);
+
+ llist_add(&req.llnode, &cprc->issue_list);
+ atomic_inc(&cprc->queued_ckpt);
+
+ /* update issue_list before we wake up issue_checkpoint thread */
+ smp_mb();
+
+ if (waitqueue_active(&cprc->ckpt_wait_queue))
+ wake_up(&cprc->ckpt_wait_queue);
+
+ if (cprc->f2fs_issue_ckpt)
+ wait_for_completion(&req.wait);
+ else
+ flush_remained_ckpt_reqs(sbi, &req);
+
+ return req.ret;
+}
+
+int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
+{
+ dev_t dev = sbi->sb->s_bdev->bd_dev;
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+
+ if (cprc->f2fs_issue_ckpt)
+ return 0;
+
+ cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi,
+ "f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev));
+ if (IS_ERR(cprc->f2fs_issue_ckpt)) {
+ cprc->f2fs_issue_ckpt = NULL;
+ return -ENOMEM;
+ }
+
+ set_task_ioprio(cprc->f2fs_issue_ckpt, DEFAULT_CHECKPOINT_IOPRIO);
+
+ return 0;
+}
+
+void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+
+ if (cprc->f2fs_issue_ckpt) {
+ struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt;
+
+ cprc->f2fs_issue_ckpt = NULL;
+ kthread_stop(ckpt_task);
+
+ flush_remained_ckpt_reqs(sbi, NULL);
+ }
+}
+
+void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+
+ atomic_set(&cprc->issued_ckpt, 0);
+ atomic_set(&cprc->total_ckpt, 0);
+ atomic_set(&cprc->queued_ckpt, 0);
+ init_waitqueue_head(&cprc->ckpt_wait_queue);
+ init_llist_head(&cprc->issue_list);
+ spin_lock_init(&cprc->stat_lock);
+}