author		Chao Yu <chao2.yu@samsung.com>	2015-10-12 17:05:59 +0800
committer	Jaegeuk Kim <jaegeuk@kernel.org>	2015-10-12 14:03:15 -0700
commit		26879fb101f28c554294eaf25ac7817a2825b180 (patch)
tree		06eef8e5fc29c7dd5638cf2f78cd0bcc46575d79 /fs/f2fs/node.c
parent		2b947003fa98d5a39f3b21214380d0b1daf750b5 (diff)
f2fs: support lower priority asynchronous readahead in ra_meta_pages
Currently, ra_meta_pages reads as many contiguous physical blocks as possible to improve the performance of subsequent reads. However, it performs synchronous readahead, submitting its bio with READ. Since READ is high priority, it is not appropriate for merely preloading blocks, where it is not certain that the readahead pages will ever be used. This patch adds support for asynchronous readahead in ra_meta_pages by tagging the bio with the READA flag, so that it can also be used for preloading.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
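The diff below only updates the node.c call sites to pass the new flag. As a rough sketch of the consumer side (which lives in fs/f2fs/checkpoint.c and is not part of this diff; the exact field and flag names here are assumptions based on the commit message), ra_meta_pages might select the request type like this:

/*
 * Sketch only: ra_meta_pages() after gaining the "sync" parameter.
 * The f2fs_io_info fields and flag combination are assumptions derived
 * from the commit message, not taken from this diff.
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		/* Synchronous callers keep the high-priority READ path;
		 * asynchronous (preloading) callers downgrade to READA so
		 * the block layer may drop the request under load. */
		.rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
		.encrypted_page = NULL,
	};

	/* ... submit one read per meta page as before ... */

	return nrpages;
}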
Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r--	fs/f2fs/node.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index c61dfb695308..ad98e35f3fcb 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1529,7 +1529,8 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
 		return;
 
 	/* readahead nat pages to be scanned */
-	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);
+	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
+							META_NAT, true);
 
 	while (1) {
 		struct page *page = get_current_nat_page(sbi, nid);
@@ -1804,7 +1805,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
 		nrpages = min(last_offset - i, bio_blocks);
 
 		/* readahead node pages */
-		ra_meta_pages(sbi, addr, nrpages, META_POR);
+		ra_meta_pages(sbi, addr, nrpages, META_POR, true);
 
 		for (idx = addr; idx < addr + nrpages; idx++) {
 			struct page *page = get_tmp_page(sbi, idx);
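Both node.c call sites above pass true, preserving the previous synchronous, high-priority behaviour. A caller that only preloads blocks it may never read would pass false to take the low-priority READA path; a hypothetical example (not part of this patch):

	/* Hypothetical async caller: pages are speculatively preloaded and may
	 * never be read, so the droppable READA request is sufficient. */
	ra_meta_pages(sbi, start_blk, nrpages, META_POR, false);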