path: root/fs/f2fs/node.c
Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r--  fs/f2fs/node.c | 3554
1 file changed, 2634 insertions(+), 920 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index b418aee09573..482a362f2625 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1,17 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/node.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
-#include <linux/backing-dev.h>
+#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
@@ -19,106 +16,223 @@
#include "f2fs.h"
#include "node.h"
#include "segment.h"
+#include "xattr.h"
+#include "iostat.h"
#include <trace/events/f2fs.h>
+#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
+
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
+static struct kmem_cache *nat_entry_set_slab;
+static struct kmem_cache *fsync_node_entry_slab;
-static void clear_node_page_dirty(struct page *page)
+static inline bool is_invalid_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
- struct address_space *mapping = page->mapping;
- struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
- unsigned int long flags;
+ return nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid;
+}
- if (PageDirty(page)) {
- spin_lock_irqsave(&mapping->tree_lock, flags);
- radix_tree_tag_clear(&mapping->page_tree,
- page_index(page),
- PAGECACHE_TAG_DIRTY);
- spin_unlock_irqrestore(&mapping->tree_lock, flags);
+/*
+ * Check whether the given nid is within node id range.
+ */
+int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ if (unlikely(is_invalid_nid(sbi, nid))) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
+ __func__, nid);
+ f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+ return -EFSCORRUPTED;
+ }
+ return 0;
+}
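
The check above rejects anything below the root ino or at/above max_nid. A minimal user-space sketch of the same half-open interval test; the constants are hypothetical stand-ins for F2FS_ROOT_INO(sbi) and NM_I(sbi)->max_nid, not real f2fs values:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned int nid_t;

/* hypothetical stand-ins for F2FS_ROOT_INO(sbi) and NM_I(sbi)->max_nid */
#define ROOT_INO 3u
#define MAX_NID  1024u

/* valid nids form the half-open interval [ROOT_INO, MAX_NID) */
static bool nid_is_valid(nid_t nid)
{
	return nid >= ROOT_INO && nid < MAX_NID;
}

int main(void)
{
	nid_t samples[] = { 0, 2, 3, 1023, 1024 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("nid %u -> %s\n", samples[i],
		       nid_is_valid(samples[i]) ? "valid" : "invalid");
	return 0;
}
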
- clear_page_dirty_for_io(page);
- dec_page_count(sbi, F2FS_DIRTY_NODES);
+bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct sysinfo val;
+ unsigned long avail_ram;
+ unsigned long mem_size = 0;
+ bool res = false;
+
+ if (!nm_i)
+ return true;
+
+ si_meminfo(&val);
+
+ /* only uses low memory */
+ avail_ram = val.totalram - val.totalhigh;
+
+ /*
+ * give 25%, 25%, 50%, 50%, 25%, 25% memory for each component, respectively
+ */
+ if (type == FREE_NIDS) {
+ mem_size = (nm_i->nid_cnt[FREE_NID] *
+ sizeof(struct free_nid)) >> PAGE_SHIFT;
+ res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+ } else if (type == NAT_ENTRIES) {
+ mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
+ sizeof(struct nat_entry)) >> PAGE_SHIFT;
+ res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+ if (excess_cached_nats(sbi))
+ res = false;
+ } else if (type == DIRTY_DENTS) {
+ if (sbi->sb->s_bdi->wb.dirty_exceeded)
+ return false;
+ mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
+ res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+ } else if (type == INO_ENTRIES) {
+ int i;
+
+ for (i = 0; i < MAX_INO_ENTRY; i++)
+ mem_size += sbi->im[i].ino_num *
+ sizeof(struct ino_entry);
+ mem_size >>= PAGE_SHIFT;
+ res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+ } else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
+ enum extent_type etype = type == READ_EXTENT_CACHE ?
+ EX_READ : EX_BLOCK_AGE;
+ struct extent_tree_info *eti = &sbi->extent_tree[etype];
+
+ mem_size = (atomic_read(&eti->total_ext_tree) *
+ sizeof(struct extent_tree) +
+ atomic_read(&eti->total_ext_node) *
+ sizeof(struct extent_node)) >> PAGE_SHIFT;
+ res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+ } else if (type == DISCARD_CACHE) {
+ mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
+ sizeof(struct discard_cmd)) >> PAGE_SHIFT;
+ res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
+ } else if (type == COMPRESS_PAGE) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ unsigned long free_ram = val.freeram;
+
+ /*
+ * if free memory is lower than the watermark or the cached page count
+ * exceeds the threshold, deny caching compressed pages.
+ */
+ res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
+ (COMPRESS_MAPPING(sbi)->nrpages <
+ free_ram * sbi->compress_percent / 100);
+#else
+ res = false;
+#endif
+ } else {
+ if (!sbi->sb->s_bdi->wb.dirty_exceeded)
+ return true;
+ }
+ return res;
+}
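
To make the threshold arithmetic above concrete, here is a minimal user-space sketch of the FREE_NIDS branch: the free-nid cache may occupy at most one quarter of ram_thresh percent of available low memory. The page size, entry size, and ram_thresh value below are hypothetical, chosen only for illustration:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumes 4 KiB pages */

/*
 * Mirror of the FREE_NIDS branch above: cached free nids must fit in
 * one quarter of ram_thresh percent of available low memory.
 */
static bool free_nids_fit(unsigned long nid_cnt, unsigned long nid_size,
			  unsigned long avail_ram_pages,
			  unsigned long ram_thresh)
{
	unsigned long mem_size = (nid_cnt * nid_size) >> PAGE_SHIFT;

	return mem_size < ((avail_ram_pages * ram_thresh / 100) >> 2);
}

int main(void)
{
	/* 1 GiB of low memory, ram_thresh = 1, ~32-byte entries (all assumed) */
	unsigned long avail_ram_pages = (1024UL * 1024 * 1024) >> PAGE_SHIFT;

	printf("100000 free nids fit: %d\n",
	       free_nids_fit(100000, 32, avail_ram_pages, 1));	/* prints 0 */
	return 0;
}
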
+
+static void clear_node_folio_dirty(struct folio *folio)
+{
+ if (folio_test_dirty(folio)) {
+ f2fs_clear_page_cache_dirty_tag(folio);
+ folio_clear_dirty_for_io(folio);
+ dec_page_count(F2FS_F_SB(folio), F2FS_DIRTY_NODES);
}
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
}
-static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
{
- pgoff_t index = current_nat_addr(sbi, nid);
- return get_meta_page(sbi, index);
+ return f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid));
}
-static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+static struct folio *get_next_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
{
- struct page *src_page;
- struct page *dst_page;
- pgoff_t src_off;
+ struct folio *src_folio;
+ struct folio *dst_folio;
pgoff_t dst_off;
void *src_addr;
void *dst_addr;
struct f2fs_nm_info *nm_i = NM_I(sbi);
- src_off = current_nat_addr(sbi, nid);
- dst_off = next_nat_addr(sbi, src_off);
+ dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
/* get current nat block page with lock */
- src_page = get_meta_page(sbi, src_off);
+ src_folio = get_current_nat_folio(sbi, nid);
+ if (IS_ERR(src_folio))
+ return src_folio;
+ dst_folio = f2fs_grab_meta_folio(sbi, dst_off);
+ f2fs_bug_on(sbi, folio_test_dirty(src_folio));
+
+ src_addr = folio_address(src_folio);
+ dst_addr = folio_address(dst_folio);
+ memcpy(dst_addr, src_addr, PAGE_SIZE);
+ folio_mark_dirty(dst_folio);
+ f2fs_folio_put(src_folio, true);
- /* Dirty src_page means that it is already the new target NAT page. */
- if (PageDirty(src_page))
- return src_page;
+ set_to_next_nat(nm_i, nid);
- dst_page = grab_meta_page(sbi, dst_off);
+ return dst_folio;
+}
- src_addr = page_address(src_page);
- dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
- set_page_dirty(dst_page);
- f2fs_put_page(src_page, 1);
+static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
+ nid_t nid, bool no_fail)
+{
+ struct nat_entry *new;
- set_to_next_nat(nm_i, nid);
+ new = f2fs_kmem_cache_alloc(nat_entry_slab,
+ GFP_F2FS_ZERO, no_fail, sbi);
+ if (new) {
+ nat_set_nid(new, nid);
+ nat_reset_flag(new);
+ }
+ return new;
+}
- return dst_page;
+static void __free_nat_entry(struct nat_entry *e)
+{
+ kmem_cache_free(nat_entry_slab, e);
}
-/*
- * Readahead NAT pages
- */
-static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
+/* must be locked by nat_tree_lock */
+static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
+ struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail, bool init_dirty)
{
- struct address_space *mapping = sbi->meta_inode->i_mapping;
- struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct blk_plug plug;
- struct page *page;
- pgoff_t index;
- int i;
+ if (no_fail)
+ f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
+ else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
+ return NULL;
- blk_start_plug(&plug);
+ if (raw_ne)
+ node_info_from_raw_nat(&ne->ni, raw_ne);
- for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
- if (nid >= nm_i->max_nid)
- nid = 0;
- index = current_nat_addr(sbi, nid);
+ if (init_dirty) {
+ INIT_LIST_HEAD(&ne->list);
+ nm_i->nat_cnt[TOTAL_NAT]++;
+ return ne;
+ }
- page = grab_cache_page(mapping, index);
- if (!page)
- continue;
- if (PageUptodate(page)) {
- f2fs_put_page(page, 1);
- continue;
- }
- if (f2fs_readpage(sbi, page, index, READ))
- continue;
+ spin_lock(&nm_i->nat_list_lock);
+ list_add_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
- f2fs_put_page(page, 0);
- }
- blk_finish_plug(&plug);
+ nm_i->nat_cnt[TOTAL_NAT]++;
+ nm_i->nat_cnt[RECLAIMABLE_NAT]++;
+ return ne;
}
-static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
+static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n, bool for_dirty)
{
- return radix_tree_lookup(&nm_i->nat_root, n);
+ struct nat_entry *ne;
+
+ ne = radix_tree_lookup(&nm_i->nat_root, n);
+
+ /*
+ * for a recently accessed nat entry which will not be dirtied soon,
+ * move it to the tail of the LRU list.
+ */
+ if (ne && !get_nat_flag(ne, IS_DIRTY) && !for_dirty) {
+ spin_lock(&nm_i->nat_list_lock);
+ if (!list_empty(&ne->list))
+ list_move_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+ }
+
+ return ne;
}
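
The move-to-tail above is what turns nm_i->nat_entries into an LRU: f2fs_try_to_free_nats() further down reclaims with list_first_entry(), i.e. from the head, so clean entries that keep getting looked up survive shrinking. A toy user-space sketch of that interplay, using a plain array instead of the kernel's list_head:

#include <stdio.h>

#define N 4

/*
 * Toy LRU over entry ids: order[0] is the head (coldest, freed first),
 * order[N-1] the tail (hottest). The kernel gets the same effect with
 * list_move_tail() on lookup and list_first_entry() in the shrinker.
 */
static int order[N] = { 0, 1, 2, 3 };

static void touch(int id)	/* lookup hit: move the entry to the tail */
{
	int i, j;

	for (i = 0; i < N; i++)
		if (order[i] == id)
			break;
	if (i == N)
		return;
	for (j = i; j < N - 1; j++)
		order[j] = order[j + 1];
	order[N - 1] = id;
}

int main(void)
{
	touch(0);	/* entry 0 becomes most recently used */
	printf("shrinker frees entry %d first\n", order[0]);	/* prints 1 */
	return 0;
}
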
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
@@ -129,198 +243,479 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
- list_del(&e->list);
radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
- nm_i->nat_cnt--;
- kmem_cache_free(nat_entry_slab, e);
+ nm_i->nat_cnt[TOTAL_NAT]--;
+ nm_i->nat_cnt[RECLAIMABLE_NAT]--;
+ __free_nat_entry(e);
}
-int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
+static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
+ struct nat_entry *ne)
{
- struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct nat_entry *e;
- int is_cp = 1;
+ nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
+ struct nat_entry_set *head;
+
+ head = radix_tree_lookup(&nm_i->nat_set_root, set);
+ if (!head) {
+ head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
+ GFP_NOFS, true, NULL);
+
+ INIT_LIST_HEAD(&head->entry_list);
+ INIT_LIST_HEAD(&head->set_list);
+ head->set = set;
+ head->entry_cnt = 0;
+ f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
+ }
+ return head;
+}
- read_lock(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
- if (e && !e->checkpointed)
- is_cp = 0;
- read_unlock(&nm_i->nat_tree_lock);
- return is_cp;
+static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
+ struct nat_entry *ne, bool init_dirty)
+{
+ struct nat_entry_set *head;
+ bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
+
+ if (!new_ne)
+ head = __grab_nat_entry_set(nm_i, ne);
+
+ /*
+ * update entry_cnt in the following conditions:
+ * 1. updating NEW_ADDR to a valid block address;
+ * 2. updating an old block address to a new one;
+ */
+ if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
+ !get_nat_flag(ne, IS_DIRTY)))
+ head->entry_cnt++;
+
+ set_nat_flag(ne, IS_PREALLOC, new_ne);
+
+ if (get_nat_flag(ne, IS_DIRTY))
+ goto refresh_list;
+
+ nm_i->nat_cnt[DIRTY_NAT]++;
+ if (!init_dirty)
+ nm_i->nat_cnt[RECLAIMABLE_NAT]--;
+ set_nat_flag(ne, IS_DIRTY, true);
+refresh_list:
+ spin_lock(&nm_i->nat_list_lock);
+ if (new_ne)
+ list_del_init(&ne->list);
+ else
+ list_move_tail(&ne->list, &head->entry_list);
+ spin_unlock(&nm_i->nat_list_lock);
}
-static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
+static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
+ struct nat_entry_set *set, struct nat_entry *ne)
{
- struct nat_entry *new;
+ spin_lock(&nm_i->nat_list_lock);
+ list_move_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+
+ set_nat_flag(ne, IS_DIRTY, false);
+ set->entry_cnt--;
+ nm_i->nat_cnt[DIRTY_NAT]--;
+ nm_i->nat_cnt[RECLAIMABLE_NAT]++;
+}
- new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
- if (!new)
- return NULL;
- if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
- kmem_cache_free(nat_entry_slab, new);
- return NULL;
+static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
+ nid_t start, unsigned int nr, struct nat_entry_set **ep)
+{
+ return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
+ start, nr);
+}
+
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio)
+{
+ return is_node_folio(folio) && IS_DNODE(folio) && is_cold_node(folio);
+}
+
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+ spin_lock_init(&sbi->fsync_node_lock);
+ INIT_LIST_HEAD(&sbi->fsync_node_list);
+ sbi->fsync_seg_id = 0;
+ sbi->fsync_node_num = 0;
+}
+
+static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
+ struct folio *folio)
+{
+ struct fsync_node_entry *fn;
+ unsigned long flags;
+ unsigned int seq_id;
+
+ fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
+ GFP_NOFS, true, NULL);
+
+ folio_get(folio);
+ fn->folio = folio;
+ INIT_LIST_HEAD(&fn->list);
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ list_add_tail(&fn->list, &sbi->fsync_node_list);
+ fn->seq_id = sbi->fsync_seg_id++;
+ seq_id = fn->seq_id;
+ sbi->fsync_node_num++;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+
+ return seq_id;
+}
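
Each queued node folio receives the next value of a counter taken under the same spinlock that protects the list, so sequence numbers are totally ordered with list membership. A minimal sketch of the scheme; the names and the completion side are illustrative simplifications, not f2fs code:

#include <stdio.h>

/*
 * Sketch of the seq_id scheme: every queued entry takes the next value
 * of a counter under the lock that also protects the list, so "wait for
 * writes up to seq N" is well defined.
 */
struct fsync_ctl {
	unsigned int next_seq;	/* plays the role of sbi->fsync_seg_id */
	unsigned int last_done;	/* highest sequence already written back */
};

static unsigned int add_entry(struct fsync_ctl *ctl)
{
	return ctl->next_seq++;	/* under fsync_node_lock in the kernel */
}

int main(void)
{
	struct fsync_ctl ctl = { 1, 0 };	/* start at 1 so 0 means "none done" */
	unsigned int a = add_entry(&ctl);	/* seq 1 */
	unsigned int b = add_entry(&ctl);	/* seq 2 */

	ctl.last_done = a;			/* first write completed */
	printf("seq %u still pending: %s\n", b,
	       ctl.last_done >= b ? "no" : "yes");	/* prints "yes" */
	return 0;
}
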
+
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio)
+{
+ struct fsync_node_entry *fn;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ list_for_each_entry(fn, &sbi->fsync_node_list, list) {
+ if (fn->folio == folio) {
+ list_del(&fn->list);
+ sbi->fsync_node_num--;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ kmem_cache_free(fsync_node_entry_slab, fn);
+ folio_put(folio);
+ return;
+ }
}
- memset(new, 0, sizeof(struct nat_entry));
- nat_set_nid(new, nid);
- list_add_tail(&new->list, &nm_i->nat_entries);
- nm_i->nat_cnt++;
- return new;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ f2fs_bug_on(sbi, 1);
}
-static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
- struct f2fs_nat_entry *ne)
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ sbi->fsync_seg_id = 0;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+}
+
+int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
-retry:
- write_lock(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
- if (!e) {
- e = grab_nat_entry(nm_i, nid);
- if (!e) {
- write_unlock(&nm_i->nat_tree_lock);
- goto retry;
- }
- nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
- nat_set_ino(e, le32_to_cpu(ne->ino));
- nat_set_version(e, ne->version);
- e->checkpointed = true;
+ bool need = false;
+
+ f2fs_down_read(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid, false);
+ if (e) {
+ if (!get_nat_flag(e, IS_CHECKPOINTED) &&
+ !get_nat_flag(e, HAS_FSYNCED_INODE))
+ need = true;
}
- write_unlock(&nm_i->nat_tree_lock);
+ f2fs_up_read(&nm_i->nat_tree_lock);
+ return need;
+}
+
+bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct nat_entry *e;
+ bool is_cp = true;
+
+ f2fs_down_read(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid, false);
+ if (e && !get_nat_flag(e, IS_CHECKPOINTED))
+ is_cp = false;
+ f2fs_up_read(&nm_i->nat_tree_lock);
+ return is_cp;
+}
+
+bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct nat_entry *e;
+ bool need_update = true;
+
+ f2fs_down_read(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, ino, false);
+ if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
+ (get_nat_flag(e, IS_CHECKPOINTED) ||
+ get_nat_flag(e, HAS_FSYNCED_INODE)))
+ need_update = false;
+ f2fs_up_read(&nm_i->nat_tree_lock);
+ return need_update;
+}
+
+/* must be locked by nat_tree_lock */
+static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
+ struct f2fs_nat_entry *ne)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct nat_entry *new, *e;
+
+ /* Let's mitigate lock contention of nat_tree_lock during checkpoint */
+ if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
+ return;
+
+ new = __alloc_nat_entry(sbi, nid, false);
+ if (!new)
+ return;
+
+ f2fs_down_write(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid, false);
+ if (!e)
+ e = __init_nat_entry(nm_i, new, ne, false, false);
+ else
+ f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
+ nat_get_blkaddr(e) !=
+ le32_to_cpu(ne->block_addr) ||
+ nat_get_version(e) != ne->version);
+ f2fs_up_write(&nm_i->nat_tree_lock);
+ if (e != new)
+ __free_nat_entry(new);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
- block_t new_blkaddr)
+ block_t new_blkaddr, bool fsync_done)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
-retry:
- write_lock(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, ni->nid);
+ struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
+ bool init_dirty = false;
+
+ f2fs_down_write(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, ni->nid, true);
if (!e) {
- e = grab_nat_entry(nm_i, ni->nid);
- if (!e) {
- write_unlock(&nm_i->nat_tree_lock);
- goto retry;
- }
- e->ni = *ni;
- e->checkpointed = true;
- BUG_ON(ni->blk_addr == NEW_ADDR);
+ init_dirty = true;
+ e = __init_nat_entry(nm_i, new, NULL, true, true);
+ copy_node_info(&e->ni, ni);
+ f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
/*
* when nid is reallocated,
* previous nat entry can remain in nat cache.
* So, reinitialize it with new information.
*/
- e->ni = *ni;
- BUG_ON(ni->blk_addr != NULL_ADDR);
+ copy_node_info(&e->ni, ni);
+ f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
}
-
- if (new_blkaddr == NEW_ADDR)
- e->checkpointed = false;
+ /* let's free early to reduce memory consumption */
+ if (e != new)
+ __free_nat_entry(new);
/* sanity check */
- BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
- BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
+ f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
+ f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
new_blkaddr == NULL_ADDR);
- BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
+ f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
new_blkaddr == NEW_ADDR);
- BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
- nat_get_blkaddr(e) != NULL_ADDR &&
+ f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
new_blkaddr == NEW_ADDR);
- /* increament version no as node is removed */
+ /* increment version no as node is removed */
if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
unsigned char version = nat_get_version(e);
+
nat_set_version(e, inc_node_version(version));
}
/* change address */
nat_set_blkaddr(e, new_blkaddr);
- __set_nat_cache_dirty(nm_i, e);
- write_unlock(&nm_i->nat_tree_lock);
+ if (!__is_valid_data_blkaddr(new_blkaddr))
+ set_nat_flag(e, IS_CHECKPOINTED, false);
+ __set_nat_cache_dirty(nm_i, e, init_dirty);
+
+ /* update fsync_mark if its inode nat entry is still alive */
+ if (ni->nid != ni->ino)
+ e = __lookup_nat_cache(nm_i, ni->ino, false);
+ if (e) {
+ if (fsync_done && ni->nid == ni->ino)
+ set_nat_flag(e, HAS_FSYNCED_INODE, true);
+ set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
+ }
+ f2fs_up_write(&nm_i->nat_tree_lock);
}
-static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
+int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
+ int nr = nr_shrink;
- if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
+ if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
return 0;
- write_lock(&nm_i->nat_tree_lock);
- while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
+ spin_lock(&nm_i->nat_list_lock);
+ while (nr_shrink) {
struct nat_entry *ne;
+
+ if (list_empty(&nm_i->nat_entries))
+ break;
+
ne = list_first_entry(&nm_i->nat_entries,
struct nat_entry, list);
+ list_del(&ne->list);
+ spin_unlock(&nm_i->nat_list_lock);
+
__del_from_nat_cache(nm_i, ne);
nr_shrink--;
+
+ spin_lock(&nm_i->nat_list_lock);
}
- write_unlock(&nm_i->nat_tree_lock);
- return nr_shrink;
+ spin_unlock(&nm_i->nat_list_lock);
+
+ f2fs_up_write(&nm_i->nat_tree_lock);
+ return nr - nr_shrink;
}
-/*
- * This function returns always success
- */
-void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+ struct node_info *ni, bool checkpoint_context)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
nid_t start_nid = START_NID(nid);
struct f2fs_nat_block *nat_blk;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct f2fs_nat_entry ne;
struct nat_entry *e;
+ pgoff_t index;
int i;
+ bool need_cache = true;
- memset(&ne, 0, sizeof(struct f2fs_nat_entry));
+ ni->flag = 0;
ni->nid = nid;
-
+retry:
/* Check nat cache */
- read_lock(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
+ f2fs_down_read(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid, false);
if (e) {
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
+ f2fs_up_read(&nm_i->nat_tree_lock);
+ if (IS_ENABLED(CONFIG_F2FS_CHECK_FS)) {
+ need_cache = false;
+ goto sanity_check;
+ }
+ return 0;
}
- read_unlock(&nm_i->nat_tree_lock);
- if (e)
- return;
- /* Check current segment summary */
- mutex_lock(&curseg->curseg_mutex);
- i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
+ /*
+ * Check current segment summary by trying to grab journal_rwsem first.
+ * This sem is on the critical path of the checkpoint, which requires the
+ * above nat_tree_lock. Therefore, if we fail to grab it here, we should
+ * retry without bothering the checkpoint.
+ */
+ if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
+ down_read(&curseg->journal_rwsem);
+ } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
+ !down_read_trylock(&curseg->journal_rwsem)) {
+ f2fs_up_read(&nm_i->nat_tree_lock);
+ goto retry;
+ }
+
+ i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
if (i >= 0) {
- ne = nat_in_journal(sum, i);
+ ne = nat_in_journal(journal, i);
node_info_from_raw_nat(ni, &ne);
}
- mutex_unlock(&curseg->curseg_mutex);
- if (i >= 0)
- goto cache;
+ up_read(&curseg->journal_rwsem);
+ if (i >= 0) {
+ f2fs_up_read(&nm_i->nat_tree_lock);
+ goto sanity_check;
+ }
/* Fill node_info from nat page */
- page = get_current_nat_page(sbi, start_nid);
- nat_blk = (struct f2fs_nat_block *)page_address(page);
+ index = current_nat_addr(sbi, nid);
+ f2fs_up_read(&nm_i->nat_tree_lock);
+
+ folio = f2fs_get_meta_folio(sbi, index);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ nat_blk = folio_address(folio);
ne = nat_blk->entries[nid - start_nid];
node_info_from_raw_nat(ni, &ne);
- f2fs_put_page(page, 1);
-cache:
+ f2fs_folio_put(folio, true);
+sanity_check:
+ if (__is_valid_data_blkaddr(ni->blk_addr) &&
+ !f2fs_is_valid_blkaddr(sbi, ni->blk_addr,
+ DATA_GENERIC_ENHANCE)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_err_ratelimited(sbi,
+ "f2fs_get_node_info of %pS: inconsistent nat entry, "
+ "ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
+ __builtin_return_address(0),
+ ni->ino, ni->nid, ni->blk_addr, ni->version, ni->flag);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
+ return -EFSCORRUPTED;
+ }
+
/* cache nat entry */
- cache_nat_entry(NM_I(sbi), nid, &ne);
+ if (need_cache)
+ cache_nat_entry(sbi, nid, &ne);
+ return 0;
+}
+
+/*
+ * readahead MAX_RA_NODE number of node pages.
+ */
+static void f2fs_ra_node_pages(struct folio *parent, int start, int n)
+{
+ struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
+ struct blk_plug plug;
+ int i, end;
+ nid_t nid;
+
+ blk_start_plug(&plug);
+
+	/* try readahead for siblings of the desired node */
+ end = start + n;
+ end = min(end, (int)NIDS_PER_BLOCK);
+ for (i = start; i < end; i++) {
+ nid = get_nid(parent, i, false);
+ f2fs_ra_node_page(sbi, nid);
+ }
+
+ blk_finish_plug(&plug);
+}
+
+pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
+{
+ const long direct_index = ADDRS_PER_INODE(dn->inode);
+ const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
+ const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
+ unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
+ int cur_level = dn->cur_level;
+ int max_level = dn->max_level;
+ pgoff_t base = 0;
+
+ if (!dn->max_level)
+ return pgofs + 1;
+
+ while (max_level-- > cur_level)
+ skipped_unit *= NIDS_PER_BLOCK;
+
+ switch (dn->max_level) {
+ case 3:
+ base += 2 * indirect_blks;
+ fallthrough;
+ case 2:
+ base += 2 * direct_blks;
+ fallthrough;
+ case 1:
+ base += direct_index;
+ break;
+ default:
+ f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
+ }
+
+ return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
/*
* The maximum depth is four.
* Offset[0] will have raw inode offset.
*/
-static int get_node_path(long block, int offset[4], unsigned int noffset[4])
+static int get_node_path(struct inode *inode, long block,
+ int offset[4], unsigned int noffset[4])
{
- const long direct_index = ADDRS_PER_INODE;
- const long direct_blks = ADDRS_PER_BLOCK;
+ const long direct_index = ADDRS_PER_INODE(inode);
+ const long direct_blks = ADDRS_PER_BLOCK(inode);
const long dptrs_per_blk = NIDS_PER_BLOCK;
- const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+ const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
int n = 0;
int level = 0;
@@ -382,172 +777,257 @@ static int get_node_path(long block, int offset[4], unsigned int noffset[4])
level = 3;
goto got;
} else {
- BUG();
+ return -E2BIG;
}
got:
return level;
}
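
For orientation: with the same illustrative 4 KiB-block geometry as in the sketch above (direct_index = 923, direct_blks = 1018, dptrs_per_blk = 1018), the chosen level partitions the block-offset space as follows. Level 0 covers offsets [0, 923) addressed from the inode itself; level 1 covers the two direct nodes up to 923 + 2*1018 = 2959; level 2 covers the two single-indirect trees up to 2959 + 2*1018*1018 = 2075607; level 3 covers the double-indirect tree beyond that. An offset past all four regions now fails with -E2BIG where the old code hit BUG().
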
+static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start);
+
/*
* Caller should call f2fs_put_dnode(dn).
- * Also, it should grab and release a mutex by calling mutex_lock_op() and
- * mutex_unlock_op() only if ro is not set RDONLY_NODE.
- * In the case of RDONLY_NODE, we don't need to care about mutex.
+ * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
*/
-int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
+int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
- struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
- struct page *npage[4];
- struct page *parent;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ struct folio *nfolio[4];
+ struct folio *parent = NULL;
int offset[4];
unsigned int noffset[4];
nid_t nids[4];
- int level, i;
+ int level, i = 0;
int err = 0;
- level = get_node_path(index, offset, noffset);
+ level = get_node_path(dn->inode, index, offset, noffset);
+ if (level < 0)
+ return level;
nids[0] = dn->inode->i_ino;
- npage[0] = dn->inode_page;
- if (!npage[0]) {
- npage[0] = get_node_page(sbi, nids[0]);
- if (IS_ERR(npage[0]))
- return PTR_ERR(npage[0]);
+ if (!dn->inode_folio) {
+ nfolio[0] = f2fs_get_inode_folio(sbi, nids[0]);
+ if (IS_ERR(nfolio[0]))
+ return PTR_ERR(nfolio[0]);
+ } else {
+ nfolio[0] = dn->inode_folio;
+ }
+
+ /* if inline_data is set, should not report any block indices */
+ if (f2fs_has_inline_data(dn->inode) && index) {
+ err = -ENOENT;
+ f2fs_folio_put(nfolio[0], true);
+ goto release_out;
}
- parent = npage[0];
+
+ parent = nfolio[0];
if (level != 0)
nids[1] = get_nid(parent, offset[0], true);
- dn->inode_page = npage[0];
- dn->inode_page_locked = true;
+ dn->inode_folio = nfolio[0];
+ dn->inode_folio_locked = true;
/* get indirect or direct nodes */
for (i = 1; i <= level; i++) {
bool done = false;
+ if (nids[i] && nids[i] == dn->inode->i_ino) {
+ err = -EFSCORRUPTED;
+ f2fs_err_ratelimited(sbi,
+ "inode mapping table is corrupted, run fsck to fix it, "
+ "ino:%lu, nid:%u, level:%d, offset:%d",
+ dn->inode->i_ino, nids[i], level, offset[level]);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ goto release_pages;
+ }
+
if (!nids[i] && mode == ALLOC_NODE) {
/* alloc new node */
- if (!alloc_nid(sbi, &(nids[i]))) {
+ if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
err = -ENOSPC;
goto release_pages;
}
dn->nid = nids[i];
- npage[i] = new_node_page(dn, noffset[i], NULL);
- if (IS_ERR(npage[i])) {
- alloc_nid_failed(sbi, nids[i]);
- err = PTR_ERR(npage[i]);
+ nfolio[i] = f2fs_new_node_folio(dn, noffset[i]);
+ if (IS_ERR(nfolio[i])) {
+ f2fs_alloc_nid_failed(sbi, nids[i]);
+ err = PTR_ERR(nfolio[i]);
goto release_pages;
}
set_nid(parent, offset[i - 1], nids[i], i == 1);
- alloc_nid_done(sbi, nids[i]);
+ f2fs_alloc_nid_done(sbi, nids[i]);
done = true;
} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
- npage[i] = get_node_page_ra(parent, offset[i - 1]);
- if (IS_ERR(npage[i])) {
- err = PTR_ERR(npage[i]);
+ nfolio[i] = f2fs_get_node_folio_ra(parent, offset[i - 1]);
+ if (IS_ERR(nfolio[i])) {
+ err = PTR_ERR(nfolio[i]);
goto release_pages;
}
done = true;
}
if (i == 1) {
- dn->inode_page_locked = false;
- unlock_page(parent);
+ dn->inode_folio_locked = false;
+ folio_unlock(parent);
} else {
- f2fs_put_page(parent, 1);
+ f2fs_folio_put(parent, true);
}
if (!done) {
- npage[i] = get_node_page(sbi, nids[i]);
- if (IS_ERR(npage[i])) {
- err = PTR_ERR(npage[i]);
- f2fs_put_page(npage[0], 0);
+ nfolio[i] = f2fs_get_node_folio(sbi, nids[i],
+ NODE_TYPE_NON_INODE);
+ if (IS_ERR(nfolio[i])) {
+ err = PTR_ERR(nfolio[i]);
+ f2fs_folio_put(nfolio[0], false);
goto release_out;
}
}
if (i < level) {
- parent = npage[i];
+ parent = nfolio[i];
nids[i + 1] = get_nid(parent, offset[i], false);
}
}
dn->nid = nids[level];
dn->ofs_in_node = offset[level];
- dn->node_page = npage[level];
- dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
+ dn->node_folio = nfolio[level];
+ dn->data_blkaddr = f2fs_data_blkaddr(dn);
+
+ if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
+ f2fs_sb_has_readonly(sbi)) {
+ unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ unsigned int ofs_in_node = dn->ofs_in_node;
+ pgoff_t fofs = index;
+ unsigned int c_len;
+ block_t blkaddr;
+
+ /* should align fofs and ofs_in_node to cluster_size */
+ if (fofs % cluster_size) {
+ fofs = round_down(fofs, cluster_size);
+ ofs_in_node = round_down(ofs_in_node, cluster_size);
+ }
+
+ c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
+ if (!c_len)
+ goto out;
+
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node);
+ if (blkaddr == COMPRESS_ADDR)
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
+ ofs_in_node + 1);
+
+ f2fs_update_read_extent_tree_range_compressed(dn->inode,
+ fofs, blkaddr, cluster_size, c_len);
+ }
+out:
return 0;
release_pages:
- f2fs_put_page(parent, 1);
+ f2fs_folio_put(parent, true);
if (i > 1)
- f2fs_put_page(npage[0], 0);
+ f2fs_folio_put(nfolio[0], false);
release_out:
- dn->inode_page = NULL;
- dn->node_page = NULL;
+ dn->inode_folio = NULL;
+ dn->node_folio = NULL;
+ if (err == -ENOENT) {
+ dn->cur_level = i;
+ dn->max_level = level;
+ dn->ofs_in_node = offset[level];
+ }
return err;
}
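
For reference, a condensed sketch of the caller convention noted in the comment above this function: prime the handle with set_new_dnode(), look up without allocating, read the result, and drop the folio references with f2fs_put_dnode() on every path. Kernel context is assumed and error handling is trimmed; this mirrors the common usage pattern rather than quoting any specific caller, and peek_block_addr is a hypothetical name:

static int peek_block_addr(struct inode *inode, pgoff_t index,
			   block_t *blkaddr)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return err;	/* -ENOENT if the path has a hole */

	*blkaddr = dn.data_blkaddr;	/* filled in by the lookup */
	f2fs_put_dnode(&dn);
	return 0;
}
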
-static void truncate_node(struct dnode_of_data *dn)
+static int truncate_node(struct dnode_of_data *dn)
{
- struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info ni;
+ int err;
+ pgoff_t index;
- get_node_info(sbi, dn->nid, &ni);
- if (dn->inode->i_blocks == 0) {
- BUG_ON(ni.blk_addr != NULL_ADDR);
- goto invalidate;
+ err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
+ if (err)
+ return err;
+
+ if (ni.blk_addr != NEW_ADDR &&
+ !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) {
+ f2fs_err_ratelimited(sbi,
+ "nat entry is corrupted, run fsck to fix it, ino:%u, "
+ "nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
+ return -EFSCORRUPTED;
}
- BUG_ON(ni.blk_addr == NULL_ADDR);
/* Deallocate node address */
- invalidate_blocks(sbi, ni.blk_addr);
- dec_valid_node_count(sbi, dn->inode, 1);
- set_node_addr(sbi, &ni, NULL_ADDR);
+ f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
+ dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
+ set_node_addr(sbi, &ni, NULL_ADDR, false);
if (dn->nid == dn->inode->i_ino) {
- remove_orphan_inode(sbi, dn->nid);
+ f2fs_remove_orphan_inode(sbi, dn->nid);
dec_valid_inode_count(sbi);
- } else {
- sync_inode_page(dn);
+ f2fs_inode_synced(dn->inode);
}
-invalidate:
- clear_node_page_dirty(dn->node_page);
- F2FS_SET_SB_DIRT(sbi);
- f2fs_put_page(dn->node_page, 1);
- dn->node_page = NULL;
+ clear_node_folio_dirty(dn->node_folio);
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+
+ index = dn->node_folio->index;
+ f2fs_folio_put(dn->node_folio, true);
+
+ invalidate_mapping_pages(NODE_MAPPING(sbi),
+ index, index);
+
+ dn->node_folio = NULL;
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
+
+ return 0;
}
static int truncate_dnode(struct dnode_of_data *dn)
{
- struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
- struct page *page;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ struct folio *folio;
+ int err;
if (dn->nid == 0)
return 1;
/* get direct node */
- page = get_node_page(sbi, dn->nid);
- if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
+ folio = f2fs_get_node_folio(sbi, dn->nid, NODE_TYPE_NON_INODE);
+ if (PTR_ERR(folio) == -ENOENT)
return 1;
- else if (IS_ERR(page))
- return PTR_ERR(page);
+ else if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ if (IS_INODE(folio) || ino_of_node(folio) != dn->inode->i_ino) {
+ f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
+ dn->inode->i_ino, dn->nid, ino_of_node(folio));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
+ f2fs_folio_put(folio, true);
+ return -EFSCORRUPTED;
+ }
/* Make dnode_of_data for parameter */
- dn->node_page = page;
+ dn->node_folio = folio;
dn->ofs_in_node = 0;
- truncate_data_blocks(dn);
- truncate_node(dn);
+ f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
+ err = truncate_node(dn);
+ if (err) {
+ f2fs_folio_put(folio, true);
+ return err;
+ }
+
return 1;
}
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
int ofs, int depth)
{
- struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
struct dnode_of_data rdn = *dn;
- struct page *page;
+ struct folio *folio;
struct f2fs_node *rn;
nid_t child_nid;
unsigned int child_nofs;
@@ -559,13 +1039,16 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
- page = get_node_page(sbi, dn->nid);
- if (IS_ERR(page)) {
- trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
- return PTR_ERR(page);
+ folio = f2fs_get_node_folio(F2FS_I_SB(dn->inode), dn->nid,
+ NODE_TYPE_NON_INODE);
+ if (IS_ERR(folio)) {
+ trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(folio));
+ return PTR_ERR(folio);
}
- rn = (struct f2fs_node *)page_address(page);
+ f2fs_ra_node_pages(folio, ofs, NIDS_PER_BLOCK);
+
+ rn = F2FS_NODE(folio);
if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
child_nid = le32_to_cpu(rn->in.nid[i]);
@@ -575,7 +1058,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
ret = truncate_dnode(&rdn);
if (ret < 0)
goto out_err;
- set_nid(page, i, 0, false);
+ if (set_nid(folio, i, 0, false))
+ dn->node_changed = true;
}
} else {
child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
@@ -588,7 +1072,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
rdn.nid = child_nid;
ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
if (ret == (NIDS_PER_BLOCK + 1)) {
- set_nid(page, i, 0, false);
+ if (set_nid(folio, i, 0, false))
+ dn->node_changed = true;
child_nofs += ret;
} else if (ret < 0 && ret != -ENOENT) {
goto out_err;
@@ -599,17 +1084,19 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
if (!ofs) {
/* remove current indirect node */
- dn->node_page = page;
- truncate_node(dn);
+ dn->node_folio = folio;
+ ret = truncate_node(dn);
+ if (ret)
+ goto out_err;
freed++;
} else {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
trace_f2fs_truncate_nodes_exit(dn->inode, freed);
return freed;
out_err:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
trace_f2fs_truncate_nodes_exit(dn->inode, ret);
return ret;
}
@@ -617,54 +1104,60 @@ out_err:
static int truncate_partial_nodes(struct dnode_of_data *dn,
struct f2fs_inode *ri, int *offset, int depth)
{
- struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
- struct page *pages[2];
+ struct folio *folios[2];
nid_t nid[3];
nid_t child_nid;
int err = 0;
int i;
int idx = depth - 2;
- nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
+ nid[0] = get_nid(dn->inode_folio, offset[0], true);
if (!nid[0])
return 0;
/* get indirect nodes in the path */
- for (i = 0; i < depth - 1; i++) {
- /* refernece count'll be increased */
- pages[i] = get_node_page(sbi, nid[i]);
- if (IS_ERR(pages[i])) {
- depth = i + 1;
- err = PTR_ERR(pages[i]);
+ for (i = 0; i < idx + 1; i++) {
+ /* reference count'll be increased */
+ folios[i] = f2fs_get_node_folio(F2FS_I_SB(dn->inode), nid[i],
+ NODE_TYPE_NON_INODE);
+ if (IS_ERR(folios[i])) {
+ err = PTR_ERR(folios[i]);
+ idx = i - 1;
goto fail;
}
- nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
+ nid[i + 1] = get_nid(folios[i], offset[i + 1], false);
}
+ f2fs_ra_node_pages(folios[idx], offset[idx + 1], NIDS_PER_BLOCK);
+
/* free direct nodes linked to a partial indirect node */
- for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
- child_nid = get_nid(pages[idx], i, false);
+ for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
+ child_nid = get_nid(folios[idx], i, false);
if (!child_nid)
continue;
dn->nid = child_nid;
err = truncate_dnode(dn);
if (err < 0)
goto fail;
- set_nid(pages[idx], i, 0, false);
+ if (set_nid(folios[idx], i, 0, false))
+ dn->node_changed = true;
}
- if (offset[depth - 1] == 0) {
- dn->node_page = pages[idx];
+ if (offset[idx + 1] == 0) {
+ dn->node_folio = folios[idx];
dn->nid = nid[idx];
- truncate_node(dn);
+ err = truncate_node(dn);
+ if (err)
+ goto fail;
} else {
- f2fs_put_page(pages[idx], 1);
+ f2fs_folio_put(folios[idx], true);
}
offset[idx]++;
- offset[depth - 1] = 0;
+ offset[idx + 1] = 0;
+ idx--;
fail:
- for (i = depth - 3; i >= 0; i--)
- f2fs_put_page(pages[i], 1);
+ for (i = idx; i >= 0; i--)
+ f2fs_folio_put(folios[i], true);
trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
@@ -674,31 +1167,41 @@ fail:
/*
* All the block addresses of data and nodes should be nullified.
*/
-int truncate_inode_blocks(struct inode *inode, pgoff_t from)
+int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- struct address_space *node_mapping = sbi->node_inode->i_mapping;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err = 0, cont = 1;
int level, offset[4], noffset[4];
unsigned int nofs = 0;
- struct f2fs_node *rn;
+ struct f2fs_inode *ri;
struct dnode_of_data dn;
- struct page *page;
+ struct folio *folio;
trace_f2fs_truncate_inode_blocks_enter(inode, from);
- level = get_node_path(from, offset, noffset);
-restart:
- page = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page)) {
- trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
- return PTR_ERR(page);
+ level = get_node_path(inode, from, offset, noffset);
+ if (level <= 0) {
+ if (!level) {
+ level = -EFSCORRUPTED;
+ f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u",
+ __func__, inode->i_ino,
+ from, ADDRS_PER_INODE(inode));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ }
+ trace_f2fs_truncate_inode_blocks_exit(inode, level);
+ return level;
+ }
+
+ folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(folio)) {
+ trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(folio));
+ return PTR_ERR(folio);
}
- set_new_dnode(&dn, inode, page, NULL, 0);
- unlock_page(page);
+ set_new_dnode(&dn, inode, folio, NULL, 0);
+ folio_unlock(folio);
- rn = page_address(page);
+ ri = F2FS_INODE(folio);
switch (level) {
case 0:
case 1:
@@ -708,7 +1211,7 @@ restart:
nofs = noffset[1];
if (!offset[level - 1])
goto skip_partial;
- err = truncate_partial_nodes(&dn, &rn->i, offset, level);
+ err = truncate_partial_nodes(&dn, ri, offset, level);
if (err < 0 && err != -ENOENT)
goto fail;
nofs += 1 + NIDS_PER_BLOCK;
@@ -717,7 +1220,7 @@ restart:
nofs = 5 + 2 * NIDS_PER_BLOCK;
if (!offset[level - 1])
goto skip_partial;
- err = truncate_partial_nodes(&dn, &rn->i, offset, level);
+ err = truncate_partial_nodes(&dn, ri, offset, level);
if (err < 0 && err != -ENOENT)
goto fail;
break;
@@ -727,7 +1230,7 @@ restart:
skip_partial:
while (cont) {
- dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
+ dn.nid = get_nid(folio, offset[0], true);
switch (offset[0]) {
case NODE_DIR1_BLOCK:
case NODE_DIR2_BLOCK:
@@ -747,305 +1250,833 @@ skip_partial:
default:
BUG();
}
- if (err < 0 && err != -ENOENT)
+ if (err == -ENOENT) {
+ set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+ f2fs_err_ratelimited(sbi,
+ "truncate node fail, ino:%lu, nid:%u, "
+ "offset[0]:%d, offset[1]:%d, nofs:%d",
+ inode->i_ino, dn.nid, offset[0],
+ offset[1], nofs);
+ err = 0;
+ }
+ if (err < 0)
goto fail;
- if (offset[1] == 0 &&
- rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
- lock_page(page);
- if (page->mapping != node_mapping) {
- f2fs_put_page(page, 1);
- goto restart;
- }
- wait_on_page_writeback(page);
- rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
- set_page_dirty(page);
- unlock_page(page);
+ if (offset[1] == 0 && get_nid(folio, offset[0], true)) {
+ folio_lock(folio);
+ BUG_ON(!is_node_folio(folio));
+ set_nid(folio, offset[0], 0, true);
+ folio_unlock(folio);
}
offset[1] = 0;
offset[0]++;
nofs += err;
}
fail:
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
trace_f2fs_truncate_inode_blocks_exit(inode, err);
return err > 0 ? 0 : err;
}
+/* caller must lock inode page */
+int f2fs_truncate_xattr_node(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ nid_t nid = F2FS_I(inode)->i_xattr_nid;
+ struct dnode_of_data dn;
+ struct folio *nfolio;
+ int err;
+
+ if (!nid)
+ return 0;
+
+ nfolio = f2fs_get_xnode_folio(sbi, nid);
+ if (IS_ERR(nfolio))
+ return PTR_ERR(nfolio);
+
+ set_new_dnode(&dn, inode, NULL, nfolio, nid);
+ err = truncate_node(&dn);
+ if (err) {
+ f2fs_folio_put(nfolio, true);
+ return err;
+ }
+
+ f2fs_i_xnid_write(inode, 0);
+
+ return 0;
+}
+
/*
- * Caller should grab and release a mutex by calling mutex_lock_op() and
- * mutex_unlock_op().
+ * Caller should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op().
*/
-int remove_inode_page(struct inode *inode)
+int f2fs_remove_inode_page(struct inode *inode)
{
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- struct page *page;
- nid_t ino = inode->i_ino;
struct dnode_of_data dn;
+ int err;
- page = get_node_page(sbi, ino);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
+ err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
+ if (err)
+ return err;
- if (F2FS_I(inode)->i_xattr_nid) {
- nid_t nid = F2FS_I(inode)->i_xattr_nid;
- struct page *npage = get_node_page(sbi, nid);
+ err = f2fs_truncate_xattr_node(inode);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
- if (IS_ERR(npage))
- return PTR_ERR(npage);
+ /* remove potential inline_data blocks */
+ if (!IS_DEVICE_ALIASING(inode) &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
+ f2fs_truncate_data_blocks_range(&dn, 1);
- F2FS_I(inode)->i_xattr_nid = 0;
- set_new_dnode(&dn, inode, page, npage, nid);
- dn.inode_page_locked = 1;
- truncate_node(&dn);
+ /* 0 is possible, after f2fs_new_inode() has failed */
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+ f2fs_put_dnode(&dn);
+ return -EIO;
}
- /* 0 is possible, after f2fs_new_inode() is failed */
- BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
- set_new_dnode(&dn, inode, page, page, ino);
- truncate_node(&dn);
+ if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
+ f2fs_warn(F2FS_I_SB(inode),
+ "f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
+ inode->i_ino, (unsigned long long)inode->i_blocks);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ }
+
+ /* will put inode & node pages */
+ err = truncate_node(&dn);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
return 0;
}
-struct page *new_inode_page(struct inode *inode, const struct qstr *name)
+struct folio *f2fs_new_inode_folio(struct inode *inode)
{
struct dnode_of_data dn;
/* allocate inode page for new inode */
set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
- /* caller should f2fs_put_page(page, 1); */
- return new_node_page(&dn, 0, NULL);
+ /* caller should f2fs_folio_put(folio, true); */
+ return f2fs_new_node_folio(&dn, 0);
}
-struct page *new_node_page(struct dnode_of_data *dn,
- unsigned int ofs, struct page *ipage)
+struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs)
{
- struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
- struct address_space *mapping = sbi->node_inode->i_mapping;
- struct node_info old_ni, new_ni;
- struct page *page;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ struct node_info new_ni;
+ struct folio *folio;
int err;
- if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return ERR_PTR(-EPERM);
- page = grab_cache_page(mapping, dn->nid);
- if (!page)
- return ERR_PTR(-ENOMEM);
-
- get_node_info(sbi, dn->nid, &old_ni);
-
- SetPageUptodate(page);
- fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
+ folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), dn->nid, false);
+ if (IS_ERR(folio))
+ return folio;
- /* Reinitialize old_ni with new node page */
- BUG_ON(old_ni.blk_addr != NULL_ADDR);
- new_ni = old_ni;
- new_ni.ino = dn->inode->i_ino;
+ if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
+ goto fail;
- if (!inc_valid_node_count(sbi, dn->inode, 1)) {
- err = -ENOSPC;
+#ifdef CONFIG_F2FS_CHECK_FS
+ err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
+ if (err) {
+ dec_valid_node_count(sbi, dn->inode, !ofs);
goto fail;
}
- set_node_addr(sbi, &new_ni, NEW_ADDR);
- set_cold_node(dn->inode, page);
+ if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
+ err = -EFSCORRUPTED;
+ dec_valid_node_count(sbi, dn->inode, !ofs);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn_ratelimited(sbi,
+ "f2fs_new_node_folio: inconsistent nat entry, "
+ "ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
+ new_ni.ino, new_ni.nid, new_ni.blk_addr,
+ new_ni.version, new_ni.flag);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
+ goto fail;
+ }
+#endif
+ new_ni.nid = dn->nid;
+ new_ni.ino = dn->inode->i_ino;
+ new_ni.blk_addr = NULL_ADDR;
+ new_ni.flag = 0;
+ new_ni.version = 0;
+ set_node_addr(sbi, &new_ni, NEW_ADDR, false);
+
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
+ fill_node_footer(folio, dn->nid, dn->inode->i_ino, ofs, true);
+ set_cold_node(folio, S_ISDIR(dn->inode->i_mode));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ if (folio_mark_dirty(folio))
+ dn->node_changed = true;
+
+ if (f2fs_has_xattr_block(ofs))
+ f2fs_i_xnid_write(dn->inode, dn->nid);
- dn->node_page = page;
- if (ipage)
- update_inode(dn->inode, ipage);
- else
- sync_inode_page(dn);
- set_page_dirty(page);
if (ofs == 0)
inc_valid_inode_count(sbi);
-
- return page;
-
+ return folio;
fail:
- clear_node_page_dirty(page);
- f2fs_put_page(page, 1);
+ clear_node_folio_dirty(folio);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
/*
* Caller should do after getting the following values.
- * 0: f2fs_put_page(page, 0)
- * LOCKED_PAGE: f2fs_put_page(page, 1)
- * error: nothing
+ * 0: f2fs_folio_put(folio, false)
+ * LOCKED_PAGE or error: f2fs_folio_put(folio, true)
*/
-static int read_node_page(struct page *page, int type)
+static int read_node_folio(struct folio *folio, blk_opf_t op_flags)
{
- struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
struct node_info ni;
+ struct f2fs_io_info fio = {
+ .sbi = sbi,
+ .type = NODE,
+ .op = REQ_OP_READ,
+ .op_flags = op_flags,
+ .folio = folio,
+ .encrypted_page = NULL,
+ };
+ int err;
- get_node_info(sbi, page->index, &ni);
+ if (folio_test_uptodate(folio)) {
+ if (!f2fs_inode_chksum_verify(sbi, folio)) {
+ folio_clear_uptodate(folio);
+ return -EFSBADCRC;
+ }
+ return LOCKED_PAGE;
+ }
- if (ni.blk_addr == NULL_ADDR) {
- f2fs_put_page(page, 1);
+ err = f2fs_get_node_info(sbi, folio->index, &ni, false);
+ if (err)
+ return err;
+
+ /* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
+ if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
+ folio_clear_uptodate(folio);
return -ENOENT;
}
- if (PageUptodate(page))
- return LOCKED_PAGE;
+ fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
- return f2fs_readpage(sbi, page, ni.blk_addr, type);
+ err = f2fs_submit_page_bio(&fio);
+
+ if (!err)
+ f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);
+
+ return err;
}
/*
* Readahead a node page
*/
-void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
+void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
- struct address_space *mapping = sbi->node_inode->i_mapping;
- struct page *apage;
+ struct folio *afolio;
int err;
- apage = find_get_page(mapping, nid);
- if (apage && PageUptodate(apage)) {
- f2fs_put_page(apage, 0);
+ if (!nid)
+ return;
+ if (f2fs_check_nid_range(sbi, nid))
+ return;
+
+ afolio = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
+ if (afolio)
return;
- }
- f2fs_put_page(apage, 0);
- apage = grab_cache_page(mapping, nid);
- if (!apage)
+ afolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
+ if (IS_ERR(afolio))
return;
- err = read_node_page(apage, READA);
- if (err == 0)
- f2fs_put_page(apage, 0);
- else if (err == LOCKED_PAGE)
- f2fs_put_page(apage, 1);
- return;
+ err = read_node_folio(afolio, REQ_RAHEAD);
+ f2fs_folio_put(afolio, err ? true : false);
}
-struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+static int sanity_check_node_footer(struct f2fs_sb_info *sbi,
+ struct folio *folio, pgoff_t nid,
+ enum node_type ntype)
{
- struct address_space *mapping = sbi->node_inode->i_mapping;
- struct page *page;
- int err;
-repeat:
- page = grab_cache_page(mapping, nid);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ if (unlikely(nid != nid_of_node(folio)))
+ goto out_err;
- err = read_node_page(page, READ_SYNC);
- if (err < 0)
- return ERR_PTR(err);
- else if (err == LOCKED_PAGE)
- goto got_it;
-
- lock_page(page);
- if (!PageUptodate(page)) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
- }
- if (page->mapping != mapping) {
- f2fs_put_page(page, 1);
- goto repeat;
+ switch (ntype) {
+ case NODE_TYPE_INODE:
+ if (!IS_INODE(folio))
+ goto out_err;
+ break;
+ case NODE_TYPE_XATTR:
+ if (!f2fs_has_xattr_block(ofs_of_node(folio)))
+ goto out_err;
+ break;
+ case NODE_TYPE_NON_INODE:
+ if (IS_INODE(folio))
+ goto out_err;
+ break;
+ default:
+ break;
}
-got_it:
- BUG_ON(nid != nid_of_node(page));
- mark_page_accessed(page);
- return page;
+ if (time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))
+ goto out_err;
+ return 0;
+out_err:
+ f2fs_warn(sbi, "inconsistent node block, node_type:%d, nid:%lu, "
+ "node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
+ ntype, nid, nid_of_node(folio), ino_of_node(folio),
+ ofs_of_node(folio), cpver_of_node(folio),
+ next_blkaddr_of_node(folio));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
+ return -EFSCORRUPTED;
}
-/*
- * Return a locked page for the desired node page.
- * And, readahead MAX_RA_NODE number of node pages.
- */
-struct page *get_node_page_ra(struct page *parent, int start)
+static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
+ struct folio *parent, int start, enum node_type ntype)
{
- struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
- struct address_space *mapping = sbi->node_inode->i_mapping;
- struct blk_plug plug;
- struct page *page;
- int err, i, end;
- nid_t nid;
+ struct folio *folio;
+ int err;
- /* First, try getting the desired direct node. */
- nid = get_nid(parent, start, false);
if (!nid)
return ERR_PTR(-ENOENT);
+ if (f2fs_check_nid_range(sbi, nid))
+ return ERR_PTR(-EINVAL);
repeat:
- page = grab_cache_page(mapping, nid);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
+ if (IS_ERR(folio))
+ return folio;
- err = read_node_page(page, READ_SYNC);
+ err = read_node_folio(folio, 0);
if (err < 0)
- return ERR_PTR(err);
- else if (err == LOCKED_PAGE)
+ goto out_put_err;
+ if (err == LOCKED_PAGE)
goto page_hit;
- blk_start_plug(&plug);
+ if (parent)
+ f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
- /* Then, try readahead for siblings of the desired node */
- end = start + MAX_RA_NODE;
- end = min(end, NIDS_PER_BLOCK);
- for (i = start + 1; i < end; i++) {
- nid = get_nid(parent, i, false);
- if (!nid)
- continue;
- ra_node_page(sbi, nid);
+ folio_lock(folio);
+
+ if (unlikely(!is_node_folio(folio))) {
+ f2fs_folio_put(folio, true);
+ goto repeat;
}
- blk_finish_plug(&plug);
+ if (unlikely(!folio_test_uptodate(folio))) {
+ err = -EIO;
+ goto out_put_err;
+ }
- lock_page(page);
- if (page->mapping != mapping) {
- f2fs_put_page(page, 1);
- goto repeat;
+ if (!f2fs_inode_chksum_verify(sbi, folio)) {
+ err = -EFSBADCRC;
+ goto out_err;
}
page_hit:
- if (!PageUptodate(page)) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
+ err = sanity_check_node_footer(sbi, folio, nid, ntype);
+ if (!err)
+ return folio;
+out_err:
+ folio_clear_uptodate(folio);
+out_put_err:
+	/* -ENOENT from read_node_folio is not an error. */
+ if (err != -ENOENT)
+ f2fs_handle_page_eio(sbi, folio, NODE);
+ f2fs_folio_put(folio, true);
+ return ERR_PTR(err);
+}
+
+struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
+ enum node_type node_type)
+{
+ return __get_node_folio(sbi, nid, NULL, 0, node_type);
+}
+
+struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino)
+{
+ return __get_node_folio(sbi, ino, NULL, 0, NODE_TYPE_INODE);
+}
+
+struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid)
+{
+ return __get_node_folio(sbi, xnid, NULL, 0, NODE_TYPE_XATTR);
+}
+
+static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start)
+{
+ struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
+ nid_t nid = get_nid(parent, start, false);
+
+ return __get_node_folio(sbi, nid, parent, start, NODE_TYPE_REGULAR);
+}
+
+static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ struct inode *inode;
+ struct folio *folio;
+ int ret;
+
+ /* should flush inline_data before evict_inode */
+ inode = ilookup(sbi->sb, ino);
+ if (!inode)
+ return;
+
+ folio = f2fs_filemap_get_folio(inode->i_mapping, 0,
+ FGP_LOCK|FGP_NOWAIT, 0);
+ if (IS_ERR(folio))
+ goto iput_out;
+
+ if (!folio_test_uptodate(folio))
+ goto folio_out;
+
+ if (!folio_test_dirty(folio))
+ goto folio_out;
+
+ if (!folio_clear_dirty_for_io(folio))
+ goto folio_out;
+
+ ret = f2fs_write_inline_data(inode, folio);
+ inode_dec_dirty_pages(inode);
+ f2fs_remove_dirty_inode(inode);
+ if (ret)
+ folio_mark_dirty(folio);
+folio_out:
+ f2fs_folio_put(folio, true);
+iput_out:
+ iput(inode);
+}
+
+static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ pgoff_t index;
+ struct folio_batch fbatch;
+ struct folio *last_folio = NULL;
+ int nr_folios;
+
+ folio_batch_init(&fbatch);
+ index = 0;
+
+ while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
+ (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+ &fbatch))) {
+ int i;
+
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_folio_put(last_folio, false);
+ folio_batch_release(&fbatch);
+ return ERR_PTR(-EIO);
+ }
+
+ if (!IS_DNODE(folio) || !is_cold_node(folio))
+ continue;
+ if (ino_of_node(folio) != ino)
+ continue;
+
+ folio_lock(folio);
+
+ if (unlikely(!is_node_folio(folio))) {
+continue_unlock:
+ folio_unlock(folio);
+ continue;
+ }
+ if (ino_of_node(folio) != ino)
+ goto continue_unlock;
+
+ if (!folio_test_dirty(folio)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ if (last_folio)
+ f2fs_folio_put(last_folio, false);
+
+ folio_get(folio);
+ last_folio = folio;
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
}
- mark_page_accessed(page);
- return page;
+ return last_folio;
}
-void sync_inode_page(struct dnode_of_data *dn)
+static bool __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
+ struct writeback_control *wbc, bool do_balance,
+ enum iostat_type io_type, unsigned int *seq_id)
{
- if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
- update_inode(dn->inode, dn->node_page);
- } else if (dn->inode_page) {
- if (!dn->inode_page_locked)
- lock_page(dn->inode_page);
- update_inode(dn->inode, dn->inode_page);
- if (!dn->inode_page_locked)
- unlock_page(dn->inode_page);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
+ nid_t nid;
+ struct node_info ni;
+ struct f2fs_io_info fio = {
+ .sbi = sbi,
+ .ino = ino_of_node(folio),
+ .type = NODE,
+ .op = REQ_OP_WRITE,
+ .op_flags = wbc_to_write_flags(wbc),
+ .folio = folio,
+ .encrypted_page = NULL,
+ .submitted = 0,
+ .io_type = io_type,
+ .io_wbc = wbc,
+ };
+ unsigned int seq;
+
+ trace_f2fs_writepage(folio, NODE);
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ /* keep node pages in remount-ro mode */
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
+ goto redirty_out;
+ folio_clear_uptodate(folio);
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ folio_unlock(folio);
+ return true;
+ }
+
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto redirty_out;
+
+ if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ wbc->sync_mode == WB_SYNC_NONE &&
+ IS_DNODE(folio) && is_cold_node(folio))
+ goto redirty_out;
+
+ /* get old block addr of this node page */
+ nid = nid_of_node(folio);
+ f2fs_bug_on(sbi, folio->index != nid);
+
+ if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
+ goto redirty_out;
+
+ f2fs_down_read(&sbi->node_write);
+
+ /* This page is already truncated */
+ if (unlikely(ni.blk_addr == NULL_ADDR)) {
+ folio_clear_uptodate(folio);
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ f2fs_up_read(&sbi->node_write);
+ folio_unlock(folio);
+ return true;
+ }
+
+ if (__is_valid_data_blkaddr(ni.blk_addr) &&
+ !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
+ DATA_GENERIC_ENHANCE)) {
+ f2fs_up_read(&sbi->node_write);
+ goto redirty_out;
+ }
+
+ if (atomic && !test_opt(sbi, NOBARRIER))
+ fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
+
+ /* should add to global list before clearing PAGECACHE status */
+ if (f2fs_in_warm_node_list(sbi, folio)) {
+ seq = f2fs_add_fsync_node_entry(sbi, folio);
+ if (seq_id)
+ *seq_id = seq;
+ }
+
+ folio_start_writeback(folio);
+
+ fio.old_blkaddr = ni.blk_addr;
+ f2fs_do_write_node_page(nid, &fio);
+ set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(folio));
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ f2fs_up_read(&sbi->node_write);
+
+ folio_unlock(folio);
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_submit_merged_write(sbi, NODE);
+ submitted = NULL;
+ }
+ if (submitted)
+ *submitted = fio.submitted;
+
+ if (do_balance)
+ f2fs_balance_fs(sbi, false);
+ return true;
+
+redirty_out:
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
+ return false;
+}
+
+int f2fs_move_node_folio(struct folio *node_folio, int gc_type)
+{
+ int err = 0;
+
+ if (gc_type == FG_GC) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 1,
+ };
+
+ f2fs_folio_wait_writeback(node_folio, NODE, true, true);
+
+ folio_mark_dirty(node_folio);
+
+ if (!folio_clear_dirty_for_io(node_folio)) {
+ err = -EAGAIN;
+ goto out_page;
+ }
+
+ if (!__write_node_folio(node_folio, false, NULL,
+ &wbc, false, FS_GC_NODE_IO, NULL))
+ err = -EAGAIN;
+ goto release_page;
} else {
- update_inode_page(dn->inode);
+ /* set page dirty and write it */
+ if (!folio_test_writeback(node_folio))
+ folio_mark_dirty(node_folio);
+ }
+out_page:
+ folio_unlock(node_folio);
+release_page:
+ f2fs_folio_put(node_folio, false);
+ return err;
+}
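/*
 * Editor's sketch, not part of the patch: how a GC-side caller is
 * expected to drive f2fs_move_node_folio(). The folio must arrive
 * locked and referenced, and the function releases both on every path,
 * so the caller only looks at the return code; -EAGAIN means the folio
 * was lost to a race and can be retried on a later GC pass.
 * gc_one_node_sketch() is a hypothetical caller, not f2fs code.
 */
static void gc_one_node_sketch(struct f2fs_sb_info *sbi, nid_t nid,
				int gc_type)
{
	struct folio *folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);

	if (IS_ERR(folio))
		return;

	/* on -EAGAIN the block is simply left for the next GC round */
	f2fs_move_node_folio(folio, gc_type);
}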
+
+int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
+ struct writeback_control *wbc, bool atomic,
+ unsigned int *seq_id)
+{
+ pgoff_t index;
+ struct folio_batch fbatch;
+ int ret = 0;
+ struct folio *last_folio = NULL;
+ bool marked = false;
+ nid_t ino = inode->i_ino;
+ int nr_folios;
+ int nwritten = 0;
+
+ if (atomic) {
+ last_folio = last_fsync_dnode(sbi, ino);
+ if (IS_ERR_OR_NULL(last_folio))
+ return PTR_ERR_OR_ZERO(last_folio);
+ }
+retry:
+ folio_batch_init(&fbatch);
+ index = 0;
+
+ while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
+ (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+ &fbatch))) {
+ int i;
+
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
+ bool submitted = false;
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_folio_put(last_folio, false);
+ folio_batch_release(&fbatch);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!IS_DNODE(folio) || !is_cold_node(folio))
+ continue;
+ if (ino_of_node(folio) != ino)
+ continue;
+
+ folio_lock(folio);
+
+ if (unlikely(!is_node_folio(folio))) {
+continue_unlock:
+ folio_unlock(folio);
+ continue;
+ }
+ if (ino_of_node(folio) != ino)
+ goto continue_unlock;
+
+ if (!folio_test_dirty(folio) && folio != last_folio) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
+
+ set_fsync_mark(folio, 0);
+ set_dentry_mark(folio, 0);
+
+ if (!atomic || folio == last_folio) {
+ set_fsync_mark(folio, 1);
+ percpu_counter_inc(&sbi->rf_node_block_count);
+ if (IS_INODE(folio)) {
+ if (is_inode_flag_set(inode,
+ FI_DIRTY_INODE))
+ f2fs_update_inode(inode, folio);
+ set_dentry_mark(folio,
+ f2fs_need_dentry_mark(sbi, ino));
+ }
+			/* may be written by another thread */
+ if (!folio_test_dirty(folio))
+ folio_mark_dirty(folio);
+ }
+
+ if (!folio_clear_dirty_for_io(folio))
+ goto continue_unlock;
+
+ if (!__write_node_folio(folio, atomic &&
+ folio == last_folio,
+ &submitted, wbc, true,
+ FS_NODE_IO, seq_id)) {
+ f2fs_folio_put(last_folio, false);
+ folio_batch_release(&fbatch);
+ ret = -EIO;
+ goto out;
+ }
+ if (submitted)
+ nwritten++;
+
+ if (folio == last_folio) {
+ f2fs_folio_put(folio, false);
+ folio_batch_release(&fbatch);
+ marked = true;
+ goto out;
+ }
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+ if (atomic && !marked) {
+ f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
+ ino, last_folio->index);
+ folio_lock(last_folio);
+ f2fs_folio_wait_writeback(last_folio, NODE, true, true);
+ folio_mark_dirty(last_folio);
+ folio_unlock(last_folio);
+ goto retry;
+ }
+out:
+ if (nwritten)
+ f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
+ return ret;
+}
+
+static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ bool clean;
+
+ if (inode->i_ino != ino)
+ return 0;
+
+ if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
+ return 0;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ clean = list_empty(&F2FS_I(inode)->gdirty_list);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+
+ if (clean)
+ return 0;
+
+ inode = igrab(inode);
+ if (!inode)
+ return 0;
+ return 1;
+}
+
+static bool flush_dirty_inode(struct folio *folio)
+{
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
+ struct inode *inode;
+ nid_t ino = ino_of_node(folio);
+
+ inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
+ if (!inode)
+ return false;
+
+ f2fs_update_inode(inode, folio);
+ folio_unlock(folio);
+
+ iput(inode);
+ return true;
+}
+
+void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
+{
+ pgoff_t index = 0;
+ struct folio_batch fbatch;
+ int nr_folios;
+
+ folio_batch_init(&fbatch);
+
+ while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
+ (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+ &fbatch))) {
+ int i;
+
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ if (!IS_INODE(folio))
+ continue;
+
+ folio_lock(folio);
+
+ if (unlikely(!is_node_folio(folio)))
+ goto unlock;
+ if (!folio_test_dirty(folio))
+ goto unlock;
+
+			/* flush inline_data if we are in an async context. */
+ if (folio_test_f2fs_inline(folio)) {
+ folio_clear_f2fs_inline(folio);
+ folio_unlock(folio);
+ flush_inline_data(sbi, ino_of_node(folio));
+ continue;
+ }
+unlock:
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
}
}
-int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
- struct writeback_control *wbc)
+int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
+ struct writeback_control *wbc,
+ bool do_balance, enum iostat_type io_type)
{
- struct address_space *mapping = sbi->node_inode->i_mapping;
- pgoff_t index, end;
- struct pagevec pvec;
- int step = ino ? 2 : 0;
- int nwritten = 0, wrote = 0;
+ pgoff_t index;
+ struct folio_batch fbatch;
+ int step = 0;
+ int nwritten = 0;
+ int ret = 0;
+ int nr_folios, done = 0;
- pagevec_init(&pvec, 0);
+ folio_batch_init(&fbatch);
next_step:
index = 0;
- end = LONG_MAX;
-
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
+ &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+ &fbatch))) {
+ int i;
+
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
+ bool submitted = false;
+
+			/* give priority to WB_SYNC threads */
+ if (atomic_read(&sbi->wb_sync_req[NODE]) &&
+ wbc->sync_mode == WB_SYNC_NONE) {
+ done = 1;
+ break;
+ }
/*
* flushing sequence with step:
@@ -1053,58 +2084,68 @@ next_step:
* 1. dentry dnodes
* 2. file dnodes
*/
- if (step == 0 && IS_DNODE(page))
+ if (step == 0 && IS_DNODE(folio))
continue;
- if (step == 1 && (!IS_DNODE(page) ||
- is_cold_node(page)))
+ if (step == 1 && (!IS_DNODE(folio) ||
+ is_cold_node(folio)))
continue;
- if (step == 2 && (!IS_DNODE(page) ||
- !is_cold_node(page)))
+ if (step == 2 && (!IS_DNODE(folio) ||
+ !is_cold_node(folio)))
continue;
-
- /*
-		 * In fsync mode,
- * we should not skip writing node pages.
- */
- if (ino && ino_of_node(page) == ino)
- lock_page(page);
- else if (!trylock_page(page))
+lock_node:
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ folio_lock(folio);
+ else if (!folio_trylock(folio))
continue;
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(!is_node_folio(folio))) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (ino && ino_of_node(page) != ino)
- goto continue_unlock;
- if (!PageDirty(page)) {
+ if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
- if (!clear_page_dirty_for_io(page))
+			/* flush inline_data/inode if we are in an async context. */
+ if (!do_balance)
+ goto write_node;
+
+ /* flush inline_data */
+ if (folio_test_f2fs_inline(folio)) {
+ folio_clear_f2fs_inline(folio);
+ folio_unlock(folio);
+ flush_inline_data(sbi, ino_of_node(folio));
+ goto lock_node;
+ }
+
+ /* flush dirty inode */
+ if (IS_INODE(folio) && flush_dirty_inode(folio))
+ goto lock_node;
+write_node:
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
+
+ if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
- /* called by fsync() */
- if (ino && IS_DNODE(page)) {
- int mark = !is_checkpointed_node(sbi, ino);
- set_fsync_mark(page, 1);
- if (IS_INODE(page))
- set_dentry_mark(page, mark);
- nwritten++;
- } else {
- set_fsync_mark(page, 0);
- set_dentry_mark(page, 0);
+ set_fsync_mark(folio, 0);
+ set_dentry_mark(folio, 0);
+
+ if (!__write_node_folio(folio, false, &submitted,
+ wbc, do_balance, io_type, NULL)) {
+ folio_batch_release(&fbatch);
+ ret = -EIO;
+ goto out;
}
- mapping->a_ops->writepage(page, wbc);
- wrote++;
+ if (submitted)
+ nwritten++;
if (--wbc->nr_to_write == 0)
break;
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
if (wbc->nr_to_write == 0) {
@@ -1114,251 +2155,457 @@ continue_unlock:
}
if (step < 2) {
+ if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ wbc->sync_mode == WB_SYNC_NONE && step == 1)
+ goto out;
step++;
goto next_step;
}
+out:
+ if (nwritten)
+ f2fs_submit_merged_write(sbi, NODE);
- if (wrote)
- f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);
-
- return nwritten;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+ return ret;
}
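/*
 * Editor's sketch, not part of the patch: the predicate behind the
 * three-pass ordering that f2fs_sync_node_pages() implements. Every
 * node folio is written in exactly one pass: indirect node blocks
 * first, then hot (dentry) dnodes, then cold (regular file) dnodes.
 */
static int node_flush_step(bool is_dnode, bool is_cold)
{
	if (!is_dnode)
		return 0;	/* indirect node blocks */
	if (!is_cold)
		return 1;	/* dentry dnodes */
	return 2;		/* file dnodes */
}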
-static int f2fs_write_node_page(struct page *page,
- struct writeback_control *wbc)
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+ unsigned int seq_id)
{
- struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
- nid_t nid;
- block_t new_addr;
- struct node_info ni;
-
- wait_on_page_writeback(page);
+ struct fsync_node_entry *fn;
+ struct list_head *head = &sbi->fsync_node_list;
+ unsigned long flags;
+ unsigned int cur_seq_id = 0;
- /* get old block addr of this node page */
- nid = nid_of_node(page);
- BUG_ON(page->index != nid);
+ while (seq_id && cur_seq_id < seq_id) {
+ struct folio *folio;
- get_node_info(sbi, nid, &ni);
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ break;
+ }
+ fn = list_first_entry(head, struct fsync_node_entry, list);
+ if (fn->seq_id > seq_id) {
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ break;
+ }
+ cur_seq_id = fn->seq_id;
+ folio = fn->folio;
+ folio_get(folio);
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
- /* This page is already truncated */
- if (ni.blk_addr == NULL_ADDR) {
- dec_page_count(sbi, F2FS_DIRTY_NODES);
- unlock_page(page);
- return 0;
- }
+ f2fs_folio_wait_writeback(folio, NODE, true, false);
- if (wbc->for_reclaim) {
- dec_page_count(sbi, F2FS_DIRTY_NODES);
- wbc->pages_skipped++;
- set_page_dirty(page);
- return AOP_WRITEPAGE_ACTIVATE;
+ folio_put(folio);
}
- mutex_lock(&sbi->node_write);
- set_page_writeback(page);
- write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
- set_node_addr(sbi, &ni, new_addr);
- dec_page_count(sbi, F2FS_DIRTY_NODES);
- mutex_unlock(&sbi->node_write);
- unlock_page(page);
- return 0;
+ return filemap_check_errors(NODE_MAPPING(sbi));
}
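/*
 * Editor's sketch, not part of the patch: how the two halves above fit
 * together on the fsync path, following the shape of f2fs_do_sync_file()
 * in file.c. fsync_nodes_sketch() is a hypothetical wrapper, and all
 * other error handling is elided.
 */
static int fsync_nodes_sketch(struct f2fs_sb_info *sbi, struct inode *inode,
				struct writeback_control *wbc, bool atomic)
{
	unsigned int seq_id = 0;
	int ret;

	/* submit dirty node folios; the last one carries the fsync mark */
	ret = f2fs_fsync_node_pages(sbi, inode, wbc, atomic, &seq_id);
	if (ret)
		return ret;

	/* block until every entry with seq <= seq_id leaves writeback */
	return f2fs_wait_on_node_pages_writeback(sbi, seq_id);
}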
-/*
- * It is very important to gather dirty pages and write at once, so that we can
- * submit a big bio without interfering with other data writes.
- * By default, 512 pages (2MB), a segment size, is quite reasonable.
- */
-#define COLLECT_DIRTY_NODES 512
static int f2fs_write_node_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
- long nr_to_write = wbc->nr_to_write;
+ struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct blk_plug plug;
+ long diff;
- /* First check balancing cached NAT entries */
- if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
- f2fs_sync_fs(sbi->sb, true);
- return 0;
- }
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto skip_write;
+
+ /* balancing f2fs's metadata in background */
+ f2fs_balance_fs_bg(sbi, true);
/* collect a number of dirty node pages and write together */
- if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
- return 0;
+ if (wbc->sync_mode != WB_SYNC_ALL &&
+ get_pages(sbi, F2FS_DIRTY_NODES) <
+ nr_pages_to_skip(sbi, NODE))
+ goto skip_write;
+
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ atomic_inc(&sbi->wb_sync_req[NODE]);
+ else if (atomic_read(&sbi->wb_sync_req[NODE])) {
+ /* to avoid potential deadlock */
+ if (current->plug)
+ blk_finish_plug(current->plug);
+ goto skip_write;
+ }
-	/* if mounting failed, skip writing node pages */
- wbc->nr_to_write = max_hw_blocks(sbi);
- sync_node_pages(sbi, 0, wbc);
- wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
- return 0;
-}
+ trace_f2fs_writepages(mapping->host, wbc, NODE);
-static int f2fs_set_node_page_dirty(struct page *page)
-{
- struct address_space *mapping = page->mapping;
- struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+ diff = nr_pages_to_write(sbi, NODE, wbc);
+ blk_start_plug(&plug);
+ f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
+ blk_finish_plug(&plug);
+ wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
- SetPageUptodate(page);
- if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
- inc_page_count(sbi, F2FS_DIRTY_NODES);
- SetPagePrivate(page);
- return 1;
- }
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ atomic_dec(&sbi->wb_sync_req[NODE]);
return 0;
-}
-static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
- unsigned int length)
-{
- struct inode *inode = page->mapping->host;
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- if (PageDirty(page))
- dec_page_count(sbi, F2FS_DIRTY_NODES);
- ClearPagePrivate(page);
+skip_write:
+ wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
+ trace_f2fs_writepages(mapping->host, wbc, NODE);
+ return 0;
}
-static int f2fs_release_node_page(struct page *page, gfp_t wait)
+static bool f2fs_dirty_node_folio(struct address_space *mapping,
+ struct folio *folio)
{
- ClearPagePrivate(page);
- return 1;
+ trace_f2fs_set_page_dirty(folio, NODE);
+
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+#ifdef CONFIG_F2FS_CHECK_FS
+ if (IS_INODE(folio))
+ f2fs_inode_chksum_set(F2FS_M_SB(mapping), folio);
+#endif
+ if (filemap_dirty_folio(mapping, folio)) {
+ inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
+ folio_set_f2fs_reference(folio);
+ return true;
+ }
+ return false;
}
/*
* Structure of the f2fs node operations
*/
const struct address_space_operations f2fs_node_aops = {
- .writepage = f2fs_write_node_page,
.writepages = f2fs_write_node_pages,
- .set_page_dirty = f2fs_set_node_page_dirty,
- .invalidatepage = f2fs_invalidate_node_page,
- .releasepage = f2fs_release_node_page,
+ .dirty_folio = f2fs_dirty_node_folio,
+ .invalidate_folio = f2fs_invalidate_folio,
+ .release_folio = f2fs_release_folio,
+ .migrate_folio = filemap_migrate_folio,
};
-static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
+static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
+ nid_t n)
{
- struct list_head *this;
- struct free_nid *i;
- list_for_each(this, head) {
- i = list_entry(this, struct free_nid, list);
- if (i->nid == n)
- return i;
+ return radix_tree_lookup(&nm_i->free_nid_root, n);
+}
+
+static int __insert_free_nid(struct f2fs_sb_info *sbi,
+ struct free_nid *i)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
+
+ if (err)
+ return err;
+
+ nm_i->nid_cnt[FREE_NID]++;
+ list_add_tail(&i->list, &nm_i->free_nid_list);
+ return 0;
+}
+
+static void __remove_free_nid(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_state state)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ f2fs_bug_on(sbi, state != i->state);
+ nm_i->nid_cnt[state]--;
+ if (state == FREE_NID)
+ list_del(&i->list);
+ radix_tree_delete(&nm_i->free_nid_root, i->nid);
+}
+
+static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
+ enum nid_state org_state, enum nid_state dst_state)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ f2fs_bug_on(sbi, org_state != i->state);
+ i->state = dst_state;
+ nm_i->nid_cnt[org_state]--;
+ nm_i->nid_cnt[dst_state]++;
+
+ switch (dst_state) {
+ case PREALLOC_NID:
+ list_del(&i->list);
+ break;
+ case FREE_NID:
+ list_add_tail(&i->list, &nm_i->free_nid_list);
+ break;
+ default:
+ BUG_ON(1);
}
- return NULL;
}
-static void __del_from_free_nid_list(struct free_nid *i)
+static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
+ bool set, bool build)
{
- list_del(&i->list);
- kmem_cache_free(free_nid_slab, i);
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
+ unsigned int nid_ofs = nid - START_NID(nid);
+
+ if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+ return;
+
+ if (set) {
+ if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
+ return;
+ __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+ nm_i->free_nid_count[nat_ofs]++;
+ } else {
+ if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
+ return;
+ __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+ if (!build)
+ nm_i->free_nid_count[nat_ofs]--;
+ }
}
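/*
 * Editor's sketch, not part of the patch: the two-level indexing used by
 * the free-nid bitmap above. Assuming the default 4KB block size, a NAT
 * block holds NAT_ENTRY_PER_BLOCK (455) entries, so a nid splits into a
 * NAT-block index and a slot inside that block; e.g. nid 1000 maps to
 * block 2, slot 90 (1000 == 2 * 455 + 90).
 */
static void nid_to_bitmap_slot(nid_t nid, unsigned int *nat_ofs,
				unsigned int *nid_ofs)
{
	*nat_ofs = nid / NAT_ENTRY_PER_BLOCK;	/* NAT_BLOCK_OFFSET(nid) */
	*nid_ofs = nid % NAT_ENTRY_PER_BLOCK;	/* nid - START_NID(nid) */
}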
-static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
+/* return whether the nid is recognized as free */
+static bool add_free_nid(struct f2fs_sb_info *sbi,
+ nid_t nid, bool build, bool update)
{
- struct free_nid *i;
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct free_nid *i, *e;
struct nat_entry *ne;
- bool allocated = false;
-
- if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
- return -1;
+ int err;
+ bool ret = false;
/* 0 nid should not be used */
- if (nid == 0)
- return 0;
+ if (unlikely(nid == 0))
+ return false;
- if (!build)
- goto retry;
+ if (unlikely(f2fs_check_nid_range(sbi, nid)))
+ return false;
- /* do not add allocated nids */
- read_lock(&nm_i->nat_tree_lock);
- ne = __lookup_nat_cache(nm_i, nid);
- if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
- allocated = true;
- read_unlock(&nm_i->nat_tree_lock);
- if (allocated)
- return 0;
-retry:
- i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
- if (!i) {
- cond_resched();
- goto retry;
- }
+ i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
i->nid = nid;
- i->state = NID_NEW;
+ i->state = FREE_NID;
- spin_lock(&nm_i->free_nid_list_lock);
- if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
- spin_unlock(&nm_i->free_nid_list_lock);
- kmem_cache_free(free_nid_slab, i);
- return 0;
+ err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+ f2fs_bug_on(sbi, err);
+
+ err = -EINVAL;
+
+ spin_lock(&nm_i->nid_list_lock);
+
+ if (build) {
+ /*
+ * Thread A Thread B
+ * - f2fs_create
+ * - f2fs_new_inode
+ * - f2fs_alloc_nid
+ * - __insert_nid_to_list(PREALLOC_NID)
+ * - f2fs_balance_fs_bg
+ * - f2fs_build_free_nids
+ * - __f2fs_build_free_nids
+ * - scan_nat_page
+ * - add_free_nid
+ * - __lookup_nat_cache
+ * - f2fs_add_link
+ * - f2fs_init_inode_metadata
+ * - f2fs_new_inode_folio
+ * - f2fs_new_node_folio
+ * - set_node_addr
+ * - f2fs_alloc_nid_done
+ * - __remove_nid_from_list(PREALLOC_NID)
+ * - __insert_nid_to_list(FREE_NID)
+ */
+ ne = __lookup_nat_cache(nm_i, nid, false);
+ if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
+ nat_get_blkaddr(ne) != NULL_ADDR))
+ goto err_out;
+
+ e = __lookup_free_nid_list(nm_i, nid);
+ if (e) {
+ if (e->state == FREE_NID)
+ ret = true;
+ goto err_out;
+ }
}
- list_add_tail(&i->list, &nm_i->free_nid_list);
- nm_i->fcnt++;
- spin_unlock(&nm_i->free_nid_list_lock);
- return 1;
+ ret = true;
+ err = __insert_free_nid(sbi, i);
+err_out:
+ if (update) {
+ update_free_nid_bitmap(sbi, nid, ret, build);
+ if (!build)
+ nm_i->available_nids++;
+ }
+ spin_unlock(&nm_i->nid_list_lock);
+ radix_tree_preload_end();
+
+ if (err)
+ kmem_cache_free(free_nid_slab, i);
+ return ret;
}
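/*
 * Editor's sketch, not part of the patch: the radix-tree preload pattern
 * that add_free_nid() relies on. Insertion cannot allocate while the
 * spinlock is held, so per-CPU nodes are preloaded first; preemption
 * stays disabled until radix_tree_preload_end(). insert_under_lock() is
 * a hypothetical helper using the plain (non-__GFP_NOFAIL) preload.
 */
static int insert_under_lock(struct radix_tree_root *root, spinlock_t *lock,
				unsigned long index, void *item)
{
	int err = radix_tree_preload(GFP_NOFS);

	if (err)
		return err;

	spin_lock(lock);
	err = radix_tree_insert(root, index, item);	/* uses the preload */
	spin_unlock(lock);

	radix_tree_preload_end();	/* re-enables preemption */
	return err;
}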
-static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
- spin_lock(&nm_i->free_nid_list_lock);
- i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
- if (i && i->state == NID_NEW) {
- __del_from_free_nid_list(i);
- nm_i->fcnt--;
+ bool need_free = false;
+
+ spin_lock(&nm_i->nid_list_lock);
+ i = __lookup_free_nid_list(nm_i, nid);
+ if (i && i->state == FREE_NID) {
+ __remove_free_nid(sbi, i, FREE_NID);
+ need_free = true;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
+
+ if (need_free)
+ kmem_cache_free(free_nid_slab, i);
}
-static void scan_nat_page(struct f2fs_nm_info *nm_i,
- struct page *nat_page, nid_t start_nid)
+static int scan_nat_page(struct f2fs_sb_info *sbi,
+ struct f2fs_nat_block *nat_blk, nid_t start_nid)
{
- struct f2fs_nat_block *nat_blk = page_address(nat_page);
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
block_t blk_addr;
+ unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
int i;
+ __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
+
i = start_nid % NAT_ENTRY_PER_BLOCK;
for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
-
- if (start_nid >= nm_i->max_nid)
+ if (unlikely(start_nid >= nm_i->max_nid))
break;
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
- BUG_ON(blk_addr == NEW_ADDR);
+
+ if (blk_addr == NEW_ADDR)
+ return -EFSCORRUPTED;
+
if (blk_addr == NULL_ADDR) {
- if (add_free_nid(nm_i, start_nid, true) < 0)
+ add_free_nid(sbi, start_nid, true, true);
+ } else {
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ update_free_nid_bitmap(sbi, start_nid, false, true);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
+ }
+
+ return 0;
+}
+
+static void scan_curseg_cache(struct f2fs_sb_info *sbi)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+ struct f2fs_journal *journal = curseg->journal;
+ int i;
+
+ down_read(&curseg->journal_rwsem);
+ for (i = 0; i < nats_in_cursum(journal); i++) {
+ block_t addr;
+ nid_t nid;
+
+ addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
+ nid = le32_to_cpu(nid_in_journal(journal, i));
+ if (addr == NULL_ADDR)
+ add_free_nid(sbi, nid, true, false);
+ else
+ remove_free_nid(sbi, nid);
+ }
+ up_read(&curseg->journal_rwsem);
+}
+
+static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int i, idx;
+ nid_t nid;
+
+ f2fs_down_read(&nm_i->nat_tree_lock);
+
+ for (i = 0; i < nm_i->nat_blocks; i++) {
+ if (!test_bit_le(i, nm_i->nat_block_bitmap))
+ continue;
+ if (!nm_i->free_nid_count[i])
+ continue;
+ for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
+ idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
+ NAT_ENTRY_PER_BLOCK, idx);
+ if (idx >= NAT_ENTRY_PER_BLOCK)
break;
+
+ nid = i * NAT_ENTRY_PER_BLOCK + idx;
+ add_free_nid(sbi, nid, true, false);
+
+ if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
+ goto out;
}
}
+out:
+ scan_curseg_cache(sbi);
+
+ f2fs_up_read(&nm_i->nat_tree_lock);
}
-static void build_free_nids(struct f2fs_sb_info *sbi)
+static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+ bool sync, bool mount)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
- int i = 0;
+ int i = 0, ret;
nid_t nid = nm_i->next_scan_nid;
+ if (unlikely(nid >= nm_i->max_nid))
+ nid = 0;
+
+ if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
+ nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
+
/* Enough entries */
- if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
- return;
+ if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
+ return 0;
+
+ if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
+ return 0;
+
+ if (!mount) {
+ /* try to find free nids in free_nid_bitmap */
+ scan_free_nid_bits(sbi);
+
+ if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
+ return 0;
+ }
/* readahead nat pages to be scanned */
- ra_nat_pages(sbi, nid);
+ f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
+ META_NAT, true);
+
+ f2fs_down_read(&nm_i->nat_tree_lock);
while (1) {
- struct page *page = get_current_nat_page(sbi, nid);
+ if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
+ nm_i->nat_block_bitmap)) {
+ struct folio *folio = get_current_nat_folio(sbi, nid);
+
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
+ } else {
+ ret = scan_nat_page(sbi, folio_address(folio),
+ nid);
+ f2fs_folio_put(folio, true);
+ }
- scan_nat_page(nm_i, page, nid);
- f2fs_put_page(page, 1);
+ if (ret) {
+ f2fs_up_read(&nm_i->nat_tree_lock);
+
+ if (ret == -EFSCORRUPTED) {
+ f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi,
+ ERROR_INCONSISTENT_NAT);
+ }
+
+ return ret;
+ }
+ }
nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
- if (nid >= nm_i->max_nid)
+ if (unlikely(nid >= nm_i->max_nid))
nid = 0;
- if (i++ == FREE_NID_PAGES)
+ if (++i >= FREE_NID_PAGES)
break;
}
@@ -1366,16 +2613,25 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
nm_i->next_scan_nid = nid;
/* find free nids from current sum_pages */
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < nats_in_cursum(sum); i++) {
- block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
- nid = le32_to_cpu(nid_in_journal(sum, i));
- if (addr == NULL_ADDR)
- add_free_nid(nm_i, nid, true);
- else
- remove_free_nid(nm_i, nid);
- }
- mutex_unlock(&curseg->curseg_mutex);
+ scan_curseg_cache(sbi);
+
+ f2fs_up_read(&nm_i->nat_tree_lock);
+
+ f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
+ nm_i->ra_nid_pages, META_NAT, false);
+
+ return 0;
+}
+
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+{
+ int ret;
+
+ mutex_lock(&NM_I(sbi)->build_lock);
+ ret = __f2fs_build_free_nids(sbi, sync, mount);
+ mutex_unlock(&NM_I(sbi)->build_lock);
+
+ return ret;
}
/*
@@ -1383,306 +2639,643 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when the
 * inode is created.
*/
-bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
+bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL;
- struct list_head *this;
retry:
- if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
+ if (time_to_inject(sbi, FAULT_ALLOC_NID))
return false;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
- /* We should not use stale free nids created by build_free_nids */
- if (nm_i->fcnt && !sbi->on_build_free_nids) {
- BUG_ON(list_empty(&nm_i->free_nid_list));
- list_for_each(this, &nm_i->free_nid_list) {
- i = list_entry(this, struct free_nid, list);
- if (i->state == NID_NEW)
- break;
+ if (unlikely(nm_i->available_nids == 0)) {
+ spin_unlock(&nm_i->nid_list_lock);
+ return false;
+ }
+
+ /* We should not use stale free nids created by f2fs_build_free_nids */
+ if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
+ f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
+ i = list_first_entry(&nm_i->free_nid_list,
+ struct free_nid, list);
+
+ if (unlikely(is_invalid_nid(sbi, i->nid))) {
+ spin_unlock(&nm_i->nid_list_lock);
+ f2fs_err(sbi, "Corrupted nid %u in free_nid_list",
+ i->nid);
+ f2fs_stop_checkpoint(sbi, false,
+ STOP_CP_REASON_CORRUPTED_NID);
+ return false;
}
- BUG_ON(i->state != NID_NEW);
*nid = i->nid;
- i->state = NID_ALLOC;
- nm_i->fcnt--;
- spin_unlock(&nm_i->free_nid_list_lock);
+
+ __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
+ nm_i->available_nids--;
+
+ update_free_nid_bitmap(sbi, *nid, false, false);
+
+ spin_unlock(&nm_i->nid_list_lock);
return true;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
 	/* Let's scan nat pages and their caches to get free nids */
- mutex_lock(&nm_i->build_lock);
- sbi->on_build_free_nids = 1;
- build_free_nids(sbi);
- sbi->on_build_free_nids = 0;
- mutex_unlock(&nm_i->build_lock);
- goto retry;
+ if (!f2fs_build_free_nids(sbi, true, false))
+ goto retry;
+ return false;
}
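/*
 * Editor's sketch, not part of the patch: the allocation protocol that
 * the two comments below spell out. Every successful f2fs_alloc_nid()
 * must be resolved by exactly one of f2fs_alloc_nid_done() (the nid is
 * now in use) or f2fs_alloc_nid_failed() (return it to the free list).
 * nid_lifecycle_sketch() is a hypothetical caller.
 */
static int nid_lifecycle_sketch(struct f2fs_sb_info *sbi, bool creation_failed)
{
	nid_t nid;

	if (!f2fs_alloc_nid(sbi, &nid))		/* FREE_NID -> PREALLOC_NID */
		return -ENOSPC;

	if (creation_failed) {
		f2fs_alloc_nid_failed(sbi, nid);	/* back to FREE_NID */
		return -EIO;
	}

	f2fs_alloc_nid_done(sbi, nid);	/* drop tracking, nid is in use */
	return 0;
}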
/*
- * alloc_nid() should be called prior to this function.
+ * f2fs_alloc_nid() should be called prior to this function.
*/
-void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
+void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
- spin_lock(&nm_i->free_nid_list_lock);
- i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
- BUG_ON(!i || i->state != NID_ALLOC);
- __del_from_free_nid_list(i);
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
+ i = __lookup_free_nid_list(nm_i, nid);
+ f2fs_bug_on(sbi, !i);
+ __remove_free_nid(sbi, i, PREALLOC_NID);
+ spin_unlock(&nm_i->nid_list_lock);
+
+ kmem_cache_free(free_nid_slab, i);
}
/*
- * alloc_nid() should be called prior to this function.
+ * f2fs_alloc_nid() should be called prior to this function.
*/
-void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
+void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
+ bool need_free = false;
- spin_lock(&nm_i->free_nid_list_lock);
- i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
- BUG_ON(!i || i->state != NID_ALLOC);
- if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
- __del_from_free_nid_list(i);
+ if (!nid)
+ return;
+
+ spin_lock(&nm_i->nid_list_lock);
+ i = __lookup_free_nid_list(nm_i, nid);
+ f2fs_bug_on(sbi, !i);
+
+ if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
+ __remove_free_nid(sbi, i, PREALLOC_NID);
+ need_free = true;
} else {
- i->state = NID_NEW;
- nm_i->fcnt++;
+ __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
}
- spin_unlock(&nm_i->free_nid_list_lock);
+
+ nm_i->available_nids++;
+
+ update_free_nid_bitmap(sbi, nid, true, false);
+
+ spin_unlock(&nm_i->nid_list_lock);
+
+ if (need_free)
+ kmem_cache_free(free_nid_slab, i);
}
-void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
- struct f2fs_summary *sum, struct node_info *ni,
- block_t new_blkaddr)
+int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
- rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
- set_node_addr(sbi, ni, new_blkaddr);
- clear_node_page_dirty(page);
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ int nr = nr_shrink;
+
+ if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
+ return 0;
+
+ if (!mutex_trylock(&nm_i->build_lock))
+ return 0;
+
+ while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
+ struct free_nid *i, *next;
+ unsigned int batch = SHRINK_NID_BATCH_SIZE;
+
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
+ if (!nr_shrink || !batch ||
+ nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
+ break;
+ __remove_free_nid(sbi, i, FREE_NID);
+ kmem_cache_free(free_nid_slab, i);
+ nr_shrink--;
+ batch--;
+ }
+ spin_unlock(&nm_i->nid_list_lock);
+ }
+
+ mutex_unlock(&nm_i->build_lock);
+
+ return nr - nr_shrink;
}
-int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
+int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio)
{
- struct address_space *mapping = sbi->node_inode->i_mapping;
- struct f2fs_node *src, *dst;
- nid_t ino = ino_of_node(page);
- struct node_info old_ni, new_ni;
- struct page *ipage;
+ void *src_addr, *dst_addr;
+ size_t inline_size;
+ struct folio *ifolio;
+ struct f2fs_inode *ri;
+
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
+
+ ri = F2FS_INODE(folio);
+ if (ri->i_inline & F2FS_INLINE_XATTR) {
+ if (!f2fs_has_inline_xattr(inode)) {
+ set_inode_flag(inode, FI_INLINE_XATTR);
+ stat_inc_inline_xattr(inode);
+ }
+ } else {
+ if (f2fs_has_inline_xattr(inode)) {
+ stat_dec_inline_xattr(inode);
+ clear_inode_flag(inode, FI_INLINE_XATTR);
+ }
+ goto update_inode;
+ }
- ipage = grab_cache_page(mapping, ino);
- if (!ipage)
- return -ENOMEM;
+ dst_addr = inline_xattr_addr(inode, ifolio);
+ src_addr = inline_xattr_addr(inode, folio);
+ inline_size = inline_xattr_size(inode);
- /* Should not use this inode from free nid list */
- remove_free_nid(NM_I(sbi), ino);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
+ memcpy(dst_addr, src_addr, inline_size);
+update_inode:
+ f2fs_update_inode(inode, ifolio);
+ f2fs_folio_put(ifolio, true);
+ return 0;
+}
- get_node_info(sbi, ino, &old_ni);
- SetPageUptodate(ipage);
- fill_node_footer(ipage, ino, ino, 0, true);
+int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
+ nid_t new_xnid;
+ struct dnode_of_data dn;
+ struct node_info ni;
+ struct folio *xfolio;
+ int err;
- src = (struct f2fs_node *)page_address(page);
- dst = (struct f2fs_node *)page_address(ipage);
+ if (!prev_xnid)
+ goto recover_xnid;
- memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
- dst->i.i_size = 0;
- dst->i.i_blocks = cpu_to_le64(1);
- dst->i.i_links = cpu_to_le32(1);
- dst->i.i_xattr_nid = 0;
+ /* 1: invalidate the previous xattr nid */
+ err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
+ if (err)
+ return err;
+
+ f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
+ dec_valid_node_count(sbi, inode, false);
+ set_node_addr(sbi, &ni, NULL_ADDR, false);
+
+recover_xnid:
+ /* 2: update xattr nid in inode */
+ if (!f2fs_alloc_nid(sbi, &new_xnid))
+ return -ENOSPC;
+
+ set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
+ xfolio = f2fs_new_node_folio(&dn, XATTR_NODE_OFFSET);
+ if (IS_ERR(xfolio)) {
+ f2fs_alloc_nid_failed(sbi, new_xnid);
+ return PTR_ERR(xfolio);
+ }
+
+ f2fs_alloc_nid_done(sbi, new_xnid);
+ f2fs_update_inode_page(inode);
+
+ /* 3: update and set xattr node page dirty */
+ if (folio) {
+ memcpy(F2FS_NODE(xfolio), F2FS_NODE(folio),
+ VALID_XATTR_BLOCK_SIZE);
+ folio_mark_dirty(xfolio);
+ }
+ f2fs_folio_put(xfolio, true);
+
+ return 0;
+}
+
+int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio)
+{
+ struct f2fs_inode *src, *dst;
+ nid_t ino = ino_of_node(folio);
+ struct node_info old_ni, new_ni;
+ struct folio *ifolio;
+ int err;
+
+ err = f2fs_get_node_info(sbi, ino, &old_ni, false);
+ if (err)
+ return err;
+
+ if (unlikely(old_ni.blk_addr != NULL_ADDR))
+ return -EINVAL;
+retry:
+ ifolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), ino, false);
+ if (IS_ERR(ifolio)) {
+ memalloc_retry_wait(GFP_NOFS);
+ goto retry;
+ }
+
+ /* Should not use this inode from free nid list */
+ remove_free_nid(sbi, ino);
+
+ if (!folio_test_uptodate(ifolio))
+ folio_mark_uptodate(ifolio);
+ fill_node_footer(ifolio, ino, ino, 0, true);
+ set_cold_node(ifolio, false);
+
+ src = F2FS_INODE(folio);
+ dst = F2FS_INODE(ifolio);
+
+ memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
+ dst->i_size = 0;
+ dst->i_blocks = cpu_to_le64(1);
+ dst->i_links = cpu_to_le32(1);
+ dst->i_xattr_nid = 0;
+ dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
+ if (dst->i_inline & F2FS_EXTRA_ATTR) {
+ dst->i_extra_isize = src->i_extra_isize;
+
+ if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
+ F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
+ i_inline_xattr_size))
+ dst->i_inline_xattr_size = src->i_inline_xattr_size;
+
+ if (f2fs_sb_has_project_quota(sbi) &&
+ F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
+ i_projid))
+ dst->i_projid = src->i_projid;
+
+ if (f2fs_sb_has_inode_crtime(sbi) &&
+ F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
+ i_crtime_nsec)) {
+ dst->i_crtime = src->i_crtime;
+ dst->i_crtime_nsec = src->i_crtime_nsec;
+ }
+ }
new_ni = old_ni;
new_ni.ino = ino;
- if (!inc_valid_node_count(sbi, NULL, 1))
+ if (unlikely(inc_valid_node_count(sbi, NULL, true)))
WARN_ON(1);
- set_node_addr(sbi, &new_ni, NEW_ADDR);
+ set_node_addr(sbi, &new_ni, NEW_ADDR, false);
inc_valid_inode_count(sbi);
- f2fs_put_page(ipage, 1);
+ folio_mark_dirty(ifolio);
+ f2fs_folio_put(ifolio, true);
return 0;
}
-int restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum)
{
struct f2fs_node *rn;
struct f2fs_summary *sum_entry;
- struct page *page;
block_t addr;
- int i, last_offset;
-
-	/* alloc a temporary page for reading the node */
- page = alloc_page(GFP_NOFS | __GFP_ZERO);
- if (IS_ERR(page))
- return PTR_ERR(page);
- lock_page(page);
+ int i, idx, last_offset, nrpages;
/* scan the node segment */
- last_offset = sbi->blocks_per_seg;
+ last_offset = BLKS_PER_SEG(sbi);
addr = START_BLOCK(sbi, segno);
sum_entry = &sum->entries[0];
- for (i = 0; i < last_offset; i++, sum_entry++) {
- /*
- * In order to read next node page,
- * we must clear PageUptodate flag.
- */
- ClearPageUptodate(page);
+ for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
+ nrpages = bio_max_segs(last_offset - i);
- if (f2fs_readpage(sbi, page, addr, READ_SYNC))
- goto out;
+ /* readahead node pages */
+ f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
- lock_page(page);
- rn = (struct f2fs_node *)page_address(page);
- sum_entry->nid = rn->footer.nid;
- sum_entry->version = 0;
- sum_entry->ofs_in_node = 0;
- addr++;
+ for (idx = addr; idx < addr + nrpages; idx++) {
+ struct folio *folio = f2fs_get_tmp_folio(sbi, idx);
+
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ rn = F2FS_NODE(folio);
+ sum_entry->nid = rn->footer.nid;
+ sum_entry->version = 0;
+ sum_entry->ofs_in_node = 0;
+ sum_entry++;
+ f2fs_folio_put(folio, true);
+ }
+
+ invalidate_mapping_pages(META_MAPPING(sbi), addr,
+ addr + nrpages);
}
- unlock_page(page);
-out:
- __free_pages(page, 0);
return 0;
}
-static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
+static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
int i;
+ bool init_dirty;
- mutex_lock(&curseg->curseg_mutex);
-
- if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
- mutex_unlock(&curseg->curseg_mutex);
- return false;
- }
-
- for (i = 0; i < nats_in_cursum(sum); i++) {
+ down_write(&curseg->journal_rwsem);
+ for (i = 0; i < nats_in_cursum(journal); i++) {
struct nat_entry *ne;
struct f2fs_nat_entry raw_ne;
- nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
+ nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
- raw_ne = nat_in_journal(sum, i);
-retry:
- write_lock(&nm_i->nat_tree_lock);
- ne = __lookup_nat_cache(nm_i, nid);
- if (ne) {
- __set_nat_cache_dirty(nm_i, ne);
- write_unlock(&nm_i->nat_tree_lock);
+ if (f2fs_check_nid_range(sbi, nid))
continue;
- }
- ne = grab_nat_entry(nm_i, nid);
+
+ init_dirty = false;
+
+ raw_ne = nat_in_journal(journal, i);
+
+ ne = __lookup_nat_cache(nm_i, nid, true);
if (!ne) {
- write_unlock(&nm_i->nat_tree_lock);
- goto retry;
+ init_dirty = true;
+ ne = __alloc_nat_entry(sbi, nid, true);
+ __init_nat_entry(nm_i, ne, &raw_ne, true, true);
}
- nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
- nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
- nat_set_version(ne, raw_ne.version);
- __set_nat_cache_dirty(nm_i, ne);
- write_unlock(&nm_i->nat_tree_lock);
- }
- update_nats_in_cursum(sum, -i);
- mutex_unlock(&curseg->curseg_mutex);
- return true;
+
+ /*
+	 * if a free nat in the journal has not been used since the last
+	 * checkpoint, remove it from the available nids, since we will
+	 * add it back later.
+ */
+ if (!get_nat_flag(ne, IS_DIRTY) &&
+ le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
+ spin_lock(&nm_i->nid_list_lock);
+ nm_i->available_nids--;
+ spin_unlock(&nm_i->nid_list_lock);
+ }
+
+ __set_nat_cache_dirty(nm_i, ne, init_dirty);
+ }
+ update_nats_in_cursum(journal, -i);
+ up_write(&curseg->journal_rwsem);
+}
+
+static void __adjust_nat_entry_set(struct nat_entry_set *nes,
+ struct list_head *head, int max)
+{
+ struct nat_entry_set *cur;
+
+ if (nes->entry_cnt >= max)
+ goto add_out;
+
+ list_for_each_entry(cur, head, set_list) {
+ if (cur->entry_cnt >= nes->entry_cnt) {
+ list_add(&nes->set_list, cur->set_list.prev);
+ return;
+ }
+ }
+add_out:
+ list_add_tail(&nes->set_list, head);
+}
+
+static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+ const struct f2fs_nat_block *nat_blk)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
+ int valid = 0;
+ int i = 0;
+
+ if (!enabled_nat_bits(sbi, NULL))
+ return;
+
+ if (nat_index == 0) {
+ valid = 1;
+ i = 1;
+ }
+ for (; i < NAT_ENTRY_PER_BLOCK; i++) {
+ if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
+ valid++;
+ }
+ if (valid == 0) {
+ __set_bit_le(nat_index, nm_i->empty_nat_bits);
+ __clear_bit_le(nat_index, nm_i->full_nat_bits);
+ return;
+ }
+
+ __clear_bit_le(nat_index, nm_i->empty_nat_bits);
+ if (valid == NAT_ENTRY_PER_BLOCK)
+ __set_bit_le(nat_index, nm_i->full_nat_bits);
+ else
+ __clear_bit_le(nat_index, nm_i->full_nat_bits);
+}
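/*
 * Editor's sketch, not part of the patch: the three-way classification
 * that __update_nat_bits() encodes into the full/empty bitmaps. NAT
 * block 0 starts its count at valid = 1 because nid 0 is reserved and
 * never handed out.
 */
enum nat_block_kind { NAT_BLK_PARTIAL, NAT_BLK_EMPTY, NAT_BLK_FULL };

static enum nat_block_kind classify_nat_block(int valid_entries)
{
	if (valid_entries == 0)
		return NAT_BLK_EMPTY;	/* empty bit set, full bit cleared */
	if (valid_entries == NAT_ENTRY_PER_BLOCK)
		return NAT_BLK_FULL;	/* full bit set, empty bit cleared */
	return NAT_BLK_PARTIAL;		/* both bits cleared */
}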
+
+static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
+ struct nat_entry_set *set, struct cp_control *cpc)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+ struct f2fs_journal *journal = curseg->journal;
+ nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
+ bool to_journal = true;
+ struct f2fs_nat_block *nat_blk;
+ struct nat_entry *ne, *cur;
+ struct folio *folio = NULL;
+
+ /*
+	 * there are two ways to flush nat entries:
+	 * #1, flush nat entries to the journal in the current hot data
+	 *     summary block.
+	 * #2, flush nat entries to the nat page.
+ */
+ if (enabled_nat_bits(sbi, cpc) ||
+ !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
+ to_journal = false;
+
+ if (to_journal) {
+ down_write(&curseg->journal_rwsem);
+ } else {
+ folio = get_next_nat_folio(sbi, start_nid);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ nat_blk = folio_address(folio);
+ f2fs_bug_on(sbi, !nat_blk);
+ }
+
+ /* flush dirty nats in nat entry set */
+ list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
+ struct f2fs_nat_entry *raw_ne;
+ nid_t nid = nat_get_nid(ne);
+ int offset;
+
+ f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
+
+ if (to_journal) {
+ offset = f2fs_lookup_journal_in_cursum(journal,
+ NAT_JOURNAL, nid, 1);
+ f2fs_bug_on(sbi, offset < 0);
+ raw_ne = &nat_in_journal(journal, offset);
+ nid_in_journal(journal, offset) = cpu_to_le32(nid);
+ } else {
+ raw_ne = &nat_blk->entries[nid - start_nid];
+ }
+ raw_nat_from_node_info(raw_ne, &ne->ni);
+ nat_reset_flag(ne);
+ __clear_nat_cache_dirty(NM_I(sbi), set, ne);
+ if (nat_get_blkaddr(ne) == NULL_ADDR) {
+ add_free_nid(sbi, nid, false, true);
+ } else {
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ update_free_nid_bitmap(sbi, nid, false, false);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
+ }
+
+ if (to_journal) {
+ up_write(&curseg->journal_rwsem);
+ } else {
+ __update_nat_bits(sbi, start_nid, nat_blk);
+ f2fs_folio_put(folio, true);
+ }
+
+ /* Allow dirty nats by node block allocation in write_begin */
+ if (!set->entry_cnt) {
+ radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
+ kmem_cache_free(nat_entry_set_slab, set);
+ }
+ return 0;
}
/*
* This function is called during the checkpointing process.
*/
-void flush_nat_entries(struct f2fs_sb_info *sbi)
+int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
- struct list_head *cur, *n;
- struct page *page = NULL;
- struct f2fs_nat_block *nat_blk = NULL;
- nid_t start_nid = 0, end_nid = 0;
- bool flushed;
+ struct f2fs_journal *journal = curseg->journal;
+ struct nat_entry_set *setvec[NAT_VEC_SIZE];
+ struct nat_entry_set *set, *tmp;
+ unsigned int found;
+ nid_t set_idx = 0;
+ LIST_HEAD(sets);
+ int err = 0;
- flushed = flush_nats_in_journal(sbi);
+ /*
+ * during unmount, let's flush nat_bits before checking
+ * nat_cnt[DIRTY_NAT].
+ */
+ if (enabled_nat_bits(sbi, cpc)) {
+ f2fs_down_write(&nm_i->nat_tree_lock);
+ remove_nats_in_journal(sbi);
+ f2fs_up_write(&nm_i->nat_tree_lock);
+ }
- if (!flushed)
- mutex_lock(&curseg->curseg_mutex);
+ if (!nm_i->nat_cnt[DIRTY_NAT])
+ return 0;
- /* 1) flush dirty nat caches */
- list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
- struct nat_entry *ne;
- nid_t nid;
- struct f2fs_nat_entry raw_ne;
- int offset = -1;
- block_t new_blkaddr;
+ f2fs_down_write(&nm_i->nat_tree_lock);
- ne = list_entry(cur, struct nat_entry, list);
- nid = nat_get_nid(ne);
+ /*
+	 * if there is not enough space in the journal to store dirty nat
+	 * entries, remove all entries from the journal and merge them
+	 * into the nat entry set.
+ */
+ if (enabled_nat_bits(sbi, cpc) ||
+ !__has_cursum_space(journal,
+ nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
+ remove_nats_in_journal(sbi);
- if (nat_get_blkaddr(ne) == NEW_ADDR)
- continue;
- if (flushed)
- goto to_nat_page;
-
- /* if there is room for nat enries in curseg->sumpage */
- offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
- if (offset >= 0) {
- raw_ne = nat_in_journal(sum, offset);
- goto flush_now;
- }
-to_nat_page:
- if (!page || (start_nid > nid || nid > end_nid)) {
- if (page) {
- f2fs_put_page(page, 1);
- page = NULL;
- }
- start_nid = START_NID(nid);
- end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
+ while ((found = __gang_lookup_nat_set(nm_i,
+ set_idx, NAT_VEC_SIZE, setvec))) {
+ unsigned idx;
- /*
- * get nat block with dirty flag, increased reference
- * count, mapped and lock
- */
- page = get_next_nat_page(sbi, start_nid);
- nat_blk = page_address(page);
- }
+ set_idx = setvec[found - 1]->set + 1;
+ for (idx = 0; idx < found; idx++)
+ __adjust_nat_entry_set(setvec[idx], &sets,
+ MAX_NAT_JENTRIES(journal));
+ }
- BUG_ON(!nat_blk);
- raw_ne = nat_blk->entries[nid - start_nid];
-flush_now:
- new_blkaddr = nat_get_blkaddr(ne);
+ /* flush dirty nats in nat entry set */
+ list_for_each_entry_safe(set, tmp, &sets, set_list) {
+ err = __flush_nat_entry_set(sbi, set, cpc);
+ if (err)
+ break;
+ }
- raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
- raw_ne.block_addr = cpu_to_le32(new_blkaddr);
- raw_ne.version = nat_get_version(ne);
+ f2fs_up_write(&nm_i->nat_tree_lock);
+ /* Allow dirty nats by node block allocation in write_begin */
- if (offset < 0) {
- nat_blk->entries[nid - start_nid] = raw_ne;
- } else {
- nat_in_journal(sum, offset) = raw_ne;
- nid_in_journal(sum, offset) = cpu_to_le32(nid);
- }
+ return err;
+}
- if (nat_get_blkaddr(ne) == NULL_ADDR &&
- add_free_nid(NM_I(sbi), nid, false) <= 0) {
- write_lock(&nm_i->nat_tree_lock);
- __del_from_nat_cache(nm_i, ne);
- write_unlock(&nm_i->nat_tree_lock);
- } else {
- write_lock(&nm_i->nat_tree_lock);
- __clear_nat_cache_dirty(nm_i, ne);
- ne->checkpointed = true;
- write_unlock(&nm_i->nat_tree_lock);
- }
+static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
+ unsigned int i;
+ __u64 cp_ver = cur_cp_version(ckpt);
+ block_t nat_bits_addr;
+
+ if (!enabled_nat_bits(sbi, NULL))
+ return 0;
+
+ nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
+ nm_i->nat_bits = f2fs_kvzalloc(sbi,
+ F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
+ if (!nm_i->nat_bits)
+ return -ENOMEM;
+
+ nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
+ nm_i->nat_bits_blocks;
+ for (i = 0; i < nm_i->nat_bits_blocks; i++) {
+ struct folio *folio;
+
+ folio = f2fs_get_meta_folio(sbi, nat_bits_addr++);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
+ folio_address(folio), F2FS_BLKSIZE);
+ f2fs_folio_put(folio, true);
+ }
+
+ cp_ver |= (cur_cp_crc(ckpt) << 32);
+ if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
+ disable_nat_bits(sbi, true);
+ return 0;
}
- if (!flushed)
- mutex_unlock(&curseg->curseg_mutex);
- f2fs_put_page(page, 1);
- /* 2) shrink nat caches if necessary */
- try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
+ nm_i->full_nat_bits = nm_i->nat_bits + 8;
+ nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
+
+ f2fs_notice(sbi, "Found nat_bits in checkpoint");
+ return 0;
+}
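/*
 * Editor's sketch, not part of the patch: the 8-byte header check done
 * by __get_nat_bitmaps() above. The checkpoint version is combined with
 * its CRC shifted into the high 32 bits; a mismatch means the stored
 * nat_bits belong to a different checkpoint and are discarded.
 */
static bool nat_bits_match(__u64 cp_ver, __u64 cp_crc, const void *nat_bits)
{
	cp_ver |= (cp_crc << 32);	/* same combination as above */

	return *(const __le64 *)nat_bits == cpu_to_le64(cp_ver);
}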
+
+static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int i = 0;
+ nid_t nid, last_nid;
+
+ if (!enabled_nat_bits(sbi, NULL))
+ return;
+
+ for (i = 0; i < nm_i->nat_blocks; i++) {
+ i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
+ if (i >= nm_i->nat_blocks)
+ break;
+
+ __set_bit_le(i, nm_i->nat_block_bitmap);
+
+ nid = i * NAT_ENTRY_PER_BLOCK;
+ last_nid = nid + NAT_ENTRY_PER_BLOCK;
+
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ for (; nid < last_nid; nid++)
+ update_free_nid_bitmap(sbi, nid, true, true);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
+
+ for (i = 0; i < nm_i->nat_blocks; i++) {
+ i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
+ if (i >= nm_i->nat_blocks)
+ break;
+
+ __set_bit_le(i, nm_i->nat_block_bitmap);
+ }
}
static int init_node_manager(struct f2fs_sb_info *sbi)
@@ -1690,44 +3283,101 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned char *version_bitmap;
- unsigned int nat_segs, nat_blocks;
+ unsigned int nat_segs;
+ int err;
nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
 	/* segment_count_nat includes pair segment so divide by 2. */
nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
- nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
- nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
- nm_i->fcnt = 0;
- nm_i->nat_cnt = 0;
-
+ nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
+ nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
+
+	/* unused nids: 0, node, meta (and root, counted as a valid node) */
+ nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
+ F2FS_RESERVED_NODE_NUM;
+ nm_i->nid_cnt[FREE_NID] = 0;
+ nm_i->nid_cnt[PREALLOC_NID] = 0;
+ nm_i->ram_thresh = DEF_RAM_THRESHOLD;
+ nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
+ nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
+ nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;
+
+ INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
INIT_LIST_HEAD(&nm_i->free_nid_list);
- INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
+ INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
+ INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
INIT_LIST_HEAD(&nm_i->nat_entries);
- INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
+ spin_lock_init(&nm_i->nat_list_lock);
mutex_init(&nm_i->build_lock);
- spin_lock_init(&nm_i->free_nid_list_lock);
- rwlock_init(&nm_i->nat_tree_lock);
+ spin_lock_init(&nm_i->nid_list_lock);
+ init_f2fs_rwsem(&nm_i->nat_tree_lock);
nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
- if (!version_bitmap)
- return -EFAULT;
-
nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
GFP_KERNEL);
if (!nm_i->nat_bitmap)
return -ENOMEM;
+
+ if (!test_opt(sbi, NAT_BITS))
+ disable_nat_bits(sbi, true);
+
+ err = __get_nat_bitmaps(sbi);
+ if (err)
+ return err;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
+ GFP_KERNEL);
+ if (!nm_i->nat_bitmap_mir)
+ return -ENOMEM;
+#endif
+
+ return 0;
+}
+
+static int init_free_nid_cache(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ int i;
+
+ nm_i->free_nid_bitmap =
+ f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
+ nm_i->nat_blocks),
+ GFP_KERNEL);
+ if (!nm_i->free_nid_bitmap)
+ return -ENOMEM;
+
+ for (i = 0; i < nm_i->nat_blocks; i++) {
+ nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
+ f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
+ if (!nm_i->free_nid_bitmap[i])
+ return -ENOMEM;
+ }
+
+ nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
+ GFP_KERNEL);
+ if (!nm_i->nat_block_bitmap)
+ return -ENOMEM;
+
+ nm_i->free_nid_count =
+ f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
+ nm_i->nat_blocks),
+ GFP_KERNEL);
+ if (!nm_i->free_nid_count)
+ return -ENOMEM;
return 0;
}
-int build_node_manager(struct f2fs_sb_info *sbi)
+int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
int err;
- sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
+ sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
+ GFP_KERNEL);
if (!sbi->nm_info)
return -ENOMEM;
@@ -1735,15 +3385,23 @@ int build_node_manager(struct f2fs_sb_info *sbi)
if (err)
return err;
- build_free_nids(sbi);
- return 0;
+ err = init_free_nid_cache(sbi);
+ if (err)
+ return err;
+
+ /* load free nid status from nat_bits table */
+ load_free_nid_bitmap(sbi);
+
+ return f2fs_build_free_nids(sbi, true, true);
}
-void destroy_node_manager(struct f2fs_sb_info *sbi)
+void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *next_i;
- struct nat_entry *natvec[NATVEC_SIZE];
+ void *vec[NAT_VEC_SIZE];
+ struct nat_entry **natvec = (struct nat_entry **)vec;
+ struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
nid_t nid = 0;
unsigned int found;
@@ -1751,52 +3409,108 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
return;
/* destroy free nid list */
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
- BUG_ON(i->state == NID_ALLOC);
- __del_from_free_nid_list(i);
- nm_i->fcnt--;
+ __remove_free_nid(sbi, i, FREE_NID);
+ spin_unlock(&nm_i->nid_list_lock);
+ kmem_cache_free(free_nid_slab, i);
+ spin_lock(&nm_i->nid_list_lock);
}
- BUG_ON(nm_i->fcnt);
- spin_unlock(&nm_i->free_nid_list_lock);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
+ f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
+ spin_unlock(&nm_i->nid_list_lock);
/* destroy nat cache */
- write_lock(&nm_i->nat_tree_lock);
+ f2fs_down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_cache(nm_i,
- nid, NATVEC_SIZE, natvec))) {
+ nid, NAT_VEC_SIZE, natvec))) {
+ unsigned idx;
+
+ nid = nat_get_nid(natvec[found - 1]) + 1;
+ for (idx = 0; idx < found; idx++) {
+ spin_lock(&nm_i->nat_list_lock);
+ list_del(&natvec[idx]->list);
+ spin_unlock(&nm_i->nat_list_lock);
+
+ __del_from_nat_cache(nm_i, natvec[idx]);
+ }
+ }
+ f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
+
+ /* destroy nat set cache */
+ nid = 0;
+ memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
+ while ((found = __gang_lookup_nat_set(nm_i,
+ nid, NAT_VEC_SIZE, setvec))) {
unsigned idx;
+
+ nid = setvec[found - 1]->set + 1;
for (idx = 0; idx < found; idx++) {
- struct nat_entry *e = natvec[idx];
- nid = nat_get_nid(e) + 1;
- __del_from_nat_cache(nm_i, e);
+			/* entry_cnt is not zero when a cp_error has occurred */
+ f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
+ radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
+ kmem_cache_free(nat_entry_set_slab, setvec[idx]);
}
}
- BUG_ON(nm_i->nat_cnt);
- write_unlock(&nm_i->nat_tree_lock);
+ f2fs_up_write(&nm_i->nat_tree_lock);
+
+ kvfree(nm_i->nat_block_bitmap);
+ if (nm_i->free_nid_bitmap) {
+ int i;
+
+ for (i = 0; i < nm_i->nat_blocks; i++)
+ kvfree(nm_i->free_nid_bitmap[i]);
+ kvfree(nm_i->free_nid_bitmap);
+ }
+ kvfree(nm_i->free_nid_count);
kfree(nm_i->nat_bitmap);
+ kvfree(nm_i->nat_bits);
+#ifdef CONFIG_F2FS_CHECK_FS
+ kfree(nm_i->nat_bitmap_mir);
+#endif
sbi->nm_info = NULL;
kfree(nm_i);
}
-int __init create_node_manager_caches(void)
+int __init f2fs_create_node_manager_caches(void)
{
- nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
- sizeof(struct nat_entry), NULL);
+ nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
+ sizeof(struct nat_entry));
if (!nat_entry_slab)
- return -ENOMEM;
+ goto fail;
- free_nid_slab = f2fs_kmem_cache_create("free_nid",
- sizeof(struct free_nid), NULL);
- if (!free_nid_slab) {
- kmem_cache_destroy(nat_entry_slab);
- return -ENOMEM;
- }
+ free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
+ sizeof(struct free_nid));
+ if (!free_nid_slab)
+ goto destroy_nat_entry;
+
+ nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
+ sizeof(struct nat_entry_set));
+ if (!nat_entry_set_slab)
+ goto destroy_free_nid;
+
+ fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
+ sizeof(struct fsync_node_entry));
+ if (!fsync_node_entry_slab)
+ goto destroy_nat_entry_set;
return 0;
+
+destroy_nat_entry_set:
+ kmem_cache_destroy(nat_entry_set_slab);
+destroy_free_nid:
+ kmem_cache_destroy(free_nid_slab);
+destroy_nat_entry:
+ kmem_cache_destroy(nat_entry_slab);
+fail:
+ return -ENOMEM;
}
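/*
 * Editor's sketch, not part of the patch: the unwind idiom used by
 * f2fs_create_node_manager_caches() above, reduced to two resources.
 * Each label undoes only what was created before the jump to it, in
 * reverse creation order. The cache names and sizes are made up.
 */
static int create_two_caches_sketch(void)
{
	struct kmem_cache *a, *b;

	a = f2fs_kmem_cache_create("sketch_a", 32);
	if (!a)
		goto fail;

	b = f2fs_kmem_cache_create("sketch_b", 64);
	if (!b)
		goto destroy_a;

	/* success: both caches stay alive for the caller to use */
	return 0;

destroy_a:
	kmem_cache_destroy(a);
fail:
	return -ENOMEM;
}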
-void destroy_node_manager_caches(void)
+void f2fs_destroy_node_manager_caches(void)
{
+ kmem_cache_destroy(fsync_node_entry_slab);
+ kmem_cache_destroy(nat_entry_set_slab);
kmem_cache_destroy(free_nid_slab);
kmem_cache_destroy(nat_entry_slab);
}