author     Linus Torvalds <torvalds@linux-foundation.org>  2017-09-12 20:05:58 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-09-12 20:05:58 -0700
commit     6d8ef53e8b2fed8b0f91df0c6da7cc92747d934a (patch)
tree       fabe39021985b631ea5d457126e9ed3d53e910f0  /fs/f2fs/recovery.c
parent     cdb897e3279ad1677138d6bdf1cfaf1393718a08 (diff)
parent     e6c6de18f010d9a7d592f4044d2c30213cb3a7bc (diff)
Merge tag 'f2fs-for-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
Pull f2fs updates from Jaegeuk Kim:
 "In this round, we've mostly tuned f2fs to provide better user
  experience for Android. Especially, we've worked on atomic write
  feature again with SQLite community in order to support it
  officially. And we added or modified several facilities to analyze
  and enhance IO behaviors.

  Major changes include:

   - add app/fs io stat
   - add inode checksum feature
   - support project/journalled quota
   - enhance atomic write with new ioctl() which exposes feature set
   - enhance background gc/discard/fstrim flows with new gc_urgent mode
   - add F2FS_IOC_FS{GET,SET}XATTR
   - fix some quota flows"

* tag 'f2fs-for-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (63 commits)
  f2fs: hurry up to issue discard after io interruption
  f2fs: fix to show correct discard_granularity in sysfs
  f2fs: detect dirty inode in evict_inode
  f2fs: clear radix tree dirty tag of pages whose dirty flag is cleared
  f2fs: speed up gc_urgent mode with SSR
  f2fs: better to wait for fstrim completion
  f2fs: avoid race in between read xattr & write xattr
  f2fs: make get_lock_data_page to handle encrypted inode
  f2fs: use generic terms used for encrypted block management
  f2fs: introduce f2fs_encrypted_file for clean-up
  Revert "f2fs: add a new function get_ssr_cost"
  f2fs: constify super_operations
  f2fs: fix to wake up all sleeping flusher
  f2fs: avoid race in between atomic_read & atomic_inc
  f2fs: remove unneeded parameter of change_curseg
  f2fs: update i_flags correctly
  f2fs: don't check inode's checksum if it was dirtied or writebacked
  f2fs: don't need to update inode checksum for recovery
  f2fs: trigger fdatasync for non-atomic_write file
  f2fs: fix to avoid race in between aio and gc
  ...
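The project/journalled quota item above pairs with the new F2FS_IOC_FS{GET,SET}XATTR ioctls. As an illustration only (not code from this pull), here is a minimal userspace sketch that reads and optionally changes a file's project ID via the generic FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR interface from <linux/fs.h>; the assumption, to be verified against fs/f2fs/f2fs.h, is that the f2fs-specific ioctl names accept the same struct fsxattr. The command-line handling is purely hypothetical.

/*
 * Hypothetical helper: query and set the project quota ID of a file.
 * Assumes the f2fs ioctls mirror the generic fsxattr interface.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* struct fsxattr, FS_IOC_FS{GET,SET}XATTR */

int main(int argc, char **argv)
{
	struct fsxattr fsx;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file> [projid]\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* read the current extended attributes, including fsx_projid */
	if (ioctl(fd, FS_IOC_FSGETXATTR, &fsx) < 0) {
		perror("FS_IOC_FSGETXATTR");
		return 1;
	}
	printf("current project id: %u\n", fsx.fsx_projid);

	/* optionally assign a new project id (needs ownership or CAP_SYS_ADMIN) */
	if (argc > 2) {
		fsx.fsx_projid = (unsigned int)strtoul(argv[2], NULL, 0);
		if (ioctl(fd, FS_IOC_FSSETXATTR, &fsx) < 0) {
			perror("FS_IOC_FSSETXATTR");
			return 1;
		}
	}
	return 0;
}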
Diffstat (limited to 'fs/f2fs/recovery.c')
-rw-r--r--  fs/f2fs/recovery.c  83
1 file changed, 69 insertions(+), 14 deletions(-)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 907d6b7dde6a..9626758bc762 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -69,20 +69,34 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
}
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
- struct list_head *head, nid_t ino)
+ struct list_head *head, nid_t ino, bool quota_inode)
{
struct inode *inode;
struct fsync_inode_entry *entry;
+ int err;
inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
+ err = dquot_initialize(inode);
+ if (err)
+ goto err_out;
+
+ if (quota_inode) {
+ err = dquot_alloc_inode(inode);
+ if (err)
+ goto err_out;
+ }
+
entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
entry->inode = inode;
list_add_tail(&entry->list, head);
return entry;
+err_out:
+ iput(inode);
+ return ERR_PTR(err);
}
static void del_fsync_inode(struct fsync_inode_entry *entry)
@@ -107,7 +121,8 @@ static int recover_dentry(struct inode *inode, struct page *ipage,
entry = get_fsync_inode(dir_list, pino);
if (!entry) {
- entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino);
+ entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
+ pino, false);
if (IS_ERR(entry)) {
dir = ERR_CAST(entry);
err = PTR_ERR(entry);
@@ -140,6 +155,13 @@ retry:
err = -EEXIST;
goto out_unmap_put;
}
+
+ err = dquot_initialize(einode);
+ if (err) {
+ iput(einode);
+ goto out_unmap_put;
+ }
+
err = acquire_orphan_inode(F2FS_I_SB(inode));
if (err) {
iput(einode);
@@ -226,18 +248,22 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
entry = get_fsync_inode(head, ino_of_node(page));
if (!entry) {
+ bool quota_inode = false;
+
if (!check_only &&
IS_INODE(page) && is_dent_dnode(page)) {
err = recover_inode_page(sbi, page);
if (err)
break;
+ quota_inode = true;
}
/*
* CP | dnode(F) | inode(DF)
* For this case, we should not give up now.
*/
- entry = add_fsync_inode(sbi, head, ino_of_node(page));
+ entry = add_fsync_inode(sbi, head, ino_of_node(page),
+ quota_inode);
if (IS_ERR(entry)) {
err = PTR_ERR(entry);
if (err == -ENOENT) {
@@ -291,7 +317,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
return 0;
/* Get the previous summary */
- for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
struct curseg_info *curseg = CURSEG_I(sbi, i);
if (curseg->segno == segno) {
sum = curseg->sum_blk->entries[blkoff];
@@ -328,10 +354,18 @@ got_it:
f2fs_put_page(node_page, 1);
if (ino != dn->inode->i_ino) {
+ int ret;
+
/* Deallocate previous index in the node page */
inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode))
return PTR_ERR(inode);
+
+ ret = dquot_initialize(inode);
+ if (ret) {
+ iput(inode);
+ return ret;
+ }
} else {
inode = dn->inode;
}
@@ -361,7 +395,8 @@ out:
return 0;
truncate_out:
- if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
+ if (datablock_addr(tdn.inode, tdn.node_page,
+ tdn.ofs_in_node) == blkaddr)
truncate_data_blocks_range(&tdn, 1);
if (dn->inode->i_ino == nid && !dn->inode_page_locked)
unlock_page(dn->inode_page);
@@ -414,8 +449,8 @@ retry_dn:
for (; start < end; start++, dn.ofs_in_node++) {
block_t src, dest;
- src = datablock_addr(dn.node_page, dn.ofs_in_node);
- dest = datablock_addr(page, dn.ofs_in_node);
+ src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
+ dest = datablock_addr(dn.inode, page, dn.ofs_in_node);
/* skip recovering if dest is the same as src */
if (src == dest)
@@ -557,12 +592,27 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
struct list_head dir_list;
int err;
int ret = 0;
+ unsigned long s_flags = sbi->sb->s_flags;
bool need_writecp = false;
+ if (s_flags & MS_RDONLY) {
+ f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
+ sbi->sb->s_flags &= ~MS_RDONLY;
+ }
+
+#ifdef CONFIG_QUOTA
+ /* Needed for iput() to work correctly and not trash data */
+ sbi->sb->s_flags |= MS_ACTIVE;
+ /* Turn on quotas so that they are updated correctly */
+ f2fs_enable_quota_files(sbi);
+#endif
+
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
sizeof(struct fsync_inode_entry));
- if (!fsync_entry_slab)
- return -ENOMEM;
+ if (!fsync_entry_slab) {
+ err = -ENOMEM;
+ goto out;
+ }
INIT_LIST_HEAD(&inode_list);
INIT_LIST_HEAD(&dir_list);
@@ -573,11 +623,11 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
/* step #1: find fsynced inode numbers */
err = find_fsync_dnodes(sbi, &inode_list, check_only);
if (err || list_empty(&inode_list))
- goto out;
+ goto skip;
if (check_only) {
ret = 1;
- goto out;
+ goto skip;
}
need_writecp = true;
@@ -586,7 +636,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
err = recover_data(sbi, &inode_list, &dir_list);
if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list));
-out:
+skip:
destroy_fsync_dnodes(&inode_list);
/* truncate meta pages to be used by the recovery */
@@ -599,8 +649,6 @@ out:
}
clear_sbi_flag(sbi, SBI_POR_DOING);
- if (err)
- set_ckpt_flags(sbi, CP_ERROR_FLAG);
mutex_unlock(&sbi->cp_mutex);
/* let's drop all the directory inodes for clean checkpoint */
@@ -614,5 +662,12 @@ out:
}
kmem_cache_destroy(fsync_entry_slab);
+out:
+#ifdef CONFIG_QUOTA
+ /* Turn quotas off */
+ f2fs_quota_off_umount(sbi->sb);
+#endif
+ sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+
return ret ? ret: err;
}
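The recovery path above replays data that applications made durable before a crash; on Android the main producer of such updates is the atomic-write interface the merge message highlights. Below is a hedged userspace sketch of the start/commit sequence that interface is built around. There was no uapi f2fs header in this era, so the ioctl numbers are restated here from fs/f2fs/f2fs.h as remembered (F2FS_IOCTL_MAGIC 0xf5, commands 1 and 2) and should be checked against your kernel; the file path is illustrative.

/*
 * Sketch of an SQLite-style atomic write on f2fs.
 * Ioctl definitions restated locally; verify against fs/f2fs/f2fs.h.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)

int main(void)
{
	const char buf[] = "journal page payload";
	int fd = open("/mnt/f2fs/db-journal", O_RDWR | O_CREAT, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* 1. mark the file as an atomic-write target */
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0) {
		perror("F2FS_IOC_START_ATOMIC_WRITE");
		return 1;
	}

	/* 2. stage writes; they are held in memory, not written back yet */
	if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf)) {
		perror("pwrite");
		return 1;
	}

	/* 3. commit: the staged blocks reach disk as one crash-atomic unit */
	if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0) {
		perror("F2FS_IOC_COMMIT_ATOMIC_WRITE");
		return 1;
	}

	close(fd);
	return 0;
}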