Diffstat (limited to 'fs/btrfs/relocation.c')
-rw-r--r--  fs/btrfs/relocation.c | 1709
1 file changed, 800 insertions, 909 deletions
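The first hunks below replace the anonymous struct members of mapping_node and tree_block with a union over struct rb_simple_node, so the rb-tree helpers can take a single pointer (note the new rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node) call sites). The following is a minimal userspace sketch of that aliasing pattern, not the kernel code: struct rb_node is stubbed rather than taken from <linux/rbtree.h>, and rb_simple_node is mirrored from btrfs's misc helpers.

#include <assert.h>
#include <stdint.h>

struct rb_node { struct rb_node *left, *right; };	/* stub, not the kernel's */

/* Mirrors btrfs's rb_simple_node: an rb_node keyed by a u64 bytenr. */
struct rb_simple_node {
	struct rb_node rb_node;
	uint64_t bytenr;
};

struct mapping_node {
	union {
		/* Use rb_simple_node for search/insert */
		struct {
			struct rb_node rb_node;
			uint64_t bytenr;
		};
		struct rb_simple_node simple_node;
	};
	void *data;
};

int main(void)
{
	struct mapping_node node = { .bytenr = 4096 };

	/* Both views alias the same storage, so a helper can take
	 * &node.simple_node instead of separate (rb_node, bytenr) args. */
	assert(node.simple_node.bytenr == node.bytenr);
	assert((void *)&node.simple_node.rb_node == (void *)&node.rb_node);
	return 0;
}

Because both union views share the same initial layout, the existing field accesses (node->bytenr, node->rb_node) keep working unchanged.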
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 31ec4a7658ce..5bfefc3e9c06 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -36,6 +36,7 @@ #include "relocation.h" #include "super.h" #include "tree-checker.h" +#include "raid-stripe-tree.h" /* * Relocation overview @@ -89,10 +90,15 @@ * map address of tree root to tree */ struct mapping_node { - struct { - struct rb_node rb_node; - u64 bytenr; - }; /* Use rb_simle_node for search/insert */ + union { + /* Use rb_simple_node for search/insert */ + struct { + struct rb_node rb_node; + u64 bytenr; + }; + + struct rb_simple_node simple_node; + }; void *data; }; @@ -105,14 +111,19 @@ struct mapping_tree { * present a tree block to process */ struct tree_block { - struct { - struct rb_node rb_node; - u64 bytenr; - }; /* Use rb_simple_node for search/insert */ + union { + /* Use rb_simple_node for search/insert */ + struct { + struct rb_node rb_node; + u64 bytenr; + }; + + struct rb_simple_node simple_node; + }; u64 owner; struct btrfs_key key; - unsigned int level:8; - unsigned int key_ready:1; + u8 level; + bool key_ready; }; #define MAX_EXTENTS 128 @@ -122,6 +133,13 @@ struct file_extent_cluster { u64 end; u64 boundary[MAX_EXTENTS]; unsigned int nr; + u64 owning_root; +}; + +/* Stages of data relocation. */ +enum reloc_stage { + MOVE_DATA_EXTENTS, + UPDATE_DATA_PTRS }; struct reloc_control { @@ -155,16 +173,12 @@ struct reloc_control { u64 search_start; u64 extents_found; - unsigned int stage:8; - unsigned int create_reloc_tree:1; - unsigned int merge_reloc_tree:1; - unsigned int found_file_extent:1; + enum reloc_stage stage; + bool create_reloc_tree; + bool merge_reloc_tree; + bool found_file_extent; }; -/* stages of data relocation */ -#define MOVE_DATA_EXTENTS 0 -#define UPDATE_DATA_PTRS 1 - static void mark_block_processed(struct reloc_control *rc, struct btrfs_backref_node *node) { @@ -174,19 +188,13 @@ static void mark_block_processed(struct reloc_control *rc, in_range(node->bytenr, rc->block_group->start, rc->block_group->length)) { blocksize = rc->extent_root->fs_info->nodesize; - set_extent_bits(&rc->processed_blocks, node->bytenr, - node->bytenr + blocksize - 1, EXTENT_DIRTY); + btrfs_set_extent_bit(&rc->processed_blocks, node->bytenr, + node->bytenr + blocksize - 1, EXTENT_DIRTY, + NULL); } node->processed = 1; } - -static void mapping_tree_init(struct mapping_tree *tree) -{ - tree->rb_root = RB_ROOT; - spin_lock_init(&tree->lock); -} - /* * walk up backref nodes until reach node presents tree root */ @@ -198,8 +206,8 @@ static struct btrfs_backref_node *walk_up_backref( int idx = *index; while (!list_empty(&node->upper)) { - edge = list_entry(node->upper.next, - struct btrfs_backref_edge, list[LOWER]); + edge = list_first_entry(&node->upper, struct btrfs_backref_edge, + list[LOWER]); edges[idx++] = edge; node = edge->node[UPPER]; } @@ -225,8 +233,8 @@ static struct btrfs_backref_node *walk_down_backref( idx--; continue; } - edge = list_entry(edge->list[LOWER].next, - struct btrfs_backref_edge, list[LOWER]); + edge = list_first_entry(&edge->list[LOWER], struct btrfs_backref_edge, + list[LOWER]); edges[idx - 1] = edge; *index = idx; return edge->node[UPPER]; @@ -235,71 +243,7 @@ static struct btrfs_backref_node *walk_down_backref( return NULL; } -static void update_backref_node(struct btrfs_backref_cache *cache, - struct btrfs_backref_node *node, u64 bytenr) -{ - struct rb_node *rb_node; - rb_erase(&node->rb_node, &cache->rb_root); - node->bytenr = bytenr; - rb_node = rb_simple_insert(&cache->rb_root, 
node->bytenr, &node->rb_node); - if (rb_node) - btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST); -} - -/* - * update backref cache after a transaction commit - */ -static int update_backref_cache(struct btrfs_trans_handle *trans, - struct btrfs_backref_cache *cache) -{ - struct btrfs_backref_node *node; - int level = 0; - - if (cache->last_trans == 0) { - cache->last_trans = trans->transid; - return 0; - } - - if (cache->last_trans == trans->transid) - return 0; - - /* - * detached nodes are used to avoid unnecessary backref - * lookup. transaction commit changes the extent tree. - * so the detached nodes are no longer useful. - */ - while (!list_empty(&cache->detached)) { - node = list_entry(cache->detached.next, - struct btrfs_backref_node, list); - btrfs_backref_cleanup_node(cache, node); - } - - while (!list_empty(&cache->changed)) { - node = list_entry(cache->changed.next, - struct btrfs_backref_node, list); - list_del_init(&node->list); - BUG_ON(node->pending); - update_backref_node(cache, node, node->new_bytenr); - } - - /* - * some nodes can be left in the pending list if there were - * errors during processing the pending nodes. - */ - for (level = 0; level < BTRFS_MAX_LEVEL; level++) { - list_for_each_entry(node, &cache->pending[level], list) { - BUG_ON(!node->pending); - if (node->bytenr == node->new_bytenr) - continue; - update_backref_node(cache, node, node->new_bytenr); - } - } - - cache->last_trans = 0; - return 1; -} - -static bool reloc_root_is_dead(struct btrfs_root *root) +static bool reloc_root_is_dead(const struct btrfs_root *root) { /* * Pair with set_bit/clear_bit in clean_dirty_subvols and @@ -320,7 +264,7 @@ static bool reloc_root_is_dead(struct btrfs_root *root) * from no reloc root. But btrfs_should_ignore_reloc_root() below is a * special case. */ -static bool have_reloc_root(struct btrfs_root *root) +static bool have_reloc_root(const struct btrfs_root *root) { if (reloc_root_is_dead(root)) return false; @@ -329,31 +273,30 @@ static bool have_reloc_root(struct btrfs_root *root) return true; } -int btrfs_should_ignore_reloc_root(struct btrfs_root *root) +bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root) { struct btrfs_root *reloc_root; if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) - return 0; + return false; /* This root has been merged with its reloc tree, we can ignore it */ if (reloc_root_is_dead(root)) - return 1; + return true; reloc_root = root->reloc_root; if (!reloc_root) - return 0; + return false; if (btrfs_header_generation(reloc_root->commit_root) == root->fs_info->running_transaction->transid) - return 0; + return false; /* - * if there is reloc tree and it was created in previous - * transaction backref lookup can find the reloc tree, - * so backref node for the fs tree root is useless for - * relocation. + * If there is reloc tree and it was created in previous transaction + * backref lookup can find the reloc tree, so backref node for the fs + * tree root is useless for relocation. 
*/ - return 1; + return true; } /* @@ -410,19 +353,13 @@ static bool handle_useless_nodes(struct reloc_control *rc, if (cur == node) ret = true; - /* The node is the lowest node */ - if (cur->lowest) { - list_del_init(&cur->lower); - cur->lowest = 0; - } - /* Cleanup the lower edges */ while (!list_empty(&cur->lower)) { struct btrfs_backref_edge *edge; struct btrfs_backref_node *lower; - edge = list_entry(cur->lower.next, - struct btrfs_backref_edge, list[UPPER]); + edge = list_first_entry(&cur->lower, struct btrfs_backref_edge, + list[UPPER]); list_del(&edge->list[UPPER]); list_del(&edge->list[LOWER]); lower = edge->node[LOWER]; @@ -441,7 +378,6 @@ static bool handle_useless_nodes(struct reloc_control *rc, * cache to avoid unnecessary backref lookup. */ if (cur->level > 0) { - list_add(&cur->list, &cache->detached); cur->detached = 1; } else { rb_erase(&cur->rb_node, &cache->rb_root); @@ -466,6 +402,7 @@ static bool handle_useless_nodes(struct reloc_control *rc, * cached. */ static noinline_for_stack struct btrfs_backref_node *build_backref_tree( + struct btrfs_trans_handle *trans, struct reloc_control *rc, struct btrfs_key *node_key, int level, u64 bytenr) { @@ -477,34 +414,31 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree( struct btrfs_backref_node *node = NULL; struct btrfs_backref_edge *edge; int ret; - int err = 0; iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info); if (!iter) return ERR_PTR(-ENOMEM); path = btrfs_alloc_path(); if (!path) { - err = -ENOMEM; + ret = -ENOMEM; goto out; } node = btrfs_backref_alloc_node(cache, bytenr, level); if (!node) { - err = -ENOMEM; + ret = -ENOMEM; goto out; } - node->lowest = 1; cur = node; /* Breadth-first search to build backref cache */ do { - ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, - cur); - if (ret < 0) { - err = ret; + ret = btrfs_backref_add_tree_node(trans, cache, path, iter, + node_key, cur); + if (ret < 0) goto out; - } + edge = list_first_entry_or_null(&cache->pending_edge, struct btrfs_backref_edge, list[UPPER]); /* @@ -519,19 +453,18 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree( /* Finish the upper linkage of newly added edges/nodes */ ret = btrfs_backref_finish_upper_links(cache, node); - if (ret < 0) { - err = ret; + if (ret < 0) goto out; - } if (handle_useless_nodes(rc, node)) node = NULL; out: - btrfs_backref_iter_free(iter); + btrfs_free_path(iter->path); + kfree(iter); btrfs_free_path(path); - if (err) { + if (ret) { btrfs_backref_error_cleanup(cache, node); - return ERR_PTR(err); + return ERR_PTR(ret); } ASSERT(!node || !node->detached); ASSERT(list_empty(&cache->useless_node) && @@ -540,98 +473,9 @@ out: } /* - * helper to add backref node for the newly created snapshot. 
- * the backref node is created by cloning backref node that - * corresponds to root of source tree - */ -static int clone_backref_node(struct btrfs_trans_handle *trans, - struct reloc_control *rc, - struct btrfs_root *src, - struct btrfs_root *dest) -{ - struct btrfs_root *reloc_root = src->reloc_root; - struct btrfs_backref_cache *cache = &rc->backref_cache; - struct btrfs_backref_node *node = NULL; - struct btrfs_backref_node *new_node; - struct btrfs_backref_edge *edge; - struct btrfs_backref_edge *new_edge; - struct rb_node *rb_node; - - if (cache->last_trans > 0) - update_backref_cache(trans, cache); - - rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start); - if (rb_node) { - node = rb_entry(rb_node, struct btrfs_backref_node, rb_node); - if (node->detached) - node = NULL; - else - BUG_ON(node->new_bytenr != reloc_root->node->start); - } - - if (!node) { - rb_node = rb_simple_search(&cache->rb_root, - reloc_root->commit_root->start); - if (rb_node) { - node = rb_entry(rb_node, struct btrfs_backref_node, - rb_node); - BUG_ON(node->detached); - } - } - - if (!node) - return 0; - - new_node = btrfs_backref_alloc_node(cache, dest->node->start, - node->level); - if (!new_node) - return -ENOMEM; - - new_node->lowest = node->lowest; - new_node->checked = 1; - new_node->root = btrfs_grab_root(dest); - ASSERT(new_node->root); - - if (!node->lowest) { - list_for_each_entry(edge, &node->lower, list[UPPER]) { - new_edge = btrfs_backref_alloc_edge(cache); - if (!new_edge) - goto fail; - - btrfs_backref_link_edge(new_edge, edge->node[LOWER], - new_node, LINK_UPPER); - } - } else { - list_add_tail(&new_node->lower, &cache->leaves); - } - - rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr, - &new_node->rb_node); - if (rb_node) - btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST); - - if (!new_node->lowest) { - list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { - list_add_tail(&new_edge->list[LOWER], - &new_edge->node[LOWER]->upper); - } - } - return 0; -fail: - while (!list_empty(&new_node->lower)) { - new_edge = list_entry(new_node->lower.next, - struct btrfs_backref_edge, list[UPPER]); - list_del(&new_edge->list[UPPER]); - btrfs_backref_free_edge(cache, new_edge); - } - btrfs_backref_free_node(cache, new_node); - return -ENOMEM; -} - -/* * helper to add 'address of tree root -> reloc tree' mapping */ -static int __must_check __add_reloc_root(struct btrfs_root *root) +static int __add_reloc_root(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; struct rb_node *rb_node; @@ -646,8 +490,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root) node->data = root; spin_lock(&rc->reloc_root_tree.lock); - rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, - node->bytenr, &node->rb_node); + rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node); spin_unlock(&rc->reloc_root_tree.lock); if (rb_node) { btrfs_err(fs_info, @@ -668,7 +511,7 @@ static void __del_reloc_root(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; struct rb_node *rb_node; - struct mapping_node *node = NULL; + struct mapping_node AUTO_KFREE(node); struct reloc_control *rc = fs_info->reloc_ctl; bool put_ref = false; @@ -701,7 +544,6 @@ static void __del_reloc_root(struct btrfs_root *root) spin_unlock(&fs_info->trans_lock); if (put_ref) btrfs_put_root(root); - kfree(node); } /* @@ -730,8 +572,7 @@ static int __update_reloc_root(struct btrfs_root *root) spin_lock(&rc->reloc_root_tree.lock); node->bytenr 
= root->node->start; - rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, - node->bytenr, &node->rb_node); + rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node); spin_unlock(&rc->reloc_root_tree.lock); if (rb_node) btrfs_backref_panic(fs_info, node->bytenr, -EEXIST); @@ -744,10 +585,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *reloc_root; struct extent_buffer *eb; - struct btrfs_root_item *root_item; + struct btrfs_root_item AUTO_KFREE(root_item); struct btrfs_key root_key; int ret = 0; - bool must_abort = false; root_item = kmalloc(sizeof(*root_item), GFP_NOFS); if (!root_item) @@ -757,14 +597,32 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, root_key.type = BTRFS_ROOT_ITEM_KEY; root_key.offset = objectid; - if (root->root_key.objectid == objectid) { + if (btrfs_root_id(root) == objectid) { u64 commit_root_gen; + /* + * Relocation will wait for cleaner thread, and any half-dropped + * subvolume will be fully cleaned up at mount time. + * So here we shouldn't hit a subvolume with non-zero drop_progress. + * + * If this isn't the case, error out since it can make us attempt to + * drop references for extents that were already dropped before. + */ + if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) { + struct btrfs_key cpu_key; + + btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress); + btrfs_err(fs_info, + "cannot relocate partially dropped subvolume %llu, drop progress key " BTRFS_KEY_FMT, + objectid, BTRFS_KEY_FMT_VALUE(&cpu_key)); + return ERR_PTR(-EUCLEAN); + } + /* called by btrfs_init_reloc_root */ ret = btrfs_copy_root(trans, root, root->commit_root, &eb, BTRFS_TREE_RELOC_OBJECTID); if (ret) - goto fail; + return ERR_PTR(ret); /* * Set the last_snapshot field to the generation of the commit @@ -787,21 +645,20 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, ret = btrfs_copy_root(trans, root, root->node, &eb, BTRFS_TREE_RELOC_OBJECTID); if (ret) - goto fail; + return ERR_PTR(ret); } /* * We have changed references at this point, we must abort the - * transaction if anything fails. + * transaction if anything fails (i.e. 'goto abort'). 
*/ - must_abort = true; memcpy(root_item, &root->root_item, sizeof(*root_item)); btrfs_set_root_bytenr(root_item, eb->start); btrfs_set_root_level(root_item, btrfs_header_level(eb)); btrfs_set_root_generation(root_item, trans->transid); - if (root->root_key.objectid == objectid) { + if (btrfs_root_id(root) == objectid) { btrfs_set_root_refs(root_item, 0); memset(&root_item->drop_progress, 0, sizeof(struct btrfs_disk_key)); @@ -814,9 +671,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, ret = btrfs_insert_root(trans, fs_info->tree_root, &root_key, root_item); if (ret) - goto fail; - - kfree(root_item); + goto abort; reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key); if (IS_ERR(reloc_root)) { @@ -824,13 +679,11 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, goto abort; } set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); - reloc_root->last_trans = trans->transid; + btrfs_set_root_last_trans(reloc_root, trans->transid); return reloc_root; -fail: - kfree(root_item); + abort: - if (must_abort) - btrfs_abort_transaction(trans, ret); + btrfs_abort_transaction(trans, ret); return ERR_PTR(ret); } @@ -871,7 +724,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, */ if (root->reloc_root) { reloc_root = root->reloc_root; - reloc_root->last_trans = trans->transid; + btrfs_set_root_last_trans(reloc_root, trans->transid); return 0; } @@ -879,8 +732,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, * We are merging reloc roots, we do not need new reloc trees. Also * reloc trees never need their own reloc tree. */ - if (!rc->create_reloc_tree || - root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) + if (!rc->create_reloc_tree || btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) return 0; if (!trans->reloc_reserved) { @@ -888,7 +740,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, trans->block_rsv = rc->block_rsv; clear_rsv = 1; } - reloc_root = create_reloc_root(trans, root, root->root_key.objectid); + reloc_root = create_reloc_root(trans, root, btrfs_root_id(root)); if (clear_rsv) trans->block_rsv = rsv; if (IS_ERR(reloc_root)) @@ -930,7 +782,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, btrfs_grab_root(reloc_root); /* root->reloc_root will stay until current relocation finished */ - if (fs_info->reloc_ctl->merge_reloc_tree && + if (fs_info->reloc_ctl && fs_info->reloc_ctl->merge_reloc_tree && btrfs_root_refs(root_item) == 0) { set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); /* @@ -955,67 +807,13 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, } /* - * helper to find first cached inode with inode number >= objectid - * in a subvolume - */ -static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid) -{ - struct rb_node *node; - struct rb_node *prev; - struct btrfs_inode *entry; - struct inode *inode; - - spin_lock(&root->inode_lock); -again: - node = root->inode_tree.rb_node; - prev = NULL; - while (node) { - prev = node; - entry = rb_entry(node, struct btrfs_inode, rb_node); - - if (objectid < btrfs_ino(entry)) - node = node->rb_left; - else if (objectid > btrfs_ino(entry)) - node = node->rb_right; - else - break; - } - if (!node) { - while (prev) { - entry = rb_entry(prev, struct btrfs_inode, rb_node); - if (objectid <= btrfs_ino(entry)) { - node = prev; - break; - } - prev = rb_next(prev); - } - } - while (node) { - entry = rb_entry(node, struct btrfs_inode, rb_node); - inode = igrab(&entry->vfs_inode); - if (inode) { 
- spin_unlock(&root->inode_lock); - return inode; - } - - objectid = btrfs_ino(entry) + 1; - if (cond_resched_lock(&root->inode_lock)) - goto again; - - node = rb_next(node); - } - spin_unlock(&root->inode_lock); - return NULL; -} - -/* * get new location of data */ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, u64 bytenr, u64 num_bytes) { struct btrfs_root *root = BTRFS_I(reloc_inode)->root; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_file_extent_item *fi; struct extent_buffer *leaf; int ret; @@ -1024,15 +822,13 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, if (!path) return -ENOMEM; - bytenr -= BTRFS_I(reloc_inode)->index_cnt; + bytenr -= BTRFS_I(reloc_inode)->reloc_block_group_start; ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0); if (ret < 0) - goto out; - if (ret > 0) { - ret = -ENOENT; - goto out; - } + return ret; + if (ret > 0) + return -ENOENT; leaf = path->nodes[0]; fi = btrfs_item_ptr(leaf, path->slots[0], @@ -1043,16 +839,11 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, btrfs_file_extent_encryption(leaf, fi) || btrfs_file_extent_other_encoding(leaf, fi)); - if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) { - ret = -EINVAL; - goto out; - } + if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) + return -EINVAL; *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); - ret = 0; -out: - btrfs_free_path(path); - return ret; + return 0; } /* @@ -1068,7 +859,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_key key; struct btrfs_file_extent_item *fi; - struct inode *inode = NULL; + struct btrfs_inode *inode = NULL; u64 parent; u64 bytenr; u64 new_bytenr = 0; @@ -1078,13 +869,12 @@ int replace_file_extents(struct btrfs_trans_handle *trans, u32 i; int ret = 0; int first = 1; - int dirty = 0; if (rc->stage != UPDATE_DATA_PTRS) return 0; /* reloc trees always use full backref */ - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) + if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) parent = leaf->start; else parent = 0; @@ -1113,15 +903,15 @@ int replace_file_extents(struct btrfs_trans_handle *trans, * if we are modifying block in fs tree, wait for read_folio * to complete and drop the extent cache */ - if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { + if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) { if (first) { - inode = find_next_inode(root, key.objectid); + inode = btrfs_find_first_inode(root, key.objectid); first = 0; - } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) { - btrfs_add_delayed_iput(BTRFS_I(inode)); - inode = find_next_inode(root, key.objectid); + } else if (inode && btrfs_ino(inode) < key.objectid) { + btrfs_add_delayed_iput(inode); + inode = btrfs_find_first_inode(root, key.objectid); } - if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) { + if (inode && btrfs_ino(inode) == key.objectid) { struct extent_state *cached_state = NULL; end = key.offset + @@ -1130,16 +920,20 @@ int replace_file_extents(struct btrfs_trans_handle *trans, fs_info->sectorsize)); WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); end--; - ret = try_lock_extent(&BTRFS_I(inode)->io_tree, - key.offset, end, - &cached_state); - if (!ret) + /* Take mmap lock to serialize with reflinks. 
*/ + if (!down_read_trylock(&inode->i_mmap_lock)) + continue; + ret = btrfs_try_lock_extent(&inode->io_tree, key.offset, + end, &cached_state); + if (!ret) { + up_read(&inode->i_mmap_lock); continue; + } - btrfs_drop_extent_map_range(BTRFS_I(inode), - key.offset, end, true); - unlock_extent(&BTRFS_I(inode)->io_tree, - key.offset, end, &cached_state); + btrfs_drop_extent_map_range(inode, key.offset, end, true); + btrfs_unlock_extent(&inode->io_tree, key.offset, end, + &cached_state); + up_read(&inode->i_mmap_lock); } } @@ -1154,41 +948,44 @@ int replace_file_extents(struct btrfs_trans_handle *trans, } btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr); - dirty = 1; key.offset -= btrfs_file_extent_offset(leaf, fi); - btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr, - num_bytes, parent); - btrfs_init_data_ref(&ref, btrfs_header_owner(leaf), - key.objectid, key.offset, - root->root_key.objectid, false); + ref.action = BTRFS_ADD_DELAYED_REF; + ref.bytenr = new_bytenr; + ref.num_bytes = num_bytes; + ref.parent = parent; + ref.owning_root = btrfs_root_id(root); + ref.ref_root = btrfs_header_owner(leaf); + btrfs_init_data_ref(&ref, key.objectid, key.offset, + btrfs_root_id(root), false); ret = btrfs_inc_extent_ref(trans, &ref); - if (ret) { + if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); break; } - btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr, - num_bytes, parent); - btrfs_init_data_ref(&ref, btrfs_header_owner(leaf), - key.objectid, key.offset, - root->root_key.objectid, false); + ref.action = BTRFS_DROP_DELAYED_REF; + ref.bytenr = bytenr; + ref.num_bytes = num_bytes; + ref.parent = parent; + ref.owning_root = btrfs_root_id(root); + ref.ref_root = btrfs_header_owner(leaf); + btrfs_init_data_ref(&ref, key.objectid, key.offset, + btrfs_root_id(root), false); ret = btrfs_free_extent(trans, &ref); - if (ret) { + if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); break; } } - if (dirty) - btrfs_mark_buffer_dirty(leaf); if (inode) - btrfs_add_delayed_iput(BTRFS_I(inode)); + btrfs_add_delayed_iput(inode); return ret; } -static noinline_for_stack -int memcmp_node_keys(struct extent_buffer *eb, int slot, - struct btrfs_path *path, int level) +static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb, + int slot, const struct btrfs_path *path, + int level) { struct btrfs_disk_key key1; struct btrfs_disk_key key2; @@ -1228,8 +1025,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc, int ret; int slot; - ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); - ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); + ASSERT(btrfs_root_id(src) == BTRFS_TREE_RELOC_OBJECTID); + ASSERT(btrfs_root_id(dest) != BTRFS_TREE_RELOC_OBJECTID); last_snapshot = btrfs_root_last_snapshot(&src->root_item); again: @@ -1266,7 +1063,7 @@ again: level = btrfs_header_level(parent); ASSERT(level >= lowest_level); - ret = btrfs_bin_search(parent, &key, &slot); + ret = btrfs_bin_search(parent, 0, &key, &slot); if (ret < 0) break; if (ret && slot > 0) @@ -1362,7 +1159,7 @@ again: * The real subtree rescan is delayed until we have new * CoW on the subtree root node before transaction commit. 
*/ - ret = btrfs_qgroup_add_swapped_blocks(trans, dest, + ret = btrfs_qgroup_add_swapped_blocks(dest, rc->block_group, parent, slot, path->nodes[level], path->slots[level], last_snapshot); @@ -1373,49 +1170,62 @@ again: */ btrfs_set_node_blockptr(parent, slot, new_bytenr); btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen); - btrfs_mark_buffer_dirty(parent); btrfs_set_node_blockptr(path->nodes[level], path->slots[level], old_bytenr); btrfs_set_node_ptr_generation(path->nodes[level], path->slots[level], old_ptr_gen); - btrfs_mark_buffer_dirty(path->nodes[level]); - btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr, - blocksize, path->nodes[level]->start); - btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid, - 0, true); + ref.action = BTRFS_ADD_DELAYED_REF; + ref.bytenr = old_bytenr; + ref.num_bytes = blocksize; + ref.parent = path->nodes[level]->start; + ref.owning_root = btrfs_root_id(src); + ref.ref_root = btrfs_root_id(src); + btrfs_init_tree_ref(&ref, level - 1, 0, true); ret = btrfs_inc_extent_ref(trans, &ref); - if (ret) { + if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); break; } - btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr, - blocksize, 0); - btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0, - true); + + ref.action = BTRFS_ADD_DELAYED_REF; + ref.bytenr = new_bytenr; + ref.num_bytes = blocksize; + ref.parent = 0; + ref.owning_root = btrfs_root_id(dest); + ref.ref_root = btrfs_root_id(dest); + btrfs_init_tree_ref(&ref, level - 1, 0, true); ret = btrfs_inc_extent_ref(trans, &ref); - if (ret) { + if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); break; } - btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr, - blocksize, path->nodes[level]->start); - btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid, - 0, true); + /* We don't know the real owning_root, use 0. */ + ref.action = BTRFS_DROP_DELAYED_REF; + ref.bytenr = new_bytenr; + ref.num_bytes = blocksize; + ref.parent = path->nodes[level]->start; + ref.owning_root = 0; + ref.ref_root = btrfs_root_id(src); + btrfs_init_tree_ref(&ref, level - 1, 0, true); ret = btrfs_free_extent(trans, &ref); - if (ret) { + if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); break; } - btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr, - blocksize, 0); - btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, - 0, true); + /* We don't know the real owning_root, use 0. 
*/ + ref.action = BTRFS_DROP_DELAYED_REF; + ref.bytenr = old_bytenr; + ref.num_bytes = blocksize; + ref.parent = 0; + ref.owning_root = 0; + ref.ref_root = btrfs_root_id(dest); + btrfs_init_tree_ref(&ref, level - 1, 0, true); ret = btrfs_free_extent(trans, &ref); - if (ret) { + if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); break; } @@ -1517,11 +1327,11 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, * [min_key, max_key) */ static int invalidate_extent_cache(struct btrfs_root *root, - struct btrfs_key *min_key, - struct btrfs_key *max_key) + const struct btrfs_key *min_key, + const struct btrfs_key *max_key) { struct btrfs_fs_info *fs_info = root->fs_info; - struct inode *inode = NULL; + struct btrfs_inode *inode = NULL; u64 objectid; u64 start, end; u64 ino; @@ -1531,23 +1341,24 @@ static int invalidate_extent_cache(struct btrfs_root *root, struct extent_state *cached_state = NULL; cond_resched(); - iput(inode); + if (inode) + iput(&inode->vfs_inode); if (objectid > max_key->objectid) break; - inode = find_next_inode(root, objectid); + inode = btrfs_find_first_inode(root, objectid); if (!inode) break; - ino = btrfs_ino(BTRFS_I(inode)); + ino = btrfs_ino(inode); if (ino > max_key->objectid) { - iput(inode); + iput(&inode->vfs_inode); break; } objectid = ino + 1; - if (!S_ISREG(inode->i_mode)) + if (!S_ISREG(inode->vfs_inode.i_mode)) continue; if (unlikely(min_key->objectid == ino)) { @@ -1580,9 +1391,9 @@ static int invalidate_extent_cache(struct btrfs_root *root, } /* the lock_extent waits for read_folio to complete */ - lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); - btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true); - unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); + btrfs_lock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_drop_extent_map_range(inode, start, end, true); + btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state); } return 0; } @@ -1617,7 +1428,7 @@ static int insert_dirty_subvol(struct btrfs_trans_handle *trans, int ret; /* @root must be a subvolume tree root with a valid reloc tree */ - ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); + ASSERT(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID); ASSERT(reloc_root); reloc_root_item = &reloc_root->root_item; @@ -1646,7 +1457,7 @@ static int clean_dirty_subvols(struct reloc_control *rc) list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots, reloc_dirty_list) { - if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { + if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) { /* Merged subvolume, cleanup its reloc root */ struct btrfs_root *reloc_root = root->reloc_root; @@ -1664,7 +1475,7 @@ static int clean_dirty_subvols(struct reloc_control *rc) * ->reloc_root. If it fails however we must * drop the ref ourselves. 
*/ - ret2 = btrfs_drop_snapshot(reloc_root, 0, 1); + ret2 = btrfs_drop_snapshot(reloc_root, false, true); if (ret2 < 0) { btrfs_put_root(reloc_root); if (!ret) @@ -1674,7 +1485,7 @@ static int clean_dirty_subvols(struct reloc_control *rc) btrfs_put_root(root); } else { /* Orphan reloc tree, just clean it up */ - ret2 = btrfs_drop_snapshot(root, 0, 1); + ret2 = btrfs_drop_snapshot(root, false, true); if (ret2 < 0) { btrfs_put_root(root); if (!ret) @@ -1717,7 +1528,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { level = btrfs_root_level(root_item); - atomic_inc(&reloc_root->node->refs); + refcount_inc(&reloc_root->node->refs); path->nodes[level] = reloc_root->node; path->slots[level] = 0; } else { @@ -1775,7 +1586,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, * btrfs_update_reloc_root() and update our root item * appropriately. */ - reloc_root->last_trans = trans->transid; + btrfs_set_root_last_trans(reloc_root, trans->transid); trans->block_rsv = rc->block_rsv; replaced = 0; @@ -1896,11 +1707,11 @@ again: } } - rc->merge_reloc_tree = 1; + rc->merge_reloc_tree = true; while (!list_empty(&rc->reloc_roots)) { - reloc_root = list_entry(rc->reloc_roots.next, - struct btrfs_root, root_list); + reloc_root = list_first_entry(&rc->reloc_roots, + struct btrfs_root, root_list); list_del_init(&reloc_root->root_list); root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, @@ -1916,7 +1727,39 @@ again: err = PTR_ERR(root); break; } - ASSERT(root->reloc_root == reloc_root); + + if (unlikely(root->reloc_root != reloc_root)) { + if (root->reloc_root) { + btrfs_err(fs_info, +"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu", + btrfs_root_id(root), + btrfs_root_id(root->reloc_root), + root->reloc_root->root_key.type, + root->reloc_root->root_key.offset, + btrfs_root_generation( + &root->reloc_root->root_item), + btrfs_root_id(reloc_root), + reloc_root->root_key.type, + reloc_root->root_key.offset, + btrfs_root_generation( + &reloc_root->root_item)); + } else { + btrfs_err(fs_info, +"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu", + btrfs_root_id(root), + btrfs_root_id(reloc_root), + reloc_root->root_key.type, + reloc_root->root_key.offset, + btrfs_root_generation( + &reloc_root->root_item)); + } + list_add(&reloc_root->root_list, &reloc_roots); + btrfs_put_root(root); + btrfs_abort_transaction(trans, -EUCLEAN); + if (!err) + err = -EUCLEAN; + break; + } /* * set reference count to 1, so btrfs_recover_relocation @@ -1933,7 +1776,7 @@ again: list_add(&reloc_root->root_list, &reloc_roots); btrfs_put_root(root); - if (ret) { + if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); if (!err) err = ret; @@ -1983,13 +1826,12 @@ again: while (!list_empty(&reloc_roots)) { found = 1; - reloc_root = list_entry(reloc_roots.next, - struct btrfs_root, root_list); + reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list); root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); if (btrfs_root_refs(&reloc_root->root_item) > 0) { - if (IS_ERR(root)) { + if (WARN_ON(IS_ERR(root))) { /* * For recovery we read the fs roots on mount, * and if we didn't find the root then we marked @@ -1998,17 +1840,14 @@ again: * memory. However there's no reason we can't * handle the error properly here just in case. 
*/ - ASSERT(0); ret = PTR_ERR(root); goto out; } - if (root->reloc_root != reloc_root) { + if (WARN_ON(root->reloc_root != reloc_root)) { /* - * This is actually impossible without something - * going really wrong (like weird race condition - * or cosmic rays). + * This can happen if on-disk metadata has some + * corruption, e.g. bad reloc tree key offset. */ - ASSERT(0); ret = -EINVAL; goto out; } @@ -2089,7 +1928,7 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root; int ret; - if (reloc_root->last_trans == trans->transid) + if (btrfs_get_root_last_trans(reloc_root) == trans->transid) return 0; root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); @@ -2103,11 +1942,11 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, * reloc root without a corresponding root this could return ENOENT. */ if (IS_ERR(root)) { - ASSERT(0); + DEBUG_WARN("error %ld reading root for reloc root", PTR_ERR(root)); return PTR_ERR(root); } - if (root->reloc_root != reloc_root) { - ASSERT(0); + if (unlikely(root->reloc_root != reloc_root)) { + DEBUG_WARN("unexpected reloc root found"); btrfs_err(fs_info, "root %llu has two reloc roots associated with it", reloc_root->root_key.offset); @@ -2131,100 +1970,72 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, int index = 0; int ret; - next = node; - while (1) { - cond_resched(); - next = walk_up_backref(next, edges, &index); - root = next->root; + next = walk_up_backref(node, edges, &index); + root = next->root; - /* - * If there is no root, then our references for this block are - * incomplete, as we should be able to walk all the way up to a - * block that is owned by a root. - * - * This path is only for SHAREABLE roots, so if we come upon a - * non-SHAREABLE root then we have backrefs that resolve - * improperly. - * - * Both of these cases indicate file system corruption, or a bug - * in the backref walking code. - */ - if (!root) { - ASSERT(0); - btrfs_err(trans->fs_info, - "bytenr %llu doesn't have a backref path ending in a root", - node->bytenr); - return ERR_PTR(-EUCLEAN); - } - if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { - ASSERT(0); - btrfs_err(trans->fs_info, - "bytenr %llu has multiple refs with one ending in a non-shareable root", - node->bytenr); - return ERR_PTR(-EUCLEAN); - } - - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { - ret = record_reloc_root_in_trans(trans, root); - if (ret) - return ERR_PTR(ret); - break; - } + /* + * If there is no root, then our references for this block are + * incomplete, as we should be able to walk all the way up to a block + * that is owned by a root. + * + * This path is only for SHAREABLE roots, so if we come upon a + * non-SHAREABLE root then we have backrefs that resolve improperly. + * + * Both of these cases indicate file system corruption, or a bug in the + * backref walking code. 
+ */ + if (unlikely(!root)) { + btrfs_err(trans->fs_info, + "bytenr %llu doesn't have a backref path ending in a root", + node->bytenr); + return ERR_PTR(-EUCLEAN); + } + if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) { + btrfs_err(trans->fs_info, + "bytenr %llu has multiple refs with one ending in a non-shareable root", + node->bytenr); + return ERR_PTR(-EUCLEAN); + } - ret = btrfs_record_root_in_trans(trans, root); + if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) { + ret = record_reloc_root_in_trans(trans, root); if (ret) return ERR_PTR(ret); - root = root->reloc_root; - - /* - * We could have raced with another thread which failed, so - * root->reloc_root may not be set, return ENOENT in this case. - */ - if (!root) - return ERR_PTR(-ENOENT); + goto found; + } - if (next->new_bytenr != root->node->start) { - /* - * We just created the reloc root, so we shouldn't have - * ->new_bytenr set and this shouldn't be in the changed - * list. If it is then we have multiple roots pointing - * at the same bytenr which indicates corruption, or - * we've made a mistake in the backref walking code. - */ - ASSERT(next->new_bytenr == 0); - ASSERT(list_empty(&next->list)); - if (next->new_bytenr || !list_empty(&next->list)) { - btrfs_err(trans->fs_info, - "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu", - node->bytenr, next->bytenr); - return ERR_PTR(-EUCLEAN); - } + ret = btrfs_record_root_in_trans(trans, root); + if (ret) + return ERR_PTR(ret); + root = root->reloc_root; - next->new_bytenr = root->node->start; - btrfs_put_root(next->root); - next->root = btrfs_grab_root(root); - ASSERT(next->root); - list_add_tail(&next->list, - &rc->backref_cache.changed); - mark_block_processed(rc, next); - break; - } + /* + * We could have raced with another thread which failed, so + * root->reloc_root may not be set, return ENOENT in this case. + */ + if (!root) + return ERR_PTR(-ENOENT); - WARN_ON(1); - root = NULL; - next = walk_down_backref(edges, &index); - if (!next || next->level <= node->level) - break; - } - if (!root) { + if (unlikely(next->new_bytenr)) { /* - * This can happen if there's fs corruption or if there's a bug - * in the backref lookup code. + * We just created the reloc root, so we shouldn't have + * ->new_bytenr set yet. If it is then we have multiple roots + * pointing at the same bytenr which indicates corruption, or + * we've made a mistake in the backref walking code. */ - ASSERT(0); - return ERR_PTR(-ENOENT); + ASSERT(next->new_bytenr == 0); + btrfs_err(trans->fs_info, + "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu", + node->bytenr, next->bytenr); + return ERR_PTR(-EUCLEAN); } + next->new_bytenr = root->node->start; + btrfs_put_root(next->root); + next->root = btrfs_grab_root(root); + ASSERT(next->root); + mark_block_processed(rc, next); +found: next = node; /* setup backref node path for btrfs_reloc_cow_block */ while (1) { @@ -2264,14 +2075,14 @@ struct btrfs_root *select_one_root(struct btrfs_backref_node *node) * This can occur if we have incomplete extent refs leading all * the way up a particular path, in this case return -EUCLEAN. 
*/ - if (!root) + if (unlikely(!root)) return ERR_PTR(-EUCLEAN); /* No other choice for non-shareable tree */ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) return root; - if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) + if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) fs_root = root; if (next != node) @@ -2287,9 +2098,8 @@ struct btrfs_root *select_one_root(struct btrfs_backref_node *node) return fs_root; } -static noinline_for_stack -u64 calcu_metadata_size(struct reloc_control *rc, - struct btrfs_backref_node *node, int reserve) +static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc, + struct btrfs_backref_node *node) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; struct btrfs_backref_node *next = node; @@ -2298,12 +2108,12 @@ u64 calcu_metadata_size(struct reloc_control *rc, u64 num_bytes = 0; int index = 0; - BUG_ON(reserve && node->processed); + BUG_ON(node->processed); while (next) { cond_resched(); while (1) { - if (next->processed && (reserve || next != node)) + if (next->processed) break; num_bytes += fs_info->nodesize; @@ -2311,8 +2121,8 @@ u64 calcu_metadata_size(struct reloc_control *rc, if (list_empty(&next->upper)) break; - edge = list_entry(next->upper.next, - struct btrfs_backref_edge, list[LOWER]); + edge = list_first_entry(&next->upper, struct btrfs_backref_edge, + list[LOWER]); edges[index++] = edge; next = edge->node[UPPER]; } @@ -2321,17 +2131,11 @@ u64 calcu_metadata_size(struct reloc_control *rc, return num_bytes; } -static int reserve_metadata_space(struct btrfs_trans_handle *trans, - struct reloc_control *rc, - struct btrfs_backref_node *node) +static int refill_metadata_space(struct btrfs_trans_handle *trans, + struct reloc_control *rc, u64 num_bytes) { - struct btrfs_root *root = rc->extent_root; - struct btrfs_fs_info *fs_info = root->fs_info; - u64 num_bytes; + struct btrfs_fs_info *fs_info = trans->fs_info; int ret; - u64 tmp; - - num_bytes = calcu_metadata_size(rc, node, 1) * 2; trans->block_rsv = rc->block_rsv; rc->reserved_bytes += num_bytes; @@ -2344,7 +2148,8 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans, ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes, BTRFS_RESERVE_FLUSH_LIMIT); if (ret) { - tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES; + u64 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES; + while (tmp <= rc->reserved_bytes) tmp <<= 1; /* @@ -2362,6 +2167,16 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans, return 0; } +static int reserve_metadata_space(struct btrfs_trans_handle *trans, + struct reloc_control *rc, + struct btrfs_backref_node *node) +{ + u64 num_bytes; + + num_bytes = calcu_metadata_size(rc, node) * 2; + return refill_metadata_space(trans, rc, num_bytes); +} + /* * relocate a block tree, and then update pointers in upper level * blocks that reference the block to point to the new location. 
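The reservation rework above splits reserve_metadata_space() so that refill_metadata_space() can also be called with an explicit byte count (relocate_cowonly_block() uses this later in the diff). On a failed refill, the fallback doubles the block reserve target until it exceeds what relocation has already consumed. A standalone sketch of just that sizing loop; the RELOCATION_RESERVED_NODES value is an assumption taken from relocation.c, and the helper name is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Value assumed from fs/btrfs/relocation.c. */
#define RELOCATION_RESERVED_NODES	256

static uint64_t fallback_rsv_size(uint64_t nodesize, uint64_t reserved_bytes)
{
	uint64_t tmp = nodesize * RELOCATION_RESERVED_NODES;

	/* Matches the visible loop: while (tmp <= rc->reserved_bytes) tmp <<= 1; */
	while (tmp <= reserved_bytes)
		tmp <<= 1;
	return tmp;
}

int main(void)
{
	/* 16K nodes with 8 MiB already reserved: 4 MiB -> 8 MiB -> 16 MiB. */
	printf("%llu\n", (unsigned long long)
	       fallback_rsv_size(16384, 8ULL * 1024 * 1024));
	return 0;
}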
@@ -2394,8 +2209,6 @@ static int do_relocation(struct btrfs_trans_handle *trans, path->lowest_level = node->level + 1; rc->backref_cache.path[node->level] = node; list_for_each_entry(edge, &node->upper, list[LOWER]) { - struct btrfs_ref ref = { 0 }; - cond_resched(); upper = edge->node[UPPER]; @@ -2407,7 +2220,7 @@ static int do_relocation(struct btrfs_trans_handle *trans, if (upper->eb && !upper->locked) { if (!lowest) { - ret = btrfs_bin_search(upper->eb, key, &slot); + ret = btrfs_bin_search(upper->eb, 0, key, &slot); if (ret < 0) goto next; BUG_ON(ret); @@ -2441,7 +2254,7 @@ static int do_relocation(struct btrfs_trans_handle *trans, slot = path->slots[upper->level]; btrfs_release_path(path); } else { - ret = btrfs_bin_search(upper->eb, key, &slot); + ret = btrfs_bin_search(upper->eb, 0, key, &slot); if (ret < 0) goto next; BUG_ON(ret); @@ -2449,7 +2262,7 @@ static int do_relocation(struct btrfs_trans_handle *trans, bytenr = btrfs_node_blockptr(upper->eb, slot); if (lowest) { - if (bytenr != node->bytenr) { + if (unlikely(bytenr != node->bytenr)) { btrfs_err(root->fs_info, "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", bytenr, node->bytenr, slot, @@ -2483,23 +2296,28 @@ static int do_relocation(struct btrfs_trans_handle *trans, */ ASSERT(node->eb == eb); } else { + struct btrfs_ref ref = { + .action = BTRFS_ADD_DELAYED_REF, + .bytenr = node->eb->start, + .num_bytes = blocksize, + .parent = upper->eb->start, + .owning_root = btrfs_header_owner(upper->eb), + .ref_root = btrfs_header_owner(upper->eb), + }; + btrfs_set_node_blockptr(upper->eb, slot, node->eb->start); btrfs_set_node_ptr_generation(upper->eb, slot, trans->transid); - btrfs_mark_buffer_dirty(upper->eb); + btrfs_mark_buffer_dirty(trans, upper->eb); - btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, - node->eb->start, blocksize, - upper->eb->start); btrfs_init_tree_ref(&ref, node->level, - btrfs_header_owner(upper->eb), - root->root_key.objectid, false); + btrfs_root_id(root), false); ret = btrfs_inc_extent_ref(trans, &ref); if (!ret) ret = btrfs_drop_subtree(trans, root, eb, upper->eb); - if (ret) + if (unlikely(ret)) btrfs_abort_transaction(trans, ret); } next: @@ -2513,7 +2331,7 @@ next: if (!ret && node->pending) { btrfs_backref_drop_node_buffer(node); - list_move_tail(&node->list, &rc->backref_cache.changed); + list_del_init(&node->list); node->pending = 0; } @@ -2550,8 +2368,8 @@ static int finish_pending_nodes(struct btrfs_trans_handle *trans, for (level = 0; level < BTRFS_MAX_LEVEL; level++) { while (!list_empty(&cache->pending[level])) { - node = list_entry(cache->pending[level].next, - struct btrfs_backref_node, list); + node = list_first_entry(&cache->pending[level], + struct btrfs_backref_node, list); list_move_tail(&node->list, &list); BUG_ON(!node->pending); @@ -2589,8 +2407,8 @@ static void update_processed_blocks(struct reloc_control *rc, if (list_empty(&next->upper)) break; - edge = list_entry(next->upper.next, - struct btrfs_backref_edge, list[LOWER]); + edge = list_first_entry(&next->upper, struct btrfs_backref_edge, + list[LOWER]); edges[index++] = edge; next = edge->node[UPPER]; } @@ -2602,8 +2420,8 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc) { u32 blocksize = rc->extent_root->fs_info->nodesize; - if (test_range_bit(&rc->processed_blocks, bytenr, - bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) + if (btrfs_test_range_bit(&rc->processed_blocks, bytenr, + bytenr + blocksize - 1, EXTENT_DIRTY, NULL)) return 1; return 0; } @@ -2621,7 +2439,7 @@ 
static int get_tree_block_key(struct btrfs_fs_info *fs_info, eb = read_tree_block(fs_info, block->bytenr, &check); if (IS_ERR(eb)) return PTR_ERR(eb); - if (!extent_buffer_uptodate(eb)) { + if (unlikely(!extent_buffer_uptodate(eb))) { free_extent_buffer(eb); return -EIO; } @@ -2630,7 +2448,7 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info, else btrfs_node_key_to_cpu(eb, &block->key, 0); free_extent_buffer(eb); - block->key_ready = 1; + block->key_ready = true; return 0; } @@ -2676,8 +2494,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans, /* * This block was the root block of a root, and this is * the first time we're processing the block and thus it - * should not have had the ->new_bytenr modified and - * should have not been included on the changed list. + * should not have had the ->new_bytenr modified. * * However in the case of corruption we could have * multiple refs pointing to the same block improperly, @@ -2687,8 +2504,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans, * normal user in the case of corruption. */ ASSERT(node->new_bytenr == 0); - ASSERT(list_empty(&node->list)); - if (node->new_bytenr || !list_empty(&node->list)) { + if (unlikely(node->new_bytenr)) { btrfs_err(root->fs_info, "bytenr %llu has improper references to it", node->bytenr); @@ -2711,17 +2527,12 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans, btrfs_put_root(node->root); node->root = btrfs_grab_root(root); ASSERT(node->root); - list_add_tail(&node->list, &rc->backref_cache.changed); } else { - path->lowest_level = node->level; - if (root == root->fs_info->chunk_root) - btrfs_reserve_chunk_metadata(trans, false); - ret = btrfs_search_slot(trans, root, key, path, 0, 1); - btrfs_release_path(path); - if (root == root->fs_info->chunk_root) - btrfs_trans_release_chunk_metadata(trans); - if (ret > 0) - ret = 0; + btrfs_err(root->fs_info, + "bytenr %llu resolved to a non-shareable root", + node->bytenr); + ret = -EUCLEAN; + goto out; } if (!ret) update_processed_blocks(rc, node); @@ -2729,11 +2540,50 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans, ret = do_relocation(trans, rc, node, key, path, 1); } out: - if (ret || node->level == 0 || node->cowonly) + if (ret || node->level == 0) btrfs_backref_cleanup_node(&rc->backref_cache, node); return ret; } +static int relocate_cowonly_block(struct btrfs_trans_handle *trans, + struct reloc_control *rc, struct tree_block *block, + struct btrfs_path *path) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_root *root; + u64 num_bytes; + int nr_levels; + int ret; + + root = btrfs_get_fs_root(fs_info, block->owner, true); + if (IS_ERR(root)) + return PTR_ERR(root); + + nr_levels = max(btrfs_header_level(root->node) - block->level, 0) + 1; + + num_bytes = fs_info->nodesize * nr_levels; + ret = refill_metadata_space(trans, rc, num_bytes); + if (ret) { + btrfs_put_root(root); + return ret; + } + path->lowest_level = block->level; + if (root == root->fs_info->chunk_root) + btrfs_reserve_chunk_metadata(trans, false); + + ret = btrfs_search_slot(trans, root, &block->key, path, 0, 1); + path->lowest_level = 0; + btrfs_release_path(path); + + if (root == root->fs_info->chunk_root) + btrfs_trans_release_chunk_metadata(trans); + if (ret > 0) + ret = 0; + btrfs_put_root(root); + + return ret; +} + /* * relocate a list of blocks */ @@ -2746,12 +2596,11 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, struct btrfs_path *path; struct tree_block *block; struct 
tree_block *next; - int ret; - int err = 0; + int ret = 0; path = btrfs_alloc_path(); if (!path) { - err = -ENOMEM; + ret = -ENOMEM; goto out_free_blocks; } @@ -2766,112 +2615,79 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, /* Get first keys */ rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { if (!block->key_ready) { - err = get_tree_block_key(fs_info, block); - if (err) + ret = get_tree_block_key(fs_info, block); + if (ret) goto out_free_path; } } /* Do tree relocation */ rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { - node = build_backref_tree(rc, &block->key, + /* + * For COWonly blocks, or the data reloc tree, we only need to + * COW down to the block, there's no need to generate a backref + * tree. + */ + if (block->owner && + (!btrfs_is_fstree(block->owner) || + block->owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) { + ret = relocate_cowonly_block(trans, rc, block, path); + if (ret) + break; + continue; + } + + node = build_backref_tree(trans, rc, &block->key, block->level, block->bytenr); if (IS_ERR(node)) { - err = PTR_ERR(node); + ret = PTR_ERR(node); goto out; } ret = relocate_tree_block(trans, rc, node, &block->key, path); - if (ret < 0) { - err = ret; + if (ret < 0) break; - } } out: - err = finish_pending_nodes(trans, rc, path, err); + ret = finish_pending_nodes(trans, rc, path, ret); out_free_path: btrfs_free_path(path); out_free_blocks: free_block_list(blocks); - return err; + return ret; } -static noinline_for_stack int prealloc_file_extent_cluster( - struct btrfs_inode *inode, - struct file_extent_cluster *cluster) +static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control *rc) { + const struct file_extent_cluster *cluster = &rc->cluster; + struct btrfs_inode *inode = BTRFS_I(rc->data_inode); u64 alloc_hint = 0; u64 start; u64 end; - u64 offset = inode->index_cnt; + u64 offset = inode->reloc_block_group_start; u64 num_bytes; int nr; int ret = 0; - u64 i_size = i_size_read(&inode->vfs_inode); u64 prealloc_start = cluster->start - offset; u64 prealloc_end = cluster->end - offset; u64 cur_offset = prealloc_start; /* - * For subpage case, previous i_size may not be aligned to PAGE_SIZE. - * This means the range [i_size, PAGE_END + 1) is filled with zeros by - * btrfs_do_readpage() call of previously relocated file cluster. + * For blocksize < folio size case (either bs < page size or large folios), + * beyond i_size, all blocks are filled with zero. * - * If the current cluster starts in the above range, btrfs_do_readpage() - * will skip the read, and relocate_one_page() will later writeback + * If the current cluster covers the above range, btrfs_do_readpage() + * will skip the read, and relocate_one_folio() will later writeback * the padding zeros as new data, causing data corruption. * - * Here we have to manually invalidate the range (i_size, PAGE_END + 1). + * Here we have to invalidate the cache covering our cluster. 
*/ - if (!IS_ALIGNED(i_size, PAGE_SIZE)) { - struct address_space *mapping = inode->vfs_inode.i_mapping; - struct btrfs_fs_info *fs_info = inode->root->fs_info; - const u32 sectorsize = fs_info->sectorsize; - struct page *page; - - ASSERT(sectorsize < PAGE_SIZE); - ASSERT(IS_ALIGNED(i_size, sectorsize)); - - /* - * Subpage can't handle page with DIRTY but without UPTODATE - * bit as it can lead to the following deadlock: - * - * btrfs_read_folio() - * | Page already *locked* - * |- btrfs_lock_and_flush_ordered_range() - * |- btrfs_start_ordered_extent() - * |- extent_write_cache_pages() - * |- lock_page() - * We try to lock the page we already hold. - * - * Here we just writeback the whole data reloc inode, so that - * we will be ensured to have no dirty range in the page, and - * are safe to clear the uptodate bits. - * - * This shouldn't cause too much overhead, as we need to write - * the data back anyway. - */ - ret = filemap_write_and_wait(mapping); - if (ret < 0) - return ret; - - clear_extent_bits(&inode->io_tree, i_size, - round_up(i_size, PAGE_SIZE) - 1, - EXTENT_UPTODATE); - page = find_lock_page(mapping, i_size >> PAGE_SHIFT); - /* - * If page is freed we don't need to do anything then, as we - * will re-read the whole page anyway. - */ - if (page) { - btrfs_subpage_clear_uptodate(fs_info, page, i_size, - round_up(i_size, PAGE_SIZE) - i_size); - unlock_page(page); - put_page(page); - } - } + ret = filemap_invalidate_inode(&inode->vfs_inode, true, prealloc_start, + prealloc_end); + if (ret < 0) + return ret; BUG_ON(cluster->start != cluster->boundary[0]); ret = btrfs_alloc_data_chunk_ondemand(inode, @@ -2889,45 +2705,49 @@ static noinline_for_stack int prealloc_file_extent_cluster( else end = cluster->end - offset; - lock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_lock_extent(&inode->io_tree, start, end, &cached_state); num_bytes = end + 1 - start; ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start, num_bytes, num_bytes, end + 1, &alloc_hint); cur_offset = end + 1; - unlock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state); if (ret) break; } btrfs_inode_unlock(inode, 0); if (cur_offset < prealloc_end) - btrfs_free_reserved_data_space_noquota(inode->root->fs_info, - prealloc_end + 1 - cur_offset); + btrfs_free_reserved_data_space_noquota(inode, + prealloc_end + 1 - cur_offset); return ret; } -static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode, - u64 start, u64 end, u64 block_start) +static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_control *rc) { + struct btrfs_inode *inode = BTRFS_I(rc->data_inode); struct extent_map *em; struct extent_state *cached_state = NULL; + u64 offset = inode->reloc_block_group_start; + u64 start = rc->cluster.start - offset; + u64 end = rc->cluster.end - offset; int ret = 0; - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) return -ENOMEM; em->start = start; em->len = end + 1 - start; - em->block_len = em->len; - em->block_start = block_start; - set_bit(EXTENT_FLAG_PINNED, &em->flags); + em->disk_bytenr = rc->cluster.start; + em->disk_num_bytes = em->len; + em->ram_bytes = em->len; + em->flags |= EXTENT_FLAG_PINNED; - lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); - ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false); - unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); - free_extent_map(em); + btrfs_lock_extent(&inode->io_tree, start, end, 
+	ret = btrfs_replace_extent_map_range(inode, em, false);
+	btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
+	btrfs_free_extent_map(em);
 	return ret;
 }
 
@@ -2935,7 +2755,7 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct inod
 /*
  * Allow error injection to test balance/relocation cancellation
  */
-noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
+noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
 {
 	return atomic_read(&fs_info->balance_cancel_req) ||
 		atomic_read(&fs_info->reloc_cancel_req) ||
@@ -2943,7 +2763,7 @@ noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
 }
 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
 
-static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
+static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
 				    int cluster_nr)
 {
 	/* Last extent, use cluster end directly */
@@ -2954,62 +2774,91 @@ static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
 	return cluster->boundary[cluster_nr + 1] - 1;
 }
 
-static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
-			     struct file_extent_cluster *cluster,
-			     int *cluster_nr, unsigned long page_index)
+static int relocate_one_folio(struct reloc_control *rc,
+			      struct file_ra_state *ra,
+			      int *cluster_nr, u64 *file_offset_ret)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	u64 offset = BTRFS_I(inode)->index_cnt;
-	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
+	const struct file_extent_cluster *cluster = &rc->cluster;
+	struct inode *inode = rc->data_inode;
+	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+	const u64 orig_file_offset = *file_offset_ret;
+	u64 offset = BTRFS_I(inode)->reloc_block_group_start;
+	const pgoff_t last_index = (cluster->end - offset) >> PAGE_SHIFT;
+	const pgoff_t index = orig_file_offset >> PAGE_SHIFT;
 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
-	struct page *page;
-	u64 page_start;
-	u64 page_end;
+	struct folio *folio;
+	u64 folio_start;
+	u64 folio_end;
 	u64 cur;
 	int ret;
+	const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
 
-	ASSERT(page_index <= last_index);
-	page = find_lock_page(inode->i_mapping, page_index);
-	if (!page) {
-		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
-				page_index, last_index + 1 - page_index);
-		page = find_or_create_page(inode->i_mapping, page_index, mask);
-		if (!page)
-			return -ENOMEM;
+	ASSERT(index <= last_index);
+again:
+	folio = filemap_lock_folio(inode->i_mapping, index);
+	if (IS_ERR(folio)) {
+
+		/*
+		 * On relocation we're doing readahead on the relocation inode,
+		 * but if the filesystem is backed by a RAID stripe tree we can
+		 * get ENOENT (e.g. due to preallocated extents not being
+		 * mapped in the RST) from the lookup.
+		 *
+		 * But readahead doesn't handle the error and submits invalid
+		 * reads to the device, causing assertion failures.
+		 */
+		if (!use_rst)
+			page_cache_sync_readahead(inode->i_mapping, ra, NULL,
+						  index, last_index + 1 - index);
+		folio = __filemap_get_folio(inode->i_mapping, index,
+					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+					    mask);
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);
 	}
 
-	ret = set_page_extent_mapped(page);
-	if (ret < 0)
-		goto release_page;
-
-	if (PageReadahead(page))
+	if (folio_test_readahead(folio) && !use_rst)
 		page_cache_async_readahead(inode->i_mapping, ra, NULL,
-				page_folio(page), page_index,
-				last_index + 1 - page_index);
+					   folio, last_index + 1 - index);
 
-	if (!PageUptodate(page)) {
-		btrfs_read_folio(NULL, page_folio(page));
-		lock_page(page);
-		if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
+		btrfs_read_folio(NULL, folio);
+		folio_lock(folio);
+		if (unlikely(!folio_test_uptodate(folio))) {
 			ret = -EIO;
-			goto release_page;
+			goto release_folio;
+		}
+		if (folio->mapping != inode->i_mapping) {
+			folio_unlock(folio);
+			folio_put(folio);
+			goto again;
 		}
 	}
 
-	page_start = page_offset(page);
-	page_end = page_start + PAGE_SIZE - 1;
+	/*
+	 * We could have lost folio private when we dropped the lock to read
+	 * the folio above, so make sure we set_folio_extent_mapped() here to
+	 * have any of the subpage blocksize stuff we need in place.
+	 */
+	ret = set_folio_extent_mapped(folio);
+	if (ret < 0)
+		goto release_folio;
+
+	folio_start = folio_pos(folio);
+	folio_end = folio_start + folio_size(folio) - 1;
 
 	/*
 	 * Start from the cluster, as for subpage case, the cluster can start
-	 * inside the page.
+	 * inside the folio.
 	 */
-	cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
-	while (cur <= page_end) {
+	cur = max(folio_start, cluster->boundary[*cluster_nr] - offset);
+	while (cur <= folio_end) {
 		struct extent_state *cached_state = NULL;
 		u64 extent_start = cluster->boundary[*cluster_nr] - offset;
 		u64 extent_end = get_cluster_boundary_end(cluster,
 							  *cluster_nr) - offset;
-		u64 clamped_start = max(page_start, extent_start);
-		u64 clamped_end = min(page_end, extent_end);
+		u64 clamped_start = max(folio_start, extent_start);
+		u64 clamped_end = min(folio_end, extent_end);
 		u32 clamped_len = clamped_end + 1 - clamped_start;
 
 		/* Reserve metadata for this range */
@@ -3017,46 +2866,46 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 						      clamped_len, clamped_len,
 						      false);
 		if (ret)
-			goto release_page;
+			goto release_folio;
 
 		/* Mark the range delalloc and dirty for later writeback */
-		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
-			    &cached_state);
+		btrfs_lock_extent(&BTRFS_I(inode)->io_tree, clamped_start,
+				  clamped_end, &cached_state);
 		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
 						clamped_end, 0, &cached_state);
 		if (ret) {
-			clear_extent_bit(&BTRFS_I(inode)->io_tree,
-					 clamped_start, clamped_end,
-					 EXTENT_LOCKED | EXTENT_BOUNDARY,
-					 &cached_state);
+			btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree,
+					       clamped_start, clamped_end,
+					       EXTENT_LOCKED | EXTENT_BOUNDARY,
+					       &cached_state);
 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
 							clamped_len, true);
 			btrfs_delalloc_release_extents(BTRFS_I(inode),
 						       clamped_len);
-			goto release_page;
+			goto release_folio;
 		}
-		btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);
+		btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len);
 
 		/*
-		 * Set the boundary if it's inside the page.
+		 * Set the boundary if it's inside the folio.
 		 * Data relocation requires the destination extents to have the
 		 * same size as the source.
		 * The EXTENT_BOUNDARY bit prevents the current extent from
		 * being merged with the previous extent.
		 */
		if (in_range(cluster->boundary[*cluster_nr] - offset,
-			     page_start, PAGE_SIZE)) {
+			     folio_start, folio_size(folio))) {
			u64 boundary_start = cluster->boundary[*cluster_nr] -
					     offset;
			u64 boundary_end = boundary_start +
					   fs_info->sectorsize - 1;

-			set_extent_bits(&BTRFS_I(inode)->io_tree,
-					boundary_start, boundary_end,
-					EXTENT_BOUNDARY);
+			btrfs_set_extent_bit(&BTRFS_I(inode)->io_tree,
+					     boundary_start, boundary_end,
+					     EXTENT_BOUNDARY, NULL);
		}
-		unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
-			      &cached_state);
+		btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
+				    &cached_state);
		btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
		cur += clamped_len;

@@ -3068,28 +2917,29 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
			break;
		}
	}
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);

	balance_dirty_pages_ratelimited(inode->i_mapping);
	btrfs_throttle(fs_info);
	if (btrfs_should_cancel_balance(fs_info))
		ret = -ECANCELED;
+	*file_offset_ret = folio_end + 1;
	return ret;

-release_page:
-	unlock_page(page);
-	put_page(page);
+release_folio:
+	folio_unlock(folio);
+	folio_put(folio);
	return ret;
}

-static int relocate_file_extent_cluster(struct inode *inode,
-					struct file_extent_cluster *cluster)
+static int relocate_file_extent_cluster(struct reloc_control *rc)
{
-	u64 offset = BTRFS_I(inode)->index_cnt;
-	unsigned long index;
-	unsigned long last_index;
-	struct file_ra_state *ra;
+	struct inode *inode = rc->data_inode;
+	const struct file_extent_cluster *cluster = &rc->cluster;
+	u64 offset = BTRFS_I(inode)->reloc_block_group_start;
+	u64 cur_file_offset = cluster->start - offset;
+	struct file_ra_state AUTO_KFREE(ra);
	int cluster_nr = 0;
	int ret = 0;

@@ -3100,43 +2950,73 @@ static int relocate_file_extent_cluster(struct inode *inode,
	if (!ra)
		return -ENOMEM;

-	ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
+	ret = prealloc_file_extent_cluster(rc);
	if (ret)
-		goto out;
+		return ret;

	file_ra_state_init(ra, inode->i_mapping);

-	ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
-					      cluster->end - offset, cluster->start);
+	ret = setup_relocation_extent_mapping(rc);
	if (ret)
-		goto out;
+		return ret;

-	last_index = (cluster->end - offset) >> PAGE_SHIFT;
-	for (index = (cluster->start - offset) >> PAGE_SHIFT;
-	     index <= last_index && !ret; index++)
-		ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
+	while (cur_file_offset < cluster->end - offset) {
+		ret = relocate_one_folio(rc, ra, &cluster_nr, &cur_file_offset);
+		if (ret)
+			break;
+	}
	if (ret == 0)
		WARN_ON(cluster_nr != cluster->nr);
-out:
-	kfree(ra);
	return ret;
}

-static noinline_for_stack
-int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
-			 struct file_extent_cluster *cluster)
+static noinline_for_stack int relocate_data_extent(struct reloc_control *rc,
+						   const struct btrfs_key *extent_key)
{
+	struct inode *inode = rc->data_inode;
+	struct file_extent_cluster *cluster = &rc->cluster;
	int ret;
+	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
-		ret = relocate_file_extent_cluster(inode, cluster);
+		ret = relocate_file_extent_cluster(rc);
		if (ret)
			return ret;
		cluster->nr = 0;
	}

-	if (!cluster->nr)
+	/*
+	 * Under simple quotas, we set root->relocation_src_root when we find
+	 * the extent. If adjacent extents have different owners, we can't
+	 * merge them while relocating. Handle this by storing the owning root
+	 * that started a cluster, and if we see an extent from a different
+	 * root, break cluster formation (just like the above case of
+	 * non-adjacent extents).
+	 *
+	 * Without simple quotas, relocation_src_root is always 0, so we should
+	 * never see a mismatch, and it should have no effect on relocation
+	 * clusters.
+	 */
+	if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) {
+		u64 tmp = root->relocation_src_root;
+
+		/*
+		 * root->relocation_src_root is the state that actually affects
+		 * the preallocation we do here, so set it to the root owning
+		 * the cluster we need to relocate.
+		 */
+		root->relocation_src_root = cluster->owning_root;
+		ret = relocate_file_extent_cluster(rc);
+		if (ret)
+			return ret;
+		cluster->nr = 0;
+		/* And reset it back for the current extent's owning root. */
+		root->relocation_src_root = tmp;
+	}
+
+	if (!cluster->nr) {
		cluster->start = extent_key->objectid;
-	else
+		cluster->owning_root = root->relocation_src_root;
+	} else
		BUG_ON(cluster->nr >= MAX_EXTENTS);
	cluster->end = extent_key->objectid + extent_key->offset - 1;
	cluster->boundary[cluster->nr] = extent_key->objectid;
	cluster->nr++;

	if (cluster->nr >= MAX_EXTENTS) {
-		ret = relocate_file_extent_cluster(inode, cluster);
+		ret = relocate_file_extent_cluster(rc);
		if (ret)
			return ret;
		cluster->nr = 0;
@@ -3157,7 +3037,7 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
 * the major work is getting the generation and level of the block
 */
static int add_tree_block(struct reloc_control *rc,
-			  struct btrfs_key *extent_key,
+			  const struct btrfs_key *extent_key,
			  struct btrfs_path *path,
			  struct rb_root *blocks)
{
@@ -3221,12 +3101,13 @@ static int add_tree_block(struct reloc_control *rc,
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				owner = btrfs_extent_inline_ref_offset(eb, iref);
		}
-	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
-		btrfs_print_v0_err(eb->fs_info);
-		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
-		return -EINVAL;
	} else {
-		BUG();
+		btrfs_print_leaf(eb);
+		btrfs_err(rc->block_group->fs_info,
+			  "unrecognized tree backref at tree block %llu slot %u",
+			  eb->start, path->slots[0]);
+		btrfs_release_path(path);
+		return -EUCLEAN;
	}

	btrfs_release_path(path);
@@ -3241,10 +3122,10 @@ static int add_tree_block(struct reloc_control *rc,
	block->key.objectid = rc->extent_root->fs_info->nodesize;
	block->key.offset = generation;
	block->level = level;
-	block->key_ready = 0;
+	block->key_ready = false;
	block->owner = owner;

-	rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
+	rb_node = rb_simple_insert(blocks, &block->simple_node);
	if (rb_node)
		btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
				    -EEXIST);
@@ -3260,7 +3141,7 @@ static int __add_tree_block(struct reloc_control *rc,
			    struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
-	struct btrfs_path *path;
+	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	int ret;
	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
@@ -3284,11 +3165,11 @@ again:
		key.offset = blocksize;
	}

-	path->search_commit_root = 1;
-	path->skip_locking = 1;
+	path->search_commit_root = true;
+	path->skip_locking = true;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
	if (ret < 0)
-		goto out;
+		return ret;

	if (ret > 0 && skinny) {
		if (path->slots[0]) {
@@ -3315,31 +3196,29 @@ again:
"tree block extent item (%llu) is not found in extent tree", bytenr); WARN_ON(1); - ret = -EINVAL; - goto out; + return -EINVAL; } - ret = add_tree_block(rc, &key, path, blocks); -out: - btrfs_free_path(path); - return ret; + return add_tree_block(rc, &key, path, blocks); } -static int delete_block_group_cache(struct btrfs_fs_info *fs_info, - struct btrfs_block_group *block_group, +static int delete_block_group_cache(struct btrfs_block_group *block_group, struct inode *inode, u64 ino) { + struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_root *root = fs_info->tree_root; struct btrfs_trans_handle *trans; + struct btrfs_inode *btrfs_inode; int ret = 0; if (inode) goto truncate; - inode = btrfs_iget(fs_info->sb, ino, root); - if (IS_ERR(inode)) + btrfs_inode = btrfs_iget(ino, root); + if (IS_ERR(btrfs_inode)) return -ENOENT; + inode = &btrfs_inode->vfs_inode; truncate: ret = btrfs_check_trunc_cache_free_space(fs_info, @@ -3399,19 +3278,17 @@ static int delete_v1_space_cache(struct extent_buffer *leaf, } if (!found) return -ENOENT; - ret = delete_block_group_cache(leaf->fs_info, block_group, NULL, - space_cache_ino); + ret = delete_block_group_cache(block_group, NULL, space_cache_ino); return ret; } /* * helper to find all tree blocks that reference a given data extent */ -static noinline_for_stack -int add_data_references(struct reloc_control *rc, - struct btrfs_key *extent_key, - struct btrfs_path *path, - struct rb_root *blocks) +static noinline_for_stack int add_data_references(struct reloc_control *rc, + const struct btrfs_key *extent_key, + struct btrfs_path *path, + struct rb_root *blocks) { struct btrfs_backref_walk_ctx ctx = { 0 }; struct ulist_iterator leaf_uiter; @@ -3422,7 +3299,7 @@ int add_data_references(struct reloc_control *rc, btrfs_release_path(path); ctx.bytenr = extent_key->objectid; - ctx.ignore_extent_item_pos = true; + ctx.skip_inode_ref_list = true; ctx.fs_info = rc->extent_root->fs_info; ret = btrfs_find_all_leafs(&ctx); @@ -3469,6 +3346,8 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, last = rc->block_group->start + rc->block_group->length; while (1) { + bool block_found; + cond_resched(); if (rc->search_start >= last) { ret = 1; @@ -3479,8 +3358,8 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = 0; - path->search_commit_root = 1; - path->skip_locking = 1; + path->search_commit_root = true; + path->skip_locking = true; ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); if (ret < 0) @@ -3519,11 +3398,11 @@ next: goto next; } - ret = find_first_extent_bit(&rc->processed_blocks, - key.objectid, &start, &end, - EXTENT_DIRTY, NULL); + block_found = btrfs_find_first_extent_bit(&rc->processed_blocks, + key.objectid, &start, &end, + EXTENT_DIRTY, NULL); - if (ret == 0 && start <= key.objectid) { + if (block_found && start <= key.objectid) { btrfs_release_path(path); rc->search_start = end + 1; } else { @@ -3583,7 +3462,7 @@ int prepare_to_relocate(struct reloc_control *rc) if (ret) return ret; - rc->create_reloc_tree = 1; + rc->create_reloc_tree = true; set_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root); @@ -3610,7 +3489,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) struct rb_root blocks = RB_ROOT; struct btrfs_key key; struct btrfs_trans_handle *trans = NULL; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_extent_item *ei; u64 flags; int ret; @@ -3645,11 +3524,9 @@ 
			break;
	}
restart:
-	if (update_backref_cache(trans, &rc->backref_cache)) {
-		btrfs_end_transaction(trans);
-		trans = NULL;
-		continue;
-	}
+	if (rc->backref_cache.last_trans != trans->transid)
+		btrfs_backref_release_cache(&rc->backref_cache);
+	rc->backref_cache.last_trans = trans->transid;

	ret = find_next_extent(rc, path, &key);
	if (ret < 0)
@@ -3663,6 +3540,21 @@ restart:
				    struct btrfs_extent_item);
		flags = btrfs_extent_flags(path->nodes[0], ei);

+		/*
+		 * If we are relocating a simple quota owned extent item, we
+		 * need to note the owner on the reloc data root so that when
+		 * we allocate the replacement item, we can attribute it to the
+		 * correct eventual owner (rather than the reloc data root).
+		 */
+		if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
+			struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
+			u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
+								path->nodes[0],
+								path->slots[0]);
+
+			root->relocation_src_root = owning_root_id;
+		}
+
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
@@ -3695,9 +3587,8 @@ restart:

		if (rc->stage == MOVE_DATA_EXTENTS &&
		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
-			rc->found_file_extent = 1;
-			ret = relocate_data_extent(rc->data_inode,
-						   &key, &rc->cluster);
+			rc->found_file_extent = true;
+			ret = relocate_data_extent(rc, &key);
			if (ret < 0) {
				err = ret;
				break;
@@ -3718,7 +3609,7 @@ restart:
	}
	btrfs_release_path(path);

-	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
+	btrfs_clear_extent_bit(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, NULL);

	if (trans) {
		btrfs_end_transaction_throttle(trans);
@@ -3726,13 +3617,12 @@ restart:
	}

	if (!err) {
-		ret = relocate_file_extent_cluster(rc->data_inode,
-						   &rc->cluster);
+		ret = relocate_file_extent_cluster(rc);
		if (ret < 0)
			err = ret;
	}

-	rc->create_reloc_tree = 0;
+	rc->create_reloc_tree = false;
	set_reloc_control(rc);

	btrfs_backref_release_cache(&rc->backref_cache);
@@ -3750,7 +3640,7 @@ restart:

	merge_reloc_roots(rc);

-	rc->merge_reloc_tree = 0;
+	rc->merge_reloc_tree = false;
	unset_reloc_control(rc);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);

@@ -3768,14 +3658,13 @@ out_free:
	if (ret < 0 && !err)
		err = ret;
	btrfs_free_block_rsv(fs_info, rc->block_rsv);
-	btrfs_free_path(path);
	return err;
}

static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, u64 objectid)
{
-	struct btrfs_path *path;
+	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;
@@ -3786,7 +3675,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
-		goto out;
+		return ret;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
@@ -3796,16 +3685,13 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
					  BTRFS_INODE_PREALLOC);
-	btrfs_mark_buffer_dirty(leaf);
-out:
-	btrfs_free_path(path);
-	return ret;
+	return 0;
}

static void delete_orphan_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, u64 objectid)
{
-	struct btrfs_path *path;
+	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	int ret = 0;

@@ -3828,22 +3714,21 @@ static void delete_orphan_inode(struct btrfs_trans_handle *trans,
out:
	if (ret)
		btrfs_abort_transaction(trans, ret);
-	btrfs_free_path(path);
}

/*
 * helper to create inode for data relocation.
 * the inode is in data relocation tree and its link count is 0
 */
-static noinline_for_stack
-struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
-				 struct btrfs_block_group *group)
+static noinline_for_stack struct inode *create_reloc_inode(
+					const struct btrfs_block_group *group)
{
-	struct inode *inode = NULL;
+	struct btrfs_fs_info *fs_info = group->fs_info;
+	struct btrfs_inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	u64 objectid;
-	int err = 0;
+	int ret = 0;

	root = btrfs_grab_root(fs_info->data_reloc_root);
	trans = btrfs_start_transaction(root, 6);
@@ -3852,38 +3737,40 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
		return ERR_CAST(trans);
	}

-	err = btrfs_get_free_objectid(root, &objectid);
-	if (err)
+	ret = btrfs_get_free_objectid(root, &objectid);
+	if (ret)
		goto out;

-	err = __insert_orphan_inode(trans, root, objectid);
-	if (err)
+	ret = __insert_orphan_inode(trans, root, objectid);
+	if (ret)
		goto out;

-	inode = btrfs_iget(fs_info->sb, objectid, root);
+	inode = btrfs_iget(objectid, root);
	if (IS_ERR(inode)) {
		delete_orphan_inode(trans, root, objectid);
-		err = PTR_ERR(inode);
+		ret = PTR_ERR(inode);
		inode = NULL;
		goto out;
	}
-	BTRFS_I(inode)->index_cnt = group->start;
+	inode->reloc_block_group_start = group->start;

-	err = btrfs_orphan_add(trans, BTRFS_I(inode));
+	ret = btrfs_orphan_add(trans, inode);
out:
	btrfs_put_root(root);
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
-	if (err) {
-		iput(inode);
-		inode = ERR_PTR(err);
+	if (ret) {
+		if (inode)
+			iput(&inode->vfs_inode);
+		return ERR_PTR(ret);
	}
-	return inode;
+	return &inode->vfs_inode;
}

/*
 * Mark start of chunk relocation that is cancellable. Check if the cancellation
 * has been requested meanwhile and don't start in that case.
+ * NOTE: if this returns an error, reloc_chunk_end() must not be called.
 *
 * Return:
 *   0             success
@@ -3900,10 +3787,8 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
	if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
		btrfs_info(fs_info, "chunk relocation canceled on start");
-		/*
-		 * On cancel, clear all requests but let the caller mark
-		 * the end after cleanup operations.
-		 */
+		/* On cancel, clear all requests. */
+		clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
		atomic_set(&fs_info->reloc_cancel_req, 0);
		return -ECANCELED;
	}
@@ -3912,9 +3797,11 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)

/*
 * Mark end of chunk relocation that is cancellable and wake any waiters.
+ * NOTE: call only if a previous call to reloc_chunk_start() succeeded.
 */
static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
{
+	ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));
	/* Requested after start, clear bit first so any waiters can continue */
	if (atomic_read(&fs_info->reloc_cancel_req) > 0)
		btrfs_info(fs_info, "chunk relocation canceled during operation");
@@ -3932,9 +3819,10 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)

	INIT_LIST_HEAD(&rc->reloc_roots);
	INIT_LIST_HEAD(&rc->dirty_subvol_roots);
-	btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
-	mapping_tree_init(&rc->reloc_root_tree);
-	extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
+	btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
+	rc->reloc_root_tree.rb_root = RB_ROOT;
+	spin_lock_init(&rc->reloc_root_tree.lock);
+	btrfs_extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
	return rc;
}

@@ -3953,19 +3841,17 @@ static void free_reloc_control(struct reloc_control *rc)
/*
 * Print the block group being relocated
 */
-static void describe_relocation(struct btrfs_fs_info *fs_info,
-				struct btrfs_block_group *block_group)
+static void describe_relocation(struct btrfs_block_group *block_group)
{
-	char buf[128] = {'\0'};
+	char buf[128] = "NONE";

	btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));

-	btrfs_info(fs_info,
-		   "relocating block group %llu flags %s",
+	btrfs_info(block_group->fs_info, "relocating block group %llu flags %s",
		   block_group->start, buf);
}

-static const char *stage_to_string(int stage)
+static const char *stage_to_string(enum reloc_stage stage)
{
	if (stage == MOVE_DATA_EXTENTS)
		return "move data extents";
@@ -3977,7 +3863,8 @@ static const char *stage_to_string(int stage)
/*
 * function to relocate all extents in a block group.
 */
-int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
+int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start,
+			       bool verbose)
{
	struct btrfs_block_group *bg;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
@@ -3985,8 +3872,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
-	int rw = 0;
-	int err = 0;
+	bool bg_is_ro = false;

	/*
	 * This only gets set if we had a half-deleted snapshot on mount. We
@@ -4028,24 +3914,20 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
	}

	ret = reloc_chunk_start(fs_info);
-	if (ret < 0) {
-		err = ret;
+	if (ret < 0)
		goto out_put_bg;
-	}

	rc->extent_root = extent_root;
	rc->block_group = bg;

	ret = btrfs_inc_block_group_ro(rc->block_group, true);
-	if (ret) {
-		err = ret;
+	if (ret)
		goto out;
-	}
-	rw = 1;
+	bg_is_ro = true;

	path = btrfs_alloc_path();
	if (!path) {
-		err = -ENOMEM;
+		ret = -ENOMEM;
		goto out;
	}

@@ -4053,41 +3935,36 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
	btrfs_free_path(path);

	if (!IS_ERR(inode))
-		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
+		ret = delete_block_group_cache(rc->block_group, inode, 0);
	else
		ret = PTR_ERR(inode);

-	if (ret && ret != -ENOENT) {
-		err = ret;
+	if (ret && ret != -ENOENT)
		goto out;
-	}

-	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
+	rc->data_inode = create_reloc_inode(rc->block_group);
	if (IS_ERR(rc->data_inode)) {
-		err = PTR_ERR(rc->data_inode);
+		ret = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

-	describe_relocation(fs_info, rc->block_group);
+	if (verbose)
+		describe_relocation(rc->block_group);

	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
-	btrfs_wait_ordered_roots(fs_info, U64_MAX,
-				 rc->block_group->start,
-				 rc->block_group->length);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group);

	ret = btrfs_zone_finish(rc->block_group);
	WARN_ON(ret && ret != -EAGAIN);

	while (1) {
-		int finishes_stage;
+		enum reloc_stage finishes_stage;

		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
-		if (ret < 0)
-			err = ret;

		finishes_stage = rc->stage;
		/*
@@ -4100,37 +3977,41 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
		 * out of the loop if we hit an error.
		 */
		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
-			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
-						       (u64)-1);
-			if (ret)
-				err = ret;
+			int wb_ret;
+
+			wb_ret = btrfs_wait_ordered_range(BTRFS_I(rc->data_inode), 0,
+							  (u64)-1);
+			if (wb_ret && ret == 0)
+				ret = wb_ret;
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}

-		if (err < 0)
+		if (ret < 0)
			goto out;

		if (rc->extents_found == 0)
			break;

-		btrfs_info(fs_info, "found %llu extents, stage: %s",
-			   rc->extents_found, stage_to_string(finishes_stage));
+		if (verbose)
+			btrfs_info(fs_info, "found %llu extents, stage: %s",
+				   rc->extents_found,
+				   stage_to_string(finishes_stage));
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(rc->block_group->used > 0);
out:
-	if (err && rw)
+	if (ret && bg_is_ro)
		btrfs_dec_block_group_ro(rc->block_group);
	iput(rc->data_inode);
+	reloc_chunk_end(fs_info);
out_put_bg:
	btrfs_put_block_group(bg);
-	reloc_chunk_end(fs_info);
	free_reloc_control(rc);
-	return err;
+	return ret;
}

static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
@@ -4172,8 +4053,8 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
-	int ret;
-	int err = 0;
+	int ret2;
+	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
@@ -4187,15 +4068,14 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path,
					0, 0);
-		if (ret < 0) {
-			err = ret;
+		if (ret < 0)
			goto out;
-		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
+		ret = 0;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);
@@ -4206,7 +4086,7 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)

		reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
		if (IS_ERR(reloc_root)) {
-			err = PTR_ERR(reloc_root);
+			ret = PTR_ERR(reloc_root);
			goto out;
		}

@@ -4218,15 +4098,12 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
					reloc_root->root_key.offset, false);
		if (IS_ERR(fs_root)) {
			ret = PTR_ERR(fs_root);
-			if (ret != -ENOENT) {
-				err = ret;
+			if (ret != -ENOENT)
				goto out;
-			}
			ret = mark_garbage_root(reloc_root);
-			if (ret < 0) {
-				err = ret;
+			if (ret < 0)
				goto out;
-			}
+			ret = 0;
		} else {
			btrfs_put_root(fs_root);
		}
@@ -4244,15 +4121,13 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
-		err = -ENOMEM;
+		ret = -ENOMEM;
		goto out;
	}

	ret = reloc_chunk_start(fs_info);
-	if (ret < 0) {
-		err = ret;
+	if (ret < 0)
		goto out_end;
-	}

	rc->extent_root = btrfs_extent_root(fs_info, 0);

@@ -4260,15 +4135,14 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
-		err = PTR_ERR(trans);
+		ret = PTR_ERR(trans);
		goto out_unset;
	}

-	rc->merge_reloc_tree = 1;
+	rc->merge_reloc_tree = true;

	while (!list_empty(&reloc_roots)) {
-		reloc_root = list_entry(reloc_roots.next,
-					struct btrfs_root, root_list);
+		reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
@@ -4280,15 +4154,15 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
		fs_root = btrfs_get_fs_root(fs_info,
					    reloc_root->root_key.offset, false);
		if (IS_ERR(fs_root)) {
-			err = PTR_ERR(fs_root);
+			ret = PTR_ERR(fs_root);
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_end_transaction(trans);
			goto out_unset;
		}

-		err = __add_reloc_root(reloc_root);
-		ASSERT(err != -EEXIST);
-		if (err) {
+		ret = __add_reloc_root(reloc_root);
+		ASSERT(ret != -EEXIST);
+		if (ret) {
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_put_root(fs_root);
			btrfs_end_transaction(trans);
@@ -4298,8 +4172,8 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
		btrfs_put_root(fs_root);
	}

-	err = btrfs_commit_transaction(trans);
-	if (err)
+	ret = btrfs_commit_transaction(trans);
+	if (ret)
		goto out_unset;

	merge_reloc_roots(rc);
@@ -4308,32 +4182,32 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
-		err = PTR_ERR(trans);
+		ret = PTR_ERR(trans);
		goto out_clean;
	}
-	err = btrfs_commit_transaction(trans);
+	ret = btrfs_commit_transaction(trans);
out_clean:
-	ret = clean_dirty_subvols(rc);
-	if (ret < 0 && !err)
-		err = ret;
+	ret2 = clean_dirty_subvols(rc);
+	if (ret2 < 0 && !ret)
+		ret = ret2;
out_unset:
	unset_reloc_control(rc);
-out_end:
	reloc_chunk_end(fs_info);
+out_end:
	free_reloc_control(rc);
out:
	free_reloc_roots(&reloc_roots);
	btrfs_free_path(path);

-	if (err == 0) {
+	if (ret == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
		ASSERT(fs_root);
-		err = btrfs_orphan_cleanup(fs_root);
+		ret = btrfs_orphan_cleanup(fs_root);
		btrfs_put_root(fs_root);
	}
-	return err;
+	return ret;
}

/*
@@ -4342,29 +4216,27 @@ out:
 * cloning checksum properly handles the nodatasum extents.
 * it also saves CPU time to re-calculate the checksum.
 */
-int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
+int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
{
+	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
-	struct btrfs_root *csum_root;
-	struct btrfs_ordered_sum *sums;
-	struct btrfs_ordered_extent *ordered;
-	int ret;
-	u64 disk_bytenr;
-	u64 new_bytenr;
+	u64 disk_bytenr = ordered->file_offset + inode->reloc_block_group_start;
+	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr);
	LIST_HEAD(list);
+	int ret;

-	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
-	BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
-
-	disk_bytenr = file_pos + inode->index_cnt;
-	csum_root = btrfs_csum_root(fs_info, disk_bytenr);
	ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
-				      disk_bytenr + len - 1, &list, 0, false);
-	if (ret)
-		goto out;
+				      disk_bytenr + ordered->num_bytes - 1,
+				      &list, false);
+	if (ret < 0) {
+		btrfs_mark_ordered_extent_error(ordered);
+		return ret;
+	}

	while (!list_empty(&list)) {
-		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
+		struct btrfs_ordered_sum *sums =
+			list_first_entry(&list, struct btrfs_ordered_sum, list);
+
		list_del_init(&sums->list);

		/*
@@ -4379,18 +4251,16 @@ int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
-		new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
-		sums->bytenr = new_bytenr;
-
+		sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr;
		btrfs_add_ordered_sum(ordered, sums);
	}
-out:
-	btrfs_put_ordered_extent(ordered);
-	return ret;
+
+	return 0;
}

int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root, struct extent_buffer *buf,
+			  struct btrfs_root *root,
+			  const struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4411,16 +4281,25 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

-	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
-	    rc->create_reloc_tree) {
+	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID && rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
-		BUG_ON(node->bytenr != buf->start &&
-		       node->new_bytenr != buf->start);
+
+		/*
+		 * If node->bytenr != buf->start and node->new_bytenr !=
+		 * buf->start then we've got the wrong backref node for what we
+		 * expected to see here and the cache is incorrect.
+		 */
+		if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
+			btrfs_err(fs_info,
+"bytenr %llu was found but our backref cache was expecting %llu or %llu",
+				  buf->start, node->bytenr, node->new_bytenr);
+			return -EUCLEAN;
+		}

		btrfs_backref_drop_node_buffer(node);
-		atomic_inc(&cow->refs);
+		refcount_inc(&cow->refs);
		node->eb = cow;
		node->new_bytenr = cow->start;
@@ -4505,8 +4384,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
	}

	new_root = pending->snap;
-	reloc_root = create_reloc_root(trans, root->reloc_root,
-				       new_root->root_key.objectid);
+	reloc_root = create_reloc_root(trans, root->reloc_root, btrfs_root_id(new_root));
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

@@ -4518,8 +4396,21 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
		return ret;
	}
	new_root->reloc_root = btrfs_grab_root(reloc_root);
+	return 0;
+}

-	if (rc->create_reloc_tree)
-		ret = clone_backref_node(trans, rc, root, reloc_root);
-	return ret;
+/*
+ * Get the current bytenr for the block group which is being relocated.
+ *
+ * Return U64_MAX if no running relocation.
+ */
+u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info)
+{
+	u64 logical = U64_MAX;
+
+	lockdep_assert_held(&fs_info->reloc_mutex);
+
+	if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group)
+		logical = fs_info->reloc_ctl->block_group->start;
+	return logical;
+}
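The NOTE comments added to reloc_chunk_start() and reloc_chunk_end() in this diff describe a strict pairing contract. A minimal caller sketch, assuming only what those comments state (demo_relocate_section() below is a hypothetical illustration, not a function from this patch):

static int demo_relocate_section(struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = reloc_chunk_start(fs_info);
	if (ret < 0) {
		/*
		 * Start failed (e.g. -ECANCELED): it already cleared the
		 * BTRFS_FS_RELOC_RUNNING bit itself, so reloc_chunk_end()
		 * must NOT be called on this path.
		 */
		return ret;
	}

	/* ... relocation work runs with BTRFS_FS_RELOC_RUNNING set ... */

	/* Start succeeded, so the matching end call is mandatory. */
	reloc_chunk_end(fs_info);
	return 0;
}

This mirrors how the error paths were rearranged above: in btrfs_relocate_block_group() the reloc_chunk_end() call moved before the out_put_bg label, and in btrfs_recover_relocation() it moved above out_end, so both run only on paths where reloc_chunk_start() succeeded.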

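For the new btrfs_get_reloc_bg_bytenr() helper at the end of the diff, the lockdep assertion makes the locking rule explicit. A hedged usage sketch; the wrapper below is hypothetical, only the helper, the reloc_mutex requirement, and the U64_MAX sentinel come from the patch:

static bool demo_bytenr_is_relocating(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	u64 reloc_start;

	/* The helper asserts that reloc_mutex is held by the caller. */
	mutex_lock(&fs_info->reloc_mutex);
	reloc_start = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	/* U64_MAX is the sentinel for "no relocation currently running". */
	if (reloc_start == U64_MAX)
		return false;
	return bytenr == reloc_start;
}

Note this only compares against the start of the block group under relocation; a real caller would also bound the check by the block group's length.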