Diffstat (limited to 'fs/btrfs/qgroup.c')
| -rw-r--r-- | fs/btrfs/qgroup.c | 3790 |
1 file changed, 2798 insertions, 992 deletions
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 4ce351efe281..9e2b53e90dcb 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1,19 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2011 STRATO. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License v2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA. */ #include <linux/sched.h> @@ -24,6 +11,7 @@ #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/btrfs.h> +#include <linux/sched/mm.h> #include "ctree.h" #include "transaction.h" @@ -33,20 +21,110 @@ #include "backref.h" #include "extent_io.h" #include "qgroup.h" +#include "block-group.h" +#include "sysfs.h" +#include "tree-mod-log.h" +#include "fs.h" +#include "accessors.h" +#include "extent-tree.h" +#include "root-tree.h" +#include "tree-checker.h" + +enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info) +{ + if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) + return BTRFS_QGROUP_MODE_DISABLED; + if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) + return BTRFS_QGROUP_MODE_SIMPLE; + return BTRFS_QGROUP_MODE_FULL; +} +bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info) +{ + return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED; +} + +bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info) +{ + return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL; +} -/* TODO XXX FIXME - * - subvol delete -> delete when ref goes to 0? delete limits also? 
- * - reorganize keys - * - compressed - * - sync - * - copy also limits on subvol creation - * - limit - * - caches fuer ulists - * - performance benchmarks - * - check all ioctl parameters +/* + * Helpers to access qgroup reservation + * + * Callers should ensure the lock context and type are valid */ +static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup) +{ + u64 ret = 0; + int i; + + for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) + ret += qgroup->rsv.values[i]; + + return ret; +} + +#ifdef CONFIG_BTRFS_DEBUG +static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type) +{ + if (type == BTRFS_QGROUP_RSV_DATA) + return "data"; + if (type == BTRFS_QGROUP_RSV_META_PERTRANS) + return "meta_pertrans"; + if (type == BTRFS_QGROUP_RSV_META_PREALLOC) + return "meta_prealloc"; + return NULL; +} +#endif + +static void qgroup_rsv_add(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *qgroup, u64 num_bytes, + enum btrfs_qgroup_rsv_type type) +{ + trace_btrfs_qgroup_update_reserve(fs_info, qgroup, num_bytes, type); + qgroup->rsv.values[type] += num_bytes; +} + +static void qgroup_rsv_release(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *qgroup, u64 num_bytes, + enum btrfs_qgroup_rsv_type type) +{ + trace_btrfs_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type); + if (qgroup->rsv.values[type] >= num_bytes) { + qgroup->rsv.values[type] -= num_bytes; + return; + } +#ifdef CONFIG_BTRFS_DEBUG + WARN_RATELIMIT(1, + "qgroup %llu %s reserved space underflow, have %llu to free %llu", + qgroup->qgroupid, qgroup_rsv_type_str(type), + qgroup->rsv.values[type], num_bytes); +#endif + qgroup->rsv.values[type] = 0; +} + +static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *dest, + const struct btrfs_qgroup *src) +{ + int i; + + for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) + qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i); +} + +static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *dest, + const struct btrfs_qgroup *src) +{ + int i; + + for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) + qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i); +} + static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq, int mod) { @@ -63,97 +141,86 @@ static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq, qg->new_refcnt += mod; } -static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq) +static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq) { if (qg->old_refcnt < seq) return 0; return qg->old_refcnt - seq; } -static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq) +static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq) { if (qg->new_refcnt < seq) return 0; return qg->new_refcnt - seq; } -/* - * glue structure to represent the relations between qgroups. 
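
/*
 * [Editor's annotation, not part of the patch] qgroup_rsv_release() above
 * deliberately saturates instead of wrapping on underflow: an over-release
 * is reported (ratelimited, CONFIG_BTRFS_DEBUG builds only) and the counter
 * is clamped to zero so a reservation can never go "negative". A minimal
 * userspace sketch of that per-type reservation array, with hypothetical
 * names:
 */
#include <stdint.h>
#include <stdio.h>

enum rsv_type { RSV_DATA, RSV_META_PERTRANS, RSV_META_PREALLOC, RSV_LAST };

struct rsv {
	uint64_t values[RSV_LAST];
};

static void rsv_add(struct rsv *r, enum rsv_type type, uint64_t bytes)
{
	r->values[type] += bytes;
}

static void rsv_release(struct rsv *r, enum rsv_type type, uint64_t bytes)
{
	if (r->values[type] >= bytes) {
		r->values[type] -= bytes;
		return;
	}
	/* Over-release: report it, then clamp rather than wrap around. */
	fprintf(stderr, "rsv underflow: type %d have %llu to free %llu\n",
		type, (unsigned long long)r->values[type],
		(unsigned long long)bytes);
	r->values[type] = 0;
}

static uint64_t rsv_total(const struct rsv *r)
{
	uint64_t total = 0;

	for (int i = 0; i < RSV_LAST; i++)
		total += r->values[i];
	return total;
}
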
- */ -struct btrfs_qgroup_list { - struct list_head next_group; - struct list_head next_member; - struct btrfs_qgroup *group; - struct btrfs_qgroup *member; -}; - -static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg) -{ - return (u64)(uintptr_t)qg; -} - -static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n) -{ - return (struct btrfs_qgroup *)(uintptr_t)n->aux; -} - static int qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, int init_flags); static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info); +static int btrfs_qgroup_qgroupid_key_cmp(const void *key, const struct rb_node *node) +{ + const u64 *qgroupid = key; + const struct btrfs_qgroup *qgroup = rb_entry(node, struct btrfs_qgroup, node); + + if (qgroup->qgroupid < *qgroupid) + return -1; + else if (qgroup->qgroupid > *qgroupid) + return 1; + + return 0; +} + /* must be called with qgroup_ioctl_lock held */ -static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info, +static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info, u64 qgroupid) { - struct rb_node *n = fs_info->qgroup_tree.rb_node; - struct btrfs_qgroup *qgroup; + struct rb_node *node; - while (n) { - qgroup = rb_entry(n, struct btrfs_qgroup, node); - if (qgroup->qgroupid < qgroupid) - n = n->rb_left; - else if (qgroup->qgroupid > qgroupid) - n = n->rb_right; - else - return qgroup; - } - return NULL; + node = rb_find(&qgroupid, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_key_cmp); + return rb_entry_safe(node, struct btrfs_qgroup, node); } -/* must be called with qgroup_lock held */ +static int btrfs_qgroup_qgroupid_cmp(struct rb_node *new, const struct rb_node *existing) +{ + const struct btrfs_qgroup *new_qgroup = rb_entry(new, struct btrfs_qgroup, node); + + return btrfs_qgroup_qgroupid_key_cmp(&new_qgroup->qgroupid, existing); +} + +/* + * Add qgroup to the filesystem's qgroup tree. + * + * Must be called with qgroup_lock held and @prealloc preallocated. + * + * The control on the lifespan of @prealloc would be transferred to this + * function, thus caller should no longer touch @prealloc. + */ static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *prealloc, u64 qgroupid) { - struct rb_node **p = &fs_info->qgroup_tree.rb_node; - struct rb_node *parent = NULL; - struct btrfs_qgroup *qgroup; + struct rb_node *node; - while (*p) { - parent = *p; - qgroup = rb_entry(parent, struct btrfs_qgroup, node); + /* Caller must have pre-allocated @prealloc. 
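
/*
 * [Editor's annotation, not part of the patch] add_qgroup_rb() above takes
 * a node that the caller allocated outside the spinlock and either links it
 * or, if the qgroupid already exists, frees it and returns the existing
 * entry, so no GFP_ATOMIC allocation is needed under qgroup_lock. A
 * simplified userspace model of that ownership transfer, using a plain
 * binary search tree in place of the kernel rbtree:
 */
#include <stdint.h>
#include <stdlib.h>

struct node {
	uint64_t key;
	struct node *left, *right;
};

/* Takes ownership of @prealloc: it is linked in, or freed on duplicate. */
static struct node *insert_prealloc(struct node **root, struct node *prealloc)
{
	struct node **p = root;

	while (*p) {
		if (prealloc->key < (*p)->key)
			p = &(*p)->left;
		else if (prealloc->key > (*p)->key)
			p = &(*p)->right;
		else {
			free(prealloc);	/* key exists: drop the spare node */
			return *p;	/* caller must not touch prealloc now */
		}
	}
	*p = prealloc;			/* found the empty slot: link it in */
	return prealloc;
}
/* Callers allocate with calloc() (child pointers NULL) before locking. */
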
*/ + ASSERT(prealloc); - if (qgroup->qgroupid < qgroupid) - p = &(*p)->rb_left; - else if (qgroup->qgroupid > qgroupid) - p = &(*p)->rb_right; - else - return qgroup; + prealloc->qgroupid = qgroupid; + node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp); + if (node) { + kfree(prealloc); + return rb_entry(node, struct btrfs_qgroup, node); } - qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC); - if (!qgroup) - return ERR_PTR(-ENOMEM); - - qgroup->qgroupid = qgroupid; - INIT_LIST_HEAD(&qgroup->groups); - INIT_LIST_HEAD(&qgroup->members); - INIT_LIST_HEAD(&qgroup->dirty); + INIT_LIST_HEAD(&prealloc->groups); + INIT_LIST_HEAD(&prealloc->members); + INIT_LIST_HEAD(&prealloc->dirty); + INIT_LIST_HEAD(&prealloc->iterator); + INIT_LIST_HEAD(&prealloc->nested_iterator); - rb_link_node(&qgroup->node, parent, p); - rb_insert_color(&qgroup->node, &fs_info->qgroup_tree); - - return qgroup; + return prealloc; } static void __del_qgroup_rb(struct btrfs_qgroup *qgroup) @@ -176,7 +243,6 @@ static void __del_qgroup_rb(struct btrfs_qgroup *qgroup) list_del(&list->next_member); kfree(list); } - kfree(qgroup); } /* must be called with qgroup_lock held */ @@ -192,32 +258,56 @@ static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid) return 0; } -/* must be called with qgroup_lock held */ +/* + * Add relation specified by two qgroups. + * + * Must be called with qgroup_lock held, the ownership of @prealloc is + * transferred to this function and caller should not touch it anymore. + * + * Return: 0 on success + * -ENOENT if one of the qgroups is NULL + * <0 other errors + */ +static int __add_relation_rb(struct btrfs_qgroup_list *prealloc, + struct btrfs_qgroup *member, + struct btrfs_qgroup *parent) +{ + if (!member || !parent) { + kfree(prealloc); + return -ENOENT; + } + + prealloc->group = parent; + prealloc->member = member; + list_add_tail(&prealloc->next_group, &member->groups); + list_add_tail(&prealloc->next_member, &parent->members); + + return 0; +} + +/* + * Add relation specified by two qgroup ids. + * + * Must be called with qgroup_lock held. 
+ * + * Return: 0 on success + * -ENOENT if one of the ids does not exist + * <0 other errors + */ static int add_relation_rb(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup_list *prealloc, u64 memberid, u64 parentid) { struct btrfs_qgroup *member; struct btrfs_qgroup *parent; - struct btrfs_qgroup_list *list; member = find_qgroup_rb(fs_info, memberid); parent = find_qgroup_rb(fs_info, parentid); - if (!member || !parent) - return -ENOENT; - list = kzalloc(sizeof(*list), GFP_ATOMIC); - if (!list) - return -ENOMEM; - - list->group = parent; - list->member = member; - list_add_tail(&list->next_group, &member->groups); - list_add_tail(&list->next_member, &parent->members); - - return 0; + return __add_relation_rb(prealloc, member, parent); } -/* must be called with qgroup_lock held */ +/* Must be called with qgroup_lock held */ static int del_relation_rb(struct btrfs_fs_info *fs_info, u64 memberid, u64 parentid) { @@ -242,7 +332,7 @@ static int del_relation_rb(struct btrfs_fs_info *fs_info, } #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS -int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid, +int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid, u64 rfer, u64 excl) { struct btrfs_qgroup *qgroup; @@ -256,6 +346,38 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid, } #endif +__printf(2, 3) +static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info, const char *fmt, ...) +{ + const u64 old_flags = fs_info->qgroup_flags; + + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) + return; + fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | + BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN | + BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING); + if (!(old_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) { + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + + btrfs_warn_rl(fs_info, "qgroup marked inconsistent, %pV", &vaf); + va_end(args); + } +} + +static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info, + struct extent_buffer *leaf, int slot, + struct btrfs_qgroup_status_item *ptr) +{ + ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)); + ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr)); + fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr); +} + /* * The full config is read in one go, only called from open_ctree() * It doesn't use any locking, as at this point we're still single-threaded @@ -272,21 +394,18 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) u64 flags = 0; u64 rescan_progress = 0; - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) + if (!fs_info->quota_root) return 0; - fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL); - if (!fs_info->qgroup_ulist) { - ret = -ENOMEM; - goto out; - } - path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } + ret = btrfs_sysfs_add_qgroups(fs_info); + if (ret < 0) + goto out; /* default this to quota off, in case no status key is found */ fs_info->qgroup_flags = 0; @@ -319,14 +438,11 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) "old qgroup version, quota disabled"); goto out; } - if (btrfs_qgroup_status_generation(l, ptr) != - fs_info->generation) { - flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - btrfs_err(fs_info, - "qgroup generation mismatch, marked as inconsistent"); - } - fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, - ptr); + fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr); + if (fs_info->qgroup_flags & 
BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) + qgroup_read_enable_gen(fs_info, l, slot, ptr); + else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) + qgroup_mark_inconsistent(fs_info, "qgroup generation mismatch"); rescan_progress = btrfs_qgroup_status_rescan(l, ptr); goto next1; } @@ -337,17 +453,42 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) qgroup = find_qgroup_rb(fs_info, found_key.offset); if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) || - (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) { - btrfs_err(fs_info, "inconsistent qgroup config"); - flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - } + (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) + qgroup_mark_inconsistent(fs_info, "inconsistent qgroup config"); if (!qgroup) { - qgroup = add_qgroup_rb(fs_info, found_key.offset); - if (IS_ERR(qgroup)) { - ret = PTR_ERR(qgroup); + struct btrfs_qgroup *prealloc; + struct btrfs_root *tree_root = fs_info->tree_root; + + prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); + if (!prealloc) { + ret = -ENOMEM; goto out; } + qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset); + /* + * If a qgroup exists for a subvolume ID, it is possible + * that subvolume has been deleted, in which case + * reusing that ID would lead to incorrect accounting. + * + * Ensure that we skip any such subvol ids. + * + * We don't need to lock because this is only called + * during mount before we start doing things like creating + * subvolumes. + */ + if (btrfs_is_fstree(qgroup->qgroupid) && + qgroup->qgroupid > tree_root->free_objectid) + /* + * Don't need to check against BTRFS_LAST_FREE_OBJECTID, + * as it will get checked on the next call to + * btrfs_get_free_objectid. + */ + tree_root->free_objectid = qgroup->qgroupid + 1; } + ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); + if (ret < 0) + goto out; + switch (found_key.type) { case BTRFS_QGROUP_INFO_KEY: { struct btrfs_qgroup_info_item *ptr; @@ -393,6 +534,8 @@ next1: if (ret) goto out; while (1) { + struct btrfs_qgroup_list *list = NULL; + slot = path->slots[0]; l = path->nodes[0]; btrfs_item_key_to_cpu(l, &found_key, slot); @@ -406,8 +549,14 @@ next1: goto next2; } - ret = add_relation_rb(fs_info, found_key.objectid, + list = kzalloc(sizeof(*list), GFP_KERNEL); + if (!list) { + ret = -ENOMEM; + goto out; + } + ret = add_relation_rb(fs_info, list, found_key.objectid, found_key.offset); + list = NULL; if (ret == -ENOENT) { btrfs_warn(fs_info, "orphan qgroup relation 0x%llx->0x%llx", @@ -424,54 +573,94 @@ next2: break; } out: - fs_info->qgroup_flags |= flags; - if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) - clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); - else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN && - ret >= 0) - ret = qgroup_rescan_init(fs_info, rescan_progress, 0); btrfs_free_path(path); - - if (ret < 0) { - ulist_free(fs_info->qgroup_ulist); - fs_info->qgroup_ulist = NULL; + fs_info->qgroup_flags |= flags; + if (ret >= 0) { + if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON) + set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); + if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) + ret = qgroup_rescan_init(fs_info, rescan_progress, 0); + } else { fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; + btrfs_sysfs_del_qgroups(fs_info); } return ret < 0 ? ret : 0; } /* + * Called in close_ctree() when quota is still enabled. This verifies we don't + * leak some reserved space. + * + * Return false if no reserved space is left. 
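
/*
 * [Editor's annotation, not part of the patch] the mount-time scan above
 * bumps tree_root->free_objectid past any subvolume-level qgroupid it
 * trails, so an ID whose qgroup items survived a subvolume deletion is
 * never handed out again. A minimal model of that scan; is_subvol_id() is
 * a crude stand-in for btrfs_is_fstree():
 */
#include <stdint.h>

static int is_subvol_id(uint64_t id)
{
	return id >= 256;	/* BTRFS_FIRST_FREE_OBJECTID, roughly */
}

static void skip_stale_qgroup_ids(uint64_t *free_objectid,
				  const uint64_t *qgroupids, int n)
{
	for (int i = 0; i < n; i++) {
		/* Only level-0 qgroups mirror subvolume IDs. */
		if (is_subvol_id(qgroupids[i]) &&
		    qgroupids[i] > *free_objectid)
			*free_objectid = qgroupids[i] + 1;
	}
}
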
+ * Return true if some reserved space is leaked. + */ +bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info) +{ + struct rb_node *node; + bool ret = false; + + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) + return ret; + /* + * Since we're unmounting, there is no race and no need to grab qgroup + * lock. And here we don't go post-order to provide a more user + * friendly sorted result. + */ + for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) { + struct btrfs_qgroup *qgroup; + int i; + + qgroup = rb_entry(node, struct btrfs_qgroup, node); + for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) { + if (qgroup->rsv.values[i]) { + ret = true; + btrfs_warn(fs_info, + "qgroup %hu/%llu has unreleased space, type %d rsv %llu", + btrfs_qgroup_level(qgroup->qgroupid), + btrfs_qgroup_subvolid(qgroup->qgroupid), + i, qgroup->rsv.values[i]); + } + } + } + return ret; +} + +/* * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(), - * first two are in single-threaded paths.And for the third one, we have set - * quota_root to be null with qgroup_lock held before, so it is safe to clean - * up the in-memory structures without qgroup_lock held. + * first two are in single-threaded paths. */ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) { struct rb_node *n; struct btrfs_qgroup *qgroup; + /* + * btrfs_quota_disable() can be called concurrently with + * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the + * lock. + */ + spin_lock(&fs_info->qgroup_lock); while ((n = rb_first(&fs_info->qgroup_tree))) { qgroup = rb_entry(n, struct btrfs_qgroup, node); rb_erase(n, &fs_info->qgroup_tree); __del_qgroup_rb(qgroup); + spin_unlock(&fs_info->qgroup_lock); + btrfs_sysfs_del_one_qgroup(fs_info, qgroup); + kfree(qgroup); + spin_lock(&fs_info->qgroup_lock); } - /* - * we call btrfs_free_qgroup_config() when umounting - * filesystem and disabling quota, so we set qgroup_ulist - * to be null here to avoid double free. 
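
/*
 * [Editor's annotation, not part of the patch] the reworked
 * btrfs_free_qgroup_config() above may not call into sysfs (which can
 * sleep) while holding the qgroup spinlock, so it unlinks one entry,
 * drops the lock for the sleeping work, then retakes it and starts over
 * from rb_first(). A pthread model of that pop/unlock/free loop:
 */
#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *entries;

static void sleeping_teardown(struct entry *e)
{
	(void)e;	/* stands in for btrfs_sysfs_del_one_qgroup() */
}

static void free_all_entries(void)
{
	pthread_mutex_lock(&tree_lock);
	while (entries) {
		struct entry *e = entries;

		entries = e->next;		/* unlink while protected */
		pthread_mutex_unlock(&tree_lock);
		sleeping_teardown(e);		/* must not hold the lock */
		free(e);
		pthread_mutex_lock(&tree_lock);	/* retake before next pop */
	}
	pthread_mutex_unlock(&tree_lock);
}
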
- */ - ulist_free(fs_info->qgroup_ulist); - fs_info->qgroup_ulist = NULL; + spin_unlock(&fs_info->qgroup_lock); + + btrfs_sysfs_del_qgroups(fs_info); } -static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, - struct btrfs_root *quota_root, - u64 src, u64 dst) +static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src, + u64 dst) { int ret; - struct btrfs_path *path; + struct btrfs_root *quota_root = trans->fs_info->quota_root; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; path = btrfs_alloc_path(); @@ -483,19 +672,15 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, key.offset = dst; ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0); - - btrfs_mark_buffer_dirty(path->nodes[0]); - - btrfs_free_path(path); return ret; } -static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, - struct btrfs_root *quota_root, - u64 src, u64 dst) +static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src, + u64 dst) { int ret; - struct btrfs_path *path; + struct btrfs_root *quota_root = trans->fs_info->quota_root; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; path = btrfs_alloc_path(); @@ -508,24 +693,19 @@ static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); if (ret < 0) - goto out; + return ret; - if (ret > 0) { - ret = -ENOENT; - goto out; - } + if (ret > 0) + return -ENOENT; - ret = btrfs_del_item(trans, quota_root, path); -out: - btrfs_free_path(path); - return ret; + return btrfs_del_item(trans, quota_root, path); } static int add_qgroup_item(struct btrfs_trans_handle *trans, struct btrfs_root *quota_root, u64 qgroupid) { int ret; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_qgroup_info_item *qgroup_info; struct btrfs_qgroup_limit_item *qgroup_limit; struct extent_buffer *leaf; @@ -551,7 +731,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans, ret = btrfs_insert_empty_item(trans, quota_root, path, &key, sizeof(*qgroup_info)); if (ret && ret != -EEXIST) - goto out; + return ret; leaf = path->nodes[0]; qgroup_info = btrfs_item_ptr(leaf, path->slots[0], @@ -562,15 +742,13 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans, btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0); btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0); - btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(path); key.type = BTRFS_QGROUP_LIMIT_KEY; ret = btrfs_insert_empty_item(trans, quota_root, path, &key, sizeof(*qgroup_limit)); if (ret && ret != -EEXIST) - goto out; + return ret; leaf = path->nodes[0]; qgroup_limit = btrfs_item_ptr(leaf, path->slots[0], @@ -581,19 +759,14 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans, btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0); btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0); - btrfs_mark_buffer_dirty(leaf); - - ret = 0; -out: - btrfs_free_path(path); - return ret; + return 0; } -static int del_qgroup_item(struct btrfs_trans_handle *trans, - struct btrfs_root *quota_root, u64 qgroupid) +static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid) { int ret; - struct btrfs_path *path; + struct btrfs_root *quota_root = trans->fs_info->quota_root; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; path = btrfs_alloc_path(); @@ -605,41 +778,35 @@ static int del_qgroup_item(struct btrfs_trans_handle *trans, key.offset = qgroupid; ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); if (ret < 0) - 
goto out; + return ret; - if (ret > 0) { - ret = -ENOENT; - goto out; - } + if (ret > 0) + return -ENOENT; ret = btrfs_del_item(trans, quota_root, path); if (ret) - goto out; + return ret; btrfs_release_path(path); key.type = BTRFS_QGROUP_LIMIT_KEY; ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); if (ret < 0) - goto out; + return ret; - if (ret > 0) { - ret = -ENOENT; - goto out; - } + if (ret > 0) + return -ENOENT; ret = btrfs_del_item(trans, quota_root, path); -out: - btrfs_free_path(path); return ret; } static int update_qgroup_limit_item(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_qgroup *qgroup) { - struct btrfs_path *path; + struct btrfs_root *quota_root = trans->fs_info->quota_root; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; struct extent_buffer *l; struct btrfs_qgroup_limit_item *qgroup_limit; @@ -654,12 +821,12 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); + ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); if (ret > 0) ret = -ENOENT; if (ret) - goto out; + return ret; l = path->nodes[0]; slot = path->slots[0]; @@ -670,25 +837,22 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans, btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer); btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl); - btrfs_mark_buffer_dirty(l); - -out: - btrfs_free_path(path); return ret; } static int update_qgroup_info_item(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_qgroup *qgroup) { - struct btrfs_path *path; + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_root *quota_root = fs_info->quota_root; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; struct extent_buffer *l; struct btrfs_qgroup_info_item *qgroup_info; int ret; int slot; - if (btrfs_is_testing(root->fs_info)) + if (btrfs_is_testing(fs_info)) return 0; key.objectid = 0; @@ -699,12 +863,12 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); + ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); if (ret > 0) ret = -ENOENT; if (ret) - goto out; + return ret; l = path->nodes[0]; slot = path->slots[0]; @@ -715,18 +879,14 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans, btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl); btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr); - btrfs_mark_buffer_dirty(l); - -out: - btrfs_free_path(path); return ret; } -static int update_qgroup_status_item(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, - struct btrfs_root *root) +static int update_qgroup_status_item(struct btrfs_trans_handle *trans) { - struct btrfs_path *path; + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_root *quota_root = fs_info->quota_root; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; struct extent_buffer *l; struct btrfs_qgroup_status_item *ptr; @@ -741,25 +901,22 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); + ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); if (ret > 0) ret = -ENOENT; if (ret) - goto out; + return ret; l = path->nodes[0]; slot = path->slots[0]; ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item); - 
btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags); + btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags & + BTRFS_QGROUP_STATUS_FLAGS_MASK); btrfs_set_qgroup_status_generation(l, ptr, trans->transid); btrfs_set_qgroup_status_rescan(l, ptr, fs_info->qgroup_rescan_progress.objectid); - btrfs_mark_buffer_dirty(l); - -out: - btrfs_free_path(path); return ret; } @@ -769,7 +926,7 @@ out: static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; struct extent_buffer *leaf = NULL; int ret; @@ -779,16 +936,14 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - path->leave_spinning = 1; - key.objectid = 0; - key.offset = 0; key.type = 0; + key.offset = 0; while (1) { ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) - goto out; + return ret; leaf = path->nodes[0]; nr = btrfs_header_nritems(leaf); if (!nr) @@ -801,19 +956,16 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans, path->slots[0] = 0; ret = btrfs_del_items(trans, root, path, 0, nr); if (ret) - goto out; + return ret; btrfs_release_path(path); } - ret = 0; -out: - set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags); - btrfs_free_path(path); - return ret; + + return 0; } -int btrfs_quota_enable(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info) +int btrfs_quota_enable(struct btrfs_fs_info *fs_info, + struct btrfs_ioctl_quota_ctl_args *quota_ctl_args) { struct btrfs_root *quota_root; struct btrfs_root *tree_root = fs_info->tree_root; @@ -823,34 +975,83 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans, struct btrfs_key key; struct btrfs_key found_key; struct btrfs_qgroup *qgroup = NULL; + struct btrfs_qgroup *prealloc = NULL; + struct btrfs_trans_handle *trans = NULL; + const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA); int ret = 0; int slot; + /* + * We need to have subvol_sem write locked, to prevent races between + * concurrent tasks trying to enable quotas, because we will unlock + * and relock qgroup_ioctl_lock before setting fs_info->quota_root + * and before setting BTRFS_FS_QUOTA_ENABLED. + */ + lockdep_assert_held_write(&fs_info->subvol_sem); + + if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { + btrfs_err(fs_info, + "qgroups are currently unsupported in extent tree v2"); + return -EINVAL; + } + mutex_lock(&fs_info->qgroup_ioctl_lock); - if (fs_info->quota_root) { - set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags); + if (fs_info->quota_root) goto out; - } - fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL); - if (!fs_info->qgroup_ulist) { - ret = -ENOMEM; + ret = btrfs_sysfs_add_qgroups(fs_info); + if (ret < 0) + goto out; + + /* + * Unlock qgroup_ioctl_lock before starting the transaction. This is to + * avoid lock acquisition inversion problems (reported by lockdep) between + * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we + * start a transaction. + * After we started the transaction lock qgroup_ioctl_lock again and + * check if someone else created the quota root in the meanwhile. If so, + * just return success and release the transaction handle. + * + * Also we don't need to worry about someone else calling + * btrfs_sysfs_add_qgroups() after we unlock and getting an error because + * that function returns 0 (success) when the sysfs entries already exist. 
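
/*
 * [Editor's annotation, not part of the patch] the comment above describes
 * a lock-ordering fix whose shape is "unlock, perform the blocking
 * acquisition, relock, re-validate". A compressed pthread sketch; the
 * helpers are stand-ins, not real APIs:
 */
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t ioctl_lock = PTHREAD_MUTEX_INITIALIZER;
static void *quota_root;	/* fs_info->quota_root stand-in */

static void *start_transaction(void) { return (void *)1; }	/* may block */
static void end_transaction(void *trans) { (void)trans; }
static void *create_quota_root(void *trans) { (void)trans; return (void *)2; }

static int quota_enable(void)
{
	void *trans;

	pthread_mutex_lock(&ioctl_lock);
	if (quota_root) {			/* already enabled */
		pthread_mutex_unlock(&ioctl_lock);
		return 0;
	}
	pthread_mutex_unlock(&ioctl_lock);

	/* The blocking call must not run under ioctl_lock (inversion). */
	trans = start_transaction();

	pthread_mutex_lock(&ioctl_lock);
	if (quota_root) {			/* lost the race meanwhile */
		pthread_mutex_unlock(&ioctl_lock);
		end_transaction(trans);
		return 0;
	}
	quota_root = create_quota_root(trans);
	pthread_mutex_unlock(&ioctl_lock);
	end_transaction(trans);
	return 0;
}
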
+ */ + mutex_unlock(&fs_info->qgroup_ioctl_lock); + + /* + * 1 for quota root item + * 1 for BTRFS_QGROUP_STATUS item + * + * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items + * per subvolume. However those are not currently reserved since it + * would be a lot of overkill. + */ + trans = btrfs_start_transaction(tree_root, 2); + + mutex_lock(&fs_info->qgroup_ioctl_lock); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + trans = NULL; goto out; } + if (fs_info->quota_root) + goto out; + /* * initially create the quota tree */ - quota_root = btrfs_create_tree(trans, fs_info, - BTRFS_QUOTA_TREE_OBJECTID); + quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID); if (IS_ERR(quota_root)) { ret = PTR_ERR(quota_root); + btrfs_abort_transaction(trans, ret); goto out; } path = btrfs_alloc_path(); - if (!path) { + if (unlikely(!path)) { ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); goto out_free_root; } @@ -860,21 +1061,28 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans, ret = btrfs_insert_empty_item(trans, quota_root, path, &key, sizeof(*ptr)); - if (ret) + if (unlikely(ret)) { + btrfs_abort_transaction(trans, ret); goto out_free_path; + } leaf = path->nodes[0]; ptr = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_qgroup_status_item); btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid); btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION); - fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON | - BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags); + fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON; + if (simple) { + fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE; + btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA); + btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid); + } else { + fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; + } + btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags & + BTRFS_QGROUP_STATUS_FLAGS_MASK); btrfs_set_qgroup_status_rescan(leaf, ptr, 0); - btrfs_mark_buffer_dirty(leaf); - key.objectid = 0; key.type = BTRFS_ROOT_REF_KEY; key.offset = 0; @@ -883,9 +1091,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans, ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0); if (ret > 0) goto out_add_root; - if (ret < 0) + if (unlikely(ret < 0)) { + btrfs_abort_transaction(trans, ret); goto out_free_path; - + } while (1) { slot = path->slots[0]; @@ -893,20 +1102,53 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans, btrfs_item_key_to_cpu(leaf, &found_key, slot); if (found_key.type == BTRFS_ROOT_REF_KEY) { + + /* Release locks on tree_root before we access quota_root */ + btrfs_release_path(path); + + /* We should not have a stray @prealloc pointer. 
*/ + ASSERT(prealloc == NULL); + prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); + if (unlikely(!prealloc)) { + ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); + goto out_free_path; + } + ret = add_qgroup_item(trans, quota_root, found_key.offset); - if (ret) + if (unlikely(ret)) { + btrfs_abort_transaction(trans, ret); goto out_free_path; + } - qgroup = add_qgroup_rb(fs_info, found_key.offset); - if (IS_ERR(qgroup)) { - ret = PTR_ERR(qgroup); + qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset); + prealloc = NULL; + ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); + if (unlikely(ret < 0)) { + btrfs_abort_transaction(trans, ret); + goto out_free_path; + } + ret = btrfs_search_slot_for_read(tree_root, &found_key, + path, 1, 0); + if (unlikely(ret < 0)) { + btrfs_abort_transaction(trans, ret); goto out_free_path; } + if (ret > 0) { + /* + * Shouldn't happen, but in case it does we + * don't need to do the btrfs_next_item, just + * continue. + */ + continue; + } } ret = btrfs_next_item(tree_root, path); - if (ret < 0) + if (unlikely(ret < 0)) { + btrfs_abort_transaction(trans, ret); goto out_free_path; + } if (ret) break; } @@ -914,76 +1156,244 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans, out_add_root: btrfs_release_path(path); ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID); - if (ret) + if (unlikely(ret)) { + btrfs_abort_transaction(trans, ret); goto out_free_path; + } - qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID); - if (IS_ERR(qgroup)) { - ret = PTR_ERR(qgroup); + ASSERT(prealloc == NULL); + prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); + if (!prealloc) { + ret = -ENOMEM; + goto out_free_path; + } + qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID); + prealloc = NULL; + ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); + if (unlikely(ret < 0)) { + btrfs_abort_transaction(trans, ret); goto out_free_path; } + + fs_info->qgroup_enable_gen = trans->transid; + + mutex_unlock(&fs_info->qgroup_ioctl_lock); + /* + * Commit the transaction while not holding qgroup_ioctl_lock, to avoid + * a deadlock with tasks concurrently doing other qgroup operations, such + * adding/removing qgroups or adding/deleting qgroup relations for example, + * because all qgroup operations first start or join a transaction and then + * lock the qgroup_ioctl_lock mutex. + * We are safe from a concurrent task trying to enable quotas, by calling + * this function, since we are serialized by fs_info->subvol_sem. + */ + ret = btrfs_commit_transaction(trans); + trans = NULL; + mutex_lock(&fs_info->qgroup_ioctl_lock); + if (ret) + goto out_free_path; + + /* + * Set quota enabled flag after committing the transaction, to avoid + * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot + * creation. + */ spin_lock(&fs_info->qgroup_lock); fs_info->quota_root = quota_root; - set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags); + set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); spin_unlock(&fs_info->qgroup_lock); + + /* Skip rescan for simple qgroups. */ + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) + goto out_free_path; + + ret = qgroup_rescan_init(fs_info, 0, 1); + if (!ret) { + qgroup_rescan_zero_tracking(fs_info); + fs_info->qgroup_rescan_running = true; + btrfs_queue_work(fs_info->qgroup_rescan_workers, + &fs_info->qgroup_rescan_work); + } else { + /* + * We have set both BTRFS_FS_QUOTA_ENABLED and + * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with + * -EINPROGRESS. 
That can happen because someone started the + * rescan worker by calling quota rescan ioctl before we + * attempted to initialize the rescan worker. Failure due to + * quotas disabled in the meanwhile is not possible, because + * we are holding a write lock on fs_info->subvol_sem, which + * is also acquired when disabling quotas. + * Ignore such error, and any other error would need to undo + * everything we did in the transaction we just committed. + */ + ASSERT(ret == -EINPROGRESS); + ret = 0; + } + out_free_path: btrfs_free_path(path); out_free_root: - if (ret) { - free_extent_buffer(quota_root->node); - free_extent_buffer(quota_root->commit_root); - kfree(quota_root); - } + if (ret) + btrfs_put_root(quota_root); out: - if (ret) { - ulist_free(fs_info->qgroup_ulist); - fs_info->qgroup_ulist = NULL; - } + if (ret) + btrfs_sysfs_del_qgroups(fs_info); mutex_unlock(&fs_info->qgroup_ioctl_lock); + if (ret && trans) + btrfs_end_transaction(trans); + else if (trans) + ret = btrfs_end_transaction(trans); + + /* + * At this point we either failed at allocating prealloc, or we + * succeeded and passed the ownership to it to add_qgroup_rb(). In any + * case, this needs to be NULL or there is something wrong. + */ + ASSERT(prealloc == NULL); + return ret; } -int btrfs_quota_disable(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info) +/* + * It is possible to have outstanding ordered extents which reserved bytes + * before we disabled. We need to fully flush delalloc, ordered extents, and a + * commit to ensure that we don't leak such reservations, only to have them + * come back if we re-enable. + * + * - enable simple quotas + * - reserve space + * - release it, store rsv_bytes in OE + * - disable quotas + * - enable simple quotas (qgroup rsv are all 0) + * - OE finishes + * - run delayed refs + * - free rsv_bytes, resulting in miscounting or even underflow + */ +static int flush_reservations(struct btrfs_fs_info *fs_info) +{ + int ret; + + ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false); + if (ret) + return ret; + btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL); + + return btrfs_commit_current_transaction(fs_info->tree_root); +} + +int btrfs_quota_disable(struct btrfs_fs_info *fs_info) { - struct btrfs_root *tree_root = fs_info->tree_root; - struct btrfs_root *quota_root; + struct btrfs_root *quota_root = NULL; + struct btrfs_trans_handle *trans = NULL; int ret = 0; + /* + * We need to have subvol_sem write locked to prevent races with + * snapshot creation. + */ + lockdep_assert_held_write(&fs_info->subvol_sem); + + /* + * Relocation will mess with backrefs, so make sure we have the + * cleaner_mutex held to protect us from relocate. + */ + lockdep_assert_held(&fs_info->cleaner_mutex); + mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) goto out; + + /* + * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to + * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs + * to lock that mutex while holding a transaction handle and the rescan + * worker needs to commit a transaction. + */ + mutex_unlock(&fs_info->qgroup_ioctl_lock); + + /* + * Request qgroup rescan worker to complete and wait for it. This wait + * must be done before transaction start for quota disable since it may + * deadlock with transaction by the qgroup rescan worker. 
+ */ clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); - set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags); btrfs_qgroup_wait_for_completion(fs_info, false); + + /* + * We have nothing held here and no trans handle, just return the error + * if there is one and set back the quota enabled bit since we didn't + * actually disable quotas. + */ + ret = flush_reservations(fs_info); + if (ret) { + set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); + return ret; + } + + /* + * 1 For the root item + * + * We should also reserve enough items for the quota tree deletion in + * btrfs_clean_quota_tree but this is not done. + * + * Also, we must always start a transaction without holding the mutex + * qgroup_ioctl_lock, see btrfs_quota_enable(). + */ + trans = btrfs_start_transaction(fs_info->tree_root, 1); + + mutex_lock(&fs_info->qgroup_ioctl_lock); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + trans = NULL; + set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); + goto out; + } + + if (!fs_info->quota_root) + goto out; + spin_lock(&fs_info->qgroup_lock); quota_root = fs_info->quota_root; fs_info->quota_root = NULL; fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; + fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE; + fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT; spin_unlock(&fs_info->qgroup_lock); btrfs_free_qgroup_config(fs_info); ret = btrfs_clean_quota_tree(trans, quota_root); - if (ret) + if (unlikely(ret)) { + btrfs_abort_transaction(trans, ret); goto out; + } - ret = btrfs_del_root(trans, tree_root, "a_root->root_key); - if (ret) + ret = btrfs_del_root(trans, "a_root->root_key); + if (unlikely(ret)) { + btrfs_abort_transaction(trans, ret); goto out; + } + spin_lock(&fs_info->trans_lock); list_del("a_root->dirty_list); + spin_unlock(&fs_info->trans_lock); btrfs_tree_lock(quota_root->node); - clean_tree_block(fs_info, quota_root->node); + btrfs_clear_buffer_dirty(trans, quota_root->node); btrfs_tree_unlock(quota_root->node); - btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1); + ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root), + quota_root->node, 0, 1); + + if (ret < 0) + btrfs_abort_transaction(trans, ret); - free_extent_buffer(quota_root->node); - free_extent_buffer(quota_root->commit_root); - kfree(quota_root); out: + btrfs_put_root(quota_root); mutex_unlock(&fs_info->qgroup_ioctl_lock); + if (ret && trans) + btrfs_end_transaction(trans); + else if (trans) + ret = btrfs_commit_transaction(trans); return ret; } @@ -994,93 +1404,76 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info, list_add(&qgroup->dirty, &fs_info->dirty_qgroups); } -static void report_reserved_underflow(struct btrfs_fs_info *fs_info, - struct btrfs_qgroup *qgroup, - u64 num_bytes) +static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup) { -#ifdef CONFIG_BTRFS_DEBUG - WARN_ON(qgroup->reserved < num_bytes); - btrfs_debug(fs_info, - "qgroup %llu reserved space underflow, have: %llu, to free: %llu", - qgroup->qgroupid, qgroup->reserved, num_bytes); -#endif - qgroup->reserved = 0; + if (!list_empty(&qgroup->iterator)) + return; + + list_add_tail(&qgroup->iterator, head); +} + +static void qgroup_iterator_clean(struct list_head *head) +{ + while (!list_empty(head)) { + struct btrfs_qgroup *qgroup; + + qgroup = list_first_entry(head, struct btrfs_qgroup, iterator); + list_del_init(&qgroup->iterator); + } } + /* - * The easy accounting, if we are adding/removing the only ref for an extent - * then this qgroup and all 
of the parent qgroups get their reference and - * exclusive counts adjusted. + * The easy accounting, we're updating qgroup relationship whose child qgroup + * only has exclusive extents. + * + * In this case, all exclusive extents will also be exclusive for parent, so + * excl/rfer just get added/removed. + * + * So is qgroup reservation space, which should also be added/removed to + * parent. + * Or when child tries to release reservation space, parent will underflow its + * reservation (for relationship adding case). * * Caller should hold fs_info->qgroup_lock. */ -static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, - struct ulist *tmp, u64 ref_root, - u64 num_bytes, int sign) +static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root, + struct btrfs_qgroup *src, int sign) { struct btrfs_qgroup *qgroup; - struct btrfs_qgroup_list *glist; - struct ulist_node *unode; - struct ulist_iterator uiter; + LIST_HEAD(qgroup_list); + u64 num_bytes = src->excl; + u64 num_bytes_cmpr = src->excl_cmpr; int ret = 0; qgroup = find_qgroup_rb(fs_info, ref_root); if (!qgroup) goto out; - qgroup->rfer += sign * num_bytes; - qgroup->rfer_cmpr += sign * num_bytes; - - WARN_ON(sign < 0 && qgroup->excl < num_bytes); - qgroup->excl += sign * num_bytes; - qgroup->excl_cmpr += sign * num_bytes; - if (sign > 0) { - trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes); - if (qgroup->reserved < num_bytes) - report_reserved_underflow(fs_info, qgroup, num_bytes); - else - qgroup->reserved -= num_bytes; - } - - qgroup_dirty(fs_info, qgroup); - - /* Get all of the parent groups that contain this qgroup */ - list_for_each_entry(glist, &qgroup->groups, next_group) { - ret = ulist_add(tmp, glist->group->qgroupid, - qgroup_to_aux(glist->group), GFP_ATOMIC); - if (ret < 0) - goto out; - } + qgroup_iterator_add(&qgroup_list, qgroup); + list_for_each_entry(qgroup, &qgroup_list, iterator) { + struct btrfs_qgroup_list *glist; - /* Iterate all of the parents and adjust their reference counts */ - ULIST_ITER_INIT(&uiter); - while ((unode = ulist_next(tmp, &uiter))) { - qgroup = unode_aux_to_qgroup(unode); qgroup->rfer += sign * num_bytes; - qgroup->rfer_cmpr += sign * num_bytes; + qgroup->rfer_cmpr += sign * num_bytes_cmpr; + WARN_ON(sign < 0 && qgroup->excl < num_bytes); + WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr); qgroup->excl += sign * num_bytes; - if (sign > 0) { - trace_qgroup_update_reserve(fs_info, qgroup, - -(s64)num_bytes); - if (qgroup->reserved < num_bytes) - report_reserved_underflow(fs_info, qgroup, - num_bytes); - else - qgroup->reserved -= num_bytes; - } - qgroup->excl_cmpr += sign * num_bytes; + qgroup->excl_cmpr += sign * num_bytes_cmpr; + + if (sign > 0) + qgroup_rsv_add_by_qgroup(fs_info, qgroup, src); + else + qgroup_rsv_release_by_qgroup(fs_info, qgroup, src); qgroup_dirty(fs_info, qgroup); - /* Add any parents of the parents */ - list_for_each_entry(glist, &qgroup->groups, next_group) { - ret = ulist_add(tmp, glist->group->qgroupid, - qgroup_to_aux(glist->group), GFP_ATOMIC); - if (ret < 0) - goto out; - } + /* Append parent qgroups to @qgroup_list. */ + list_for_each_entry(glist, &qgroup->groups, next_group) + qgroup_iterator_add(&qgroup_list, glist->group); } ret = 0; out: + qgroup_iterator_clean(&qgroup_list); return ret; } @@ -1097,24 +1490,19 @@ out: * Return < 0 for other error. 
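
/*
 * [Editor's annotation, not part of the patch] the ulist-based parent walk
 * was replaced above by an intrusive iterator list:
 * list_empty(&qgroup->iterator) is an O(1) "already queued?" test with no
 * allocation, and appending while iterating turns the loop into a
 * breadth-first walk over all ancestors. A simplified model with a
 * 'queued' flag standing in for the embedded list_head:
 */
#include <stdbool.h>
#include <stddef.h>

struct qg_node {
	long rfer;
	struct qg_node *parent;		/* single parent, for brevity */
	struct qg_node *next_iter;	/* embedded queue linkage */
	bool queued;			/* stands in for !list_empty() */
};

static void propagate(struct qg_node *src, long delta)
{
	struct qg_node *head = src, **tail = &src->next_iter;
	struct qg_node *cur;

	src->queued = true;
	src->next_iter = NULL;

	/* Consume the queue; ancestors appended here get visited too. */
	for (cur = head; cur; cur = cur->next_iter) {
		cur->rfer += delta;
		if (cur->parent && !cur->parent->queued) {
			cur->parent->queued = true;
			cur->parent->next_iter = NULL;
			*tail = cur->parent;
			tail = &cur->parent->next_iter;
		}
	}

	/* qgroup_iterator_clean() equivalent: reset for the next walk. */
	for (cur = head; cur;) {
		struct qg_node *next = cur->next_iter;

		cur->queued = false;
		cur->next_iter = NULL;
		cur = next;
	}
}
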
*/ static int quick_update_accounting(struct btrfs_fs_info *fs_info, - struct ulist *tmp, u64 src, u64 dst, - int sign) + u64 src, u64 dst, int sign) { struct btrfs_qgroup *qgroup; int ret = 1; - int err = 0; qgroup = find_qgroup_rb(fs_info, src); if (!qgroup) goto out; if (qgroup->excl == qgroup->rfer) { - ret = 0; - err = __qgroup_excl_accounting(fs_info, tmp, dst, - qgroup->excl, sign); - if (err < 0) { - ret = err; + ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign); + if (ret < 0) goto out; - } + ret = 0; } out: if (ret) @@ -1122,28 +1510,30 @@ out: return ret; } -int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 src, u64 dst) +/* + * Add relation between @src and @dst qgroup. The @prealloc is allocated by the + * callers and transferred here (either used or freed on error). + */ +int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst, + struct btrfs_qgroup_list *prealloc) { - struct btrfs_root *quota_root; + struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_qgroup *parent; struct btrfs_qgroup *member; struct btrfs_qgroup_list *list; - struct ulist *tmp; int ret = 0; + ASSERT(prealloc); + /* Check the level of src and dst first */ - if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) + if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) { + kfree(prealloc); return -EINVAL; - - tmp = ulist_alloc(GFP_KERNEL); - if (!tmp) - return -ENOMEM; + } mutex_lock(&fs_info->qgroup_ioctl_lock); - quota_root = fs_info->quota_root; - if (!quota_root) { - ret = -EINVAL; + if (!fs_info->quota_root) { + ret = -ENOTCONN; goto out; } member = find_qgroup_rb(fs_info, src); @@ -1161,138 +1551,204 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, } } - ret = add_qgroup_relation_item(trans, quota_root, src, dst); + ret = add_qgroup_relation_item(trans, src, dst); if (ret) goto out; - ret = add_qgroup_relation_item(trans, quota_root, dst, src); + ret = add_qgroup_relation_item(trans, dst, src); if (ret) { - del_qgroup_relation_item(trans, quota_root, src, dst); + del_qgroup_relation_item(trans, src, dst); goto out; } spin_lock(&fs_info->qgroup_lock); - ret = add_relation_rb(fs_info, src, dst); + ret = __add_relation_rb(prealloc, member, parent); + prealloc = NULL; if (ret < 0) { spin_unlock(&fs_info->qgroup_lock); goto out; } - ret = quick_update_accounting(fs_info, tmp, src, dst, 1); + ret = quick_update_accounting(fs_info, src, dst, 1); spin_unlock(&fs_info->qgroup_lock); out: + kfree(prealloc); mutex_unlock(&fs_info->qgroup_ioctl_lock); - ulist_free(tmp); return ret; } -static int __del_qgroup_relation(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 src, u64 dst) +static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, + u64 dst) { - struct btrfs_root *quota_root; + struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_qgroup *parent; struct btrfs_qgroup *member; struct btrfs_qgroup_list *list; - struct ulist *tmp; + bool found = false; int ret = 0; - int err; + int ret2; - tmp = ulist_alloc(GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - quota_root = fs_info->quota_root; - if (!quota_root) { - ret = -EINVAL; + if (!fs_info->quota_root) { + ret = -ENOTCONN; goto out; } member = find_qgroup_rb(fs_info, src); parent = find_qgroup_rb(fs_info, dst); - if (!member || !parent) { - ret = -EINVAL; - goto out; - } + /* + * The parent/member pair doesn't exist, then try to delete the dead + * relation items only. 
+ */ + if (!member || !parent) + goto delete_item; /* check if such qgroup relation exist firstly */ list_for_each_entry(list, &member->groups, next_group) { - if (list->group == parent) - goto exist; + if (list->group == parent) { + found = true; + break; + } } - ret = -ENOENT; - goto out; -exist: - ret = del_qgroup_relation_item(trans, quota_root, src, dst); - err = del_qgroup_relation_item(trans, quota_root, dst, src); - if (err && !ret) - ret = err; - spin_lock(&fs_info->qgroup_lock); - del_relation_rb(fs_info, src, dst); - ret = quick_update_accounting(fs_info, tmp, src, dst, -1); - spin_unlock(&fs_info->qgroup_lock); +delete_item: + ret = del_qgroup_relation_item(trans, src, dst); + if (ret < 0 && ret != -ENOENT) + goto out; + ret2 = del_qgroup_relation_item(trans, dst, src); + if (ret2 < 0 && ret2 != -ENOENT) + goto out; + + /* At least one deletion succeeded, return 0 */ + if (!ret || !ret2) + ret = 0; + + if (found) { + spin_lock(&fs_info->qgroup_lock); + del_relation_rb(fs_info, src, dst); + ret = quick_update_accounting(fs_info, src, dst, -1); + spin_unlock(&fs_info->qgroup_lock); + } out: - ulist_free(tmp); return ret; } -int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 src, u64 dst) +int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, + u64 dst) { + struct btrfs_fs_info *fs_info = trans->fs_info; int ret = 0; mutex_lock(&fs_info->qgroup_ioctl_lock); - ret = __del_qgroup_relation(trans, fs_info, src, dst); + ret = __del_qgroup_relation(trans, src, dst); mutex_unlock(&fs_info->qgroup_ioctl_lock); return ret; } -int btrfs_create_qgroup(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 qgroupid) +int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) { + struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *quota_root; struct btrfs_qgroup *qgroup; + struct btrfs_qgroup *prealloc = NULL; int ret = 0; mutex_lock(&fs_info->qgroup_ioctl_lock); - quota_root = fs_info->quota_root; - if (!quota_root) { - ret = -EINVAL; + if (!fs_info->quota_root) { + ret = -ENOTCONN; goto out; } + quota_root = fs_info->quota_root; qgroup = find_qgroup_rb(fs_info, qgroupid); if (qgroup) { ret = -EEXIST; goto out; } + prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); + if (!prealloc) { + ret = -ENOMEM; + goto out; + } + ret = add_qgroup_item(trans, quota_root, qgroupid); if (ret) goto out; spin_lock(&fs_info->qgroup_lock); - qgroup = add_qgroup_rb(fs_info, qgroupid); + qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid); spin_unlock(&fs_info->qgroup_lock); + prealloc = NULL; - if (IS_ERR(qgroup)) - ret = PTR_ERR(qgroup); + ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); out: mutex_unlock(&fs_info->qgroup_ioctl_lock); + /* + * At this point we either failed at allocating prealloc, or we + * succeeded and passed the ownership to it to add_qgroup_rb(). In any + * case, this needs to be NULL or there is something wrong. + */ + ASSERT(prealloc == NULL); return ret; } -int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 qgroupid) +/* + * Return 0 if we can not delete the qgroup (not empty or has children etc). + * Return >0 if we can delete the qgroup. + * Return <0 for other errors during tree search. 
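
/*
 * [Editor's annotation, not part of the patch] __del_qgroup_relation()
 * above now deletes both directions of the relation and treats "already
 * gone" as acceptable: only a real error fails the call, and it succeeds
 * if at least one of the two items was removed. A sketch of that
 * result-combining logic; del_item() is a hypothetical helper returning
 * 0, -ENOENT, or another negative errno:
 */
#include <errno.h>

static int del_item(unsigned long long a, unsigned long long b)
{
	(void)a; (void)b;
	return -ENOENT;		/* stub */
}

static int del_relation(unsigned long long src, unsigned long long dst)
{
	int ret = del_item(src, dst);
	int ret2;

	if (ret < 0 && ret != -ENOENT)
		return ret;	/* real failure on the first leg */

	ret2 = del_item(dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		return ret2;	/* real failure on the second leg */

	if (ret == 0 || ret2 == 0)
		return 0;	/* at least one leg existed and is gone */

	return -ENOENT;		/* neither direction was present */
}
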
+ */ +static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup) { - struct btrfs_root *quota_root; + struct btrfs_key key; + BTRFS_PATH_AUTO_FREE(path); + + /* + * Squota would never be inconsistent, but there can still be case + * where a dropped subvolume still has qgroup numbers, and squota + * relies on such qgroup for future accounting. + * + * So for squota, do not allow dropping any non-zero qgroup. + */ + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && + (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr)) + return 0; + + /* For higher level qgroup, we can only delete it if it has no child. */ + if (btrfs_qgroup_level(qgroup->qgroupid)) { + if (!list_empty(&qgroup->members)) + return 0; + return 1; + } + + /* + * For level-0 qgroups, we can only delete it if it has no subvolume + * for it. + * This means even a subvolume is unlinked but not yet fully dropped, + * we can not delete the qgroup. + */ + key.objectid = qgroup->qgroupid; + key.type = BTRFS_ROOT_ITEM_KEY; + key.offset = -1ULL; + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + /* + * The @ret from btrfs_find_root() exactly matches our definition for + * the return value, thus can be returned directly. + */ + return btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL); +} + +int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_qgroup *qgroup; struct btrfs_qgroup_list *list; int ret = 0; mutex_lock(&fs_info->qgroup_ioctl_lock); - quota_root = fs_info->quota_root; - if (!quota_root) { - ret = -EINVAL; + if (!fs_info->quota_root) { + ret = -ENOTCONN; goto out; } @@ -1300,38 +1756,128 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, if (!qgroup) { ret = -ENOENT; goto out; - } else { - /* check if there are no children of this qgroup */ - if (!list_empty(&qgroup->members)) { - ret = -EBUSY; - goto out; - } } - ret = del_qgroup_item(trans, quota_root, qgroupid); + + ret = can_delete_qgroup(fs_info, qgroup); + if (ret < 0) + goto out; + if (ret == 0) { + ret = -EBUSY; + goto out; + } + + /* Check if there are no children of this qgroup */ + if (!list_empty(&qgroup->members)) { + ret = -EBUSY; + goto out; + } + + ret = del_qgroup_item(trans, qgroupid); + if (ret && ret != -ENOENT) + goto out; while (!list_empty(&qgroup->groups)) { list = list_first_entry(&qgroup->groups, struct btrfs_qgroup_list, next_group); - ret = __del_qgroup_relation(trans, fs_info, - qgroupid, - list->group->qgroupid); + ret = __del_qgroup_relation(trans, qgroupid, + list->group->qgroupid); if (ret) goto out; } spin_lock(&fs_info->qgroup_lock); + /* + * Warn on reserved space. The subvolume should has no child nor + * corresponding subvolume. + * Thus its reserved space should all be zero, no matter if qgroup + * is consistent or the mode. 
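
/*
 * [Editor's annotation, not part of the patch] can_delete_qgroup() above
 * leans on the lookup's tri-state convention: <0 error, 0 "root item
 * found" (the subvolume still exists, keep the qgroup), >0 "not found"
 * (safe to delete). A compact model of layering the policy checks on top
 * of such a lookup; find_root() and the level test are stand-ins:
 */
#include <stdint.h>

static int find_root(uint64_t id)
{
	(void)id;
	return 1;		/* stub: pretend no root item exists */
}

struct qg {
	uint64_t id;
	uint64_t rfer, excl;
	int nr_children;
};

/* Returns 0 if the qgroup must stay, >0 if deletable, <0 on error. */
static int can_delete(const struct qg *qg, int simple_mode)
{
	/* squota keeps any qgroup that still carries numbers */
	if (simple_mode && (qg->rfer || qg->excl))
		return 0;

	/* higher-level qgroups only need to be childless */
	if (qg->id >> 48)	/* level bits, as btrfs_qgroup_level() */
		return qg->nr_children ? 0 : 1;

	/* level 0: deletable only when no root item remains */
	return find_root(qg->id);
}
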
+ */ + if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] || + qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] || + qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) { + DEBUG_WARN(); + btrfs_warn_rl(fs_info, +"to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu", + btrfs_qgroup_level(qgroup->qgroupid), + btrfs_qgroup_subvolid(qgroup->qgroupid), + qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA], + qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC], + qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]); + + } + /* + * The same for rfer/excl numbers, but that's only if our qgroup is + * consistent and if it's in regular qgroup mode. + * For simple mode it's not as accurate thus we can hit non-zero values + * very frequently. + */ + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL && + !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) { + if (qgroup->rfer || qgroup->excl || + qgroup->rfer_cmpr || qgroup->excl_cmpr) { + DEBUG_WARN(); + qgroup_mark_inconsistent(fs_info, + "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu", + btrfs_qgroup_level(qgroup->qgroupid), + btrfs_qgroup_subvolid(qgroup->qgroupid), + qgroup->rfer, qgroup->rfer_cmpr, + qgroup->excl, qgroup->excl_cmpr); + } + } del_qgroup_rb(fs_info, qgroupid); spin_unlock(&fs_info->qgroup_lock); + + /* + * Remove the qgroup from sysfs now without holding the qgroup_lock + * spinlock, since the sysfs_remove_group() function needs to take + * the mutex kernfs_mutex through kernfs_remove_by_name_ns(). + */ + btrfs_sysfs_del_one_qgroup(fs_info, qgroup); + kfree(qgroup); out: mutex_unlock(&fs_info->qgroup_ioctl_lock); return ret; } -int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 qgroupid, +int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid) +{ + struct btrfs_trans_handle *trans; + int ret; + + if (!btrfs_is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || + !fs_info->quota_root) + return 0; + + /* + * Commit current transaction to make sure all the rfer/excl numbers + * get updated. + */ + ret = btrfs_commit_current_transaction(fs_info->quota_root); + if (ret < 0) + return ret; + + /* Start new trans to delete the qgroup info and limit items. */ + trans = btrfs_start_transaction(fs_info->quota_root, 2); + if (IS_ERR(trans)) + return PTR_ERR(trans); + ret = btrfs_remove_qgroup(trans, subvolid); + btrfs_end_transaction(trans); + /* + * It's squota and the subvolume still has numbers needed for future + * accounting, in this case we can not delete it. Just skip it. + * + * Or the qgroup is already removed by a qgroup rescan. For both cases we're + * safe to ignore them. + */ + if (ret == -EBUSY || ret == -ENOENT) + ret = 0; + return ret; +} + +int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid, struct btrfs_qgroup_limit *limit) { - struct btrfs_root *quota_root; + struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_qgroup *qgroup; int ret = 0; /* Sometimes we would want to clear the limit on this qgroup. 
@@ -1341,9 +1887,8 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, const u64 CLEAR_VALUE = -1; mutex_lock(&fs_info->qgroup_ioctl_lock); - quota_root = fs_info->quota_root; - if (!quota_root) { - ret = -EINVAL; + if (!fs_info->quota_root) { + ret = -ENOTCONN; goto out; } @@ -1394,57 +1939,134 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, spin_unlock(&fs_info->qgroup_lock); - ret = update_qgroup_limit_item(trans, quota_root, qgroup); - if (ret) { - fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - btrfs_info(fs_info, "unable to update quota limit for %llu", - qgroupid); - } + ret = update_qgroup_limit_item(trans, qgroup); + if (ret) + qgroup_mark_inconsistent(fs_info, "qgroup item update error %d", ret); out: mutex_unlock(&fs_info->qgroup_ioctl_lock); return ret; } +/* + * Inform qgroup to trace one dirty extent, its info is recorded in @record. + * So qgroup can account it at transaction committing time. + * + * No lock version, caller must acquire delayed ref lock and allocated memory, + * then call btrfs_qgroup_trace_extent_post() after exiting lock context. + * + * Return 0 for success insert + * Return >0 for existing record, caller can free @record safely. + * Return <0 for insertion failure, caller can free @record safely. + */ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, - struct btrfs_delayed_ref_root *delayed_refs, - struct btrfs_qgroup_extent_record *record) + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_qgroup_extent_record *record, + u64 bytenr) { - struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node; - struct rb_node *parent_node = NULL; - struct btrfs_qgroup_extent_record *entry; - u64 bytenr = record->bytenr; - - assert_spin_locked(&delayed_refs->lock); - trace_btrfs_qgroup_trace_extent(fs_info, record); - - while (*p) { - parent_node = *p; - entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, - node); - if (bytenr < entry->bytenr) - p = &(*p)->rb_left; - else if (bytenr > entry->bytenr) - p = &(*p)->rb_right; - else - return 1; + struct btrfs_qgroup_extent_record *existing, *ret; + const unsigned long index = (bytenr >> fs_info->sectorsize_bits); + + if (!btrfs_qgroup_full_accounting(fs_info)) + return 1; + +#if BITS_PER_LONG == 32 + if (bytenr >= MAX_LFS_FILESIZE) { + btrfs_err_rl(fs_info, +"qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit", + bytenr); + btrfs_err_32bit_limit(fs_info); + return -EOVERFLOW; + } +#endif + + trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr); + + xa_lock(&delayed_refs->dirty_extents); + existing = xa_load(&delayed_refs->dirty_extents, index); + if (existing) { + if (record->data_rsv && !existing->data_rsv) { + existing->data_rsv = record->data_rsv; + existing->data_rsv_refroot = record->data_rsv_refroot; + } + xa_unlock(&delayed_refs->dirty_extents); + return 1; + } + + ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC); + xa_unlock(&delayed_refs->dirty_extents); + if (xa_is_err(ret)) { + qgroup_mark_inconsistent(fs_info, "xarray insert error: %d", xa_err(ret)); + return xa_err(ret); } - rb_link_node(&record->node, parent_node, p); - rb_insert_color(&record->node, &delayed_refs->dirty_extent_root); return 0; } -int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, - struct btrfs_qgroup_extent_record *qrecord) +/* + * Post handler after qgroup_trace_extent_nolock(). 
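/*
 * A minimal standalone sketch (plain C, simplified struct, not the kernel
 * code) of the two ideas in btrfs_qgroup_trace_extent_nolock() above:
 * records are keyed by bytenr >> sectorsize_bits, and on a collision the
 * existing record wins, only adopting the newcomer's data reservation when
 * it has none of its own. On 32-bit kernels the xarray index is an
 * unsigned long, which is why extents past the page cache limit (roughly
 * 16 TiB with 4 KiB sectors) are rejected with -EOVERFLOW.
 */
#include <stdint.h>
#include <stdio.h>

struct qrecord {
	uint64_t num_bytes;
	uint64_t data_rsv;		/* reserved data bytes, freed after accounting */
	uint64_t data_rsv_refroot;	/* root the reservation belongs to */
};

/* Collision rule: the existing record stays, but may adopt the reservation. */
static void merge_records(struct qrecord *existing, const struct qrecord *rec)
{
	if (rec->data_rsv && !existing->data_rsv) {
		existing->data_rsv = rec->data_rsv;
		existing->data_rsv_refroot = rec->data_rsv_refroot;
	}
}

int main(void)
{
	const unsigned int sectorsize_bits = 12;	/* 4 KiB sectors */
	uint64_t bytenr = 1 << 20;			/* extent start: 1 MiB */
	unsigned long index = (unsigned long)(bytenr >> sectorsize_bits);
	struct qrecord existing = { .num_bytes = 4096 };
	struct qrecord incoming = { .num_bytes = 4096, .data_rsv = 4096,
				    .data_rsv_refroot = 256 };

	merge_records(&existing, &incoming);
	printf("index %lu adopted data_rsv %llu\n", index,
	       (unsigned long long)existing.data_rsv);
	return 0;
}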
+ * + * NOTE: Current qgroup does the expensive backref walk at transaction + * committing time with TRANS_STATE_COMMIT_DOING, this blocks incoming + * new transaction. + * This is designed to allow btrfs_find_all_roots() to get correct new_roots + * result. + * + * However for old_roots there is no need to do backref walk at that time, + * since we search commit roots to walk backref and result will always be + * correct. + * + * Due to the nature of no lock version, we can't do backref there. + * So we must call btrfs_qgroup_trace_extent_post() after exiting + * spinlock context. + * + * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result + * using current root, then we can move all expensive backref walk out of + * transaction committing, but not now as qgroup accounting will be wrong again. + */ +int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans, + struct btrfs_qgroup_extent_record *qrecord, + u64 bytenr) { - struct ulist *old_root; - u64 bytenr = qrecord->bytenr; + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_backref_walk_ctx ctx = { + .bytenr = bytenr, + .fs_info = fs_info, + }; int ret; - ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root); - if (ret < 0) - return ret; + if (!btrfs_qgroup_full_accounting(fs_info)) + return 0; + /* + * We are always called in a context where we are already holding a + * transaction handle. Often we are called when adding a data delayed + * reference from btrfs_truncate_inode_items() (truncating or unlinking), + * in which case we will be holding a write lock on extent buffer from a + * subvolume tree. In this case we can't allow btrfs_find_all_roots() to + * acquire fs_info->commit_root_sem, because that is a higher level lock + * that must be acquired before locking any extent buffers. + * + * So we want btrfs_find_all_roots() to not acquire the commit_root_sem + * but we can't pass it a non-NULL transaction handle, because otherwise + * it would not use commit roots and would lock extent buffers, causing + * a deadlock if it ends up trying to read lock the same extent buffer + * that was previously write locked at btrfs_truncate_inode_items(). + * + * So pass a NULL transaction handle to btrfs_find_all_roots() and + * explicitly tell it to not acquire the commit_root_sem - if we are + * holding a transaction handle we don't need its protection. + */ + ASSERT(trans != NULL); + + if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING) + return 0; + + ret = btrfs_find_all_roots(&ctx, true); + if (ret < 0) { + qgroup_mark_inconsistent(fs_info, + "error accounting new delayed refs extent: %d", ret); + return 0; + } /* * Here we don't need to get the lock of @@ -1453,46 +2075,65 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, * * So modifying qrecord->old_roots is safe here */ - qrecord->old_roots = old_root; + qrecord->old_roots = ctx.roots; return 0; } -int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, - gfp_t gfp_flag) +/* + * Inform qgroup to trace one dirty extent, specified by @bytenr and + * @num_bytes. + * So qgroup can account it at commit trans time. + * + * Better encapsulated version, with memory allocation and backref walk for + * commit roots. + * So this can sleep. + * + * Return 0 if the operation is done. 
+ * Return <0 for error, like memory allocation failure or invalid parameter + * (NULL trans) + */ +int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr, + u64 num_bytes) { + struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_qgroup_extent_record *record; - struct btrfs_delayed_ref_root *delayed_refs; + struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs; + const unsigned long index = (bytenr >> fs_info->sectorsize_bits); int ret; - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) - || bytenr == 0 || num_bytes == 0) + if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0) return 0; - if (WARN_ON(trans == NULL)) - return -EINVAL; - record = kmalloc(sizeof(*record), gfp_flag); + record = kzalloc(sizeof(*record), GFP_NOFS); if (!record) return -ENOMEM; - delayed_refs = &trans->transaction->delayed_refs; - record->bytenr = bytenr; + if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) { + kfree(record); + return -ENOMEM; + } + record->num_bytes = num_bytes; - record->old_roots = NULL; - spin_lock(&delayed_refs->lock); - ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record); - spin_unlock(&delayed_refs->lock); - if (ret > 0) { + ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr); + if (ret) { + /* Clean up if insertion fails or item exists. */ + xa_release(&delayed_refs->dirty_extents, index); kfree(record); return 0; } - return btrfs_qgroup_trace_extent_post(fs_info, record); + return btrfs_qgroup_trace_extent_post(trans, record, bytenr); } +/* + * Inform qgroup to trace all leaf items of data + * + * Return 0 for success + * Return <0 for error(ENOMEM) + */ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, struct extent_buffer *eb) { + struct btrfs_fs_info *fs_info = trans->fs_info; int nr = btrfs_header_nritems(eb); int i, extent_type, ret; struct btrfs_key key; @@ -1500,7 +2141,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, u64 bytenr, num_bytes; /* We can be called directly from walk_up_proc() */ - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) + if (!btrfs_qgroup_full_accounting(fs_info)) return 0; for (i = 0; i < nr; i++) { @@ -1522,8 +2163,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); - ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr, - num_bytes, GFP_NOFS); + ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes); if (ret) return ret; } @@ -1592,32 +2232,385 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level) return 0; } +/* + * Helper function to trace a subtree tree block swap. + * + * The swap will happen in highest tree block, but there may be a lot of + * tree blocks involved. + * + * For example: + * OO = Old tree blocks + * NN = New tree blocks allocated during balance + * + * File tree (257) Reloc tree for 257 + * L2 OO NN + * / \ / \ + * L1 OO OO (a) OO NN (a) + * / \ / \ / \ / \ + * L0 OO OO OO OO OO OO NN NN + * (b) (c) (b) (c) + * + * When calling qgroup_trace_extent_swap(), we will pass: + * @src_eb = OO(a) + * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ] + * @dst_level = 0 + * @root_level = 1 + * + * In that case, qgroup_trace_extent_swap() will search from OO(a) to + * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty. 
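/*
 * A standalone sketch of the reserve-then-publish pattern that
 * btrfs_qgroup_trace_extent() uses above: memory is obtained with a
 * sleeping allocation before the lock is taken, the record is published
 * under the lock, and the preallocation is dropped if another inserter won
 * the race. A pthread mutex stands in for the xarray lock; names are
 * illustrative.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;	/* models one xarray index */

/* Returns 0 on insert, 1 if an entry already existed, -1 on allocation failure. */
static int publish_record(size_t size)
{
	void *rec = calloc(1, size);	/* allocate while sleeping is allowed */

	if (!rec)
		return -1;
	pthread_mutex_lock(&lock);
	if (slot) {				/* lost the race */
		pthread_mutex_unlock(&lock);
		free(rec);			/* release the preallocation */
		return 1;
	}
	slot = rec;				/* publish the record */
	pthread_mutex_unlock(&lock);
	return 0;
}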
+ * + * The main work of qgroup_trace_extent_swap() can be split into 3 parts: + * + * 1) Tree search from @src_eb + * It should acts as a simplified btrfs_search_slot(). + * The key for search can be extracted from @dst_path->nodes[dst_level] + * (first key). + * + * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty + * NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty. + * They should be marked during previous (@dst_level = 1) iteration. + * + * 3) Mark file extents in leaves dirty + * We don't have good way to pick out new file extents only. + * So we still follow the old method by scanning all file extents in + * the leave. + * + * This function can free us from keeping two paths, thus later we only need + * to care about how to iterate all new tree blocks in reloc tree. + */ +static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans, + struct extent_buffer *src_eb, + struct btrfs_path *dst_path, + int dst_level, int root_level, + bool trace_leaf) +{ + struct btrfs_key key; + BTRFS_PATH_AUTO_FREE(src_path); + struct btrfs_fs_info *fs_info = trans->fs_info; + u32 nodesize = fs_info->nodesize; + int cur_level = root_level; + int ret; + + BUG_ON(dst_level > root_level); + /* Level mismatch */ + if (btrfs_header_level(src_eb) != root_level) + return -EINVAL; + + src_path = btrfs_alloc_path(); + if (!src_path) + return -ENOMEM; + + if (dst_level) + btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0); + else + btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0); + + /* For src_path */ + refcount_inc(&src_eb->refs); + src_path->nodes[root_level] = src_eb; + src_path->slots[root_level] = dst_path->slots[root_level]; + src_path->locks[root_level] = 0; + + /* A simplified version of btrfs_search_slot() */ + while (cur_level >= dst_level) { + struct btrfs_key src_key; + struct btrfs_key dst_key; + + if (src_path->nodes[cur_level] == NULL) { + struct extent_buffer *eb; + int parent_slot; + + eb = src_path->nodes[cur_level + 1]; + parent_slot = src_path->slots[cur_level + 1]; + + eb = btrfs_read_node_slot(eb, parent_slot); + if (IS_ERR(eb)) + return PTR_ERR(eb); + + src_path->nodes[cur_level] = eb; + + btrfs_tree_read_lock(eb); + src_path->locks[cur_level] = BTRFS_READ_LOCK; + } + + src_path->slots[cur_level] = dst_path->slots[cur_level]; + if (cur_level) { + btrfs_node_key_to_cpu(dst_path->nodes[cur_level], + &dst_key, dst_path->slots[cur_level]); + btrfs_node_key_to_cpu(src_path->nodes[cur_level], + &src_key, src_path->slots[cur_level]); + } else { + btrfs_item_key_to_cpu(dst_path->nodes[cur_level], + &dst_key, dst_path->slots[cur_level]); + btrfs_item_key_to_cpu(src_path->nodes[cur_level], + &src_key, src_path->slots[cur_level]); + } + /* Content mismatch, something went wrong */ + if (btrfs_comp_cpu_keys(&dst_key, &src_key)) + return -ENOENT; + cur_level--; + } + + /* + * Now both @dst_path and @src_path have been populated, record the tree + * blocks for qgroup accounting. 
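/*
 * The lockstep walk above only works if both paths land on the same key at
 * every level. A standalone model of the (objectid, type, offset) ordering
 * it relies on, mirroring the semantics of btrfs_comp_cpu_keys(); the
 * struct layout here is illustrative.
 */
#include <stdint.h>

struct key {
	uint64_t objectid;
	uint8_t type;
	uint64_t offset;
};

/* Returns <0, 0 or >0; nonzero means the two trees diverged, and the swap
 * walk above bails out with -ENOENT. */
static int comp_keys(const struct key *a, const struct key *b)
{
	if (a->objectid != b->objectid)
		return a->objectid < b->objectid ? -1 : 1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	return 0;
}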
+ */ + ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start, + nodesize); + if (ret < 0) + return ret; + ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start, + nodesize); + if (ret < 0) + return ret; + + /* Record leaf file extents */ + if (dst_level == 0 && trace_leaf) { + ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]); + if (ret < 0) + return ret; + ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]); + } + + return ret; +} + +/* + * Helper function to do recursive generation-aware depth-first search, to + * locate all new tree blocks in a subtree of reloc tree. + * + * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot) + * reloc tree + * L2 NN (a) + * / \ + * L1 OO NN (b) + * / \ / \ + * L0 OO OO OO NN + * (c) (d) + * If we pass: + * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ], + * @cur_level = 1 + * @root_level = 1 + * + * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace + * above tree blocks along with their counter parts in file tree. + * While during search, old tree blocks OO(c) will be skipped as tree block swap + * won't affect OO(c). + */ +static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans, + struct extent_buffer *src_eb, + struct btrfs_path *dst_path, + int cur_level, int root_level, + u64 last_snapshot, bool trace_leaf) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct extent_buffer *eb; + bool need_cleanup = false; + int ret = 0; + int i; + + /* Level sanity check */ + if (unlikely(cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 || + root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 || + root_level < cur_level)) { + btrfs_err_rl(fs_info, + "%s: bad levels, cur_level=%d root_level=%d", + __func__, cur_level, root_level); + return -EUCLEAN; + } + + /* Read the tree block if needed */ + if (dst_path->nodes[cur_level] == NULL) { + int parent_slot; + u64 child_gen; + + /* + * dst_path->nodes[root_level] must be initialized before + * calling this function. + */ + if (unlikely(cur_level == root_level)) { + btrfs_err_rl(fs_info, + "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d", + __func__, root_level, root_level, cur_level); + return -EUCLEAN; + } + + /* + * We need to get child blockptr/gen from parent before we can + * read it. 
+ */ + eb = dst_path->nodes[cur_level + 1]; + parent_slot = dst_path->slots[cur_level + 1]; + child_gen = btrfs_node_ptr_generation(eb, parent_slot); + + /* This node is old, no need to trace */ + if (child_gen < last_snapshot) + goto out; + + eb = btrfs_read_node_slot(eb, parent_slot); + if (IS_ERR(eb)) { + ret = PTR_ERR(eb); + goto out; + } + + dst_path->nodes[cur_level] = eb; + dst_path->slots[cur_level] = 0; + + btrfs_tree_read_lock(eb); + dst_path->locks[cur_level] = BTRFS_READ_LOCK; + need_cleanup = true; + } + + /* Now record this tree block and its counter part for qgroups */ + ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level, + root_level, trace_leaf); + if (ret < 0) + goto cleanup; + + eb = dst_path->nodes[cur_level]; + + if (cur_level > 0) { + /* Iterate all child tree blocks */ + for (i = 0; i < btrfs_header_nritems(eb); i++) { + /* Skip old tree blocks as they won't be swapped */ + if (btrfs_node_ptr_generation(eb, i) < last_snapshot) + continue; + dst_path->slots[cur_level] = i; + + /* Recursive call (at most 7 times) */ + ret = qgroup_trace_new_subtree_blocks(trans, src_eb, + dst_path, cur_level - 1, root_level, + last_snapshot, trace_leaf); + if (ret < 0) + goto cleanup; + } + } + +cleanup: + if (need_cleanup) { + /* Clean up */ + btrfs_tree_unlock_rw(dst_path->nodes[cur_level], + dst_path->locks[cur_level]); + free_extent_buffer(dst_path->nodes[cur_level]); + dst_path->nodes[cur_level] = NULL; + dst_path->slots[cur_level] = 0; + dst_path->locks[cur_level] = 0; + } +out: + return ret; +} + +static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans, + struct extent_buffer *src_eb, + struct extent_buffer *dst_eb, + u64 last_snapshot, bool trace_leaf) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_path *dst_path = NULL; + int level; + int ret; + + if (!btrfs_qgroup_full_accounting(fs_info)) + return 0; + + /* Wrong parameter order */ + if (unlikely(btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb))) { + btrfs_err_rl(fs_info, + "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__, + btrfs_header_generation(src_eb), + btrfs_header_generation(dst_eb)); + return -EUCLEAN; + } + + if (unlikely(!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb))) { + ret = -EIO; + goto out; + } + + level = btrfs_header_level(dst_eb); + dst_path = btrfs_alloc_path(); + if (!dst_path) { + ret = -ENOMEM; + goto out; + } + /* For dst_path */ + refcount_inc(&dst_eb->refs); + dst_path->nodes[level] = dst_eb; + dst_path->slots[level] = 0; + dst_path->locks[level] = 0; + + /* Do the generation aware breadth-first search */ + ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level, + level, last_snapshot, trace_leaf); + if (ret < 0) + goto out; + ret = 0; + +out: + btrfs_free_path(dst_path); + if (ret < 0) + qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret); + return ret; +} + +/* + * Inform qgroup to trace a whole subtree, including all its child tree + * blocks and data. + * The root tree block is specified by @root_eb. + * + * Normally used by relocation(tree block swap) and subvolume deletion. 
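/*
 * A userspace sketch of the generation filter described above: recurse
 * depth-first, but prune any child whose generation predates the last
 * snapshot, since such a block is shared with the old tree and cannot have
 * been touched by the swap. Node layout and names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct node {
	uint64_t gen;			/* transid when this block was written */
	size_t nr_children;
	struct node **children;
};

static void trace_new_blocks(const struct node *n, uint64_t last_snapshot,
			     void (*trace)(const struct node *))
{
	if (!n || n->gen < last_snapshot)
		return;			/* shared old block: prune the subtree */
	trace(n);			/* account this newly written block */
	for (size_t i = 0; i < n->nr_children; i++)
		trace_new_blocks(n->children[i], last_snapshot, trace);
}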
+ * + * Return 0 for success + * Return <0 for error(ENOMEM or tree search error) + */ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct extent_buffer *root_eb, u64 root_gen, int root_level) { - struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_fs_info *fs_info = trans->fs_info; int ret = 0; int level; + u8 drop_subptree_thres; struct extent_buffer *eb = root_eb; - struct btrfs_path *path = NULL; + BTRFS_PATH_AUTO_FREE(path); - BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL); - BUG_ON(root_eb == NULL); + ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL); + ASSERT(root_eb != NULL); - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) + if (!btrfs_qgroup_full_accounting(fs_info)) + return 0; + + spin_lock(&fs_info->qgroup_lock); + drop_subptree_thres = fs_info->qgroup_drop_subtree_thres; + spin_unlock(&fs_info->qgroup_lock); + + /* + * This function only gets called for snapshot drop, if we hit a high + * node here, it means we are going to change ownership for quite a lot + * of extents, which will greatly slow down btrfs_commit_transaction(). + * + * So here if we find a high tree here, we just skip the accounting and + * mark qgroup inconsistent. + */ + if (root_level >= drop_subptree_thres) { + qgroup_mark_inconsistent(fs_info, "subtree level reached threshold"); return 0; + } if (!extent_buffer_uptodate(root_eb)) { - ret = btrfs_read_buffer(root_eb, root_gen); + struct btrfs_tree_parent_check check = { + .transid = root_gen, + .level = root_level + }; + + ret = btrfs_read_extent_buffer(root_eb, &check); if (ret) - goto out; + return ret; } if (root_level == 0) { - ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb); - goto out; + ret = btrfs_qgroup_trace_leaf_items(trans, root_eb); + return ret; } path = btrfs_alloc_path(); @@ -1633,7 +2626,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, * walk back up the tree (adjusting slot pointers as we go) * and restart the search process. */ - extent_buffer_get(root_eb); /* For path */ + refcount_inc(&root_eb->refs); /* For path */ path->nodes[root_level] = root_eb; path->slots[root_level] = 0; path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ @@ -1642,48 +2635,37 @@ walk_down: while (level >= 0) { if (path->nodes[level] == NULL) { int parent_slot; - u64 child_gen; u64 child_bytenr; /* - * We need to get child blockptr/gen from parent before - * we can read it. + * We need to get child blockptr from parent before we + * can read it. 
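/*
 * The walk below advances through the tree with adjust_slots_upwards():
 * when a node's slots are exhausted it pops up a level, advances there,
 * and descends again. A standalone model of that slot carry, where a
 * nonzero return means the whole tree has been covered (the slot and
 * item-count arrays are illustrative):
 */
static int advance_slots(int *slots, const int *nritems, int root_level)
{
	for (int level = 0; level <= root_level; level++) {
		slots[level]++;
		if (slots[level] < nritems[level])
			return 0;	/* more entries at this level */
		if (level == root_level)
			return 1;	/* root exhausted: walk complete */
		slots[level] = 0;	/* wrap, carry into the parent */
	}
	return 1;			/* not reached */
}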
*/ eb = path->nodes[level + 1]; parent_slot = path->slots[level + 1]; child_bytenr = btrfs_node_blockptr(eb, parent_slot); - child_gen = btrfs_node_ptr_generation(eb, parent_slot); - eb = read_tree_block(fs_info, child_bytenr, child_gen); - if (IS_ERR(eb)) { - ret = PTR_ERR(eb); - goto out; - } else if (!extent_buffer_uptodate(eb)) { - free_extent_buffer(eb); - ret = -EIO; - goto out; - } + eb = btrfs_read_node_slot(eb, parent_slot); + if (IS_ERR(eb)) + return PTR_ERR(eb); path->nodes[level] = eb; path->slots[level] = 0; btrfs_tree_read_lock(eb); - btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); - path->locks[level] = BTRFS_READ_LOCK_BLOCKING; + path->locks[level] = BTRFS_READ_LOCK; - ret = btrfs_qgroup_trace_extent(trans, fs_info, - child_bytenr, - fs_info->nodesize, - GFP_NOFS); + ret = btrfs_qgroup_trace_extent(trans, child_bytenr, + fs_info->nodesize); if (ret) - goto out; + return ret; } if (level == 0) { - ret = btrfs_qgroup_trace_leaf_items(trans,fs_info, - path->nodes[level]); + ret = btrfs_qgroup_trace_leaf_items(trans, + path->nodes[level]); if (ret) - goto out; + return ret; /* Nonzero return here means we completed our search */ ret = adjust_slots_upwards(path, root_level); @@ -1697,11 +2679,25 @@ walk_down: level--; } - ret = 0; -out: - btrfs_free_path(path); + return 0; +} - return ret; +static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup) +{ + if (!list_empty(&qgroup->nested_iterator)) + return; + + list_add_tail(&qgroup->nested_iterator, head); +} + +static void qgroup_iterator_nested_clean(struct list_head *head) +{ + while (!list_empty(head)) { + struct btrfs_qgroup *qgroup; + + qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator); + list_del_init(&qgroup->nested_iterator); + } } #define UPDATE_NEW 0 @@ -1709,64 +2705,48 @@ out: /* * Walk all of the roots that points to the bytenr and adjust their refcnts. 
*/ -static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info, - struct ulist *roots, struct ulist *tmp, - struct ulist *qgroups, u64 seq, int update_old) +static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info, + struct ulist *roots, struct list_head *qgroups, + u64 seq, bool update_old) { struct ulist_node *unode; struct ulist_iterator uiter; - struct ulist_node *tmp_unode; - struct ulist_iterator tmp_uiter; struct btrfs_qgroup *qg; - int ret = 0; if (!roots) - return 0; + return; ULIST_ITER_INIT(&uiter); while ((unode = ulist_next(roots, &uiter))) { + LIST_HEAD(tmp); + qg = find_qgroup_rb(fs_info, unode->val); if (!qg) continue; - ulist_reinit(tmp); - ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg), - GFP_ATOMIC); - if (ret < 0) - return ret; - ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC); - if (ret < 0) - return ret; - ULIST_ITER_INIT(&tmp_uiter); - while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) { + qgroup_iterator_nested_add(qgroups, qg); + qgroup_iterator_add(&tmp, qg); + list_for_each_entry(qg, &tmp, iterator) { struct btrfs_qgroup_list *glist; - qg = unode_aux_to_qgroup(tmp_unode); if (update_old) btrfs_qgroup_update_old_refcnt(qg, seq, 1); else btrfs_qgroup_update_new_refcnt(qg, seq, 1); + list_for_each_entry(glist, &qg->groups, next_group) { - ret = ulist_add(qgroups, glist->group->qgroupid, - qgroup_to_aux(glist->group), - GFP_ATOMIC); - if (ret < 0) - return ret; - ret = ulist_add(tmp, glist->group->qgroupid, - qgroup_to_aux(glist->group), - GFP_ATOMIC); - if (ret < 0) - return ret; + qgroup_iterator_nested_add(qgroups, glist->group); + qgroup_iterator_add(&tmp, glist->group); } } + qgroup_iterator_clean(&tmp); } - return 0; } /* * Update qgroup rfer/excl counters. * Rfer update is easy, codes can explain themselves. * - * Excl update is tricky, the update is split into 2 part. + * Excl update is tricky, the update is split into 2 parts. * Part 1: Possible exclusive <-> sharing detect: * | A | !A | * ------------------------------------- @@ -1798,27 +2778,21 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info, * But this time we don't need to consider other things, the codes and logic * is easy to understand now. */ -static int qgroup_update_counters(struct btrfs_fs_info *fs_info, - struct ulist *qgroups, - u64 nr_old_roots, - u64 nr_new_roots, - u64 num_bytes, u64 seq) +static void qgroup_update_counters(struct btrfs_fs_info *fs_info, + struct list_head *qgroups, u64 nr_old_roots, + u64 nr_new_roots, u64 num_bytes, u64 seq) { - struct ulist_node *unode; - struct ulist_iterator uiter; struct btrfs_qgroup *qg; - u64 cur_new_count, cur_old_count; - ULIST_ITER_INIT(&uiter); - while ((unode = ulist_next(qgroups, &uiter))) { + list_for_each_entry(qg, qgroups, nested_iterator) { + u64 cur_new_count, cur_old_count; bool dirty = false; - qg = unode_aux_to_qgroup(unode); cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); - trace_qgroup_update_counters(fs_info, qg->qgroupid, - cur_old_count, cur_new_count); + trace_btrfs_qgroup_update_counters(fs_info, qg, cur_old_count, + cur_new_count); /* Rfer update part */ if (cur_old_count == 0 && cur_new_count > 0) { @@ -1884,7 +2858,6 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info, if (dirty) qgroup_dirty(fs_info, qg); } - return 0; } /* @@ -1913,24 +2886,27 @@ static int maybe_fs_roots(struct ulist *roots) * trees. * If it contains a non-fs tree, it won't be shared with fs/subvol trees. 
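/*
 * A condensed standalone model of the rfer half of
 * qgroup_update_counters(): a qgroup gains referenced bytes when its
 * refcount goes from zero to nonzero across the change, and loses them on
 * the reverse transition. The excl half (partly elided in this hunk)
 * additionally compares the per-qgroup count against
 * nr_old_roots/nr_new_roots to catch exclusive<->shared transitions. Field
 * names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

struct qg {
	uint64_t rfer;
	uint64_t excl;
};

static bool update_rfer(struct qg *qg, uint64_t cur_old_count,
			uint64_t cur_new_count, uint64_t num_bytes)
{
	bool dirty = false;

	if (cur_old_count == 0 && cur_new_count > 0) {	/* gained a ref */
		qg->rfer += num_bytes;
		dirty = true;
	}
	if (cur_old_count > 0 && cur_new_count == 0) {	/* lost all refs */
		qg->rfer -= num_bytes;
		dirty = true;
	}
	return dirty;
}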
*/ - return is_fstree(unode->val); + return btrfs_is_fstree(unode->val); } -int -btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, - u64 bytenr, u64 num_bytes, - struct ulist *old_roots, struct ulist *new_roots) +int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, + u64 num_bytes, struct ulist *old_roots, + struct ulist *new_roots) { - struct ulist *qgroups = NULL; - struct ulist *tmp = NULL; + struct btrfs_fs_info *fs_info = trans->fs_info; + LIST_HEAD(qgroups); u64 seq; u64 nr_new_roots = 0; u64 nr_old_roots = 0; int ret = 0; - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) - return 0; + /* + * If quotas get disabled meanwhile, the resources need to be freed and + * we can't just exit here. + */ + if (!btrfs_qgroup_full_accounting(fs_info) || + fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING) + goto out_free; if (new_roots) { if (!maybe_fs_roots(new_roots)) @@ -1947,21 +2923,8 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, if (nr_old_roots == 0 && nr_new_roots == 0) goto out_free; - BUG_ON(!fs_info->quota_root); - - trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes, - nr_old_roots, nr_new_roots); - - qgroups = ulist_alloc(GFP_NOFS); - if (!qgroups) { - ret = -ENOMEM; - goto out_free; - } - tmp = ulist_alloc(GFP_NOFS); - if (!tmp) { - ret = -ENOMEM; - goto out_free; - } + trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr, + num_bytes, nr_old_roots, nr_new_roots); mutex_lock(&fs_info->qgroup_rescan_lock); if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { @@ -1977,118 +2940,143 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, seq = fs_info->qgroup_seq; /* Update old refcnts using old_roots */ - ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq, - UPDATE_OLD); - if (ret < 0) - goto out; + qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD); /* Update new refcnts using new_roots */ - ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq, - UPDATE_NEW); - if (ret < 0) - goto out; + qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW); - qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots, + qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots, num_bytes, seq); /* + * We're done using the iterator, release all its qgroups while holding + * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup() + * and trigger use-after-free accesses to qgroups. 
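/*
 * The iterator lists used above replace the old ulist pairs: walking a
 * qgroup's ancestry works by appending parents to the tail of the very
 * list being iterated, so they are visited later in the same pass, and the
 * embedded linkage guarantees each qgroup is queued at most once. A
 * standalone single-parent model (the kernel's list_head version handles
 * multiple parents the same way; 'next' is assumed NULL on every node):
 */
#include <stdbool.h>

struct qgroup {
	struct qgroup *parent;	/* single parent for simplicity */
	struct qgroup *next;	/* embedded iterator linkage */
	bool queued;
};

static void walk_with_parents(struct qgroup *start,
			      void (*visit)(struct qgroup *))
{
	struct qgroup *tail = start;

	start->queued = true;
	for (struct qgroup *qg = start; qg; qg = qg->next) {
		visit(qg);
		if (qg->parent && !qg->parent->queued) {
			qg->parent->queued = true;	/* queue at most once */
			tail->next = qg->parent;	/* append to the tail */
			tail = qg->parent;
		}
	}
	/* A real user clears 'queued' and the linkage afterwards, as
	 * qgroup_iterator_nested_clean() does above. */
}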
+ */ + qgroup_iterator_nested_clean(&qgroups); + + /* * Bump qgroup_seq to avoid seq overlap */ fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1; -out: spin_unlock(&fs_info->qgroup_lock); out_free: - ulist_free(tmp); - ulist_free(qgroups); ulist_free(old_roots); ulist_free(new_roots); return ret; } -int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info) +int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) { + struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_qgroup_extent_record *record; struct btrfs_delayed_ref_root *delayed_refs; struct ulist *new_roots = NULL; - struct rb_node *node; + unsigned long index; + u64 num_dirty_extents = 0; u64 qgroup_to_skip; int ret = 0; + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) + return 0; + delayed_refs = &trans->transaction->delayed_refs; qgroup_to_skip = delayed_refs->qgroup_to_skip; - while ((node = rb_first(&delayed_refs->dirty_extent_root))) { - record = rb_entry(node, struct btrfs_qgroup_extent_record, - node); + xa_for_each(&delayed_refs->dirty_extents, index, record) { + const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits); + + num_dirty_extents++; + trace_btrfs_qgroup_account_extents(fs_info, record, bytenr); + + if (!ret && !(fs_info->qgroup_flags & + BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) { + struct btrfs_backref_walk_ctx ctx = { 0 }; - trace_btrfs_qgroup_account_extents(fs_info, record); + ctx.bytenr = bytenr; + ctx.fs_info = fs_info; - if (!ret) { /* * Old roots should be searched when inserting qgroup - * extent record + * extent record. + * + * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case, + * we may have some record inserted during + * NO_ACCOUNTING (thus no old_roots populated), but + * later we start rescan, which clears NO_ACCOUNTING, + * leaving some inserted records without old_roots + * populated. + * + * Those cases are rare and should not cause too much + * time spent during commit_transaction(). */ - if (WARN_ON(!record->old_roots)) { + if (!record->old_roots) { /* Search commit root to find old_roots */ - ret = btrfs_find_all_roots(NULL, fs_info, - record->bytenr, 0, - &record->old_roots); + ret = btrfs_find_all_roots(&ctx, false); if (ret < 0) goto cleanup; + record->old_roots = ctx.roots; + ctx.roots = NULL; } /* - * Use SEQ_LAST as time_seq to do special search, which - * doesn't lock tree or delayed_refs and search current - * root. It's safe inside commit_transaction(). + * Use BTRFS_SEQ_LAST as time_seq to do special search, + * which doesn't lock tree or delayed_refs and search + * current root. It's safe inside commit_transaction(). 
*/ - ret = btrfs_find_all_roots(trans, fs_info, - record->bytenr, SEQ_LAST, &new_roots); + ctx.trans = trans; + ctx.time_seq = BTRFS_SEQ_LAST; + ret = btrfs_find_all_roots(&ctx, false); if (ret < 0) goto cleanup; + new_roots = ctx.roots; if (qgroup_to_skip) { ulist_del(new_roots, qgroup_to_skip, 0); ulist_del(record->old_roots, qgroup_to_skip, 0); } - ret = btrfs_qgroup_account_extent(trans, fs_info, - record->bytenr, record->num_bytes, - record->old_roots, new_roots); + ret = btrfs_qgroup_account_extent(trans, bytenr, + record->num_bytes, + record->old_roots, + new_roots); record->old_roots = NULL; new_roots = NULL; } + /* Free the reserved data space */ + btrfs_qgroup_free_refroot(fs_info, + record->data_rsv_refroot, + record->data_rsv, + BTRFS_QGROUP_RSV_DATA); cleanup: ulist_free(record->old_roots); ulist_free(new_roots); new_roots = NULL; - rb_erase(node, &delayed_refs->dirty_extent_root); + xa_erase(&delayed_refs->dirty_extents, index); kfree(record); } + trace_btrfs_qgroup_num_dirty_extents(fs_info, trans->transid, num_dirty_extents); return ret; } /* - * called from commit_transaction. Writes all changed qgroups to disk. + * Writes all changed qgroups to disk. + * Called by the transaction commit path and the qgroup assign ioctl. */ -int btrfs_run_qgroups(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info) +int btrfs_run_qgroups(struct btrfs_trans_handle *trans) { - struct btrfs_root *quota_root = fs_info->quota_root; + struct btrfs_fs_info *fs_info = trans->fs_info; int ret = 0; - int start_rescan_worker = 0; - - if (!quota_root) - goto out; - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) && - test_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags)) - start_rescan_worker = 1; + /* + * In case we are called from the qgroup assign ioctl, assert that we + * are holding the qgroup_ioctl_lock, otherwise we can race with a quota + * disable operation (ioctl) and access a freed quota root. 
+ */ + if (trans->transaction->state != TRANS_STATE_COMMIT_DOING) + lockdep_assert_held(&fs_info->qgroup_ioctl_lock); - if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags)) - set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); - if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags)) - clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); + if (!fs_info->quota_root) + return ret; spin_lock(&fs_info->qgroup_lock); while (!list_empty(&fs_info->dirty_qgroups)) { @@ -2097,39 +3085,181 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans, struct btrfs_qgroup, dirty); list_del_init(&qgroup->dirty); spin_unlock(&fs_info->qgroup_lock); - ret = update_qgroup_info_item(trans, quota_root, qgroup); + ret = update_qgroup_info_item(trans, qgroup); if (ret) - fs_info->qgroup_flags |= - BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - ret = update_qgroup_limit_item(trans, quota_root, qgroup); + qgroup_mark_inconsistent(fs_info, + "qgroup info item update error %d", ret); + ret = update_qgroup_limit_item(trans, qgroup); if (ret) - fs_info->qgroup_flags |= - BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; + qgroup_mark_inconsistent(fs_info, + "qgroup limit item update error %d", ret); spin_lock(&fs_info->qgroup_lock); } - if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) + if (btrfs_qgroup_enabled(fs_info)) fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; else fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; spin_unlock(&fs_info->qgroup_lock); - ret = update_qgroup_status_item(trans, fs_info, quota_root); + ret = update_qgroup_status_item(trans); if (ret) - fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; + qgroup_mark_inconsistent(fs_info, + "qgroup status item update error %d", ret); + + return ret; +} + +int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup_inherit *inherit, + size_t size) +{ + if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP) + return -EOPNOTSUPP; + if (size < sizeof(*inherit) || size > PAGE_SIZE) + return -EINVAL; - if (!ret && start_rescan_worker) { - ret = qgroup_rescan_init(fs_info, 0, 1); - if (!ret) { - qgroup_rescan_zero_tracking(fs_info); - btrfs_queue_work(fs_info->qgroup_rescan_workers, - &fs_info->qgroup_rescan_work); + /* + * In the past we allowed btrfs_qgroup_inherit to specify to copy + * rfer/excl numbers directly from other qgroups. This behavior has + * been disabled in userspace for a very long time, but here we should + * also disable it in kernel, as this behavior is known to mark qgroup + * inconsistent, and a rescan would wipe out the changes anyway. + * + * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies. + */ + if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0) + return -EINVAL; + + if (size != struct_size(inherit, qgroups, inherit->num_qgroups)) + return -EINVAL; + + /* + * Skip the inherit source qgroups check if qgroup is not enabled. + * Qgroup can still be later enabled causing problems, but in that case + * btrfs_qgroup_inherit() would just ignore those invalid ones. + */ + if (!btrfs_qgroup_enabled(fs_info)) + return 0; + + /* + * Now check all the remaining qgroups, they should all: + * + * - Exist + * - Be higher level qgroups. 
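/*
 * The struct_size() check above pins the ioctl payload to exactly the
 * flexible-array layout: a fixed header plus num_qgroups trailing u64 ids.
 * A standalone model of that validation with a simplified, hypothetical
 * struct:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct inherit_hdr {
	uint64_t flags;
	uint64_t num_qgroups;
	uint64_t qgroups[];		/* flexible array of qgroup ids */
};

static bool inherit_size_ok(const struct inherit_hdr *h, size_t size)
{
	if (size < sizeof(*h))
		return false;
	/* Guard the multiply before trusting the user-supplied count. */
	if (h->num_qgroups > (SIZE_MAX - sizeof(*h)) / sizeof(uint64_t))
		return false;
	return size == sizeof(*h) + h->num_qgroups * sizeof(uint64_t);
}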
+ */ + for (int i = 0; i < inherit->num_qgroups; i++) { + struct btrfs_qgroup *qgroup; + u64 qgroupid = inherit->qgroups[i]; + + if (btrfs_qgroup_level(qgroupid) == 0) + return -EINVAL; + + spin_lock(&fs_info->qgroup_lock); + qgroup = find_qgroup_rb(fs_info, qgroupid); + if (!qgroup) { + spin_unlock(&fs_info->qgroup_lock); + return -ENOENT; } - ret = 0; + spin_unlock(&fs_info->qgroup_lock); } + return 0; +} -out: +static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info, + u64 inode_rootid, + struct btrfs_qgroup_inherit **inherit) +{ + int i = 0; + u64 num_qgroups = 0; + struct btrfs_qgroup *inode_qg; + struct btrfs_qgroup_list *qg_list; + struct btrfs_qgroup_inherit *res; + size_t struct_sz; + u64 *qgids; + + if (*inherit) + return -EEXIST; + + inode_qg = find_qgroup_rb(fs_info, inode_rootid); + if (!inode_qg) + return -ENOENT; - return ret; + num_qgroups = list_count_nodes(&inode_qg->groups); + + if (!num_qgroups) + return 0; + + struct_sz = struct_size(res, qgroups, num_qgroups); + if (struct_sz == SIZE_MAX) + return -ERANGE; + + res = kzalloc(struct_sz, GFP_NOFS); + if (!res) + return -ENOMEM; + res->num_qgroups = num_qgroups; + qgids = res->qgroups; + + list_for_each_entry(qg_list, &inode_qg->groups, next_group) + qgids[i++] = qg_list->group->qgroupid; + + *inherit = res; + return 0; +} + +/* + * Check if we can skip rescan when inheriting qgroups. If @src has a single + * @parent, and that @parent is owning all its bytes exclusively, we can skip + * the full rescan, by just adding nodesize to the @parent's excl/rfer. + * + * Return <0 for fatal errors (like srcid/parentid has no qgroup). + * Return 0 if a quick inherit is done. + * Return >0 if a quick inherit is not possible, and a full rescan is needed. + */ +static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info, + u64 srcid, u64 parentid) +{ + struct btrfs_qgroup *src; + struct btrfs_qgroup *parent; + struct btrfs_qgroup_list *list; + int nr_parents = 0; + + src = find_qgroup_rb(fs_info, srcid); + if (!src) + return -ENOENT; + parent = find_qgroup_rb(fs_info, parentid); + if (!parent) + return -ENOENT; + + /* + * Source has no parent qgroup, but our new qgroup would have one. + * Qgroup numbers would become inconsistent. + */ + if (list_empty(&src->groups)) + return 1; + + list_for_each_entry(list, &src->groups, next_group) { + /* The parent is not the same, quick update is not possible. */ + if (list->group->qgroupid != parentid) + return 1; + nr_parents++; + /* + * More than one parent qgroup, we can't be sure about accounting + * consistency. + */ + if (nr_parents > 1) + return 1; + } + + /* + * The parent is not exclusively owning all its bytes. We're not sure + * if the source has any bytes not fully owned by the parent. + */ + if (parent->excl != parent->rfer) + return 1; + + parent->excl += fs_info->nodesize; + parent->rfer += fs_info->nodesize; + return 0; } /* @@ -2138,33 +3268,69 @@ out: * cause a transaction abort so we take extra care here to only error * when a readonly fs is a reasonable outcome. 
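/*
 * The inherit path that follows takes qgroup_ioctl_lock only when it is
 * not running inside transaction commit, where other qgroup modifiers are
 * already excluded and taking the mutex would only build a longer lock
 * chain. A standalone model of that conditional-locking shape (names are
 * illustrative):
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t ioctl_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_inherit(bool committing, void (*body)(void))
{
	if (!committing)
		pthread_mutex_lock(&ioctl_lock);
	body();				/* the actual inherit work */
	if (!committing)
		pthread_mutex_unlock(&ioctl_lock);
}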
*/ -int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid, +int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, + u64 objectid, u64 inode_rootid, struct btrfs_qgroup_inherit *inherit) { int ret = 0; - int i; u64 *i_qgroups; - struct btrfs_root *quota_root = fs_info->quota_root; + bool committing = false; + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_root *quota_root; struct btrfs_qgroup *srcgroup; struct btrfs_qgroup *dstgroup; + struct btrfs_qgroup *prealloc = NULL; + struct btrfs_qgroup_list **qlist_prealloc = NULL; + bool free_inherit = false; + bool need_rescan = false; u32 level_size = 0; u64 nums; - mutex_lock(&fs_info->qgroup_ioctl_lock); - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) - goto out; + if (!btrfs_qgroup_enabled(fs_info)) + return 0; + + prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); + if (!prealloc) + return -ENOMEM; + /* + * There are only two callers of this function. + * + * One in create_subvol() in the ioctl context, which needs to hold + * the qgroup_ioctl_lock. + * + * The other one in create_pending_snapshot() where no other qgroup + * code can modify the fs as they all need to either start a new trans + * or hold a trans handler, thus we don't need to hold + * qgroup_ioctl_lock. + * This would avoid long and complex lock chain and make lockdep happy. + */ + spin_lock(&fs_info->trans_lock); + if (trans->transaction->state == TRANS_STATE_COMMIT_DOING) + committing = true; + spin_unlock(&fs_info->trans_lock); + + if (!committing) + mutex_lock(&fs_info->qgroup_ioctl_lock); + + quota_root = fs_info->quota_root; if (!quota_root) { ret = -EINVAL; goto out; } + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) { + ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit); + if (ret) + goto out; + free_inherit = true; + } + if (inherit) { i_qgroups = (u64 *)(inherit + 1); nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + 2 * inherit->num_excl_copies; - for (i = 0; i < nums; ++i) { + for (int i = 0; i < nums; i++) { srcgroup = find_qgroup_rb(fs_info, *i_qgroups); /* @@ -2186,50 +3352,46 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, if (ret) goto out; - if (srcid) { - struct btrfs_root *srcroot; - struct btrfs_key srckey; - - srckey.objectid = srcid; - srckey.type = BTRFS_ROOT_ITEM_KEY; - srckey.offset = (u64)-1; - srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey); - if (IS_ERR(srcroot)) { - ret = PTR_ERR(srcroot); - goto out; - } - - level_size = fs_info->nodesize; - } - /* * add qgroup to all inherited groups */ if (inherit) { i_qgroups = (u64 *)(inherit + 1); - for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) { + for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) { if (*i_qgroups == 0) continue; - ret = add_qgroup_relation_item(trans, quota_root, - objectid, *i_qgroups); + ret = add_qgroup_relation_item(trans, objectid, + *i_qgroups); if (ret && ret != -EEXIST) goto out; - ret = add_qgroup_relation_item(trans, quota_root, - *i_qgroups, objectid); + ret = add_qgroup_relation_item(trans, *i_qgroups, + objectid); if (ret && ret != -EEXIST) goto out; } ret = 0; - } + qlist_prealloc = kcalloc(inherit->num_qgroups, + sizeof(struct btrfs_qgroup_list *), + GFP_NOFS); + if (!qlist_prealloc) { + ret = -ENOMEM; + goto out; + } + for (int i = 0; i < inherit->num_qgroups; i++) { + qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list), + GFP_NOFS); + if (!qlist_prealloc[i]) { + ret = -ENOMEM; + 
goto out; + } + } + } spin_lock(&fs_info->qgroup_lock); - dstgroup = add_qgroup_rb(fs_info, objectid); - if (IS_ERR(dstgroup)) { - ret = PTR_ERR(dstgroup); - goto unlock; - } + dstgroup = add_qgroup_rb(fs_info, prealloc, objectid); + prealloc = NULL; if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) { dstgroup->lim_flags = inherit->lim.flags; @@ -2238,17 +3400,10 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, dstgroup->rsv_rfer = inherit->lim.rsv_rfer; dstgroup->rsv_excl = inherit->lim.rsv_excl; - ret = update_qgroup_limit_item(trans, quota_root, dstgroup); - if (ret) { - fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - btrfs_info(fs_info, - "unable to update quota limit for %llu", - dstgroup->qgroupid); - goto unlock; - } + qgroup_dirty(fs_info, dstgroup); } - if (srcid) { + if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) { srcgroup = find_qgroup_rb(fs_info, srcid); if (!srcgroup) goto unlock; @@ -2258,6 +3413,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, * our counts don't go crazy, so at this point the only * difference between the two roots should be the root node. */ + level_size = fs_info->nodesize; dstgroup->rfer = srcgroup->rfer; dstgroup->rfer_cmpr = srcgroup->rfer_cmpr; dstgroup->excl = level_size; @@ -2274,22 +3430,40 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, qgroup_dirty(fs_info, dstgroup); qgroup_dirty(fs_info, srcgroup); + + /* + * If the source qgroup has parent but the new one doesn't, + * we need a full rescan. + */ + if (!inherit && !list_empty(&srcgroup->groups)) + need_rescan = true; } if (!inherit) goto unlock; i_qgroups = (u64 *)(inherit + 1); - for (i = 0; i < inherit->num_qgroups; ++i) { + for (int i = 0; i < inherit->num_qgroups; i++) { if (*i_qgroups) { - ret = add_relation_rb(fs_info, objectid, *i_qgroups); + ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid, + *i_qgroups); + qlist_prealloc[i] = NULL; if (ret) goto unlock; } + if (srcid) { + /* Check if we can do a quick inherit. 
*/ + ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups); + if (ret < 0) + goto unlock; + if (ret > 0) + need_rescan = true; + ret = 0; + } ++i_qgroups; } - for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { + for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) { struct btrfs_qgroup *src; struct btrfs_qgroup *dst; @@ -2306,8 +3480,11 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, dst->rfer = src->rfer - level_size; dst->rfer_cmpr = src->rfer_cmpr - level_size; + + /* Manually tweaking numbers certainly needs a rescan */ + need_rescan = true; } - for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { + for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) { struct btrfs_qgroup *src; struct btrfs_qgroup *dst; @@ -2324,40 +3501,59 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, dst->excl = src->excl + level_size; dst->excl_cmpr = src->excl_cmpr + level_size; + need_rescan = true; } unlock: spin_unlock(&fs_info->qgroup_lock); + if (!ret) + ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup); out: - mutex_unlock(&fs_info->qgroup_ioctl_lock); + if (!committing) + mutex_unlock(&fs_info->qgroup_ioctl_lock); + if (need_rescan) + qgroup_mark_inconsistent(fs_info, "qgroup inherit needs a rescan"); + if (qlist_prealloc) { + for (int i = 0; i < inherit->num_qgroups; i++) + kfree(qlist_prealloc[i]); + kfree(qlist_prealloc); + } + if (free_inherit) + kfree(inherit); + + /* + * At this point we either failed at allocating prealloc, or we + * succeeded and passed the ownership to it to add_qgroup_rb(). In any + * case, this needs to be NULL or there is something wrong. + */ + ASSERT(prealloc == NULL); + return ret; } static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) { if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && - qg->reserved + (s64)qg->rfer + num_bytes > qg->max_rfer) + qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) return false; if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && - qg->reserved + (s64)qg->excl + num_bytes > qg->max_excl) + qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) return false; return true; } -static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce) +static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, + enum btrfs_qgroup_rsv_type type) { - struct btrfs_root *quota_root; struct btrfs_qgroup *qgroup; struct btrfs_fs_info *fs_info = root->fs_info; - u64 ref_root = root->root_key.objectid; + u64 ref_root = btrfs_root_id(root); int ret = 0; - int retried = 0; - struct ulist_node *unode; - struct ulist_iterator uiter; + LIST_HEAD(qgroup_list); - if (!is_fstree(ref_root)) + if (!btrfs_is_fstree(ref_root)) return 0; if (num_bytes == 0) @@ -2367,167 +3563,140 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce) capable(CAP_SYS_RESOURCE)) enforce = false; -retry: spin_lock(&fs_info->qgroup_lock); - quota_root = fs_info->quota_root; - if (!quota_root) + if (!fs_info->quota_root) goto out; qgroup = find_qgroup_rb(fs_info, ref_root); if (!qgroup) goto out; - /* - * in a first step, we check all affected qgroups if any limits would - * be exceeded - */ - ulist_reinit(fs_info->qgroup_ulist); - ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, - (uintptr_t)qgroup, GFP_ATOMIC); - if (ret < 0) - goto out; - ULIST_ITER_INIT(&uiter); - while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { - struct btrfs_qgroup *qg; + 
qgroup_iterator_add(&qgroup_list, qgroup); + list_for_each_entry(qgroup, &qgroup_list, iterator) { struct btrfs_qgroup_list *glist; - qg = unode_aux_to_qgroup(unode); - - if (enforce && !qgroup_check_limits(qg, num_bytes)) { - /* - * Commit the tree and retry, since we may have - * deletions which would free up space. - */ - if (!retried && qg->reserved > 0) { - struct btrfs_trans_handle *trans; - - spin_unlock(&fs_info->qgroup_lock); - ret = btrfs_start_delalloc_inodes(root, 0); - if (ret) - return ret; - btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); - trans = btrfs_join_transaction(root); - if (IS_ERR(trans)) - return PTR_ERR(trans); - ret = btrfs_commit_transaction(trans); - if (ret) - return ret; - retried++; - goto retry; - } + if (enforce && !qgroup_check_limits(qgroup, num_bytes)) { ret = -EDQUOT; goto out; } - list_for_each_entry(glist, &qg->groups, next_group) { - ret = ulist_add(fs_info->qgroup_ulist, - glist->group->qgroupid, - (uintptr_t)glist->group, GFP_ATOMIC); - if (ret < 0) - goto out; - } + list_for_each_entry(glist, &qgroup->groups, next_group) + qgroup_iterator_add(&qgroup_list, glist->group); } + ret = 0; /* * no limits exceeded, now record the reservation into all qgroups */ - ULIST_ITER_INIT(&uiter); - while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { - struct btrfs_qgroup *qg; - - qg = unode_aux_to_qgroup(unode); - - trace_qgroup_update_reserve(fs_info, qg, num_bytes); - qg->reserved += num_bytes; - } + list_for_each_entry(qgroup, &qgroup_list, iterator) + qgroup_rsv_add(fs_info, qgroup, num_bytes, type); out: + qgroup_iterator_clean(&qgroup_list); spin_unlock(&fs_info->qgroup_lock); return ret; } +/* + * Free @num_bytes of reserved space with @type for qgroup. (Normally level 0 + * qgroup). + * + * Will handle all higher level qgroup too. + * + * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup. + * This special case is only used for META_PERTRANS type. + */ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, - u64 ref_root, u64 num_bytes) + u64 ref_root, u64 num_bytes, + enum btrfs_qgroup_rsv_type type) { - struct btrfs_root *quota_root; struct btrfs_qgroup *qgroup; - struct ulist_node *unode; - struct ulist_iterator uiter; - int ret = 0; + LIST_HEAD(qgroup_list); - if (!is_fstree(ref_root)) + if (!btrfs_is_fstree(ref_root)) return; if (num_bytes == 0) return; + if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) { + WARN(1, "%s: Invalid type to free", __func__); + return; + } spin_lock(&fs_info->qgroup_lock); - quota_root = fs_info->quota_root; - if (!quota_root) + if (!fs_info->quota_root) goto out; qgroup = find_qgroup_rb(fs_info, ref_root); if (!qgroup) goto out; - ulist_reinit(fs_info->qgroup_ulist); - ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, - (uintptr_t)qgroup, GFP_ATOMIC); - if (ret < 0) - goto out; - ULIST_ITER_INIT(&uiter); - while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { - struct btrfs_qgroup *qg; - struct btrfs_qgroup_list *glist; - - qg = unode_aux_to_qgroup(unode); + if (num_bytes == (u64)-1) + /* + * We're freeing all pertrans rsv, get reserved value from + * level 0 qgroup as real num_bytes to free. 
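/*
 * A standalone model of the "(u64)-1 frees everything" convention noted
 * above for META_PERTRANS: the sentinel is resolved against the level-0
 * qgroup's currently reserved value, and releases clamp to zero rather
 * than underflow, matching qgroup_rsv_release(). Types and names are
 * illustrative.
 */
#include <stdint.h>

static void rsv_release(uint64_t *reserved, uint64_t num_bytes)
{
	if (num_bytes == UINT64_MAX)	/* free-all sentinel */
		num_bytes = *reserved;
	if (num_bytes > *reserved)	/* clamp instead of underflowing */
		num_bytes = *reserved;
	*reserved -= num_bytes;
}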
+ */ + num_bytes = qgroup->rsv.values[type]; - trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes); - if (qg->reserved < num_bytes) - report_reserved_underflow(fs_info, qg, num_bytes); - else - qg->reserved -= num_bytes; + qgroup_iterator_add(&qgroup_list, qgroup); + list_for_each_entry(qgroup, &qgroup_list, iterator) { + struct btrfs_qgroup_list *glist; - list_for_each_entry(glist, &qg->groups, next_group) { - ret = ulist_add(fs_info->qgroup_ulist, - glist->group->qgroupid, - (uintptr_t)glist->group, GFP_ATOMIC); - if (ret < 0) - goto out; + qgroup_rsv_release(fs_info, qgroup, num_bytes, type); + list_for_each_entry(glist, &qgroup->groups, next_group) { + qgroup_iterator_add(&qgroup_list, glist->group); } } - out: + qgroup_iterator_clean(&qgroup_list); spin_unlock(&fs_info->qgroup_lock); } /* + * Check if the leaf is the last leaf. Which means all node pointers + * are at their last position. + */ +static bool is_last_leaf(struct btrfs_path *path) +{ + int i; + + for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { + if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1) + return false; + } + return true; +} + +/* * returns < 0 on error, 0 when more leafs are to be scanned. * returns 1 when done. */ -static int -qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, - struct btrfs_trans_handle *trans) +static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans, + struct btrfs_path *path) { + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_root *extent_root; struct btrfs_key found; struct extent_buffer *scratch_leaf = NULL; - struct ulist *roots = NULL; - struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem); u64 num_bytes; + bool done; int slot; int ret; + if (!btrfs_qgroup_full_accounting(fs_info)) + return 1; + mutex_lock(&fs_info->qgroup_rescan_lock); - ret = btrfs_search_slot_for_read(fs_info->extent_root, + extent_root = btrfs_extent_root(fs_info, + fs_info->qgroup_rescan_progress.objectid); + ret = btrfs_search_slot_for_read(extent_root, &fs_info->qgroup_rescan_progress, path, 1, 0); btrfs_debug(fs_info, - "current progress key (%llu %u %llu), search_slot ret %d", - fs_info->qgroup_rescan_progress.objectid, - fs_info->qgroup_rescan_progress.type, - fs_info->qgroup_rescan_progress.offset, ret); + "current progress key " BTRFS_KEY_FMT ", search_slot ret %d", + BTRFS_KEY_FMT_VALUE(&fs_info->qgroup_rescan_progress), ret); if (ret) { /* @@ -2543,26 +3712,25 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, mutex_unlock(&fs_info->qgroup_rescan_lock); return ret; } + done = is_last_leaf(path); btrfs_item_key_to_cpu(path->nodes[0], &found, btrfs_header_nritems(path->nodes[0]) - 1); fs_info->qgroup_rescan_progress.objectid = found.objectid + 1; - btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem); scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]); if (!scratch_leaf) { ret = -ENOMEM; mutex_unlock(&fs_info->qgroup_rescan_lock); goto out; } - extent_buffer_get(scratch_leaf); - btrfs_tree_read_lock(scratch_leaf); - btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK); slot = path->slots[0]; btrfs_release_path(path); mutex_unlock(&fs_info->qgroup_rescan_lock); for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) { + struct btrfs_backref_walk_ctx ctx = { 0 }; + btrfs_item_key_to_cpu(scratch_leaf, &found, slot); if (found.type != BTRFS_EXTENT_ITEM_KEY && found.type != BTRFS_METADATA_ITEM_KEY) @@ -2572,52 +3740,78 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct 
btrfs_path *path, else num_bytes = found.offset; - ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0, - &roots); + ctx.bytenr = found.objectid; + ctx.fs_info = fs_info; + + ret = btrfs_find_all_roots(&ctx, false); if (ret < 0) goto out; /* For rescan, just pass old_roots as NULL */ - ret = btrfs_qgroup_account_extent(trans, fs_info, - found.objectid, num_bytes, NULL, roots); + ret = btrfs_qgroup_account_extent(trans, found.objectid, + num_bytes, NULL, ctx.roots); if (ret < 0) goto out; } out: - if (scratch_leaf) { - btrfs_tree_read_unlock_blocking(scratch_leaf); + if (scratch_leaf) free_extent_buffer(scratch_leaf); - } - btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); + if (done && !ret) { + ret = 1; + fs_info->qgroup_rescan_progress.objectid = (u64)-1; + } return ret; } +static bool rescan_should_stop(struct btrfs_fs_info *fs_info) +{ + if (btrfs_fs_closing(fs_info)) + return true; + if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) + return true; + if (!btrfs_qgroup_enabled(fs_info)) + return true; + if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) + return true; + return false; +} + static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) { struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, qgroup_rescan_work); struct btrfs_path *path; struct btrfs_trans_handle *trans = NULL; - int err = -ENOMEM; int ret = 0; + bool stopped = false; + bool did_leaf_rescans = false; + + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) + return; path = btrfs_alloc_path(); - if (!path) + if (!path) { + ret = -ENOMEM; goto out; + } + /* + * Rescan should only search for commit root, and any later difference + * should be recorded by qgroup + */ + path->search_commit_root = true; + path->skip_locking = true; - err = 0; - while (!err && !btrfs_fs_closing(fs_info)) { + while (!ret && !(stopped = rescan_should_stop(fs_info))) { trans = btrfs_start_transaction(fs_info->fs_root, 0); if (IS_ERR(trans)) { - err = PTR_ERR(trans); + ret = PTR_ERR(trans); break; } - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { - err = -EINTR; - } else { - err = qgroup_rescan_leaf(fs_info, path, trans); - } - if (err > 0) + + ret = qgroup_rescan_leaf(trans, path); + did_leaf_rescans = true; + + if (ret > 0) btrfs_commit_transaction(trans); else btrfs_end_transaction(trans); @@ -2627,50 +3821,66 @@ out: btrfs_free_path(path); mutex_lock(&fs_info->qgroup_rescan_lock); - if (!btrfs_fs_closing(fs_info)) - fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; - - if (err > 0 && + if (ret > 0 && fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - } else if (err < 0) { + } else if (ret < 0 || stopped) { fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; } mutex_unlock(&fs_info->qgroup_rescan_lock); /* - * only update status, since the previous part has already updated the - * qgroup info. + * Only update status, since the previous part has already updated the + * qgroup info, and only if we did any actual work. This also prevents + * race with a concurrent quota disable, which has already set + * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at + * btrfs_quota_disable(). 
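/*
 * The rescan worker below re-checks rescan_should_stop() between leaves,
 * so an unmount, remount, quota disable or explicit cancel takes effect at
 * the next iteration rather than mid-leaf. A standalone model of that
 * cooperative-cancellation loop (flags and names are hypothetical):
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool fs_closing;
static atomic_bool rescan_cancelled;

static bool should_stop(void)
{
	return atomic_load(&fs_closing) || atomic_load(&rescan_cancelled);
}

/* scan_one_leaf() returns 0 to continue, > 0 when done, < 0 on error. */
static int rescan_loop(int (*scan_one_leaf)(void))
{
	int ret = 0;

	while (!ret && !should_stop())
		ret = scan_one_leaf();
	return ret;
}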
*/ - trans = btrfs_start_transaction(fs_info->quota_root, 1); - if (IS_ERR(trans)) { - err = PTR_ERR(trans); - btrfs_err(fs_info, - "fail to start transaction for status update: %d\n", - err); - goto done; + if (did_leaf_rescans) { + trans = btrfs_start_transaction(fs_info->quota_root, 1); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + trans = NULL; + btrfs_err(fs_info, + "fail to start transaction for status update: %d", + ret); + } + } else { + trans = NULL; } - ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root); - if (ret < 0) { - err = ret; - btrfs_err(fs_info, "fail to update qgroup status: %d", err); + + mutex_lock(&fs_info->qgroup_rescan_lock); + if (!stopped || + fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) + fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; + if (trans) { + int ret2 = update_qgroup_status_item(trans); + + if (ret2 < 0) { + ret = ret2; + btrfs_err(fs_info, "fail to update qgroup status: %d", ret); + } } + fs_info->qgroup_rescan_running = false; + fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN; + complete_all(&fs_info->qgroup_rescan_completion); + mutex_unlock(&fs_info->qgroup_rescan_lock); + + if (!trans) + return; + btrfs_end_transaction(trans); - if (btrfs_fs_closing(fs_info)) { + if (stopped) { btrfs_info(fs_info, "qgroup scan paused"); - } else if (err >= 0) { + } else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) { + btrfs_info(fs_info, "qgroup scan cancelled"); + } else if (ret >= 0) { btrfs_info(fs_info, "qgroup scan completed%s", - err > 0 ? " (inconsistency flag cleared)" : ""); + ret > 0 ? " (inconsistency flag cleared)" : ""); } else { - btrfs_err(fs_info, "qgroup scan failed with %d", err); + btrfs_err(fs_info, "qgroup scan failed with %d", ret); } - -done: - mutex_lock(&fs_info->qgroup_rescan_lock); - fs_info->qgroup_rescan_running = false; - mutex_unlock(&fs_info->qgroup_rescan_lock); - complete_all(&fs_info->qgroup_rescan_completion); } /* @@ -2683,51 +3893,61 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, { int ret = 0; - if (!init_flags && - (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) || - !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) { - ret = -EINVAL; - goto err; + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) { + btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode"); + return -EINVAL; + } + + if (!init_flags) { + /* we're resuming qgroup rescan at mount time */ + if (!(fs_info->qgroup_flags & + BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { + btrfs_debug(fs_info, + "qgroup rescan init failed, qgroup rescan is not queued"); + ret = -EINVAL; + } else if (!(fs_info->qgroup_flags & + BTRFS_QGROUP_STATUS_FLAG_ON)) { + btrfs_debug(fs_info, + "qgroup rescan init failed, qgroup is not enabled"); + ret = -ENOTCONN; + } + + if (ret) + return ret; } mutex_lock(&fs_info->qgroup_rescan_lock); - spin_lock(&fs_info->qgroup_lock); if (init_flags) { - if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) + if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { ret = -EINPROGRESS; - else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) - ret = -EINVAL; + } else if (!(fs_info->qgroup_flags & + BTRFS_QGROUP_STATUS_FLAG_ON)) { + btrfs_debug(fs_info, + "qgroup rescan init failed, qgroup is not enabled"); + ret = -ENOTCONN; + } else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) { + /* Quota disable is in progress */ + ret = -EBUSY; + } if (ret) { - 
spin_unlock(&fs_info->qgroup_lock); mutex_unlock(&fs_info->qgroup_rescan_lock); - goto err; + return ret; } fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN; } memset(&fs_info->qgroup_rescan_progress, 0, sizeof(fs_info->qgroup_rescan_progress)); + fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN | + BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING); fs_info->qgroup_rescan_progress.objectid = progress_objectid; init_completion(&fs_info->qgroup_rescan_completion); - fs_info->qgroup_rescan_running = true; - - spin_unlock(&fs_info->qgroup_lock); mutex_unlock(&fs_info->qgroup_rescan_lock); - memset(&fs_info->qgroup_rescan_work, 0, - sizeof(fs_info->qgroup_rescan_work)); btrfs_init_work(&fs_info->qgroup_rescan_work, - btrfs_qgroup_rescan_helper, - btrfs_qgroup_rescan_worker, NULL, NULL); - - if (ret) { -err: - btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret); - return ret; - } - + btrfs_qgroup_rescan_worker, NULL); return 0; } @@ -2745,6 +3965,7 @@ qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info) qgroup->rfer_cmpr = 0; qgroup->excl = 0; qgroup->excl_cmpr = 0; + qgroup_dirty(fs_info, qgroup); } spin_unlock(&fs_info->qgroup_lock); } @@ -2753,7 +3974,6 @@ int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) { int ret = 0; - struct btrfs_trans_handle *trans; ret = qgroup_rescan_init(fs_info, 0, 1); if (ret) @@ -2770,12 +3990,7 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) * going to clear all tracking information for a clean start. */ - trans = btrfs_join_transaction(fs_info->fs_root); - if (IS_ERR(trans)) { - fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; - return PTR_ERR(trans); - } - ret = btrfs_commit_transaction(trans); + ret = btrfs_commit_current_transaction(fs_info->fs_root); if (ret) { fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; return ret; @@ -2783,10 +3998,22 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) qgroup_rescan_zero_tracking(fs_info); - btrfs_queue_work(fs_info->qgroup_rescan_workers, - &fs_info->qgroup_rescan_work); + mutex_lock(&fs_info->qgroup_rescan_lock); + /* + * The rescan worker is only for full accounting qgroups, check if it's + * enabled as it is pointless to queue it otherwise. A concurrent quota + * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED. 
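For readers tracking the error contract of the reworked qgroup_rescan_init() above (-EINVAL, -ENOTCONN, -EINPROGRESS, -EBUSY), here is a compilable userspace sketch of the same state checks; the flag macros and pthread mutex are illustrative stand-ins for the qgroup status flags and qgroup_rescan_lock:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

#define FLAG_RESCAN     (1u << 0)       /* models BTRFS_QGROUP_STATUS_FLAG_RESCAN */
#define FLAG_ON         (1u << 1)       /* models BTRFS_QGROUP_STATUS_FLAG_ON */

static pthread_mutex_t rescan_lock = PTHREAD_MUTEX_INITIALIZER;

static int rescan_init(unsigned int *flags, bool user_requested)
{
        int ret = 0;

        if (!user_requested) {          /* resuming at mount time */
                if (!(*flags & FLAG_RESCAN))
                        return -EINVAL;         /* no rescan was queued */
                if (!(*flags & FLAG_ON))
                        return -ENOTCONN;       /* quotas not enabled */
        }

        pthread_mutex_lock(&rescan_lock);
        if (user_requested) {
                if (*flags & FLAG_RESCAN)
                        ret = -EINPROGRESS;     /* one rescan at a time */
                else if (!(*flags & FLAG_ON))
                        ret = -ENOTCONN;
                /* the kernel also returns -EBUSY while quotas are being disabled */
                if (!ret)
                        *flags |= FLAG_RESCAN;
        }
        pthread_mutex_unlock(&rescan_lock);
        return ret;
}

int main(void)
{
        unsigned int flags = FLAG_ON;

        return rescan_init(&flags, true) ? 1 : 0;       /* queues the rescan */
}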
+ */ + if (btrfs_qgroup_full_accounting(fs_info)) { + fs_info->qgroup_rescan_running = true; + btrfs_queue_work(fs_info->qgroup_rescan_workers, + &fs_info->qgroup_rescan_work); + } else { + ret = -ENOTCONN; + } + mutex_unlock(&fs_info->qgroup_rescan_lock); - return 0; + return ret; } int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, @@ -2796,9 +4023,7 @@ int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, int ret = 0; mutex_lock(&fs_info->qgroup_rescan_lock); - spin_lock(&fs_info->qgroup_lock); running = fs_info->qgroup_rescan_running; - spin_unlock(&fs_info->qgroup_lock); mutex_unlock(&fs_info->qgroup_rescan_lock); if (!running) @@ -2820,45 +4045,157 @@ int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) { - if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) + if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { + mutex_lock(&fs_info->qgroup_rescan_lock); + fs_info->qgroup_rescan_running = true; btrfs_queue_work(fs_info->qgroup_rescan_workers, &fs_info->qgroup_rescan_work); + mutex_unlock(&fs_info->qgroup_rescan_lock); + } +} + +#define rbtree_iterate_from_safe(node, next, start) \ + for (node = start; node && ({ next = rb_next(node); 1;}); node = next) + +static int qgroup_unreserve_range(struct btrfs_inode *inode, + struct extent_changeset *reserved, u64 start, + u64 len) +{ + struct rb_node *node; + struct rb_node *next; + struct ulist_node *entry; + int ret = 0; + + node = reserved->range_changed.root.rb_node; + if (!node) + return 0; + while (node) { + entry = rb_entry(node, struct ulist_node, rb_node); + if (entry->val < start) + node = node->rb_right; + else + node = node->rb_left; + } + + if (entry->val > start && rb_prev(&entry->rb_node)) + entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node, + rb_node); + + rbtree_iterate_from_safe(node, next, &entry->rb_node) { + u64 entry_start; + u64 entry_end; + u64 entry_len; + int clear_ret; + + entry = rb_entry(node, struct ulist_node, rb_node); + entry_start = entry->val; + entry_end = entry->aux; + entry_len = entry_end - entry_start + 1; + + if (entry_start >= start + len) + break; + if (entry_start + entry_len <= start) + continue; + /* + * Now the entry is in [start, start + len), revert the + * EXTENT_QGROUP_RESERVED bit. + */ + clear_ret = btrfs_clear_extent_bit(&inode->io_tree, entry_start, entry_end, + EXTENT_QGROUP_RESERVED, NULL); + if (!ret && clear_ret < 0) + ret = clear_ret; + + ulist_del(&reserved->range_changed, entry->val, entry->aux); + if (likely(reserved->bytes_changed >= entry_len)) { + reserved->bytes_changed -= entry_len; + } else { + WARN_ON(1); + reserved->bytes_changed = 0; + } + } + + return ret; } /* - * Reserve qgroup space for range [start, start + len). + * Try to free some space for qgroup. * - * This function will either reserve space from related qgroups or doing - * nothing if the range is already reserved. + * For qgroup, there are only 3 ways to free qgroup space: + * - Flush nodatacow write + * Any nodatacow write will free its reserved data space at run_delalloc_range(). + * In theory, we should only flush nodatacow inodes, but it's not yet + * possible, so we need to flush the whole root. * - * Return 0 for successful reserve - * Return <0 for error (including -EQUOT) + * - Wait for ordered extents + * When ordered extents are finished, their reserved metadata is finally + * converted to per_trans status, which can be freed by later commit + * transaction. 
* - * NOTE: this function may sleep for memory allocation. - *	 if btrfs_qgroup_reserve_data() is called multiple times with - *	 same @reserved, caller must ensure when error happens it's OK - *	 to free *ALL* reserved space. + * - Commit transaction + *	This would free the meta_per_trans space. + *	In theory this shouldn't provide much space, but any more qgroup space + *	is better than nothing. */ -int btrfs_qgroup_reserve_data(struct inode *inode, +static int try_flush_qgroup(struct btrfs_root *root) +{ + int ret; + + /* Can't hold an open transaction or we run the risk of deadlocking. */ + ASSERT(current->journal_info == NULL); + if (WARN_ON(current->journal_info)) + return 0; + + /* + * We don't want to run flush again and again, so if there is a running + * one, we won't try to start a new flush, but exit directly. + */ + if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) { + wait_event(root->qgroup_flush_wait, + !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)); + return 0; + } + + ret = btrfs_start_delalloc_snapshot(root, true); + if (ret < 0) + goto out; + btrfs_wait_ordered_extents(root, U64_MAX, NULL); + + /* + * After waiting for ordered extents run delayed iputs in order to free + * space from unlinked files before committing the current transaction, + * as ordered extents may have been holding the last reference of an + * inode and they add a delayed iput when they complete. + */ + btrfs_run_delayed_iputs(root->fs_info); + btrfs_wait_on_delayed_iputs(root->fs_info); + + ret = btrfs_commit_current_transaction(root); +out: + clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state); + wake_up(&root->qgroup_flush_wait); + return ret; +} + +static int qgroup_reserve_data(struct btrfs_inode *inode, struct extent_changeset **reserved_ret, u64 start, u64 len) { - struct btrfs_root *root = BTRFS_I(inode)->root; - struct ulist_node *unode; - struct ulist_iterator uiter; + struct btrfs_root *root = inode->root; struct extent_changeset *reserved; + bool new_reserved = false; u64 orig_reserved; u64 to_reserve; int ret; - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) || - !is_fstree(root->objectid) || len == 0) + if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED || + !btrfs_is_fstree(btrfs_root_id(root)) || len == 0) return 0; /* @reserved parameter is mandatory for qgroup */ if (WARN_ON(!reserved_ret)) return -EINVAL; if (!*reserved_ret) { + new_reserved = true; *reserved_ret = extent_changeset_alloc(); if (!*reserved_ret) return -ENOMEM; @@ -2866,41 +4203,70 @@ int btrfs_qgroup_reserve_data(struct inode *inode, reserved = *reserved_ret; /* Record already reserved space */ orig_reserved = reserved->bytes_changed; - ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start, - start + len -1, EXTENT_QGROUP_RESERVED, reserved); + ret = btrfs_set_record_extent_bits(&inode->io_tree, start, + start + len - 1, EXTENT_QGROUP_RESERVED, + reserved); /* Newly reserved space */ to_reserve = reserved->bytes_changed - orig_reserved; - trace_btrfs_qgroup_reserve_data(inode, start, len, + trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len, to_reserve, QGROUP_RESERVE); if (ret < 0) - goto cleanup; - ret = qgroup_reserve(root, to_reserve, true); + goto out; + ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA); if (ret < 0) goto cleanup; return ret; cleanup: - /* cleanup *ALL* already reserved ranges */ - ULIST_ITER_INIT(&uiter); - while ((unode = ulist_next(&reserved->range_changed, &uiter))) - clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val, 
- unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL, - GFP_NOFS); - extent_changeset_release(reserved); + qgroup_unreserve_range(inode, reserved, start, len); +out: + if (new_reserved) { + extent_changeset_free(reserved); + *reserved_ret = NULL; + } return ret; } +/* + * Reserve qgroup space for range [start, start + len). + * + * This function will either reserve space from related qgroups or do nothing + * if the range is already reserved. + * + * Return 0 for successful reservation + * Return <0 for error (including -EQUOT) + * + * NOTE: This function may sleep for memory allocation, dirty page flushing and + * commit transaction. So caller should not hold any dirty page locked. + */ +int btrfs_qgroup_reserve_data(struct btrfs_inode *inode, + struct extent_changeset **reserved_ret, u64 start, + u64 len) +{ + int ret; + + ret = qgroup_reserve_data(inode, reserved_ret, start, len); + if (ret <= 0 && ret != -EDQUOT) + return ret; + + ret = try_flush_qgroup(inode->root); + if (ret < 0) + return ret; + return qgroup_reserve_data(inode, reserved_ret, start, len); +} + /* Free ranges specified by @reserved, normally in error path */ -static int qgroup_free_reserved_data(struct inode *inode, - struct extent_changeset *reserved, u64 start, u64 len) +static int qgroup_free_reserved_data(struct btrfs_inode *inode, + struct extent_changeset *reserved, + u64 start, u64 len, u64 *freed_ret) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct ulist_node *unode; struct ulist_iterator uiter; struct extent_changeset changeset; - int freed = 0; + u64 freed = 0; int ret; extent_changeset_init(&changeset); @@ -2932,47 +4298,58 @@ static int qgroup_free_reserved_data(struct inode *inode, * EXTENT_QGROUP_RESERVED, we won't double free. * So not need to rush. 
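btrfs_qgroup_reserve_data() above introduces the pattern used throughout this series: try to reserve, and only on -EDQUOT flush once via try_flush_qgroup() and retry exactly once. A compilable toy of that contract, where reserve(), flush() and the byte counts are invented for illustration:

#include <errno.h>
#include <stdio.h>

static long long available = 4096;

static int reserve(long long bytes)
{
        if (bytes > available)
                return -EDQUOT;
        available -= bytes;
        return 0;
}

static int flush(void)
{
        available += 8192;      /* pretend flushing reclaimed some space */
        return 0;
}

static int reserve_with_flush(long long bytes)
{
        int ret = reserve(bytes);

        if (ret != -EDQUOT)
                return ret;     /* success or a hard error: don't flush */
        ret = flush();
        if (ret < 0)
                return ret;
        return reserve(bytes);  /* one retry only, mirroring the kernel */
}

int main(void)
{
        printf("ret=%d available=%lld\n", reserve_with_flush(6000), available);
        return 0;
}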
*/ - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree, - free_start, free_start + free_len - 1, - EXTENT_QGROUP_RESERVED, &changeset); + ret = btrfs_clear_record_extent_bits(&inode->io_tree, free_start, + free_start + free_len - 1, + EXTENT_QGROUP_RESERVED, + &changeset); if (ret < 0) goto out; freed += changeset.bytes_changed; } - btrfs_qgroup_free_refroot(root->fs_info, root->objectid, freed); - ret = freed; + btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed, + BTRFS_QGROUP_RSV_DATA); + if (freed_ret) + *freed_ret = freed; + ret = 0; out: extent_changeset_release(&changeset); return ret; } -static int __btrfs_qgroup_release_data(struct inode *inode, +static int __btrfs_qgroup_release_data(struct btrfs_inode *inode, struct extent_changeset *reserved, u64 start, u64 len, - int free) + u64 *released, int free) { struct extent_changeset changeset; int trace_op = QGROUP_RELEASE; int ret; + if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) { + return btrfs_clear_record_extent_bits(&inode->io_tree, start, + start + len - 1, + EXTENT_QGROUP_RESERVED, NULL); + } + /* In release case, we shouldn't have @reserved */ WARN_ON(!free && reserved); if (free && reserved) - return qgroup_free_reserved_data(inode, reserved, start, len); + return qgroup_free_reserved_data(inode, reserved, start, len, released); extent_changeset_init(&changeset); - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, - start + len -1, EXTENT_QGROUP_RESERVED, &changeset); + ret = btrfs_clear_record_extent_bits(&inode->io_tree, start, start + len - 1, + EXTENT_QGROUP_RESERVED, &changeset); if (ret < 0) goto out; if (free) trace_op = QGROUP_FREE; - trace_btrfs_qgroup_release_data(inode, start, len, + trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len, changeset.bytes_changed, trace_op); if (free) - btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, - BTRFS_I(inode)->root->objectid, - changeset.bytes_changed); - ret = changeset.bytes_changed; + btrfs_qgroup_free_refroot(inode->root->fs_info, + btrfs_root_id(inode->root), + changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); + if (released) + *released = changeset.bytes_changed; out: extent_changeset_release(&changeset); return ret; @@ -2990,10 +4367,11 @@ out: * * NOTE: This function may sleep for memory allocation. */ -int btrfs_qgroup_free_data(struct inode *inode, - struct extent_changeset *reserved, u64 start, u64 len) +int btrfs_qgroup_free_data(struct btrfs_inode *inode, + struct extent_changeset *reserved, + u64 start, u64 len, u64 *freed) { - return __btrfs_qgroup_release_data(inode, reserved, start, len, 1); + return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1); } /* @@ -3011,66 +4389,194 @@ int btrfs_qgroup_free_data(struct inode *inode, * * NOTE: This function may sleep for memory allocation. 
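The release/free split in __btrfs_qgroup_release_data() above is easy to misread in diff form: both paths clear EXTENT_QGROUP_RESERVED for the range, but only the "free" path returns bytes to the qgroup counters; a plain "release" leaves them accounted until the ordered extent finishes. A toy model of that split (types and names are illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct toy_qgroup {
        unsigned long long reserved;    /* models rsv.values[BTRFS_QGROUP_RSV_DATA] */
};

/* Both flavours clear the range marker and report how many bytes were
 * covered; only the "free" flavour gives the bytes back, clamping like
 * qgroup_rsv_release() to avoid underflow. */
static unsigned long long release_range(struct toy_qgroup *qg,
                                        unsigned long long bytes, bool free)
{
        if (free)
                qg->reserved -= (bytes > qg->reserved) ? qg->reserved : bytes;
        return bytes;
}

int main(void)
{
        struct toy_qgroup qg = { .reserved = 1ULL << 20 };

        release_range(&qg, 4096, false);        /* release: keep accounting */
        release_range(&qg, 4096, true);         /* free: hand space back */
        printf("reserved now %llu\n", qg.reserved);
        return 0;
}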
*/ -int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len) +int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released) { - return __btrfs_qgroup_release_data(inode, NULL, start, len, 0); + return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0); +} + +static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes, + enum btrfs_qgroup_rsv_type type) +{ + if (type != BTRFS_QGROUP_RSV_META_PREALLOC && + type != BTRFS_QGROUP_RSV_META_PERTRANS) + return; + if (num_bytes == 0) + return; + + spin_lock(&root->qgroup_meta_rsv_lock); + if (type == BTRFS_QGROUP_RSV_META_PREALLOC) + root->qgroup_meta_rsv_prealloc += num_bytes; + else + root->qgroup_meta_rsv_pertrans += num_bytes; + spin_unlock(&root->qgroup_meta_rsv_lock); +} + +static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes, + enum btrfs_qgroup_rsv_type type) +{ + if (type != BTRFS_QGROUP_RSV_META_PREALLOC && + type != BTRFS_QGROUP_RSV_META_PERTRANS) + return 0; + if (num_bytes == 0) + return 0; + + spin_lock(&root->qgroup_meta_rsv_lock); + if (type == BTRFS_QGROUP_RSV_META_PREALLOC) { + num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc, + num_bytes); + root->qgroup_meta_rsv_prealloc -= num_bytes; + } else { + num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans, + num_bytes); + root->qgroup_meta_rsv_pertrans -= num_bytes; + } + spin_unlock(&root->qgroup_meta_rsv_lock); + return num_bytes; } int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, - bool enforce) + enum btrfs_qgroup_rsv_type type, bool enforce) { struct btrfs_fs_info *fs_info = root->fs_info; int ret; - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || - !is_fstree(root->objectid) || num_bytes == 0) + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || + !btrfs_is_fstree(btrfs_root_id(root)) || num_bytes == 0) return 0; BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); - trace_qgroup_meta_reserve(root, (s64)num_bytes); - ret = qgroup_reserve(root, num_bytes, enforce); + trace_btrfs_qgroup_meta_reserve(root, (s64)num_bytes, type); + ret = qgroup_reserve(root, num_bytes, enforce, type); if (ret < 0) return ret; - atomic64_add(num_bytes, &root->qgroup_meta_rsv); + /* + * Record what we have reserved into root. + * + * To avoid quota disabled->enabled underflow. + * In that case, we may try to free space we haven't reserved + * (since quota was disabled), so record what we reserved into root. + * And ensure later release won't underflow this number. 
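The add_root_meta_rsv()/sub_root_meta_rsv() pair above implements the rule the preceding comment describes: the root remembers how much META_PREALLOC and META_PERTRANS it really reserved, and frees are clamped to that record. A compilable sketch of the clamp (toy types; the spinlock is elided):

#include <stdio.h>

struct toy_root {
        unsigned long long prealloc;    /* models qgroup_meta_rsv_prealloc */
        unsigned long long pertrans;    /* models qgroup_meta_rsv_pertrans */
};

static void add_meta_rsv(struct toy_root *root, unsigned long long n, int prealloc)
{
        if (prealloc)
                root->prealloc += n;
        else
                root->pertrans += n;
}

/* Clamp like sub_root_meta_rsv(): never free more than was recorded,
 * which is the guard against the quota disabled->enabled underflow. */
static unsigned long long sub_meta_rsv(struct toy_root *root,
                                       unsigned long long n, int prealloc)
{
        unsigned long long *v = prealloc ? &root->prealloc : &root->pertrans;

        if (n > *v)
                n = *v;
        *v -= n;
        return n;
}

int main(void)
{
        struct toy_root root = { 0, 0 };

        add_meta_rsv(&root, 16384, 1);
        /* Caller asks to free 32768, but only 16384 was ever recorded. */
        printf("freed %llu\n", sub_meta_rsv(&root, 32768, 1));
        return 0;
}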
+ */ + add_root_meta_rsv(root, num_bytes, type); return ret; } -void btrfs_qgroup_free_meta_all(struct btrfs_root *root) +int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, + enum btrfs_qgroup_rsv_type type, bool enforce, + bool noflush) +{ + int ret; + + ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce); + if ((ret <= 0 && ret != -EDQUOT) || noflush) + return ret; + + ret = try_flush_qgroup(root); + if (ret < 0) + return ret; + return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce); +} + +/* + * Per-transaction meta reservation should be all freed at transaction commit + * time + */ +void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; - u64 reserved; - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || - !is_fstree(root->objectid)) + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || + !btrfs_is_fstree(btrfs_root_id(root))) return; - reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0); - if (reserved == 0) - return; - trace_qgroup_meta_reserve(root, -(s64)reserved); - btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved); + /* TODO: Update trace point to handle such free */ + trace_btrfs_qgroup_meta_free_all_pertrans(root); + /* Special value -1 means to free all reserved space */ + btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1, + BTRFS_QGROUP_RSV_META_PERTRANS); } -void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes) +void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, + enum btrfs_qgroup_rsv_type type) { struct btrfs_fs_info *fs_info = root->fs_info; - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || - !is_fstree(root->objectid)) + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || + !btrfs_is_fstree(btrfs_root_id(root))) return; + /* + * reservation for META_PREALLOC can happen before quota is enabled, + * which can lead to underflow. + * Here ensure we will only free what we really have reserved. + */ + num_bytes = sub_root_meta_rsv(root, num_bytes, type); BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); - WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes); - atomic64_sub(num_bytes, &root->qgroup_meta_rsv); - trace_qgroup_meta_reserve(root, -(s64)num_bytes); - btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes); + trace_btrfs_qgroup_meta_reserve(root, -(s64)num_bytes, type); + btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type); +} + +static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, + int num_bytes) +{ + struct btrfs_qgroup *qgroup; + LIST_HEAD(qgroup_list); + + if (num_bytes == 0) + return; + if (!fs_info->quota_root) + return; + + spin_lock(&fs_info->qgroup_lock); + qgroup = find_qgroup_rb(fs_info, ref_root); + if (!qgroup) + goto out; + + qgroup_iterator_add(&qgroup_list, qgroup); + list_for_each_entry(qgroup, &qgroup_list, iterator) { + struct btrfs_qgroup_list *glist; + + qgroup_rsv_release(fs_info, qgroup, num_bytes, + BTRFS_QGROUP_RSV_META_PREALLOC); + if (!sb_rdonly(fs_info->sb)) + qgroup_rsv_add(fs_info, qgroup, num_bytes, + BTRFS_QGROUP_RSV_META_PERTRANS); + + list_for_each_entry(glist, &qgroup->groups, next_group) + qgroup_iterator_add(&qgroup_list, glist->group); + } +out: + qgroup_iterator_clean(&qgroup_list); + spin_unlock(&fs_info->qgroup_lock); +} + +/* + * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS. 
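qgroup_convert_meta() above moves a reservation between buckets while walking every ancestor group. A hedged sketch of the same walk in portable C; it assumes a single parent pointer, whereas the kernel iterates an arbitrary parent DAG with qgroup_iterator_add():

#include <stdio.h>

enum { RSV_PREALLOC, RSV_PERTRANS, RSV_LAST };

struct toy_qgroup {
        unsigned long long rsv[RSV_LAST];
        struct toy_qgroup *parent;      /* single parent for brevity */
};

/* Move bytes from the PREALLOC bucket to the PERTRANS bucket for the
 * group and all ancestors; the release side clamps to zero just like
 * qgroup_rsv_release() in the real code. */
static void convert_meta(struct toy_qgroup *qg, unsigned long long bytes)
{
        for (; qg; qg = qg->parent) {
                unsigned long long moved = bytes;

                if (moved > qg->rsv[RSV_PREALLOC])
                        moved = qg->rsv[RSV_PREALLOC];
                qg->rsv[RSV_PREALLOC] -= moved;
                qg->rsv[RSV_PERTRANS] += bytes;
        }
}

int main(void)
{
        struct toy_qgroup parent = { { 65536, 0 }, NULL };
        struct toy_qgroup child = { { 65536, 0 }, &parent };

        convert_meta(&child, 16384);
        printf("child prealloc=%llu pertrans=%llu\n",
               child.rsv[RSV_PREALLOC], child.rsv[RSV_PERTRANS]);
        return 0;
}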
+ * + * This is called when preallocated meta reservation needs to be used. + * Normally after btrfs_join_transaction() call. + */ +void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || + !btrfs_is_fstree(btrfs_root_id(root))) + return; + /* Same as btrfs_qgroup_free_meta_prealloc() */ + num_bytes = sub_root_meta_rsv(root, num_bytes, + BTRFS_QGROUP_RSV_META_PREALLOC); + trace_btrfs_qgroup_meta_convert(root, num_bytes); + qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes); + if (!sb_rdonly(fs_info->sb)) + add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS); } /* * Check qgroup reserved space leaking, normally at destroy inode * time */ -void btrfs_qgroup_check_reserved_leak(struct inode *inode) +void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode) { struct extent_changeset changeset; struct ulist_node *unode; @@ -3078,21 +4584,321 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode) int ret; extent_changeset_init(&changeset); - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, - EXTENT_QGROUP_RESERVED, &changeset); + ret = btrfs_clear_record_extent_bits(&inode->io_tree, 0, (u64)-1, + EXTENT_QGROUP_RESERVED, &changeset); WARN_ON(ret < 0); if (WARN_ON(changeset.bytes_changed)) { ULIST_ITER_INIT(&iter); while ((unode = ulist_next(&changeset.range_changed, &iter))) { - btrfs_warn(BTRFS_I(inode)->root->fs_info, - "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu", - inode->i_ino, unode->val, unode->aux); + btrfs_warn(inode->root->fs_info, + "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu", + btrfs_ino(inode), unode->val, unode->aux); } - btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, - BTRFS_I(inode)->root->objectid, - changeset.bytes_changed); + btrfs_qgroup_free_refroot(inode->root->fs_info, + btrfs_root_id(inode->root), + changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); } extent_changeset_release(&changeset); } + +void btrfs_qgroup_init_swapped_blocks( + struct btrfs_qgroup_swapped_blocks *swapped_blocks) +{ + int i; + + spin_lock_init(&swapped_blocks->lock); + for (i = 0; i < BTRFS_MAX_LEVEL; i++) + swapped_blocks->blocks[i] = RB_ROOT; + swapped_blocks->swapped = false; +} + +/* + * Delete all swapped blocks record of @root. + * Every record here means we skipped a full subtree scan for qgroup. + * + * Gets called when committing one transaction. 
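btrfs_qgroup_check_reserved_leak() above follows a common destroy-time pattern: sweep everything that should already be empty, warn about whatever is left, then reclaim it anyway. A self-contained toy of that pattern (the range array stands in for the io_tree changeset):

#include <stdio.h>

struct toy_range {
        unsigned long long start;
        unsigned long long end;
        int reserved;   /* models the EXTENT_QGROUP_RESERVED bit */
};

/* At "inode destroy" every reserved range should be gone; anything left
 * is a leak, so report it and return the bytes for the caller to free. */
static unsigned long long check_leak(struct toy_range *ranges, int nr)
{
        unsigned long long leaked = 0;

        for (int i = 0; i < nr; i++) {
                if (!ranges[i].reserved)
                        continue;
                fprintf(stderr, "leak: start=%llu end=%llu\n",
                        ranges[i].start, ranges[i].end);
                leaked += ranges[i].end - ranges[i].start + 1;
                ranges[i].reserved = 0;
        }
        return leaked;
}

int main(void)
{
        struct toy_range ranges[] = { { 0, 4095, 0 }, { 4096, 8191, 1 } };

        printf("leaked %llu bytes\n", check_leak(ranges, 2));
        return 0;
}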
+ */ +void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root) +{ + struct btrfs_qgroup_swapped_blocks *swapped_blocks; + int i; + + swapped_blocks = &root->swapped_blocks; + + spin_lock(&swapped_blocks->lock); + if (!swapped_blocks->swapped) + goto out; + for (i = 0; i < BTRFS_MAX_LEVEL; i++) { + struct rb_root *cur_root = &swapped_blocks->blocks[i]; + struct btrfs_qgroup_swapped_block *entry; + struct btrfs_qgroup_swapped_block *next; + + rbtree_postorder_for_each_entry_safe(entry, next, cur_root, + node) + kfree(entry); + swapped_blocks->blocks[i] = RB_ROOT; + } + swapped_blocks->swapped = false; +out: + spin_unlock(&swapped_blocks->lock); +} + +static int qgroup_swapped_block_bytenr_key_cmp(const void *key, const struct rb_node *node) +{ + const u64 *bytenr = key; + const struct btrfs_qgroup_swapped_block *block = rb_entry(node, + struct btrfs_qgroup_swapped_block, node); + + if (block->subvol_bytenr < *bytenr) + return -1; + else if (block->subvol_bytenr > *bytenr) + return 1; + + return 0; +} + +static int qgroup_swapped_block_bytenr_cmp(struct rb_node *new, const struct rb_node *existing) +{ + const struct btrfs_qgroup_swapped_block *new_block = rb_entry(new, + struct btrfs_qgroup_swapped_block, node); + + return qgroup_swapped_block_bytenr_key_cmp(&new_block->subvol_bytenr, existing); +} + +/* + * Add subtree roots record into @subvol_root. + * + * @subvol_root: tree root of the subvolume tree get swapped + * @bg: block group under balance + * @subvol_parent/slot: pointer to the subtree root in subvolume tree + * @reloc_parent/slot: pointer to the subtree root in reloc tree + * BOTH POINTERS ARE BEFORE TREE SWAP + * @last_snapshot: last snapshot generation of the subvolume tree + */ +int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root, + struct btrfs_block_group *bg, + struct extent_buffer *subvol_parent, int subvol_slot, + struct extent_buffer *reloc_parent, int reloc_slot, + u64 last_snapshot) +{ + struct btrfs_fs_info *fs_info = subvol_root->fs_info; + struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks; + struct btrfs_qgroup_swapped_block *block; + struct rb_node *node; + int level = btrfs_header_level(subvol_parent) - 1; + int ret = 0; + + if (!btrfs_qgroup_full_accounting(fs_info)) + return 0; + + if (unlikely(btrfs_node_ptr_generation(subvol_parent, subvol_slot) > + btrfs_node_ptr_generation(reloc_parent, reloc_slot))) { + btrfs_err_rl(fs_info, + "%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu", + __func__, + btrfs_node_ptr_generation(subvol_parent, subvol_slot), + btrfs_node_ptr_generation(reloc_parent, reloc_slot)); + return -EUCLEAN; + } + + block = kmalloc(sizeof(*block), GFP_NOFS); + if (!block) { + ret = -ENOMEM; + goto out; + } + + /* + * @reloc_parent/slot is still before swap, while @block is going to + * record the bytenr after swap, so we do the swap here. + */ + block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot); + block->subvol_generation = btrfs_node_ptr_generation(reloc_parent, + reloc_slot); + block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot); + block->reloc_generation = btrfs_node_ptr_generation(subvol_parent, + subvol_slot); + block->last_snapshot = last_snapshot; + block->level = level; + + /* + * If we have bg == NULL, we're called from btrfs_recover_relocation(), + * no one else can modify tree blocks thus we qgroup will not change + * no matter the value of trace_leaf. 
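The comparator pair just introduced keeps one three-way key comparison behind both rb_find_add() and rb_find(), so inserts and lookups always order the tree the same way; the absolute direction of the comparison is irrelevant as long as it is consistent. The same discipline in portable C, sharing one comparator between qsort() and bsearch() (toy types only):

#include <stdio.h>
#include <stdlib.h>

struct toy_block {
        unsigned long long bytenr;      /* models subvol_bytenr */
};

/* One three-way comparison drives both ordering and lookup, like
 * qgroup_swapped_block_bytenr_key_cmp() above. */
static int block_cmp(const void *a, const void *b)
{
        const struct toy_block *x = a;
        const struct toy_block *y = b;

        if (x->bytenr < y->bytenr)
                return -1;
        if (x->bytenr > y->bytenr)
                return 1;
        return 0;
}

int main(void)
{
        struct toy_block blocks[] = { { 3 }, { 1 }, { 2 } };
        struct toy_block key = { 2 };
        struct toy_block *found;

        qsort(blocks, 3, sizeof(blocks[0]), block_cmp);
        found = bsearch(&key, blocks, 3, sizeof(blocks[0]), block_cmp);
        printf("found bytenr %llu\n", found ? found->bytenr : 0ULL);
        return 0;
}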
+ */ + if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA) + block->trace_leaf = true; + else + block->trace_leaf = false; + btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot); + + /* Insert @block into @blocks */ + spin_lock(&blocks->lock); + node = rb_find_add(&block->node, &blocks->blocks[level], qgroup_swapped_block_bytenr_cmp); + if (node) { + struct btrfs_qgroup_swapped_block *entry; + + entry = rb_entry(node, struct btrfs_qgroup_swapped_block, node); + + if (entry->subvol_generation != block->subvol_generation || + entry->reloc_bytenr != block->reloc_bytenr || + entry->reloc_generation != block->reloc_generation) { + /* + * Duplicated but mismatch entry found. Shouldn't happen. + * Marking qgroup inconsistent should be enough for end + * users. + */ + DEBUG_WARN("duplicated but mismatched entry found"); + ret = -EEXIST; + } + kfree(block); + goto out_unlock; + } + blocks->swapped = true; +out_unlock: + spin_unlock(&blocks->lock); +out: + if (ret < 0) + qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret); + return ret; +} + +/* + * Check if the tree block is a subtree root, and if so do the needed + * delayed subtree trace for qgroup. + * + * This is called during btrfs_cow_block(). + */ +int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct extent_buffer *subvol_eb) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_tree_parent_check check = { 0 }; + struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks; + struct btrfs_qgroup_swapped_block AUTO_KFREE(block); + struct extent_buffer *reloc_eb = NULL; + struct rb_node *node; + bool swapped = false; + int level = btrfs_header_level(subvol_eb); + int ret = 0; + int i; + + if (!btrfs_qgroup_full_accounting(fs_info)) + return 0; + if (!btrfs_is_fstree(btrfs_root_id(root)) || !root->reloc_root) + return 0; + + spin_lock(&blocks->lock); + if (!blocks->swapped) { + spin_unlock(&blocks->lock); + return 0; + } + node = rb_find(&subvol_eb->start, &blocks->blocks[level], + qgroup_swapped_block_bytenr_key_cmp); + if (!node) { + spin_unlock(&blocks->lock); + goto out; + } + block = rb_entry(node, struct btrfs_qgroup_swapped_block, node); + + /* Found one, remove it from @blocks first and update blocks->swapped */ + rb_erase(&block->node, &blocks->blocks[level]); + for (i = 0; i < BTRFS_MAX_LEVEL; i++) { + if (RB_EMPTY_ROOT(&blocks->blocks[i])) { + swapped = true; + break; + } + } + blocks->swapped = swapped; + spin_unlock(&blocks->lock); + + check.level = block->level; + check.transid = block->reloc_generation; + check.has_first_key = true; + memcpy(&check.first_key, &block->first_key, sizeof(check.first_key)); + + /* Read out reloc subtree root */ + reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check); + if (IS_ERR(reloc_eb)) { + ret = PTR_ERR(reloc_eb); + reloc_eb = NULL; + goto free_out; + } + if (unlikely(!extent_buffer_uptodate(reloc_eb))) { + ret = -EIO; + goto free_out; + } + + ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb, + block->last_snapshot, block->trace_leaf); +free_out: + free_extent_buffer(reloc_eb); +out: + if (ret < 0) { + qgroup_mark_inconsistent(fs_info, + "failed to account subtree at bytenr %llu: %d", + subvol_eb->start, ret); + } + return ret; +} + +void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans) +{ + struct btrfs_qgroup_extent_record *entry; + unsigned long index; + + xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) { + ulist_free(entry->old_roots); + 
kfree(entry); + } + xa_destroy(&trans->delayed_refs.dirty_extents); +} + +int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info, + const struct btrfs_squota_delta *delta) +{ + int ret; + struct btrfs_qgroup *qgroup; + struct btrfs_qgroup *qg; + LIST_HEAD(qgroup_list); + u64 root = delta->root; + u64 num_bytes = delta->num_bytes; + const int sign = (delta->is_inc ? 1 : -1); + + if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE) + return 0; + + if (!btrfs_is_fstree(root)) + return 0; + + /* If the extent predates enabling quotas, don't count it. */ + if (delta->generation < fs_info->qgroup_enable_gen) + return 0; + + spin_lock(&fs_info->qgroup_lock); + qgroup = find_qgroup_rb(fs_info, root); + if (!qgroup) { + ret = -ENOENT; + goto out; + } + + ret = 0; + qgroup_iterator_add(&qgroup_list, qgroup); + list_for_each_entry(qg, &qgroup_list, iterator) { + struct btrfs_qgroup_list *glist; + + qg->excl += num_bytes * sign; + qg->rfer += num_bytes * sign; + qgroup_dirty(fs_info, qg); + + list_for_each_entry(glist, &qg->groups, next_group) + qgroup_iterator_add(&qgroup_list, glist->group); + } + qgroup_iterator_clean(&qgroup_list); + +out: + spin_unlock(&fs_info->qgroup_lock); + return ret; +}
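btrfs_record_squota_delta() closes out the file: in simple-quota mode each extent's bytes are applied once, with a sign, to the owning qgroup and all of its ancestors, and extents older than the enable generation are skipped. A hedged userspace model (single parent pointer instead of the kernel's qgroup_iterator DAG walk):

#include <stdio.h>

struct toy_qgroup {
        long long rfer;
        long long excl;
        struct toy_qgroup *parent;      /* the real code walks a DAG */
};

/* Apply a signed byte delta to the owning group and its ancestors,
 * skipping extents that predate quota enablement. */
static void record_delta(struct toy_qgroup *qg, unsigned long long bytes,
                         int is_inc, unsigned long long extent_gen,
                         unsigned long long enable_gen)
{
        const long long sign = is_inc ? 1 : -1;

        if (extent_gen < enable_gen)
                return;         /* extent predates enabling quotas */
        for (; qg; qg = qg->parent) {
                qg->excl += (long long)bytes * sign;
                qg->rfer += (long long)bytes * sign;
        }
}

int main(void)
{
        struct toy_qgroup parent = { 0, 0, NULL };
        struct toy_qgroup child = { 0, 0, &parent };

        record_delta(&child, 4096, 1, 10, 5);
        printf("child rfer=%lld parent rfer=%lld\n", child.rfer, parent.rfer);
        return 0;
}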
