path: root/fs/btrfs/qgroup.c
author	Qu Wenruo <wqu@suse.com>	2020-12-04 09:24:47 +0800
committer	David Sterba <dsterba@suse.com>	2020-12-18 14:50:07 +0100
commit	ae5e070eaca9dbebde3459dd8f4c2756f8c097d0 (patch)
tree	f50431f9404ecd29a263188a4c0b579404fd497a /fs/btrfs/qgroup.c
parent	9a664971569daf68254928149f580b4f5856d274 (diff)
btrfs: qgroup: don't try to wait flushing if we're already holding a transaction
There is a chance of racing for qgroup flushing which may lead to
deadlock:

            Thread A                |            Thread B
    (not holding trans handle)      |    (holding a trans handle)
 -----------------------------------+-----------------------------------
  __btrfs_qgroup_reserve_meta()     |  __btrfs_qgroup_reserve_meta()
  |- try_flush_qgroup()             |  |- try_flush_qgroup()
     |- QGROUP_FLUSHING bit set     |     |
                                    |     |- test_and_set_bit()
                                    |     |- wait_event()
     |- btrfs_join_transaction()    |
        |- btrfs_commit_transaction()
                      !!! DEAD LOCK !!!

Thread A wants to commit the transaction, but thread B is holding a
transaction handle, which blocks the commit. At the same time, thread B
is waiting for thread A to finish its commit.

This is just a hot fix, and it can lead to more EDQUOT errors when we
are near the qgroup limit.

The proper fix would be to make all metadata/data reservations happen
without holding a transaction handle.

CC: stable@vger.kernel.org # 5.9+
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
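Tying the diagram to the code: in the pre-patch try_flush_qgroup(), the loser of the test_and_set_bit() race always waits for the QGROUP_FLUSHING bit, even if it already holds a transaction handle, while the winner cannot clear the bit until its transaction commit finishes. A simplified, annotated sketch of that pre-patch flow (illustration only, not a verbatim copy of the function; the delalloc/ordered-extent flushing and most error handling are trimmed):

/* Simplified pre-patch flow of try_flush_qgroup(); illustration only. */
static int try_flush_qgroup(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret = 0;

	/*
	 * Thread B, which already holds a transaction handle, loses the
	 * race and sleeps here until the bit is cleared ...
	 */
	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
		wait_event(root->qgroup_flush_wait,
			   !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
		return 0;
	}

	/*
	 * ... while thread A, which won the race, cannot clear the bit
	 * until this commit completes, and the commit in turn waits for
	 * thread B to release its open transaction handle.
	 */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}
	ret = btrfs_commit_transaction(trans);
out:
	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
	wake_up(&root->qgroup_flush_wait);
	return ret;
}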
Diffstat (limited to 'fs/btrfs/qgroup.c')
-rw-r--r--	fs/btrfs/qgroup.c	| 30
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index fe3046007f52..47f27658eac1 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3531,16 +3531,6 @@ static int try_flush_qgroup(struct btrfs_root *root)
 	bool can_commit = true;
 
 	/*
-	 * We don't want to run flush again and again, so if there is a running
-	 * one, we won't try to start a new flush, but exit directly.
-	 */
-	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
-		wait_event(root->qgroup_flush_wait,
-			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
-		return 0;
-	}
-
-	/*
 	 * If current process holds a transaction, we shouldn't flush, as we
 	 * assume all space reservation happens before a transaction handle is
 	 * held.
@@ -3554,6 +3544,26 @@ static int try_flush_qgroup(struct btrfs_root *root)
 	    current->journal_info != BTRFS_SEND_TRANS_STUB)
 		can_commit = false;
 
+	/*
+	 * We don't want to run flush again and again, so if there is a running
+	 * one, we won't try to start a new flush, but exit directly.
+	 */
+	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
+		/*
+		 * We are already holding a transaction, thus we can block other
+		 * threads from flushing. So exit right now. This increases
+		 * the chance of EDQUOT for heavy load and near limit cases.
+		 * But we can argue that if we're already near limit, EDQUOT is
+		 * unavoidable anyway.
+		 */
+		if (!can_commit)
+			return 0;
+
+		wait_event(root->qgroup_flush_wait,
+			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
+		return 0;
+	}
+
 	ret = btrfs_start_delalloc_snapshot(root);
 	if (ret < 0)
 		goto out;
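Taken together, the two hunks reorder the function so that can_commit is computed before any decision to wait on another flusher. The resulting flow of try_flush_qgroup() reads roughly as follows (reconstructed from the hunks above; the declarations and the tail of the function are filled in as a best-effort sketch and may differ in detail from the actual source):

static int try_flush_qgroup(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;
	bool can_commit = true;

	/*
	 * If current process holds a transaction, we shouldn't flush, as we
	 * assume all space reservation happens before a transaction handle is
	 * held.
	 */
	if (current->journal_info &&
	    current->journal_info != BTRFS_SEND_TRANS_STUB)
		can_commit = false;

	/*
	 * We don't want to run flush again and again, so if there is a running
	 * one, we won't try to start a new flush, but exit directly.
	 */
	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
		/*
		 * Holding a transaction handle: never wait for the running
		 * flush here; report success and accept the higher chance
		 * of EDQUOT near the limit.
		 */
		if (!can_commit)
			return 0;

		wait_event(root->qgroup_flush_wait,
			   !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
		return 0;
	}

	ret = btrfs_start_delalloc_snapshot(root);
	if (ret < 0)
		goto out;
	/* Waiting for ordered extents is elided from this sketch. */

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* Only commit when the caller does not hold a transaction handle. */
	if (can_commit)
		ret = btrfs_commit_transaction(trans);
	else
		ret = btrfs_end_transaction(trans);
out:
	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
	wake_up(&root->qgroup_flush_wait);
	return ret;
}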