author     Linus Torvalds <torvalds@linux-foundation.org>   2019-07-26 11:08:37 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-07-26 11:08:37 -0700
commit     4792ba1f1ff0db30369f7016c1611fda3f84b895 (patch)
tree       daec30787cf1f3b13f87a05216fca2f8d4916ed1 /fs/btrfs
parent     863fa8887befa19c89fadb1dc6666ac34314fbce (diff)
parent     a3b46b86ca76d7f9d487e6a0b594fd1984e0796e (diff)
Merge tag 'for-5.3-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs fixes from David Sterba:
 "Two regression fixes:

  - hangs caused by a missing barrier in the locking code

  - memory leaks of extent_state due to bad handling of a cached
    pointer"

* tag 'for-5.3-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix extent_state leak in btrfs_lock_and_flush_ordered_range
  btrfs: Fix deadlock caused by missing memory barrier
Diffstat (limited to 'fs/btrfs')
-rw-r--r--   fs/btrfs/locking.c        9
-rw-r--r--   fs/btrfs/ordered-data.c  11
2 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 98fccce4208c..393eceda57c8 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -346,9 +346,12 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
         if (blockers) {
                 btrfs_assert_no_spinning_writers(eb);
                 eb->blocking_writers--;
-                /* Use the lighter barrier after atomic */
-                smp_mb__after_atomic();
-                cond_wake_up_nomb(&eb->write_lock_wq);
+                /*
+                 * We need to order modifying blocking_writers above with
+                 * actually waking up the sleepers to ensure they see the
+                 * updated value of blocking_writers
+                 */
+                cond_wake_up(&eb->write_lock_wq);
         } else {
                 btrfs_assert_spinning_writers_put(eb);
                 write_unlock(&eb->lock);
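
For readers of the locking.c hunk above: cond_wake_up_nomb() checks waitqueue_active() with no barrier of its own and relies on the caller to provide one, while cond_wake_up() checks wq_has_sleeper(), which performs a full barrier before inspecting the wait queue. Because eb->blocking_writers is decremented with a plain (non-atomic) store here, smp_mb__after_atomic() is only guaranteed to give full ordering after an atomic RMW, so a waiter on write_lock_wq could read a stale blocking_writers value and sleep forever. A minimal sketch of the two helpers, paraphrased from the btrfs headers of this era (comments are mine, not verbatim):

#include <linux/wait.h>

static inline void cond_wake_up(struct wait_queue_head *wq)
{
        /* wq_has_sleeper() issues a full smp_mb() before checking the queue */
        if (wq_has_sleeper(wq))
                wake_up(wq);
}

static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
        /*
         * No barrier here: the caller must have ordered its state update
         * against this waitqueue_active() check itself.
         */
        if (waitqueue_active(wq))
                wake_up(wq);
}
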
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 1744ba8b2754..ae7f64a8facb 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -985,13 +985,14 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
                                          struct extent_state **cached_state)
 {
         struct btrfs_ordered_extent *ordered;
-        struct extent_state *cachedp = NULL;
+        struct extent_state *cache = NULL;
+        struct extent_state **cachedp = &cache;
 
         if (cached_state)
-                cachedp = *cached_state;
+                cachedp = cached_state;
 
         while (1) {
-                lock_extent_bits(tree, start, end, &cachedp);
+                lock_extent_bits(tree, start, end, cachedp);
                 ordered = btrfs_lookup_ordered_range(inode, start,
                                                      end - start + 1);
                 if (!ordered) {
@@ -1001,10 +1002,10 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
                          * aren't exposing it outside of this function
                          */
                         if (!cached_state)
-                                refcount_dec(&cachedp->refs);
+                                refcount_dec(&cache->refs);
                         break;
                 }
-                unlock_extent_cached(tree, start, end, &cachedp);
+                unlock_extent_cached(tree, start, end, cachedp);
                 btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
                 btrfs_put_ordered_extent(ordered);
         }
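
On the ordered-data.c side, the leak the changelog refers to comes from the old code copying *cached_state into a local variable and passing that local's address to lock_extent_bits(): the extent_state reference cached there never propagated back to the caller's cached_state, so it was never released. With the pointer-to-pointer, the cache lands either directly in the caller's slot or in the on-stack fallback, whose extra reference is explicitly dropped before the break. A hypothetical caller, for illustration only; the middle parameters of btrfs_lock_and_flush_ordered_range() are not visible in this hunk, so their order here is an assumption, and the sketch presumes the btrfs internal headers (extent_io.h, ordered-data.h):

/* Hypothetical caller sketch, not part of this patch. */
static void example_read_range(struct extent_io_tree *tree,
                               struct btrfs_inode *inode, u64 start, u64 end)
{
        struct extent_state *cached = NULL;

        /*
         * With the fix, the extent_state cached by lock_extent_bits()
         * inside the helper is written through to "cached", so this
         * caller owns the reference ...
         */
        btrfs_lock_and_flush_ordered_range(tree, inode, start, end, &cached);

        /* ... work on the locked, ordered-extent-free range ... */

        /* ... and it is released here together with the lock. */
        unlock_extent_cached(tree, start, end, &cached);
}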