author:    Filipe Manana <fdmanana@suse.com>    2022-03-15 15:22:41 +0000
committer: David Sterba <dsterba@suse.com>      2022-05-16 17:03:09 +0200
commit:    63c34cb4c6dddd7899a14ed0a11b208a41e9c85c
tree:      c39dddcc55bce02fd51bc05b68020557e19c3f95 /fs/btrfs/file.c
parent:    55961c8abfdcb1f0c8420c5c6bdff86170cf43fc
btrfs: add and use helper to assert an inode range is clean
We have four different scenarios where we don't expect to find ordered
extents after locking a file range:

1) During plain fallocate;
2) During hole punching;
3) During zero range;
4) During reflinks (both cloning and deduplication).

This is because in all these cases we follow the pattern:

1) Lock the inode's VFS lock in exclusive mode;
2) Lock the inode's i_mmap_lock in exclusive mode, to serialize with
   mmap writes;
3) Flush delalloc in the file range and wait for all ordered extents
   to complete - both done through btrfs_wait_ordered_range();
4) Lock the file range in the inode's io_tree.

So add a helper that asserts that we don't have ordered extents for a
given range. Make the four scenarios listed above use this helper after
locking the respective file range.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
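The helper's implementation itself is not part of the diff below, which
cgit limits to fs/btrfs/file.c. As a rough sketch of what such an
assertion helper could look like - assuming a lookup primitive like
btrfs_lookup_first_ordered_range() and btrfs's debug-only
ASSERT()/CONFIG_BTRFS_ASSERT machinery, none of which is confirmed by
this diff:

/*
 * Sketch only: the real helper lives outside fs/btrfs/file.c, and the
 * lookup function and config guard used here are assumptions.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode,
				    u64 start, u64 end)
{
	struct btrfs_ordered_extent *ordered;

	/* Compile the check out entirely on non-assert builds. */
	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	/*
	 * Any ordered extent overlapping [start, end] means the flush
	 * and wait done before locking the range did not do its job.
	 */
	ordered = btrfs_lookup_first_ordered_range(inode, start,
						   end + 1 - start);
	if (ordered)
		btrfs_err(inode->root->fs_info,
			  "unexpected ordered extent in file range [%llu, %llu]",
			  start, end);
	ASSERT(ordered == NULL);
}

On builds without CONFIG_BTRFS_ASSERT such a helper would be a no-op,
so the call sites added below should cost nothing in production.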
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--  fs/btrfs/file.c  4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index fd90e25382b2..bd329316945f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2608,6 +2608,8 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
 				     lockend, cached_state);
 	}
+
+	btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
 }
 
 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
@@ -3479,6 +3481,8 @@ static long btrfs_fallocate(struct file *file, int mode,
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
 			 &cached_state);
 
+	btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
+
 	/* First, check if we exceed the qgroup limit */
 	INIT_LIST_HEAD(&reserve_list);
 	while (cur_offset < alloc_end) {