author     Christoph Hellwig <hch@lst.de>    2023-06-28 17:31:42 +0200
committer  David Sterba <dsterba@suse.com>   2023-08-21 14:52:16 +0200
commit     778b878543f05ddf11843cd44ba9b1e775216fc0 (patch)
tree       bb2c2174d0814ccb63d2501dfbf366e346643867 /fs/btrfs/extent_io.h
parent     6e144bf16ba07dff649e6bd6afd79d2e353f6216 (diff)
btrfs: don't redirty locked_page in run_delalloc_zoned
extent_write_locked_range currently expects that either all or no pages are dirty when it is called. But run_delalloc_zoned is called directly in the writepages path, and has the dirty bit cleared only for locked_page, the one page that extent_write_cache_pages currently operates on. It currently works around this by redirtying locked_page, but that is a bit inefficient and cumbersome. Pass a locked_page argument to run_delalloc_zoned so that clearing the dirty bit can be skipped on just that page.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
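The diffstat below covers only the header, so the body of extent_write_locked_range is not shown here. The following is a minimal sketch of how the new locked_page argument might be used inside it: the prototype matches the diff, find_get_page()/clear_page_dirty_for_io()/put_page() are real kernel APIs, but the loop body is illustrative rather than the verbatim patch to fs/btrfs/extent_io.c.

#include <linux/mm.h>		/* clear_page_dirty_for_io(), put_page() */
#include <linux/pagemap.h>	/* find_get_page() */
#include <linux/writeback.h>	/* struct writeback_control */

/*
 * Illustrative body only; the real implementation lives in
 * fs/btrfs/extent_io.c, which is outside this diffstat.  The point is
 * the page != locked_page check, which preserves the dirty-bit handling
 * the writepages path already did for locked_page instead of requiring
 * the caller to redirty it first.
 */
void extent_write_locked_range(struct inode *inode, struct page *locked_page,
			       u64 start, u64 end, struct writeback_control *wbc,
			       bool pages_dirty)
{
	struct address_space *mapping = inode->i_mapping;
	u64 cur = start;

	while (cur <= end) {
		struct page *page = find_get_page(mapping, cur >> PAGE_SHIFT);

		/*
		 * extent_write_cache_pages already called
		 * clear_page_dirty_for_io() on locked_page, so only clear
		 * the dirty bit on the other pages of the range.
		 */
		if (pages_dirty && page != locked_page)
			clear_page_dirty_for_io(page);

		/* ... submit the page for writeback here ... */

		put_page(page);
		cur += PAGE_SIZE;
	}
}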
Diffstat (limited to 'fs/btrfs/extent_io.h')
-rw-r--r--  fs/btrfs/extent_io.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 2678906e87c5..c01f9c5ddc13 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -177,8 +177,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int btrfs_read_folio(struct file *file, struct folio *folio);
-void extent_write_locked_range(struct inode *inode, u64 start, u64 end,
- struct writeback_control *wbc, bool pages_dirty);
+void extent_write_locked_range(struct inode *inode, struct page *locked_page,
+ u64 start, u64 end, struct writeback_control *wbc,
+ bool pages_dirty);
int extent_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,