summary refs log tree commit diff
path: root/fs/xfs/xfs_iomap.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/xfs/xfs_iomap.c')
-rw-r--r-- fs/xfs/xfs_iomap.c | 47 +++++---------------------------------------
1 file changed, 8 insertions(+), 39 deletions(-)
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 7bb55dbc19d3..ea96e8a34868 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1123,12 +1123,12 @@ out_unlock:
static int
xfs_buffered_write_delalloc_punch(
struct inode *inode,
- loff_t start_byte,
- loff_t end_byte)
+ loff_t offset,
+ loff_t length)
{
struct xfs_mount *mp = XFS_M(inode->i_sb);
- xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, start_byte);
- xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, end_byte);
+ xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
return xfs_bmap_punch_delalloc_range(XFS_I(inode), start_fsb,
end_fsb - start_fsb);
@@ -1143,13 +1143,9 @@ xfs_buffered_write_iomap_end(
unsigned flags,
struct iomap *iomap)
{
- struct xfs_mount *mp = XFS_M(inode->i_sb);
- loff_t start_byte;
- loff_t end_byte;
- int error = 0;
- if (iomap->type != IOMAP_DELALLOC)
- return 0;
+ struct xfs_mount *mp = XFS_M(inode->i_sb);
+ int error;
/*
* Behave as if the write failed if drop writes is enabled. Set the NEW
@@ -1160,35 +1156,8 @@ xfs_buffered_write_iomap_end(
written = 0;
}
- /* If we didn't reserve the blocks, we're not allowed to punch them. */
- if (!(iomap->flags & IOMAP_F_NEW))
- return 0;
-
- /*
- * start_fsb refers to the first unused block after a short write. If
- * nothing was written, round offset down to point at the first block in
- * the range.
- */
- if (unlikely(!written))
- start_byte = round_down(offset, mp->m_sb.sb_blocksize);
- else
- start_byte = round_up(offset + written, mp->m_sb.sb_blocksize);
- end_byte = round_up(offset + length, mp->m_sb.sb_blocksize);
-
- /* Nothing to do if we've written the entire delalloc extent */
- if (start_byte >= end_byte)
- return 0;
-
- /*
- * Lock the mapping to avoid races with page faults re-instantiating
- * folios and dirtying them via ->page_mkwrite between the page cache
- * truncation and the delalloc extent removal. Failing to do this can
- * leave dirty pages with no space reservation in the cache.
- */
- filemap_invalidate_lock(inode->i_mapping);
- truncate_pagecache_range(inode, start_byte, end_byte - 1);
- error = xfs_buffered_write_delalloc_punch(inode, start_byte, end_byte);
- filemap_invalidate_unlock(inode->i_mapping);
+ error = iomap_file_buffered_write_punch_delalloc(inode, iomap, offset,
+ length, written, &xfs_buffered_write_delalloc_punch);
if (error && !xfs_is_shutdown(mp)) {
xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
__func__, XFS_I(inode)->i_ino);