 fs/iomap/buffered-io.c | 25 -------------------------
 1 file changed, 25 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 1cabd9b0249e..6ae031ac8058 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1518,35 +1518,10 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
.private = private,
};
struct address_space *mapping = inode->i_mapping;
- unsigned int blocksize = i_blocksize(inode);
- unsigned int off = pos & (blocksize - 1);
- loff_t plen = min_t(loff_t, len, blocksize - off);
int ret;
 bool range_dirty;

 /*
- * Zero range can skip mappings that are zero on disk so long as
- * pagecache is clean. If pagecache was dirty prior to zero range, the
- * mapping converts on writeback completion and so must be zeroed.
- *
- * The simplest way to deal with this across a range is to flush
- * pagecache and process the updated mappings. To avoid excessive
- * flushing on partial eof zeroing, special case it to zero the
- * unaligned start portion if already dirty in pagecache.
- */
- if (!iter.fbatch && off &&
- filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) {
- iter.len = plen;
- while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.status = iomap_zero_iter(&iter, did_zero,
- write_ops);
-
- iter.len = len - (iter.pos - pos);
- if (ret || !iter.len)
- return ret;
- }
-
- /*
* To avoid an unconditional flush, check pagecache state and only flush
* if dirty and the fs returns a mapping that might convert on
* writeback.
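
For context, below is a minimal userspace sketch of the special case these 25 lines removed. It is a model, not kernel code: needs_writeback() and zero_subrange() are hypothetical stand-ins for filemap_range_needs_writeback() and the iomap_iter()/iomap_zero_iter() loop, the !iter.fbatch guard is omitted, the constants are invented, and where upstream adjusts iter.len and reads back iter.pos, the sketch advances plain pos/len. It shows the alignment arithmetic and the early-return flow: zero only the unaligned head block if it is dirty in pagecache, then hand the rest of the range to the general flush-if-dirty path described in the surviving comment.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for filemap_range_needs_writeback(). */
static bool needs_writeback(long long from, long long to)
{
	(void)from;
	(void)to;
	return true;	/* pretend the start block is dirty in pagecache */
}

/* Hypothetical stand-in for the iomap_iter()/iomap_zero_iter() loop. */
static int zero_subrange(long long pos, long long len)
{
	printf("zeroing [%lld, %lld)\n", pos, pos + len);
	return 0;
}

int main(void)
{
	unsigned int blocksize = 4096;	/* assumed i_blocksize() result */
	long long pos = 1048676;	/* 100 bytes into a 4k block */
	long long len = 10000;

	/* The alignment math from the removed declarations: off is the
	 * offset of pos within its block, plen the length of the partial
	 * first block (min_t(loff_t, len, blocksize - off) upstream). */
	unsigned int off = pos & (blocksize - 1);
	long long plen = len < blocksize - off ? len : blocksize - off;

	/* The removed special case: if the range starts mid-block and the
	 * head block awaits writeback, zero just the unaligned head... */
	if (off && needs_writeback(pos, pos + plen - 1)) {
		int ret = zero_subrange(pos, plen);

		/* ...then shrink the remaining range, returning early if
		 * zeroing failed or nothing is left. */
		pos += plen;
		len -= plen;
		if (ret || !len)
			return ret;
	}

	/* The rest of iomap_zero_range() handles [pos, pos + len). */
	printf("general path handles [%lld, %lld)\n", pos, pos + len);
	return 0;
}

Compiled standalone, this prints the head zeroing for the dirty partial block ([1048676, 1052672), i.e. plen = 3996 bytes) followed by the remainder handed to the general path.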