Diffstat (limited to 'fs/zonefs/file.c')
-rw-r--r--  fs/zonefs/file.c  50
1 file changed, 31 insertions(+), 19 deletions(-)
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index 42e2c0065bb3..fd3a5922f6c3 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -124,37 +124,46 @@ static void zonefs_readahead(struct readahead_control *rac)
* Map blocks for page writeback. This is used only on conventional zone files,
* which implies that the page range can only be within the fixed inode size.
*/
-static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
- struct inode *inode, loff_t offset,
- unsigned int len)
+static ssize_t zonefs_writeback_range(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 offset, unsigned len, u64 end_pos)
{
- struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(wpc->inode);

if (WARN_ON_ONCE(zonefs_zone_is_seq(z)))
return -EIO;
- if (WARN_ON_ONCE(offset >= i_size_read(inode)))
+ if (WARN_ON_ONCE(offset >= i_size_read(wpc->inode)))
return -EIO;

/* If the mapping is already OK, nothing needs to be done */
- if (offset >= wpc->iomap.offset &&
- offset < wpc->iomap.offset + wpc->iomap.length)
- return 0;
+ if (offset < wpc->iomap.offset ||
+ offset >= wpc->iomap.offset + wpc->iomap.length) {
+ int error;
+
+ error = zonefs_write_iomap_begin(wpc->inode, offset,
+ z->z_capacity - offset, IOMAP_WRITE,
+ &wpc->iomap, NULL);
+ if (error)
+ return error;
+ }

- return zonefs_write_iomap_begin(inode, offset,
- z->z_capacity - offset,
- IOMAP_WRITE, &wpc->iomap, NULL);
+ return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}

static const struct iomap_writeback_ops zonefs_writeback_ops = {
- .map_blocks = zonefs_write_map_blocks,
+ .writeback_range = zonefs_writeback_range,
+ .writeback_submit = iomap_ioend_writeback_submit,
};

static int zonefs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct iomap_writepage_ctx wpc = { };
+ struct iomap_writepage_ctx wpc = {
+ .inode = mapping->host,
+ .wbc = wbc,
+ .ops = &zonefs_writeback_ops,
+ };

- return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
+ return iomap_writepages(&wpc);
}

static int zonefs_swap_activate(struct swap_info_struct *sis,
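
The hunk above tracks the iomap writeback rework: the old ->map_blocks callback, which only refreshed wpc->iomap, is replaced by ->writeback_range, which both revalidates the cached mapping and queues the folio range on an ioend via iomap_add_to_ioend(), with submission delegated to the stock iomap_ioend_writeback_submit helper. Reduced to its skeleton, the pattern a filesystem now implements looks like this (a sketch only; myfs_map_blocks is a hypothetical stand-in for the filesystem's mapping routine, and only the iomap calls visible in this diff are assumed):

static ssize_t myfs_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 offset, unsigned len, u64 end_pos)
{
	/* Refresh wpc->iomap only when the cached mapping misses. */
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length) {
		/* myfs_map_blocks() is hypothetical, for illustration. */
		int error = myfs_map_blocks(wpc->inode, offset, &wpc->iomap);

		if (error)
			return error;
	}

	/* Hand the mapped range to the generic ioend machinery. */
	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}

Note the return type change from int to ssize_t: on success the callback now propagates whatever iomap_add_to_ioend() returns rather than 0, presumably a byte count (inferred from the signature, not confirmed by this diff). The inode, wbc and ops also travel in the on-stack iomap_writepage_ctx, as zonefs_writepages() shows above, so iomap_writepages() takes only the context.
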
@@ -312,8 +321,10 @@ static const struct vm_operations_struct zonefs_file_vm_ops = {
.page_mkwrite = zonefs_filemap_page_mkwrite,
};

-static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int zonefs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
+
/*
* Conventional zones accept random writes, so their files can support
* shared writable mappings. For sequential zone files, only read
@@ -321,11 +332,11 @@ static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
* ordering between msync() and page cache writeback.
*/
if (zonefs_inode_is_seq(file_inode(file)) &&
- (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+ (desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
return -EINVAL;

file_accessed(file);
- vma->vm_ops = &zonefs_file_vm_ops;
+ desc->vm_ops = &zonefs_file_vm_ops;

return 0;
}
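
These two hunks are the ->mmap to ->mmap_prepare conversion: the checks now run against a struct vm_area_desc describing the mapping-to-be, before any vm_area_struct exists, instead of patching a live VMA. The same shape works for any driver that only needs to veto flags and install vm_ops (hypothetical myfs names; only the desc fields zonefs touches above, i.e. file, vm_flags and vm_ops, are assumed):

static int myfs_file_mmap_prepare(struct vm_area_desc *desc)
{
	/* Unlike zonefs, this sketch rejects all writable shared mappings. */
	if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	file_accessed(desc->file);
	desc->vm_ops = &myfs_file_vm_ops;	/* hypothetical vm_operations */
	return 0;
}
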
@@ -563,7 +574,8 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
if (ret <= 0)
goto inode_unlock;

- ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops, NULL);
+ ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops,
+ NULL, NULL);
if (ret == -EIO)
zonefs_io_error(inode, true);
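
The buffered-write hunk only widens the iomap_file_buffered_write() call: zonefs passes NULL for both trailing arguments since it needs no per-write hooks or private data. An annotated reading of the new call site (the roles of the two NULLs are inferred from the call's shape, not spelled out in this diff):

ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops,
				NULL,	/* optional write ops, unused by zonefs */
				NULL);	/* optional private cookie, unused by zonefs */
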
@@ -850,7 +862,7 @@ const struct file_operations zonefs_file_operations = {
.open = zonefs_file_open,
.release = zonefs_file_release,
.fsync = zonefs_file_fsync,
- .mmap = zonefs_file_mmap,
+ .mmap_prepare = zonefs_file_mmap_prepare,
.llseek = zonefs_file_llseek,
.read_iter = zonefs_file_read_iter,
.write_iter = zonefs_file_write_iter,
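
At the file_operations level the conversion is a one-for-one swap: the table advertises .mmap_prepare instead of .mmap, and the other entries are untouched. A driver making the same migration ends up with a table like this sketch (hypothetical myfs names; whether .mmap and .mmap_prepare may coexist is not established by this diff, so only the replacement entry is shown):

static const struct file_operations myfs_file_operations = {
	.owner		= THIS_MODULE,
	.open		= myfs_file_open,		/* hypothetical */
	.mmap_prepare	= myfs_file_mmap_prepare,	/* was .mmap */
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
};
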