Diffstat (limited to 'fs/zonefs')
-rw-r--r--  fs/zonefs/file.c  | 52
-rw-r--r--  fs/zonefs/super.c | 36
-rw-r--r--  fs/zonefs/sysfs.c |  1
3 files changed, 47 insertions(+), 42 deletions(-)
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index 3b103715acc9..fd3a5922f6c3 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -124,37 +124,46 @@ static void zonefs_readahead(struct readahead_control *rac)
/*
 * Map blocks for page writeback. This is used only on conventional zone files,
 * which implies that the page range can only be within the fixed inode size.
 */
-static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
- struct inode *inode, loff_t offset,
- unsigned int len)
+static ssize_t zonefs_writeback_range(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 offset, unsigned len, u64 end_pos)
{
- struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(wpc->inode);
if (WARN_ON_ONCE(zonefs_zone_is_seq(z)))
return -EIO;
- if (WARN_ON_ONCE(offset >= i_size_read(inode)))
+ if (WARN_ON_ONCE(offset >= i_size_read(wpc->inode)))
return -EIO;
/* If the mapping is already OK, nothing needs to be done */
- if (offset >= wpc->iomap.offset &&
- offset < wpc->iomap.offset + wpc->iomap.length)
- return 0;
+ if (offset < wpc->iomap.offset ||
+ offset >= wpc->iomap.offset + wpc->iomap.length) {
+ int error;
+
+ error = zonefs_write_iomap_begin(wpc->inode, offset,
+ z->z_capacity - offset, IOMAP_WRITE,
+ &wpc->iomap, NULL);
+ if (error)
+ return error;
+ }
- return zonefs_write_iomap_begin(inode, offset,
- z->z_capacity - offset,
- IOMAP_WRITE, &wpc->iomap, NULL);
+ return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}
static const struct iomap_writeback_ops zonefs_writeback_ops = {
- .map_blocks = zonefs_write_map_blocks,
+ .writeback_range = zonefs_writeback_range,
+ .writeback_submit = iomap_ioend_writeback_submit,
};
static int zonefs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct iomap_writepage_ctx wpc = { };
+ struct iomap_writepage_ctx wpc = {
+ .inode = mapping->host,
+ .wbc = wbc,
+ .ops = &zonefs_writeback_ops,
+ };
- return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
+ return iomap_writepages(&wpc);
}
static int zonefs_swap_activate(struct swap_info_struct *sis,
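
For reference, the hunk above is an instance of the newer iomap writeback contract: the ->map_blocks callback is replaced by ->writeback_range, which both maps the dirty range and queues it on the ioend itself, while the caller now fills in the iomap_writepage_ctx up front. A minimal sketch of that pattern for a hypothetical filesystem follows; the myfs_* names and the myfs_map_range() helper are invented for illustration, and the iomap API is assumed to be the one used in this patch:

/* Hypothetical example; mirrors the conversion pattern above. */
static ssize_t myfs_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 offset, unsigned len, u64 end_pos)
{
	/* Refresh the cached mapping if this range falls outside it. */
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length) {
		int error = myfs_map_range(wpc->inode, offset, &wpc->iomap);

		if (error)
			return error;
	}

	/* Queue the folio range on the current ioend for submission. */
	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.writeback_range	= myfs_writeback_range,
	.writeback_submit	= iomap_ioend_writeback_submit,
};

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	/* The context now carries inode, wbc and ops directly. */
	struct iomap_writepage_ctx wpc = {
		.inode	= mapping->host,
		.wbc	= wbc,
		.ops	= &myfs_writeback_ops,
	};

	return iomap_writepages(&wpc);
}
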
@@ -299,7 +308,7 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
/* Serialize against truncates */
filemap_invalidate_lock_shared(inode->i_mapping);
- ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
+ ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops, NULL);
filemap_invalidate_unlock_shared(inode->i_mapping);
sb_end_pagefault(inode->i_sb);
@@ -312,8 +321,10 @@ static const struct vm_operations_struct zonefs_file_vm_ops = {
.page_mkwrite = zonefs_filemap_page_mkwrite,
};
-static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int zonefs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
+
/*
* Conventional zones accept random writes, so their files can support
* shared writable mappings. For sequential zone files, only read
@@ -321,11 +332,11 @@ static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
* ordering between msync() and page cache writeback.
*/
if (zonefs_inode_is_seq(file_inode(file)) &&
- (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+ (desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
return -EINVAL;
file_accessed(file);
- vma->vm_ops = &zonefs_file_vm_ops;
+ desc->vm_ops = &zonefs_file_vm_ops;
return 0;
}
@@ -563,7 +574,8 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
if (ret <= 0)
goto inode_unlock;
- ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
+ ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops,
+ NULL, NULL);
if (ret == -EIO)
zonefs_io_error(inode, true);
@@ -850,7 +862,7 @@ const struct file_operations zonefs_file_operations = {
.open = zonefs_file_open,
.release = zonefs_file_release,
.fsync = zonefs_file_fsync,
- .mmap = zonefs_file_mmap,
+ .mmap_prepare = zonefs_file_mmap_prepare,
.llseek = zonefs_file_llseek,
.read_iter = zonefs_file_read_iter,
.write_iter = zonefs_file_write_iter,
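
The ->mmap to ->mmap_prepare change in this file follows the generic conversion pattern: the hook now runs before the VMA exists and receives a struct vm_area_desc, whose vm_flags are inspected and whose vm_ops the filesystem fills in. A hedged sketch, with all myfs_* names invented:

static int myfs_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;

	/* Example policy check on the requested mapping flags. */
	if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE) &&
	    !myfs_allows_shared_writable(file_inode(file))) /* hypothetical */
		return -EINVAL;

	file_accessed(file);
	desc->vm_ops = &myfs_file_vm_ops;	/* hypothetical vm_ops */
	return 0;
}

static const struct file_operations myfs_file_operations = {
	/* ... */
	.mmap_prepare	= myfs_mmap_prepare,
};
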
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 964fa7f24003..4dc7f967c861 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -662,6 +662,7 @@ static struct inode *zonefs_get_file_inode(struct inode *dir,
inode->i_op = &zonefs_file_inode_operations;
inode->i_fop = &zonefs_file_operations;
inode->i_mapping->a_ops = &zonefs_file_aops;
+ mapping_set_large_folios(inode->i_mapping);
/* Update the inode access rights depending on the zone condition */
zonefs_inode_update_mode(inode);
@@ -1110,28 +1111,20 @@ static int zonefs_read_super(struct super_block *sb)
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
struct zonefs_super *super;
u32 crc, stored_crc;
- struct page *page;
- struct bio_vec bio_vec;
- struct bio bio;
int ret;
- page = alloc_page(GFP_KERNEL);
- if (!page)
+ super = kmalloc(ZONEFS_SUPER_SIZE, GFP_KERNEL);
+ if (!super)
return -ENOMEM;
- bio_init(&bio, sb->s_bdev, &bio_vec, 1, REQ_OP_READ);
- bio.bi_iter.bi_sector = 0;
- __bio_add_page(&bio, page, PAGE_SIZE, 0);
-
- ret = submit_bio_wait(&bio);
+ ret = bdev_rw_virt(sb->s_bdev, 0, super, ZONEFS_SUPER_SIZE,
+ REQ_OP_READ);
if (ret)
- goto free_page;
-
- super = page_address(page);
+ goto free_super;
ret = -EINVAL;
if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC)
- goto free_page;
+ goto free_super;
stored_crc = le32_to_cpu(super->s_crc);
super->s_crc = 0;
@@ -1139,14 +1132,14 @@ static int zonefs_read_super(struct super_block *sb)
if (crc != stored_crc) {
zonefs_err(sb, "Invalid checksum (Expected 0x%08x, got 0x%08x)",
crc, stored_crc);
- goto free_page;
+ goto free_super;
}
sbi->s_features = le64_to_cpu(super->s_features);
if (sbi->s_features & ~ZONEFS_F_DEFINED_FEATURES) {
zonefs_err(sb, "Unknown features set 0x%llx\n",
sbi->s_features);
- goto free_page;
+ goto free_super;
}
if (sbi->s_features & ZONEFS_F_UID) {
@@ -1154,7 +1147,7 @@ static int zonefs_read_super(struct super_block *sb)
le32_to_cpu(super->s_uid));
if (!uid_valid(sbi->s_uid)) {
zonefs_err(sb, "Invalid UID feature\n");
- goto free_page;
+ goto free_super;
}
}
@@ -1163,7 +1156,7 @@ static int zonefs_read_super(struct super_block *sb)
le32_to_cpu(super->s_gid));
if (!gid_valid(sbi->s_gid)) {
zonefs_err(sb, "Invalid GID feature\n");
- goto free_page;
+ goto free_super;
}
}
@@ -1172,15 +1165,14 @@ static int zonefs_read_super(struct super_block *sb)
if (memchr_inv(super->s_reserved, 0, sizeof(super->s_reserved))) {
zonefs_err(sb, "Reserved area is being used\n");
- goto free_page;
+ goto free_super;
}
import_uuid(&sbi->s_uuid, super->s_uuid);
ret = 0;
-free_page:
- __free_page(page);
-
+free_super:
+ kfree(super);
return ret;
}
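
The super.c hunk replaces an open-coded alloc_page()/bio_init()/submit_bio_wait() sequence with bdev_rw_virt(), which performs synchronous block I/O on a kernel-virtual buffer. A minimal sketch of that pattern; MYFS_SUPER_SIZE, struct myfs_super and myfs_validate_super() are invented names:

static int myfs_read_super(struct super_block *sb)
{
	struct myfs_super *super;
	int ret;

	super = kmalloc(MYFS_SUPER_SIZE, GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	/* Synchronous read of the on-disk super block from sector 0. */
	ret = bdev_rw_virt(sb->s_bdev, 0, super, MYFS_SUPER_SIZE,
			   REQ_OP_READ);
	if (ret)
		goto out;

	ret = myfs_validate_super(sb, super);	/* hypothetical */
out:
	kfree(super);
	return ret;
}
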
diff --git a/fs/zonefs/sysfs.c b/fs/zonefs/sysfs.c
index 8ccb65c2b419..ff9a688f1f9c 100644
--- a/fs/zonefs/sysfs.c
+++ b/fs/zonefs/sysfs.c
@@ -92,6 +92,7 @@ int zonefs_sysfs_register(struct super_block *sb)
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
int ret;
+ super_set_sysfs_name_id(sb);
init_completion(&sbi->s_kobj_unregister);
ret = kobject_init_and_add(&sbi->s_kobj, &zonefs_sb_ktype,
zonefs_sysfs_root, "%s", sb->s_id);
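
Finally, the sysfs.c hunk calls super_set_sysfs_name_id() before registering the kobject; the helper derives the superblock's sysfs/debug name from sb->s_id. A sketch of the same registration order for a hypothetical filesystem (the myfs_* names and MYFS_SB() are invented):

int myfs_sysfs_register(struct super_block *sb)
{
	struct myfs_sb_info *sbi = MYFS_SB(sb);

	/* Name the super block for sysfs/debug output first. */
	super_set_sysfs_name_id(sb);

	init_completion(&sbi->s_kobj_unregister);
	return kobject_init_and_add(&sbi->s_kobj, &myfs_sb_ktype,
				    myfs_sysfs_root, "%s", sb->s_id);
}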