Diffstat (limited to 'fs')
423 files changed, 8715 insertions, 7951 deletions
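A large share of the hunks below mechanically convert filesystem ->mmap hooks to the new ->mmap_prepare callback, which receives a struct vm_area_desc to fill in rather than a live vm_area_struct (see the 9p, afs, aio and bcachefs hunks). As a rough sketch of that pattern, under the assumption of a hypothetical "examplefs" with its own vm_operations_struct: the examplefs_* names and examplefs_vm_ops are illustrative only; struct vm_area_desc, desc->file, desc->vm_ops, desc->vm_flags and generic_file_mmap_prepare() are the interfaces the hunks in this diff actually use.

/*
 * Sketch only: examplefs_* names are hypothetical; the shape follows the
 * v9fs and afs conversions in this series.  The hook fills in the
 * vm_area_desc instead of modifying a VMA that already exists.
 */
static int examplefs_file_mmap_prepare(struct vm_area_desc *desc)
{
	int ret;

	/* Generic pagecache mmap checks and setup, as in the v9fs hunk. */
	ret = generic_file_mmap_prepare(desc);
	if (ret)
		return ret;

	/* was: vma->vm_ops = &examplefs_vm_ops; */
	desc->vm_ops = &examplefs_vm_ops;
	return 0;
}

const struct file_operations examplefs_file_operations = {
	.read_iter    = generic_file_read_iter,
	.mmap_prepare = examplefs_file_mmap_prepare,	/* was: .mmap */
};

Callers that never needed the generic helper, such as the aio ring buffer below, simply set desc->vm_flags and desc->vm_ops and return 0.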
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index 5061f192eafd..04795508a795 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c @@ -127,7 +127,6 @@ const struct dentry_operations v9fs_cached_dentry_operations = { }; const struct dentry_operations v9fs_dentry_operations = { - .d_delete = always_delete_dentry, .d_release = v9fs_dentry_release, .d_unalias_trylock = v9fs_dentry_unalias_trylock, .d_unalias_unlock = v9fs_dentry_unalias_unlock, diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 348cc90bf9c5..eb0b083da269 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -454,9 +454,10 @@ int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, } static int -v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma) +v9fs_file_mmap_prepare(struct vm_area_desc *desc) { int retval; + struct file *filp = desc->file; struct inode *inode = file_inode(filp); struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); @@ -464,12 +465,12 @@ v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma) if (!(v9ses->cache & CACHE_WRITEBACK)) { p9_debug(P9_DEBUG_CACHE, "(read-only mmap mode)"); - return generic_file_readonly_mmap(filp, vma); + return generic_file_readonly_mmap_prepare(desc); } - retval = generic_file_mmap(filp, vma); + retval = generic_file_mmap_prepare(desc); if (!retval) - vma->vm_ops = &v9fs_mmap_file_vm_ops; + desc->vm_ops = &v9fs_mmap_file_vm_ops; return retval; } @@ -516,7 +517,7 @@ const struct file_operations v9fs_file_operations = { .open = v9fs_file_open, .release = v9fs_dir_release, .lock = v9fs_file_lock, - .mmap = generic_file_readonly_mmap, + .mmap_prepare = generic_file_readonly_mmap_prepare, .splice_read = v9fs_file_splice_read, .splice_write = iter_file_splice_write, .fsync = v9fs_file_fsync, @@ -531,7 +532,7 @@ const struct file_operations v9fs_file_operations_dotl = { .release = v9fs_dir_release, .lock = v9fs_file_lock_dotl, .flock = v9fs_file_flock_dotl, - .mmap = v9fs_file_mmap, + .mmap_prepare = v9fs_file_mmap_prepare, .splice_read = v9fs_file_splice_read, .splice_write = iter_file_splice_write, .fsync = v9fs_file_fsync_dotl, diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index 489db161abc9..795c6388744c 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -134,10 +134,12 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, if (retval) goto release_sb; - if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) - sb->s_d_op = &v9fs_cached_dentry_operations; - else - sb->s_d_op = &v9fs_dentry_operations; + if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) { + set_default_d_op(sb, &v9fs_cached_dentry_operations); + } else { + set_default_d_op(sb, &v9fs_dentry_operations); + sb->s_d_flags |= DCACHE_DONTCACHE; + } inode = v9fs_get_new_inode_from_fid(v9ses, fid, sb); if (IS_ERR(inode)) { diff --git a/fs/Makefile b/fs/Makefile index 79c08b914c47..334654f9584b 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -15,7 +15,8 @@ obj-y := open.o read_write.o file_table.o super.o \ pnode.o splice.o sync.o utimes.o d_path.o \ stack.o fs_struct.o statfs.o fs_pin.o nsfs.o \ fs_types.o fs_context.o fs_parser.o fsopen.o init.o \ - kernel_read_file.o mnt_idmapping.o remap_range.o pidfs.o + kernel_read_file.o mnt_idmapping.o remap_range.o pidfs.o \ + file_attr.o obj-$(CONFIG_BUFFER_HEAD) += buffer.o mpage.o obj-$(CONFIG_PROC_FS) += proc_namespace.o diff --git a/fs/adfs/file.c b/fs/adfs/file.c index ee80718aaeec..cd13165fd904 100644 --- a/fs/adfs/file.c +++ b/fs/adfs/file.c @@ -25,7 +25,7 @@ const struct file_operations 
adfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .fsync = generic_file_fsync, .write_iter = generic_file_write_iter, .splice_read = filemap_splice_read, diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 21527189e430..6830f8bc8d4e 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -53,13 +53,14 @@ static void adfs_write_failed(struct address_space *mapping, loff_t to) truncate_pagecache(inode, inode->i_size); } -static int adfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int adfs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { int ret; - ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata, + ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata, adfs_get_block, &ADFS_I(mapping->host)->mmu_private); if (unlikely(ret)) diff --git a/fs/adfs/super.c b/fs/adfs/super.c index 017c48a80203..fdccdbbfc213 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -397,7 +397,7 @@ static int adfs_fill_super(struct super_block *sb, struct fs_context *fc) if (asb->s_ftsuffix) asb->s_namelen += 4; - sb->s_d_op = &adfs_dentry_operations; + set_default_d_op(sb, &adfs_dentry_operations); root = adfs_iget(sb, &root_obj); sb->s_root = d_make_root(root); if (!sb->s_root) { diff --git a/fs/affs/file.c b/fs/affs/file.c index 7a71018e3f67..765c3443663e 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -415,13 +415,14 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) return ret; } -static int affs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int affs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { int ret; - ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata, + ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata, affs_get_block, &AFFS_I(mapping->host)->mmu_private); if (unlikely(ret)) @@ -430,14 +431,15 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, return ret; } -static int affs_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned int len, unsigned int copied, +static int affs_write_end(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, + unsigned int len, unsigned int copied, struct folio *folio, void *fsdata) { struct inode *inode = mapping->host; int ret; - ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); + ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata); /* Clear Archived bit on file writes, as AmigaOS would do */ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { @@ -645,7 +647,8 @@ static int affs_read_folio_ofs(struct file *file, struct folio *folio) return err; } -static int affs_write_begin_ofs(struct file *file, struct address_space *mapping, +static int affs_write_begin_ofs(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { @@ -684,9 +687,10 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping return err; } -static int affs_write_end_ofs(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - 
struct folio *folio, void *fsdata) +static int affs_write_end_ofs(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { struct inode *inode = mapping->host; struct super_block *sb = inode->i_sb; @@ -999,7 +1003,7 @@ const struct file_operations affs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .open = affs_file_open, .release = affs_file_release, .fsync = affs_file_fsync, diff --git a/fs/affs/super.c b/fs/affs/super.c index 2fa40337776d..44f8aa883100 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -500,9 +500,9 @@ got_root: return PTR_ERR(root_inode); if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_INTL)) - sb->s_d_op = &affs_intl_dentry_operations; + set_default_d_op(sb, &affs_intl_dentry_operations); else - sb->s_d_op = &affs_dentry_operations; + set_default_d_op(sb, &affs_dentry_operations); sb->s_root = d_make_root(root_inode); if (!sb->s_root) { diff --git a/fs/afs/file.c b/fs/afs/file.c index fc15497608c6..f66a92294284 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -19,7 +19,7 @@ #include <trace/events/netfs.h> #include "internal.h" -static int afs_file_mmap(struct file *file, struct vm_area_struct *vma); +static int afs_file_mmap_prepare(struct vm_area_desc *desc); static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter); static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos, @@ -35,7 +35,7 @@ const struct file_operations afs_file_operations = { .llseek = generic_file_llseek, .read_iter = afs_file_read_iter, .write_iter = netfs_file_write_iter, - .mmap = afs_file_mmap, + .mmap_prepare = afs_file_mmap_prepare, .splice_read = afs_file_splice_read, .splice_write = iter_file_splice_write, .fsync = afs_fsync, @@ -492,16 +492,16 @@ static void afs_drop_open_mmap(struct afs_vnode *vnode) /* * Handle setting up a memory mapping on an AFS file. 
*/ -static int afs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int afs_file_mmap_prepare(struct vm_area_desc *desc) { - struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); + struct afs_vnode *vnode = AFS_FS_I(file_inode(desc->file)); int ret; afs_add_open_mmap(vnode); - ret = generic_file_mmap(file, vma); + ret = generic_file_mmap_prepare(desc); if (ret == 0) - vma->vm_ops = &afs_vm_ops; + desc->vm_ops = &afs_vm_ops; else afs_drop_open_mmap(vnode); return ret; diff --git a/fs/afs/super.c b/fs/afs/super.c index 25b306db6992..da407f2d6f0d 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -483,9 +483,9 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx) goto error; if (as->dyn_root) { - sb->s_d_op = &afs_dynroot_dentry_operations; + set_default_d_op(sb, &afs_dynroot_dentry_operations); } else { - sb->s_d_op = &afs_fs_dentry_operations; + set_default_d_op(sb, &afs_fs_dentry_operations); rcu_assign_pointer(as->volume->sb, sb); } @@ -392,15 +392,15 @@ static const struct vm_operations_struct aio_ring_vm_ops = { #endif }; -static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) +static int aio_ring_mmap_prepare(struct vm_area_desc *desc) { - vm_flags_set(vma, VM_DONTEXPAND); - vma->vm_ops = &aio_ring_vm_ops; + desc->vm_flags |= VM_DONTEXPAND; + desc->vm_ops = &aio_ring_vm_ops; return 0; } static const struct file_operations aio_ring_fops = { - .mmap = aio_ring_mmap, + .mmap_prepare = aio_ring_mmap_prepare, }; #if IS_ENABLED(CONFIG_MIGRATION) diff --git a/fs/attr.c b/fs/attr.c index 9caf63d20d03..5425c1dbbff9 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -230,7 +230,7 @@ EXPORT_SYMBOL(setattr_prepare); * @inode: the inode to be truncated * @offset: the new size to assign to the inode * - * inode_newsize_ok must be called with i_mutex held. + * inode_newsize_ok must be called with i_rwsem held exclusively. * * inode_newsize_ok will check filesystem limits and ulimits to check that the * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ @@ -318,7 +318,7 @@ static void setattr_copy_mgtime(struct inode *inode, const struct iattr *attr) * @inode: the inode to be updated * @attr: the new attributes * - * setattr_copy must be called with i_mutex held. + * setattr_copy must be called with i_rwsem held exclusively. * * setattr_copy updates the inode's metadata with that specified * in attr on idmapped mounts. Necessary permission checks to determine @@ -403,13 +403,13 @@ EXPORT_SYMBOL(may_setattr); * @attr: new attributes * @delegated_inode: returns inode, if the inode is delegated * - * The caller must hold the i_mutex on the affected object. + * The caller must hold the i_rwsem exclusively on the affected object. * * If notify_change discovers a delegation in need of breaking, * it will return -EWOULDBLOCK and return a reference to the inode in * delegated_inode. The caller should then break the delegation and * retry. Because breaking a delegation may take a long time, the - * caller should drop the i_mutex before doing so. + * caller should drop the i_rwsem before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. 
This may * be appropriate for callers that expect the underlying filesystem not @@ -456,7 +456,7 @@ int notify_change(struct mnt_idmap *idmap, struct dentry *dentry, if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; - /* Flag setting protected by i_mutex */ + /* Flag setting protected by i_rwsem */ if (is_sxid(attr->ia_mode)) inode->i_flags &= ~S_NOSEC; } diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c index ee2edccaef70..f5c16ffba013 100644 --- a/fs/autofs/inode.c +++ b/fs/autofs/inode.c @@ -311,7 +311,7 @@ static int autofs_fill_super(struct super_block *s, struct fs_context *fc) s->s_blocksize_bits = 10; s->s_magic = AUTOFS_SUPER_MAGIC; s->s_op = &autofs_sops; - s->s_d_op = &autofs_dentry_operations; + set_default_d_op(s, &autofs_dentry_operations); s->s_time_gran = 1; /* diff --git a/fs/backing-file.c b/fs/backing-file.c index 763fbe9b72b2..15a7f8031084 100644 --- a/fs/backing-file.c +++ b/fs/backing-file.c @@ -41,7 +41,7 @@ struct file *backing_file_open(const struct path *user_path, int flags, return f; path_get(user_path); - *backing_file_user_path(f) = *user_path; + backing_file_set_user_path(f, user_path); error = vfs_open(real_path, f); if (error) { fput(f); @@ -65,7 +65,7 @@ struct file *backing_tmpfile_open(const struct path *user_path, int flags, return f; path_get(user_path); - *backing_file_user_path(f) = *user_path; + backing_file_set_user_path(f, user_path); error = vfs_tmpfile(real_idmap, real_parentpath, f, mode); if (error) { fput(f); @@ -333,13 +333,13 @@ int backing_file_mmap(struct file *file, struct vm_area_struct *vma, if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING))) return -EIO; - if (!file->f_op->mmap) + if (!can_mmap_file(file)) return -ENODEV; vma_set_file(vma, file); old_cred = override_creds(ctx->cred); - ret = call_mmap(vma->vm_file, vma); + ret = vfs_mmap(vma->vm_file, vma); revert_creds(old_cred); if (ctx->accessed) diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c index 66bacdd49f78..1c54b9b5bd69 100644 --- a/fs/bcachefs/fs-io-buffered.c +++ b/fs/bcachefs/fs-io-buffered.c @@ -674,7 +674,7 @@ int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc /* buffered writes: */ -int bch2_write_begin(struct file *file, struct address_space *mapping, +int bch2_write_begin(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { @@ -757,7 +757,7 @@ err_unlock: return bch2_err_class(ret); } -int bch2_write_end(struct file *file, struct address_space *mapping, +int bch2_write_end(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { diff --git a/fs/bcachefs/fs-io-buffered.h b/fs/bcachefs/fs-io-buffered.h index 3207ebbb4ab4..14de91c27656 100644 --- a/fs/bcachefs/fs-io-buffered.h +++ b/fs/bcachefs/fs-io-buffered.h @@ -10,9 +10,9 @@ int bch2_read_folio(struct file *, struct folio *); int bch2_writepages(struct address_space *, struct writeback_control *); void bch2_readahead(struct readahead_control *); -int bch2_write_begin(struct file *, struct address_space *, loff_t pos, +int bch2_write_begin(const struct kiocb *, struct address_space *, loff_t pos, unsigned len, struct folio **, void **); -int bch2_write_end(struct file *, struct address_space *, loff_t, +int bch2_write_end(const struct kiocb *, struct address_space *, loff_t, unsigned len, unsigned copied, struct folio *, void *); ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *); diff --git a/fs/bcachefs/fs.c 
b/fs/bcachefs/fs.c index e54e4f255b22..687af0eea0c2 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -1551,11 +1551,11 @@ static const struct vm_operations_struct bch_vm_ops = { .page_mkwrite = bch2_page_mkwrite, }; -static int bch2_mmap(struct file *file, struct vm_area_struct *vma) +static int bch2_mmap_prepare(struct vm_area_desc *desc) { - file_accessed(file); + file_accessed(desc->file); - vma->vm_ops = &bch_vm_ops; + desc->vm_ops = &bch_vm_ops; return 0; } @@ -1617,7 +1617,7 @@ static const __maybe_unused unsigned bch_flags_to_xflags[] = { }; static int bch2_fileattr_get(struct dentry *dentry, - struct fileattr *fa) + struct file_kattr *fa) { struct bch_inode_info *inode = to_bch_ei(d_inode(dentry)); struct bch_fs *c = inode->v.i_sb->s_fs_info; @@ -1680,7 +1680,7 @@ static int fssetxattr_inode_update_fn(struct btree_trans *trans, static int bch2_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, - struct fileattr *fa) + struct file_kattr *fa) { struct bch_inode_info *inode = to_bch_ei(d_inode(dentry)); struct bch_fs *c = inode->v.i_sb->s_fs_info; @@ -1739,7 +1739,7 @@ static const struct file_operations bch_file_operations = { .llseek = bch2_llseek, .read_iter = bch2_read_iter, .write_iter = bch2_write_iter, - .mmap = bch2_mmap, + .mmap_prepare = bch2_mmap_prepare, .get_unmapped_area = thp_get_unmapped_area, .fsync = bch2_fsync, .splice_read = filemap_splice_read, diff --git a/fs/bfs/file.c b/fs/bfs/file.c index fa66a09e496a..d33d6bde992b 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -27,7 +27,7 @@ const struct file_operations bfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .splice_read = filemap_splice_read, }; @@ -170,9 +170,10 @@ static void bfs_write_failed(struct address_space *mapping, loff_t to) truncate_pagecache(inode, inode->i_size); } -static int bfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int bfs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { int ret; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index a43363d593e5..e3b56b603192 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -646,7 +646,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, if (!elf_check_arch(interp_elf_ex) || elf_check_fdpic(interp_elf_ex)) goto out; - if (!interpreter->f_op->mmap) + if (!can_mmap_file(interpreter)) goto out; total_size = total_mapping_size(interp_elf_phdata, @@ -848,7 +848,7 @@ static int load_elf_binary(struct linux_binprm *bprm) goto out; if (elf_check_fdpic(elf_ex)) goto out; - if (!bprm->file->f_op->mmap) + if (!can_mmap_file(bprm->file)) goto out; elf_phdata = load_elf_phdrs(elf_ex, bprm->file); diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 9133f3827f90..59b138062352 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -109,7 +109,7 @@ static int is_elf(struct elfhdr *hdr, struct file *file) return 0; if (!elf_check_arch(hdr)) return 0; - if (!file->f_op->mmap) + if (!can_mmap_file(file)) return 0; return 1; } diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 432fbf4fc334..a839f960cd4a 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -675,44 +675,6 @@ static void bm_evict_inode(struct inode *inode) } /** - * unlink_binfmt_dentry - remove the dentry 
for the binary type handler - * @dentry: dentry associated with the binary type handler - * - * Do the actual filesystem work to remove a dentry for a registered binary - * type handler. Since binfmt_misc only allows simple files to be created - * directly under the root dentry of the filesystem we ensure that we are - * indeed passed a dentry directly beneath the root dentry, that the inode - * associated with the root dentry is locked, and that it is a regular file we - * are asked to remove. - */ -static void unlink_binfmt_dentry(struct dentry *dentry) -{ - struct dentry *parent = dentry->d_parent; - struct inode *inode, *parent_inode; - - /* All entries are immediate descendants of the root dentry. */ - if (WARN_ON_ONCE(dentry->d_sb->s_root != parent)) - return; - - /* We only expect to be called on regular files. */ - inode = d_inode(dentry); - if (WARN_ON_ONCE(!S_ISREG(inode->i_mode))) - return; - - /* The parent inode must be locked. */ - parent_inode = d_inode(parent); - if (WARN_ON_ONCE(!inode_is_locked(parent_inode))) - return; - - if (simple_positive(dentry)) { - dget(dentry); - simple_unlink(parent_inode, dentry); - d_delete(dentry); - dput(dentry); - } -} - -/** * remove_binfmt_handler - remove a binary type handler * @misc: handle to binfmt_misc instance * @e: binary type handler to remove @@ -729,7 +691,7 @@ static void remove_binfmt_handler(struct binfmt_misc *misc, Node *e) write_lock(&misc->entries_lock); list_del_init(&e->list); write_unlock(&misc->entries_lock); - unlink_binfmt_dentry(e->dentry); + locked_recursive_removal(e->dentry, NULL); } /* /<entry> */ @@ -772,7 +734,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer, case 3: /* Delete this handler. */ inode = d_inode(inode->i_sb->s_root); - inode_lock(inode); + inode_lock_nested(inode, I_MUTEX_PARENT); /* * In order to add new element or remove elements from the list @@ -922,7 +884,7 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer, case 3: /* Delete all handlers. */ inode = d_inode(file_inode(file)->i_sb->s_root); - inode_lock(inode); + inode_lock_nested(inode, I_MUTEX_PARENT); /* * In order to add new element or remove elements from the list diff --git a/fs/bpf_fs_kfuncs.c b/fs/bpf_fs_kfuncs.c index 08412532db1b..1e36a12b88f7 100644 --- a/fs/bpf_fs_kfuncs.c +++ b/fs/bpf_fs_kfuncs.c @@ -9,6 +9,7 @@ #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/file.h> +#include <linux/kernfs.h> #include <linux/mm.h> #include <linux/xattr.h> @@ -322,6 +323,39 @@ __bpf_kfunc int bpf_remove_dentry_xattr(struct dentry *dentry, const char *name_ return ret; } +#ifdef CONFIG_CGROUPS +/** + * bpf_cgroup_read_xattr - read xattr of a cgroup's node in cgroupfs + * @cgroup: cgroup to get xattr from + * @name__str: name of the xattr + * @value_p: output buffer of the xattr value + * + * Get xattr *name__str* of *cgroup* and store the output in *value_ptr*. + * + * For security reasons, only *name__str* with prefix "user." is allowed. + * + * Return: length of the xattr value on success, a negative value on error. 
+ */ +__bpf_kfunc int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str, + struct bpf_dynptr *value_p) +{ + struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p; + u32 value_len; + void *value; + + /* Only allow reading "user.*" xattrs */ + if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) + return -EPERM; + + value_len = __bpf_dynptr_size(value_ptr); + value = __bpf_dynptr_data_rw(value_ptr, value_len); + if (!value) + return -EINVAL; + + return kernfs_xattr_get(cgroup->kn, name__str, value, value_len); +} +#endif /* CONFIG_CGROUPS */ + __bpf_kfunc_end_defs(); BTF_KFUNCS_START(bpf_fs_kfunc_set_ids) diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index c352f3ae0385..ea95c90c8474 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -114,6 +114,8 @@ config BTRFS_EXPERIMENTAL - extent tree v2 - complex rework of extent tracking + - large folio support + If unsure, say N. config BTRFS_FS_REF_VERIFY diff --git a/fs/btrfs/accessors.c b/fs/btrfs/accessors.c index e3716516ca38..861c7d92c437 100644 --- a/fs/btrfs/accessors.c +++ b/fs/btrfs/accessors.c @@ -9,27 +9,24 @@ #include "fs.h" #include "accessors.h" -static bool check_setget_bounds(const struct extent_buffer *eb, - const void *ptr, unsigned off, int size) +static void __cold report_setget_bounds(const struct extent_buffer *eb, + const void *ptr, unsigned off, int size) { - const unsigned long member_offset = (unsigned long)ptr + off; + unsigned long member_offset = (unsigned long)ptr + off; - if (unlikely(member_offset + size > eb->len)) { - btrfs_warn(eb->fs_info, - "bad eb member %s: ptr 0x%lx start %llu member offset %lu size %d", - (member_offset > eb->len ? "start" : "end"), - (unsigned long)ptr, eb->start, member_offset, size); - return false; - } - - return true; + btrfs_warn(eb->fs_info, + "bad eb member %s: ptr 0x%lx start %llu member offset %lu size %d", + (member_offset > eb->len ? "start" : "end"), + (unsigned long)ptr, eb->start, member_offset, size); } -void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb) +/* Copy bytes from @src1 and @src2 to @dest. */ +static __always_inline void memcpy_split_src(char *dest, const char *src1, + const char *src2, const size_t len1, + const size_t total) { - token->eb = eb; - token->kaddr = folio_address(eb->folios[0]); - token->offset = 0; + memcpy(dest, src1, len1); + memcpy(dest + len1, src2, total - len1); } /* @@ -41,11 +38,6 @@ void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *e * - btrfs_set_8 (for 8/16/32/64) * - btrfs_get_8 (for 8/16/32/64) * - * Generic helpers with a token (cached address of the most recently accessed - * page): - * - btrfs_set_token_8 (for 8/16/32/64) - * - btrfs_get_token_8 (for 8/16/32/64) - * * The set/get functions handle data spanning two pages transparently, in case * metadata block size is larger than page. Every pointer to metadata items is * an offset into the extent buffer page array, cast to a specific type. 
This @@ -57,118 +49,66 @@ void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *e */ #define DEFINE_BTRFS_SETGET_BITS(bits) \ -u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \ - const void *ptr, unsigned long off) \ -{ \ - const unsigned long member_offset = (unsigned long)ptr + off; \ - const unsigned long idx = get_eb_folio_index(token->eb, member_offset); \ - const unsigned long oil = get_eb_offset_in_folio(token->eb, \ - member_offset);\ - const int unit_size = token->eb->folio_size; \ - const int unit_shift = token->eb->folio_shift; \ - const int size = sizeof(u##bits); \ - u8 lebytes[sizeof(u##bits)]; \ - const int part = unit_size - oil; \ - \ - ASSERT(token); \ - ASSERT(token->kaddr); \ - ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \ - if (token->offset <= member_offset && \ - member_offset + size <= token->offset + unit_size) { \ - return get_unaligned_le##bits(token->kaddr + oil); \ - } \ - token->kaddr = folio_address(token->eb->folios[idx]); \ - token->offset = idx << unit_shift; \ - if (INLINE_EXTENT_BUFFER_PAGES == 1 || oil + size <= unit_size) \ - return get_unaligned_le##bits(token->kaddr + oil); \ - \ - memcpy(lebytes, token->kaddr + oil, part); \ - token->kaddr = folio_address(token->eb->folios[idx + 1]); \ - token->offset = (idx + 1) << unit_shift; \ - memcpy(lebytes + part, token->kaddr, size - part); \ - return get_unaligned_le##bits(lebytes); \ -} \ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ const void *ptr, unsigned long off) \ { \ const unsigned long member_offset = (unsigned long)ptr + off; \ const unsigned long idx = get_eb_folio_index(eb, member_offset);\ - const unsigned long oil = get_eb_offset_in_folio(eb, \ - member_offset);\ - const int unit_size = eb->folio_size; \ - char *kaddr = folio_address(eb->folios[idx]); \ - const int size = sizeof(u##bits); \ - const int part = unit_size - oil; \ - u8 lebytes[sizeof(u##bits)]; \ - \ - ASSERT(check_setget_bounds(eb, ptr, off, size)); \ - if (INLINE_EXTENT_BUFFER_PAGES == 1 || oil + size <= unit_size) \ - return get_unaligned_le##bits(kaddr + oil); \ - \ - memcpy(lebytes, kaddr + oil, part); \ - kaddr = folio_address(eb->folios[idx + 1]); \ - memcpy(lebytes + part, kaddr, size - part); \ - return get_unaligned_le##bits(lebytes); \ -} \ -void btrfs_set_token_##bits(struct btrfs_map_token *token, \ - const void *ptr, unsigned long off, \ - u##bits val) \ -{ \ - const unsigned long member_offset = (unsigned long)ptr + off; \ - const unsigned long idx = get_eb_folio_index(token->eb, member_offset); \ - const unsigned long oil = get_eb_offset_in_folio(token->eb, \ + const unsigned long oif = get_eb_offset_in_folio(eb, \ member_offset);\ - const int unit_size = token->eb->folio_size; \ - const int unit_shift = token->eb->folio_shift; \ - const int size = sizeof(u##bits); \ + char *kaddr = folio_address(eb->folios[idx]) + oif; \ + const int part = eb->folio_size - oif; \ u8 lebytes[sizeof(u##bits)]; \ - const int part = unit_size - oil; \ \ - ASSERT(token); \ - ASSERT(token->kaddr); \ - ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \ - if (token->offset <= member_offset && \ - member_offset + size <= token->offset + unit_size) { \ - put_unaligned_le##bits(val, token->kaddr + oil); \ - return; \ + if (unlikely(member_offset + sizeof(u##bits) > eb->len)) { \ + report_setget_bounds(eb, ptr, off, sizeof(u##bits)); \ + return 0; \ } \ - token->kaddr = folio_address(token->eb->folios[idx]); \ - token->offset = idx << unit_shift; \ - if 
(INLINE_EXTENT_BUFFER_PAGES == 1 || \ - oil + size <= unit_size) { \ - put_unaligned_le##bits(val, token->kaddr + oil); \ - return; \ + if (INLINE_EXTENT_BUFFER_PAGES == 1 || sizeof(u##bits) == 1 || \ + likely(sizeof(u##bits) <= part)) \ + return get_unaligned_le##bits(kaddr); \ + \ + if (sizeof(u##bits) == 2) { \ + lebytes[0] = *kaddr; \ + kaddr = folio_address(eb->folios[idx + 1]); \ + lebytes[1] = *kaddr; \ + } else { \ + memcpy_split_src(lebytes, kaddr, \ + folio_address(eb->folios[idx + 1]), \ + part, sizeof(u##bits)); \ } \ - put_unaligned_le##bits(val, lebytes); \ - memcpy(token->kaddr + oil, lebytes, part); \ - token->kaddr = folio_address(token->eb->folios[idx + 1]); \ - token->offset = (idx + 1) << unit_shift; \ - memcpy(token->kaddr, lebytes + part, size - part); \ + return get_unaligned_le##bits(lebytes); \ } \ void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \ unsigned long off, u##bits val) \ { \ const unsigned long member_offset = (unsigned long)ptr + off; \ const unsigned long idx = get_eb_folio_index(eb, member_offset);\ - const unsigned long oil = get_eb_offset_in_folio(eb, \ + const unsigned long oif = get_eb_offset_in_folio(eb, \ member_offset);\ - const int unit_size = eb->folio_size; \ - char *kaddr = folio_address(eb->folios[idx]); \ - const int size = sizeof(u##bits); \ - const int part = unit_size - oil; \ + char *kaddr = folio_address(eb->folios[idx]) + oif; \ + const int part = eb->folio_size - oif; \ u8 lebytes[sizeof(u##bits)]; \ \ - ASSERT(check_setget_bounds(eb, ptr, off, size)); \ - if (INLINE_EXTENT_BUFFER_PAGES == 1 || \ - oil + size <= unit_size) { \ - put_unaligned_le##bits(val, kaddr + oil); \ + if (unlikely(member_offset + sizeof(u##bits) > eb->len)) { \ + report_setget_bounds(eb, ptr, off, sizeof(u##bits)); \ + return; \ + } \ + if (INLINE_EXTENT_BUFFER_PAGES == 1 || sizeof(u##bits) == 1 || \ + likely(sizeof(u##bits) <= part)) { \ + put_unaligned_le##bits(val, kaddr); \ return; \ } \ - \ put_unaligned_le##bits(val, lebytes); \ - memcpy(kaddr + oil, lebytes, part); \ - kaddr = folio_address(eb->folios[idx + 1]); \ - memcpy(kaddr, lebytes + part, size - part); \ + if (sizeof(u##bits) == 2) { \ + *kaddr = lebytes[0]; \ + kaddr = folio_address(eb->folios[idx + 1]); \ + *kaddr = lebytes[1]; \ + } else { \ + memcpy(kaddr, lebytes, part); \ + kaddr = folio_address(eb->folios[idx + 1]); \ + memcpy(kaddr, lebytes + part, sizeof(u##bits) - part); \ + } \ } DEFINE_BTRFS_SETGET_BITS(8) diff --git a/fs/btrfs/accessors.h b/fs/btrfs/accessors.h index 15ea6348800b..99b3ced12805 100644 --- a/fs/btrfs/accessors.h +++ b/fs/btrfs/accessors.h @@ -16,14 +16,6 @@ struct extent_buffer; -struct btrfs_map_token { - struct extent_buffer *eb; - char *kaddr; - unsigned long offset; -}; - -void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb); - /* * Some macros to generate set/get functions for the struct fields. 
This * assumes there is a lefoo_to_cpu for every type, so lets make a simple one @@ -56,11 +48,6 @@ static inline void put_unaligned_le8(u8 val, void *p) sizeof_field(type, member))) #define DECLARE_BTRFS_SETGET_BITS(bits) \ -u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \ - const void *ptr, unsigned long off); \ -void btrfs_set_token_##bits(struct btrfs_map_token *token, \ - const void *ptr, unsigned long off, \ - u##bits val); \ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ const void *ptr, unsigned long off); \ void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \ @@ -83,18 +70,6 @@ static inline void btrfs_set_##name(const struct extent_buffer *eb, type *s, \ { \ static_assert(sizeof(u##bits) == sizeof_field(type, member)); \ btrfs_set_##bits(eb, s, offsetof(type, member), val); \ -} \ -static inline u##bits btrfs_token_##name(struct btrfs_map_token *token, \ - const type *s) \ -{ \ - static_assert(sizeof(u##bits) == sizeof_field(type, member)); \ - return btrfs_get_token_##bits(token, s, offsetof(type, member));\ -} \ -static inline void btrfs_set_token_##name(struct btrfs_map_token *token,\ - type *s, u##bits val) \ -{ \ - static_assert(sizeof(u##bits) == sizeof_field(type, member)); \ - btrfs_set_token_##bits(token, s, offsetof(type, member), val); \ } #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ @@ -479,18 +454,6 @@ static inline void btrfs_set_item_##member(const struct extent_buffer *eb, \ int slot, u32 val) \ { \ btrfs_set_raw_item_##member(eb, btrfs_item_nr(eb, slot), val); \ -} \ -static inline u32 btrfs_token_item_##member(struct btrfs_map_token *token, \ - int slot) \ -{ \ - struct btrfs_item *item = btrfs_item_nr(token->eb, slot); \ - return btrfs_token_raw_item_##member(token, item); \ -} \ -static inline void btrfs_set_token_item_##member(struct btrfs_map_token *token, \ - int slot, u32 val) \ -{ \ - struct btrfs_item *item = btrfs_item_nr(token->eb, slot); \ - btrfs_set_token_raw_item_##member(token, item, val); \ } BTRFS_ITEM_SETGET_FUNCS(offset) diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index ed497f5f8d1b..6a450be293b1 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -733,7 +733,6 @@ static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx, struct preftrees *preftrees, struct share_check *sc) { - int err; int ret = 0; struct ulist *parents; struct ulist_node *node; @@ -752,6 +751,7 @@ static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx, */ while ((rnode = rb_first_cached(&preftrees->indirect.root))) { struct prelim_ref *ref; + int ret2; ref = rb_entry(rnode, struct prelim_ref, rbnode); if (WARN(ref->parent, @@ -773,18 +773,18 @@ static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx, ret = BACKREF_FOUND_SHARED; goto out; } - err = resolve_indirect_ref(ctx, path, preftrees, ref, parents); + ret2 = resolve_indirect_ref(ctx, path, preftrees, ref, parents); /* * we can only tolerate ENOENT,otherwise,we should catch error * and return directly. 
*/ - if (err == -ENOENT) { + if (ret2 == -ENOENT) { prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL); continue; - } else if (err) { + } else if (ret2) { free_pref(ref); - ret = err; + ret = ret2; goto out; } @@ -2201,7 +2201,6 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, int ret; u64 flags; u64 size = 0; - u32 item_size; const struct extent_buffer *eb; struct btrfs_extent_item *ei; struct btrfs_key key; @@ -2244,7 +2243,6 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, } eb = path->nodes[0]; - item_size = btrfs_item_size(eb, path->slots[0]); ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); flags = btrfs_extent_flags(eb, ei); @@ -2252,7 +2250,7 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, btrfs_debug(fs_info, "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u", logical, logical - found_key->objectid, found_key->objectid, - found_key->offset, flags, item_size); + found_key->offset, flags, btrfs_item_size(eb, path->slots[0])); WARN_ON(!flags_ret); if (flags_ret) { @@ -2548,17 +2546,20 @@ static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *c } int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, - struct btrfs_path *path, void *ctx, bool ignore_offset) { struct btrfs_backref_walk_ctx walk_ctx = { 0 }; int ret; u64 flags = 0; struct btrfs_key found_key; - int search_commit_root = path->search_commit_root; + struct btrfs_path *path; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; ret = extent_from_logical(fs_info, logical, path, &found_key, &flags); - btrfs_release_path(path); + btrfs_free_path(path); if (ret < 0) return ret; if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) @@ -2571,8 +2572,7 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, walk_ctx.extent_item_pos = logical - found_key.objectid; walk_ctx.fs_info = fs_info; - return iterate_extent_inodes(&walk_ctx, search_commit_root, - build_ino_list, ctx); + return iterate_extent_inodes(&walk_ctx, false, build_ino_list, ctx); } static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off, @@ -3161,18 +3161,14 @@ void btrfs_backref_release_cache(struct btrfs_backref_cache *cache) ASSERT(!cache->nr_edges); } -void btrfs_backref_link_edge(struct btrfs_backref_edge *edge, - struct btrfs_backref_node *lower, - struct btrfs_backref_node *upper, - int link_which) +static void btrfs_backref_link_edge(struct btrfs_backref_edge *edge, + struct btrfs_backref_node *lower, + struct btrfs_backref_node *upper) { ASSERT(upper && lower && upper->level == lower->level + 1); edge->node[LOWER] = lower; edge->node[UPPER] = upper; - if (link_which & LINK_LOWER) - list_add_tail(&edge->list[LOWER], &lower->upper); - if (link_which & LINK_UPPER) - list_add_tail(&edge->list[UPPER], &upper->lower); + list_add_tail(&edge->list[LOWER], &lower->upper); } /* * Handle direct tree backref @@ -3242,7 +3238,7 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache, ASSERT(upper->checked); INIT_LIST_HEAD(&edge->list[UPPER]); } - btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER); + btrfs_backref_link_edge(edge, cur, upper); return 0; } @@ -3412,7 +3408,7 @@ static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans, if (!upper->owner) upper->owner = btrfs_header_owner(eb); } - btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER); + btrfs_backref_link_edge(edge, lower, upper); if (rb_node) { 
btrfs_put_root(root); @@ -3570,7 +3566,7 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache, ASSERT(start->checked); - rb_node = rb_simple_insert(&cache->rb_root, start->bytenr, &start->rb_node); + rb_node = rb_simple_insert(&cache->rb_root, &start->simple_node); if (rb_node) btrfs_backref_panic(cache->fs_info, start->bytenr, -EEXIST); @@ -3621,8 +3617,7 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache, return -EUCLEAN; } - rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr, - &upper->rb_node); + rb_node = rb_simple_insert(&cache->rb_root, &upper->simple_node); if (unlikely(rb_node)) { btrfs_backref_panic(cache->fs_info, upper->bytenr, -EEXIST); return -EUCLEAN; diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 953637115956..34b0193a181c 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -226,8 +226,7 @@ int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx, iterate_extent_inodes_t *iterate, void *user_ctx); int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, - struct btrfs_path *path, void *ctx, - bool ignore_offset); + void *ctx, bool ignore_offset); int paths_from_inode(u64 inum, struct inode_fs_paths *ipath); @@ -313,10 +312,15 @@ int btrfs_backref_iter_next(struct btrfs_backref_iter *iter); * Represent a tree block in the backref cache */ struct btrfs_backref_node { - struct { - struct rb_node rb_node; - u64 bytenr; - }; /* Use rb_simple_node for search/insert */ + union{ + /* Use rb_simple_node for search/insert */ + struct { + struct rb_node rb_node; + u64 bytenr; + }; + + struct rb_simple_node simple_node; + }; /* * This is a sanity check, whenever we COW a block we will update @@ -423,13 +427,6 @@ struct btrfs_backref_node *btrfs_backref_alloc_node( struct btrfs_backref_edge *btrfs_backref_alloc_edge( struct btrfs_backref_cache *cache); -#define LINK_LOWER (1U << 0) -#define LINK_UPPER (1U << 1) - -void btrfs_backref_link_edge(struct btrfs_backref_edge *edge, - struct btrfs_backref_node *lower, - struct btrfs_backref_node *upper, - int link_which); void btrfs_backref_free_node(struct btrfs_backref_cache *cache, struct btrfs_backref_node *node); void btrfs_backref_free_edge(struct btrfs_backref_cache *cache, diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c index f7d8958b7327..50b5fc1c06d7 100644 --- a/fs/btrfs/bio.c +++ b/fs/btrfs/bio.c @@ -27,12 +27,12 @@ struct btrfs_failed_bio { }; /* Is this a data path I/O that needs storage layer checksum and repair? 
*/ -static inline bool is_data_bbio(struct btrfs_bio *bbio) +static inline bool is_data_bbio(const struct btrfs_bio *bbio) { return bbio->inode && is_data_inode(bbio->inode); } -static bool bbio_has_ordered_extent(struct btrfs_bio *bbio) +static bool bbio_has_ordered_extent(const struct btrfs_bio *bbio) { return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE; } @@ -134,14 +134,14 @@ void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) } } -static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror) +static int next_repair_mirror(const struct btrfs_failed_bio *fbio, int cur_mirror) { if (cur_mirror == fbio->num_copies) return cur_mirror + 1 - fbio->num_copies; return cur_mirror + 1; } -static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror) +static int prev_repair_mirror(const struct btrfs_failed_bio *fbio, int cur_mirror) { if (cur_mirror == 1) return fbio->num_copies; @@ -165,12 +165,6 @@ static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio, struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio); int mirror = repair_bbio->mirror_num; - /* - * We can only trigger this for data bio, which doesn't support larger - * folios yet. - */ - ASSERT(folio_order(page_folio(bv->bv_page)) == 0); - if (repair_bbio->bio.bi_status || !btrfs_data_csum_ok(repair_bbio, dev, 0, bv)) { bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ); @@ -301,7 +295,7 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *de btrfs_bio_end_io(bbio, bbio->bio.bi_status); } -static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev) +static void btrfs_log_dev_io_error(const struct bio *bio, struct btrfs_device *dev) { if (!dev || !dev->bdev) return; @@ -316,8 +310,8 @@ static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS); } -static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info, - struct bio *bio) +static struct workqueue_struct *btrfs_end_io_wq(const struct btrfs_fs_info *fs_info, + const struct bio *bio) { if (bio->bi_opf & REQ_META) return fs_info->endio_meta_workers; @@ -439,7 +433,7 @@ static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio) ASSERT(btrfs_dev_is_sequential(dev, physical)); bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; } - btrfs_debug_in_rcu(dev->fs_info, + btrfs_debug(dev->fs_info, "%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", __func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, (unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev), @@ -845,7 +839,7 @@ int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, goto out_bio_uninit; } - btrfs_info_rl_in_rcu(fs_info, + btrfs_info_rl(fs_info, "read error corrected: ino %llu off %llu (dev %s sector %llu)", ino, start, btrfs_dev_name(smap.dev), smap.physical >> SECTOR_SHIFT); diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 5b0cb04b2b93..9bf282d2453c 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -34,6 +34,19 @@ int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group } #endif +static inline bool has_unwritten_metadata(struct btrfs_block_group *block_group) +{ + /* The meta_write_pointer is available only on the zoned setup. 
*/ + if (!btrfs_is_zoned(block_group->fs_info)) + return false; + + if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) + return false; + + return block_group->start + block_group->alloc_offset > + block_group->meta_write_pointer; +} + /* * Return target flags in extended format or 0 if restripe for this chunk_type * is not in progress @@ -832,8 +845,8 @@ out: static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg) { - btrfs_clear_extent_bits(&bg->fs_info->excluded_extents, bg->start, - bg->start + bg->length - 1, EXTENT_DIRTY); + btrfs_clear_extent_bit(&bg->fs_info->excluded_extents, bg->start, + bg->start + bg->length - 1, EXTENT_DIRTY, NULL); } static noinline void caching_thread(struct btrfs_work *work) @@ -877,7 +890,7 @@ static noinline void caching_thread(struct btrfs_work *work) */ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) - ret = load_free_space_tree(caching_ctl); + ret = btrfs_load_free_space_tree(caching_ctl); else ret = load_extent_tree_free(caching_ctl); done: @@ -1235,7 +1248,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, * another task to attempt to create another block group with the same * item key (and failing with -EEXIST and a transaction abort). */ - ret = remove_block_group_free_space(trans, block_group); + ret = btrfs_remove_block_group_free_space(trans, block_group); if (ret) goto out; @@ -1244,6 +1257,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, goto out; spin_lock(&block_group->lock); + /* + * Hitting this WARN means we removed a block group with an unwritten + * region. It will cause "unable to find chunk map for logical" errors. + */ + if (WARN_ON(has_unwritten_metadata(block_group))) + btrfs_warn(fs_info, + "block group %llu is removed before metadata write out", + block_group->start); + set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags); /* @@ -1403,7 +1425,7 @@ out: if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { btrfs_info(cache->fs_info, "unable to make block group %llu ro", cache->start); - btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); + btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, false); } return ret; } @@ -1436,14 +1458,14 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans, */ mutex_lock(&fs_info->unused_bg_unpin_mutex); if (prev_trans) { - ret = btrfs_clear_extent_bits(&prev_trans->pinned_extents, start, end, - EXTENT_DIRTY); + ret = btrfs_clear_extent_bit(&prev_trans->pinned_extents, start, end, + EXTENT_DIRTY, NULL); if (ret) goto out; } - ret = btrfs_clear_extent_bits(&trans->transaction->pinned_extents, start, end, - EXTENT_DIRTY); + ret = btrfs_clear_extent_bit(&trans->transaction->pinned_extents, start, end, + EXTENT_DIRTY, NULL); out: mutex_unlock(&fs_info->unused_bg_unpin_mutex); if (prev_trans) @@ -1586,8 +1608,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * needing to allocate extents from the block group. 
*/ used = btrfs_space_info_used(space_info, true); - if (space_info->total_bytes - block_group->length < used && - block_group->zone_unusable < block_group->length) { + if ((space_info->total_bytes - block_group->length < used && + block_group->zone_unusable < block_group->length) || + has_unwritten_metadata(block_group)) { /* * Add a reference for the list, compensate for the ref * drop under the "next" label for the @@ -1616,8 +1639,10 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) ret = btrfs_zone_finish(block_group); if (ret < 0) { btrfs_dec_block_group_ro(block_group); - if (ret == -EAGAIN) + if (ret == -EAGAIN) { + btrfs_link_bg_list(block_group, &retry_list); ret = 0; + } goto next; } @@ -1843,7 +1868,6 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) */ list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); while (!list_empty(&fs_info->reclaim_bgs)) { - u64 zone_unusable; u64 used; u64 reserved; int ret = 0; @@ -1910,16 +1934,6 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) goto next; } - /* - * Cache the zone_unusable value before turning the block group - * to read only. As soon as the block group is read only it's - * zone_unusable value gets moved to the block group's read-only - * bytes and isn't available for calculations anymore. We also - * cache it before unlocking the block group, to prevent races - * (reports from KCSAN and such tools) with tasks updating it. - */ - zone_unusable = bg->zone_unusable; - spin_unlock(&bg->lock); spin_unlock(&space_info->lock); @@ -1963,14 +1977,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) reserved = bg->reserved; spin_unlock(&bg->lock); - btrfs_info(fs_info, - "reclaiming chunk %llu with %llu%% used %llu%% reserved %llu%% unusable", - bg->start, - div64_u64(used * 100, bg->length), - div64_u64(reserved * 100, bg->length), - div64_u64(zone_unusable * 100, bg->length)); trace_btrfs_reclaim_block_group(bg); - ret = btrfs_relocate_chunk(fs_info, bg->start); + ret = btrfs_relocate_chunk(fs_info, bg->start, false); if (ret) { btrfs_dec_block_group_ro(bg); btrfs_err(fs_info, "error relocating chunk %llu", @@ -2372,7 +2380,7 @@ static int read_one_block_group(struct btrfs_fs_info *info, cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); cache->space_info = btrfs_find_space_info(info, cache->flags); - set_free_space_tree_thresholds(cache); + btrfs_set_free_space_tree_thresholds(cache); if (need_clear) { /* @@ -2791,7 +2799,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) block_group->length); if (ret) btrfs_abort_transaction(trans, ret); - add_block_group_free_space(trans, block_group); + btrfs_add_block_group_free_space(trans, block_group); /* * If we restriped during balance, we may have added a new raid @@ -2889,7 +2897,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); cache->length = size; - set_free_space_tree_thresholds(cache); + btrfs_set_free_space_tree_thresholds(cache); cache->flags = type; cache->cached = BTRFS_CACHE_FINISHED; cache->global_root_id = calculate_global_root_id(fs_info, cache->start); @@ -3636,9 +3644,11 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) wait_event(cur_trans->writer_wait, atomic_read(&cur_trans->num_writers) == 1); ret = update_block_group_item(trans, path, cache); - } - if (ret) + if (ret) + btrfs_abort_transaction(trans, ret); + } else if (ret) { btrfs_abort_transaction(trans, ret); + } } /* If its not 
on the io list, we need to put the block group */ @@ -4298,7 +4308,7 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans, if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", left, bytes, type); - btrfs_dump_space_info(fs_info, info, 0, 0); + btrfs_dump_space_info(fs_info, info, 0, false); } if (left < bytes) { @@ -4443,7 +4453,7 @@ static void check_removing_space_info(struct btrfs_space_info *space_info) * indicates a real bug if this happens. */ if (WARN_ON(space_info->bytes_pinned > 0 || space_info->bytes_may_use > 0)) - btrfs_dump_space_info(info, space_info, 0, 0); + btrfs_dump_space_info(info, space_info, 0, false); /* * If there was a failure to cleanup a log tree, very likely due to an @@ -4454,7 +4464,7 @@ static void check_removing_space_info(struct btrfs_space_info *space_info) if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || !BTRFS_FS_LOG_CLEANUP_ERROR(info)) { if (WARN_ON(space_info->bytes_reserved > 0)) - btrfs_dump_space_info(info, space_info, 0, 0); + btrfs_dump_space_info(info, space_info, 0, false); } WARN_ON(space_info->reclaim_size > 0); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index aa176cc9a324..a8bb8429c966 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -246,6 +246,11 @@ struct btrfs_block_group { /* Lock for free space tree operations. */ struct mutex free_space_lock; + /* Protected by @free_space_lock. */ + bool using_free_space_bitmaps; + /* Protected by @free_space_lock. */ + bool using_free_space_bitmaps_cached; + /* * Number of extents in this block group used for swap files. * All accesses protected by the spinlock 'lock'. diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index a79fa0726f1d..b99fb0273292 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -525,6 +525,19 @@ static inline void btrfs_update_inode_mapping_flags(struct btrfs_inode *inode) mapping_set_stable_writes(inode->vfs_inode.i_mapping); } +static inline void btrfs_set_inode_mapping_order(struct btrfs_inode *inode) +{ + /* Metadata inode should not reach here. */ + ASSERT(is_data_inode(inode)); + + /* We only allow BITS_PER_LONGS blocks for each bitmap. 
*/ +#ifdef CONFIG_BTRFS_EXPERIMENTAL + mapping_set_folio_order_range(inode->vfs_inode.i_mapping, 0, + ilog2(((BITS_PER_LONG << inode->root->fs_info->sectorsize_bits) + >> PAGE_SHIFT))); +#endif +} + /* Array of bytes with variable length, hexadecimal format 0x1234 */ #define CSUM_FMT "0x%*phN" #define CSUM_FMT_VALUE(size, bytes) size, bytes diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 48d07939fee4..d09d622016ef 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -282,8 +282,8 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb) { struct inode *inode = &cb->bbio.inode->vfs_inode; struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); - unsigned long index = cb->start >> PAGE_SHIFT; - unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; + pgoff_t index = cb->start >> PAGE_SHIFT; + const pgoff_t end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; struct folio_batch fbatch; int i; int ret; @@ -415,7 +415,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, int *memstall, unsigned long *pflags) { struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); - unsigned long end_index; + pgoff_t end_index; struct bio *orig_bio = &cb->orig_bbio->bio; u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size; u64 isize = i_size_read(inode); @@ -446,8 +446,8 @@ static noinline int add_ra_bio_pages(struct inode *inode, end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; while (cur < compressed_end) { - u64 page_end; - u64 pg_index = cur >> PAGE_SHIFT; + pgoff_t page_end; + pgoff_t pg_index = cur >> PAGE_SHIFT; u32 add_size; if (pg_index > end_index) @@ -789,8 +789,8 @@ static void btrfs_init_workspace_manager(int type) */ workspace = alloc_workspace(type, 0); if (IS_ERR(workspace)) { - pr_warn( - "BTRFS: cannot preallocate compression workspace, will try later\n"); + btrfs_warn(NULL, + "cannot preallocate compression workspace, will try later"); } else { atomic_set(&wsm->total_ws, 1); wsm->free_ws = 1; @@ -888,9 +888,9 @@ again: /* once per minute */ 60 * HZ, /* no burst */ 1); - if (__ratelimit(&_rs)) { - pr_warn("BTRFS: no compression workspaces, low memory, retrying\n"); - } + if (__ratelimit(&_rs)) + btrfs_warn(NULL, + "no compression workspaces, low memory, retrying"); } goto again; } @@ -975,7 +975,7 @@ static int btrfs_compress_set_level(unsigned int type, int level) if (level == 0) level = ops->default_level; else - level = min(max(level, ops->min_level), ops->max_level); + level = clamp(level, ops->min_level, ops->max_level); return level; } @@ -1482,7 +1482,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end, struct heuristic_ws *ws) { struct page *page; - u64 index, index_end; + pgoff_t index, index_end; u32 i, curr_sample_pos; u8 *in_data; diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index d34c4341eaf4..1b38e707bbd9 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -13,6 +13,7 @@ #include <linux/wait.h> #include <linux/pagemap.h> #include "bio.h" +#include "fs.h" #include "messages.h" struct address_space; @@ -77,12 +78,10 @@ struct compressed_bio { /* @range_end must be exclusive. */ static inline u32 btrfs_calc_input_length(struct folio *folio, u64 range_end, u64 cur) { - const u64 folio_end = folio_pos(folio) + folio_size(folio); - /* @cur must be inside the folio. 
*/ ASSERT(folio_pos(folio) <= cur); - ASSERT(cur < folio_end); - return min(range_end, folio_end) - cur; + ASSERT(cur < folio_end(folio)); + return min(range_end, folio_end(folio)) - cur; } int __init btrfs_init_compress(void); @@ -114,6 +113,8 @@ enum btrfs_compression_type { BTRFS_COMPRESS_LZO = 2, BTRFS_COMPRESS_ZSTD = 3, BTRFS_NR_COMPRESS_TYPES = 4, + + BTRFS_DEFRAG_DONT_COMPRESS, }; struct workspace_manager { diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a2e7979372cc..74e6d7f3d266 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -198,7 +198,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root) * the inc_not_zero dance and if it doesn't work then * synchronize_rcu and try again. */ - if (atomic_inc_not_zero(&eb->refs)) { + if (refcount_inc_not_zero(&eb->refs)) { rcu_read_unlock(); break; } @@ -283,15 +283,26 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); - WARN_ON(btrfs_header_generation(buf) > trans->transid); - if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) + if (unlikely(btrfs_header_generation(buf) > trans->transid)) { + btrfs_tree_unlock(cow); + free_extent_buffer(cow); + ret = -EUCLEAN; + btrfs_abort_transaction(trans, ret); + return ret; + } + + if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) { ret = btrfs_inc_ref(trans, root, cow, 1); - else + if (ret) + btrfs_abort_transaction(trans, ret); + } else { ret = btrfs_inc_ref(trans, root, cow, 0); + if (ret) + btrfs_abort_transaction(trans, ret); + } if (ret) { btrfs_tree_unlock(cow); free_extent_buffer(cow); - btrfs_abort_transaction(trans, ret); return ret; } @@ -303,9 +314,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, /* * check if the tree block can be shared by multiple trees */ -bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct extent_buffer *buf) +bool btrfs_block_can_be_shared(const struct btrfs_trans_handle *trans, + const struct btrfs_root *root, + const struct extent_buffer *buf) { const u64 buf_gen = btrfs_header_generation(buf); @@ -549,7 +560,7 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans, btrfs_abort_transaction(trans, ret); goto error_unlock_cow; } - atomic_inc(&cow->refs); + refcount_inc(&cow->refs); rcu_assign_pointer(root->node, cow); ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf, @@ -602,9 +613,9 @@ error_unlock_cow: return ret; } -static inline int should_cow_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct extent_buffer *buf) +static inline int should_cow_block(const struct btrfs_trans_handle *trans, + const struct btrfs_root *root, + const struct extent_buffer *buf) { if (btrfs_is_testing(root->fs_info)) return 0; @@ -724,7 +735,7 @@ int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_ke * Slot may point to the total number of items (i.e. one position beyond the last * key) if the key is bigger than the last key in the extent buffer. 
*/ -int btrfs_bin_search(struct extent_buffer *eb, int first_slot, +int btrfs_bin_search(const struct extent_buffer *eb, int first_slot, const struct btrfs_key *key, int *slot) { unsigned long p; @@ -1081,7 +1092,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, /* update the path */ if (left) { if (btrfs_header_nritems(left) > orig_slot) { - atomic_inc(&left->refs); + refcount_inc(&left->refs); /* left was locked after cow */ path->nodes[level] = left; path->slots[level + 1] -= 1; @@ -1268,7 +1279,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, * to the block in 'slot', and triggering ra on them. */ static void reada_for_search(struct btrfs_fs_info *fs_info, - struct btrfs_path *path, + const struct btrfs_path *path, int level, int slot, u64 objectid) { struct extent_buffer *node; @@ -1350,7 +1361,7 @@ static void reada_for_search(struct btrfs_fs_info *fs_info, } } -static noinline void reada_for_balance(struct btrfs_path *path, int level) +static noinline void reada_for_balance(const struct btrfs_path *path, int level) { struct extent_buffer *parent; int slot; @@ -1446,8 +1457,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, u64 blocknr; struct extent_buffer *tmp = NULL; int ret = 0; + int ret2; int parent_level; - int err; bool read_tmp = false; bool tmp_locked = false; bool path_released = false; @@ -1505,9 +1516,9 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, } /* Now we're allowed to do a blocking uptodate check. */ - err = btrfs_read_extent_buffer(tmp, &check); - if (err) { - ret = err; + ret2 = btrfs_read_extent_buffer(tmp, &check); + if (ret2) { + ret = ret2; goto out; } @@ -1548,9 +1559,9 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, } /* Now we're allowed to do a blocking uptodate check. 
*/ - err = btrfs_read_extent_buffer(tmp, &check); - if (err) { - ret = err; + ret2 = btrfs_read_extent_buffer(tmp, &check); + if (ret2) { + ret = ret2; goto out; } @@ -1685,7 +1696,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, if (p->search_commit_root) { b = root->commit_root; - atomic_inc(&b->refs); + refcount_inc(&b->refs); level = btrfs_header_level(b); /* * Ensure that all callers have set skip_locking when @@ -1794,7 +1805,7 @@ static int finish_need_commit_sem_search(struct btrfs_path *path) return 0; } -static inline int search_for_key_slot(struct extent_buffer *eb, +static inline int search_for_key_slot(const struct extent_buffer *eb, int search_low_slot, const struct btrfs_key *key, int prev_cmp, @@ -1928,15 +1939,14 @@ static int search_leaf(struct btrfs_trans_handle *trans, ASSERT(leaf_free_space >= 0); if (leaf_free_space < ins_len) { - int err; - - err = split_leaf(trans, root, key, path, ins_len, - (ret == 0)); - ASSERT(err <= 0); - if (WARN_ON(err > 0)) - err = -EUCLEAN; - if (err) - ret = err; + int ret2; + + ret2 = split_leaf(trans, root, key, path, ins_len, (ret == 0)); + ASSERT(ret2 <= 0); + if (WARN_ON(ret2 > 0)) + ret2 = -EUCLEAN; + if (ret2) + ret = ret2; } } @@ -1982,7 +1992,6 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *b; int slot; int ret; - int err; int level; int lowest_unlock = 1; /* everything at write_lock_level or lower must be write locked */ @@ -2053,6 +2062,7 @@ again: while (b) { int dec = 0; + int ret2; level = btrfs_header_level(b); @@ -2081,16 +2091,15 @@ again: } if (last_level) - err = btrfs_cow_block(trans, root, b, NULL, 0, - &b, - BTRFS_NESTING_COW); + ret2 = btrfs_cow_block(trans, root, b, NULL, 0, + &b, BTRFS_NESTING_COW); else - err = btrfs_cow_block(trans, root, b, - p->nodes[level + 1], - p->slots[level + 1], &b, - BTRFS_NESTING_COW); - if (err) { - ret = err; + ret2 = btrfs_cow_block(trans, root, b, + p->nodes[level + 1], + p->slots[level + 1], &b, + BTRFS_NESTING_COW); + if (ret2) { + ret = ret2; goto done; } } @@ -2138,12 +2147,12 @@ cow_done: slot--; } p->slots[level] = slot; - err = setup_nodes_for_search(trans, root, p, b, level, ins_len, - &write_lock_level); - if (err == -EAGAIN) + ret2 = setup_nodes_for_search(trans, root, p, b, level, ins_len, + &write_lock_level); + if (ret2 == -EAGAIN) goto again; - if (err) { - ret = err; + if (ret2) { + ret = ret2; goto done; } b = p->nodes[level]; @@ -2169,11 +2178,11 @@ cow_done: goto done; } - err = read_block_for_search(root, p, &b, slot, key); - if (err == -EAGAIN && !p->nowait) + ret2 = read_block_for_search(root, p, &b, slot, key); + if (ret2 == -EAGAIN && !p->nowait) goto again; - if (err) { - ret = err; + if (ret2) { + ret = ret2; goto done; } @@ -2236,7 +2245,6 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, struct extent_buffer *b; int slot; int ret; - int err; int level; int lowest_unlock = 1; u8 lowest_level = 0; @@ -2261,6 +2269,7 @@ again: while (b) { int dec = 0; + int ret2; level = btrfs_header_level(b); p->nodes[level] = b; @@ -2296,11 +2305,11 @@ again: goto done; } - err = read_block_for_search(root, p, &b, slot, key); - if (err == -EAGAIN && !p->nowait) + ret2 = read_block_for_search(root, p, &b, slot, key); + if (ret2 == -EAGAIN && !p->nowait) goto again; - if (err) { - ret = err; + if (ret2) { + ret = ret2; goto done; } @@ -2872,6 +2881,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, if (ret < 0) { int ret2; + 
btrfs_clear_buffer_dirty(trans, c); ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1); if (ret2 < 0) btrfs_abort_transaction(trans, ret2); @@ -2885,7 +2895,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, free_extent_buffer(old); add_root_to_dirty_list(root); - atomic_inc(&c->refs); + refcount_inc(&c->refs); path->nodes[level] = c; path->locks[level] = BTRFS_WRITE_LOCK; path->slots[level] = 0; @@ -3100,7 +3110,6 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info = right->fs_info; struct extent_buffer *left = path->nodes[0]; struct extent_buffer *upper = path->nodes[1]; - struct btrfs_map_token token; struct btrfs_disk_key disk_key; int slot; u32 i; @@ -3174,13 +3183,12 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); /* update the item pointers */ - btrfs_init_map_token(&token, right); right_nritems += push_items; btrfs_set_header_nritems(right, right_nritems); push_space = BTRFS_LEAF_DATA_SIZE(fs_info); for (i = 0; i < right_nritems; i++) { - push_space -= btrfs_token_item_size(&token, i); - btrfs_set_token_item_offset(&token, i, push_space); + push_space -= btrfs_item_size(right, i); + btrfs_set_item_offset(right, i, push_space); } left_nritems -= push_items; @@ -3323,7 +3331,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, int ret = 0; u32 this_item_size; u32 old_left_item_size; - struct btrfs_map_token token; if (empty) nr = min(right_nritems, max_slot); @@ -3371,13 +3378,12 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, old_left_nritems = btrfs_header_nritems(left); BUG_ON(old_left_nritems <= 0); - btrfs_init_map_token(&token, left); old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { u32 ioff; - ioff = btrfs_token_item_offset(&token, i); - btrfs_set_token_item_offset(&token, i, + ioff = btrfs_item_offset(left, i); + btrfs_set_item_offset(left, i, ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); } btrfs_set_header_nritems(left, old_left_nritems + push_items); @@ -3398,13 +3404,12 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, btrfs_header_nritems(right) - push_items); } - btrfs_init_map_token(&token, right); right_nritems -= push_items; btrfs_set_header_nritems(right, right_nritems); push_space = BTRFS_LEAF_DATA_SIZE(fs_info); for (i = 0; i < right_nritems; i++) { - push_space = push_space - btrfs_token_item_size(&token, i); - btrfs_set_token_item_offset(&token, i, push_space); + push_space = push_space - btrfs_item_size(right, i); + btrfs_set_item_offset(right, i, push_space); } btrfs_mark_buffer_dirty(trans, left); @@ -3518,7 +3523,6 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans, int i; int ret; struct btrfs_disk_key disk_key; - struct btrfs_map_token token; nritems = nritems - mid; btrfs_set_header_nritems(right, nritems); @@ -3531,12 +3535,11 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans, rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); - btrfs_init_map_token(&token, right); for (i = 0; i < nritems; i++) { u32 ioff; - ioff = btrfs_token_item_offset(&token, i); - btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); + ioff = btrfs_item_offset(right, i); + btrfs_set_item_offset(right, i, ioff + rt_data_off); } 
btrfs_set_header_nritems(l, mid); @@ -4002,7 +4005,6 @@ void btrfs_truncate_item(struct btrfs_trans_handle *trans, unsigned int old_size; unsigned int size_diff; int i; - struct btrfs_map_token token; leaf = path->nodes[0]; slot = path->slots[0]; @@ -4025,12 +4027,11 @@ void btrfs_truncate_item(struct btrfs_trans_handle *trans, * item0..itemN ... dataN.offset..dataN.size .. data0.size */ /* first correct the data pointers */ - btrfs_init_map_token(&token, leaf); for (i = slot; i < nritems; i++) { u32 ioff; - ioff = btrfs_token_item_offset(&token, i); - btrfs_set_token_item_offset(&token, i, ioff + size_diff); + ioff = btrfs_item_offset(leaf, i); + btrfs_set_item_offset(leaf, i, ioff + size_diff); } /* shift the data */ @@ -4093,7 +4094,6 @@ void btrfs_extend_item(struct btrfs_trans_handle *trans, unsigned int old_data; unsigned int old_size; int i; - struct btrfs_map_token token; leaf = path->nodes[0]; @@ -4119,12 +4119,11 @@ void btrfs_extend_item(struct btrfs_trans_handle *trans, * item0..itemN ... dataN.offset..dataN.size .. data0.size */ /* first correct the data pointers */ - btrfs_init_map_token(&token, leaf); for (i = slot; i < nritems; i++) { u32 ioff; - ioff = btrfs_token_item_offset(&token, i); - btrfs_set_token_item_offset(&token, i, ioff - data_size); + ioff = btrfs_item_offset(leaf, i); + btrfs_set_item_offset(leaf, i, ioff - data_size); } /* shift the data */ @@ -4164,7 +4163,6 @@ static void setup_items_for_insert(struct btrfs_trans_handle *trans, struct btrfs_disk_key disk_key; struct extent_buffer *leaf; int slot; - struct btrfs_map_token token; u32 total_size; /* @@ -4192,7 +4190,6 @@ static void setup_items_for_insert(struct btrfs_trans_handle *trans, BUG(); } - btrfs_init_map_token(&token, leaf); if (slot != nritems) { unsigned int old_data = btrfs_item_data_end(leaf, slot); @@ -4210,8 +4207,8 @@ static void setup_items_for_insert(struct btrfs_trans_handle *trans, for (i = slot; i < nritems; i++) { u32 ioff; - ioff = btrfs_token_item_offset(&token, i); - btrfs_set_token_item_offset(&token, i, + ioff = btrfs_item_offset(leaf, i); + btrfs_set_item_offset(leaf, i, ioff - batch->total_data_size); } /* shift the items */ @@ -4228,8 +4225,8 @@ static void setup_items_for_insert(struct btrfs_trans_handle *trans, btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]); btrfs_set_item_key(leaf, &disk_key, slot + i); data_end -= batch->data_sizes[i]; - btrfs_set_token_item_offset(&token, slot + i, data_end); - btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]); + btrfs_set_item_offset(leaf, slot + i, data_end); + btrfs_set_item_size(leaf, slot + i, batch->data_sizes[i]); } btrfs_set_header_nritems(leaf, nritems + batch->nr); @@ -4442,7 +4439,7 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans, root_sub_used_bytes(root); - atomic_inc(&leaf->refs); + refcount_inc(&leaf->refs); ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); free_extent_buffer_stale(leaf); if (ret < 0) @@ -4469,7 +4466,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (slot + nr != nritems) { const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1); const int data_end = leaf_data_end(leaf); - struct btrfs_map_token token; u32 dsize = 0; int i; @@ -4479,12 +4475,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, memmove_leaf_data(leaf, data_end + dsize, data_end, last_off - data_end); - btrfs_init_map_token(&token, leaf); for (i = slot + nr; i < nritems; i++) { u32 ioff; - ioff = 
btrfs_token_item_offset(&token, i); - btrfs_set_token_item_offset(&token, i, ioff + dsize); + ioff = btrfs_item_offset(leaf, i); + btrfs_set_item_offset(leaf, i, ioff + dsize); } memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr); @@ -4527,7 +4522,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, * for possible call to btrfs_del_ptr below */ slot = path->slots[1]; - atomic_inc(&leaf->refs); + refcount_inc(&leaf->refs); /* * We want to be able to at least push one item to the * left neighbour leaf, and that's the first item. @@ -4585,16 +4580,13 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, /* * A helper function to walk down the tree starting at min_key, and looking - * for nodes or leaves that are have a minimum transaction id. + * for leaves that have a minimum transaction id. * This is used by the btree defrag code, and tree logging * * This does not cow, but it does stuff the starting key it finds back * into min_key, so you can call btrfs_search_slot with cow=1 on the * key and get a writable path. * - * This honors path->lowest_level to prevent descent past a given level - * of the tree. - * * min_trans indicates the oldest transaction that you are interested * in walking through. Any nodes or leaves older than min_trans are * skipped over (without reading them). @@ -4615,6 +4607,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, int keep_locks = path->keep_locks; ASSERT(!path->nowait); + ASSERT(path->lowest_level == 0); path->keep_locks = 1; again: cur = btrfs_read_lock_root_node(root); @@ -4636,8 +4629,8 @@ again: goto out; } - /* at the lowest level, we're done, setup the path and exit */ - if (level == path->lowest_level) { + /* At level 0 we're done, setup the path and exit. */ + if (level == 0) { if (slot >= nritems) goto find_next_key; ret = 0; @@ -4678,12 +4671,6 @@ find_next_key: goto out; } } - if (level == path->lowest_level) { - ret = 0; - /* Save our key for returning back. */ - btrfs_node_key_to_cpu(cur, min_key, slot); - goto out; - } cur = btrfs_read_node_slot(cur, slot); if (IS_ERR(cur)) { ret = PTR_ERR(cur); @@ -4699,7 +4686,7 @@ find_next_key: out: path->keep_locks = keep_locks; if (ret == 0) - btrfs_unlock_up_safe(path, path->lowest_level + 1); + btrfs_unlock_up_safe(path, 1); return ret; } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 71fa42ca04fe..fe70b593c7cd 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -224,16 +224,10 @@ struct btrfs_root { struct list_head root_list; - /* - * Xarray that keeps track of in-memory inodes, protected by the lock - * @inode_lock. - */ + /* Xarray that keeps track of in-memory inodes. */ struct xarray inodes; - /* - * Xarray that keeps track of delayed nodes of every inode, protected - * by @inode_lock. - */ + /* Xarray that keeps track of delayed nodes of every inode. 
*/ struct xarray delayed_nodes; /* * right now this just gets used so that a root has its own devid @@ -508,7 +502,7 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info) int __init btrfs_ctree_init(void); void __cold btrfs_ctree_exit(void); -int btrfs_bin_search(struct extent_buffer *eb, int first_slot, +int btrfs_bin_search(const struct extent_buffer *eb, int first_slot, const struct btrfs_key *key, int *slot); int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2); @@ -576,9 +570,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer **cow_ret, u64 new_root_objectid); -bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct extent_buffer *buf); +bool btrfs_block_can_be_shared(const struct btrfs_trans_handle *trans, + const struct btrfs_root *root, + const struct extent_buffer *buf); int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level, int slot); void btrfs_extend_item(struct btrfs_trans_handle *trans, @@ -727,13 +721,18 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) } int btrfs_leaf_free_space(const struct extent_buffer *leaf); -static inline int is_fstree(u64 rootid) +static inline bool btrfs_is_fstree(u64 rootid) { - if (rootid == BTRFS_FS_TREE_OBJECTID || - ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID && - !btrfs_qgroup_level(rootid))) - return 1; - return 0; + if (rootid == BTRFS_FS_TREE_OBJECTID) + return true; + + if ((s64)rootid < (s64)BTRFS_FIRST_FREE_OBJECTID) + return false; + + if (btrfs_qgroup_level(rootid) != 0) + return false; + + return true; } static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root) diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c index 1831618579cb..738179a5e170 100644 --- a/fs/btrfs/defrag.c +++ b/fs/btrfs/defrag.c @@ -60,6 +60,14 @@ static int compare_inode_defrag(const struct inode_defrag *defrag1, return 0; } +static int inode_defrag_cmp(struct rb_node *new, const struct rb_node *existing) +{ + const struct inode_defrag *new_defrag = rb_entry(new, struct inode_defrag, rb_node); + const struct inode_defrag *existing_defrag = rb_entry(existing, struct inode_defrag, rb_node); + + return compare_inode_defrag(new_defrag, existing_defrag); +} + /* * Insert a record for an inode into the defrag tree. The lock must be held * already. @@ -71,37 +79,23 @@ static int btrfs_insert_inode_defrag(struct btrfs_inode *inode, struct inode_defrag *defrag) { struct btrfs_fs_info *fs_info = inode->root->fs_info; - struct inode_defrag *entry; - struct rb_node **p; - struct rb_node *parent = NULL; - int ret; + struct rb_node *node; - p = &fs_info->defrag_inodes.rb_node; - while (*p) { - parent = *p; - entry = rb_entry(parent, struct inode_defrag, rb_node); + node = rb_find_add(&defrag->rb_node, &fs_info->defrag_inodes, inode_defrag_cmp); + if (node) { + struct inode_defrag *entry; - ret = compare_inode_defrag(defrag, entry); - if (ret < 0) - p = &parent->rb_left; - else if (ret > 0) - p = &parent->rb_right; - else { - /* - * If we're reinserting an entry for an old defrag run, - * make sure to lower the transid of our existing - * record. 
- */ - if (defrag->transid < entry->transid) - entry->transid = defrag->transid; - entry->extent_thresh = min(defrag->extent_thresh, - entry->extent_thresh); - return -EEXIST; - } + entry = rb_entry(node, struct inode_defrag, rb_node); + /* + * If we're reinserting an entry for an old defrag run, make + * sure to lower the transid of our existing record. + */ + if (defrag->transid < entry->transid) + entry->transid = defrag->transid; + entry->extent_thresh = min(defrag->extent_thresh, entry->extent_thresh); + return -EEXIST; } set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags); - rb_link_node(&defrag->rb_node, parent, p); - rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes); return 0; } @@ -854,8 +848,8 @@ static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t { struct address_space *mapping = inode->vfs_inode.i_mapping; gfp_t mask = btrfs_alloc_write_mask(mapping); - u64 folio_start; - u64 folio_end; + u64 lock_start; + u64 lock_end; struct extent_state *cached_state = NULL; struct folio *folio; int ret; @@ -891,15 +885,15 @@ again: return ERR_PTR(ret); } - folio_start = folio_pos(folio); - folio_end = folio_pos(folio) + folio_size(folio) - 1; + lock_start = folio_pos(folio); + lock_end = folio_end(folio) - 1; /* Wait for any existing ordered extent in the range */ while (1) { struct btrfs_ordered_extent *ordered; - btrfs_lock_extent(&inode->io_tree, folio_start, folio_end, &cached_state); - ordered = btrfs_lookup_ordered_range(inode, folio_start, folio_size(folio)); - btrfs_unlock_extent(&inode->io_tree, folio_start, folio_end, &cached_state); + btrfs_lock_extent(&inode->io_tree, lock_start, lock_end, &cached_state); + ordered = btrfs_lookup_ordered_range(inode, lock_start, folio_size(folio)); + btrfs_unlock_extent(&inode->io_tree, lock_start, lock_end, &cached_state); if (!ordered) break; @@ -953,7 +947,7 @@ struct defrag_target_range { * @extent_thresh: file extent size threshold, any extent size >= this value * will be ignored * @newer_than: only defrag extents newer than this value - * @do_compress: whether the defrag is doing compression + * @do_compress: whether the defrag is doing compression or no-compression * if true, @extent_thresh will be ignored and all regular * file extents meeting @newer_than will be targets. 
* @locked: if the range has already held extent lock @@ -1184,8 +1178,7 @@ static int defrag_one_locked_target(struct btrfs_inode *inode, if (!folio) break; - if (start >= folio_pos(folio) + folio_size(folio) || - start + len <= folio_pos(folio)) + if (start >= folio_end(folio) || start + len <= folio_pos(folio)) continue; btrfs_folio_clamp_clear_checked(fs_info, folio, start, len); btrfs_folio_clamp_set_dirty(fs_info, folio, start, len); @@ -1226,7 +1219,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len, folios[i] = NULL; goto free_folios; } - cur = folio_pos(folios[i]) + folio_size(folios[i]); + cur = folio_end(folios[i]); } for (int i = 0; i < nr_pages; i++) { if (!folios[i]) @@ -1371,6 +1364,7 @@ int btrfs_defrag_file(struct btrfs_inode *inode, struct file_ra_state *ra, u64 cur; u64 last_byte; bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS); + bool no_compress = (range->flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS); int compress_type = BTRFS_COMPRESS_ZLIB; int compress_level = 0; int ret = 0; @@ -1401,6 +1395,9 @@ int btrfs_defrag_file(struct btrfs_inode *inode, struct file_ra_state *ra, if (range->compress_type) compress_type = range->compress_type; } + } else if (range->flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS) { + compress_type = BTRFS_DEFRAG_DONT_COMPRESS; + compress_level = 1; } if (extent_thresh == 0) @@ -1451,13 +1448,14 @@ int btrfs_defrag_file(struct btrfs_inode *inode, struct file_ra_state *ra, btrfs_inode_unlock(inode, 0); break; } - if (do_compress) { + if (do_compress || no_compress) { inode->defrag_compress = compress_type; inode->defrag_compress_level = compress_level; } ret = defrag_one_cluster(inode, ra, cur, cluster_end + 1 - cur, extent_thresh, - newer_than, do_compress, &sectors_defragged, + newer_than, do_compress || no_compress, + &sectors_defragged, max_to_defrag, &last_scanned); if (sectors_defragged > prev_sectors_defragged) @@ -1496,7 +1494,7 @@ int btrfs_defrag_file(struct btrfs_inode *inode, struct file_ra_state *ra, btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD); ret = sectors_defragged; } - if (do_compress) { + if (do_compress || no_compress) { btrfs_inode_lock(inode, 0); inode->defrag_compress = BTRFS_COMPRESS_NONE; btrfs_inode_unlock(inode, 0); diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 8c597fa60523..0f8d8e275143 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -334,6 +334,20 @@ static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len, return item; } +static int delayed_item_index_cmp(const void *key, const struct rb_node *node) +{ + const u64 *index = key; + const struct btrfs_delayed_item *delayed_item = rb_entry(node, + struct btrfs_delayed_item, rb_node); + + if (delayed_item->index < *index) + return 1; + else if (delayed_item->index > *index) + return -1; + + return 0; +} + /* * Look up the delayed item by key.
* @@ -347,21 +361,10 @@ static struct btrfs_delayed_item *__btrfs_lookup_delayed_item( struct rb_root *root, u64 index) { - struct rb_node *node = root->rb_node; - struct btrfs_delayed_item *delayed_item = NULL; - - while (node) { - delayed_item = rb_entry(node, struct btrfs_delayed_item, - rb_node); - if (delayed_item->index < index) - node = node->rb_right; - else if (delayed_item->index > index) - node = node->rb_left; - else - return delayed_item; - } + struct rb_node *node; - return NULL; + node = rb_find(&index, root, delayed_item_index_cmp); + return rb_entry_safe(node, struct btrfs_delayed_item, rb_node); } static int btrfs_delayed_item_cmp(const struct rb_node *new, @@ -369,14 +372,8 @@ static int btrfs_delayed_item_cmp(const struct rb_node *new, { const struct btrfs_delayed_item *new_item = rb_entry(new, struct btrfs_delayed_item, rb_node); - const struct btrfs_delayed_item *exist_item = - rb_entry(exist, struct btrfs_delayed_item, rb_node); - if (new_item->index < exist_item->index) - return -1; - if (new_item->index > exist_item->index) - return 1; - return 0; + return delayed_item_index_cmp(&new_item->index, exist); } static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node, @@ -1008,8 +1005,16 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, ret = btrfs_lookup_inode(trans, root, path, &key, mod); if (ret > 0) ret = -ENOENT; - if (ret < 0) + if (ret < 0) { + /* + * If we fail to update the delayed inode we need to abort the + * transaction, because we could leave the inode with the + * improper counts behind. + */ + if (ret != -ENOENT) + btrfs_abort_transaction(trans, ret); goto out; + } leaf = path->nodes[0]; inode_item = btrfs_item_ptr(leaf, path->slots[0], @@ -1034,8 +1039,10 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, btrfs_release_path(path); ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret < 0) + if (ret < 0) { + btrfs_abort_transaction(trans, ret); goto err_out; + } ASSERT(ret > 0); ASSERT(path->slots[0] > 0); ret = 0; @@ -1057,21 +1064,14 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, * in the same item doesn't exist. */ ret = btrfs_del_item(trans, root, path); + if (ret < 0) + btrfs_abort_transaction(trans, ret); out: btrfs_release_delayed_iref(node); btrfs_release_path(path); err_out: btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0)); btrfs_release_delayed_inode(node); - - /* - * If we fail to update the delayed inode we need to abort the - * transaction, because we could leave the inode with the improper - * counts behind. 
- */ - if (ret && ret != -ENOENT) - btrfs_abort_transaction(trans, ret); - return ret; } @@ -1540,8 +1540,8 @@ release_node: return ret; } -static int btrfs_delete_delayed_insertion_item(struct btrfs_delayed_node *node, - u64 index) +static bool btrfs_delete_delayed_insertion_item(struct btrfs_delayed_node *node, + u64 index) { struct btrfs_delayed_item *item; @@ -1549,7 +1549,7 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_delayed_node *node, item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index); if (!item) { mutex_unlock(&node->mutex); - return 1; + return false; } /* @@ -1584,7 +1584,7 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_delayed_node *node, } mutex_unlock(&node->mutex); - return 0; + return true; } int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, @@ -1598,9 +1598,10 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, if (IS_ERR(node)) return PTR_ERR(node); - ret = btrfs_delete_delayed_insertion_item(node, index); - if (!ret) + if (btrfs_delete_delayed_insertion_item(node, index)) { + ret = 0; goto end; + } item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM); if (!item) { @@ -1617,7 +1618,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, */ if (ret < 0) { btrfs_err(trans->fs_info, -"metadata reservation failed for delayed dir item deltiona, should have been reserved"); +"metadata reservation failed for delayed dir item deletion, index: %llu, root: %llu, inode: %llu, error: %d", + index, btrfs_root_id(node->root), node->inode_id, ret); btrfs_release_delayed_item(item); goto end; } @@ -1626,9 +1628,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, ret = __btrfs_add_delayed_item(node, item); if (unlikely(ret)) { btrfs_err(trans->fs_info, - "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)", - index, btrfs_root_id(node->root), - node->inode_id, ret); +"failed to add delayed dir index item, root: %llu, inode: %llu, index: %llu, error: %d", + index, btrfs_root_id(node->root), node->inode_id, ret); btrfs_delayed_item_release_metadata(dir->root, item); btrfs_release_delayed_item(item); } @@ -1733,17 +1734,16 @@ void btrfs_readdir_put_delayed_items(struct btrfs_inode *inode, downgrade_write(&inode->vfs_inode.i_rwsem); } -int btrfs_should_delete_dir_index(const struct list_head *del_list, - u64 index) +bool btrfs_should_delete_dir_index(const struct list_head *del_list, u64 index) { struct btrfs_delayed_item *curr; - int ret = 0; + bool ret = false; list_for_each_entry(curr, del_list, readdir_list) { if (curr->index > index) break; if (curr->index == index) { - ret = 1; + ret = true; break; } } @@ -1753,15 +1753,14 @@ int btrfs_should_delete_dir_index(const struct list_head *del_list, /* * Read dir info stored in the delayed tree. */ -int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, - const struct list_head *ins_list) +bool btrfs_readdir_delayed_dir_index(struct dir_context *ctx, + const struct list_head *ins_list) { struct btrfs_dir_item *di; struct btrfs_delayed_item *curr, *next; struct btrfs_key location; char *name; int name_len; - int over = 0; unsigned char d_type; /* @@ -1770,6 +1769,8 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, * directory, nobody can delete any directory indexes now. 
*/ list_for_each_entry_safe(curr, next, ins_list, readdir_list) { + bool over; + list_del(&curr->readdir_list); if (curr->index < ctx->pos) { @@ -1787,17 +1788,16 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type)); btrfs_disk_key_to_cpu(&location, &di->location); - over = !dir_emit(ctx, name, name_len, - location.objectid, d_type); + over = !dir_emit(ctx, name, name_len, location.objectid, d_type); if (refcount_dec_and_test(&curr->refs)) kfree(curr); if (over) - return 1; + return true; ctx->pos++; } - return 0; + return false; } static void fill_stack_inode_item(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index c4b4ba122beb..e6e763ad2d42 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -150,10 +150,9 @@ bool btrfs_readdir_get_delayed_items(struct btrfs_inode *inode, void btrfs_readdir_put_delayed_items(struct btrfs_inode *inode, struct list_head *ins_list, struct list_head *del_list); -int btrfs_should_delete_dir_index(const struct list_head *del_list, - u64 index); -int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, - const struct list_head *ins_list); +bool btrfs_should_delete_dir_index(const struct list_head *del_list, u64 index); +bool btrfs_readdir_delayed_dir_index(struct dir_context *ctx, + const struct list_head *ins_list); /* Used during directory logging. */ void btrfs_log_get_delayed_items(struct btrfs_inode *inode, diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 739c9e29aaa3..ca382c5b186f 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -928,7 +928,7 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info, if (action == BTRFS_ADD_DELAYED_EXTENT) action = BTRFS_ADD_DELAYED_REF; - if (is_fstree(generic_ref->ref_root)) + if (btrfs_is_fstree(generic_ref->ref_root)) seq = atomic64_read(&fs_info->tree_mod_seq); refcount_set(&ref->refs, 1); @@ -958,8 +958,8 @@ void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root, #endif generic_ref->tree_ref.level = level; generic_ref->type = BTRFS_REF_METADATA; - if (skip_qgroup || !(is_fstree(generic_ref->ref_root) && - (!mod_root || is_fstree(mod_root)))) + if (skip_qgroup || !(btrfs_is_fstree(generic_ref->ref_root) && + (!mod_root || btrfs_is_fstree(mod_root)))) generic_ref->skip_qgroup = true; else generic_ref->skip_qgroup = false; @@ -976,8 +976,8 @@ void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset, generic_ref->data_ref.objectid = ino; generic_ref->data_ref.offset = offset; generic_ref->type = BTRFS_REF_DATA; - if (skip_qgroup || !(is_fstree(generic_ref->ref_root) && - (!mod_root || is_fstree(mod_root)))) + if (skip_qgroup || !(btrfs_is_fstree(generic_ref->ref_root) && + (!mod_root || btrfs_is_fstree(mod_root)))) generic_ref->skip_qgroup = true; else generic_ref->skip_qgroup = false; diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 78cc23837610..552ec4fa645d 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -420,7 +420,7 @@ bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head, u64 root, u64 parent); void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans); -static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node) +static inline u64 btrfs_delayed_ref_owner(const struct btrfs_delayed_ref_node *node) { if (node->type == BTRFS_EXTENT_DATA_REF_KEY || node->type == BTRFS_SHARED_DATA_REF_KEY) @@ -428,7 
+428,7 @@ static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node) return node->tree_ref.level; } -static inline u64 btrfs_delayed_ref_offset(struct btrfs_delayed_ref_node *node) +static inline u64 btrfs_delayed_ref_offset(const struct btrfs_delayed_ref_node *node) { if (node->type == BTRFS_EXTENT_DATA_REF_KEY || node->type == BTRFS_SHARED_DATA_REF_KEY) @@ -436,7 +436,7 @@ static inline u64 btrfs_delayed_ref_offset(struct btrfs_delayed_ref_node *node) return 0; } -static inline u8 btrfs_ref_type(struct btrfs_ref *ref) +static inline u8 btrfs_ref_type(const struct btrfs_ref *ref) { ASSERT(ref->type == BTRFS_REF_DATA || ref->type == BTRFS_REF_METADATA); diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 2decb9fff445..4675bcd5f92e 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -250,7 +250,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, } bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE, - fs_info->bdev_holder, NULL); + fs_info->sb, &fs_holder_ops); if (IS_ERR(bdev_file)) { btrfs_err(fs_info, "target device %s is invalid!", device_path); return PTR_ERR(bdev_file); @@ -327,7 +327,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, return 0; error: - fput(bdev_file); + bdev_fput(bdev_file); return ret; } @@ -600,7 +600,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, return PTR_ERR(src_device); if (btrfs_pinned_by_swapfile(fs_info, src_device)) { - btrfs_warn_in_rcu(fs_info, + btrfs_warn(fs_info, "cannot replace device %s (devid %llu) due to active swapfile", btrfs_dev_name(src_device), src_device->devid); return -ETXTBSY; @@ -647,7 +647,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, dev_replace->srcdev = src_device; dev_replace->tgtdev = tgt_device; - btrfs_info_in_rcu(fs_info, + btrfs_info(fs_info, "dev_replace from %s (devid %llu) to %s started", btrfs_dev_name(src_device), src_device->devid, @@ -943,7 +943,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, tgt_device); } else { if (scrub_ret != -ECANCELED) - btrfs_err_in_rcu(fs_info, + btrfs_err(fs_info, "btrfs_scrub_dev(%s, %llu, %s) failed %d", btrfs_dev_name(src_device), src_device->devid, @@ -961,7 +961,7 @@ error: return scrub_ret; } - btrfs_info_in_rcu(fs_info, + btrfs_info(fs_info, "dev_replace from %s (devid %llu) to %s finished", btrfs_dev_name(src_device), src_device->devid, @@ -1109,7 +1109,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info) * btrfs_dev_replace_finishing() will handle the * cleanup part */ - btrfs_info_in_rcu(fs_info, + btrfs_info(fs_info, "dev_replace from %s (devid %llu) to %s canceled", btrfs_dev_name(src_device), src_device->devid, btrfs_dev_name(tgt_device)); @@ -1143,7 +1143,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info) ret = btrfs_commit_transaction(trans); WARN_ON(ret); - btrfs_info_in_rcu(fs_info, + btrfs_info(fs_info, "suspended dev_replace from %s (devid %llu) to %s canceled", btrfs_dev_name(src_device), src_device->devid, btrfs_dev_name(tgt_device)); @@ -1247,7 +1247,7 @@ static int btrfs_dev_replace_kthread(void *data) progress = btrfs_dev_replace_progress(fs_info); progress = div_u64(progress, 10); - btrfs_info_in_rcu(fs_info, + btrfs_info(fs_info, "continuing dev_replace from %s (devid %llu) to target %s @%u%%", btrfs_dev_name(dev_replace->srcdev), dev_replace->srcdev->devid, diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index b29cc31a7c4a..69863e398e22 100644 --- 
a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -227,7 +227,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, return di; } -int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, +int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir_ino, const struct fscrypt_str *name) { int ret; @@ -242,7 +242,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, if (!path) return -ENOMEM; - key.objectid = dir; + key.objectid = dir_ino; key.type = BTRFS_DIR_ITEM_KEY; key.offset = btrfs_name_hash(name->name, name->len); diff --git a/fs/btrfs/dir-item.h b/fs/btrfs/dir-item.h index 8462579a95f4..e52174a8baf9 100644 --- a/fs/btrfs/dir-item.h +++ b/fs/btrfs/dir-item.h @@ -14,7 +14,7 @@ struct btrfs_inode; struct btrfs_root; struct btrfs_trans_handle; -int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, +int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir_ino, const struct fscrypt_str *name); int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const struct fscrypt_str *name, struct btrfs_inode *dir, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0d6ad7512f21..9cc14ab35297 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -884,7 +884,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, btrfs_set_root_used(&root->root_item, leaf->len); btrfs_set_root_last_snapshot(&root->root_item, 0); btrfs_set_root_dirid(&root->root_item, 0); - if (is_fstree(objectid)) + if (btrfs_is_fstree(objectid)) generate_random_guid(root->root_item.uuid); else export_guid(root->root_item.uuid, &guid_null); @@ -1104,7 +1104,7 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev) if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID && !btrfs_is_data_reloc_root(root) && - is_fstree(btrfs_root_id(root))) { + btrfs_is_fstree(btrfs_root_id(root))) { set_bit(BTRFS_ROOT_SHAREABLE, &root->state); btrfs_check_and_init_root_item(&root->root_item); } @@ -1113,7 +1113,7 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev) * Don't assign anonymous block device to roots that are not exposed to * userspace, the id pool is limited to 1M */ - if (is_fstree(btrfs_root_id(root)) && + if (btrfs_is_fstree(btrfs_root_id(root)) && btrfs_root_refs(&root->root_item) > 0) { if (!anon_dev) { ret = get_anon_bdev(&root->anon_dev); @@ -1246,6 +1246,8 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) { struct percpu_counter *em_counter = &fs_info->evictable_extent_maps; + if (fs_info->fs_devices) + btrfs_close_devices(fs_info->fs_devices); percpu_counter_destroy(&fs_info->stats_read_blocks); percpu_counter_destroy(&fs_info->dirty_metadata_bytes); percpu_counter_destroy(&fs_info->delalloc_bytes); @@ -1315,7 +1317,7 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, * This is namely for free-space-tree and quota tree, which can change * at runtime and should only be grabbed from fs_info. 
*/ - if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) + if (!btrfs_is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) return ERR_PTR(-ENOENT); again: root = btrfs_lookup_fs_root(fs_info, objectid); @@ -1947,7 +1949,6 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) fs_info->qgroup_tree = RB_ROOT; INIT_LIST_HEAD(&fs_info->dirty_qgroups); fs_info->qgroup_seq = 1; - fs_info->qgroup_ulist = NULL; fs_info->qgroup_rescan_running = false; fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT; mutex_init(&fs_info->qgroup_rescan_lock); @@ -3396,6 +3397,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); fs_info->nodesize = nodesize; + fs_info->nodesize_bits = ilog2(nodesize); fs_info->sectorsize = sectorsize; fs_info->sectorsize_bits = ilog2(sectorsize); fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size; @@ -3561,6 +3563,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device goto fail_sysfs; } + btrfs_zoned_reserve_data_reloc_bg(fs_info); btrfs_free_zone_cache(fs_info); btrfs_check_active_zone_reservation(fs_info); @@ -3681,7 +3684,6 @@ fail_alloc: iput(fs_info->btree_inode); fail: - btrfs_close_devices(fs_info->fs_devices); ASSERT(ret < 0); return ret; } @@ -3694,7 +3696,7 @@ static void btrfs_end_super_write(struct bio *bio) bio_for_each_folio_all(fi, bio) { if (bio->bi_status) { - btrfs_warn_rl_in_rcu(device->fs_info, + btrfs_warn_rl(device->fs_info, "lost super block write due to IO error on %s (%d)", btrfs_dev_name(device), blk_status_to_errno(bio->bi_status)); @@ -3992,7 +3994,7 @@ int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags) } if (min_tolerated == INT_MAX) { - pr_warn("BTRFS: unknown raid flag: %llu", flags); + btrfs_warn(NULL, "unknown raid flag: %llu", flags); min_tolerated = 0; } @@ -4428,7 +4430,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) iput(fs_info->btree_inode); btrfs_mapping_tree_free(fs_info); - btrfs_close_devices(fs_info->fs_devices); } void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans, @@ -4640,7 +4641,7 @@ static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info, while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end, mark, NULL)) { - btrfs_clear_extent_bits(dirty_pages, start, end, mark); + btrfs_clear_extent_bit(dirty_pages, start, end, mark, NULL); while (start <= end) { eb = find_extent_buffer(fs_info, start); start += fs_info->nodesize; diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c index b1b96eb5f64e..66361325f6dc 100644 --- a/fs/btrfs/extent-io-tree.c +++ b/fs/btrfs/extent-io-tree.c @@ -43,7 +43,8 @@ static inline void btrfs_extent_state_leak_debug_check(void) while (!list_empty(&states)) { state = list_first_entry(&states, struct extent_state, leak_list); - pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", + btrfs_err(NULL, + "state leak: start %llu end %llu state %u in tree %d refs %d", state->start, state->end, state->state, extent_state_in_tree(state), refcount_read(&state->refs)); @@ -1882,12 +1883,11 @@ int btrfs_clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 e bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached) { - int err; + int ret; u64 failed_start; - err = set_extent_bit(tree, start, end, bits, &failed_start, NULL, - 
cached, NULL); - if (err == -EEXIST) { + ret = set_extent_bit(tree, start, end, bits, &failed_start, NULL, cached, NULL); + if (ret == -EEXIST) { if (failed_start > start) btrfs_clear_extent_bit(tree, start, failed_start - 1, bits, cached); @@ -1904,21 +1904,21 @@ int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 struct extent_state **cached_state) { struct extent_state *failed_state = NULL; - int err; + int ret; u64 failed_start; - err = set_extent_bit(tree, start, end, bits, &failed_start, + ret = set_extent_bit(tree, start, end, bits, &failed_start, &failed_state, cached_state, NULL); - while (err == -EEXIST) { + while (ret == -EEXIST) { if (failed_start != start) btrfs_clear_extent_bit(tree, start, failed_start - 1, bits, cached_state); wait_extent_bit(tree, failed_start, end, bits, &failed_state); - err = set_extent_bit(tree, start, end, bits, &failed_start, + ret = set_extent_bit(tree, start, end, bits, &failed_start, &failed_state, cached_state, NULL); } - return err; + return ret; } /* diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h index 0a18ca9c59c3..36facca37973 100644 --- a/fs/btrfs/extent-io-tree.h +++ b/fs/btrfs/extent-io-tree.h @@ -19,7 +19,8 @@ enum { ENUM_BIT(EXTENT_DIRTY), ENUM_BIT(EXTENT_LOCKED), ENUM_BIT(EXTENT_DIO_LOCKED), - ENUM_BIT(EXTENT_NEW), + ENUM_BIT(EXTENT_DIRTY_LOG1), + ENUM_BIT(EXTENT_DIRTY_LOG2), ENUM_BIT(EXTENT_DELALLOC), ENUM_BIT(EXTENT_DEFRAG), ENUM_BIT(EXTENT_BOUNDARY), @@ -191,12 +192,6 @@ static inline int btrfs_unlock_extent(struct extent_io_tree *tree, u64 start, u6 cached, NULL); } -static inline int btrfs_clear_extent_bits(struct extent_io_tree *tree, u64 start, - u64 end, u32 bits) -{ - return btrfs_clear_extent_bit(tree, start, end, bits, NULL); -} - int btrfs_set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_changeset *changeset); int btrfs_set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cb6128778a83..97d517cdf2df 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -46,7 +46,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *href, - struct btrfs_delayed_ref_node *node, + const struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extra_op); static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, struct extent_buffer *leaf, @@ -56,12 +56,12 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, u64 flags, u64 owner, u64 offset, struct btrfs_key *ins, int ref_mod, u64 oref_root); static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_delayed_ref_node *node, + const struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op); -static int find_next_key(struct btrfs_path *path, int level, +static int find_next_key(const struct btrfs_path *path, int level, struct btrfs_key *key); -static int block_group_bits(struct btrfs_block_group *cache, u64 bits) +static int block_group_bits(const struct btrfs_block_group *cache, u64 bits) { return (cache->flags & bits) == bits; } @@ -329,7 +329,7 @@ search_again: * is_data == BTRFS_REF_TYPE_ANY, either type is OK. 
*/ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, - struct btrfs_extent_inline_ref *iref, + const struct btrfs_extent_inline_ref *iref, enum btrfs_inline_ref_type is_data) { struct btrfs_fs_info *fs_info = eb->fs_info; @@ -401,16 +401,16 @@ u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset) return ((u64)high_crc << 31) ^ (u64)low_crc; } -static u64 hash_extent_data_ref_item(struct extent_buffer *leaf, - struct btrfs_extent_data_ref *ref) +static u64 hash_extent_data_ref_item(const struct extent_buffer *leaf, + const struct btrfs_extent_data_ref *ref) { return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref), btrfs_extent_data_ref_objectid(leaf, ref), btrfs_extent_data_ref_offset(leaf, ref)); } -static bool match_extent_data_ref(struct extent_buffer *leaf, - struct btrfs_extent_data_ref *ref, +static bool match_extent_data_ref(const struct extent_buffer *leaf, + const struct btrfs_extent_data_ref *ref, u64 root_objectid, u64 owner, u64 offset) { if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid || @@ -497,7 +497,7 @@ fail: static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, struct btrfs_path *path, - struct btrfs_delayed_ref_node *node, + const struct btrfs_delayed_ref_node *node, u64 bytenr) { struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); @@ -617,13 +617,13 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans, return ret; } -static noinline u32 extent_data_ref_count(struct btrfs_path *path, - struct btrfs_extent_inline_ref *iref) +static noinline u32 extent_data_ref_count(const struct btrfs_path *path, + const struct btrfs_extent_inline_ref *iref) { struct btrfs_key key; struct extent_buffer *leaf; - struct btrfs_extent_data_ref *ref1; - struct btrfs_shared_data_ref *ref2; + const struct btrfs_extent_data_ref *ref1; + const struct btrfs_shared_data_ref *ref2; u32 num_refs = 0; int type; @@ -638,10 +638,10 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path, type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); ASSERT(type != BTRFS_REF_TYPE_INVALID); if (type == BTRFS_EXTENT_DATA_REF_KEY) { - ref1 = (struct btrfs_extent_data_ref *)(&iref->offset); + ref1 = (const struct btrfs_extent_data_ref *)(&iref->offset); num_refs = btrfs_extent_data_ref_count(leaf, ref1); } else { - ref2 = (struct btrfs_shared_data_ref *)(iref + 1); + ref2 = (const struct btrfs_shared_data_ref *)(iref + 1); num_refs = btrfs_shared_data_ref_count(leaf, ref2); } } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { @@ -684,7 +684,7 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans, static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans, struct btrfs_path *path, - struct btrfs_delayed_ref_node *node, + const struct btrfs_delayed_ref_node *node, u64 bytenr) { struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); @@ -722,7 +722,7 @@ static inline int extent_ref_type(u64 parent, u64 owner) return type; } -static int find_next_key(struct btrfs_path *path, int level, +static int find_next_key(const struct btrfs_path *path, int level, struct btrfs_key *key) { @@ -1480,7 +1480,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, * */ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, - struct btrfs_delayed_ref_node *node, + const struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op) { BTRFS_PATH_AUTO_FREE(path); @@ -1522,19 +1522,21 @@ static int 
__btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, btrfs_release_path(path); /* now insert the actual backref */ - if (owner < BTRFS_FIRST_FREE_OBJECTID) + if (owner < BTRFS_FIRST_FREE_OBJECTID) { ret = insert_tree_block_ref(trans, path, node, bytenr); - else + if (ret) + btrfs_abort_transaction(trans, ret); + } else { ret = insert_extent_data_ref(trans, path, node, bytenr); - - if (ret) - btrfs_abort_transaction(trans, ret); + if (ret) + btrfs_abort_transaction(trans, ret); + } return ret; } static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info, - struct btrfs_delayed_ref_head *href) + const struct btrfs_delayed_ref_head *href) { u64 root = href->owning_root; @@ -1543,7 +1545,7 @@ static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info, * where it has already been unset. */ if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE || - !href->is_data || !is_fstree(root)) + !href->is_data || !btrfs_is_fstree(root)) return; btrfs_qgroup_free_refroot(fs_info, root, href->reserved_bytes, @@ -1552,7 +1554,7 @@ static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info, static int run_delayed_data_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *href, - struct btrfs_delayed_ref_node *node, + const struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op, bool insert_reserved) { @@ -1620,7 +1622,7 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, } static int run_delayed_extent_op(struct btrfs_trans_handle *trans, - struct btrfs_delayed_ref_head *head, + const struct btrfs_delayed_ref_head *head, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -1707,7 +1709,7 @@ again: static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *href, - struct btrfs_delayed_ref_node *node, + const struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op, bool insert_reserved) { @@ -1754,7 +1756,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, /* helper function to actually process a single delayed ref entry */ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *href, - struct btrfs_delayed_ref_node *node, + const struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op, bool insert_reserved) { @@ -2998,7 +3000,7 @@ static int do_free_extent_accounting(struct btrfs_trans_handle *trans, return ret; } - ret = add_to_free_space_tree(trans, bytenr, num_bytes); + ret = btrfs_add_to_free_space_tree(trans, bytenr, num_bytes); if (ret) { btrfs_abort_transaction(trans, ret); return ret; @@ -3079,7 +3081,7 @@ static int do_free_extent_accounting(struct btrfs_trans_handle *trans, */ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *href, - struct btrfs_delayed_ref_node *node, + const struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_fs_info *info = trans->fs_info; @@ -3649,6 +3651,21 @@ btrfs_release_block_group(struct btrfs_block_group *cache, btrfs_put_block_group(cache); } +static bool find_free_extent_check_size_class(const struct find_free_extent_ctl *ffe_ctl, + const struct btrfs_block_group *bg) +{ + if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) + return true; + if (!btrfs_block_group_should_use_size_class(bg)) + return true; + if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) + return true; + if (ffe_ctl->loop >= 
LOOP_UNSET_SIZE_CLASS && + bg->size_class == BTRFS_BG_SZ_NONE) + return true; + return ffe_ctl->size_class == bg->size_class; +} + /* * Helper function for find_free_extent(). * @@ -3670,7 +3687,8 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg, if (!cluster_bg) goto refill_cluster; if (cluster_bg != bg && (cluster_bg->ro || - !block_group_bits(cluster_bg, ffe_ctl->flags))) + !block_group_bits(cluster_bg, ffe_ctl->flags) || + !find_free_extent_check_size_class(ffe_ctl, cluster_bg))) goto release_cluster; offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr, @@ -4227,21 +4245,6 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, return -ENOSPC; } -static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl, - struct btrfs_block_group *bg) -{ - if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) - return true; - if (!btrfs_block_group_should_use_size_class(bg)) - return true; - if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) - return true; - if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && - bg->size_class == BTRFS_BG_SZ_NONE) - return true; - return ffe_ctl->size_class == bg->size_class; -} - static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, struct find_free_extent_ctl *ffe_ctl, struct btrfs_space_info *space_info, @@ -4782,7 +4785,7 @@ static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr, struct btrfs_fs_info *fs_info = trans->fs_info; int ret; - ret = remove_from_free_space_tree(trans, bytenr, num_bytes); + ret = btrfs_remove_from_free_space_tree(trans, bytenr, num_bytes); if (ret) return ret; @@ -4873,7 +4876,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, } static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_delayed_ref_node *node, + const struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -4961,7 +4964,7 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, ASSERT(generic_ref.ref_root != BTRFS_TREE_LOG_OBJECTID); - if (btrfs_is_data_reloc_root(root) && is_fstree(root->relocation_src_root)) + if (btrfs_is_data_reloc_root(root) && btrfs_is_fstree(root->relocation_src_root)) generic_ref.owning_root = root->relocation_src_root; btrfs_init_data_ref(&generic_ref, owner, offset, 0, false); @@ -4983,7 +4986,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, int ret; struct btrfs_block_group *block_group; struct btrfs_space_info *space_info; - struct btrfs_squota_delta delta = { + const struct btrfs_squota_delta delta = { .root = root_objectid, .num_bytes = ins->offset, .generation = trans->transid, @@ -5111,11 +5114,11 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (buf->log_index == 0) btrfs_set_extent_bit(&root->dirty_log_pages, buf->start, buf->start + buf->len - 1, - EXTENT_DIRTY, NULL); + EXTENT_DIRTY_LOG1, NULL); else btrfs_set_extent_bit(&root->dirty_log_pages, buf->start, buf->start + buf->len - 1, - EXTENT_NEW, NULL); + EXTENT_DIRTY_LOG2, NULL); } else { buf->log_index = -1; btrfs_set_extent_bit(&trans->transaction->dirty_pages, buf->start, @@ -5552,7 +5555,7 @@ again: goto again; } - exists = btrfs_find_delayed_tree_ref(head, root->root_key.objectid, parent); + exists = btrfs_find_delayed_tree_ref(head, btrfs_root_id(root), parent); mutex_unlock(&head->mutex); out: spin_unlock(&delayed_refs->lock); @@ -5872,15 +5875,20 @@ static noinline int 
walk_up_proc(struct btrfs_trans_handle *trans, if (wc->refs[level] == 1) { if (level == 0) { - if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) + if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { ret = btrfs_dec_ref(trans, root, eb, 1); - else + if (ret) { + btrfs_abort_transaction(trans, ret); + return ret; + } + } else { ret = btrfs_dec_ref(trans, root, eb, 0); - if (ret) { - btrfs_abort_transaction(trans, ret); - return ret; + if (ret) { + btrfs_abort_transaction(trans, ret); + return ret; + } } - if (is_fstree(btrfs_root_id(root))) { + if (btrfs_is_fstree(btrfs_root_id(root))) { ret = btrfs_qgroup_trace_leaf_items(trans, eb); if (ret) { btrfs_err_rl(fs_info, @@ -6341,7 +6349,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, btrfs_assert_tree_write_locked(parent); parent_level = btrfs_header_level(parent); - atomic_inc(&parent->refs); + refcount_inc(&parent->refs); path->nodes[parent_level] = parent; path->slots[parent_level] = btrfs_header_nritems(parent); @@ -6442,7 +6450,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed) /* Check if there are any CHUNK_* bits left */ if (start > device->total_bytes) { DEBUG_WARN(); - btrfs_warn_in_rcu(fs_info, + btrfs_warn(fs_info, "ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu", start, end - start + 1, btrfs_dev_name(device), diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h index 72914074c304..82d3a82dc712 100644 --- a/fs/btrfs/extent-tree.h +++ b/fs/btrfs/extent-tree.h @@ -97,7 +97,7 @@ enum btrfs_inline_ref_type { }; int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, - struct btrfs_extent_inline_ref *iref, + const struct btrfs_extent_inline_ref *iref, enum btrfs_inline_ref_type is_data); u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1dc931c4937f..835b0deef9bb 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -75,9 +75,9 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info) while (!list_empty(&fs_info->allocated_ebs)) { eb = list_first_entry(&fs_info->allocated_ebs, struct extent_buffer, leak_list); - pr_err( - "BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n", - eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, + btrfs_err(fs_info, + "buffer leak start %llu len %u refs %d bflags %lu owner %llu", + eb->start, eb->len, refcount_read(&eb->refs), eb->bflags, btrfs_header_owner(eb)); list_del(&eb->leak_list); WARN_ON_ONCE(1); @@ -110,6 +110,7 @@ struct btrfs_bio_ctrl { * This is to avoid touching ranges covered by compression/inline. 
*/ unsigned long submit_bitmap; + struct readahead_control *ractl; }; static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) @@ -266,8 +267,7 @@ static noinline int lock_delalloc_folios(struct inode *inode, goto out; } range_start = max_t(u64, folio_pos(folio), start); - range_len = min_t(u64, folio_pos(folio) + folio_size(folio), - end + 1) - range_start; + range_len = min_t(u64, folio_end(folio), end + 1) - range_start; btrfs_folio_set_lock(fs_info, folio, range_start, range_len); processed_end = range_start + range_len - 1; @@ -321,7 +321,7 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode, ASSERT(orig_end > orig_start); /* The range should at least cover part of the folio */ - ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) || + ASSERT(!(orig_start >= folio_end(locked_folio) || orig_end <= folio_pos(locked_folio))); again: /* step one, find a bunch of delalloc bytes starting at start */ @@ -419,7 +419,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); ASSERT(folio_pos(folio) <= start && - start + len <= folio_pos(folio) + folio_size(folio)); + start + len <= folio_end(folio)); if (uptodate && btrfs_verify_folio(folio, start, len)) btrfs_folio_set_uptodate(fs_info, folio, start, len); @@ -782,7 +782,7 @@ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl, static int attach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio, - struct btrfs_subpage *prealloc) + struct btrfs_folio_state *prealloc) { struct btrfs_fs_info *fs_info = eb->fs_info; int ret = 0; @@ -806,7 +806,7 @@ static int attach_extent_buffer_folio(struct extent_buffer *eb, /* Already mapped, just free prealloc */ if (folio_test_private(folio)) { - btrfs_free_subpage(prealloc); + btrfs_free_folio_state(prealloc); return 0; } @@ -815,7 +815,7 @@ static int attach_extent_buffer_folio(struct extent_buffer *eb, folio_attach_private(folio, prealloc); else /* Do new allocation to attach subpage */ - ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA); + ret = btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA); return ret; } @@ -831,7 +831,7 @@ int set_folio_extent_mapped(struct folio *folio) fs_info = folio_to_fs_info(folio); if (btrfs_is_subpage(fs_info, folio)) - return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA); + return btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA); folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE); return 0; @@ -848,7 +848,7 @@ void clear_folio_extent_mapped(struct folio *folio) fs_info = folio_to_fs_info(folio); if (btrfs_is_subpage(fs_info, folio)) - return btrfs_detach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA); + return btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA); folio_detach_private(folio); } @@ -882,6 +882,25 @@ static struct extent_map *get_extent_map(struct btrfs_inode *inode, return em; } + +static void btrfs_readahead_expand(struct readahead_control *ractl, + const struct extent_map *em) +{ + const u64 ra_pos = readahead_pos(ractl); + const u64 ra_end = ra_pos + readahead_length(ractl); + const u64 em_end = em->start + em->ram_bytes; + + /* No expansion for holes and inline extents. 
*/ + if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE) + return; + + ASSERT(em_end >= ra_pos, + "extent_map %llu %llu ends before current readahead position %llu", + em->start, em->len, ra_pos); + if (em_end > ra_end) + readahead_expand(ractl, ra_pos, em_end - ra_pos); +} + /* * basic readpage implementation. Locked extent state structs are inserted * into the tree that are removed when the IO is done (by the end_io @@ -945,6 +964,16 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached, compress_type = btrfs_extent_map_compression(em); + /* + * Only expand readahead for extents which are already creating + * the pages anyway in add_ra_bio_pages, which is compressed + * extents in the non subpage case. + */ + if (bio_ctrl->ractl && + !btrfs_is_subpage(fs_info, folio) && + compress_type != BTRFS_COMPRESS_NONE) + btrfs_readahead_expand(bio_ctrl->ractl, em); + if (compress_type != BTRFS_COMPRESS_NONE) disk_bytenr = em->disk_bytenr; else @@ -1086,7 +1115,7 @@ static bool can_skip_one_ordered_range(struct btrfs_inode *inode, * finished our folio read and unlocked the folio. */ if (btrfs_folio_test_dirty(fs_info, folio, cur, blocksize)) { - u64 range_len = min(folio_pos(folio) + folio_size(folio), + u64 range_len = min(folio_end(folio), ordered->file_offset + ordered->num_bytes) - cur; ret = true; @@ -1108,7 +1137,7 @@ static bool can_skip_one_ordered_range(struct btrfs_inode *inode, * So we return true and update @next_ret to the OE/folio boundary. */ if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) { - u64 range_len = min(folio_pos(folio) + folio_size(folio), + u64 range_len = min(folio_end(folio), ordered->file_offset + ordered->num_bytes) - cur; /* @@ -1663,7 +1692,7 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl int ret; size_t pg_offset; loff_t i_size = i_size_read(&inode->vfs_inode); - unsigned long end_index = i_size >> PAGE_SHIFT; + const pgoff_t end_index = i_size >> PAGE_SHIFT; const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio); trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc); @@ -1704,7 +1733,7 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); btrfs_err_rl(fs_info, "root %lld ino %llu folio %llu is marked dirty without notifying the fs", - inode->root->root_key.objectid, + btrfs_root_id(inode->root), btrfs_ino(inode), folio_pos(folio)); ret = -EUCLEAN; goto done; @@ -1774,7 +1803,7 @@ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *e */ spin_lock(&eb->refs_lock); if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { - XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits); + XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); unsigned long flags; set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); @@ -1874,7 +1903,7 @@ static void set_btree_ioerr(struct extent_buffer *eb) static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark) { struct btrfs_fs_info *fs_info = eb->fs_info; - XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits); + XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); unsigned long flags; xas_lock_irqsave(&xas, flags); @@ -1886,7 +1915,7 @@ static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark) static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark) { struct btrfs_fs_info *fs_info = 
eb->fs_info; - XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits); + XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); unsigned long flags; xas_lock_irqsave(&xas, flags); @@ -1961,7 +1990,7 @@ retry: if (!eb) return NULL; - if (!atomic_inc_not_zero(&eb->refs)) { + if (!refcount_inc_not_zero(&eb->refs)) { xas_reset(xas); goto retry; } @@ -1986,7 +2015,7 @@ static unsigned int buffer_tree_get_ebs_tag(struct btrfs_fs_info *fs_info, rcu_read_lock(); while ((eb = find_get_eb(&xas, end, tag)) != NULL) { if (!eb_batch_add(batch, eb)) { - *start = ((eb->start + eb->len) >> fs_info->sectorsize_bits); + *start = ((eb->start + eb->len) >> fs_info->nodesize_bits); goto out; } } @@ -2008,11 +2037,11 @@ static struct extent_buffer *find_extent_buffer_nolock( struct btrfs_fs_info *fs_info, u64 start) { struct extent_buffer *eb; - unsigned long index = (start >> fs_info->sectorsize_bits); + unsigned long index = (start >> fs_info->nodesize_bits); rcu_read_lock(); eb = xa_load(&fs_info->buffer_tree, index); - if (eb && !atomic_inc_not_zero(&eb->refs)) + if (eb && !refcount_inc_not_zero(&eb->refs)) eb = NULL; rcu_read_unlock(); return eb; @@ -2031,10 +2060,7 @@ static void end_bbio_meta_write(struct btrfs_bio *bbio) } buffer_tree_clear_mark(eb, PAGECACHE_TAG_WRITEBACK); - clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); - smp_mb__after_atomic(); - wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); - + clear_and_wake_up_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); bio_put(&bbio->bio); } @@ -2085,7 +2111,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb, for (int i = 0; i < num_extent_folios(eb); i++) { struct folio *folio = eb->folios[i]; u64 range_start = max_t(u64, eb->start, folio_pos(folio)); - u32 range_len = min_t(u64, folio_pos(folio) + folio_size(folio), + u32 range_len = min_t(u64, folio_end(folio), eb->start + eb->len) - range_start; folio_lock(folio); @@ -2114,8 +2140,8 @@ void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start, u64 end) { struct eb_batch batch; - unsigned long start_index = (start >> fs_info->sectorsize_bits); - unsigned long end_index = (end >> fs_info->sectorsize_bits); + unsigned long start_index = (start >> fs_info->nodesize_bits); + unsigned long end_index = (end >> fs_info->nodesize_bits); eb_batch_init(&batch); while (start_index <= end_index) { @@ -2151,7 +2177,7 @@ int btree_write_cache_pages(struct address_space *mapping, eb_batch_init(&batch); if (wbc->range_cyclic) { - index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->sectorsize_bits); + index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->nodesize_bits); end = -1; /* @@ -2160,8 +2186,8 @@ int btree_write_cache_pages(struct address_space *mapping, */ scanned = (index == 0); } else { - index = (wbc->range_start >> fs_info->sectorsize_bits); - end = (wbc->range_end >> fs_info->sectorsize_bits); + index = (wbc->range_start >> fs_info->nodesize_bits); + end = (wbc->range_end >> fs_info->nodesize_bits); scanned = 1; } @@ -2489,7 +2515,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f continue; } - cur_end = min_t(u64, folio_pos(folio) + folio_size(folio) - 1, end); + cur_end = min_t(u64, folio_end(folio) - 1, end); cur_len = cur_end + 1 - cur; ASSERT(folio_test_locked(folio)); @@ -2541,7 +2567,10 @@ int btrfs_writepages(struct address_space *mapping, struct writeback_control *wb void btrfs_readahead(struct readahead_control *rac) { - struct btrfs_bio_ctrl bio_ctrl = { .opf 
= REQ_OP_READ | REQ_RAHEAD }; + struct btrfs_bio_ctrl bio_ctrl = { + .opf = REQ_OP_READ | REQ_RAHEAD, + .ractl = rac + }; struct folio *folio; struct btrfs_inode *inode = BTRFS_I(rac->mapping->host); const u64 start = readahead_pos(rac); @@ -2731,13 +2760,13 @@ static int extent_buffer_under_io(const struct extent_buffer *eb) static bool folio_range_has_eb(struct folio *folio) { - struct btrfs_subpage *subpage; + struct btrfs_folio_state *bfs; lockdep_assert_held(&folio->mapping->i_private_lock); if (folio_test_private(folio)) { - subpage = folio_get_private(folio); - if (atomic_read(&subpage->eb_refs)) + bfs = folio_get_private(folio); + if (atomic_read(&bfs->eb_refs)) return true; } return false; @@ -2787,7 +2816,7 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo * attached to one dummy eb, no sharing. */ if (!mapped) { - btrfs_detach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA); + btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA); return; } @@ -2798,7 +2827,7 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo * page range and no unfinished IO. */ if (!folio_range_has_eb(folio)) - btrfs_detach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA); + btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA); spin_unlock(&mapping->i_private_lock); } @@ -2842,7 +2871,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info btrfs_leak_debug_add_eb(eb); spin_lock_init(&eb->refs_lock); - atomic_set(&eb->refs, 1); + refcount_set(&eb->refs, 1); ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE); @@ -2975,13 +3004,13 @@ static void check_buffer_tree_ref(struct extent_buffer *eb) * once io is initiated, TREE_REF can no longer be cleared, so that is * the moment at which any such race is best fixed. */ - refs = atomic_read(&eb->refs); + refs = refcount_read(&eb->refs); if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) return; spin_lock(&eb->refs_lock); if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) - atomic_inc(&eb->refs); + refcount_inc(&eb->refs); spin_unlock(&eb->refs_lock); } @@ -3038,7 +3067,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, eb->fs_info = fs_info; again: xa_lock_irq(&fs_info->buffer_tree); - exists = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->sectorsize_bits, + exists = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->nodesize_bits, NULL, eb, GFP_NOFS); if (xa_is_err(exists)) { ret = xa_err(exists); @@ -3047,7 +3076,7 @@ again: return ERR_PTR(ret); } if (exists) { - if (!atomic_inc_not_zero(&exists->refs)) { + if (!refcount_inc_not_zero(&exists->refs)) { /* The extent buffer is being freed, retry. */ xa_unlock_irq(&fs_info->buffer_tree); goto again; @@ -3092,7 +3121,7 @@ static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info, * just overwrite folio private. */ exists = folio_get_private(folio); - if (atomic_inc_not_zero(&exists->refs)) + if (refcount_inc_not_zero(&exists->refs)) return exists; WARN_ON(folio_test_dirty(folio)); @@ -3141,13 +3170,13 @@ static bool check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start) * The caller needs to free the existing folios and retry using the same order. 
*/ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, - struct btrfs_subpage *prealloc, + struct btrfs_folio_state *prealloc, struct extent_buffer **found_eb_ret) { struct btrfs_fs_info *fs_info = eb->fs_info; struct address_space *mapping = fs_info->btree_inode->i_mapping; - const unsigned long index = eb->start >> PAGE_SHIFT; + const pgoff_t index = eb->start >> PAGE_SHIFT; struct folio *existing_folio; int ret; @@ -3224,7 +3253,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, int attached = 0; struct extent_buffer *eb; struct extent_buffer *existing_eb = NULL; - struct btrfs_subpage *prealloc = NULL; + struct btrfs_folio_state *prealloc = NULL; u64 lockdep_owner = owner_root; bool page_contig = true; int uptodate = 1; @@ -3269,7 +3298,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, * manually if we exit earlier. */ if (btrfs_meta_is_subpage(fs_info)) { - prealloc = btrfs_alloc_subpage(fs_info, PAGE_SIZE, BTRFS_SUBPAGE_METADATA); + prealloc = btrfs_alloc_folio_state(fs_info, PAGE_SIZE, BTRFS_SUBPAGE_METADATA); if (IS_ERR(prealloc)) { ret = PTR_ERR(prealloc); goto out; @@ -3280,7 +3309,7 @@ reallocate: /* Allocate all pages first. */ ret = alloc_eb_folio_array(eb, true); if (ret < 0) { - btrfs_free_subpage(prealloc); + btrfs_free_folio_state(prealloc); goto out; } @@ -3354,7 +3383,7 @@ reallocate: again: xa_lock_irq(&fs_info->buffer_tree); existing_eb = __xa_cmpxchg(&fs_info->buffer_tree, - start >> fs_info->sectorsize_bits, NULL, eb, + start >> fs_info->nodesize_bits, NULL, eb, GFP_NOFS); if (xa_is_err(existing_eb)) { ret = xa_err(existing_eb); @@ -3362,7 +3391,7 @@ again: goto out; } if (existing_eb) { - if (!atomic_inc_not_zero(&existing_eb->refs)) { + if (!refcount_inc_not_zero(&existing_eb->refs)) { xa_unlock_irq(&fs_info->buffer_tree); goto again; } @@ -3391,7 +3420,7 @@ again: return eb; out: - WARN_ON(!atomic_dec_and_test(&eb->refs)); + WARN_ON(!refcount_dec_and_test(&eb->refs)); /* * Any attached folios need to be detached before we unlock them. This @@ -3437,8 +3466,7 @@ static int release_extent_buffer(struct extent_buffer *eb) { lockdep_assert_held(&eb->refs_lock); - WARN_ON(atomic_read(&eb->refs) == 0); - if (atomic_dec_and_test(&eb->refs)) { + if (refcount_dec_and_test(&eb->refs)) { struct btrfs_fs_info *fs_info = eb->fs_info; spin_unlock(&eb->refs_lock); @@ -3458,7 +3486,7 @@ static int release_extent_buffer(struct extent_buffer *eb) * in this case. */ xa_cmpxchg_irq(&fs_info->buffer_tree, - eb->start >> fs_info->sectorsize_bits, eb, NULL, + eb->start >> fs_info->nodesize_bits, eb, NULL, GFP_ATOMIC); btrfs_leak_debug_del_eb(eb); @@ -3484,22 +3512,26 @@ void free_extent_buffer(struct extent_buffer *eb) if (!eb) return; - refs = atomic_read(&eb->refs); + refs = refcount_read(&eb->refs); while (1) { - if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) - || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && - refs == 1)) + if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) { + if (refs == 1) + break; + } else if (refs <= 3) { break; - if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1)) + } + + /* Optimization to avoid locking eb->refs_lock. 
*/ + if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1)) return; } spin_lock(&eb->refs_lock); - if (atomic_read(&eb->refs) == 2 && + if (refcount_read(&eb->refs) == 2 && test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && !extent_buffer_under_io(eb) && test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) - atomic_dec(&eb->refs); + refcount_dec(&eb->refs); /* * I know this is terrible, but it's temporary until we stop tracking @@ -3516,9 +3548,9 @@ void free_extent_buffer_stale(struct extent_buffer *eb) spin_lock(&eb->refs_lock); set_bit(EXTENT_BUFFER_STALE, &eb->bflags); - if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && + if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) - atomic_dec(&eb->refs); + refcount_dec(&eb->refs); release_extent_buffer(eb); } @@ -3576,7 +3608,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans, btree_clear_folio_dirty_tag(folio); folio_unlock(folio); } - WARN_ON(atomic_read(&eb->refs) == 0); + WARN_ON(refcount_read(&eb->refs) == 0); } void set_extent_buffer_dirty(struct extent_buffer *eb) @@ -3587,7 +3619,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb) was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); - WARN_ON(atomic_read(&eb->refs) == 0); + WARN_ON(refcount_read(&eb->refs) == 0); WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)); @@ -3646,9 +3678,7 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb) static void clear_extent_buffer_reading(struct extent_buffer *eb) { - clear_bit(EXTENT_BUFFER_READING, &eb->bflags); - smp_mb__after_atomic(); - wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING); + clear_and_wake_up_bit(EXTENT_BUFFER_READING, &eb->bflags); } static void end_bbio_meta_read(struct btrfs_bio *bbio) @@ -3713,7 +3743,7 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num, eb->read_mirror = 0; check_buffer_tree_ref(eb); - atomic_inc(&eb->refs); + refcount_inc(&eb->refs); bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, REQ_OP_READ | REQ_META, eb->fs_info, @@ -3725,7 +3755,7 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num, for (int i = 0; i < num_extent_folios(eb); i++) { struct folio *folio = eb->folios[i]; u64 range_start = max_t(u64, eb->start, folio_pos(folio)); - u32 range_len = min_t(u64, folio_pos(folio) + folio_size(folio), + u32 range_len = min_t(u64, folio_end(folio), eb->start + eb->len) - range_start; bio_add_folio_nofail(&bbio->bio, folio, range_len, @@ -4104,8 +4134,8 @@ static inline void eb_bitmap_offset(const struct extent_buffer *eb, * @start: offset of the bitmap item in the extent buffer * @nr: bit number to test */ -int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, - unsigned long nr) +bool extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, + unsigned long nr) { unsigned long i; size_t offset; @@ -4296,9 +4326,9 @@ static int try_release_subpage_extent_buffer(struct folio *folio) { struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); struct extent_buffer *eb; - unsigned long start = (folio_pos(folio) >> fs_info->sectorsize_bits); + unsigned long start = (folio_pos(folio) >> fs_info->nodesize_bits); unsigned long index = start; - unsigned long end = index + (PAGE_SIZE >> fs_info->sectorsize_bits) - 1; + unsigned long end = index + (PAGE_SIZE >> fs_info->nodesize_bits) - 1; int ret; 
xa_lock_irq(&fs_info->buffer_tree); @@ -4308,7 +4338,7 @@ static int try_release_subpage_extent_buffer(struct folio *folio) * won't disappear out from under us. */ spin_lock(&eb->refs_lock); - if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { + if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { spin_unlock(&eb->refs_lock); continue; } @@ -4374,7 +4404,7 @@ int try_release_extent_buffer(struct folio *folio) * this page. */ spin_lock(&eb->refs_lock); - if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { + if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { spin_unlock(&eb->refs_lock); spin_unlock(&folio->mapping->i_private_lock); return 0; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index e36e8d6a00bc..61130786b9a3 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -98,7 +98,7 @@ struct extent_buffer { void *addr; spinlock_t refs_lock; - atomic_t refs; + refcount_t refs; int read_mirror; /* >= 0 if eb belongs to a log tree, -1 otherwise */ s8 log_index; @@ -345,8 +345,8 @@ void memmove_extent_buffer(const struct extent_buffer *dst, unsigned long len); void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, unsigned long len); -int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, - unsigned long pos); +bool extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, + unsigned long pos); void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, unsigned long pos, unsigned long len); void extent_buffer_bitmap_clear(const struct extent_buffer *eb, diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 02bfdb976e40..57f52585a6dd 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -84,7 +84,7 @@ static void remove_em(struct btrfs_inode *inode, struct extent_map *em) rb_erase(&em->rb_node, &inode->extent_tree.root); RB_CLEAR_NODE(&em->rb_node); - if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(inode->root))) + if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(inode->root))) percpu_counter_dec(&fs_info->evictable_extent_maps); } @@ -502,7 +502,7 @@ static int add_extent_mapping(struct btrfs_inode *inode, setup_extent_mapping(inode, em, modified); - if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(root))) + if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(root))) percpu_counter_inc(&fs_info->evictable_extent_maps); return 0; @@ -1337,7 +1337,7 @@ static void btrfs_extent_map_shrinker_worker(struct work_struct *work) if (!root) continue; - if (is_fstree(btrfs_root_id(root))) + if (btrfs_is_fstree(btrfs_root_id(root))) nr_dropped += btrfs_scan_root(root, &ctx); btrfs_put_root(root); diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c index 43bf0979fd53..7935586a9dbd 100644 --- a/fs/btrfs/fiemap.c +++ b/fs/btrfs/fiemap.c @@ -320,7 +320,7 @@ static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *p * the cost of allocating a new one. 
*/ ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags)); - atomic_inc(&clone->refs); + refcount_inc(&clone->refs); ret = btrfs_next_leaf(inode->root, path); if (ret != 0) diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 54d523d4f421..c09fbc257634 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -427,7 +427,7 @@ int btrfs_lookup_bio_sums(struct btrfs_bio *bbio) memset(csum_dst, 0, csum_size); count = 1; - if (btrfs_root_id(inode->root) == BTRFS_DATA_RELOC_TREE_OBJECTID) { + if (btrfs_is_data_reloc_root(inode->root)) { u64 file_offset = bbio->file_offset + bio_offset; btrfs_set_extent_bit(&inode->io_tree, file_offset, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 8ce6f45f45e0..204674934795 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -89,8 +89,7 @@ int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos num_bytes = round_up(write_bytes + pos - start_pos, fs_info->sectorsize); ASSERT(num_bytes <= U32_MAX); - ASSERT(folio_pos(folio) <= pos && - folio_pos(folio) + folio_size(folio) >= pos + write_bytes); + ASSERT(folio_pos(folio) <= pos && folio_end(folio) >= pos + write_bytes); end_of_last_block = start_pos + num_bytes - 1; @@ -801,7 +800,7 @@ static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64 u64 len) { u64 clamp_start = max_t(u64, pos, folio_pos(folio)); - u64 clamp_end = min_t(u64, pos + len, folio_pos(folio) + folio_size(folio)); + u64 clamp_end = min_t(u64, pos + len, folio_end(folio)); const u32 blocksize = inode_to_fs_info(inode)->sectorsize; int ret = 0; @@ -857,7 +856,7 @@ static noinline int prepare_one_folio(struct inode *inode, struct folio **folio_ loff_t pos, size_t write_bytes, bool nowait) { - unsigned long index = pos >> PAGE_SHIFT; + const pgoff_t index = pos >> PAGE_SHIFT; gfp_t mask = get_prepare_gfp_flags(inode, nowait); fgf_t fgp_flags = (nowait ? FGP_WRITEBEGIN | FGP_NOWAIT : FGP_WRITEBEGIN) | fgf_set_order(write_bytes); @@ -963,6 +962,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio, * @pos: File offset. * @write_bytes: The length to write, will be updated to the nocow writeable * range. + * @nowait: Indicate if we can block or not (non-blocking IO context). * * This function will flush ordered extents in the range to ensure proper * nocow checks. @@ -971,7 +971,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio, * > 0 If we can nocow, and updates @write_bytes. * 0 If we can't do a nocow write. * -EAGAIN If we can't do a nocow write because snapshoting of the inode's - * root is in progress. + * root is in progress or because we are in a non-blocking IO + * context and need to block (@nowait is true). * < 0 If an error happened. * * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0. 
@@ -983,8 +984,8 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos, struct btrfs_root *root = inode->root; struct extent_state *cached_state = NULL; u64 lockstart, lockend; - u64 num_bytes; - int ret; + u64 cur_offset; + int ret = 0; if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) return 0; @@ -995,7 +996,6 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos, lockstart = round_down(pos, fs_info->sectorsize); lockend = round_up(pos + *write_bytes, fs_info->sectorsize) - 1; - num_bytes = lockend - lockstart + 1; if (nowait) { if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend, @@ -1007,14 +1007,36 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos, btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend, &cached_state); } - ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, nowait); - if (ret <= 0) - btrfs_drew_write_unlock(&root->snapshot_lock); - else - *write_bytes = min_t(size_t, *write_bytes , - num_bytes - pos + lockstart); + + cur_offset = lockstart; + while (cur_offset < lockend) { + u64 num_bytes = lockend - cur_offset + 1; + + ret = can_nocow_extent(inode, cur_offset, &num_bytes, NULL, nowait); + if (ret <= 0) { + /* + * If cur_offset == lockstart it means we haven't found + * any extent against which we can NOCOW, so unlock the + * snapshot lock. + */ + if (cur_offset == lockstart) + btrfs_drew_write_unlock(&root->snapshot_lock); + break; + } + cur_offset += num_bytes; + } + btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state); + /* + * cur_offset > lockstart means there's at least a partial range we can + * NOCOW, and that range can cover one or more extents. + */ + if (cur_offset > lockstart) { + *write_bytes = min_t(size_t, *write_bytes, cur_offset - pos); + return 1; + } + return ret; } @@ -1233,8 +1255,8 @@ again: * The reserved range goes beyond the current folio, shrink the reserved * space to the folio boundary. 
*/ - if (reserved_start + reserved_len > folio_pos(folio) + folio_size(folio)) { - const u64 last_block = folio_pos(folio) + folio_size(folio); + if (reserved_start + reserved_len > folio_end(folio)) { + const u64 last_block = folio_end(folio); shrink_reserved_space(inode, *data_reserved, reserved_start, reserved_len, last_block - reserved_start, @@ -1832,9 +1854,9 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) { struct page *page = vmf->page; struct folio *folio = page_folio(page); - struct inode *inode = file_inode(vmf->vma->vm_file); - struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_inode *inode = BTRFS_I(file_inode(vmf->vma->vm_file)); + struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct extent_io_tree *io_tree = &inode->io_tree; struct btrfs_ordered_extent *ordered; struct extent_state *cached_state = NULL; struct extent_changeset *data_reserved = NULL; @@ -1842,6 +1864,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) loff_t size; size_t fsize = folio_size(folio); int ret; + bool only_release_metadata = false; u64 reserved_space; u64 page_start; u64 page_end; @@ -1849,7 +1872,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) reserved_space = fsize; - sb_start_pagefault(inode->i_sb); + sb_start_pagefault(inode->vfs_inode.i_sb); page_start = folio_pos(folio); page_end = page_start + folio_size(folio) - 1; end = page_end; @@ -1862,20 +1885,43 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) * end up waiting indefinitely to get a lock on the page currently * being processed by btrfs_page_mkwrite() function. */ - ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, - page_start, reserved_space); - if (ret < 0) + ret = btrfs_check_data_free_space(inode, &data_reserved, page_start, + reserved_space, false); + if (ret < 0) { + size_t write_bytes = reserved_space; + + if (btrfs_check_nocow_lock(inode, page_start, &write_bytes, false) <= 0) + goto out_noreserve; + + only_release_metadata = true; + + /* + * Can't write the whole range, there may be shared extents or + * holes in the range, bail out with @only_release_metadata set + * to true so that we unlock the nocow lock before returning the + * error. + */ + if (write_bytes < reserved_space) + goto out_noreserve; + } + ret = btrfs_delalloc_reserve_metadata(inode, reserved_space, + reserved_space, false); + if (ret < 0) { + if (!only_release_metadata) + btrfs_free_reserved_data_space(inode, data_reserved, + page_start, reserved_space); goto out_noreserve; + } ret = file_update_time(vmf->vma->vm_file); if (ret < 0) goto out; again: - down_read(&BTRFS_I(inode)->i_mmap_lock); + down_read(&inode->i_mmap_lock); folio_lock(folio); - size = i_size_read(inode); + size = i_size_read(&inode->vfs_inode); - if ((folio->mapping != inode->i_mapping) || + if ((folio->mapping != inode->vfs_inode.i_mapping) || (page_start >= size)) { /* Page got truncated out from underneath us. */ goto out_unlock; @@ -1893,11 +1939,11 @@ again: * We can't set the delalloc bits if there are pending ordered * extents. Drop our locks and wait for them to finish. 
*/ - ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, fsize); + ordered = btrfs_lookup_ordered_range(inode, page_start, fsize); if (ordered) { btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state); folio_unlock(folio); - up_read(&BTRFS_I(inode)->i_mmap_lock); + up_read(&inode->i_mmap_lock); btrfs_start_ordered_extent(ordered); btrfs_put_ordered_extent(ordered); goto again; @@ -1906,10 +1952,14 @@ again: if (folio_contains(folio, (size - 1) >> PAGE_SHIFT)) { reserved_space = round_up(size - page_start, fs_info->sectorsize); if (reserved_space < fsize) { + const u64 to_free = fsize - reserved_space; + end = page_start + reserved_space - 1; - btrfs_delalloc_release_space(BTRFS_I(inode), - data_reserved, end + 1, - fsize - reserved_space, true); + if (only_release_metadata) + btrfs_delalloc_release_metadata(inode, to_free, true); + else + btrfs_delalloc_release_space(inode, data_reserved, + end + 1, to_free, true); } } @@ -1920,12 +1970,11 @@ again: * clear any delalloc bits within this page range since we have to * reserve data&meta space before lock_page() (see above comments). */ - btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, + btrfs_clear_extent_bit(io_tree, page_start, end, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, &cached_state); - ret = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0, - &cached_state); + ret = btrfs_set_extent_delalloc(inode, page_start, end, 0, &cached_state); if (ret < 0) { btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state); goto out_unlock; @@ -1944,26 +1993,38 @@ again: btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start); btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start); - btrfs_set_inode_last_sub_trans(BTRFS_I(inode)); + btrfs_set_inode_last_sub_trans(inode); + + if (only_release_metadata) + btrfs_set_extent_bit(io_tree, page_start, end, EXTENT_NORESERVE, + &cached_state); btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state); - up_read(&BTRFS_I(inode)->i_mmap_lock); + up_read(&inode->i_mmap_lock); - btrfs_delalloc_release_extents(BTRFS_I(inode), fsize); - sb_end_pagefault(inode->i_sb); + btrfs_delalloc_release_extents(inode, fsize); + if (only_release_metadata) + btrfs_check_nocow_unlock(inode); + sb_end_pagefault(inode->vfs_inode.i_sb); extent_changeset_free(data_reserved); return VM_FAULT_LOCKED; out_unlock: folio_unlock(folio); - up_read(&BTRFS_I(inode)->i_mmap_lock); + up_read(&inode->i_mmap_lock); out: - btrfs_delalloc_release_extents(BTRFS_I(inode), fsize); - btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start, - reserved_space, true); + btrfs_delalloc_release_extents(inode, fsize); + if (only_release_metadata) + btrfs_delalloc_release_metadata(inode, reserved_space, true); + else + btrfs_delalloc_release_space(inode, data_reserved, page_start, + reserved_space, true); extent_changeset_free(data_reserved); out_noreserve: - sb_end_pagefault(inode->i_sb); + if (only_release_metadata) + btrfs_check_nocow_unlock(inode); + + sb_end_pagefault(inode->vfs_inode.i_sb); if (ret < 0) return vmf_error(ret); @@ -1978,15 +2039,16 @@ static const struct vm_operations_struct btrfs_file_vm_ops = { .page_mkwrite = btrfs_page_mkwrite, }; -static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) +static int btrfs_file_mmap_prepare(struct vm_area_desc *desc) { + struct file *filp = desc->file; struct address_space *mapping = filp->f_mapping; if (!mapping->a_ops->read_folio) return -ENOEXEC; 
file_accessed(filp); - vma->vm_ops = &btrfs_file_vm_ops; + desc->vm_ops = &btrfs_file_vm_ops; return 0; } @@ -2195,7 +2257,7 @@ static bool check_range_has_page(struct inode *inode, u64 start, u64 end) if (folio->index < start_index) continue; /* A large folio extends beyond the end. Not a target. */ - if (folio->index + folio_nr_pages(folio) > end_index) + if (folio_next_index(folio) > end_index) continue; /* A folio doesn't cover the head/tail index. Found a target. */ ret = true; @@ -2341,7 +2403,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode, u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1); u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize); struct btrfs_trans_handle *trans = NULL; - struct btrfs_block_rsv *rsv; + struct btrfs_block_rsv rsv; unsigned int rsv_count; u64 cur_offset; u64 len = end - start; @@ -2350,13 +2412,9 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode, if (end <= start) return -EINVAL; - rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); - if (!rsv) { - ret = -ENOMEM; - goto out; - } - rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1); - rsv->failfast = true; + btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP); + rsv.size = btrfs_calc_insert_metadata_size(fs_info, 1); + rsv.failfast = true; /* * 1 - update the inode @@ -2373,14 +2431,14 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode, if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; - goto out_free; + goto out_release; } - ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, + ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv, min_size, false); if (WARN_ON(ret)) goto out_trans; - trans->block_rsv = rsv; + trans->block_rsv = &rsv; cur_offset = start; drop_args.path = path; @@ -2496,10 +2554,10 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode, } ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, - rsv, min_size, false); + &rsv, min_size, false); if (WARN_ON(ret)) break; - trans->block_rsv = rsv; + trans->block_rsv = &rsv; cur_offset = drop_args.drop_end; len = end - cur_offset; @@ -2576,16 +2634,15 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode, out_trans: if (!trans) - goto out_free; + goto out_release; trans->block_rsv = &fs_info->trans_block_rsv; if (ret) btrfs_end_transaction(trans); else *trans_out = trans; -out_free: - btrfs_free_block_rsv(fs_info, rsv); -out: +out_release: + btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL); return ret; } @@ -3765,7 +3822,7 @@ const struct file_operations btrfs_file_operations = { .splice_read = filemap_splice_read, .write_iter = btrfs_file_write_iter, .splice_write = iter_file_splice_write, - .mmap = btrfs_file_mmap, + .mmap_prepare = btrfs_file_mmap_prepare, .open = btrfs_file_open, .release = btrfs_release_file, .get_unmapped_area = thp_get_unmapped_area, diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 4b34ea1f01c2..5d8d1570a5c9 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -366,7 +366,7 @@ fail: static void readahead_cache(struct inode *inode) { struct file_ra_state ra; - unsigned long last_index; + pgoff_t last_index; file_ra_state_init(&ra, inode->i_mapping); last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; @@ -3192,7 +3192,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group, u64 *max_extent_size) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; - int err; + int ret2; u64 search_start = 
cluster->window_start; u64 search_bytes = bytes; u64 ret = 0; @@ -3200,8 +3200,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group, search_start = min_start; search_bytes = bytes; - err = search_bitmap(ctl, entry, &search_start, &search_bytes, true); - if (err) { + ret2 = search_bitmap(ctl, entry, &search_start, &search_bytes, true); + if (ret2) { *max_extent_size = max(get_max_extent_size(entry), *max_extent_size); return 0; diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index a83c268f7f87..eba7f22ae49c 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -35,7 +35,7 @@ static struct btrfs_root *btrfs_free_space_root( return btrfs_global_root(block_group->fs_info, &key); } -void set_free_space_tree_thresholds(struct btrfs_block_group *cache) +void btrfs_set_free_space_tree_thresholds(struct btrfs_block_group *cache) { u32 bitmap_range; size_t bitmap_size; @@ -82,22 +82,19 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans, ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info)); if (ret) - goto out; + return ret; leaf = path->nodes[0]; info = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_free_space_info); btrfs_set_free_space_extent_count(leaf, info, 0); btrfs_set_free_space_flags(leaf, info, 0); - - ret = 0; -out: btrfs_release_path(path); - return ret; + return 0; } EXPORT_FOR_TESTS -struct btrfs_free_space_info *search_free_space_info( +struct btrfs_free_space_info *btrfs_search_free_space_info( struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path, int cow) @@ -201,9 +198,9 @@ static void le_bitmap_set(unsigned long *map, unsigned int start, int len) } EXPORT_FOR_TESTS -int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group, - struct btrfs_path *path) +int btrfs_convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group, + struct btrfs_path *path) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root = btrfs_free_space_root(block_group); @@ -281,7 +278,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, btrfs_release_path(path); } - info = search_free_space_info(trans, block_group, path, 1); + info = btrfs_search_free_space_info(trans, block_group, path, 1); if (IS_ERR(info)) { ret = PTR_ERR(info); btrfs_abort_transaction(trans, ret); @@ -290,6 +287,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, leaf = path->nodes[0]; flags = btrfs_free_space_flags(leaf, info); flags |= BTRFS_FREE_SPACE_USING_BITMAPS; + block_group->using_free_space_bitmaps = true; + block_group->using_free_space_bitmaps_cached = true; btrfs_set_free_space_flags(leaf, info, flags); expected_extent_count = btrfs_free_space_extent_count(leaf, info); btrfs_release_path(path); @@ -343,9 +342,9 @@ out: } EXPORT_FOR_TESTS -int convert_free_space_to_extents(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group, - struct btrfs_path *path) +int btrfs_convert_free_space_to_extents(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group, + struct btrfs_path *path) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root = btrfs_free_space_root(block_group); @@ -409,12 +408,12 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, data_size = free_space_bitmap_size(fs_info, found_key.offset); - ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1); 
+ path->slots[0]--; + ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); read_extent_buffer(leaf, bitmap_cursor, ptr, data_size); nr++; - path->slots[0]--; } else { ASSERT(0); } @@ -428,7 +427,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, btrfs_release_path(path); } - info = search_free_space_info(trans, block_group, path, 1); + info = btrfs_search_free_space_info(trans, block_group, path, 1); if (IS_ERR(info)) { ret = PTR_ERR(info); btrfs_abort_transaction(trans, ret); @@ -437,20 +436,22 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, leaf = path->nodes[0]; flags = btrfs_free_space_flags(leaf, info); flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS; + block_group->using_free_space_bitmaps = false; + block_group->using_free_space_bitmaps_cached = true; btrfs_set_free_space_flags(leaf, info, flags); expected_extent_count = btrfs_free_space_extent_count(leaf, info); btrfs_release_path(path); - nrbits = block_group->length >> block_group->fs_info->sectorsize_bits; + nrbits = block_group->length >> fs_info->sectorsize_bits; start_bit = find_next_bit_le(bitmap, nrbits, 0); while (start_bit < nrbits) { end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit); ASSERT(start_bit < end_bit); - key.objectid = start + start_bit * block_group->fs_info->sectorsize; + key.objectid = start + start_bit * fs_info->sectorsize; key.type = BTRFS_FREE_SPACE_EXTENT_KEY; - key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize; + key.offset = (end_bit - start_bit) * fs_info->sectorsize; ret = btrfs_insert_empty_item(trans, root, path, &key, 0); if (ret) { @@ -493,11 +494,10 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans, if (new_extents == 0) return 0; - info = search_free_space_info(trans, block_group, path, 1); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto out; - } + info = btrfs_search_free_space_info(trans, block_group, path, 1); + if (IS_ERR(info)) + return PTR_ERR(info); + flags = btrfs_free_space_flags(path->nodes[0], info); extent_count = btrfs_free_space_extent_count(path->nodes[0], info); @@ -507,19 +507,18 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans, if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) && extent_count > block_group->bitmap_high_thresh) { - ret = convert_free_space_to_bitmaps(trans, block_group, path); + ret = btrfs_convert_free_space_to_bitmaps(trans, block_group, path); } else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) && extent_count < block_group->bitmap_low_thresh) { - ret = convert_free_space_to_extents(trans, block_group, path); + ret = btrfs_convert_free_space_to_extents(trans, block_group, path); } -out: return ret; } EXPORT_FOR_TESTS -int free_space_test_bit(struct btrfs_block_group *block_group, - struct btrfs_path *path, u64 offset) +bool btrfs_free_space_test_bit(struct btrfs_block_group *block_group, + struct btrfs_path *path, u64 offset) { struct extent_buffer *leaf; struct btrfs_key key; @@ -537,13 +536,13 @@ int free_space_test_bit(struct btrfs_block_group *block_group, ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); i = div_u64(offset - found_start, block_group->fs_info->sectorsize); - return !!extent_buffer_test_bit(leaf, ptr, i); + return extent_buffer_test_bit(leaf, ptr, i); } -static void free_space_set_bits(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group, - struct btrfs_path *path, u64 *start, u64 *size, - int bit) +static void free_space_modify_bits(struct btrfs_trans_handle *trans, + struct btrfs_block_group 
*block_group, + struct btrfs_path *path, u64 *start, u64 *size, + bool set_bits) { struct btrfs_fs_info *fs_info = block_group->fs_info; struct extent_buffer *leaf; @@ -567,7 +566,7 @@ static void free_space_set_bits(struct btrfs_trans_handle *trans, ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); first = (*start - found_start) >> fs_info->sectorsize_bits; last = (end - found_start) >> fs_info->sectorsize_bits; - if (bit) + if (set_bits) extent_buffer_bitmap_set(leaf, ptr, first, last - first); else extent_buffer_bitmap_clear(leaf, ptr, first, last - first); @@ -611,13 +610,14 @@ static int free_space_next_bitmap(struct btrfs_trans_handle *trans, static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path, - u64 start, u64 size, int remove) + u64 start, u64 size, bool remove) { struct btrfs_root *root = btrfs_free_space_root(block_group); struct btrfs_key key; u64 end = start + size; u64 cur_start, cur_size; - int prev_bit, next_bit; + bool prev_bit_set = false; + bool next_bit_set = false; int new_extents; int ret; @@ -634,16 +634,16 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1); if (ret) - goto out; + return ret; - prev_bit = free_space_test_bit(block_group, path, prev_block); + prev_bit_set = btrfs_free_space_test_bit(block_group, path, prev_block); /* The previous block may have been in the previous bitmap. */ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (start >= key.objectid + key.offset) { ret = free_space_next_bitmap(trans, root, path); if (ret) - goto out; + return ret; } } else { key.objectid = start; @@ -652,9 +652,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1); if (ret) - goto out; - - prev_bit = -1; + return ret; } /* @@ -664,13 +662,13 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, cur_start = start; cur_size = size; while (1) { - free_space_set_bits(trans, block_group, path, &cur_start, &cur_size, - !remove); + free_space_modify_bits(trans, block_group, path, &cur_start, + &cur_size, !remove); if (cur_size == 0) break; ret = free_space_next_bitmap(trans, root, path); if (ret) - goto out; + return ret; } /* @@ -683,42 +681,36 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, if (end >= key.objectid + key.offset) { ret = free_space_next_bitmap(trans, root, path); if (ret) - goto out; + return ret; } - next_bit = free_space_test_bit(block_group, path, end); - } else { - next_bit = -1; + next_bit_set = btrfs_free_space_test_bit(block_group, path, end); } if (remove) { new_extents = -1; - if (prev_bit == 1) { + if (prev_bit_set) { /* Leftover on the left. */ new_extents++; } - if (next_bit == 1) { + if (next_bit_set) { /* Leftover on the right. */ new_extents++; } } else { new_extents = 1; - if (prev_bit == 1) { + if (prev_bit_set) { /* Merging with neighbor on the left. */ new_extents--; } - if (next_bit == 1) { + if (next_bit_set) { /* Merging with neighbor on the right. 
*/ new_extents--; } } btrfs_release_path(path); - ret = update_free_space_extent_count(trans, block_group, path, - new_extents); - -out: - return ret; + return update_free_space_extent_count(trans, block_group, path, new_extents); } static int remove_free_space_extent(struct btrfs_trans_handle *trans, @@ -739,7 +731,7 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans, ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); if (ret) - goto out; + return ret; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); @@ -771,7 +763,7 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans, /* Delete the existing key (cases 1-4). */ ret = btrfs_del_item(trans, root, path); if (ret) - goto out; + return ret; /* Add a key for leftovers at the beginning (cases 3 and 4). */ if (start > found_start) { @@ -782,7 +774,7 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans, btrfs_release_path(path); ret = btrfs_insert_empty_item(trans, root, path, &key, 0); if (ret) - goto out; + return ret; new_extents++; } @@ -795,50 +787,58 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans, btrfs_release_path(path); ret = btrfs_insert_empty_item(trans, root, path, &key, 0); if (ret) - goto out; + return ret; new_extents++; } btrfs_release_path(path); - ret = update_free_space_extent_count(trans, block_group, path, - new_extents); - -out: - return ret; + return update_free_space_extent_count(trans, block_group, path, new_extents); } -EXPORT_FOR_TESTS -int __remove_from_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group, - struct btrfs_path *path, u64 start, u64 size) +static int using_bitmaps(struct btrfs_block_group *bg, struct btrfs_path *path) { struct btrfs_free_space_info *info; u32 flags; - int ret; - if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) { - ret = __add_block_group_free_space(trans, block_group, path); - if (ret) - return ret; - } + if (bg->using_free_space_bitmaps_cached) + return bg->using_free_space_bitmaps; - info = search_free_space_info(NULL, block_group, path, 0); + info = btrfs_search_free_space_info(NULL, bg, path, 0); if (IS_ERR(info)) return PTR_ERR(info); flags = btrfs_free_space_flags(path->nodes[0], info); btrfs_release_path(path); - if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) { + bg->using_free_space_bitmaps = (flags & BTRFS_FREE_SPACE_USING_BITMAPS); + bg->using_free_space_bitmaps_cached = true; + + return bg->using_free_space_bitmaps; +} + +EXPORT_FOR_TESTS +int __btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group, + struct btrfs_path *path, u64 start, u64 size) +{ + int ret; + + ret = __add_block_group_free_space(trans, block_group, path); + if (ret) + return ret; + + ret = using_bitmaps(block_group, path); + if (ret < 0) + return ret; + + if (ret) return modify_free_space_bitmap(trans, block_group, path, - start, size, 1); - } else { - return remove_free_space_extent(trans, block_group, path, - start, size); - } + start, size, true); + + return remove_free_space_extent(trans, block_group, path, start, size); } -int remove_from_free_space_tree(struct btrfs_trans_handle *trans, - u64 start, u64 size) +int btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans, + u64 start, u64 size) { struct btrfs_block_group *block_group; struct btrfs_path *path; @@ -863,8 +863,7 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans, } 
mutex_lock(&block_group->free_space_lock); - ret = __remove_from_free_space_tree(trans, block_group, path, start, - size); + ret = __btrfs_remove_from_free_space_tree(trans, block_group, path, start, size); mutex_unlock(&block_group->free_space_lock); if (ret) btrfs_abort_transaction(trans, ret); @@ -918,7 +917,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans, ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); if (ret) - goto out; + return ret; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); @@ -941,7 +940,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans, if (found_end == start) { ret = btrfs_del_item(trans, root, path); if (ret) - goto out; + return ret; new_key.objectid = found_start; new_key.offset += key.offset; new_extents--; @@ -958,7 +957,7 @@ right: ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); if (ret) - goto out; + return ret; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); @@ -982,7 +981,7 @@ right: if (found_start == end) { ret = btrfs_del_item(trans, root, path); if (ret) - goto out; + return ret; new_key.offset += key.offset; new_extents--; } @@ -992,48 +991,36 @@ insert: /* Insert the new key (cases 1-4). */ ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0); if (ret) - goto out; + return ret; btrfs_release_path(path); - ret = update_free_space_extent_count(trans, block_group, path, - new_extents); - -out: - return ret; + return update_free_space_extent_count(trans, block_group, path, new_extents); } EXPORT_FOR_TESTS -int __add_to_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group, - struct btrfs_path *path, u64 start, u64 size) +int __btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group, + struct btrfs_path *path, u64 start, u64 size) { - struct btrfs_free_space_info *info; - u32 flags; int ret; - if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) { - ret = __add_block_group_free_space(trans, block_group, path); - if (ret) - return ret; - } + ret = __add_block_group_free_space(trans, block_group, path); + if (ret) + return ret; - info = search_free_space_info(NULL, block_group, path, 0); - if (IS_ERR(info)) - return PTR_ERR(info); - flags = btrfs_free_space_flags(path->nodes[0], info); - btrfs_release_path(path); + ret = using_bitmaps(block_group, path); + if (ret < 0) + return ret; - if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) { + if (ret) return modify_free_space_bitmap(trans, block_group, path, - start, size, 0); - } else { - return add_free_space_extent(trans, block_group, path, start, - size); - } + start, size, false); + + return add_free_space_extent(trans, block_group, path, start, size); } -int add_to_free_space_tree(struct btrfs_trans_handle *trans, - u64 start, u64 size) +int btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans, + u64 start, u64 size) { struct btrfs_block_group *block_group; struct btrfs_path *path; @@ -1058,7 +1045,7 @@ int add_to_free_space_tree(struct btrfs_trans_handle *trans, } mutex_lock(&block_group->free_space_lock); - ret = __add_to_free_space_tree(trans, block_group, path, start, size); + ret = __btrfs_add_to_free_space_tree(trans, block_group, path, start, size); mutex_unlock(&block_group->free_space_lock); if (ret) btrfs_abort_transaction(trans, ret); @@ -1138,11 +1125,11 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, break; if (start < key.objectid) { - ret = 
__add_to_free_space_tree(trans, - block_group, - path2, start, - key.objectid - - start); + ret = __btrfs_add_to_free_space_tree(trans, + block_group, + path2, start, + key.objectid - + start); if (ret) goto out_locked; } @@ -1161,8 +1148,8 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, goto out_locked; } if (start < end) { - ret = __add_to_free_space_tree(trans, block_group, path2, - start, end - start); + ret = __btrfs_add_to_free_space_tree(trans, block_group, path2, + start, end - start); if (ret) goto out_locked; } @@ -1403,9 +1390,12 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path) { + bool own_path = false; int ret; - clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags); + if (!test_and_clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, + &block_group->runtime_flags)) + return 0; /* * While rebuilding the free space tree we may allocate new metadata @@ -1430,47 +1420,49 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans, */ set_bit(BLOCK_GROUP_FLAG_FREE_SPACE_ADDED, &block_group->runtime_flags); + if (!path) { + path = btrfs_alloc_path(); + if (!path) { + btrfs_abort_transaction(trans, -ENOMEM); + return -ENOMEM; + } + own_path = true; + } + ret = add_new_free_space_info(trans, block_group, path); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; + } + + ret = __btrfs_add_to_free_space_tree(trans, block_group, path, + block_group->start, block_group->length); if (ret) - return ret; + btrfs_abort_transaction(trans, ret); - return __add_to_free_space_tree(trans, block_group, path, - block_group->start, - block_group->length); +out: + if (own_path) + btrfs_free_path(path); + + return ret; } -int add_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group) +int btrfs_add_block_group_free_space(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group) { - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_path *path = NULL; - int ret = 0; + int ret; - if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) + if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE)) return 0; mutex_lock(&block_group->free_space_lock); - if (!test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) - goto out; - - path = btrfs_alloc_path(); - if (!path) { - ret = -ENOMEM; - goto out; - } - - ret = __add_block_group_free_space(trans, block_group, path); - -out: - btrfs_free_path(path); + ret = __add_block_group_free_space(trans, block_group, NULL); mutex_unlock(&block_group->free_space_lock); - if (ret) - btrfs_abort_transaction(trans, ret); return ret; } -int remove_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group) +int btrfs_remove_block_group_free_space(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group) { struct btrfs_root *root = btrfs_free_space_root(block_group); struct btrfs_path *path; @@ -1491,6 +1483,7 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); goto out; } @@ -1503,8 +1496,10 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans, while (!done) { ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto out; + } leaf = path->nodes[0]; nr = 0; @@ -1532,16 +1527,16 @@ int 
remove_block_group_free_space(struct btrfs_trans_handle *trans, } ret = btrfs_del_items(trans, root, path, path->slots[0], nr); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto out; + } btrfs_release_path(path); } ret = 0; out: btrfs_free_path(path); - if (ret) - btrfs_abort_transaction(trans, ret); return ret; } @@ -1553,7 +1548,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, struct btrfs_fs_info *fs_info; struct btrfs_root *root; struct btrfs_key key; - int prev_bit = 0, bit; + bool prev_bit_set = false; /* Initialize to silence GCC. */ u64 extent_start = 0; u64 end, offset; @@ -1570,7 +1565,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, while (1) { ret = btrfs_next_item(root, path); if (ret < 0) - goto out; + return ret; if (ret) break; @@ -1584,10 +1579,12 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, offset = key.objectid; while (offset < key.objectid + key.offset) { - bit = free_space_test_bit(block_group, path, offset); - if (prev_bit == 0 && bit == 1) { + bool bit_set; + + bit_set = btrfs_free_space_test_bit(block_group, path, offset); + if (!prev_bit_set && bit_set) { extent_start = offset; - } else if (prev_bit == 1 && bit == 0) { + } else if (prev_bit_set && !bit_set) { u64 space_added; ret = btrfs_add_new_free_space(block_group, @@ -1595,7 +1592,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, offset, &space_added); if (ret) - goto out; + return ret; total_found += space_added; if (total_found > CACHING_CTL_WAKE_UP) { total_found = 0; @@ -1603,14 +1600,14 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, } extent_count++; } - prev_bit = bit; + prev_bit_set = bit_set; offset += fs_info->sectorsize; } } - if (prev_bit == 1) { + if (prev_bit_set) { ret = btrfs_add_new_free_space(block_group, extent_start, end, NULL); if (ret) - goto out; + return ret; extent_count++; } @@ -1620,13 +1617,10 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, block_group->start, extent_count, expected_extent_count); DEBUG_WARN(); - ret = -EIO; - goto out; + return -EIO; } - ret = 0; -out: - return ret; + return 0; } static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, @@ -1653,7 +1647,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, ret = btrfs_next_item(root, path); if (ret < 0) - goto out; + return ret; if (ret) break; @@ -1669,7 +1663,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, key.objectid + key.offset, &space_added); if (ret) - goto out; + return ret; total_found += space_added; if (total_found > CACHING_CTL_WAKE_UP) { total_found = 0; @@ -1684,16 +1678,13 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, block_group->start, extent_count, expected_extent_count); DEBUG_WARN(); - ret = -EIO; - goto out; + return -EIO; } - ret = 0; -out: - return ret; + return 0; } -int load_free_space_tree(struct btrfs_caching_control *caching_ctl) +int btrfs_load_free_space_tree(struct btrfs_caching_control *caching_ctl) { struct btrfs_block_group *block_group; struct btrfs_free_space_info *info; @@ -1714,7 +1705,7 @@ int load_free_space_tree(struct btrfs_caching_control *caching_ctl) path->search_commit_root = 1; path->reada = READA_FORWARD; - info = search_free_space_info(NULL, block_group, path, 0); + info = btrfs_search_free_space_info(NULL, block_group, path, 0); if 
(IS_ERR(info)) return PTR_ERR(info); diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h index e6c6d6f4f221..3d9a5d4477fc 100644 --- a/fs/btrfs/free-space-tree.h +++ b/fs/btrfs/free-space-tree.h @@ -22,39 +22,39 @@ struct btrfs_trans_handle; #define BTRFS_FREE_SPACE_BITMAP_SIZE 256 #define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE) -void set_free_space_tree_thresholds(struct btrfs_block_group *block_group); +void btrfs_set_free_space_tree_thresholds(struct btrfs_block_group *block_group); int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info); int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info); int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info); -int load_free_space_tree(struct btrfs_caching_control *caching_ctl); -int add_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group); -int remove_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group); -int add_to_free_space_tree(struct btrfs_trans_handle *trans, - u64 start, u64 size); -int remove_from_free_space_tree(struct btrfs_trans_handle *trans, - u64 start, u64 size); +int btrfs_load_free_space_tree(struct btrfs_caching_control *caching_ctl); +int btrfs_add_block_group_free_space(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group); +int btrfs_remove_block_group_free_space(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group); +int btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans, + u64 start, u64 size); +int btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans, + u64 start, u64 size); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct btrfs_free_space_info * -search_free_space_info(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group, - struct btrfs_path *path, int cow); -int __add_to_free_space_tree(struct btrfs_trans_handle *trans, +btrfs_search_free_space_info(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, - struct btrfs_path *path, u64 start, u64 size); -int __remove_from_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group, - struct btrfs_path *path, u64 start, u64 size); -int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group, - struct btrfs_path *path); -int convert_free_space_to_extents(struct btrfs_trans_handle *trans, - struct btrfs_block_group *block_group, - struct btrfs_path *path); -int free_space_test_bit(struct btrfs_block_group *block_group, - struct btrfs_path *path, u64 offset); + struct btrfs_path *path, int cow); +int __btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group, + struct btrfs_path *path, u64 start, u64 size); +int __btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group, + struct btrfs_path *path, u64 start, u64 size); +int btrfs_convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group, + struct btrfs_path *path); +int btrfs_convert_free_space_to_extents(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group, + struct btrfs_path *path); +bool btrfs_free_space_test_bit(struct btrfs_block_group *block_group, + struct btrfs_path *path, u64 offset); #endif #endif diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h index 4394de12a767..8cc07cc70b12 100644 --- a/fs/btrfs/fs.h +++ 
b/fs/btrfs/fs.h @@ -420,6 +420,8 @@ struct btrfs_commit_stats { u64 last_commit_dur; /* The total commit duration in ns */ u64 total_commit_dur; + /* Start of the last critical section in ns. */ + u64 critical_section_start_time; }; struct btrfs_fs_info { @@ -713,8 +715,6 @@ struct btrfs_fs_info { u32 data_chunk_allocations; u32 metadata_ratio; - void *bdev_holder; - /* Private scrub information */ struct mutex scrub_lock; atomic_t scrubs_running; @@ -739,12 +739,6 @@ struct btrfs_fs_info { spinlock_t qgroup_lock; /* - * Used to avoid frequently calling ulist_alloc()/ulist_free() - * when doing qgroup accounting, it must be protected by qgroup_lock. - */ - struct ulist *qgroup_ulist; - - /* * Protect user change for quota operations. If a transaction is needed, * it must be started before locking this lock. */ @@ -779,7 +773,7 @@ struct btrfs_fs_info { struct btrfs_delayed_root *delayed_root; - /* Entries are eb->start / sectorsize */ + /* Entries are eb->start >> nodesize_bits */ struct xarray buffer_tree; /* Next backup root to be overwritten */ @@ -811,6 +805,7 @@ struct btrfs_fs_info { /* Cached block sizes */ u32 nodesize; + u32 nodesize_bits; u32 sectorsize; /* ilog2 of sectorsize, use to avoid 64bit division */ u32 sectorsize_bits; diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index a61c3540d67b..f06cf701ae5a 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -78,13 +78,10 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( } /* Returns NULL if no extref found */ -struct btrfs_inode_extref * -btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - const struct fscrypt_str *name, - u64 inode_objectid, u64 ref_objectid, int ins_len, - int cow) +struct btrfs_inode_extref *btrfs_lookup_inode_extref(struct btrfs_root *root, + struct btrfs_path *path, + const struct fscrypt_str *name, + u64 inode_objectid, u64 ref_objectid) { int ret; struct btrfs_key key; @@ -93,7 +90,7 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, key.type = BTRFS_INODE_EXTREF_KEY; key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len); - ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ERR_PTR(ret); if (ret > 0) @@ -720,13 +717,12 @@ delete: } out: if (ret >= 0 && pending_del_nr) { - int err; + int ret2; - err = btrfs_del_items(trans, root, path, pending_del_slot, - pending_del_nr); - if (err) { - btrfs_abort_transaction(trans, err); - ret = err; + ret2 = btrfs_del_items(trans, root, path, pending_del_slot, pending_del_nr); + if (ret2) { + btrfs_abort_transaction(trans, ret2); + ret = ret2; } } diff --git a/fs/btrfs/inode-item.h b/fs/btrfs/inode-item.h index c11b97fdccc4..6d9f5ad20646 100644 --- a/fs/btrfs/inode-item.h +++ b/fs/btrfs/inode-item.h @@ -101,13 +101,10 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *location, int mod); -struct btrfs_inode_extref *btrfs_lookup_inode_extref( - struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - const struct fscrypt_str *name, - u64 inode_objectid, u64 ref_objectid, int ins_len, - int cow); +struct btrfs_inode_extref *btrfs_lookup_inode_extref(struct btrfs_root *root, + struct btrfs_path *path, + const struct fscrypt_str *name, + u64 inode_objectid, u64 ref_objectid); struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer 
*leaf, int slot, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fc66872b4c74..b77dd22b8cdb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -308,7 +308,7 @@ static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode, const u32 csum_size = root->fs_info->csum_size; /* For data reloc tree, it's better to do a backref lookup instead. */ - if (btrfs_root_id(root) == BTRFS_DATA_RELOC_TREE_OBJECTID) + if (btrfs_is_data_reloc_root(root)) return print_data_reloc_error(inode, logical_start, csum, csum_expected, mirror_num); @@ -395,8 +395,8 @@ void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags) static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode, u64 offset, u64 bytes) { - unsigned long index = offset >> PAGE_SHIFT; - unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; + pgoff_t index = offset >> PAGE_SHIFT; + const pgoff_t end_index = (offset + bytes - 1) >> PAGE_SHIFT; struct folio *folio; while (index <= end_index) { @@ -423,18 +423,18 @@ static int btrfs_dirty_inode(struct btrfs_inode *inode); static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, struct btrfs_new_inode_args *args) { - int err; + int ret; if (args->default_acl) { - err = __btrfs_set_acl(trans, args->inode, args->default_acl, + ret = __btrfs_set_acl(trans, args->inode, args->default_acl, ACL_TYPE_DEFAULT); - if (err) - return err; + if (ret) + return ret; } if (args->acl) { - err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS); - if (err) - return err; + ret = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS); + if (ret) + return ret; } if (!args->default_acl && !args->acl) cache_no_acl(args->inode); @@ -781,12 +781,15 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start, return 0; } + /* Defrag ioctl takes precedence over mount options and properties. 
*/ + if (inode->defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS) + return 0; + if (BTRFS_COMPRESS_NONE < inode->defrag_compress && + inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) + return 1; /* force compress */ if (btrfs_test_opt(fs_info, FORCE_COMPRESS)) return 1; - /* defrag ioctl */ - if (inode->defrag_compress) - return 1; /* bad compression ratios */ if (inode->flags & BTRFS_INODE_NOCOMPRESS) return 0; @@ -808,12 +811,11 @@ static inline void inode_should_defrag(struct btrfs_inode *inode, static int extent_range_clear_dirty_for_io(struct btrfs_inode *inode, u64 start, u64 end) { - unsigned long end_index = end >> PAGE_SHIFT; + const pgoff_t end_index = end >> PAGE_SHIFT; struct folio *folio; int ret = 0; - for (unsigned long index = start >> PAGE_SHIFT; - index <= end_index; index++) { + for (pgoff_t index = start >> PAGE_SHIFT; index <= end_index; index++) { folio = filemap_get_folio(inode->vfs_inode.i_mapping, index); if (IS_ERR(folio)) { if (!ret) @@ -943,7 +945,7 @@ again: goto cleanup_and_bail_uncompressed; } - if (inode->defrag_compress) { + if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) { compress_type = inode->defrag_compress; compress_level = inode->defrag_compress_level; } else if (inode->prop_compress) { @@ -1755,7 +1757,8 @@ static int fallback_to_cow(struct btrfs_inode *inode, spin_unlock(&sinfo->lock); if (count > 0) - btrfs_clear_extent_bits(io_tree, start, end, EXTENT_NORESERVE); + btrfs_clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, + &cached_state); } btrfs_unlock_extent(io_tree, start, end, &cached_state); @@ -2328,8 +2331,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol * The range must cover part of the @locked_folio, or a return of 1 * can confuse the caller. 
*/ - ASSERT(!(end <= folio_pos(locked_folio) || - start >= folio_pos(locked_folio) + folio_size(locked_folio))); + ASSERT(!(end <= folio_pos(locked_folio) || start >= folio_end(locked_folio))); if (should_nocow(inode, start, end)) { ret = run_delalloc_nocow(inode, locked_folio, start, end); @@ -2737,7 +2739,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) struct btrfs_inode *inode = fixup->inode; struct btrfs_fs_info *fs_info = inode->root->fs_info; u64 page_start = folio_pos(folio); - u64 page_end = folio_pos(folio) + folio_size(folio) - 1; + u64 page_end = folio_end(folio) - 1; int ret = 0; bool free_delalloc_space = true; @@ -2881,7 +2883,7 @@ int btrfs_writepage_cow_fixup(struct folio *folio) DEBUG_WARN(); btrfs_err_rl(fs_info, "root %lld ino %llu folio %llu is marked dirty without notifying the fs", - BTRFS_I(inode)->root->root_key.objectid, + btrfs_root_id(BTRFS_I(inode)->root), btrfs_ino(BTRFS_I(inode)), folio_pos(folio)); return -EUCLEAN; @@ -3375,8 +3377,8 @@ bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, NULL)) { /* Skip the range without csum for data reloc inode */ - btrfs_clear_extent_bits(&inode->io_tree, file_offset, end, - EXTENT_NODATASUM); + btrfs_clear_extent_bit(&inode->io_tree, file_offset, end, + EXTENT_NODATASUM, NULL); return true; } @@ -3946,6 +3948,7 @@ static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), &inode->flags, &inode->ro_flags); btrfs_update_inode_mapping_flags(inode); + btrfs_set_inode_mapping_order(inode); cache_index: /* @@ -4078,45 +4081,35 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, struct btrfs_inode_item *item, struct inode *inode) { - struct btrfs_map_token token; u64 flags; - btrfs_init_map_token(&token, leaf); - - btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); - btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); - btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); - btrfs_set_token_inode_mode(&token, item, inode->i_mode); - btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); - - btrfs_set_token_timespec_sec(&token, &item->atime, - inode_get_atime_sec(inode)); - btrfs_set_token_timespec_nsec(&token, &item->atime, - inode_get_atime_nsec(inode)); - - btrfs_set_token_timespec_sec(&token, &item->mtime, - inode_get_mtime_sec(inode)); - btrfs_set_token_timespec_nsec(&token, &item->mtime, - inode_get_mtime_nsec(inode)); - - btrfs_set_token_timespec_sec(&token, &item->ctime, - inode_get_ctime_sec(inode)); - btrfs_set_token_timespec_nsec(&token, &item->ctime, - inode_get_ctime_nsec(inode)); - - btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec); - btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec); - - btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); - btrfs_set_token_inode_generation(&token, item, - BTRFS_I(inode)->generation); - btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); - btrfs_set_token_inode_transid(&token, item, trans->transid); - btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); + btrfs_set_inode_uid(leaf, item, i_uid_read(inode)); + btrfs_set_inode_gid(leaf, item, i_gid_read(inode)); + btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); + btrfs_set_inode_mode(leaf, item, inode->i_mode); + btrfs_set_inode_nlink(leaf, item, inode->i_nlink); + + 
btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode)); + btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode)); + + btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode)); + btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode)); + + btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode)); + btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode)); + + btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec); + btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec); + + btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); + btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); + btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode)); + btrfs_set_inode_transid(leaf, item, trans->transid); + btrfs_set_inode_rdev(leaf, item, inode->i_rdev); flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, BTRFS_I(inode)->ro_flags); - btrfs_set_token_inode_flags(&token, item, flags); - btrfs_set_token_inode_block_group(&token, item, 0); + btrfs_set_inode_flags(leaf, item, flags); + btrfs_set_inode_block_group(leaf, item, 0); } /* @@ -4215,20 +4208,22 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, u64 dir_ino = btrfs_ino(dir); path = btrfs_alloc_path(); - if (!path) { - ret = -ENOMEM; - goto out; - } + if (!path) + return -ENOMEM; di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); if (IS_ERR_OR_NULL(di)) { - ret = di ? PTR_ERR(di) : -ENOENT; - goto err; + btrfs_free_path(path); + return di ? PTR_ERR(di) : -ENOENT; } ret = btrfs_delete_one_dir_name(trans, root, path, di); + /* + * Down the call chains below we'll also need to allocate a path, so no + * need to hold on to this one for longer than necessary. + */ + btrfs_free_path(path); if (ret) - goto err; - btrfs_release_path(path); + return ret; /* * If we don't have dir index, we have to get it by looking up @@ -4254,7 +4249,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, "failed to delete reference to %.*s, root %llu inode %llu parent %llu", name->len, name->name, btrfs_root_id(root), ino, dir_ino); btrfs_abort_transaction(trans, ret); - goto err; + return ret; } skip_backref: if (rename_ctx) @@ -4263,7 +4258,7 @@ skip_backref: ret = btrfs_delete_delayed_dir_index(trans, dir, index); if (ret) { btrfs_abort_transaction(trans, ret); - goto err; + return ret; } /* @@ -4287,19 +4282,14 @@ skip_backref: * holding. 
*/ btrfs_run_delayed_iput(fs_info, inode); -err: - btrfs_free_path(path); - if (ret) - goto out; btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); inode_inc_iversion(&inode->vfs_inode); inode_set_ctime_current(&inode->vfs_inode); inode_inc_iversion(&dir->vfs_inode); inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode)); - ret = btrfs_update_inode(trans, dir); -out: - return ret; + + return btrfs_update_inode(trans, dir); } int btrfs_unlink_inode(struct btrfs_trans_handle *trans, @@ -4704,32 +4694,33 @@ out_up_write: return ret; } -static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) +static int btrfs_rmdir(struct inode *vfs_dir, struct dentry *dentry) { - struct inode *inode = d_inode(dentry); - struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_inode *dir = BTRFS_I(vfs_dir); + struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); + struct btrfs_fs_info *fs_info = inode->root->fs_info; int ret = 0; struct btrfs_trans_handle *trans; struct fscrypt_name fname; - if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) + if (inode->vfs_inode.i_size > BTRFS_EMPTY_DIR_SIZE) return -ENOTEMPTY; - if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { btrfs_err(fs_info, "extent tree v2 doesn't support snapshot deletion yet"); return -EOPNOTSUPP; } - return btrfs_delete_subvolume(BTRFS_I(dir), dentry); + return btrfs_delete_subvolume(dir, dentry); } - ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); + ret = fscrypt_setup_filename(vfs_dir, &dentry->d_name, 1, &fname); if (ret) return ret; /* This needs to handle no-key deletions later on */ - trans = __unlink_start_trans(BTRFS_I(dir)); + trans = __unlink_start_trans(dir); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_notrans; @@ -4749,23 +4740,22 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) * This is because we can't unlink other roots when replaying the dir * deletes for directory foo. 
*/ - if (BTRFS_I(inode)->last_unlink_trans >= trans->transid) - btrfs_record_snapshot_destroy(trans, BTRFS_I(dir)); + if (inode->last_unlink_trans >= trans->transid) + btrfs_record_snapshot_destroy(trans, dir); - if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { - ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); + if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { + ret = btrfs_unlink_subvol(trans, dir, dentry); goto out; } - ret = btrfs_orphan_add(trans, BTRFS_I(inode)); + ret = btrfs_orphan_add(trans, inode); if (ret) goto out; /* now the directory is empty */ - ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), - &fname.disk_name); + ret = btrfs_unlink_inode(trans, dir, inode, &fname.disk_name); if (!ret) - btrfs_i_size_write(BTRFS_I(inode), 0); + btrfs_i_size_write(inode, 0); out: btrfs_end_transaction(trans); out_notrans: @@ -4821,9 +4811,9 @@ again: */ zero_start = max_t(u64, folio_pos(folio), start); - zero_end = folio_pos(folio) + folio_size(folio) - 1; + zero_end = folio_end(folio); folio_zero_range(folio, zero_start - folio_pos(folio), - zero_end - zero_start + 1); + zero_end - zero_start); out_unlock: folio_unlock(folio); @@ -4861,7 +4851,6 @@ int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 e pgoff_t index = (offset >> PAGE_SHIFT); struct folio *folio; gfp_t mask = btrfs_alloc_write_mask(mapping); - size_t write_bytes = blocksize; int ret = 0; const bool in_head_block = is_inside_block(offset, round_down(start, blocksize), blocksize); @@ -4913,8 +4902,12 @@ int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 e ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, blocksize, false); if (ret < 0) { + size_t write_bytes = blocksize; + if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) { - /* For nocow case, no need to reserve data space */ + /* For nocow case, no need to reserve data space. */ + ASSERT(write_bytes == blocksize, "write_bytes=%zu blocksize=%u", + write_bytes, blocksize); only_release_metadata = true; } else { goto out; @@ -5001,8 +4994,7 @@ again: * not reach disk, it still affects our page caches. */ zero_start = max_t(u64, folio_pos(folio), start); - zero_end = min_t(u64, folio_pos(folio) + folio_size(folio) - 1, - end); + zero_end = min_t(u64, folio_end(folio) - 1, end); } else { zero_start = max_t(u64, block_start, start); zero_end = min_t(u64, block_end, end); @@ -5014,11 +5006,12 @@ again: block_end + 1 - block_start); btrfs_folio_set_dirty(fs_info, folio, block_start, block_end + 1 - block_start); - btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state); if (only_release_metadata) btrfs_set_extent_bit(&inode->io_tree, block_start, block_end, - EXTENT_NORESERVE, NULL); + EXTENT_NORESERVE, &cached_state); + + btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state); out_unlock: if (ret) { @@ -5256,7 +5249,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize); if (ret && inode->i_nlink) { - int err; + int ret2; /* * Truncate failed, so fix up the in-memory size. We @@ -5264,9 +5257,9 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) * wait for disk_i_size to be stable and then update the * in-memory size to match. 
*/ - err = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); - if (err) - return err; + ret2 = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); + if (ret2) + return ret2; i_size_write(inode, BTRFS_I(inode)->disk_i_size); } } @@ -5279,31 +5272,31 @@ static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, { struct inode *inode = d_inode(dentry); struct btrfs_root *root = BTRFS_I(inode)->root; - int err; + int ret; if (btrfs_root_readonly(root)) return -EROFS; - err = setattr_prepare(idmap, dentry, attr); - if (err) - return err; + ret = setattr_prepare(idmap, dentry, attr); + if (ret) + return ret; if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { - err = btrfs_setsize(inode, attr); - if (err) - return err; + ret = btrfs_setsize(inode, attr); + if (ret) + return ret; } if (attr->ia_valid) { setattr_copy(idmap, inode, attr); inode_inc_iversion(inode); - err = btrfs_dirty_inode(BTRFS_I(inode)); + ret = btrfs_dirty_inode(BTRFS_I(inode)); - if (!err && attr->ia_valid & ATTR_MODE) - err = posix_acl_chmod(idmap, dentry, inode->i_mode); + if (!ret && attr->ia_valid & ATTR_MODE) + ret = posix_acl_chmod(idmap, dentry, inode->i_mode); } - return err; + return ret; } /* @@ -5437,7 +5430,7 @@ void btrfs_evict_inode(struct inode *inode) struct btrfs_fs_info *fs_info; struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_block_rsv *rsv = NULL; + struct btrfs_block_rsv rsv; int ret; trace_btrfs_inode_evict(inode); @@ -5485,11 +5478,9 @@ void btrfs_evict_inode(struct inode *inode) */ btrfs_kill_delayed_inode_items(BTRFS_I(inode)); - rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); - if (!rsv) - goto out; - rsv->size = btrfs_calc_metadata_size(fs_info, 1); - rsv->failfast = true; + btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP); + rsv.size = btrfs_calc_metadata_size(fs_info, 1); + rsv.failfast = true; btrfs_i_size_write(BTRFS_I(inode), 0); @@ -5501,11 +5492,11 @@ void btrfs_evict_inode(struct inode *inode) .min_type = 0, }; - trans = evict_refill_and_join(root, rsv); + trans = evict_refill_and_join(root, &rsv); if (IS_ERR(trans)) - goto out; + goto out_release; - trans->block_rsv = rsv; + trans->block_rsv = &rsv; ret = btrfs_truncate_inode_items(trans, root, &control); trans->block_rsv = &fs_info->trans_block_rsv; @@ -5517,7 +5508,7 @@ void btrfs_evict_inode(struct inode *inode) */ btrfs_btree_balance_dirty_nodelay(fs_info); if (ret && ret != -ENOSPC && ret != -EAGAIN) - goto out; + goto out_release; else if (!ret) break; } @@ -5531,16 +5522,17 @@ void btrfs_evict_inode(struct inode *inode) * If it turns out that we are dropping too many of these, we might want * to add a mechanism for retrying these after a commit. */ - trans = evict_refill_and_join(root, rsv); + trans = evict_refill_and_join(root, &rsv); if (!IS_ERR(trans)) { - trans->block_rsv = rsv; + trans->block_rsv = &rsv; btrfs_orphan_del(trans, BTRFS_I(inode)); trans->block_rsv = &fs_info->trans_block_rsv; btrfs_end_transaction(trans); } +out_release: + btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL); out: - btrfs_free_block_rsv(fs_info, rsv); /* * If we didn't successfully delete, the orphan item will still be in * the tree and we'll retry on the next mount. 
Again, we might also want @@ -6173,8 +6165,7 @@ again: if (ret) goto nopos; - ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); - if (ret) + if (btrfs_readdir_delayed_dir_index(ctx, &ins_list)) goto nopos; /* @@ -6467,6 +6458,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | BTRFS_INODE_NODATASUM; btrfs_update_inode_mapping_flags(BTRFS_I(inode)); + btrfs_set_inode_mapping_order(BTRFS_I(inode)); } ret = btrfs_insert_inode_locked(inode); @@ -6610,13 +6602,17 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, if (args->orphan) { ret = btrfs_orphan_add(trans, BTRFS_I(inode)); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto discard; + } } else { ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 0, BTRFS_I(inode)->dir_index); - } - if (ret) { - btrfs_abort_transaction(trans, ret); - goto discard; + if (ret) { + btrfs_abort_transaction(trans, ret); + goto discard; + } } return 0; @@ -6703,20 +6699,18 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, fail_dir_item: if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { u64 local_index; - int err; - err = btrfs_del_root_ref(trans, key.objectid, - btrfs_root_id(root), parent_ino, - &local_index, name); - if (err) - btrfs_abort_transaction(trans, err); + int ret2; + + ret2 = btrfs_del_root_ref(trans, key.objectid, btrfs_root_id(root), + parent_ino, &local_index, name); + if (ret2) + btrfs_abort_transaction(trans, ret2); } else if (add_backref) { - u64 local_index; - int err; + int ret2; - err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, - &local_index); - if (err) - btrfs_abort_transaction(trans, err); + ret2 = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, NULL); + if (ret2) + btrfs_abort_transaction(trans, ret2); } /* Return the original error code */ @@ -6735,20 +6729,20 @@ static int btrfs_create_common(struct inode *dir, struct dentry *dentry, }; unsigned int trans_num_items; struct btrfs_trans_handle *trans; - int err; + int ret; - err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); - if (err) + ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); + if (ret) goto out_inode; trans = btrfs_start_transaction(root, trans_num_items); if (IS_ERR(trans)) { - err = PTR_ERR(trans); + ret = PTR_ERR(trans); goto out_new_inode_args; } - err = btrfs_create_new_inode(trans, &new_inode_args); - if (!err) + ret = btrfs_create_new_inode(trans, &new_inode_args); + if (!ret) d_instantiate_new(dentry, inode); btrfs_end_transaction(trans); @@ -6756,9 +6750,9 @@ static int btrfs_create_common(struct inode *dir, struct dentry *dentry, out_new_inode_args: btrfs_new_inode_args_destroy(&new_inode_args); out_inode: - if (err) + if (ret) iput(inode); - return err; + return ret; } static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir, @@ -6799,7 +6793,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct fscrypt_name fname; u64 index; - int err; + int ret; int drop_inode = 0; /* do not allow sys_link's with other subvols of the same device */ @@ -6809,12 +6803,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (inode->i_nlink >= BTRFS_LINK_MAX) return -EMLINK; - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); - if (err) + ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); + if (ret) goto fail; - err = btrfs_set_inode_index(BTRFS_I(dir), &index); - if (err) + ret = 
btrfs_set_inode_index(BTRFS_I(dir), &index); + if (ret) goto fail; /* @@ -6825,7 +6819,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, */ trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); if (IS_ERR(trans)) { - err = PTR_ERR(trans); + ret = PTR_ERR(trans); trans = NULL; goto fail; } @@ -6838,24 +6832,24 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, ihold(inode); set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); - err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), + ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), &fname.disk_name, 1, index); - if (err) { + if (ret) { drop_inode = 1; } else { struct dentry *parent = dentry->d_parent; - err = btrfs_update_inode(trans, BTRFS_I(inode)); - if (err) + ret = btrfs_update_inode(trans, BTRFS_I(inode)); + if (ret) goto fail; if (inode->i_nlink == 1) { /* * If new hard link count is 1, it's a file created * with open(2) O_TMPFILE flag. */ - err = btrfs_orphan_del(trans, BTRFS_I(inode)); - if (err) + ret = btrfs_orphan_del(trans, BTRFS_I(inode)); + if (ret) goto fail; } d_instantiate(dentry, inode); @@ -6871,7 +6865,7 @@ fail: iput(inode); } btrfs_btree_balance_dirty(fs_info); - return err; + return ret; } static struct dentry *btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, @@ -7364,13 +7358,13 @@ struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start, static void wait_subpage_spinlock(struct folio *folio) { struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); - struct btrfs_subpage *subpage; + struct btrfs_folio_state *bfs; if (!btrfs_is_subpage(fs_info, folio)) return; ASSERT(folio_test_private(folio) && folio_get_private(folio)); - subpage = folio_get_private(folio); + bfs = folio_get_private(folio); /* * This may look insane as we just acquire the spinlock and release it, @@ -7383,8 +7377,8 @@ static void wait_subpage_spinlock(struct folio *folio) * Here we just acquire the spinlock so that all existing callers * should exit and we're safe to release/invalidate the page. */ - spin_lock_irq(&subpage->lock); - spin_unlock_irq(&subpage->lock); + spin_lock_irq(&bfs->lock); + spin_unlock_irq(&bfs->lock); } static int btrfs_launder_folio(struct folio *folio) @@ -7607,7 +7601,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) }; struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; - struct btrfs_block_rsv *rsv; + struct btrfs_block_rsv rsv; int ret; struct btrfs_trans_handle *trans; u64 mask = fs_info->sectorsize - 1; @@ -7649,11 +7643,9 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) * 2) fs_info->trans_block_rsv - this will have 1 items worth left for * updating the inode. 
*/ - rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); - if (!rsv) - return -ENOMEM; - rsv->size = min_size; - rsv->failfast = true; + btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP); + rsv.size = min_size; + rsv.failfast = true; /* * 1 for the truncate slack space @@ -7666,7 +7658,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) } /* Migrate the slack space for the truncate to our reserve */ - ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, + ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv, min_size, false); /* * We have reserved 2 metadata units when we started the transaction and @@ -7678,7 +7670,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) goto out; } - trans->block_rsv = rsv; + trans->block_rsv = &rsv; while (1) { struct extent_state *cached_state = NULL; @@ -7721,9 +7713,9 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) break; } - btrfs_block_rsv_release(fs_info, rsv, -1, NULL); + btrfs_block_rsv_release(fs_info, &rsv, -1, NULL); ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, - rsv, min_size, false); + &rsv, min_size, false); /* * We have reserved 2 metadata units when we started the * transaction and min_size matches 1 unit, so this should never @@ -7732,7 +7724,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) if (WARN_ON(ret)) break; - trans->block_rsv = rsv; + trans->block_rsv = &rsv; } /* @@ -7771,7 +7763,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) btrfs_btree_balance_dirty(fs_info); } out: - btrfs_free_block_rsv(fs_info, rsv); + btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL); /* * So if we truncate and then write and fsync we normally would just * write the extents that changed, which is a problem if we need to @@ -8026,7 +8018,7 @@ static int btrfs_getattr(struct mnt_idmap *idmap, generic_fillattr(idmap, request_mask, inode, stat); stat->dev = BTRFS_I(inode)->root->anon_dev; - stat->subvol = BTRFS_I(inode)->root->root_key.objectid; + stat->subvol = btrfs_root_id(BTRFS_I(inode)->root); stat->result_mask |= STATX_SUBVOL; spin_lock(&BTRFS_I(inode)->lock); @@ -8823,7 +8815,7 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, .dentry = dentry, }; unsigned int trans_num_items; - int err; + int ret; int name_len; int datasize; unsigned long ptr; @@ -8850,26 +8842,26 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, inode_set_bytes(inode, name_len); new_inode_args.inode = inode; - err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); - if (err) + ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); + if (ret) goto out_inode; /* 1 additional item for the inline extent */ trans_num_items++; trans = btrfs_start_transaction(root, trans_num_items); if (IS_ERR(trans)) { - err = PTR_ERR(trans); + ret = PTR_ERR(trans); goto out_new_inode_args; } - err = btrfs_create_new_inode(trans, &new_inode_args); - if (err) + ret = btrfs_create_new_inode(trans, &new_inode_args); + if (ret) goto out; path = btrfs_alloc_path(); if (!path) { - err = -ENOMEM; - btrfs_abort_transaction(trans, err); + ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); discard_new_inode(inode); inode = NULL; goto out; @@ -8878,10 +8870,9 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, key.type = BTRFS_EXTENT_DATA_KEY; key.offset = 0; datasize = btrfs_file_extent_calc_inline_size(name_len); - err = 
btrfs_insert_empty_item(trans, root, path, &key, - datasize); - if (err) { - btrfs_abort_transaction(trans, err); + ret = btrfs_insert_empty_item(trans, root, path, &key, datasize); + if (ret) { + btrfs_abort_transaction(trans, ret); btrfs_free_path(path); discard_new_inode(inode); inode = NULL; @@ -8903,16 +8894,16 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, btrfs_free_path(path); d_instantiate_new(dentry, inode); - err = 0; + ret = 0; out: btrfs_end_transaction(trans); btrfs_btree_balance_dirty(fs_info); out_new_inode_args: btrfs_new_inode_args_destroy(&new_inode_args); out_inode: - if (err) + if (ret) iput(inode); - return err; + return ret; } static struct btrfs_trans_handle *insert_prealloc_file_extent( diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 8a60983a697c..895009b147ac 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -245,7 +245,7 @@ static int btrfs_check_ioctl_vol_args2_subvol_name(const struct btrfs_ioctl_vol_ * Set flags/xflags from the internal inode flags. The remaining items of * fsxattr are zeroed. */ -int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int btrfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { const struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); @@ -254,7 +254,7 @@ int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) } int btrfs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); struct btrfs_root *root = inode->root; @@ -841,7 +841,7 @@ free_pending: static int btrfs_may_delete(struct mnt_idmap *idmap, struct inode *dir, struct dentry *victim, int isdir) { - int error; + int ret; if (d_really_is_negative(victim)) return -ENOENT; @@ -851,9 +851,9 @@ static int btrfs_may_delete(struct mnt_idmap *idmap, return -EINVAL; audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE); - error = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC); - if (error) - return error; + ret = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC); + if (ret) + return ret; if (IS_APPEND(dir)) return -EPERM; if (check_sticky(idmap, dir, d_inode(victim)) || @@ -892,39 +892,37 @@ static inline int btrfs_may_create(struct mnt_idmap *idmap, * sys_mkdirat and vfs_mkdir, but we only do a single component lookup * inside this filesystem so it's quite a bit simpler. 
*/ -static noinline int btrfs_mksubvol(const struct path *parent, +static noinline int btrfs_mksubvol(struct dentry *parent, struct mnt_idmap *idmap, - const char *name, int namelen, - struct btrfs_root *snap_src, + struct qstr *qname, struct btrfs_root *snap_src, bool readonly, struct btrfs_qgroup_inherit *inherit) { - struct inode *dir = d_inode(parent->dentry); + struct inode *dir = d_inode(parent); struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); struct dentry *dentry; - struct fscrypt_str name_str = FSTR_INIT((char *)name, namelen); - int error; + struct fscrypt_str name_str = FSTR_INIT((char *)qname->name, qname->len); + int ret; - error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT); - if (error == -EINTR) - return error; + ret = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT); + if (ret == -EINTR) + return ret; - dentry = lookup_one(idmap, &QSTR_LEN(name, namelen), parent->dentry); - error = PTR_ERR(dentry); + dentry = lookup_one(idmap, qname, parent); + ret = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_unlock; - error = btrfs_may_create(idmap, dir, dentry); - if (error) + ret = btrfs_may_create(idmap, dir, dentry); + if (ret) goto out_dput; /* * even if this name doesn't exist, we may get hash collisions. * check for them now when we can safely fail */ - error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, - dir->i_ino, &name_str); - if (error) + ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, dir->i_ino, &name_str); + if (ret) goto out_dput; down_read(&fs_info->subvol_sem); @@ -933,11 +931,11 @@ static noinline int btrfs_mksubvol(const struct path *parent, goto out_up_read; if (snap_src) - error = create_snapshot(snap_src, dir, dentry, readonly, inherit); + ret = create_snapshot(snap_src, dir, dentry, readonly, inherit); else - error = create_subvol(idmap, dir, dentry, inherit); + ret = create_subvol(idmap, dir, dentry, inherit); - if (!error) + if (!ret) fsnotify_mkdir(dir, dentry); out_up_read: up_read(&fs_info->subvol_sem); @@ -945,12 +943,12 @@ out_dput: dput(dentry); out_unlock: btrfs_inode_unlock(BTRFS_I(dir), 0); - return error; + return ret; } -static noinline int btrfs_mksnapshot(const struct path *parent, +static noinline int btrfs_mksnapshot(struct dentry *parent, struct mnt_idmap *idmap, - const char *name, int namelen, + struct qstr *qname, struct btrfs_root *root, bool readonly, struct btrfs_qgroup_inherit *inherit) @@ -977,8 +975,8 @@ static noinline int btrfs_mksnapshot(const struct path *parent, btrfs_wait_ordered_extents(root, U64_MAX, NULL); - ret = btrfs_mksubvol(parent, idmap, name, namelen, - root, readonly, inherit); + ret = btrfs_mksubvol(parent, idmap, qname, root, readonly, inherit); + atomic_dec(&root->snapshot_force_cow); out: btrfs_drew_read_unlock(&root->snapshot_lock); @@ -1169,7 +1167,7 @@ static noinline int btrfs_ioctl_resize(struct file *file, } /* equal, nothing need to do */ if (ret == 0 && new_size != old_size) - btrfs_info_in_rcu(fs_info, + btrfs_info(fs_info, "resize device %s (devid %llu) from %llu to %llu", btrfs_dev_name(device), device->devid, old_size, new_size); @@ -1184,12 +1182,12 @@ out_drop: static noinline int __btrfs_ioctl_snap_create(struct file *file, struct mnt_idmap *idmap, - const char *name, unsigned long fd, int subvol, + const char *name, unsigned long fd, bool subvol, bool readonly, struct btrfs_qgroup_inherit *inherit) { - int namelen; int ret = 0; + struct qstr qname = QSTR_INIT(name, strlen(name)); if (!S_ISDIR(file_inode(file)->i_mode)) return -ENOTDIR; @@ -1198,21 
+1196,20 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file, if (ret) goto out; - namelen = strlen(name); if (strchr(name, '/')) { ret = -EINVAL; goto out_drop_write; } - if (name[0] == '.' && - (namelen == 1 || (name[1] == '.' && namelen == 2))) { + if (qname.name[0] == '.' && + (qname.len == 1 || (qname.name[1] == '.' && qname.len == 2))) { ret = -EEXIST; goto out_drop_write; } if (subvol) { - ret = btrfs_mksubvol(&file->f_path, idmap, name, - namelen, NULL, readonly, inherit); + ret = btrfs_mksubvol(file_dentry(file), idmap, &qname, NULL, + readonly, inherit); } else { CLASS(fd, src)(fd); struct inode *src_inode; @@ -1242,8 +1239,7 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file, */ ret = -EINVAL; } else { - ret = btrfs_mksnapshot(&file->f_path, idmap, - name, namelen, + ret = btrfs_mksnapshot(file_dentry(file), idmap, &qname, BTRFS_I(src_inode)->root, readonly, inherit); } @@ -1280,7 +1276,7 @@ out: } static noinline int btrfs_ioctl_snap_create_v2(struct file *file, - void __user *arg, int subvol) + void __user *arg, bool subvol) { struct btrfs_ioctl_vol_args_v2 *vol_args; int ret; @@ -2558,8 +2554,14 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) ret = -EOPNOTSUPP; goto out; } - /* compression requires us to start the IO */ - if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { + if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS) && + (range.flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS)) { + ret = -EINVAL; + goto out; + } + /* Compression or no-compression require to start the IO. */ + if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS) || + (range.flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS)) { range.flags |= BTRFS_DEFRAG_RANGE_START_IO; range.extent_thresh = (u32)-1; } @@ -2700,7 +2702,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) err_drop: mnt_drop_write_file(file); if (bdev_file) - fput(bdev_file); + bdev_fput(bdev_file); out: btrfs_put_dev_args_from_path(&args); kfree(vol_args); @@ -2751,7 +2753,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) mnt_drop_write_file(file); if (bdev_file) - fput(bdev_file); + bdev_fput(bdev_file); out: btrfs_put_dev_args_from_path(&args); out_free: @@ -2890,7 +2892,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) ret = PTR_ERR(new_root); goto out; } - if (!is_fstree(btrfs_root_id(new_root))) { + if (!btrfs_is_fstree(btrfs_root_id(new_root))) { ret = -ENOENT; goto out_free; } @@ -3357,7 +3359,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, int size; struct btrfs_ioctl_logical_ino_args *loi; struct btrfs_data_container *inodes = NULL; - struct btrfs_path *path = NULL; bool ignore_offset; if (!capable(CAP_SYS_ADMIN)) @@ -3391,14 +3392,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, goto out_loi; } - path = btrfs_alloc_path(); - if (!path) { - ret = -ENOMEM; - goto out; - } - ret = iterate_inodes_from_logical(loi->logical, fs_info, path, - inodes, ignore_offset); - btrfs_free_path(path); + ret = iterate_inodes_from_logical(loi->logical, fs_info, inodes, ignore_offset); if (ret == -EINVAL) ret = -ENOENT; if (ret < 0) @@ -3715,22 +3709,6 @@ drop_write: return ret; } -/* - * Quick check for ioctl handlers if quotas are enabled. Proper locking must be - * done before any operations. 
- */ -static bool qgroup_enabled(struct btrfs_fs_info *fs_info) -{ - bool ret = true; - - mutex_lock(&fs_info->qgroup_ioctl_lock); - if (!fs_info->quota_root) - ret = false; - mutex_unlock(&fs_info->qgroup_ioctl_lock); - - return ret; -} - static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); @@ -3745,7 +3723,7 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!qgroup_enabled(root->fs_info)) + if (!btrfs_qgroup_enabled(fs_info)) return -ENOTCONN; ret = mnt_want_write_file(file); @@ -3815,7 +3793,7 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!qgroup_enabled(root->fs_info)) + if (!btrfs_qgroup_enabled(root->fs_info)) return -ENOTCONN; ret = mnt_want_write_file(file); @@ -3833,7 +3811,7 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg) goto out; } - if (sa->create && is_fstree(sa->qgroupid)) { + if (sa->create && btrfs_is_fstree(sa->qgroupid)) { ret = -EINVAL; goto out; } @@ -3874,7 +3852,7 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!qgroup_enabled(root->fs_info)) + if (!btrfs_qgroup_enabled(root->fs_info)) return -ENOTCONN; ret = mnt_want_write_file(file); @@ -3922,7 +3900,7 @@ static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!qgroup_enabled(fs_info)) + if (!btrfs_qgroup_enabled(fs_info)) return -ENOTCONN; ret = mnt_want_write_file(file); @@ -4200,7 +4178,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg) } spin_lock(&fs_info->super_lock); - strcpy(super_block->label, label); + strscpy(super_block->label, label); spin_unlock(&fs_info->super_lock); ret = btrfs_commit_transaction(trans); @@ -4659,7 +4637,7 @@ static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int iss struct btrfs_uring_priv *priv = bc->priv; struct btrfs_inode *inode = BTRFS_I(file_inode(priv->iocb.ki_filp)); struct extent_io_tree *io_tree = &inode->io_tree; - unsigned long index; + pgoff_t index; u64 cur; size_t page_offset; ssize_t ret; @@ -4829,7 +4807,8 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags); #else - return -ENOTTY; + ret = -ENOTTY; + goto out_acct; #endif } else { copy_end = copy_end_kernel; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index e08ea446cf48..ccf6bed9cc24 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -8,7 +8,7 @@ struct file; struct dentry; struct mnt_idmap; -struct fileattr; +struct file_kattr; struct io_uring_cmd; struct btrfs_inode; struct btrfs_fs_info; @@ -16,9 +16,9 @@ struct btrfs_ioctl_balance_args; long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int btrfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int btrfs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); int btrfs_ioctl_get_supported_features(void __user *arg); void btrfs_sync_inode_flags_to_i_flags(struct btrfs_inode *inode); void 
btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/messages.h b/fs/btrfs/messages.h index 6abf81bb00c2..022ebc89af85 100644 --- a/fs/btrfs/messages.h +++ b/fs/btrfs/messages.h @@ -37,106 +37,46 @@ void _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...); btrfs_no_printk(fs_info, fmt, ##args) #endif -#define btrfs_emerg(fs_info, fmt, args...) \ - btrfs_printk(fs_info, KERN_EMERG fmt, ##args) -#define btrfs_alert(fs_info, fmt, args...) \ - btrfs_printk(fs_info, KERN_ALERT fmt, ##args) -#define btrfs_crit(fs_info, fmt, args...) \ - btrfs_printk(fs_info, KERN_CRIT fmt, ##args) -#define btrfs_err(fs_info, fmt, args...) \ - btrfs_printk(fs_info, KERN_ERR fmt, ##args) -#define btrfs_warn(fs_info, fmt, args...) \ - btrfs_printk(fs_info, KERN_WARNING fmt, ##args) -#define btrfs_notice(fs_info, fmt, args...) \ - btrfs_printk(fs_info, KERN_NOTICE fmt, ##args) -#define btrfs_info(fs_info, fmt, args...) \ - btrfs_printk(fs_info, KERN_INFO fmt, ##args) - /* - * Wrappers that use printk_in_rcu + * Print a message with filesystem info, enclosed in RCU protection. */ -#define btrfs_emerg_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_in_rcu(fs_info, KERN_EMERG fmt, ##args) -#define btrfs_alert_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_in_rcu(fs_info, KERN_ALERT fmt, ##args) -#define btrfs_crit_in_rcu(fs_info, fmt, args...) \ +#define btrfs_crit(fs_info, fmt, args...) \ btrfs_printk_in_rcu(fs_info, KERN_CRIT fmt, ##args) -#define btrfs_err_in_rcu(fs_info, fmt, args...) \ +#define btrfs_err(fs_info, fmt, args...) \ btrfs_printk_in_rcu(fs_info, KERN_ERR fmt, ##args) -#define btrfs_warn_in_rcu(fs_info, fmt, args...) \ +#define btrfs_warn(fs_info, fmt, args...) \ btrfs_printk_in_rcu(fs_info, KERN_WARNING fmt, ##args) -#define btrfs_notice_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_in_rcu(fs_info, KERN_NOTICE fmt, ##args) -#define btrfs_info_in_rcu(fs_info, fmt, args...) \ +#define btrfs_info(fs_info, fmt, args...) \ btrfs_printk_in_rcu(fs_info, KERN_INFO fmt, ##args) /* - * Wrappers that use a ratelimited printk_in_rcu - */ -#define btrfs_emerg_rl_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_rl_in_rcu(fs_info, KERN_EMERG fmt, ##args) -#define btrfs_alert_rl_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_rl_in_rcu(fs_info, KERN_ALERT fmt, ##args) -#define btrfs_crit_rl_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_rl_in_rcu(fs_info, KERN_CRIT fmt, ##args) -#define btrfs_err_rl_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_rl_in_rcu(fs_info, KERN_ERR fmt, ##args) -#define btrfs_warn_rl_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_rl_in_rcu(fs_info, KERN_WARNING fmt, ##args) -#define btrfs_notice_rl_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_rl_in_rcu(fs_info, KERN_NOTICE fmt, ##args) -#define btrfs_info_rl_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_rl_in_rcu(fs_info, KERN_INFO fmt, ##args) - -/* * Wrappers that use a ratelimited printk */ -#define btrfs_emerg_rl(fs_info, fmt, args...) \ - btrfs_printk_ratelimited(fs_info, KERN_EMERG fmt, ##args) -#define btrfs_alert_rl(fs_info, fmt, args...) \ - btrfs_printk_ratelimited(fs_info, KERN_ALERT fmt, ##args) #define btrfs_crit_rl(fs_info, fmt, args...) \ - btrfs_printk_ratelimited(fs_info, KERN_CRIT fmt, ##args) + btrfs_printk_rl_in_rcu(fs_info, KERN_CRIT fmt, ##args) #define btrfs_err_rl(fs_info, fmt, args...) \ - btrfs_printk_ratelimited(fs_info, KERN_ERR fmt, ##args) + btrfs_printk_rl_in_rcu(fs_info, KERN_ERR fmt, ##args) #define btrfs_warn_rl(fs_info, fmt, args...) 
\ - btrfs_printk_ratelimited(fs_info, KERN_WARNING fmt, ##args) -#define btrfs_notice_rl(fs_info, fmt, args...) \ - btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args) + btrfs_printk_rl_in_rcu(fs_info, KERN_WARNING fmt, ##args) #define btrfs_info_rl(fs_info, fmt, args...) \ - btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args) + btrfs_printk_rl_in_rcu(fs_info, KERN_INFO fmt, ##args) #if defined(CONFIG_DYNAMIC_DEBUG) #define btrfs_debug(fs_info, fmt, args...) \ - _dynamic_func_call_no_desc(fmt, btrfs_printk, \ - fs_info, KERN_DEBUG fmt, ##args) -#define btrfs_debug_in_rcu(fs_info, fmt, args...) \ _dynamic_func_call_no_desc(fmt, btrfs_printk_in_rcu, \ fs_info, KERN_DEBUG fmt, ##args) -#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ - _dynamic_func_call_no_desc(fmt, btrfs_printk_rl_in_rcu, \ - fs_info, KERN_DEBUG fmt, ##args) #define btrfs_debug_rl(fs_info, fmt, args...) \ - _dynamic_func_call_no_desc(fmt, btrfs_printk_ratelimited, \ + _dynamic_func_call_no_desc(fmt, btrfs_printk_rl_in_rcu, \ fs_info, KERN_DEBUG fmt, ##args) #elif defined(DEBUG) #define btrfs_debug(fs_info, fmt, args...) \ - btrfs_printk(fs_info, KERN_DEBUG fmt, ##args) -#define btrfs_debug_in_rcu(fs_info, fmt, args...) \ btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) -#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ - btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, ##args) #define btrfs_debug_rl(fs_info, fmt, args...) \ - btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args) + btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, ##args) #else -#define btrfs_debug(fs_info, fmt, args...) \ - btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) -#define btrfs_debug_in_rcu(fs_info, fmt, args...) \ - btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) -#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ - btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) -#define btrfs_debug_rl(fs_info, fmt, args...) \ - btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) +/* When printk() is no_printk(), expand to no-op. */ +#define btrfs_debug(fs_info, fmt, args...) do { (void)(fs_info); } while(0) +#define btrfs_debug_rl(fs_info, fmt, args...) do { (void)(fs_info); } while(0) #endif #define btrfs_printk_in_rcu(fs_info, fmt, args...) \ @@ -146,26 +86,15 @@ do { \ rcu_read_unlock(); \ } while (0) -#define btrfs_no_printk_in_rcu(fs_info, fmt, args...) \ -do { \ - rcu_read_lock(); \ - btrfs_no_printk(fs_info, fmt, ##args); \ - rcu_read_unlock(); \ -} while (0) - -#define btrfs_printk_ratelimited(fs_info, fmt, args...) \ +#define btrfs_printk_rl_in_rcu(fs_info, fmt, args...) \ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ + \ + rcu_read_lock(); \ if (__ratelimit(&_rs)) \ btrfs_printk(fs_info, fmt, ##args); \ -} while (0) - -#define btrfs_printk_rl_in_rcu(fs_info, fmt, args...) 
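The messages.h rework above collapses the *_in_rcu and plain wrapper families: each surviving btrfs_* level macro now takes the RCU read lock itself, and the *_rl variants add a per-call-site ratelimit before printing. A generic sketch of that combined shape, with pr_warn() standing in for btrfs_printk() and no btrfs-specific state:

#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>

#define demo_warn_rl(fmt, args...)                                  \
do {                                                                \
    /* one ratelimit state per call site of this macro */           \
    static DEFINE_RATELIMIT_STATE(_rs,                              \
                                  DEFAULT_RATELIMIT_INTERVAL,       \
                                  DEFAULT_RATELIMIT_BURST);         \
                                                                    \
    rcu_read_lock();    /* strings printed may be RCU-protected */  \
    if (__ratelimit(&_rs))                                          \
        pr_warn(fmt, ##args);                                       \
    rcu_read_unlock();                                              \
} while (0)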
\ -do { \ - rcu_read_lock(); \ - btrfs_printk_ratelimited(fs_info, fmt, ##args); \ rcu_read_unlock(); \ } while (0) diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h index 0d599fd847c9..ff5eac84d819 100644 --- a/fs/btrfs/misc.h +++ b/fs/btrfs/misc.h @@ -7,6 +7,8 @@ #include <linux/bitmap.h> #include <linux/sched.h> #include <linux/wait.h> +#include <linux/mm.h> +#include <linux/pagemap.h> #include <linux/math64.h> #include <linux/rbtree.h> @@ -119,28 +121,23 @@ static inline struct rb_node *rb_simple_search_first(const struct rb_root *root, return ret; } -static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr, - struct rb_node *node) +static int rb_simple_node_bytenr_cmp(struct rb_node *new, const struct rb_node *existing) { - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - struct rb_simple_node *entry; + struct rb_simple_node *new_entry = rb_entry(new, struct rb_simple_node, rb_node); + struct rb_simple_node *existing_entry = rb_entry(existing, struct rb_simple_node, rb_node); - while (*p) { - parent = *p; - entry = rb_entry(parent, struct rb_simple_node, rb_node); + if (new_entry->bytenr < existing_entry->bytenr) + return -1; + else if (new_entry->bytenr > existing_entry->bytenr) + return 1; - if (bytenr < entry->bytenr) - p = &(*p)->rb_left; - else if (bytenr > entry->bytenr) - p = &(*p)->rb_right; - else - return parent; - } + return 0; +} - rb_link_node(node, parent, p); - rb_insert_color(node, root); - return NULL; +static inline struct rb_node *rb_simple_insert(struct rb_root *root, + struct rb_simple_node *simple_node) +{ + return rb_find_add(&simple_node->rb_node, root, rb_simple_node_bytenr_cmp); } static inline bool bitmap_test_range_all_set(const unsigned long *addr, @@ -163,4 +160,9 @@ static inline bool bitmap_test_range_all_zero(const unsigned long *addr, return (found_set == start + nbits); } +static inline u64 folio_end(struct folio *folio) +{ + return folio_pos(folio) + folio_size(folio); +} + #endif diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 9212ce110cde..2829f20d7bb5 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -359,7 +359,7 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered, if (folio) { ASSERT(folio->mapping); ASSERT(folio_pos(folio) <= file_offset); - ASSERT(file_offset + len <= folio_pos(folio) + folio_size(folio)); + ASSERT(file_offset + len <= folio_end(folio)); /* * Ordered flag indicates whether we still have diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index fc821aa446f0..74e38da9bd39 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -190,7 +190,7 @@ static void print_uuid_item(const struct extent_buffer *l, unsigned long offset, u32 item_size) { if (!IS_ALIGNED(item_size, sizeof(u64))) { - pr_warn("BTRFS: uuid item with illegal size %lu!\n", + btrfs_warn(l->fs_info, "uuid item with illegal size %lu", (unsigned long)item_size); return; } @@ -223,7 +223,7 @@ static void print_eb_refs_lock(const struct extent_buffer *eb) { #ifdef CONFIG_BTRFS_DEBUG btrfs_info(eb->fs_info, "refs %u lock_owner %u current %u", - atomic_read(&eb->refs), eb->lock_owner, current->pid); + refcount_read(&eb->refs), eb->lock_owner, current->pid); #endif } diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index b3176edbde82..1a5972178b3a 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -160,23 +160,34 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, int init_flags); static void 
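misc.h also gains a folio_end() helper so callers can stop open-coding folio_pos() + folio_size(), as the ordered-data.c assertion above now does. A tiny illustrative use with an invented range check rather than btrfs code:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Does [start, start + len) fall entirely inside this folio? */
static bool demo_range_in_folio(struct folio *folio, u64 start, u64 len)
{
    /* btrfs's new folio_end(folio) is exactly folio_pos() + folio_size(). */
    return start >= folio_pos(folio) &&
           start + len <= folio_pos(folio) + folio_size(folio);
}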
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info); +static int btrfs_qgroup_qgroupid_key_cmp(const void *key, const struct rb_node *node) +{ + const u64 *qgroupid = key; + const struct btrfs_qgroup *qgroup = rb_entry(node, struct btrfs_qgroup, node); + + if (qgroup->qgroupid < *qgroupid) + return -1; + else if (qgroup->qgroupid > *qgroupid) + return 1; + + return 0; +} + /* must be called with qgroup_ioctl_lock held */ static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info, u64 qgroupid) { - struct rb_node *n = fs_info->qgroup_tree.rb_node; - struct btrfs_qgroup *qgroup; + struct rb_node *node; - while (n) { - qgroup = rb_entry(n, struct btrfs_qgroup, node); - if (qgroup->qgroupid < qgroupid) - n = n->rb_left; - else if (qgroup->qgroupid > qgroupid) - n = n->rb_right; - else - return qgroup; - } - return NULL; + node = rb_find(&qgroupid, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_key_cmp); + return rb_entry_safe(node, struct btrfs_qgroup, node); +} + +static int btrfs_qgroup_qgroupid_cmp(struct rb_node *new, const struct rb_node *existing) +{ + const struct btrfs_qgroup *new_qgroup = rb_entry(new, struct btrfs_qgroup, node); + + return btrfs_qgroup_qgroupid_key_cmp(&new_qgroup->qgroupid, existing); } /* @@ -191,39 +202,25 @@ static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *prealloc, u64 qgroupid) { - struct rb_node **p = &fs_info->qgroup_tree.rb_node; - struct rb_node *parent = NULL; - struct btrfs_qgroup *qgroup; + struct rb_node *node; /* Caller must have pre-allocated @prealloc. */ ASSERT(prealloc); - while (*p) { - parent = *p; - qgroup = rb_entry(parent, struct btrfs_qgroup, node); - - if (qgroup->qgroupid < qgroupid) { - p = &(*p)->rb_left; - } else if (qgroup->qgroupid > qgroupid) { - p = &(*p)->rb_right; - } else { - kfree(prealloc); - return qgroup; - } + prealloc->qgroupid = qgroupid; + node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp); + if (node) { + kfree(prealloc); + return rb_entry(node, struct btrfs_qgroup, node); } - qgroup = prealloc; - qgroup->qgroupid = qgroupid; - INIT_LIST_HEAD(&qgroup->groups); - INIT_LIST_HEAD(&qgroup->members); - INIT_LIST_HEAD(&qgroup->dirty); - INIT_LIST_HEAD(&qgroup->iterator); - INIT_LIST_HEAD(&qgroup->nested_iterator); + INIT_LIST_HEAD(&prealloc->groups); + INIT_LIST_HEAD(&prealloc->members); + INIT_LIST_HEAD(&prealloc->dirty); + INIT_LIST_HEAD(&prealloc->iterator); + INIT_LIST_HEAD(&prealloc->nested_iterator); - rb_link_node(&qgroup->node, parent, p); - rb_insert_color(&qgroup->node, &fs_info->qgroup_tree); - - return qgroup; + return prealloc; } static void __del_qgroup_rb(struct btrfs_qgroup *qgroup) @@ -349,13 +346,27 @@ int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid } #endif -static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info) +__printf(2, 3) +static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info, const char *fmt, ...) 
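find_qgroup_rb() and add_qgroup_rb() above, like the misc.h and later ref-verify.c hunks, drop the open-coded rbtree walks in favour of rb_find()/rb_find_add() plus two small comparators, with the node-vs-node comparator simply forwarding to the key-vs-node one. A self-contained sketch of that convention with an invented item type; in this sketch the tree is ordered ascending by id, and a comparator returns a value below zero when the key (or new node) sorts before the existing node:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_item {
    struct rb_node node;
    u64 id;
};

static int demo_id_key_cmp(const void *key, const struct rb_node *node)
{
    const u64 *id = key;
    const struct demo_item *item = rb_entry(node, struct demo_item, node);

    if (*id < item->id)
        return -1;
    if (*id > item->id)
        return 1;
    return 0;
}

static int demo_id_cmp(struct rb_node *new, const struct rb_node *existing)
{
    const struct demo_item *new_item = rb_entry(new, struct demo_item, node);

    return demo_id_key_cmp(&new_item->id, existing);
}

/* Returns the already-present item on collision, NULL when inserted. */
static struct demo_item *demo_insert(struct rb_root *root, struct demo_item *item)
{
    struct rb_node *node = rb_find_add(&item->node, root, demo_id_cmp);

    return rb_entry_safe(node, struct demo_item, node);
}

/* Returns the matching item, or NULL when nothing has this id. */
static struct demo_item *demo_lookup(struct rb_root *root, u64 id)
{
    struct rb_node *node = rb_find(&id, root, demo_id_key_cmp);

    return rb_entry_safe(node, struct demo_item, node);
}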
{ + const u64 old_flags = fs_info->qgroup_flags; + if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) return; fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN | BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING); + if (!(old_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) { + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + + btrfs_warn_rl(fs_info, "qgroup marked inconsistent, %pV", &vaf); + va_end(args); + } } static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info, @@ -386,12 +397,6 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) if (!fs_info->quota_root) return 0; - fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL); - if (!fs_info->qgroup_ulist) { - ret = -ENOMEM; - goto out; - } - path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; @@ -434,13 +439,10 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) goto out; } fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr); - if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) { + if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) qgroup_read_enable_gen(fs_info, l, slot, ptr); - } else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) { - qgroup_mark_inconsistent(fs_info); - btrfs_err(fs_info, - "qgroup generation mismatch, marked as inconsistent"); - } + else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) + qgroup_mark_inconsistent(fs_info, "qgroup generation mismatch"); rescan_progress = btrfs_qgroup_status_rescan(l, ptr); goto next1; } @@ -451,10 +453,8 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) qgroup = find_qgroup_rb(fs_info, found_key.offset); if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) || - (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) { - btrfs_err(fs_info, "inconsistent qgroup config"); - qgroup_mark_inconsistent(fs_info); - } + (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) + qgroup_mark_inconsistent(fs_info, "inconsistent qgroup config"); if (!qgroup) { struct btrfs_qgroup *prealloc; struct btrfs_root *tree_root = fs_info->tree_root; @@ -476,7 +476,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) * during mount before we start doing things like creating * subvolumes. */ - if (is_fstree(qgroup->qgroupid) && + if (btrfs_is_fstree(qgroup->qgroupid) && qgroup->qgroupid > tree_root->free_objectid) /* * Don't need to check against BTRFS_LAST_FREE_OBJECTID, @@ -581,8 +581,6 @@ out: if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ret = qgroup_rescan_init(fs_info, rescan_progress, 0); } else { - ulist_free(fs_info->qgroup_ulist); - fs_info->qgroup_ulist = NULL; fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; btrfs_sysfs_del_qgroups(fs_info); } @@ -630,29 +628,30 @@ bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info) /* * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(), - * first two are in single-threaded paths.And for the third one, we have set - * quota_root to be null with qgroup_lock held before, so it is safe to clean - * up the in-memory structures without qgroup_lock held. + * first two are in single-threaded paths. */ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) { struct rb_node *n; struct btrfs_qgroup *qgroup; + /* + * btrfs_quota_disable() can be called concurrently with + * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the + * lock. 
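qgroup_mark_inconsistent() above now takes a printf-style reason, annotated __printf(2, 3) so the compiler checks the format, and forwards it through struct va_format and the %pV specifier so a single warning carries the caller's context. A generic sketch of that forwarding idiom, with an invented function name and message prefix:

#include <linux/printk.h>
#include <linux/stdarg.h>

__printf(1, 2)
static void demo_mark_broken(const char *fmt, ...)
{
    struct va_format vaf;
    va_list args;

    va_start(args, fmt);
    vaf.fmt = fmt;
    vaf.va = &args;
    /* %pV expands the caller's format string and arguments in place. */
    pr_warn("state marked inconsistent, %pV\n", &vaf);
    va_end(args);
}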
+ */ + spin_lock(&fs_info->qgroup_lock); while ((n = rb_first(&fs_info->qgroup_tree))) { qgroup = rb_entry(n, struct btrfs_qgroup, node); rb_erase(n, &fs_info->qgroup_tree); __del_qgroup_rb(qgroup); + spin_unlock(&fs_info->qgroup_lock); btrfs_sysfs_del_one_qgroup(fs_info, qgroup); kfree(qgroup); + spin_lock(&fs_info->qgroup_lock); } - /* - * We call btrfs_free_qgroup_config() when unmounting - * filesystem and disabling quota, so we set qgroup_ulist - * to be null here to avoid double free. - */ - ulist_free(fs_info->qgroup_ulist); - fs_info->qgroup_ulist = NULL; + spin_unlock(&fs_info->qgroup_lock); + btrfs_sysfs_del_qgroups(fs_info); } @@ -998,7 +997,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup = NULL; struct btrfs_qgroup *prealloc = NULL; struct btrfs_trans_handle *trans = NULL; - struct ulist *ulist = NULL; const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA); int ret = 0; int slot; @@ -1021,12 +1019,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info, if (fs_info->quota_root) goto out; - ulist = ulist_alloc(GFP_KERNEL); - if (!ulist) { - ret = -ENOMEM; - goto out; - } - ret = btrfs_sysfs_add_qgroups(fs_info); if (ret < 0) goto out; @@ -1066,9 +1058,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info, if (fs_info->quota_root) goto out; - fs_info->qgroup_ulist = ulist; - ulist = NULL; - /* * initially create the quota tree */ @@ -1155,11 +1144,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info, qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset); prealloc = NULL; - if (IS_ERR(qgroup)) { - ret = PTR_ERR(qgroup); - btrfs_abort_transaction(trans, ret); - goto out_free_path; - } ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); if (ret < 0) { btrfs_abort_transaction(trans, ret); @@ -1272,17 +1256,13 @@ out_free_root: if (ret) btrfs_put_root(quota_root); out: - if (ret) { - ulist_free(fs_info->qgroup_ulist); - fs_info->qgroup_ulist = NULL; + if (ret) btrfs_sysfs_del_qgroups(fs_info); - } mutex_unlock(&fs_info->qgroup_ioctl_lock); if (ret && trans) btrfs_end_transaction(trans); else if (trans) ret = btrfs_end_transaction(trans); - ulist_free(ulist); kfree(prealloc); return ret; } @@ -1354,11 +1334,14 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) /* * We have nothing held here and no trans handle, just return the error - * if there is one. + * if there is one and set back the quota enabled bit since we didn't + * actually disable quotas. 
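btrfs_free_qgroup_config() above now holds qgroup_lock while unlinking each node but drops it around btrfs_sysfs_del_one_qgroup() and kfree(), which may sleep, then reacquires it for the next iteration. A generic sketch of that drain pattern, assuming a spinlock-protected rbtree and a per-item teardown that can sleep:

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
    struct rb_node node;
};

static void demo_teardown(struct demo_entry *entry)
{
    /* stand-in for work that may sleep (sysfs removal, ...) */
    kfree(entry);
}

static void demo_drain(struct rb_root *root, spinlock_t *lock)
{
    struct rb_node *n;

    spin_lock(lock);
    while ((n = rb_first(root))) {
        struct demo_entry *entry = rb_entry(n, struct demo_entry, node);

        rb_erase(n, root);
        /* Must not sleep under the spinlock: drop it for teardown. */
        spin_unlock(lock);
        demo_teardown(entry);
        spin_lock(lock);
    }
    spin_unlock(lock);
}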
*/ ret = flush_reservations(fs_info); - if (ret) + if (ret) { + set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); return ret; + } /* * 1 For the root item @@ -1679,9 +1662,6 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) struct btrfs_qgroup *prealloc = NULL; int ret = 0; - if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) - return 0; - mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) { ret = -ENOTCONN; @@ -1844,13 +1824,12 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) if (qgroup->rfer || qgroup->excl || qgroup->rfer_cmpr || qgroup->excl_cmpr) { DEBUG_WARN(); - btrfs_warn_rl(fs_info, -"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu", - btrfs_qgroup_level(qgroup->qgroupid), - btrfs_qgroup_subvolid(qgroup->qgroupid), - qgroup->rfer, qgroup->rfer_cmpr, - qgroup->excl, qgroup->excl_cmpr); - qgroup_mark_inconsistent(fs_info); + qgroup_mark_inconsistent(fs_info, + "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu", + btrfs_qgroup_level(qgroup->qgroupid), + btrfs_qgroup_subvolid(qgroup->qgroupid), + qgroup->rfer, qgroup->rfer_cmpr, + qgroup->excl, qgroup->excl_cmpr); } } del_qgroup_rb(fs_info, qgroupid); @@ -1873,7 +1852,8 @@ int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 su struct btrfs_trans_handle *trans; int ret; - if (!is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || !fs_info->quota_root) + if (!btrfs_is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || + !fs_info->quota_root) return 0; /* @@ -1968,11 +1948,8 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid, spin_unlock(&fs_info->qgroup_lock); ret = update_qgroup_limit_item(trans, qgroup); - if (ret) { - qgroup_mark_inconsistent(fs_info); - btrfs_info(fs_info, "unable to update quota limit for %llu", - qgroupid); - } + if (ret) + qgroup_mark_inconsistent(fs_info, "qgroup item update error %d", ret); out: mutex_unlock(&fs_info->qgroup_ioctl_lock); @@ -2027,7 +2004,7 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC); xa_unlock(&delayed_refs->dirty_extents); if (xa_is_err(ret)) { - qgroup_mark_inconsistent(fs_info); + qgroup_mark_inconsistent(fs_info, "xarray insert error: %d", xa_err(ret)); return xa_err(ret); } @@ -2094,10 +2071,8 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans, ret = btrfs_find_all_roots(&ctx, true); if (ret < 0) { - qgroup_mark_inconsistent(fs_info); - btrfs_warn(fs_info, -"error accounting new delayed refs extent (err code: %d), quota inconsistent", - ret); + qgroup_mark_inconsistent(fs_info, + "error accounting new delayed refs extent: %d", ret); return 0; } @@ -2341,7 +2316,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans, btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0); /* For src_path */ - atomic_inc(&src_eb->refs); + refcount_inc(&src_eb->refs); src_path->nodes[root_level] = src_eb; src_path->slots[root_level] = dst_path->slots[root_level]; src_path->locks[root_level] = 0; @@ -2574,7 +2549,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans, goto out; } /* For dst_path */ - atomic_inc(&dst_eb->refs); + refcount_inc(&dst_eb->refs); dst_path->nodes[level] = dst_eb; dst_path->slots[level] = 0; dst_path->locks[level] = 0; @@ -2589,7 +2564,7 @@ static int qgroup_trace_subtree_swap(struct 
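The qgroup_trace_extent_swap() hunks above (and the earlier print-tree.c one) move extent buffer reference counts from atomic_t to refcount_t: refcount_inc()/refcount_read() warn and saturate on misuse rather than silently wrapping. A tiny sketch of the refcount_t lifecycle on an invented object:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_buf {
    refcount_t refs;
};

static struct demo_buf *demo_buf_alloc(void)
{
    struct demo_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

    if (buf)
        refcount_set(&buf->refs, 1);
    return buf;
}

static void demo_buf_get(struct demo_buf *buf)
{
    /* WARNs and saturates instead of overflowing back to zero. */
    refcount_inc(&buf->refs);
}

static void demo_buf_put(struct demo_buf *buf)
{
    if (refcount_dec_and_test(&buf->refs))
        kfree(buf);
}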
btrfs_trans_handle *trans, out: btrfs_free_path(dst_path); if (ret < 0) - qgroup_mark_inconsistent(fs_info); + qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret); return ret; } @@ -2633,7 +2608,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, * mark qgroup inconsistent. */ if (root_level >= drop_subptree_thres) { - qgroup_mark_inconsistent(fs_info); + qgroup_mark_inconsistent(fs_info, "subtree level reached threshold"); return 0; } @@ -2666,7 +2641,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, * walk back up the tree (adjusting slot pointers as we go) * and restart the search process. */ - atomic_inc(&root_eb->refs); /* For path */ + refcount_inc(&root_eb->refs); /* For path */ path->nodes[root_level] = root_eb; path->slots[root_level] = 0; path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ @@ -2932,7 +2907,7 @@ static int maybe_fs_roots(struct ulist *roots) * trees. * If it contains a non-fs tree, it won't be shared with fs/subvol trees. */ - return is_fstree(unode->val); + return btrfs_is_fstree(unode->val); } int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, @@ -3133,10 +3108,12 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans) spin_unlock(&fs_info->qgroup_lock); ret = update_qgroup_info_item(trans, qgroup); if (ret) - qgroup_mark_inconsistent(fs_info); + qgroup_mark_inconsistent(fs_info, + "qgroup info item update error %d", ret); ret = update_qgroup_limit_item(trans, qgroup); if (ret) - qgroup_mark_inconsistent(fs_info); + qgroup_mark_inconsistent(fs_info, + "qgroup limit item update error %d", ret); spin_lock(&fs_info->qgroup_lock); } if (btrfs_qgroup_enabled(fs_info)) @@ -3147,7 +3124,8 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans) ret = update_qgroup_status_item(trans); if (ret) - qgroup_mark_inconsistent(fs_info); + qgroup_mark_inconsistent(fs_info, + "qgroup status item update error %d", ret); return ret; } @@ -3329,6 +3307,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, u32 level_size = 0; u64 nums; + if (!btrfs_qgroup_enabled(fs_info)) + return 0; + prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); if (!prealloc) return -ENOMEM; @@ -3352,8 +3333,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, if (!committing) mutex_lock(&fs_info->qgroup_ioctl_lock); - if (!btrfs_qgroup_enabled(fs_info)) - goto out; quota_root = fs_info->quota_root; if (!quota_root) { @@ -3554,7 +3533,7 @@ out: if (!committing) mutex_unlock(&fs_info->qgroup_ioctl_lock); if (need_rescan) - qgroup_mark_inconsistent(fs_info); + qgroup_mark_inconsistent(fs_info, "qgroup inherit needs a rescan"); if (qlist_prealloc) { for (int i = 0; i < inherit->num_qgroups; i++) kfree(qlist_prealloc[i]); @@ -3588,7 +3567,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, int ret = 0; LIST_HEAD(qgroup_list); - if (!is_fstree(ref_root)) + if (!btrfs_is_fstree(ref_root)) return 0; if (num_bytes == 0) @@ -3648,7 +3627,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup; LIST_HEAD(qgroup_list); - if (!is_fstree(ref_root)) + if (!btrfs_is_fstree(ref_root)) return; if (num_bytes == 0) @@ -4036,12 +4015,21 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) qgroup_rescan_zero_tracking(fs_info); mutex_lock(&fs_info->qgroup_rescan_lock); - fs_info->qgroup_rescan_running = true; - btrfs_queue_work(fs_info->qgroup_rescan_workers, - &fs_info->qgroup_rescan_work); + /* + * The rescan 
worker is only for full accounting qgroups, check if it's + * enabled as it is pointless to queue it otherwise. A concurrent quota + * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED. + */ + if (btrfs_qgroup_full_accounting(fs_info)) { + fs_info->qgroup_rescan_running = true; + btrfs_queue_work(fs_info->qgroup_rescan_workers, + &fs_info->qgroup_rescan_work); + } else { + ret = -ENOTCONN; + } mutex_unlock(&fs_info->qgroup_rescan_lock); - return 0; + return ret; } int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, @@ -4128,8 +4116,8 @@ static int qgroup_unreserve_range(struct btrfs_inode *inode, * Now the entry is in [start, start + len), revert the * EXTENT_QGROUP_RESERVED bit. */ - clear_ret = btrfs_clear_extent_bits(&inode->io_tree, entry_start, - entry_end, EXTENT_QGROUP_RESERVED); + clear_ret = btrfs_clear_extent_bit(&inode->io_tree, entry_start, entry_end, + EXTENT_QGROUP_RESERVED, NULL); if (!ret && clear_ret < 0) ret = clear_ret; @@ -4216,7 +4204,7 @@ static int qgroup_reserve_data(struct btrfs_inode *inode, int ret; if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED || - !is_fstree(btrfs_root_id(root)) || len == 0) + !btrfs_is_fstree(btrfs_root_id(root)) || len == 0) return 0; /* @reserved parameter is mandatory for qgroup */ @@ -4469,7 +4457,7 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, int ret; if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || - !is_fstree(btrfs_root_id(root)) || num_bytes == 0) + !btrfs_is_fstree(btrfs_root_id(root)) || num_bytes == 0) return 0; BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); @@ -4514,7 +4502,7 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) struct btrfs_fs_info *fs_info = root->fs_info; if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || - !is_fstree(btrfs_root_id(root))) + !btrfs_is_fstree(btrfs_root_id(root))) return; /* TODO: Update trace point to handle such free */ @@ -4530,7 +4518,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, struct btrfs_fs_info *fs_info = root->fs_info; if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || - !is_fstree(btrfs_root_id(root))) + !btrfs_is_fstree(btrfs_root_id(root))) return; /* @@ -4589,7 +4577,7 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) struct btrfs_fs_info *fs_info = root->fs_info; if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || - !is_fstree(btrfs_root_id(root))) + !btrfs_is_fstree(btrfs_root_id(root))) return; /* Same as btrfs_qgroup_free_meta_prealloc() */ num_bytes = sub_root_meta_rsv(root, num_bytes, @@ -4673,6 +4661,28 @@ out: spin_unlock(&swapped_blocks->lock); } +static int qgroup_swapped_block_bytenr_key_cmp(const void *key, const struct rb_node *node) +{ + const u64 *bytenr = key; + const struct btrfs_qgroup_swapped_block *block = rb_entry(node, + struct btrfs_qgroup_swapped_block, node); + + if (block->subvol_bytenr < *bytenr) + return -1; + else if (block->subvol_bytenr > *bytenr) + return 1; + + return 0; +} + +static int qgroup_swapped_block_bytenr_cmp(struct rb_node *new, const struct rb_node *existing) +{ + const struct btrfs_qgroup_swapped_block *new_block = rb_entry(new, + struct btrfs_qgroup_swapped_block, node); + + return qgroup_swapped_block_bytenr_key_cmp(&new_block->subvol_bytenr, existing); +} + /* * Add subtree roots record into @subvol_root. 
* @@ -4692,8 +4702,7 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root, struct btrfs_fs_info *fs_info = subvol_root->fs_info; struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks; struct btrfs_qgroup_swapped_block *block; - struct rb_node **cur; - struct rb_node *parent = NULL; + struct rb_node *node; int level = btrfs_header_level(subvol_parent) - 1; int ret = 0; @@ -4742,46 +4751,32 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root, /* Insert @block into @blocks */ spin_lock(&blocks->lock); - cur = &blocks->blocks[level].rb_node; - while (*cur) { + node = rb_find_add(&block->node, &blocks->blocks[level], qgroup_swapped_block_bytenr_cmp); + if (node) { struct btrfs_qgroup_swapped_block *entry; - parent = *cur; - entry = rb_entry(parent, struct btrfs_qgroup_swapped_block, - node); + entry = rb_entry(node, struct btrfs_qgroup_swapped_block, node); - if (entry->subvol_bytenr < block->subvol_bytenr) { - cur = &(*cur)->rb_left; - } else if (entry->subvol_bytenr > block->subvol_bytenr) { - cur = &(*cur)->rb_right; - } else { - if (entry->subvol_generation != - block->subvol_generation || - entry->reloc_bytenr != block->reloc_bytenr || - entry->reloc_generation != - block->reloc_generation) { - /* - * Duplicated but mismatch entry found. - * Shouldn't happen. - * - * Marking qgroup inconsistent should be enough - * for end users. - */ - DEBUG_WARN("duplicated but mismatched entry found"); - ret = -EEXIST; - } - kfree(block); - goto out_unlock; + if (entry->subvol_generation != block->subvol_generation || + entry->reloc_bytenr != block->reloc_bytenr || + entry->reloc_generation != block->reloc_generation) { + /* + * Duplicated but mismatch entry found. Shouldn't happen. + * Marking qgroup inconsistent should be enough for end + * users. 
+ */ + DEBUG_WARN("duplicated but mismatched entry found"); + ret = -EEXIST; } + kfree(block); + goto out_unlock; } - rb_link_node(&block->node, parent, cur); - rb_insert_color(&block->node, &blocks->blocks[level]); blocks->swapped = true; out_unlock: spin_unlock(&blocks->lock); out: if (ret < 0) - qgroup_mark_inconsistent(fs_info); + qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret); return ret; } @@ -4801,7 +4796,6 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans, struct btrfs_qgroup_swapped_block *block; struct extent_buffer *reloc_eb = NULL; struct rb_node *node; - bool found = false; bool swapped = false; int level = btrfs_header_level(subvol_eb); int ret = 0; @@ -4809,7 +4803,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans, if (!btrfs_qgroup_full_accounting(fs_info)) return 0; - if (!is_fstree(btrfs_root_id(root)) || !root->reloc_root) + if (!btrfs_is_fstree(btrfs_root_id(root)) || !root->reloc_root) return 0; spin_lock(&blocks->lock); @@ -4817,23 +4811,14 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans, spin_unlock(&blocks->lock); return 0; } - node = blocks->blocks[level].rb_node; - - while (node) { - block = rb_entry(node, struct btrfs_qgroup_swapped_block, node); - if (block->subvol_bytenr < subvol_eb->start) { - node = node->rb_left; - } else if (block->subvol_bytenr > subvol_eb->start) { - node = node->rb_right; - } else { - found = true; - break; - } - } - if (!found) { + node = rb_find(&subvol_eb->start, &blocks->blocks[level], + qgroup_swapped_block_bytenr_key_cmp); + if (!node) { spin_unlock(&blocks->lock); goto out; } + block = rb_entry(node, struct btrfs_qgroup_swapped_block, node); + /* Found one, remove it from @blocks first and update blocks->swapped */ rb_erase(&block->node, &blocks->blocks[level]); for (i = 0; i < BTRFS_MAX_LEVEL; i++) { @@ -4869,10 +4854,9 @@ free_out: free_extent_buffer(reloc_eb); out: if (ret < 0) { - btrfs_err_rl(fs_info, - "failed to account subtree at bytenr %llu: %d", - subvol_eb->start, ret); - qgroup_mark_inconsistent(fs_info); + qgroup_mark_inconsistent(fs_info, + "failed to account subtree at bytenr %llu: %d", + subvol_eb->start, ret); } return ret; } @@ -4903,7 +4887,7 @@ int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info, if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE) return 0; - if (!is_fstree(root)) + if (!btrfs_is_fstree(root)) return 0; /* If the extent predates enabling quotas, don't count it. */ diff --git a/fs/btrfs/raid-stripe-tree.c b/fs/btrfs/raid-stripe-tree.c index 1834011ccc49..cab0b291088c 100644 --- a/fs/btrfs/raid-stripe-tree.c +++ b/fs/btrfs/raid-stripe-tree.c @@ -329,11 +329,14 @@ int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans, ret = btrfs_insert_item(trans, stripe_root, &stripe_key, stripe_extent, item_size); - if (ret == -EEXIST) + if (ret == -EEXIST) { ret = update_raid_extent_item(trans, &stripe_key, stripe_extent, item_size); - if (ret) + if (ret) + btrfs_abort_transaction(trans, ret); + } else if (ret) { btrfs_abort_transaction(trans, ret); + } kfree(stripe_extent); diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h deleted file mode 100644 index 1c2d7cb1fe6f..000000000000 --- a/fs/btrfs/rcu-string.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2012 Red Hat. All rights reserved. 
- */ - -#ifndef BTRFS_RCU_STRING_H -#define BTRFS_RCU_STRING_H - -#include <linux/types.h> -#include <linux/string.h> -#include <linux/slab.h> -#include <linux/rcupdate.h> -#include <linux/printk.h> - -struct rcu_string { - struct rcu_head rcu; - char str[]; -}; - -static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask) -{ - size_t len = strlen(src) + 1; - struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) + - (len * sizeof(char)), mask); - if (!ret) - return ret; - /* Warn if the source got unexpectedly truncated. */ - if (WARN_ON(strscpy(ret->str, src, len) < 0)) { - kfree(ret); - return NULL; - } - return ret; -} - -static inline void rcu_string_free(struct rcu_string *str) -{ - if (str) - kfree_rcu(str, rcu); -} - -#define printk_in_rcu(fmt, ...) do { \ - rcu_read_lock(); \ - printk(fmt, __VA_ARGS__); \ - rcu_read_unlock(); \ -} while (0) - -#define printk_ratelimited_in_rcu(fmt, ...) do { \ - rcu_read_lock(); \ - printk_ratelimited(fmt, __VA_ARGS__); \ - rcu_read_unlock(); \ -} while (0) - -#define rcu_str_deref(rcu_str) ({ \ - struct rcu_string *__str = rcu_dereference(rcu_str); \ - __str->str; \ -}) - -#endif diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index 2928abf7eb82..3871c3a6c743 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -75,69 +75,70 @@ struct block_entry { struct list_head actions; }; +static int block_entry_bytenr_key_cmp(const void *key, const struct rb_node *node) +{ + const u64 *bytenr = key; + const struct block_entry *entry = rb_entry(node, struct block_entry, node); + + if (entry->bytenr < *bytenr) + return 1; + else if (entry->bytenr > *bytenr) + return -1; + + return 0; +} + +static int block_entry_bytenr_cmp(struct rb_node *new, const struct rb_node *existing) +{ + const struct block_entry *new_entry = rb_entry(new, struct block_entry, node); + + return block_entry_bytenr_key_cmp(&new_entry->bytenr, existing); +} + static struct block_entry *insert_block_entry(struct rb_root *root, struct block_entry *be) { - struct rb_node **p = &root->rb_node; - struct rb_node *parent_node = NULL; - struct block_entry *entry; - - while (*p) { - parent_node = *p; - entry = rb_entry(parent_node, struct block_entry, node); - if (entry->bytenr > be->bytenr) - p = &(*p)->rb_left; - else if (entry->bytenr < be->bytenr) - p = &(*p)->rb_right; - else - return entry; - } + struct rb_node *node; - rb_link_node(&be->node, parent_node, p); - rb_insert_color(&be->node, root); - return NULL; + node = rb_find_add(&be->node, root, block_entry_bytenr_cmp); + return rb_entry_safe(node, struct block_entry, node); } static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr) { - struct rb_node *n; - struct block_entry *entry = NULL; + struct rb_node *node; - n = root->rb_node; - while (n) { - entry = rb_entry(n, struct block_entry, node); - if (entry->bytenr < bytenr) - n = n->rb_right; - else if (entry->bytenr > bytenr) - n = n->rb_left; - else - return entry; - } - return NULL; + node = rb_find(&bytenr, root, block_entry_bytenr_key_cmp); + return rb_entry_safe(node, struct block_entry, node); +} + +static int root_entry_root_objectid_key_cmp(const void *key, const struct rb_node *node) +{ + const u64 *objectid = key; + const struct root_entry *entry = rb_entry(node, struct root_entry, node); + + if (entry->root_objectid < *objectid) + return 1; + else if (entry->root_objectid > *objectid) + return -1; + + return 0; +} + +static int root_entry_root_objectid_cmp(struct rb_node *new, const struct rb_node 
*existing) +{ + const struct root_entry *new_entry = rb_entry(new, struct root_entry, node); + + return root_entry_root_objectid_key_cmp(&new_entry->root_objectid, existing); } static struct root_entry *insert_root_entry(struct rb_root *root, struct root_entry *re) { - struct rb_node **p = &root->rb_node; - struct rb_node *parent_node = NULL; - struct root_entry *entry; - - while (*p) { - parent_node = *p; - entry = rb_entry(parent_node, struct root_entry, node); - if (entry->root_objectid > re->root_objectid) - p = &(*p)->rb_left; - else if (entry->root_objectid < re->root_objectid) - p = &(*p)->rb_right; - else - return entry; - } - - rb_link_node(&re->node, parent_node, p); - rb_insert_color(&re->node, root); - return NULL; + struct rb_node *node; + node = rb_find_add(&re->node, root, root_entry_root_objectid_cmp); + return rb_entry_safe(node, struct root_entry, node); } static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2) @@ -161,48 +162,29 @@ static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2) return 0; } +static int ref_entry_cmp(struct rb_node *new, const struct rb_node *existing) +{ + struct ref_entry *new_entry = rb_entry(new, struct ref_entry, node); + struct ref_entry *existing_entry = rb_entry(existing, struct ref_entry, node); + + return comp_refs(new_entry, existing_entry); +} + static struct ref_entry *insert_ref_entry(struct rb_root *root, struct ref_entry *ref) { - struct rb_node **p = &root->rb_node; - struct rb_node *parent_node = NULL; - struct ref_entry *entry; - int cmp; - - while (*p) { - parent_node = *p; - entry = rb_entry(parent_node, struct ref_entry, node); - cmp = comp_refs(entry, ref); - if (cmp > 0) - p = &(*p)->rb_left; - else if (cmp < 0) - p = &(*p)->rb_right; - else - return entry; - } - - rb_link_node(&ref->node, parent_node, p); - rb_insert_color(&ref->node, root); - return NULL; + struct rb_node *node; + node = rb_find_add(&ref->node, root, ref_entry_cmp); + return rb_entry_safe(node, struct ref_entry, node); } static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid) { - struct rb_node *n; - struct root_entry *entry = NULL; + struct rb_node *node; - n = root->rb_node; - while (n) { - entry = rb_entry(n, struct root_entry, node); - if (entry->root_objectid < objectid) - n = n->rb_right; - else if (entry->root_objectid > objectid) - n = n->rb_left; - else - return entry; - } - return NULL; + node = rb_find(&objectid, root, root_entry_root_objectid_key_cmp); + return rb_entry_safe(node, struct root_entry, node); } #ifdef CONFIG_STACKTRACE @@ -668,7 +650,7 @@ static void dump_block_entry(struct btrfs_fs_info *fs_info, * our sanity checks pass as they are no longer needed. 
*/ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, - struct btrfs_ref *generic_ref) + const struct btrfs_ref *generic_ref) { struct ref_entry *ref = NULL, *exist; struct ref_action *ra = NULL; diff --git a/fs/btrfs/ref-verify.h b/fs/btrfs/ref-verify.h index 3511e1a5c96b..559bd25a2b7a 100644 --- a/fs/btrfs/ref-verify.h +++ b/fs/btrfs/ref-verify.h @@ -19,7 +19,7 @@ struct btrfs_ref; int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info); void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info); int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, - struct btrfs_ref *generic_ref); + const struct btrfs_ref *generic_ref); void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start, u64 len); @@ -39,7 +39,7 @@ static inline void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info) } static inline int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, - struct btrfs_ref *generic_ref) + const struct btrfs_ref *generic_ref) { return 0; } diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index 62161beca559..ce25ab7f0e99 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -46,11 +46,9 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans, if (ret) { btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); - goto out; + return ret; } - ret = btrfs_end_transaction(trans); -out: - return ret; + return btrfs_end_transaction(trans); } static int copy_inline_to_page(struct btrfs_inode *inode, @@ -95,8 +93,8 @@ static int copy_inline_to_page(struct btrfs_inode *inode, if (ret < 0) goto out_unlock; - btrfs_clear_extent_bits(&inode->io_tree, file_offset, range_end, - EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG); + btrfs_clear_extent_bit(&inode->io_tree, file_offset, range_end, + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, NULL); ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL); if (ret) goto out_unlock; @@ -270,11 +268,15 @@ copy_inline_extent: drop_args.end = aligned_end; drop_args.drop_cache = true; ret = btrfs_drop_extents(trans, root, inode, &drop_args); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto out; + } ret = btrfs_insert_empty_item(trans, root, path, new_key, size); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto out; + } write_extent_buffer(path->nodes[0], inline_data, btrfs_item_ptr_offset(path->nodes[0], @@ -283,6 +285,8 @@ copy_inline_extent: btrfs_update_inode_bytes(inode, datal, drop_args.bytes_found); btrfs_set_inode_full_sync(inode); ret = btrfs_inode_set_file_extent_range(inode, 0, aligned_end); + if (ret) + btrfs_abort_transaction(trans, ret); out: if (!ret && !trans) { /* @@ -297,10 +301,8 @@ out: trans = NULL; } } - if (ret && trans) { - btrfs_abort_transaction(trans, ret); + if (ret && trans) btrfs_end_transaction(trans); - } if (!ret) *trans_out = trans; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 02086191630d..e58151933844 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -90,10 +90,15 @@ * map address of tree root to tree */ struct mapping_node { - struct { - struct rb_node rb_node; - u64 bytenr; - }; /* Use rb_simle_node for search/insert */ + union { + /* Use rb_simple_node for search/insert */ + struct { + struct rb_node rb_node; + u64 bytenr; + }; + + struct rb_simple_node simple_node; + }; void *data; }; @@ -106,10 +111,15 @@ struct mapping_tree { * present a tree block to process */ struct tree_block { - struct { - struct rb_node rb_node; - u64 bytenr; - }; /* Use rb_simple_node for search/insert 
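The relocation.c hunk above wraps mapping_node's (and, just below, tree_block's) open-coded { rb_node, bytenr } pair in a union with struct rb_simple_node, so the same storage can be handed straight to the rb_simple_* helpers without casts; the anonymous struct and rb_simple_node must keep an identical leading layout for the two views to alias. A stripped-down sketch of the idea, where everything except the rb_simple_node concept is invented:

#include <linux/rbtree.h>
#include <linux/types.h>

/* Same shape as btrfs's rb_simple_node: an rb_node keyed by a u64. */
struct simple_key_node {
    struct rb_node rb_node;
    u64 bytenr;
};

struct demo_mapping {
    union {
        /* view used by existing open-coded accesses ... */
        struct {
            struct rb_node rb_node;
            u64 bytenr;
        };
        /* ... and the helper-friendly view, sharing the same bytes */
        struct simple_key_node simple_node;
    };
    void *data;
};

static u64 demo_show_alias(struct demo_mapping *m, u64 bytenr)
{
    m->bytenr = bytenr;              /* write through one view ...  */
    return m->simple_node.bytenr;    /* ... read through the other  */
}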
*/ + union { + /* Use rb_simple_node for search/insert */ + struct { + struct rb_node rb_node; + u64 bytenr; + }; + + struct rb_simple_node simple_node; + }; u64 owner; struct btrfs_key key; u8 level; @@ -480,8 +490,7 @@ static int __add_reloc_root(struct btrfs_root *root) node->data = root; spin_lock(&rc->reloc_root_tree.lock); - rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, - node->bytenr, &node->rb_node); + rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node); spin_unlock(&rc->reloc_root_tree.lock); if (rb_node) { btrfs_err(fs_info, @@ -564,8 +573,7 @@ static int __update_reloc_root(struct btrfs_root *root) spin_lock(&rc->reloc_root_tree.lock); node->bytenr = root->node->start; - rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, - node->bytenr, &node->rb_node); + rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node); spin_unlock(&rc->reloc_root_tree.lock); if (rb_node) btrfs_backref_panic(fs_info, node->bytenr, -EEXIST); @@ -1516,7 +1524,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { level = btrfs_root_level(root_item); - atomic_inc(&reloc_root->node->refs); + refcount_inc(&reloc_root->node->refs); path->nodes[level] = reloc_root->node; path->slots[level] = 0; } else { @@ -2617,7 +2625,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, * tree. */ if (block->owner && - (!is_fstree(block->owner) || + (!btrfs_is_fstree(block->owner) || block->owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) { ret = relocate_cowonly_block(trans, rc, block, path); if (ret) @@ -2658,66 +2666,24 @@ static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control u64 num_bytes; int nr; int ret = 0; - u64 i_size = i_size_read(&inode->vfs_inode); u64 prealloc_start = cluster->start - offset; u64 prealloc_end = cluster->end - offset; u64 cur_offset = prealloc_start; /* - * For subpage case, previous i_size may not be aligned to PAGE_SIZE. - * This means the range [i_size, PAGE_END + 1) is filled with zeros by - * btrfs_do_readpage() call of previously relocated file cluster. + * For blocksize < folio size case (either bs < page size or large folios), + * beyond i_size, all blocks are filled with zero. * - * If the current cluster starts in the above range, btrfs_do_readpage() + * If the current cluster covers the above range, btrfs_do_readpage() * will skip the read, and relocate_one_folio() will later writeback * the padding zeros as new data, causing data corruption. * - * Here we have to manually invalidate the range (i_size, PAGE_END + 1). + * Here we have to invalidate the cache covering our cluster. */ - if (!PAGE_ALIGNED(i_size)) { - struct address_space *mapping = inode->vfs_inode.i_mapping; - struct btrfs_fs_info *fs_info = inode->root->fs_info; - const u32 sectorsize = fs_info->sectorsize; - struct folio *folio; - - ASSERT(sectorsize < PAGE_SIZE); - ASSERT(IS_ALIGNED(i_size, sectorsize)); - - /* - * Subpage can't handle page with DIRTY but without UPTODATE - * bit as it can lead to the following deadlock: - * - * btrfs_read_folio() - * | Page already *locked* - * |- btrfs_lock_and_flush_ordered_range() - * |- btrfs_start_ordered_extent() - * |- extent_write_cache_pages() - * |- lock_page() - * We try to lock the page we already hold. - * - * Here we just writeback the whole data reloc inode, so that - * we will be ensured to have no dirty range in the page, and - * are safe to clear the uptodate bits. 
- * - * This shouldn't cause too much overhead, as we need to write - * the data back anyway. - */ - ret = filemap_write_and_wait(mapping); - if (ret < 0) - return ret; - - folio = filemap_lock_folio(mapping, i_size >> PAGE_SHIFT); - /* - * If page is freed we don't need to do anything then, as we - * will re-read the whole page anyway. - */ - if (!IS_ERR(folio)) { - btrfs_subpage_clear_uptodate(fs_info, folio, i_size, - round_up(i_size, PAGE_SIZE) - i_size); - folio_unlock(folio); - folio_put(folio); - } - } + ret = filemap_invalidate_inode(&inode->vfs_inode, true, prealloc_start, + prealloc_end); + if (ret < 0) + return ret; BUG_ON(cluster->start != cluster->boundary[0]); ret = btrfs_alloc_data_chunk_ondemand(inode, @@ -2806,13 +2772,15 @@ static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster, static int relocate_one_folio(struct reloc_control *rc, struct file_ra_state *ra, - int *cluster_nr, unsigned long index) + int *cluster_nr, u64 *file_offset_ret) { const struct file_extent_cluster *cluster = &rc->cluster; struct inode *inode = rc->data_inode; struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + const u64 orig_file_offset = *file_offset_ret; u64 offset = BTRFS_I(inode)->reloc_block_group_start; - const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT; + const pgoff_t last_index = (cluster->end - offset) >> PAGE_SHIFT; + const pgoff_t index = orig_file_offset >> PAGE_SHIFT; gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); struct folio *folio; u64 folio_start; @@ -2845,8 +2813,6 @@ again: return PTR_ERR(folio); } - WARN_ON(folio_order(folio)); - if (folio_test_readahead(folio) && !use_rst) page_cache_async_readahead(inode->i_mapping, ra, NULL, folio, last_index + 1 - index); @@ -2875,7 +2841,7 @@ again: goto release_folio; folio_start = folio_pos(folio); - folio_end = folio_start + PAGE_SIZE - 1; + folio_end = folio_start + folio_size(folio) - 1; /* * Start from the cluster, as for subpage case, the cluster can start @@ -2923,7 +2889,8 @@ again: * EXTENT_BOUNDARY bit prevents current extent from being merged * with previous extent. 
*/ - if (in_range(cluster->boundary[*cluster_nr] - offset, folio_start, PAGE_SIZE)) { + if (in_range(cluster->boundary[*cluster_nr] - offset, + folio_start, folio_size(folio))) { u64 boundary_start = cluster->boundary[*cluster_nr] - offset; u64 boundary_end = boundary_start + @@ -2953,6 +2920,7 @@ again: btrfs_throttle(fs_info); if (btrfs_should_cancel_balance(fs_info)) ret = -ECANCELED; + *file_offset_ret = folio_end + 1; return ret; release_folio: @@ -2966,8 +2934,7 @@ static int relocate_file_extent_cluster(struct reloc_control *rc) struct inode *inode = rc->data_inode; const struct file_extent_cluster *cluster = &rc->cluster; u64 offset = BTRFS_I(inode)->reloc_block_group_start; - unsigned long index; - unsigned long last_index; + u64 cur_file_offset = cluster->start - offset; struct file_ra_state *ra; int cluster_nr = 0; int ret = 0; @@ -2989,10 +2956,11 @@ static int relocate_file_extent_cluster(struct reloc_control *rc) if (ret) goto out; - last_index = (cluster->end - offset) >> PAGE_SHIFT; - for (index = (cluster->start - offset) >> PAGE_SHIFT; - index <= last_index && !ret; index++) - ret = relocate_one_folio(rc, ra, &cluster_nr, index); + while (cur_file_offset < cluster->end - offset) { + ret = relocate_one_folio(rc, ra, &cluster_nr, &cur_file_offset); + if (ret) + break; + } if (ret == 0) WARN_ON(cluster_nr != cluster->nr); out: @@ -3155,7 +3123,7 @@ static int add_tree_block(struct reloc_control *rc, block->key_ready = false; block->owner = owner; - rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node); + rb_node = rb_simple_insert(blocks, &block->simple_node); if (rb_node) btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr, -EEXIST); @@ -3643,7 +3611,7 @@ restart: } btrfs_release_path(path); - btrfs_clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); + btrfs_clear_extent_bit(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, NULL); if (trans) { btrfs_end_transaction_throttle(trans); @@ -3880,7 +3848,7 @@ static void free_reloc_control(struct reloc_control *rc) */ static void describe_relocation(struct btrfs_block_group *block_group) { - char buf[128] = {'\0'}; + char buf[128] = "NONE"; btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf)); @@ -3900,7 +3868,8 @@ static const char *stage_to_string(enum reloc_stage stage) /* * function to relocate all extents in a block group. 
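relocate_one_folio() above now reports back the byte offset just past the folio it handled, and relocate_file_extent_cluster() advances by that instead of stepping through fixed PAGE_SIZE indexes, so a large folio is walked in one go. A minimal sketch of that iteration shape over a byte range, assuming an invented per-folio worker:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical worker: handles one folio, reports the next byte offset. */
static int demo_process_one_folio(struct address_space *mapping, u64 *file_offset)
{
    struct folio *folio;

    folio = filemap_lock_folio(mapping, *file_offset >> PAGE_SHIFT);
    if (IS_ERR(folio))
        return PTR_ERR(folio);

    /* ... per-folio work would go here ... */

    /* Advance past however large this folio actually is. */
    *file_offset = folio_pos(folio) + folio_size(folio);
    folio_unlock(folio);
    folio_put(folio);
    return 0;
}

static int demo_process_range(struct address_space *mapping, u64 start, u64 end)
{
    u64 cur = start;
    int ret = 0;

    while (cur < end) {
        ret = demo_process_one_folio(mapping, &cur);
        if (ret)
            break;
    }
    return ret;
}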
*/ -int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) +int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start, + bool verbose) { struct btrfs_block_group *bg; struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start); @@ -3992,7 +3961,8 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) goto out; } - describe_relocation(rc->block_group); + if (verbose) + describe_relocation(rc->block_group); btrfs_wait_block_group_reservations(rc->block_group); btrfs_wait_nocow_writers(rc->block_group); @@ -4036,8 +4006,10 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) if (rc->extents_found == 0) break; - btrfs_info(fs_info, "found %llu extents, stage: %s", - rc->extents_found, stage_to_string(finishes_stage)); + if (verbose) + btrfs_info(fs_info, "found %llu extents, stage: %s", + rc->extents_found, + stage_to_string(finishes_stage)); } WARN_ON(rc->block_group->pinned > 0); @@ -4339,7 +4311,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, } btrfs_backref_drop_node_buffer(node); - atomic_inc(&cow->refs); + refcount_inc(&cow->refs); node->eb = cow; node->new_bytenr = cow->start; diff --git a/fs/btrfs/relocation.h b/fs/btrfs/relocation.h index 788c86d8633a..5c36b3f84b57 100644 --- a/fs/btrfs/relocation.h +++ b/fs/btrfs/relocation.h @@ -12,7 +12,8 @@ struct btrfs_trans_handle; struct btrfs_ordered_extent; struct btrfs_pending_snapshot; -int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start); +int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start, + bool verbose); int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_root *root); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 7cd5e76a783c..6776e6ab8d10 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -556,7 +556,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes, * hold all of the paths here */ for (i = 0; i < ipath->fspath->elem_cnt; ++i) - btrfs_warn_in_rcu(fs_info, + btrfs_warn(fs_info, "scrub: %s at logical %llu on dev %s, physical %llu root %llu inode %llu offset %llu length %u links %u (path: %s)", swarn->errstr, swarn->logical, btrfs_dev_name(swarn->dev), @@ -570,7 +570,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes, return 0; err: - btrfs_warn_in_rcu(fs_info, + btrfs_warn(fs_info, "scrub: %s at logical %llu on dev %s, physical %llu root %llu inode %llu offset %llu: path resolving failed with ret=%d", swarn->errstr, swarn->logical, btrfs_dev_name(swarn->dev), @@ -596,7 +596,7 @@ static void scrub_print_common_warning(const char *errstr, struct btrfs_device * /* Super block error, no need to search extent tree. */ if (is_super) { - btrfs_warn_in_rcu(fs_info, "scrub: %s on device %s, physical %llu", + btrfs_warn(fs_info, "scrub: %s on device %s, physical %llu", errstr, btrfs_dev_name(dev), physical); return; } @@ -637,7 +637,7 @@ static void scrub_print_common_warning(const char *errstr, struct btrfs_device * } if (ret > 0) break; - btrfs_warn_in_rcu(fs_info, + btrfs_warn(fs_info, "scrub: %s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu", errstr, swarn.logical, btrfs_dev_name(dev), swarn.physical, (ref_level ? 
"node" : "leaf"), @@ -1045,12 +1045,12 @@ skip: */ if (repaired) { if (dev) { - btrfs_err_rl_in_rcu(fs_info, + btrfs_err_rl(fs_info, "scrub: fixed up error at logical %llu on dev %s physical %llu", stripe->logical, btrfs_dev_name(dev), physical); } else { - btrfs_err_rl_in_rcu(fs_info, + btrfs_err_rl(fs_info, "scrub: fixed up error at logical %llu on mirror %u", stripe->logical, stripe->mirror_num); } @@ -1059,12 +1059,12 @@ skip: /* The remaining are all for unrepaired. */ if (dev) { - btrfs_err_rl_in_rcu(fs_info, + btrfs_err_rl(fs_info, "scrub: unable to fixup (regular) error at logical %llu on dev %s physical %llu", stripe->logical, btrfs_dev_name(dev), physical); } else { - btrfs_err_rl_in_rcu(fs_info, + btrfs_err_rl(fs_info, "scrub: unable to fixup (regular) error at logical %llu on mirror %u", stripe->logical, stripe->mirror_num); } @@ -1806,7 +1806,7 @@ static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe) struct btrfs_io_context *bioc = NULL; const u64 logical = stripe->logical + (i << fs_info->sectorsize_bits); - int err; + int ret; io_stripe.rst_search_commit_root = true; stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits; @@ -1814,11 +1814,11 @@ static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe) * For RST cases, we need to manually split the bbio to * follow the RST boundary. */ - err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, + ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &stripe_len, &bioc, &io_stripe, &mirror); btrfs_put_bioc(bioc); - if (err < 0) { - if (err != -ENODATA) { + if (ret < 0) { + if (ret != -ENODATA) { /* * Earlier btrfs_get_raid_extent_offset() * returned -ENODATA, which means there's @@ -3057,7 +3057,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, if (!is_dev_replace && !readonly && !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); - btrfs_err_in_rcu(fs_info, + btrfs_err(fs_info, "scrub: devid %llu: filesystem on %s is not writable", devid, btrfs_dev_name(dev)); ret = -EROFS; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 2891ec4056c6..7664025a5af4 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -4,6 +4,7 @@ */ #include <linux/bsearch.h> +#include <linux/falloc.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/sort.h> @@ -758,7 +759,7 @@ static int send_header(struct send_ctx *sctx) { struct btrfs_stream_header hdr; - strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC); + strscpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC); hdr.version = cpu_to_le32(sctx->proto); return write_buf(sctx->send_filp, &hdr, sizeof(hdr), &sctx->send_off); @@ -1804,7 +1805,7 @@ static int gen_unique_name(struct send_ctx *sctx, ino, gen, idx); ASSERT(len < sizeof(tmp)); tmp_name.name = tmp; - tmp_name.len = strlen(tmp); + tmp_name.len = len; di = btrfs_lookup_dir_item(NULL, sctx->send_root, path, BTRFS_FIRST_FREE_OBJECTID, @@ -1843,7 +1844,7 @@ static int gen_unique_name(struct send_ctx *sctx, break; } - ret = fs_path_add(dest, tmp, strlen(tmp)); + ret = fs_path_add(dest, tmp, len); out: btrfs_free_path(path); @@ -4628,7 +4629,6 @@ static int rbtree_ref_comp(const void *k, const struct rb_node *node) { const struct recorded_ref *data = k; const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node); - int result; if (data->dir > ref->dir) return 1; @@ -4642,12 +4642,7 @@ static int rbtree_ref_comp(const void *k, const struct rb_node *node) return 1; if (data->name_len < ref->name_len) return -1; - result 
= strcmp(data->name, ref->name); - if (result > 0) - return 1; - if (result < 0) - return -1; - return 0; + return strcmp(data->name, ref->name); } static bool rbtree_ref_less(struct rb_node *node, const struct rb_node *parent) @@ -5411,6 +5406,30 @@ tlv_put_failure: return ret; } +static int send_fallocate(struct send_ctx *sctx, u32 mode, u64 offset, u64 len) +{ + struct fs_path *path; + int ret; + + path = get_cur_inode_path(sctx); + if (IS_ERR(path)) + return PTR_ERR(path); + + ret = begin_cmd(sctx, BTRFS_SEND_C_FALLOCATE); + if (ret < 0) + return ret; + + TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); + TLV_PUT_U32(sctx, BTRFS_SEND_A_FALLOCATE_MODE, mode); + TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); + TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); + + ret = send_cmd(sctx); + +tlv_put_failure: + return ret; +} + static int send_hole(struct send_ctx *sctx, u64 end) { struct fs_path *p = NULL; @@ -5419,6 +5438,14 @@ static int send_hole(struct send_ctx *sctx, u64 end) int ret = 0; /* + * Starting with send stream v2 we have fallocate and can use it to + * punch holes instead of sending writes full of zeroes. + */ + if (proto_cmd_ok(sctx, BTRFS_SEND_C_FALLOCATE)) + return send_fallocate(sctx, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, + offset, end - offset); + + /* * A hole that starts at EOF or beyond it. Since we do not yet support * fallocate (for extent preallocation and hole punching), sending a * write of zeroes starting at EOF or beyond would later require issuing diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index d9087aa81b21..0481c693ac2e 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -615,7 +615,7 @@ static void __btrfs_dump_space_info(const struct btrfs_fs_info *fs_info, void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, struct btrfs_space_info *info, u64 bytes, - int dump_block_groups) + bool dump_block_groups) { struct btrfs_block_group *cache; u64 total_avail = 0; @@ -1887,7 +1887,7 @@ int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, space_info->flags, orig_bytes, 1); if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) - btrfs_dump_space_info(fs_info, space_info, orig_bytes, 0); + btrfs_dump_space_info(fs_info, space_info, orig_bytes, false); } return ret; } @@ -1918,7 +1918,7 @@ int btrfs_reserve_data_bytes(struct btrfs_space_info *space_info, u64 bytes, trace_btrfs_space_reservation(fs_info, "space_info:enospc", space_info->flags, bytes, 1); if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) - btrfs_dump_space_info(fs_info, space_info, bytes, 0); + btrfs_dump_space_info(fs_info, space_info, bytes, false); } return ret; } @@ -1973,13 +1973,13 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) static u64 calc_pct_ratio(u64 x, u64 y) { - int err; + int ret; if (!y) return 0; again: - err = check_mul_overflow(100, x, &x); - if (err) + ret = check_mul_overflow(100, x, &x); + if (ret) goto lose_precision; return div64_u64(x, y); lose_precision: @@ -2139,7 +2139,7 @@ void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool } } -bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info) +static bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info) { bool ret; diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index 92b7f5e2b850..679f22efb407 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -278,7 +278,7 @@ u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info, void btrfs_clear_space_info_full(struct btrfs_fs_info 
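With the send_hole() change above, a v2 send stream emits a BTRFS_SEND_C_FALLOCATE command for holes rather than writes full of zeroes; a receiver can implement that command with an ordinary fallocate() punch-hole on the target file. A small user-space sketch of the equivalent syscall, using a placeholder path and made-up offsets:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/tmp/demo-file", O_RDWR);

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Deallocate 1 MiB at offset 4 MiB without changing the file size. */
    if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                  4 << 20, 1 << 20) != 0)
        perror("fallocate");

    close(fd);
    return 0;
}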
*info); void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, struct btrfs_space_info *info, u64 bytes, - int dump_block_groups); + bool dump_block_groups); int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 orig_bytes, @@ -306,7 +306,6 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes); void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready); -bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info); int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info); void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info); void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len); diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c index d4f019233493..c9b3821957f7 100644 --- a/fs/btrfs/subpage.c +++ b/fs/btrfs/subpage.c @@ -49,7 +49,7 @@ * Implementation: * * - Common - * Both metadata and data will use a new structure, btrfs_subpage, to + * Both metadata and data will use a new structure, btrfs_folio_state, to * record the status of each sector inside a page. This provides the extra * granularity needed. * @@ -63,10 +63,10 @@ * This means a slightly higher tree locking latency. */ -int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info, - struct folio *folio, enum btrfs_subpage_type type) +int btrfs_attach_folio_state(const struct btrfs_fs_info *fs_info, + struct folio *folio, enum btrfs_folio_type type) { - struct btrfs_subpage *subpage; + struct btrfs_folio_state *bfs; /* For metadata we don't support large folio yet. */ if (type == BTRFS_SUBPAGE_METADATA) @@ -87,18 +87,18 @@ int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info, if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio)) return 0; - subpage = btrfs_alloc_subpage(fs_info, folio_size(folio), type); - if (IS_ERR(subpage)) - return PTR_ERR(subpage); + bfs = btrfs_alloc_folio_state(fs_info, folio_size(folio), type); + if (IS_ERR(bfs)) + return PTR_ERR(bfs); - folio_attach_private(folio, subpage); + folio_attach_private(folio, bfs); return 0; } -void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio, - enum btrfs_subpage_type type) +void btrfs_detach_folio_state(const struct btrfs_fs_info *fs_info, struct folio *folio, + enum btrfs_folio_type type) { - struct btrfs_subpage *subpage; + struct btrfs_folio_state *bfs; /* Either not subpage, or the folio already has private attached. 
*/ if (!folio_test_private(folio)) @@ -108,15 +108,15 @@ void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *fol if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio)) return; - subpage = folio_detach_private(folio); - ASSERT(subpage); - btrfs_free_subpage(subpage); + bfs = folio_detach_private(folio); + ASSERT(bfs); + btrfs_free_folio_state(bfs); } -struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info, - size_t fsize, enum btrfs_subpage_type type) +struct btrfs_folio_state *btrfs_alloc_folio_state(const struct btrfs_fs_info *fs_info, + size_t fsize, enum btrfs_folio_type type) { - struct btrfs_subpage *ret; + struct btrfs_folio_state *ret; unsigned int real_size; ASSERT(fs_info->sectorsize < fsize); @@ -136,11 +136,6 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info, return ret; } -void btrfs_free_subpage(struct btrfs_subpage *subpage) -{ - kfree(subpage); -} - /* * Increase the eb_refs of current subpage. * @@ -152,7 +147,7 @@ void btrfs_free_subpage(struct btrfs_subpage *subpage) */ void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio) { - struct btrfs_subpage *subpage; + struct btrfs_folio_state *bfs; if (!btrfs_meta_is_subpage(fs_info)) return; @@ -160,13 +155,13 @@ void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio * ASSERT(folio_test_private(folio) && folio->mapping); lockdep_assert_held(&folio->mapping->i_private_lock); - subpage = folio_get_private(folio); - atomic_inc(&subpage->eb_refs); + bfs = folio_get_private(folio); + atomic_inc(&bfs->eb_refs); } void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio) { - struct btrfs_subpage *subpage; + struct btrfs_folio_state *bfs; if (!btrfs_meta_is_subpage(fs_info)) return; @@ -174,9 +169,9 @@ void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio * ASSERT(folio_test_private(folio) && folio->mapping); lockdep_assert_held(&folio->mapping->i_private_lock); - subpage = folio_get_private(folio); - ASSERT(atomic_read(&subpage->eb_refs)); - atomic_dec(&subpage->eb_refs); + bfs = folio_get_private(folio); + ASSERT(atomic_read(&bfs->eb_refs)); + atomic_dec(&bfs->eb_refs); } static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info, @@ -191,8 +186,9 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info, * unmapped page like dummy extent buffer pages. 
*/ if (folio->mapping) - ASSERT(folio_pos(folio) <= start && - start + len <= folio_pos(folio) + folio_size(folio)); + ASSERT(folio_pos(folio) <= start && start + len <= folio_end(folio), + "start=%llu len=%u folio_pos=%llu folio_size=%zu", + start, len, folio_pos(folio), folio_size(folio)); } #define subpage_calc_start_bit(fs_info, folio, name, start, len) \ @@ -221,14 +217,13 @@ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len) if (folio_pos(folio) >= orig_start + orig_len) *len = 0; else - *len = min_t(u64, folio_pos(folio) + folio_size(folio), - orig_start + orig_len) - *start; + *len = min_t(u64, folio_end(folio), orig_start + orig_len) - *start; } static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); const int nbits = (len >> fs_info->sectorsize_bits); unsigned long flags; @@ -238,7 +233,7 @@ static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info, btrfs_subpage_assert(fs_info, folio, start, len); - spin_lock_irqsave(&subpage->lock, flags); + spin_lock_irqsave(&bfs->lock, flags); /* * We have call sites passing @lock_page into * extent_clear_unlock_delalloc() for compression path. @@ -246,18 +241,18 @@ static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info, * This @locked_page is locked by plain lock_page(), thus its * subpage::locked is 0. Handle them in a special way. */ - if (atomic_read(&subpage->nr_locked) == 0) { - spin_unlock_irqrestore(&subpage->lock, flags); + if (atomic_read(&bfs->nr_locked) == 0) { + spin_unlock_irqrestore(&bfs->lock, flags); return true; } - for_each_set_bit_from(bit, subpage->bitmaps, start_bit + nbits) { - clear_bit(bit, subpage->bitmaps); + for_each_set_bit_from(bit, bfs->bitmaps, start_bit + nbits) { + clear_bit(bit, bfs->bitmaps); cleared++; } - ASSERT(atomic_read(&subpage->nr_locked) >= cleared); - last = atomic_sub_and_test(cleared, &subpage->nr_locked); - spin_unlock_irqrestore(&subpage->lock, flags); + ASSERT(atomic_read(&bfs->nr_locked) >= cleared); + last = atomic_sub_and_test(cleared, &bfs->nr_locked); + spin_unlock_irqrestore(&bfs->lock, flags); return last; } @@ -280,7 +275,7 @@ static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info, void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); ASSERT(folio_test_locked(folio)); @@ -296,7 +291,7 @@ void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info, * Since we own the page lock, no one else could touch subpage::locked * and we are safe to do several atomic operations without spinlock. */ - if (atomic_read(&subpage->nr_locked) == 0) { + if (atomic_read(&bfs->nr_locked) == 0) { /* No subpage lock, locked by plain lock_page(). 
*/ folio_unlock(folio); return; @@ -310,7 +305,7 @@ void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info, void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info, struct folio *folio, unsigned long bitmap) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio); const int start_bit = blocks_per_folio * btrfs_bitmap_nr_locked; unsigned long flags; @@ -323,42 +318,42 @@ void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info, return; } - if (atomic_read(&subpage->nr_locked) == 0) { + if (atomic_read(&bfs->nr_locked) == 0) { /* No subpage lock, locked by plain lock_page(). */ folio_unlock(folio); return; } - spin_lock_irqsave(&subpage->lock, flags); + spin_lock_irqsave(&bfs->lock, flags); for_each_set_bit(bit, &bitmap, blocks_per_folio) { - if (test_and_clear_bit(bit + start_bit, subpage->bitmaps)) + if (test_and_clear_bit(bit + start_bit, bfs->bitmaps)) cleared++; } - ASSERT(atomic_read(&subpage->nr_locked) >= cleared); - last = atomic_sub_and_test(cleared, &subpage->nr_locked); - spin_unlock_irqrestore(&subpage->lock, flags); + ASSERT(atomic_read(&bfs->nr_locked) >= cleared); + last = atomic_sub_and_test(cleared, &bfs->nr_locked); + spin_unlock_irqrestore(&bfs->lock, flags); if (last) folio_unlock(folio); } #define subpage_test_bitmap_all_set(fs_info, folio, name) \ ({ \ - struct btrfs_subpage *subpage = folio_get_private(folio); \ + struct btrfs_folio_state *bfs = folio_get_private(folio); \ const unsigned int blocks_per_folio = \ btrfs_blocks_per_folio(fs_info, folio); \ \ - bitmap_test_range_all_set(subpage->bitmaps, \ + bitmap_test_range_all_set(bfs->bitmaps, \ blocks_per_folio * btrfs_bitmap_nr_##name, \ blocks_per_folio); \ }) #define subpage_test_bitmap_all_zero(fs_info, folio, name) \ ({ \ - struct btrfs_subpage *subpage = folio_get_private(folio); \ + struct btrfs_folio_state *bfs = folio_get_private(folio); \ const unsigned int blocks_per_folio = \ btrfs_blocks_per_folio(fs_info, folio); \ \ - bitmap_test_range_all_zero(subpage->bitmaps, \ + bitmap_test_range_all_zero(bfs->bitmaps, \ blocks_per_folio * btrfs_bitmap_nr_##name, \ blocks_per_folio); \ }) @@ -366,43 +361,43 @@ void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info, void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, uptodate, start, len); unsigned long flags; - spin_lock_irqsave(&subpage->lock, flags); - bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); + spin_lock_irqsave(&bfs->lock, flags); + bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (subpage_test_bitmap_all_set(fs_info, folio, uptodate)) folio_mark_uptodate(folio); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); } void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, uptodate, start, len); unsigned long flags; - spin_lock_irqsave(&subpage->lock, flags); - bitmap_clear(subpage->bitmaps, start_bit, 
len >> fs_info->sectorsize_bits); + spin_lock_irqsave(&bfs->lock, flags); + bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); folio_clear_uptodate(folio); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); } void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len); unsigned long flags; - spin_lock_irqsave(&subpage->lock, flags); - bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_lock_irqsave(&bfs->lock, flags); + bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); + spin_unlock_irqrestore(&bfs->lock, flags); folio_mark_dirty(folio); } @@ -419,17 +414,17 @@ void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info, bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len); unsigned long flags; bool last = false; - spin_lock_irqsave(&subpage->lock, flags); - bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); + spin_lock_irqsave(&bfs->lock, flags); + bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (subpage_test_bitmap_all_zero(fs_info, folio, dirty)) last = true; - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); return last; } @@ -446,91 +441,91 @@ void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info, void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, writeback, start, len); unsigned long flags; - spin_lock_irqsave(&subpage->lock, flags); - bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); + spin_lock_irqsave(&bfs->lock, flags); + bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (!folio_test_writeback(folio)) folio_start_writeback(folio); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); } void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, writeback, start, len); unsigned long flags; - spin_lock_irqsave(&subpage->lock, flags); - bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); + spin_lock_irqsave(&bfs->lock, flags); + bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (subpage_test_bitmap_all_zero(fs_info, folio, writeback)) { ASSERT(folio_test_writeback(folio)); folio_end_writeback(folio); } - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); } void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 
start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, ordered, start, len); unsigned long flags; - spin_lock_irqsave(&subpage->lock, flags); - bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); + spin_lock_irqsave(&bfs->lock, flags); + bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); folio_set_ordered(folio); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); } void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, ordered, start, len); unsigned long flags; - spin_lock_irqsave(&subpage->lock, flags); - bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); + spin_lock_irqsave(&bfs->lock, flags); + bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (subpage_test_bitmap_all_zero(fs_info, folio, ordered)) folio_clear_ordered(folio); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); } void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, checked, start, len); unsigned long flags; - spin_lock_irqsave(&subpage->lock, flags); - bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); + spin_lock_irqsave(&bfs->lock, flags); + bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (subpage_test_bitmap_all_set(fs_info, folio, checked)) folio_set_checked(folio); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); } void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage = folio_get_private(folio); + struct btrfs_folio_state *bfs = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, checked, start, len); unsigned long flags; - spin_lock_irqsave(&subpage->lock, flags); - bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); + spin_lock_irqsave(&bfs->lock, flags); + bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); folio_clear_checked(folio); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); } /* @@ -541,16 +536,16 @@ void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info, bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \ struct folio *folio, u64 start, u32 len) \ { \ - struct btrfs_subpage *subpage = folio_get_private(folio); \ + struct btrfs_folio_state *bfs = folio_get_private(folio); \ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, \ name, start, len); \ unsigned long flags; \ bool ret; \ \ - spin_lock_irqsave(&subpage->lock, flags); \ - ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit, \ + spin_lock_irqsave(&bfs->lock, flags); \ + ret = bitmap_test_range_all_set(bfs->bitmaps, start_bit, \ len >> fs_info->sectorsize_bits); \ - 
spin_unlock_irqrestore(&subpage->lock, flags); \ + spin_unlock_irqrestore(&bfs->lock, flags); \ return ret; \ } IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate); @@ -662,10 +657,10 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked, { \ const unsigned int blocks_per_folio = \ btrfs_blocks_per_folio(fs_info, folio); \ - const struct btrfs_subpage *subpage = folio_get_private(folio); \ + const struct btrfs_folio_state *bfs = folio_get_private(folio); \ \ ASSERT(blocks_per_folio <= BITS_PER_LONG); \ - *dst = bitmap_read(subpage->bitmaps, \ + *dst = bitmap_read(bfs->bitmaps, \ blocks_per_folio * btrfs_bitmap_nr_##name, \ blocks_per_folio); \ } @@ -690,7 +685,7 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked, void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage; + struct btrfs_folio_state *bfs; unsigned int start_bit; unsigned int nbits; unsigned long flags; @@ -705,15 +700,15 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len); nbits = len >> fs_info->sectorsize_bits; - subpage = folio_get_private(folio); - ASSERT(subpage); - spin_lock_irqsave(&subpage->lock, flags); - if (unlikely(!bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits))) { + bfs = folio_get_private(folio); + ASSERT(bfs); + spin_lock_irqsave(&bfs->lock, flags); + if (unlikely(!bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits))) { SUBPAGE_DUMP_BITMAP(fs_info, folio, dirty, start, len); - ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); + ASSERT(bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits)); } - ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); - spin_unlock_irqrestore(&subpage->lock, flags); + ASSERT(bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits)); + spin_unlock_irqrestore(&bfs->lock, flags); } /* @@ -726,7 +721,7 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage; + struct btrfs_folio_state *bfs; unsigned long flags; unsigned int start_bit; unsigned int nbits; @@ -736,19 +731,19 @@ void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info, if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio)) return; - subpage = folio_get_private(folio); + bfs = folio_get_private(folio); start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); nbits = len >> fs_info->sectorsize_bits; - spin_lock_irqsave(&subpage->lock, flags); + spin_lock_irqsave(&bfs->lock, flags); /* Target range should not yet be locked. 
*/ - if (unlikely(!bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits))) { + if (unlikely(!bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits))) { SUBPAGE_DUMP_BITMAP(fs_info, folio, locked, start, len); - ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); + ASSERT(bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits)); } - bitmap_set(subpage->bitmaps, start_bit, nbits); - ret = atomic_add_return(nbits, &subpage->nr_locked); + bitmap_set(bfs->bitmaps, start_bit, nbits); + ret = atomic_add_return(nbits, &bfs->nr_locked); ASSERT(ret <= btrfs_blocks_per_folio(fs_info, folio)); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); } /* @@ -776,7 +771,7 @@ bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct ext void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - struct btrfs_subpage *subpage; + struct btrfs_folio_state *bfs; const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio); unsigned long uptodate_bitmap; unsigned long dirty_bitmap; @@ -788,18 +783,18 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, ASSERT(folio_test_private(folio) && folio_get_private(folio)); ASSERT(blocks_per_folio > 1); - subpage = folio_get_private(folio); + bfs = folio_get_private(folio); - spin_lock_irqsave(&subpage->lock, flags); + spin_lock_irqsave(&bfs->lock, flags); GET_SUBPAGE_BITMAP(fs_info, folio, uptodate, &uptodate_bitmap); GET_SUBPAGE_BITMAP(fs_info, folio, dirty, &dirty_bitmap); GET_SUBPAGE_BITMAP(fs_info, folio, writeback, &writeback_bitmap); GET_SUBPAGE_BITMAP(fs_info, folio, ordered, &ordered_bitmap); GET_SUBPAGE_BITMAP(fs_info, folio, checked, &checked_bitmap); GET_SUBPAGE_BITMAP(fs_info, folio, locked, &locked_bitmap); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); - dump_page(folio_page(folio, 0), "btrfs subpage dump"); + dump_page(folio_page(folio, 0), "btrfs folio state dump"); btrfs_warn(fs_info, "start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl", start, len, folio_pos(folio), @@ -815,14 +810,14 @@ void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info, struct folio *folio, unsigned long *ret_bitmap) { - struct btrfs_subpage *subpage; + struct btrfs_folio_state *bfs; unsigned long flags; ASSERT(folio_test_private(folio) && folio_get_private(folio)); ASSERT(btrfs_blocks_per_folio(fs_info, folio) > 1); - subpage = folio_get_private(folio); + bfs = folio_get_private(folio); - spin_lock_irqsave(&subpage->lock, flags); + spin_lock_irqsave(&bfs->lock, flags); GET_SUBPAGE_BITMAP(fs_info, folio, dirty, ret_bitmap); - spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock_irqrestore(&bfs->lock, flags); } diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h index 3042c5ea840a..ee0710eb13fd 100644 --- a/fs/btrfs/subpage.h +++ b/fs/btrfs/subpage.h @@ -32,9 +32,31 @@ struct folio; enum { btrfs_bitmap_nr_uptodate = 0, btrfs_bitmap_nr_dirty, + + /* + * This can be changed to atomic eventually. But this change will rely + * on the async delalloc range rework for locked bitmap. As async + * delalloc can unlock its range and mark blocks writeback at random + * timing. + */ btrfs_bitmap_nr_writeback, + + /* + * The ordered and checked flags are for COW fixup, already marked + * deprecated, and will be removed eventually. 
+ */ btrfs_bitmap_nr_ordered, btrfs_bitmap_nr_checked, + + /* + * The locked bit is for async delalloc range (compression), currently + * async extent is queued with the range locked, until the compression + * is done. + * So an async extent can unlock the range at any random timing. + * + * This will need a rework on the async extent lifespan (mark writeback + * and do compression) before deprecating this flag. + */ btrfs_bitmap_nr_locked, btrfs_bitmap_nr_max }; @@ -43,7 +65,7 @@ enum { * Structure to trace status of each sector inside a page, attached to * page::private for both data and metadata inodes. */ -struct btrfs_subpage { +struct btrfs_folio_state { /* Common members for both data and metadata pages */ spinlock_t lock; union { @@ -51,7 +73,7 @@ struct btrfs_subpage { * Structures only used by metadata * * @eb_refs should only be operated under private_lock, as it - * manages whether the subpage can be detached. + * manages whether the btrfs_folio_state can be detached. */ atomic_t eb_refs; @@ -65,12 +87,11 @@ struct btrfs_subpage { unsigned long bitmaps[]; }; -enum btrfs_subpage_type { +enum btrfs_folio_type { BTRFS_SUBPAGE_METADATA, BTRFS_SUBPAGE_DATA, }; -#if PAGE_SIZE > BTRFS_MIN_BLOCKSIZE /* * Subpage support for metadata is more complex, as we can have dummy extent * buffers, where folios have no mapping to determine the owning inode. @@ -91,29 +112,19 @@ static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, ASSERT(is_data_inode(BTRFS_I(folio->mapping->host))); return fs_info->sectorsize < folio_size(folio); } -#else -static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info) -{ - return false; -} -static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, - struct folio *folio) -{ - if (folio->mapping && folio->mapping->host) - ASSERT(is_data_inode(BTRFS_I(folio->mapping->host))); - return false; -} -#endif -int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info, - struct folio *folio, enum btrfs_subpage_type type); -void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio, - enum btrfs_subpage_type type); +int btrfs_attach_folio_state(const struct btrfs_fs_info *fs_info, + struct folio *folio, enum btrfs_folio_type type); +void btrfs_detach_folio_state(const struct btrfs_fs_info *fs_info, struct folio *folio, + enum btrfs_folio_type type); /* Allocate additional data where page represents more than one sector */ -struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info, - size_t fsize, enum btrfs_subpage_type type); -void btrfs_free_subpage(struct btrfs_subpage *subpage); +struct btrfs_folio_state *btrfs_alloc_folio_state(const struct btrfs_fs_info *fs_info, + size_t fsize, enum btrfs_folio_type type); +static inline void btrfs_free_folio_state(struct btrfs_folio_state *bfs) +{ + kfree(bfs); +} void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio); void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a0c65adce1ab..68e35a3700ff 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -261,10 +261,65 @@ static const struct fs_parameter_spec btrfs_fs_parameters[] = { {} }; -/* No support for restricting writes to btrfs devices yet... 
*/ -static inline blk_mode_t btrfs_open_mode(struct fs_context *fc) +static bool btrfs_match_compress_type(const char *string, const char *type, bool may_have_level) { - return sb_open_mode(fc->sb_flags) & ~BLK_OPEN_RESTRICT_WRITES; + const int len = strlen(type); + + return (strncmp(string, type, len) == 0) && + ((may_have_level && string[len] == ':') || string[len] == '\0'); +} + +static int btrfs_parse_compress(struct btrfs_fs_context *ctx, + const struct fs_parameter *param, int opt) +{ + const char *string = param->string; + + /* + * Provide the same semantics as older kernels that don't use fs + * context, specifying the "compress" option clears "force-compress" + * without the need to pass "compress-force=[no|none]" before + * specifying "compress". + */ + if (opt != Opt_compress_force && opt != Opt_compress_force_type) + btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); + + if (opt == Opt_compress || opt == Opt_compress_force) { + ctx->compress_type = BTRFS_COMPRESS_ZLIB; + ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL; + btrfs_set_opt(ctx->mount_opt, COMPRESS); + btrfs_clear_opt(ctx->mount_opt, NODATACOW); + btrfs_clear_opt(ctx->mount_opt, NODATASUM); + } else if (btrfs_match_compress_type(string, "zlib", true)) { + ctx->compress_type = BTRFS_COMPRESS_ZLIB; + ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, + string + 4); + btrfs_set_opt(ctx->mount_opt, COMPRESS); + btrfs_clear_opt(ctx->mount_opt, NODATACOW); + btrfs_clear_opt(ctx->mount_opt, NODATASUM); + } else if (btrfs_match_compress_type(string, "lzo", false)) { + ctx->compress_type = BTRFS_COMPRESS_LZO; + ctx->compress_level = 0; + btrfs_set_opt(ctx->mount_opt, COMPRESS); + btrfs_clear_opt(ctx->mount_opt, NODATACOW); + btrfs_clear_opt(ctx->mount_opt, NODATASUM); + } else if (btrfs_match_compress_type(string, "zstd", true)) { + ctx->compress_type = BTRFS_COMPRESS_ZSTD; + ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, + string + 4); + btrfs_set_opt(ctx->mount_opt, COMPRESS); + btrfs_clear_opt(ctx->mount_opt, NODATACOW); + btrfs_clear_opt(ctx->mount_opt, NODATASUM); + } else if (btrfs_match_compress_type(string, "no", false) || + btrfs_match_compress_type(string, "none", false)) { + ctx->compress_level = 0; + ctx->compress_type = 0; + btrfs_clear_opt(ctx->mount_opt, COMPRESS); + btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); + } else { + btrfs_err(NULL, "unrecognized compression value %s", string); + return -EINVAL; + } + return 0; } static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param) @@ -303,10 +358,9 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param) break; case Opt_device: { struct btrfs_device *device; - blk_mode_t mode = btrfs_open_mode(fc); mutex_lock(&uuid_mutex); - device = btrfs_scan_one_device(param->string, mode, false); + device = btrfs_scan_one_device(param->string, false); mutex_unlock(&uuid_mutex); if (IS_ERR(device)) return PTR_ERR(device); @@ -336,53 +390,8 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param) fallthrough; case Opt_compress: case Opt_compress_type: - /* - * Provide the same semantics as older kernels that don't use fs - * context, specifying the "compress" option clears - * "force-compress" without the need to pass - * "compress-force=[no|none]" before specifying "compress". 
- */ - if (opt != Opt_compress_force && opt != Opt_compress_force_type) - btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); - - if (opt == Opt_compress || opt == Opt_compress_force) { - ctx->compress_type = BTRFS_COMPRESS_ZLIB; - ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL; - btrfs_set_opt(ctx->mount_opt, COMPRESS); - btrfs_clear_opt(ctx->mount_opt, NODATACOW); - btrfs_clear_opt(ctx->mount_opt, NODATASUM); - } else if (strncmp(param->string, "zlib", 4) == 0) { - ctx->compress_type = BTRFS_COMPRESS_ZLIB; - ctx->compress_level = - btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, - param->string + 4); - btrfs_set_opt(ctx->mount_opt, COMPRESS); - btrfs_clear_opt(ctx->mount_opt, NODATACOW); - btrfs_clear_opt(ctx->mount_opt, NODATASUM); - } else if (strncmp(param->string, "lzo", 3) == 0) { - ctx->compress_type = BTRFS_COMPRESS_LZO; - ctx->compress_level = 0; - btrfs_set_opt(ctx->mount_opt, COMPRESS); - btrfs_clear_opt(ctx->mount_opt, NODATACOW); - btrfs_clear_opt(ctx->mount_opt, NODATASUM); - } else if (strncmp(param->string, "zstd", 4) == 0) { - ctx->compress_type = BTRFS_COMPRESS_ZSTD; - ctx->compress_level = - btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, - param->string + 4); - btrfs_set_opt(ctx->mount_opt, COMPRESS); - btrfs_clear_opt(ctx->mount_opt, NODATACOW); - btrfs_clear_opt(ctx->mount_opt, NODATASUM); - } else if (strncmp(param->string, "no", 2) == 0) { - ctx->compress_level = 0; - ctx->compress_type = 0; - btrfs_clear_opt(ctx->mount_opt, COMPRESS); - btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); - } else { - btrfs_err(NULL, "unrecognized compression value %s", - param->string); + if (btrfs_parse_compress(ctx, param, opt)) return -EINVAL; - } break; case Opt_ssd: if (result.negated) { @@ -945,12 +954,12 @@ static int btrfs_fill_super(struct super_block *sb, { struct btrfs_inode *inode; struct btrfs_fs_info *fs_info = btrfs_sb(sb); - int err; + int ret; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_magic = BTRFS_SUPER_MAGIC; sb->s_op = &btrfs_super_ops; - sb->s_d_op = &btrfs_dentry_operations; + set_default_d_op(sb, &btrfs_dentry_operations); sb->s_export_op = &btrfs_export_ops; #ifdef CONFIG_FS_VERITY sb->s_vop = &btrfs_verityops; @@ -959,28 +968,28 @@ static int btrfs_fill_super(struct super_block *sb, sb->s_time_gran = 1; sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM; - err = super_setup_bdi(sb); - if (err) { + ret = super_setup_bdi(sb); + if (ret) { btrfs_err(fs_info, "super_setup_bdi failed"); - return err; + return ret; } - err = open_ctree(sb, fs_devices); - if (err) { - btrfs_err(fs_info, "open_ctree failed: %d", err); - return err; + ret = open_ctree(sb, fs_devices); + if (ret) { + btrfs_err(fs_info, "open_ctree failed: %d", ret); + return ret; } inode = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root); if (IS_ERR(inode)) { - err = PTR_ERR(inode); - btrfs_handle_fs_error(fs_info, err, NULL); + ret = PTR_ERR(inode); + btrfs_handle_fs_error(fs_info, ret, NULL); goto fail_close; } sb->s_root = d_make_root(&inode->vfs_inode); if (!sb->s_root) { - err = -ENOMEM; + ret = -ENOMEM; goto fail_close; } @@ -989,7 +998,7 @@ static int btrfs_fill_super(struct super_block *sb, fail_close: close_ctree(fs_info); - return err; + return ret; } int btrfs_sync_fs(struct super_block *sb, int wait) @@ -1826,10 +1835,9 @@ static int btrfs_get_tree_super(struct fs_context *fc) struct btrfs_fs_info *fs_info = fc->s_fs_info; struct btrfs_fs_context *ctx = fc->fs_private; struct btrfs_fs_devices *fs_devices = NULL; - struct block_device *bdev; struct btrfs_device *device; struct super_block *sb; - 
blk_mode_t mode = btrfs_open_mode(fc); + blk_mode_t mode = sb_open_mode(fc->sb_flags); int ret; btrfs_ctx_to_info(fs_info, ctx); @@ -1839,47 +1847,60 @@ static int btrfs_get_tree_super(struct fs_context *fc) * With 'true' passed to btrfs_scan_one_device() (mount time) we expect * either a valid device or an error. */ - device = btrfs_scan_one_device(fc->source, mode, true); + device = btrfs_scan_one_device(fc->source, true); ASSERT(device != NULL); if (IS_ERR(device)) { mutex_unlock(&uuid_mutex); return PTR_ERR(device); } - fs_devices = device->fs_devices; + /* + * We cannot hold uuid_mutex calling sget_fc(), it will lead to a + * locking order reversal with s_umount. + * + * So here we increase the holding number of fs_devices, this will ensure + * the fs_devices itself won't be freed. + */ + btrfs_fs_devices_inc_holding(fs_devices); fs_info->fs_devices = fs_devices; - - ret = btrfs_open_devices(fs_devices, mode, &btrfs_fs_type); mutex_unlock(&uuid_mutex); - if (ret) - return ret; - - if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) { - ret = -EACCES; - goto error; - } - bdev = fs_devices->latest_dev->bdev; - /* - * From now on the error handling is not straightforward. - * - * If successful, this will transfer the fs_info into the super block, - * and fc->s_fs_info will be NULL. However if there's an existing - * super, we'll still have fc->s_fs_info populated. If we error - * completely out it'll be cleaned up when we drop the fs_context, - * otherwise it's tied to the lifetime of the super_block. - */ sb = sget_fc(fc, btrfs_fc_test_super, set_anon_super_fc); if (IS_ERR(sb)) { - ret = PTR_ERR(sb); - goto error; + mutex_lock(&uuid_mutex); + btrfs_fs_devices_dec_holding(fs_devices); + /* + * Since the fs_devices is not opened, it can be freed at any + * time after unlocking uuid_mutex. We need to avoid double + * free through put_fs_context()->btrfs_free_fs_info(). + * So here we reset fs_info->fs_devices to NULL, and let the + * regular fs_devices reclaim path to handle it. + * + * This applies to all later branches where no fs_devices is + * opened. + */ + fs_info->fs_devices = NULL; + mutex_unlock(&uuid_mutex); + return PTR_ERR(sb); } set_device_specific_options(fs_info); if (sb->s_root) { - btrfs_close_devices(fs_devices); + /* + * Not the first mount of the fs thus got an existing super block. + * Will reuse the returned super block, fs_info and fs_devices. + * + * fc->s_fs_info is not touched and will be later freed by + * put_fs_context() through btrfs_free_fs_context(). + */ + ASSERT(fc->s_fs_info == fs_info); + + mutex_lock(&uuid_mutex); + btrfs_fs_devices_dec_holding(fs_devices); + fs_info->fs_devices = NULL; + mutex_unlock(&uuid_mutex); /* * At this stage we may have RO flag mismatch between * fc->sb_flags and sb->s_flags. Caller should detect such @@ -1887,9 +1908,32 @@ static int btrfs_get_tree_super(struct fs_context *fc) * needed. */ } else { + struct block_device *bdev; + + /* + * The first mount of the fs thus a new superblock, fc->s_fs_info + * must be NULL, and the ownership of our fs_info and fs_devices is + * transferred to the super block. 
+ */ + ASSERT(fc->s_fs_info == NULL); + + mutex_lock(&uuid_mutex); + btrfs_fs_devices_dec_holding(fs_devices); + ret = btrfs_open_devices(fs_devices, mode, sb); + if (ret < 0) + fs_info->fs_devices = NULL; + mutex_unlock(&uuid_mutex); + if (ret < 0) { + deactivate_locked_super(sb); + return ret; + } + if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) { + deactivate_locked_super(sb); + return -EACCES; + } + bdev = fs_devices->latest_dev->bdev; snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev); shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id); - btrfs_sb(sb)->bdev_holder = &btrfs_fs_type; ret = btrfs_fill_super(sb, fs_devices); if (ret) { deactivate_locked_super(sb); @@ -1901,10 +1945,6 @@ static int btrfs_get_tree_super(struct fs_context *fc) fc->root = dget(sb->s_root); return 0; - -error: - btrfs_close_devices(fs_devices); - return ret; } /* @@ -1980,17 +2020,13 @@ error: * btrfs or not, setting the whole super block RO. To make per-subvolume mounting * work with different options work we need to keep backward compatibility. */ -static int btrfs_reconfigure_for_mount(struct fs_context *fc, struct vfsmount *mnt) +static int btrfs_reconfigure_for_mount(struct fs_context *fc) { int ret = 0; - if (fc->sb_flags & SB_RDONLY) - return ret; - - down_write(&mnt->mnt_sb->s_umount); - if (!(fc->sb_flags & SB_RDONLY) && (mnt->mnt_sb->s_flags & SB_RDONLY)) + if (!(fc->sb_flags & SB_RDONLY) && (fc->root->d_sb->s_flags & SB_RDONLY)) ret = btrfs_reconfigure(fc); - up_write(&mnt->mnt_sb->s_umount); + return ret; } @@ -2035,25 +2071,18 @@ static int btrfs_get_tree_subvol(struct fs_context *fc) */ dup_fc->s_fs_info = fs_info; - /* - * We'll do the security settings in our btrfs_get_tree_super() mount - * loop, they were duplicated into dup_fc, we can drop the originals - * here. - */ - security_free_mnt_opts(&fc->security); - fc->security = NULL; + ret = btrfs_get_tree_super(dup_fc); + if (ret) + goto error; - mnt = fc_mount(dup_fc); - if (IS_ERR(mnt)) { - put_fs_context(dup_fc); - return PTR_ERR(mnt); - } - ret = btrfs_reconfigure_for_mount(dup_fc, mnt); + ret = btrfs_reconfigure_for_mount(dup_fc); + up_write(&dup_fc->root->d_sb->s_umount); + if (ret) + goto error; + mnt = vfs_create_mount(dup_fc); put_fs_context(dup_fc); - if (ret) { - mntput(mnt); - return ret; - } + if (IS_ERR(mnt)) + return PTR_ERR(mnt); /* * This free's ->subvol_name, because if it isn't set we have to @@ -2067,25 +2096,15 @@ static int btrfs_get_tree_subvol(struct fs_context *fc) fc->root = dentry; return 0; +error: + put_fs_context(dup_fc); + return ret; } static int btrfs_get_tree(struct fs_context *fc) { - /* - * Since we use mount_subtree to mount the default/specified subvol, we - * have to do mounts in two steps. - * - * First pass through we call btrfs_get_tree_subvol(), this is just a - * wrapper around fc_mount() to call back into here again, and this time - * we'll call btrfs_get_tree_super(). This will do the open_ctree() and - * everything to open the devices and file system. Then we return back - * with a fully constructed vfsmount in btrfs_get_tree_subvol(), and - * from there we can do our mount_subvol() call, which will lookup - * whichever subvol we're mounting and setup this fc with the - * appropriate dentry for the subvol. 
- */ - if (fc->s_fs_info) - return btrfs_get_tree_super(fc); + ASSERT(fc->s_fs_info == NULL); + return btrfs_get_tree_subvol(fc); } @@ -2217,7 +2236,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd, * Scanning outside of mount can return NULL which would turn * into 0 error code. */ - device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false); + device = btrfs_scan_one_device(vol->name, false); ret = PTR_ERR_OR_ZERO(device); mutex_unlock(&uuid_mutex); break; @@ -2235,7 +2254,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd, * Scanning outside of mount can return NULL which would turn * into 0 error code. */ - device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false); + device = btrfs_scan_one_device(vol->name, false); if (IS_ERR_OR_NULL(device)) { mutex_unlock(&uuid_mutex); if (IS_ERR(device)) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 5d93d9dd2c12..9d398f7a36ad 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -160,8 +160,7 @@ static int can_modify_feature(struct btrfs_feature_attr *fa) clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR; break; default: - pr_warn("btrfs: sysfs: unknown feature set %d\n", - fa->feature_set); + btrfs_warn(NULL, "sysfs: unknown feature set %d", fa->feature_set); return 0; } @@ -1138,13 +1137,21 @@ static ssize_t btrfs_commit_stats_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); + u64 now = ktime_get_ns(); + u64 start_time = fs_info->commit_stats.critical_section_start_time; + u64 pending = 0; + + if (start_time) + pending = now - start_time; return sysfs_emit(buf, "commits %llu\n" + "cur_commit_ms %llu\n" "last_commit_ms %llu\n" "max_commit_ms %llu\n" "total_commit_ms %llu\n", fs_info->commit_stats.commit_count, + div_u64(pending, NSEC_PER_MSEC), div_u64(fs_info->commit_stats.last_commit_dur, NSEC_PER_MSEC), div_u64(fs_info->commit_stats.max_commit_dur, NSEC_PER_MSEC), div_u64(fs_info->commit_stats.total_commit_dur, NSEC_PER_MSEC)); @@ -1202,7 +1209,7 @@ static ssize_t quota_override_store(struct kobject *kobj, { struct btrfs_fs_info *fs_info = to_fs_info(kobj); unsigned long knob; - int err; + int ret; if (!fs_info) return -EPERM; @@ -1210,9 +1217,9 @@ static ssize_t quota_override_store(struct kobject *kobj, if (!capable(CAP_SYS_RESOURCE)) return -EPERM; - err = kstrtoul(buf, 10, &knob); - if (err) - return err; + ret = kstrtoul(buf, 10, &knob); + if (ret) + return ret; if (knob > 1) return -EINVAL; @@ -2239,7 +2246,7 @@ void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action) ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action); if (ret) - pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n", + btrfs_warn(NULL, "sending event %d to kobject: '%s' (%p): failed", action, kobject_name(&disk_to_dev(bdev->bd_disk)->kobj), &disk_to_dev(bdev->bd_disk)->kobj); } @@ -2282,15 +2289,15 @@ static struct kset *btrfs_kset; */ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs) { - int error; + int ret; init_completion(&fs_devs->kobj_unregister); fs_devs->fsid_kobj.kset = btrfs_kset; - error = kobject_init_and_add(&fs_devs->fsid_kobj, &btrfs_ktype, NULL, - "%pU", fs_devs->fsid); - if (error) { + ret = kobject_init_and_add(&fs_devs->fsid_kobj, &btrfs_ktype, NULL, + "%pU", fs_devs->fsid); + if (ret) { kobject_put(&fs_devs->fsid_kobj); - return error; + return ret; } fs_devs->devices_kobj = kobject_create_and_add("devices", @@ -2316,71 +2323,70 @@ int btrfs_sysfs_add_fsid(struct 
btrfs_fs_devices *fs_devs) int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info) { - int error; + int ret; struct btrfs_fs_devices *fs_devs = fs_info->fs_devices; struct kobject *fsid_kobj = &fs_devs->fsid_kobj; - error = btrfs_sysfs_add_fs_devices(fs_devs); - if (error) - return error; + ret = btrfs_sysfs_add_fs_devices(fs_devs); + if (ret) + return ret; - error = sysfs_create_files(fsid_kobj, btrfs_attrs); - if (error) { + ret = sysfs_create_files(fsid_kobj, btrfs_attrs); + if (ret) { btrfs_sysfs_remove_fs_devices(fs_devs); - return error; + return ret; } - error = sysfs_create_group(fsid_kobj, - &btrfs_feature_attr_group); - if (error) + ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group); + if (ret) goto failure; #ifdef CONFIG_BTRFS_DEBUG fs_info->debug_kobj = kobject_create_and_add("debug", fsid_kobj); if (!fs_info->debug_kobj) { - error = -ENOMEM; + ret = -ENOMEM; goto failure; } - error = sysfs_create_files(fs_info->debug_kobj, btrfs_debug_mount_attrs); - if (error) + ret = sysfs_create_files(fs_info->debug_kobj, btrfs_debug_mount_attrs); + if (ret) goto failure; #endif /* Discard directory */ fs_info->discard_kobj = kobject_create_and_add("discard", fsid_kobj); if (!fs_info->discard_kobj) { - error = -ENOMEM; + ret = -ENOMEM; goto failure; } - error = sysfs_create_files(fs_info->discard_kobj, discard_attrs); - if (error) + ret = sysfs_create_files(fs_info->discard_kobj, discard_attrs); + if (ret) goto failure; - error = addrm_unknown_feature_attrs(fs_info, true); - if (error) + ret = addrm_unknown_feature_attrs(fs_info, true); + if (ret) goto failure; - error = sysfs_create_link(fsid_kobj, &fs_info->sb->s_bdi->dev->kobj, "bdi"); - if (error) + ret = sysfs_create_link(fsid_kobj, &fs_info->sb->s_bdi->dev->kobj, "bdi"); + if (ret) goto failure; fs_info->space_info_kobj = kobject_create_and_add("allocation", fsid_kobj); if (!fs_info->space_info_kobj) { - error = -ENOMEM; + ret = -ENOMEM; goto failure; } - error = sysfs_create_files(fs_info->space_info_kobj, allocation_attrs); - if (error) + ret = sysfs_create_files(fs_info->space_info_kobj, allocation_attrs); + if (ret) goto failure; return 0; failure: btrfs_sysfs_remove_mounted(fs_info); - return error; + return ret; } static ssize_t qgroup_enabled_show(struct kobject *qgroups_kobj, diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 00da54f0164c..b19328d077d3 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -23,8 +23,8 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end, { int ret; struct folio_batch fbatch; - unsigned long index = start >> PAGE_SHIFT; - unsigned long end_index = end >> PAGE_SHIFT; + pgoff_t index = start >> PAGE_SHIFT; + pgoff_t end_index = end >> PAGE_SHIFT; int i; int count = 0; int loops = 0; @@ -75,7 +75,8 @@ static void extent_flag_to_str(const struct extent_state *state, char *dest) dest[0] = 0; PRINT_ONE_FLAG(state, dest, cur, DIRTY); PRINT_ONE_FLAG(state, dest, cur, LOCKED); - PRINT_ONE_FLAG(state, dest, cur, NEW); + PRINT_ONE_FLAG(state, dest, cur, DIRTY_LOG1); + PRINT_ONE_FLAG(state, dest, cur, DIRTY_LOG2); PRINT_ONE_FLAG(state, dest, cur, DELALLOC); PRINT_ONE_FLAG(state, dest, cur, DEFRAG); PRINT_ONE_FLAG(state, dest, cur, BOUNDARY); @@ -113,7 +114,6 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) struct extent_io_tree *tmp; struct page *page; struct page *locked_page = NULL; - unsigned long index = 0; /* In this test we need at least 2 file extents at its maximum size */ 
u64 max_bytes = BTRFS_MAX_EXTENT_SIZE; u64 total_dirty = 2 * max_bytes; @@ -156,7 +156,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) * everything to make sure our pages don't get evicted and screw up our * test. */ - for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) { + for (pgoff_t index = 0; index < (total_dirty >> PAGE_SHIFT); index++) { page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); if (!page) { test_err("failed to allocate test page"); @@ -326,7 +326,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) out_bits: if (ret) dump_extent_io_tree(tmp); - btrfs_clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1); + btrfs_clear_extent_bit(tmp, 0, total_dirty - 1, (unsigned)-1, NULL); out: if (locked_page) put_page(locked_page); @@ -343,11 +343,11 @@ static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb) unsigned long i; for (i = 0; i < eb->len * BITS_PER_BYTE; i++) { - int bit, bit1; + bool bit_set, bit1_set; - bit = !!test_bit(i, bitmap); - bit1 = !!extent_buffer_test_bit(eb, 0, i); - if (bit1 != bit) { + bit_set = test_bit(i, bitmap); + bit1_set = extent_buffer_test_bit(eb, 0, i); + if (bit1_set != bit_set) { u8 has; u8 expect; @@ -360,9 +360,9 @@ static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb) return -EINVAL; } - bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE, - i % BITS_PER_BYTE); - if (bit1 != bit) { + bit1_set = extent_buffer_test_bit(eb, i / BITS_PER_BYTE, + i % BITS_PER_BYTE); + if (bit1_set != bit_set) { u8 has; u8 expect; @@ -662,7 +662,7 @@ static int test_find_first_clear_extent_bit(void) out: if (ret) dump_extent_io_tree(&tree); - btrfs_clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED); + btrfs_clear_extent_bit(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL); return ret; } diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index b61972046feb..c8822edd32e2 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c @@ -32,7 +32,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans, unsigned int i; int ret; - info = search_free_space_info(trans, cache, path, 0); + info = btrfs_search_free_space_info(trans, cache, path, 0); if (IS_ERR(info)) { test_err("could not find free space info"); ret = PTR_ERR(info); @@ -57,7 +57,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans, goto invalid; offset = key.objectid; while (offset < key.objectid + key.offset) { - bit = free_space_test_bit(cache, path, offset); + bit = btrfs_free_space_test_bit(cache, path, offset); if (prev_bit == 0 && bit == 1) { extent_start = offset; } else if (prev_bit == 1 && bit == 0) { @@ -115,7 +115,7 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans, u32 flags; int ret; - info = search_free_space_info(trans, cache, path, 0); + info = btrfs_search_free_space_info(trans, cache, path, 0); if (IS_ERR(info)) { test_err("could not find free space info"); btrfs_release_path(path); @@ -131,13 +131,13 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans, /* Flip it to the other format and check that for good measure. 
*/ if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) { - ret = convert_free_space_to_extents(trans, cache, path); + ret = btrfs_convert_free_space_to_extents(trans, cache, path); if (ret) { test_err("could not convert to extents"); return ret; } } else { - ret = convert_free_space_to_bitmaps(trans, cache, path); + ret = btrfs_convert_free_space_to_bitmaps(trans, cache, path); if (ret) { test_err("could not convert to bitmaps"); return ret; @@ -170,9 +170,8 @@ static int test_remove_all(struct btrfs_trans_handle *trans, const struct free_space_extent extents[] = {}; int ret; - ret = __remove_from_free_space_tree(trans, cache, path, - cache->start, - cache->length); + ret = __btrfs_remove_from_free_space_tree(trans, cache, path, + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; @@ -193,8 +192,8 @@ static int test_remove_beginning(struct btrfs_trans_handle *trans, }; int ret; - ret = __remove_from_free_space_tree(trans, cache, path, - cache->start, alignment); + ret = __btrfs_remove_from_free_space_tree(trans, cache, path, + cache->start, alignment); if (ret) { test_err("could not remove free space"); return ret; @@ -216,7 +215,7 @@ static int test_remove_end(struct btrfs_trans_handle *trans, }; int ret; - ret = __remove_from_free_space_tree(trans, cache, path, + ret = __btrfs_remove_from_free_space_tree(trans, cache, path, cache->start + cache->length - alignment, alignment); if (ret) { @@ -240,9 +239,9 @@ static int test_remove_middle(struct btrfs_trans_handle *trans, }; int ret; - ret = __remove_from_free_space_tree(trans, cache, path, - cache->start + alignment, - alignment); + ret = __btrfs_remove_from_free_space_tree(trans, cache, path, + cache->start + alignment, + alignment); if (ret) { test_err("could not remove free space"); return ret; @@ -263,23 +262,22 @@ static int test_merge_left(struct btrfs_trans_handle *trans, }; int ret; - ret = __remove_from_free_space_tree(trans, cache, path, - cache->start, cache->length); + ret = __btrfs_remove_from_free_space_tree(trans, cache, path, + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, cache->start, - alignment); + ret = __btrfs_add_to_free_space_tree(trans, cache, path, cache->start, + alignment); if (ret) { test_err("could not add free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, - cache->start + alignment, - alignment); + ret = __btrfs_add_to_free_space_tree(trans, cache, path, + cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); return ret; @@ -300,24 +298,23 @@ static int test_merge_right(struct btrfs_trans_handle *trans, }; int ret; - ret = __remove_from_free_space_tree(trans, cache, path, - cache->start, cache->length); + ret = __btrfs_remove_from_free_space_tree(trans, cache, path, + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, - cache->start + 2 * alignment, - alignment); + ret = __btrfs_add_to_free_space_tree(trans, cache, path, + cache->start + 2 * alignment, + alignment); if (ret) { test_err("could not add free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, - cache->start + alignment, - alignment); + ret = __btrfs_add_to_free_space_tree(trans, cache, path, + cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); return ret; @@ -338,29 +335,29 @@ static int 
test_merge_both(struct btrfs_trans_handle *trans, }; int ret; - ret = __remove_from_free_space_tree(trans, cache, path, - cache->start, cache->length); + ret = __btrfs_remove_from_free_space_tree(trans, cache, path, + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, cache->start, - alignment); + ret = __btrfs_add_to_free_space_tree(trans, cache, path, cache->start, + alignment); if (ret) { test_err("could not add free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, - cache->start + 2 * alignment, alignment); + ret = __btrfs_add_to_free_space_tree(trans, cache, path, + cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, - cache->start + alignment, alignment); + ret = __btrfs_add_to_free_space_tree(trans, cache, path, + cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); return ret; @@ -383,29 +380,29 @@ static int test_merge_none(struct btrfs_trans_handle *trans, }; int ret; - ret = __remove_from_free_space_tree(trans, cache, path, - cache->start, cache->length); + ret = __btrfs_remove_from_free_space_tree(trans, cache, path, + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, cache->start, - alignment); + ret = __btrfs_add_to_free_space_tree(trans, cache, path, cache->start, + alignment); if (ret) { test_err("could not add free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, - cache->start + 4 * alignment, alignment); + ret = __btrfs_add_to_free_space_tree(trans, cache, path, + cache->start + 4 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, - cache->start + 2 * alignment, alignment); + ret = __btrfs_add_to_free_space_tree(trans, cache, path, + cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; @@ -483,14 +480,14 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize, goto out; } - ret = add_block_group_free_space(&trans, cache); + ret = btrfs_add_block_group_free_space(&trans, cache); if (ret) { test_err("could not add block group free space"); goto out; } if (bitmaps) { - ret = convert_free_space_to_bitmaps(&trans, cache, path); + ret = btrfs_convert_free_space_to_bitmaps(&trans, cache, path); if (ret) { test_err("could not convert block group to bitmaps"); goto out; @@ -501,7 +498,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize, if (ret) goto out; - ret = remove_block_group_free_space(&trans, cache); + ret = btrfs_remove_block_group_free_space(&trans, cache); if (ret) { test_err("could not remove block group free space"); goto out; diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index a29d2c02c2c8..a4c2b7748b95 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -950,10 +950,10 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) } /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */ - ret = btrfs_clear_extent_bits(&BTRFS_I(inode)->io_tree, - BTRFS_MAX_EXTENT_SIZE >> 1, - (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, - EXTENT_DELALLOC | EXTENT_DELALLOC_NEW); + ret = btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, + BTRFS_MAX_EXTENT_SIZE 
>> 1, + (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, + EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; @@ -1017,10 +1017,10 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) } /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ - ret = btrfs_clear_extent_bits(&BTRFS_I(inode)->io_tree, - BTRFS_MAX_EXTENT_SIZE + sectorsize, - BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, - EXTENT_DELALLOC | EXTENT_DELALLOC_NEW); + ret = btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, + BTRFS_MAX_EXTENT_SIZE + sectorsize, + BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, + EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; @@ -1051,8 +1051,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) } /* Empty */ - ret = btrfs_clear_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, - EXTENT_DELALLOC | EXTENT_DELALLOC_NEW); + ret = btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, + EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; @@ -1066,8 +1066,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) ret = 0; out: if (ret) - btrfs_clear_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, - EXTENT_DELALLOC | EXTENT_DELALLOC_NEW); + btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, + EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL); iput(inode); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index b96195d6480f..c5c0d9cf1a80 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1211,15 +1211,15 @@ static int btrfs_wait_extents(struct btrfs_fs_info *fs_info, struct extent_io_tree *dirty_pages) { bool errors = false; - int err; + int ret; - err = __btrfs_wait_marked_extents(fs_info, dirty_pages); + ret = __btrfs_wait_marked_extents(fs_info, dirty_pages); if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags)) errors = true; - if (errors && !err) - err = -EIO; - return err; + if (errors && !ret) + ret = -EIO; + return ret; } int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark) @@ -1227,22 +1227,22 @@ int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark) struct btrfs_fs_info *fs_info = log_root->fs_info; struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages; bool errors = false; - int err; + int ret; ASSERT(btrfs_root_id(log_root) == BTRFS_TREE_LOG_OBJECTID); - err = __btrfs_wait_marked_extents(fs_info, dirty_pages); - if ((mark & EXTENT_DIRTY) && + ret = __btrfs_wait_marked_extents(fs_info, dirty_pages); + if ((mark & EXTENT_DIRTY_LOG1) && test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags)) errors = true; - if ((mark & EXTENT_NEW) && + if ((mark & EXTENT_DIRTY_LOG2) && test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags)) errors = true; - if (errors && !err) - err = -EIO; - return err; + if (errors && !ret) + ret = -EIO; + return ret; } /* @@ -1735,8 +1735,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_create_qgroup(trans, objectid); if (ret && ret != -EEXIST) { - btrfs_abort_transaction(trans, ret); - goto fail; + if (ret != -ENOTCONN || btrfs_qgroup_enabled(fs_info)) { + btrfs_abort_transaction(trans, ret); + goto fail; + } } /* @@ -2163,13 +2165,19 @@ static void add_pending_snapshot(struct btrfs_trans_handle *trans) list_add(&trans->pending_snapshot->list, 
&cur_trans->pending_snapshots); } -static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval) +static void update_commit_stats(struct btrfs_fs_info *fs_info) { + ktime_t now = ktime_get_ns(); + ktime_t interval = now - fs_info->commit_stats.critical_section_start_time; + + ASSERT(fs_info->commit_stats.critical_section_start_time); + fs_info->commit_stats.commit_count++; fs_info->commit_stats.last_commit_dur = interval; fs_info->commit_stats.max_commit_dur = max_t(u64, fs_info->commit_stats.max_commit_dur, interval); fs_info->commit_stats.total_commit_dur += interval; + fs_info->commit_stats.critical_section_start_time = 0; } int btrfs_commit_transaction(struct btrfs_trans_handle *trans) @@ -2178,8 +2186,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_transaction *prev_trans = NULL; int ret; - ktime_t start_time; - ktime_t interval; ASSERT(refcount_read(&trans->use_count) == 1); btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); @@ -2312,8 +2318,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) * Get the time spent on the work done by the commit thread and not * the time spent waiting on a previous commit */ - start_time = ktime_get_ns(); - + fs_info->commit_stats.critical_section_start_time = ktime_get_ns(); extwriter_counter_dec(cur_trans, trans->type); ret = btrfs_start_delalloc_flush(fs_info); @@ -2545,6 +2550,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) if (ret) goto scrub_continue; + update_commit_stats(fs_info); /* * We needn't acquire the lock here because there is no other task * which can change it. @@ -2581,8 +2587,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) trace_btrfs_transaction_commit(fs_info); - interval = ktime_get_ns() - start_time; - btrfs_scrub_continue(fs_info); if (current->journal_info == trans) @@ -2590,8 +2594,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) kmem_cache_free(btrfs_trans_handle_cachep, trans); - update_commit_stats(fs_info, interval); - return ret; unlock_reloc: diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 8f4703b488b7..0f556f4de3f9 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -191,7 +191,7 @@ static bool check_prev_ino(struct extent_buffer *leaf, * Only subvolume trees along with their reloc trees need this check. * Things like log tree doesn't follow this ino requirement. */ - if (!is_fstree(btrfs_header_owner(leaf))) + if (!btrfs_is_fstree(btrfs_header_owner(leaf))) return true; if (key->objectid == prev_key->objectid) @@ -475,7 +475,7 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key, * to be COWed to be relocated. 
*/ if (unlikely(is_root_item && key->objectid == BTRFS_TREE_RELOC_OBJECTID && - !is_fstree(key->offset))) { + !btrfs_is_fstree(key->offset))) { generic_err(leaf, slot, "invalid reloc tree for root %lld, root id is not a subvolume tree", key->offset); @@ -493,7 +493,7 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key, } /* DIR_ITEM/INDEX/INODE_REF is not allowed to point to non-fs trees */ - if (unlikely(!is_fstree(key->objectid) && !is_root_item)) { + if (unlikely(!btrfs_is_fstree(key->objectid) && !is_root_item)) { dir_item_err(leaf, slot, "invalid location key objectid, have %llu expect [%llu, %llu]", key->objectid, BTRFS_FIRST_FREE_OBJECTID, @@ -1311,7 +1311,7 @@ static bool is_valid_dref_root(u64 rootid) * - tree root * For v1 space cache */ - return is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID || + return btrfs_is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID || rootid == BTRFS_ROOT_TREE_OBJECTID; } @@ -2167,7 +2167,7 @@ ALLOW_ERROR_INJECTION(btrfs_check_node, ERRNO); int btrfs_check_eb_owner(const struct extent_buffer *eb, u64 root_owner) { - const bool is_subvol = is_fstree(root_owner); + const bool is_subvol = btrfs_is_fstree(root_owner); const u64 eb_owner = btrfs_header_owner(eb); /* @@ -2209,7 +2209,7 @@ int btrfs_check_eb_owner(const struct extent_buffer *eb, u64 root_owner) * For subvolume trees, owners can mismatch, but they should all belong * to subvolume trees. */ - if (unlikely(is_subvol != is_fstree(eb_owner))) { + if (unlikely(is_subvol != btrfs_is_fstree(eb_owner))) { btrfs_crit(eb->fs_info, "corrupted %s, root=%llu block=%llu owner mismatch, have %llu expect [%llu, %llu]", btrfs_header_level(eb) == 0 ? "leaf" : "node", diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index cea8a7e9d6d3..9f05d454b9df 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -112,7 +112,7 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, - u64 dirid, int del_all); + u64 dirid, bool del_all); static void wait_log_commit(struct btrfs_root *root, int transid); /* @@ -144,7 +144,7 @@ static struct btrfs_inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *r struct btrfs_inode *inode; /* Only meant to be called for subvolume roots and not for log roots. */ - ASSERT(is_fstree(btrfs_root_id(root))); + ASSERT(btrfs_is_fstree(btrfs_root_id(root))); /* * We're holding a transaction handle whether we are logging or @@ -1041,6 +1041,126 @@ out: return ret; } +static int unlink_refs_not_in_log(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_root *log_root, + struct btrfs_key *search_key, + struct btrfs_inode *dir, + struct btrfs_inode *inode, + u64 parent_objectid) +{ + struct extent_buffer *leaf = path->nodes[0]; + unsigned long ptr; + unsigned long ptr_end; + + /* + * Check all the names in this back reference to see if they are in the + * log. If so, we allow them to stay otherwise they must be unlinked as + * a conflict. 
+ */ + ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); + ptr_end = ptr + btrfs_item_size(leaf, path->slots[0]); + while (ptr < ptr_end) { + struct fscrypt_str victim_name; + struct btrfs_inode_ref *victim_ref; + int ret; + + victim_ref = (struct btrfs_inode_ref *)ptr; + ret = read_alloc_one_name(leaf, (victim_ref + 1), + btrfs_inode_ref_name_len(leaf, victim_ref), + &victim_name); + if (ret) + return ret; + + ret = backref_in_log(log_root, search_key, parent_objectid, &victim_name); + if (ret) { + kfree(victim_name.name); + if (ret < 0) + return ret; + ptr = (unsigned long)(victim_ref + 1) + victim_name.len; + continue; + } + + inc_nlink(&inode->vfs_inode); + btrfs_release_path(path); + + ret = unlink_inode_for_log_replay(trans, dir, inode, &victim_name); + kfree(victim_name.name); + if (ret) + return ret; + return -EAGAIN; + } + + return 0; +} + +static int unlink_extrefs_not_in_log(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_root *root, + struct btrfs_root *log_root, + struct btrfs_key *search_key, + struct btrfs_inode *inode, + u64 inode_objectid, + u64 parent_objectid) +{ + struct extent_buffer *leaf = path->nodes[0]; + const unsigned long base = btrfs_item_ptr_offset(leaf, path->slots[0]); + const u32 item_size = btrfs_item_size(leaf, path->slots[0]); + u32 cur_offset = 0; + + while (cur_offset < item_size) { + struct btrfs_inode_extref *extref; + struct btrfs_inode *victim_parent; + struct fscrypt_str victim_name; + int ret; + + extref = (struct btrfs_inode_extref *)(base + cur_offset); + victim_name.len = btrfs_inode_extref_name_len(leaf, extref); + + if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) + goto next; + + ret = read_alloc_one_name(leaf, &extref->name, victim_name.len, + &victim_name); + if (ret) + return ret; + + search_key->objectid = inode_objectid; + search_key->type = BTRFS_INODE_EXTREF_KEY; + search_key->offset = btrfs_extref_hash(parent_objectid, + victim_name.name, + victim_name.len); + ret = backref_in_log(log_root, search_key, parent_objectid, &victim_name); + if (ret) { + kfree(victim_name.name); + if (ret < 0) + return ret; +next: + cur_offset += victim_name.len + sizeof(*extref); + continue; + } + + victim_parent = btrfs_iget_logging(parent_objectid, root); + if (IS_ERR(victim_parent)) { + kfree(victim_name.name); + return PTR_ERR(victim_parent); + } + + inc_nlink(&inode->vfs_inode); + btrfs_release_path(path); + + ret = unlink_inode_for_log_replay(trans, victim_parent, inode, + &victim_name); + iput(&victim_parent->vfs_inode); + kfree(victim_name.name); + if (ret) + return ret; + return -EAGAIN; + } + + return 0; +} + static inline int __add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, @@ -1051,7 +1171,6 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, u64 ref_index, struct fscrypt_str *name) { int ret; - struct extent_buffer *leaf; struct btrfs_dir_item *di; struct btrfs_key search_key; struct btrfs_inode_extref *extref; @@ -1065,120 +1184,34 @@ again: if (ret < 0) { return ret; } else if (ret == 0) { - struct btrfs_inode_ref *victim_ref; - unsigned long ptr; - unsigned long ptr_end; - - leaf = path->nodes[0]; - - /* are we trying to overwrite a back ref for the root directory - * if so, just jump out, we're done + /* + * Are we trying to overwrite a back ref for the root directory? + * If so, we're done. 
*/ if (search_key.objectid == search_key.offset) return 1; - /* check all the names in this back reference to see - * if they are in the log. if so, we allow them to stay - * otherwise they must be unlinked as a conflict - */ - ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); - ptr_end = ptr + btrfs_item_size(leaf, path->slots[0]); - while (ptr < ptr_end) { - struct fscrypt_str victim_name; - - victim_ref = (struct btrfs_inode_ref *)ptr; - ret = read_alloc_one_name(leaf, (victim_ref + 1), - btrfs_inode_ref_name_len(leaf, victim_ref), - &victim_name); - if (ret) - return ret; - - ret = backref_in_log(log_root, &search_key, - parent_objectid, &victim_name); - if (ret < 0) { - kfree(victim_name.name); - return ret; - } else if (!ret) { - inc_nlink(&inode->vfs_inode); - btrfs_release_path(path); - - ret = unlink_inode_for_log_replay(trans, dir, inode, - &victim_name); - kfree(victim_name.name); - if (ret) - return ret; - goto again; - } - kfree(victim_name.name); - - ptr = (unsigned long)(victim_ref + 1) + victim_name.len; - } + ret = unlink_refs_not_in_log(trans, path, log_root, &search_key, + dir, inode, parent_objectid); + if (ret == -EAGAIN) + goto again; + else if (ret) + return ret; } btrfs_release_path(path); /* Same search but for extended refs */ - extref = btrfs_lookup_inode_extref(NULL, root, path, name, - inode_objectid, parent_objectid, 0, - 0); + extref = btrfs_lookup_inode_extref(root, path, name, inode_objectid, parent_objectid); if (IS_ERR(extref)) { return PTR_ERR(extref); } else if (extref) { - u32 item_size; - u32 cur_offset = 0; - unsigned long base; - struct btrfs_inode *victim_parent; - - leaf = path->nodes[0]; - - item_size = btrfs_item_size(leaf, path->slots[0]); - base = btrfs_item_ptr_offset(leaf, path->slots[0]); - - while (cur_offset < item_size) { - struct fscrypt_str victim_name; - - extref = (struct btrfs_inode_extref *)(base + cur_offset); - victim_name.len = btrfs_inode_extref_name_len(leaf, extref); - - if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) - goto next; - - ret = read_alloc_one_name(leaf, &extref->name, - victim_name.len, &victim_name); - if (ret) - return ret; - - search_key.objectid = inode_objectid; - search_key.type = BTRFS_INODE_EXTREF_KEY; - search_key.offset = btrfs_extref_hash(parent_objectid, - victim_name.name, - victim_name.len); - ret = backref_in_log(log_root, &search_key, - parent_objectid, &victim_name); - if (ret < 0) { - kfree(victim_name.name); - return ret; - } else if (!ret) { - victim_parent = btrfs_iget_logging(parent_objectid, root); - if (IS_ERR(victim_parent)) { - ret = PTR_ERR(victim_parent); - } else { - inc_nlink(&inode->vfs_inode); - btrfs_release_path(path); - - ret = unlink_inode_for_log_replay(trans, - victim_parent, - inode, &victim_name); - iput(&victim_parent->vfs_inode); - } - kfree(victim_name.name); - if (ret) - return ret; - goto again; - } - kfree(victim_name.name); -next: - cur_offset += victim_name.len + sizeof(*extref); - } + ret = unlink_extrefs_not_in_log(trans, path, root, log_root, + &search_key, inode, + inode_objectid, parent_objectid); + if (ret == -EAGAIN) + goto again; + else if (ret) + return ret; } btrfs_release_path(path); @@ -1352,7 +1385,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, unsigned long ref_end; struct fscrypt_str name = { 0 }; int ret; - int log_ref_ver = 0; + const bool is_extref_item = (key->type == BTRFS_INODE_EXTREF_KEY); u64 parent_objectid; u64 inode_objectid; u64 ref_index = 0; @@ -1361,11 +1394,10 @@ static noinline int 
add_inode_ref(struct btrfs_trans_handle *trans, ref_ptr = btrfs_item_ptr_offset(eb, slot); ref_end = ref_ptr + btrfs_item_size(eb, slot); - if (key->type == BTRFS_INODE_EXTREF_KEY) { + if (is_extref_item) { struct btrfs_inode_extref *r; ref_struct_size = sizeof(struct btrfs_inode_extref); - log_ref_ver = 1; r = (struct btrfs_inode_extref *)ref_ptr; parent_objectid = btrfs_inode_extref_parent(eb, r); } else { @@ -1383,6 +1415,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, dir = btrfs_iget_logging(parent_objectid, root); if (IS_ERR(dir)) { ret = PTR_ERR(dir); + if (ret == -ENOENT) + ret = 0; dir = NULL; goto out; } @@ -1395,9 +1429,11 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, } while (ref_ptr < ref_end) { - if (log_ref_ver) { + if (is_extref_item) { ret = extref_get_fields(eb, ref_ptr, &name, &ref_index, &parent_objectid); + if (ret) + goto out; /* * parent object can change from one array * item to another. @@ -1407,14 +1443,30 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, if (IS_ERR(dir)) { ret = PTR_ERR(dir); dir = NULL; + /* + * A new parent dir may have not been + * logged and not exist in the subvolume + * tree, see the comment above before + * the loop when getting the first + * parent dir. + */ + if (ret == -ENOENT) { + /* + * The next extref may refer to + * another parent dir that + * exists, so continue. + */ + ret = 0; + goto next; + } goto out; } } } else { ret = ref_get_fields(eb, ref_ptr, &name, &ref_index); + if (ret) + goto out; } - if (ret) - goto out; ret = inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), ref_index, &name); @@ -1448,10 +1500,11 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, } /* Else, ret == 1, we already have a perfect match, we're done. 
*/ +next: ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + name.len; kfree(name.name); name.name = NULL; - if (log_ref_ver) { + if (is_extref_item && dir) { iput(&dir->vfs_inode); dir = NULL; } @@ -1628,8 +1681,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, if (inode->vfs_inode.i_nlink == 0) { if (S_ISDIR(inode->vfs_inode.i_mode)) { - ret = replay_dir_deletes(trans, root, NULL, path, - ino, 1); + ret = replay_dir_deletes(trans, root, NULL, path, ino, true); if (ret) goto out; } @@ -2281,7 +2333,7 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, - u64 dirid, int del_all) + u64 dirid, bool del_all) { u64 range_start; u64 range_end; @@ -2443,8 +2495,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, break; mode = btrfs_inode_mode(eb, inode_item); if (S_ISDIR(mode)) { - ret = replay_dir_deletes(wc->trans, - root, log, path, key.objectid, 0); + ret = replay_dir_deletes(wc->trans, root, log, path, + key.objectid, false); if (ret) break; } @@ -2519,9 +2571,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, key.type == BTRFS_INODE_EXTREF_KEY) { ret = add_inode_ref(wc->trans, root, log, path, eb, i, &key); - if (ret && ret != -ENOENT) + if (ret) break; - ret = 0; } else if (key.type == BTRFS_EXTENT_DATA_KEY) { ret = replay_one_extent(wc->trans, root, path, eb, i, &key); @@ -2720,7 +2771,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, level = btrfs_header_level(log->node); orig_level = level; path->nodes[level] = log->node; - atomic_inc(&log->node->refs); + refcount_inc(&log->node->refs); path->slots[level] = 0; while (1) { @@ -2961,9 +3012,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, } if (log_transid % 2 == 0) - mark = EXTENT_DIRTY; + mark = EXTENT_DIRTY_LOG1; else - mark = EXTENT_NEW; + mark = EXTENT_DIRTY_LOG2; /* we start IO on all the marked extents here, but we don't actually * wait for them until later. @@ -3094,7 +3145,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, ret = btrfs_write_marked_extents(fs_info, &log_root_tree->dirty_log_pages, - EXTENT_DIRTY | EXTENT_NEW); + EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2); blk_finish_plug(&plug); /* * As described above, -EAGAIN indicates a hole in the extents. We @@ -3114,7 +3165,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, ret = btrfs_wait_tree_log_extents(log, mark); if (!ret) ret = btrfs_wait_tree_log_extents(log_root_tree, - EXTENT_NEW | EXTENT_DIRTY); + EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2); if (ret) { btrfs_set_log_full_commit(trans); mutex_unlock(&log_root_tree->log_mutex); @@ -3240,9 +3291,9 @@ static void free_log_tree(struct btrfs_trans_handle *trans, */ btrfs_write_marked_extents(log->fs_info, &log->dirty_log_pages, - EXTENT_DIRTY | EXTENT_NEW); + EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2); btrfs_wait_tree_log_extents(log, - EXTENT_DIRTY | EXTENT_NEW); + EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2); if (trans) btrfs_abort_transaction(trans, ret); @@ -3432,7 +3483,7 @@ static int del_logged_dentry(struct btrfs_trans_handle *trans, * inode item because on log replay we update the field to reflect * all existing entries in the directory (see overwrite_item()). 
*/ - return btrfs_delete_one_dir_name(trans, log, path, di); + return btrfs_del_item(trans, log, path); } /* @@ -3472,26 +3523,27 @@ void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, return; } - ret = join_running_log_trans(root); - if (ret) - return; - - mutex_lock(&dir->log_mutex); - path = btrfs_alloc_path(); if (!path) { - ret = -ENOMEM; - goto out_unlock; + btrfs_set_log_full_commit(trans); + return; } + ret = join_running_log_trans(root); + ASSERT(ret == 0, "join_running_log_trans() ret=%d", ret); + if (WARN_ON(ret)) + goto out; + + mutex_lock(&dir->log_mutex); + ret = del_logged_dentry(trans, root->log_root, path, btrfs_ino(dir), name, index); - btrfs_free_path(path); -out_unlock: mutex_unlock(&dir->log_mutex); if (ret < 0) btrfs_set_log_full_commit(trans); btrfs_end_log_trans(root); +out: + btrfs_free_path(path); } /* see comments for btrfs_del_dir_entries_in_log */ @@ -3501,7 +3553,6 @@ void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, u64 dirid) { struct btrfs_root *log; - u64 index; int ret; ret = inode_logged(trans, inode, NULL); @@ -3513,13 +3564,13 @@ void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, } ret = join_running_log_trans(root); - if (ret) + ASSERT(ret == 0, "join_running_log_trans() ret=%d", ret); + if (WARN_ON(ret)) return; log = root->log_root; mutex_lock(&inode->log_mutex); - ret = btrfs_del_inode_ref(trans, log, name, btrfs_ino(inode), - dirid, &index); + ret = btrfs_del_inode_ref(trans, log, name, btrfs_ino(inode), dirid, NULL); mutex_unlock(&inode->log_mutex); if (ret < 0 && ret != -ENOENT) btrfs_set_log_full_commit(trans); @@ -3684,7 +3735,7 @@ static int clone_leaf(struct btrfs_path *path, struct btrfs_log_ctx *ctx) * Add extra ref to scratch eb so that it is not freed when callers * release the path, so we can reuse it later if needed. 
*/ - atomic_inc(&ctx->scratch_eb->refs); + refcount_inc(&ctx->scratch_eb->refs); return 0; } @@ -4172,44 +4223,37 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, struct inode *inode, int log_inode_only, u64 logged_isize) { - struct btrfs_map_token token; u64 flags; - btrfs_init_map_token(&token, leaf); - if (log_inode_only) { /* set the generation to zero so the recover code * can tell the difference between an logging * just to say 'this inode exists' and a logging * to say 'update this inode with these values' */ - btrfs_set_token_inode_generation(&token, item, 0); - btrfs_set_token_inode_size(&token, item, logged_isize); + btrfs_set_inode_generation(leaf, item, 0); + btrfs_set_inode_size(leaf, item, logged_isize); } else { - btrfs_set_token_inode_generation(&token, item, - BTRFS_I(inode)->generation); - btrfs_set_token_inode_size(&token, item, inode->i_size); + btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); + btrfs_set_inode_size(leaf, item, inode->i_size); } - btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); - btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); - btrfs_set_token_inode_mode(&token, item, inode->i_mode); - btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); + btrfs_set_inode_uid(leaf, item, i_uid_read(inode)); + btrfs_set_inode_gid(leaf, item, i_gid_read(inode)); + btrfs_set_inode_mode(leaf, item, inode->i_mode); + btrfs_set_inode_nlink(leaf, item, inode->i_nlink); + + btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode)); + btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode)); - btrfs_set_token_timespec_sec(&token, &item->atime, - inode_get_atime_sec(inode)); - btrfs_set_token_timespec_nsec(&token, &item->atime, - inode_get_atime_nsec(inode)); + btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode)); + btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode)); - btrfs_set_token_timespec_sec(&token, &item->mtime, - inode_get_mtime_sec(inode)); - btrfs_set_token_timespec_nsec(&token, &item->mtime, - inode_get_mtime_nsec(inode)); + btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode)); + btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode)); - btrfs_set_token_timespec_sec(&token, &item->ctime, - inode_get_ctime_sec(inode)); - btrfs_set_token_timespec_nsec(&token, &item->ctime, - inode_get_ctime_nsec(inode)); + btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec); + btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec); /* * We do not need to set the nbytes field, in fact during a fast fsync @@ -4220,13 +4264,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, * inode item in subvolume tree as needed (see overwrite_item()). 
*/ - btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); - btrfs_set_token_inode_transid(&token, item, trans->transid); - btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); + btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode)); + btrfs_set_inode_transid(leaf, item, trans->transid); + btrfs_set_inode_rdev(leaf, item, inode->i_rdev); flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, BTRFS_I(inode)->ro_flags); - btrfs_set_token_inode_flags(&token, item, flags); - btrfs_set_token_inode_block_group(&token, item, 0); + btrfs_set_inode_flags(leaf, item, flags); + btrfs_set_inode_block_group(leaf, item, 0); } static int log_inode_item(struct btrfs_trans_handle *trans, @@ -7192,8 +7236,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) struct btrfs_path *path; struct btrfs_trans_handle *trans; struct btrfs_key key; - struct btrfs_key found_key; - struct btrfs_root *log; struct btrfs_fs_info *fs_info = log_root_tree->fs_info; struct walk_control wc = { .process_func = process_one_buffer, @@ -7227,6 +7269,9 @@ again: key.offset = (u64)-1; while (1) { + struct btrfs_root *log; + struct btrfs_key found_key; + ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); if (ret < 0) { @@ -7255,6 +7300,12 @@ again: true); if (IS_ERR(wc.replay_dest)) { ret = PTR_ERR(wc.replay_dest); + wc.replay_dest = NULL; + if (ret != -ENOENT) { + btrfs_put_root(log); + btrfs_abort_transaction(trans, ret); + goto error; + } /* * We didn't find the subvol, likely because it was @@ -7267,36 +7318,36 @@ again: * block from being modified, and we'll just bail for * each subsequent pass. */ - if (ret == -ENOENT) - ret = btrfs_pin_extent_for_log_replay(trans, log->node); - btrfs_put_root(log); - - if (!ret) - goto next; - btrfs_abort_transaction(trans, ret); - goto error; + ret = btrfs_pin_extent_for_log_replay(trans, log->node); + if (ret) { + btrfs_put_root(log); + btrfs_abort_transaction(trans, ret); + goto error; + } + goto next; } wc.replay_dest->log_root = log; ret = btrfs_record_root_in_trans(trans, wc.replay_dest); - if (ret) - /* The loop needs to continue due to the root refs */ + if (ret) { btrfs_abort_transaction(trans, ret); - else - ret = walk_log_tree(trans, log, &wc); + goto next; + } - if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { - ret = fixup_inode_link_counts(trans, wc.replay_dest, - path); - if (ret) - btrfs_abort_transaction(trans, ret); + ret = walk_log_tree(trans, log, &wc); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto next; } - if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { + if (wc.stage == LOG_WALK_REPLAY_ALL) { struct btrfs_root *root = wc.replay_dest; - btrfs_release_path(path); - + ret = fixup_inode_link_counts(trans, wc.replay_dest, path); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto next; + } /* * We have just replayed everything, and the highest * objectid of fs roots probably has changed in case @@ -7306,17 +7357,20 @@ again: * could only happen during mount. 
*/ ret = btrfs_init_root_free_objectid(root); - if (ret) + if (ret) { btrfs_abort_transaction(trans, ret); + goto next; + } + } +next: + if (wc.replay_dest) { + wc.replay_dest->log_root = NULL; + btrfs_put_root(wc.replay_dest); } - - wc.replay_dest->log_root = NULL; - btrfs_put_root(wc.replay_dest); btrfs_put_root(log); if (ret) goto error; -next: if (found_key.offset == 0) break; key.offset = found_key.offset - 1; @@ -7485,6 +7539,9 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, bool log_pinned = false; int ret; + btrfs_init_log_ctx(&ctx, inode); + ctx.logging_new_name = true; + /* * this will force the logging code to walk the dentry chain * up for the file @@ -7516,6 +7573,13 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, ret = 0; /* + * Now that we know we need to update the log, allocate the scratch eb + * for the context before joining a log transaction below, as this can + * take time and therefore we could delay log commits from other tasks. + */ + btrfs_init_log_ctx_scratch_eb(&ctx); + + /* * If we are doing a rename (old_dir is not NULL) from a directory that * was previously logged, make sure that on log replay we get the old * dir entry deleted. This is needed because we will also log the new @@ -7533,6 +7597,14 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, &old_dentry->d_name, 0, &fname); if (ret) goto out; + + path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; + fscrypt_free_filename(&fname); + goto out; + } + /* * We have two inodes to update in the log, the old directory and * the inode that got renamed, so we must pin the log to prevent @@ -7546,19 +7618,13 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, * mark the log for a full commit. */ if (WARN_ON_ONCE(ret < 0)) { + btrfs_free_path(path); fscrypt_free_filename(&fname); goto out; } log_pinned = true; - path = btrfs_alloc_path(); - if (!path) { - ret = -ENOMEM; - fscrypt_free_filename(&fname); - goto out; - } - /* * Other concurrent task might be logging the old directory, * as it can be triggered when logging other inode that had or @@ -7590,9 +7656,6 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, goto out; } - btrfs_init_log_ctx(&ctx, inode); - ctx.logging_new_name = true; - btrfs_init_log_ctx_scratch_eb(&ctx); /* * We don't care about the return value. If we fail to log the new name * then we know the next attempt to sync the log will fallback to a full @@ -7601,7 +7664,6 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, * inconsistent state after a rename operation. */ btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx); - free_extent_buffer(ctx.scratch_eb); ASSERT(list_empty(&ctx.conflict_inodes)); out: /* @@ -7614,5 +7676,6 @@ out: btrfs_set_log_full_commit(trans); if (log_pinned) btrfs_end_log_trans(root); + free_extent_buffer(ctx.scratch_eb); } diff --git a/fs/btrfs/tree-mod-log.c b/fs/btrfs/tree-mod-log.c index 1ac2678fc4ca..9e8cb3b7c064 100644 --- a/fs/btrfs/tree-mod-log.c +++ b/fs/btrfs/tree-mod-log.c @@ -27,18 +27,29 @@ struct tree_mod_elem { /* This is used for BTRFS_MOD_LOG_KEY* and BTRFS_MOD_LOG_ROOT_REPLACE. */ u64 generation; - /* Those are used for op == BTRFS_MOD_LOG_KEY_{REPLACE,REMOVE}. */ - struct btrfs_disk_key key; - u64 blockptr; - - /* This is used for op == BTRFS_MOD_LOG_MOVE_KEYS. */ - struct { - int dst_slot; - int nr_items; - } move; - - /* This is used for op == BTRFS_MOD_LOG_ROOT_REPLACE. 
*/ - struct tree_mod_root old_root; + union { + /* + * This is used for the following op types: + * + * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING + * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING + * BTRFS_MOD_LOG_KEY_REMOVE + * BTRFS_MOD_LOG_KEY_REPLACE + */ + struct { + struct btrfs_disk_key key; + u64 blockptr; + } slot_change; + + /* This is used for op == BTRFS_MOD_LOG_MOVE_KEYS. */ + struct { + int dst_slot; + int nr_items; + } move; + + /* This is used for op == BTRFS_MOD_LOG_ROOT_REPLACE. */ + struct tree_mod_root old_root; + }; }; /* @@ -164,6 +175,30 @@ static noinline int tree_mod_log_insert(struct btrfs_fs_info *fs_info, return 0; } +static inline bool skip_eb_logging(const struct extent_buffer *eb) +{ + const u64 owner = btrfs_header_owner(eb); + + if (btrfs_header_level(eb) == 0) + return true; + + /* + * Tree mod logging exists so that there's a consistent view of the + * extents and backrefs of inodes even if while a task is iterating over + * them other tasks are modifying subvolume trees and the extent tree + * (including running delayed refs). So we only need to log extent + * buffers from the extent tree and subvolume trees. + */ + + if (owner == BTRFS_EXTENT_TREE_OBJECTID) + return false; + + if (btrfs_is_fstree(owner)) + return false; + + return true; +} + /* * Determines if logging can be omitted. Returns true if it can. Otherwise, it * returns false with the tree_mod_log_lock acquired. The caller must hold @@ -174,7 +209,7 @@ static bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, const struct extent { if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) return true; - if (eb && btrfs_header_level(eb) == 0) + if (eb && skip_eb_logging(eb)) return true; write_lock(&fs_info->tree_mod_log_lock); @@ -192,7 +227,7 @@ static bool tree_mod_need_log(const struct btrfs_fs_info *fs_info, { if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) return false; - if (eb && btrfs_header_level(eb) == 0) + if (eb && skip_eb_logging(eb)) return false; return true; @@ -204,15 +239,17 @@ static struct tree_mod_elem *alloc_tree_mod_elem(const struct extent_buffer *eb, { struct tree_mod_elem *tm; + /* Can't be one of these types, due to union in struct tree_mod_elem. 
*/ + ASSERT(op != BTRFS_MOD_LOG_MOVE_KEYS); + ASSERT(op != BTRFS_MOD_LOG_ROOT_REPLACE); + tm = kzalloc(sizeof(*tm), GFP_NOFS); if (!tm) return NULL; tm->logical = eb->start; - if (op != BTRFS_MOD_LOG_KEY_ADD) { - btrfs_node_key(eb, &tm->key, slot); - tm->blockptr = btrfs_node_blockptr(eb, slot); - } + btrfs_node_key(eb, &tm->slot_change.key, slot); + tm->slot_change.blockptr = btrfs_node_blockptr(eb, slot); tm->op = op; tm->slot = slot; tm->generation = btrfs_node_ptr_generation(eb, slot); @@ -830,8 +867,8 @@ static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info, fallthrough; case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING: case BTRFS_MOD_LOG_KEY_REMOVE: - btrfs_set_node_key(eb, &tm->key, tm->slot); - btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); + btrfs_set_node_key(eb, &tm->slot_change.key, tm->slot); + btrfs_set_node_blockptr(eb, tm->slot, tm->slot_change.blockptr); btrfs_set_node_ptr_generation(eb, tm->slot, tm->generation); n++; @@ -840,8 +877,8 @@ static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info, break; case BTRFS_MOD_LOG_KEY_REPLACE: BUG_ON(tm->slot >= n); - btrfs_set_node_key(eb, &tm->key, tm->slot); - btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); + btrfs_set_node_key(eb, &tm->slot_change.key, tm->slot); + btrfs_set_node_blockptr(eb, tm->slot, tm->slot_change.blockptr); btrfs_set_node_ptr_generation(eb, tm->slot, tm->generation); break; diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c index fc59b57257d6..7e16a253fb35 100644 --- a/fs/btrfs/ulist.c +++ b/fs/btrfs/ulist.c @@ -129,21 +129,25 @@ void ulist_free(struct ulist *ulist) kfree(ulist); } +static int ulist_node_val_key_cmp(const void *key, const struct rb_node *node) +{ + const u64 *val = key; + const struct ulist_node *unode = rb_entry(node, struct ulist_node, rb_node); + + if (unode->val < *val) + return 1; + else if (unode->val > *val) + return -1; + + return 0; +} + static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val) { - struct rb_node *n = ulist->root.rb_node; - struct ulist_node *u = NULL; - - while (n) { - u = rb_entry(n, struct ulist_node, rb_node); - if (u->val < val) - n = n->rb_right; - else if (u->val > val) - n = n->rb_left; - else - return u; - } - return NULL; + struct rb_node *node; + + node = rb_find(&val, &ulist->root, ulist_node_val_key_cmp); + return rb_entry_safe(node, struct ulist_node, rb_node); } static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node) @@ -155,25 +159,20 @@ static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node) ulist->nnodes--; } +static int ulist_node_val_cmp(struct rb_node *new, const struct rb_node *existing) +{ + const struct ulist_node *unode = rb_entry(new, struct ulist_node, rb_node); + + return ulist_node_val_key_cmp(&unode->val, existing); +} + static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins) { - struct rb_node **p = &ulist->root.rb_node; - struct rb_node *parent = NULL; - struct ulist_node *cur = NULL; - - while (*p) { - parent = *p; - cur = rb_entry(parent, struct ulist_node, rb_node); - - if (cur->val < ins->val) - p = &(*p)->rb_right; - else if (cur->val > ins->val) - p = &(*p)->rb_left; - else - return -EEXIST; - } - rb_link_node(&ins->rb_node, parent, p); - rb_insert_color(&ins->rb_node, &ulist->root); + struct rb_node *node; + + node = rb_find_add(&ins->rb_node, &ulist->root, ulist_node_val_cmp); + if (node) + return -EEXIST; return 0; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f475b4b7c457..fa7a929a0461 100644 --- 
a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -18,7 +18,6 @@ #include "transaction.h" #include "volumes.h" #include "raid56.h" -#include "rcu-string.h" #include "dev-replace.h" #include "sysfs.h" #include "tree-checker.h" @@ -214,10 +213,8 @@ void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf) u64 flags = bg_flags; u32 size_bp = size_buf; - if (!flags) { - strcpy(bp, "NONE"); + if (!flags) return; - } #define DESCRIBE_FLAG(flag, desc) \ do { \ @@ -403,7 +400,11 @@ static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid) static void btrfs_free_device(struct btrfs_device *device) { WARN_ON(!list_empty(&device->post_commit_list)); - rcu_string_free(device->name); + /* + * No need to call kfree_rcu() nor do RCU lock/unlock, nothing is + * reading the device name. + */ + kfree(rcu_dereference_raw(device->name)); btrfs_extent_io_tree_release(&device->alloc_state); btrfs_destroy_dev_zone_info(device); kfree(device); @@ -414,6 +415,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices) struct btrfs_device *device; WARN_ON(fs_devices->opened); + WARN_ON(fs_devices->holding); while (!list_empty(&fs_devices->devices)) { device = list_first_entry(&fs_devices->devices, struct btrfs_device, dev_list); @@ -473,7 +475,7 @@ btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder, struct block_device *bdev; int ret; - *bdev_file = bdev_file_open_by_path(device_path, flags, holder, NULL); + *bdev_file = bdev_file_open_by_path(device_path, flags, holder, &fs_holder_ops); if (IS_ERR(*bdev_file)) { ret = PTR_ERR(*bdev_file); @@ -488,7 +490,7 @@ btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder, if (holder) { ret = set_blocksize(*bdev_file, BTRFS_BDEV_BLOCKSIZE); if (ret) { - fput(*bdev_file); + bdev_fput(*bdev_file); goto error; } } @@ -496,7 +498,7 @@ btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder, *disk_super = btrfs_read_disk_super(bdev, 0, false); if (IS_ERR(*disk_super)) { ret = PTR_ERR(*disk_super); - fput(*bdev_file); + bdev_fput(*bdev_file); goto error; } @@ -541,7 +543,7 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device continue; if (devt && devt != device->devt) continue; - if (fs_devices->opened) { + if (fs_devices->opened || fs_devices->holding) { if (devt) ret = -EBUSY; break; @@ -657,7 +659,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, if (!device->name) return -EINVAL; - ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, + ret = btrfs_get_bdev_and_sb(rcu_dereference_raw(device->name), flags, holder, 1, &bdev_file, &disk_super); if (ret) return ret; @@ -674,8 +676,8 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { if (btrfs_super_incompat_flags(disk_super) & BTRFS_FEATURE_INCOMPAT_METADATA_UUID) { - pr_err( - "BTRFS: Invalid seeding and uuid-changed device detected\n"); + btrfs_err(NULL, + "invalid seeding and uuid-changed device detected"); goto error_free_page; } @@ -701,7 +703,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, if (device->devt != device->bdev->bd_dev) { btrfs_warn(NULL, "device %s maj:min changed from %d:%d to %d:%d", - device->name->str, MAJOR(device->devt), + rcu_dereference_raw(device->name), MAJOR(device->devt), MINOR(device->devt), MAJOR(device->bdev->bd_dev), MINOR(device->bdev->bd_dev)); @@ -720,7 +722,7 @@ static int btrfs_open_one_device(struct 
btrfs_fs_devices *fs_devices, error_free_page: btrfs_release_disk_super(disk_super); - fput(bdev_file); + bdev_fput(bdev_file); return -EINVAL; } @@ -749,7 +751,7 @@ static bool is_same_device(struct btrfs_device *device, const char *new_path) goto out; rcu_read_lock(); - ret = strscpy(old_path, rcu_str_deref(device->name), PATH_MAX); + ret = strscpy(old_path, rcu_dereference(device->name), PATH_MAX); rcu_read_unlock(); if (ret < 0) goto out; @@ -782,11 +784,11 @@ static noinline struct btrfs_device *device_list_add(const char *path, { struct btrfs_device *device; struct btrfs_fs_devices *fs_devices = NULL; - struct rcu_string *name; + const char *name; u64 found_transid = btrfs_super_generation(disk_super); u64 devid = btrfs_stack_device_id(&disk_super->dev_item); dev_t path_devt; - int error; + int ret; bool same_fsid_diff_dev = false; bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) & BTRFS_FEATURE_INCOMPAT_METADATA_UUID); @@ -798,11 +800,11 @@ static noinline struct btrfs_device *device_list_add(const char *path, return ERR_PTR(-EAGAIN); } - error = lookup_bdev(path, &path_devt); - if (error) { + ret = lookup_bdev(path, &path_devt); + if (ret) { btrfs_err(NULL, "failed to lookup block device for path %s: %d", - path, error); - return ERR_PTR(error); + path, ret); + return ERR_PTR(ret); } fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev); @@ -819,7 +821,7 @@ static noinline struct btrfs_device *device_list_add(const char *path, if (same_fsid_diff_dev) { generate_random_uuid(fs_devices->fsid); fs_devices->temp_fsid = true; - pr_info("BTRFS: device %s (%d:%d) using temp-fsid %pU\n", + btrfs_info(NULL, "device %s (%d:%d) using temp-fsid %pU", path, MAJOR(path_devt), MINOR(path_devt), fs_devices->fsid); } @@ -890,6 +892,8 @@ static noinline struct btrfs_device *device_list_add(const char *path, current->comm, task_pid_nr(current)); } else if (!device->name || !is_same_device(device, path)) { + const char *old_name; + /* * When FS is already mounted. * 1. If you are here and if the device->name is NULL that @@ -943,27 +947,31 @@ static noinline struct btrfs_device *device_list_add(const char *path, if (device->bdev) { if (device->devt != path_devt) { mutex_unlock(&fs_devices->device_list_mutex); - btrfs_warn_in_rcu(NULL, + btrfs_warn(NULL, "duplicate device %s devid %llu generation %llu scanned by %s (%d)", path, devid, found_transid, current->comm, task_pid_nr(current)); return ERR_PTR(-EEXIST); } - btrfs_info_in_rcu(NULL, + btrfs_info(NULL, "devid %llu device path %s changed to %s scanned by %s (%d)", devid, btrfs_dev_name(device), path, current->comm, task_pid_nr(current)); } - name = rcu_string_strdup(path, GFP_NOFS); + name = kstrdup(path, GFP_NOFS); if (!name) { mutex_unlock(&fs_devices->device_list_mutex); return ERR_PTR(-ENOMEM); } - rcu_string_free(device->name); + rcu_read_lock(); + old_name = rcu_dereference(device->name); + rcu_read_unlock(); rcu_assign_pointer(device->name, name); + kfree_rcu_mightsleep(old_name); + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { fs_devices->missing_devices--; clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); @@ -1012,7 +1020,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) * uuid mutex so nothing we touch in here is going to disappear. 
*/ if (orig_dev->name) - dev_path = orig_dev->name->str; + dev_path = rcu_dereference_raw(orig_dev->name); device = btrfs_alloc_device(NULL, &orig_dev->devid, orig_dev->uuid, dev_path); @@ -1070,7 +1078,7 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, continue; if (device->bdev_file) { - fput(device->bdev_file); + bdev_fput(device->bdev_file); device->bdev = NULL; device->bdev_file = NULL; fs_devices->open_devices--; @@ -1117,7 +1125,7 @@ static void btrfs_close_bdev(struct btrfs_device *device) invalidate_bdev(device->bdev); } - fput(device->bdev_file); + bdev_fput(device->bdev_file); } static void btrfs_close_one_device(struct btrfs_device *device) @@ -1197,7 +1205,7 @@ void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) mutex_lock(&uuid_mutex); close_fs_devices(fs_devices); - if (!fs_devices->opened) { + if (!fs_devices->opened && !fs_devices->holding) { list_splice_init(&fs_devices->seed_list, &list); /* @@ -1414,7 +1422,7 @@ static bool btrfs_skip_registration(struct btrfs_super_block *disk_super, list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev && (device->bdev->bd_dev == devt) && - strcmp(device->name->str, path) != 0) { + strcmp(rcu_dereference_raw(device->name), path) != 0) { mutex_unlock(&fs_devices->device_list_mutex); /* Do not skip registration. */ @@ -1440,7 +1448,7 @@ static bool btrfs_skip_registration(struct btrfs_super_block *disk_super, * the device or return an error. Multi-device and seeding devices are registered * in both cases. */ -struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags, +struct btrfs_device *btrfs_scan_one_device(const char *path, bool mount_arg_dev) { struct btrfs_super_block *disk_super; @@ -1461,7 +1469,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags, * values temporarily, as the device paths of the fsid are the only * required information for assembling the volume. */ - bdev_file = bdev_file_open_by_path(path, flags, NULL, NULL); + bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ, NULL, NULL); if (IS_ERR(bdev_file)) return ERR_CAST(bdev_file); @@ -1473,7 +1481,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags, devt = file_bdev(bdev_file)->bd_dev; if (btrfs_skip_registration(disk_super, path, devt, mount_arg_dev)) { - pr_debug("BTRFS: skip registering single non-seed device %s (%d:%d)\n", + btrfs_debug(NULL, "skip registering single non-seed device %s (%d:%d)", path, MAJOR(devt), MINOR(devt)); btrfs_free_stale_devices(devt, NULL); @@ -1490,7 +1498,7 @@ free_disk_super: btrfs_release_disk_super(disk_super); error_bdev_put: - fput(bdev_file); + bdev_fput(bdev_file); return device; } @@ -2164,7 +2172,7 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_devic btrfs_kobject_uevent(bdev, KOBJ_CHANGE); /* Update ctime/mtime for device path for libblkid */ - update_dev_time(device->name->str); + update_dev_time(rcu_dereference_raw(device->name)); } int btrfs_rm_device(struct btrfs_fs_info *fs_info, @@ -2204,7 +2212,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, } if (btrfs_pinned_by_swapfile(fs_info, device)) { - btrfs_warn_in_rcu(fs_info, + btrfs_warn(fs_info, "cannot remove device %s (devid %llu) due to active swapfile", btrfs_dev_name(device), device->devid); return -ETXTBSY; @@ -2294,7 +2302,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, * free the device. 
* * We cannot call btrfs_close_bdev() here because we're holding the sb - * write lock, and fput() on the block device will pull in the + * write lock, and bdev_fput() on the block device will pull in the * ->open_mutex on the block device and it's dependencies. Instead * just flush the device and let the caller do the final bdev_release. */ @@ -2473,7 +2481,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, else memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); btrfs_release_disk_super(disk_super); - fput(bdev_file); + bdev_fput(bdev_file); return 0; } @@ -2705,7 +2713,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path return -EROFS; bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE, - fs_info->bdev_holder, NULL); + fs_info->sb, &fs_holder_ops); if (IS_ERR(bdev_file)) return PTR_ERR(bdev_file); @@ -2921,7 +2929,7 @@ error_free_zone: error_free_device: btrfs_free_device(device); error: - fput(bdev_file); + bdev_fput(bdev_file); if (locked) { mutex_unlock(&uuid_mutex); up_write(&sb->s_umount); @@ -3404,7 +3412,8 @@ out: return ret; } -int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) +int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset, + bool verbose) { struct btrfs_root *root = fs_info->chunk_root; struct btrfs_trans_handle *trans; @@ -3434,7 +3443,7 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) /* step one, relocate all the extents inside this chunk */ btrfs_scrub_pause(fs_info); - ret = btrfs_relocate_block_group(fs_info, chunk_offset); + ret = btrfs_relocate_block_group(fs_info, chunk_offset, true); btrfs_scrub_continue(fs_info); if (ret) { /* @@ -3544,7 +3553,8 @@ again: btrfs_release_path(path); if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { - ret = btrfs_relocate_chunk(fs_info, found_key.offset); + ret = btrfs_relocate_chunk(fs_info, found_key.offset, + true); if (ret == -ENOSPC) failed++; else @@ -4209,7 +4219,7 @@ again: } } - ret = btrfs_relocate_chunk(fs_info, found_key.offset); + ret = btrfs_relocate_chunk(fs_info, found_key.offset, true); mutex_unlock(&fs_info->reclaim_bgs_lock); if (ret == -ENOSPC) { enospc_errors++; @@ -4977,7 +4987,7 @@ again: goto done; } - ret = btrfs_relocate_chunk(fs_info, chunk_offset); + ret = btrfs_relocate_chunk(fs_info, chunk_offset, true); mutex_unlock(&fs_info->reclaim_bgs_lock); if (ret == -ENOSPC) { failed++; @@ -5009,8 +5019,8 @@ again: mutex_lock(&fs_info->chunk_mutex); /* Clear all state bits beyond the shrunk device size */ - btrfs_clear_extent_bits(&device->alloc_state, new_size, (u64)-1, - CHUNK_STATE_MASK); + btrfs_clear_extent_bit(&device->alloc_state, new_size, (u64)-1, + CHUNK_STATE_MASK, NULL); btrfs_device_set_disk_total_bytes(device, new_size); if (list_empty(&device->post_commit_list)) @@ -5437,9 +5447,9 @@ static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned in struct btrfs_io_stripe *stripe = &map->stripes[i]; struct btrfs_device *device = stripe->dev; - btrfs_clear_extent_bits(&device->alloc_state, stripe->physical, - stripe->physical + map->stripe_size - 1, - bits | EXTENT_NOWAIT); + btrfs_clear_extent_bit(&device->alloc_state, stripe->physical, + stripe->physical + map->stripe_size - 1, + bits | EXTENT_NOWAIT, NULL); } } @@ -6923,9 +6933,9 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, generate_random_uuid(dev->uuid); if (path) { - struct rcu_string *name; + const char *name; - name = rcu_string_strdup(path, GFP_KERNEL); + name = 
kstrdup(path, GFP_KERNEL); if (!name) { btrfs_free_device(dev); return ERR_PTR(-ENOMEM); @@ -7174,7 +7184,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, if (IS_ERR(fs_devices)) return fs_devices; - ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder); + ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->sb); if (ret) { free_fs_devices(fs_devices); return ERR_PTR(ret); @@ -7706,7 +7716,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, return -ENOMEM; ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); if (ret < 0) { - btrfs_warn_in_rcu(fs_info, + btrfs_warn(fs_info, "error %d while searching for dev_stats item for device %s", ret, btrfs_dev_name(device)); goto out; @@ -7717,7 +7727,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, /* need to delete old one and insert a new one */ ret = btrfs_del_item(trans, dev_root, path); if (ret != 0) { - btrfs_warn_in_rcu(fs_info, + btrfs_warn(fs_info, "delete too small dev_stats item for device %s failed %d", btrfs_dev_name(device), ret); goto out; @@ -7731,7 +7741,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, ret = btrfs_insert_empty_item(trans, dev_root, path, &key, sizeof(*ptr)); if (ret < 0) { - btrfs_warn_in_rcu(fs_info, + btrfs_warn(fs_info, "insert dev_stats item for device %s failed %d", btrfs_dev_name(device), ret); goto out; @@ -7794,7 +7804,7 @@ void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index) if (!dev->dev_stats_valid) return; - btrfs_err_rl_in_rcu(dev->fs_info, + btrfs_err_rl(dev->fs_info, "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", btrfs_dev_name(dev), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), @@ -7814,7 +7824,7 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) if (i == BTRFS_DEV_STAT_VALUES_MAX) return; /* all values == 0, suppress message */ - btrfs_info_in_rcu(dev->fs_info, + btrfs_info(dev->fs_info, "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", btrfs_dev_name(dev), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), @@ -7938,7 +7948,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info, } /* - * Very old mkfs.btrfs (before v4.1) will not respect the reserved + * Very old mkfs.btrfs (before v4.15) will not respect the reserved * space. Although kernel can handle it without problem, better to warn * the users. */ @@ -8190,7 +8200,7 @@ static int relocating_repair_kthread(void *data) btrfs_info(fs_info, "zoned: relocating block group %llu to repair IO failure", target); - ret = btrfs_relocate_chunk(fs_info, target); + ret = btrfs_relocate_chunk(fs_info, target, true); out: if (cache) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 137cc232f58e..a56e873a3029 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -21,7 +21,6 @@ #include <uapi/linux/btrfs.h> #include <uapi/linux/btrfs_tree.h> #include "messages.h" -#include "rcu-string.h" #include "extent-io-tree.h" struct block_device; @@ -114,7 +113,8 @@ struct btrfs_device { struct btrfs_fs_devices *fs_devices; struct btrfs_fs_info *fs_info; - struct rcu_string __rcu *name; + /* Device path or NULL if missing. */ + const char __rcu *name; u64 generation; @@ -422,6 +422,16 @@ struct btrfs_fs_devices { /* Count fs-devices opened. */ int opened; + /* + * Counter of the processes that are holding this fs_devices but not + * yet opened. + * This is for mounting handling, as we can only open the fs_devices + * after a super block is created. 
But we cannot take uuid_mutex + * during sget_fc(), thus we have to hold the fs_devices (meaning it + * cannot be released) until a super block is returned. + */ + int holding; + /* Set when we find or add a device that doesn't have the nonrot flag set. */ bool rotating; /* Devices support TRIM/discard commands. */ @@ -667,7 +677,7 @@ enum btrfs_map_op { BTRFS_MAP_GET_READ_MIRRORS, }; -static inline enum btrfs_map_op btrfs_op(struct bio *bio) +static inline enum btrfs_map_op btrfs_op(const struct bio *bio) { switch (bio_op(bio)) { case REQ_OP_WRITE: @@ -719,8 +729,7 @@ struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, blk_mode_t flags, void *holder); -struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags, - bool mount_arg_dev); +struct btrfs_device *btrfs_scan_one_device(const char *path, bool mount_arg_dev); int btrfs_forget_devices(dev_t devt); void btrfs_close_devices(struct btrfs_fs_devices *fs_devices); void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices); @@ -754,7 +763,8 @@ void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf); int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info); int btrfs_recover_balance(struct btrfs_fs_info *fs_info); int btrfs_pause_balance(struct btrfs_fs_info *fs_info); -int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset); +int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset, + bool verbose); int btrfs_cancel_balance(struct btrfs_fs_info *fs_info); bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset); void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index); @@ -846,7 +856,7 @@ static inline const char *btrfs_dev_name(const struct btrfs_device *device) if (!device || test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) return "<missing disk>"; else - return rcu_str_deref(device->name); + return rcu_dereference(device->name); } static inline void btrfs_warn_unknown_chunk_allocation(enum btrfs_chunk_allocation_policy pol) @@ -854,6 +864,20 @@ static inline void btrfs_warn_unknown_chunk_allocation(enum btrfs_chunk_allocati WARN_ONCE(1, "unknown allocation policy %d, fallback to regular", pol); } +static inline void btrfs_fs_devices_inc_holding(struct btrfs_fs_devices *fs_devices) +{ + lockdep_assert_held(&uuid_mutex); + ASSERT(fs_devices->holding >= 0); + fs_devices->holding++; +} + +static inline void btrfs_fs_devices_dec_holding(struct btrfs_fs_devices *fs_devices) +{ + lockdep_assert_held(&uuid_mutex); + ASSERT(fs_devices->holding > 0); + fs_devices->holding--; +} + void btrfs_commit_device_sizes(struct btrfs_transaction *trans); struct list_head * __attribute_const__ btrfs_get_fs_uuids(void); diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 3e0edbcf73e1..79fb1614bd0c 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -510,14 +510,15 @@ static int btrfs_initxattrs(struct inode *inode, */ nofs_flag = memalloc_nofs_save(); for (xattr = xattr_array; xattr->name != NULL; xattr++) { - name = kmalloc(XATTR_SECURITY_PREFIX_LEN + - strlen(xattr->name) + 1, GFP_KERNEL); + const size_t name_len = XATTR_SECURITY_PREFIX_LEN + + strlen(xattr->name) + 1; + + name = kmalloc(name_len, GFP_KERNEL); if (!name) { ret = -ENOMEM; break; } - strcpy(name, XATTR_SECURITY_PREFIX); - strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name); + scnprintf(name, name_len, "%s%s", 
XATTR_SECURITY_PREFIX, xattr->name); if (strcmp(name, XATTR_NAME_CAPS) == 0) clear_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags); diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 9430b34d3cbb..245e813ecd78 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -9,7 +9,6 @@ #include "ctree.h" #include "volumes.h" #include "zoned.h" -#include "rcu-string.h" #include "disk-io.h" #include "block-group.h" #include "dev-replace.h" @@ -17,6 +16,7 @@ #include "fs.h" #include "accessors.h" #include "bio.h" +#include "transaction.h" /* Maximum number of zones to report per blkdev_report_zones() call */ #define BTRFS_REPORT_NR_ZONES 4096 @@ -263,9 +263,9 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos, ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones, copy_zone_info_cb, zones); if (ret < 0) { - btrfs_err_in_rcu(device->fs_info, + btrfs_err(device->fs_info, "zoned: failed to read zone %llu on %s (devid %llu)", - pos, rcu_str_deref(device->name), + pos, rcu_dereference(device->name), device->devid); return ret; } @@ -395,16 +395,16 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) /* We reject devices with a zone size larger than 8GB */ if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) { - btrfs_err_in_rcu(fs_info, + btrfs_err(fs_info, "zoned: %s: zone size %llu larger than supported maximum %llu", - rcu_str_deref(device->name), + rcu_dereference(device->name), zone_info->zone_size, BTRFS_MAX_ZONE_SIZE); ret = -EINVAL; goto out; } else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) { - btrfs_err_in_rcu(fs_info, + btrfs_err(fs_info, "zoned: %s: zone size %llu smaller than supported minimum %u", - rcu_str_deref(device->name), + rcu_dereference(device->name), zone_info->zone_size, BTRFS_MIN_ZONE_SIZE); ret = -EINVAL; goto out; @@ -418,9 +418,9 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) max_active_zones = bdev_max_active_zones(bdev); if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) { - btrfs_err_in_rcu(fs_info, + btrfs_err(fs_info, "zoned: %s: max active zones %u is too small, need at least %u active zones", - rcu_str_deref(device->name), max_active_zones, + rcu_dereference(device->name), max_active_zones, BTRFS_MIN_ACTIVE_ZONES); ret = -EINVAL; goto out; @@ -460,9 +460,9 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) zone_info->zone_cache = vcalloc(zone_info->nr_zones, sizeof(struct blk_zone)); if (!zone_info->zone_cache) { - btrfs_err_in_rcu(device->fs_info, + btrfs_err(device->fs_info, "zoned: failed to allocate zone cache for %s", - rcu_str_deref(device->name)); + rcu_dereference(device->name)); ret = -ENOMEM; goto out; } @@ -497,9 +497,9 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) } if (nreported != zone_info->nr_zones) { - btrfs_err_in_rcu(device->fs_info, + btrfs_err(device->fs_info, "inconsistent number of zones on %s (%u/%u)", - rcu_str_deref(device->name), nreported, + rcu_dereference(device->name), nreported, zone_info->nr_zones); ret = -EIO; goto out; @@ -507,9 +507,9 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) if (max_active_zones) { if (nactive > max_active_zones) { - btrfs_err_in_rcu(device->fs_info, + btrfs_err(device->fs_info, "zoned: %u active zones on %s exceeds max_active_zones %u", - nactive, rcu_str_deref(device->name), + nactive, rcu_dereference(device->name), max_active_zones); ret = -EIO; goto out; @@ -538,7 +538,7 @@ int 
btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) goto out; if (nr_zones != BTRFS_NR_SB_LOG_ZONES) { - btrfs_err_in_rcu(device->fs_info, + btrfs_err(device->fs_info, "zoned: failed to read super block log zone info at devid %llu zone %u", device->devid, sb_zone); ret = -EUCLEAN; @@ -556,7 +556,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) ret = sb_write_pointer(device->bdev, &zone_info->sb_zones[sb_pos], &sb_wp); if (ret != -ENOENT && ret) { - btrfs_err_in_rcu(device->fs_info, + btrfs_err(device->fs_info, "zoned: super block log zone corrupted devid %llu zone %u", device->devid, sb_zone); ret = -EUCLEAN; @@ -575,9 +575,9 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) emulated = "emulated "; } - btrfs_info_in_rcu(fs_info, + btrfs_info(fs_info, "%s block device %s, %u %szones of %llu bytes", - model, rcu_str_deref(device->name), zone_info->nr_zones, + model, rcu_dereference(device->name), zone_info->nr_zones, emulated, zone_info->zone_size); return 0; @@ -1182,10 +1182,10 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size) continue; /* Free regions should be empty */ - btrfs_warn_in_rcu( + btrfs_warn( device->fs_info, "zoned: resetting device %s (devid %llu) zone %llu for allocation", - rcu_str_deref(device->name), device->devid, pos >> shift); + rcu_dereference(device->name), device->devid, pos >> shift); WARN_ON_ONCE(1); ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size, @@ -1345,9 +1345,9 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx, } if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) { - btrfs_err_in_rcu(fs_info, + btrfs_err(fs_info, "zoned: unexpected conventional zone %llu on device %s (devid %llu)", - zone.start << SECTOR_SHIFT, rcu_str_deref(device->name), + zone.start << SECTOR_SHIFT, rcu_dereference(device->name), device->devid); up_read(&dev_replace->rwsem); return -EIO; @@ -1358,10 +1358,10 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx, switch (zone.cond) { case BLK_ZONE_COND_OFFLINE: case BLK_ZONE_COND_READONLY: - btrfs_err_in_rcu(fs_info, + btrfs_err(fs_info, "zoned: offline/readonly zone %llu on device %s (devid %llu)", (info->physical >> device->zone_info->zone_size_shift), - rcu_str_deref(device->name), device->devid); + rcu_dereference(device->name), device->devid); info->alloc_offset = WP_MISSING_DEV; break; case BLK_ZONE_COND_EMPTY: @@ -2485,7 +2485,7 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg, /* For the work */ btrfs_get_block_group(bg); - atomic_inc(&eb->refs); + refcount_inc(&eb->refs); bg->last_eb = eb; INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn); queue_work(system_unbound_wq, &bg->zone_finish_work); @@ -2501,6 +2501,66 @@ void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) spin_unlock(&fs_info->relocation_bg_lock); } +void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info) +{ + struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; + struct btrfs_space_info *space_info = data_sinfo->sub_group[0]; + struct btrfs_trans_handle *trans; + struct btrfs_block_group *bg; + struct list_head *bg_list; + u64 alloc_flags; + bool initial = false; + bool did_chunk_alloc = false; + int index; + int ret; + + if (!btrfs_is_zoned(fs_info)) + return; + + if (fs_info->data_reloc_bg) + return; + + if (sb_rdonly(fs_info->sb)) + return; + + ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC); + alloc_flags = 
btrfs_get_alloc_profile(fs_info, space_info->flags); + index = btrfs_bg_flags_to_raid_index(alloc_flags); + + bg_list = &data_sinfo->block_groups[index]; +again: + list_for_each_entry(bg, bg_list, list) { + if (bg->used > 0) + continue; + + if (!initial) { + initial = true; + continue; + } + + fs_info->data_reloc_bg = bg->start; + set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &bg->runtime_flags); + btrfs_zone_activate(bg); + + return; + } + + if (did_chunk_alloc) + return; + + trans = btrfs_join_transaction(fs_info->tree_root); + if (IS_ERR(trans)) + return; + + ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE); + btrfs_end_transaction(trans); + if (ret == 1) { + did_chunk_alloc = true; + bg_list = &space_info->block_groups[index]; + goto again; + } +} + void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; @@ -2523,8 +2583,8 @@ bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *device; + u64 total = btrfs_super_total_bytes(fs_info->super_copy); u64 used = 0; - u64 total = 0; u64 factor; ASSERT(btrfs_is_zoned(fs_info)); @@ -2537,7 +2597,6 @@ bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info) if (!device->bdev) continue; - total += device->disk_total_bytes; used += device->bytes_used; } mutex_unlock(&fs_devices->device_list_mutex); diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h index 9672bf4c3335..6e11533b8e14 100644 --- a/fs/btrfs/zoned.h +++ b/fs/btrfs/zoned.h @@ -88,6 +88,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg, struct extent_buffer *eb); void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg); +void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info); void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info); bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info); void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical, @@ -241,6 +242,8 @@ static inline void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg, static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { } +static inline void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info) { } + static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { } static inline bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 4a796a049b5a..ff0292615e1f 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -200,8 +200,7 @@ void zstd_init_workspace_manager(void) ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL); if (IS_ERR(ws)) { - pr_warn( - "BTRFS: cannot preallocate zstd compression workspace\n"); + btrfs_warn(NULL, "cannot preallocate zstd compression workspace"); } else { set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map); list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]); diff --git a/fs/buffer.c b/fs/buffer.c index 8cf4a1dc481e..ead4dc85debd 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1122,15 +1122,9 @@ __getblk_slow(struct block_device *bdev, sector_t block, { bool blocking = gfpflags_allow_blocking(gfp); - /* Size must be multiple of hard sectorsize */ - if (unlikely(size & (bdev_logical_block_size(bdev)-1) || - (size < 512 || size > PAGE_SIZE))) { - printk(KERN_ERR "getblk(): invalid block size %d requested\n", - size); - printk(KERN_ERR "logical 
block size: %d\n", - bdev_logical_block_size(bdev)); - - dump_stack(); + if (WARN_ON_ONCE(!IS_ALIGNED(size, bdev_logical_block_size(bdev)))) { + printk(KERN_ERR "getblk(): block size %d not aligned to logical block size %d\n", + size, bdev_logical_block_size(bdev)); return NULL; } @@ -2271,9 +2265,8 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, } EXPORT_SYMBOL(block_write_begin); -int block_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata) +int block_write_end(loff_t pos, unsigned len, unsigned copied, + struct folio *folio) { size_t start = pos - folio_pos(folio); @@ -2304,15 +2297,15 @@ int block_write_end(struct file *file, struct address_space *mapping, } EXPORT_SYMBOL(block_write_end); -int generic_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata) +int generic_write_end(const struct kiocb *iocb, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { struct inode *inode = mapping->host; loff_t old_size = inode->i_size; bool i_size_changed = false; - copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata); + copied = block_write_end(pos, len, copied, folio); /* * No need to use i_size_read() here, the i_size cannot change under us @@ -2501,7 +2494,8 @@ out: } EXPORT_SYMBOL(generic_cont_expand_simple); -static int cont_expand_zero(struct file *file, struct address_space *mapping, +static int cont_expand_zero(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, loff_t *bytes) { struct inode *inode = mapping->host; @@ -2525,12 +2519,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, } len = PAGE_SIZE - zerofrom; - err = aops->write_begin(file, mapping, curpos, len, + err = aops->write_begin(iocb, mapping, curpos, len, &folio, &fsdata); if (err) goto out; folio_zero_range(folio, offset_in_folio(folio, curpos), len); - err = aops->write_end(file, mapping, curpos, len, len, + err = aops->write_end(iocb, mapping, curpos, len, len, folio, fsdata); if (err < 0) goto out; @@ -2558,12 +2552,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, } len = offset - zerofrom; - err = aops->write_begin(file, mapping, curpos, len, + err = aops->write_begin(iocb, mapping, curpos, len, &folio, &fsdata); if (err) goto out; folio_zero_range(folio, offset_in_folio(folio, curpos), len); - err = aops->write_end(file, mapping, curpos, len, len, + err = aops->write_end(iocb, mapping, curpos, len, len, folio, fsdata); if (err < 0) goto out; @@ -2578,17 +2572,16 @@ out: * For moronic filesystems that do not allow holes in file. * We may have to extend the file. 
*/ -int cont_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata, - get_block_t *get_block, loff_t *bytes) +int cont_write_begin(const struct kiocb *iocb, struct address_space *mapping, + loff_t pos, unsigned len, struct folio **foliop, + void **fsdata, get_block_t *get_block, loff_t *bytes) { struct inode *inode = mapping->host; unsigned int blocksize = i_blocksize(inode); unsigned int zerofrom; int err; - err = cont_expand_zero(file, mapping, pos, bytes); + err = cont_expand_zero(iocb, mapping, pos, bytes); if (err) return err; @@ -2610,7 +2603,7 @@ EXPORT_SYMBOL(cont_write_begin); * holes and correct delalloc and unwritten extent mapping on filesystems that * support these features. * - * We are not allowed to take the i_mutex here so we have to play games to + * We are not allowed to take the i_rwsem here so we have to play games to * protect against truncate races as the page could now be beyond EOF. Because * truncate writes the inode size before removing pages, once we have the * page lock we can determine safely if the page is beyond EOF. If it is not diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index aecfc5c37b49..91dfd0231877 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -388,10 +388,10 @@ try_again: } else { struct renamedata rd = { .old_mnt_idmap = &nop_mnt_idmap, - .old_dir = d_inode(dir), + .old_parent = dir, .old_dentry = rep, .new_mnt_idmap = &nop_mnt_idmap, - .new_dir = d_inode(cache->graveyard), + .new_parent = cache->graveyard, .new_dentry = grave, }; trace_cachefiles_rename(object, d_inode(rep)->i_ino, why); diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 60a621b00c65..8b202d789e93 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1864,10 +1864,12 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned * We are only allowed to write into/dirty the page if the page is * clean, or already dirty within the same snap context. 
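Illustration, not part of the patch: with the buffer.c hunks above, block_write_end() now takes only the position, length, copied byte count and the folio, and the ->write_begin()/->write_end() address_space operations receive a const struct kiocb * instead of a struct file *. A minimal converted ->write_end for a hypothetical buffer-head based filesystem, using only helpers visible in these hunks (the examplefs_ name is a placeholder), might look like:

static int examplefs_write_end(const struct kiocb *iocb,
			       struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned copied,
			       struct folio *folio, void *fsdata)
{
	/* Anything that still needs the file can use iocb->ki_filp. */
	return generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
}

Real conversions in this series follow the same shape, e.g. ceph_write_end() below.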
*/ -static int ceph_write_begin(struct file *file, struct address_space *mapping, +static int ceph_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { + struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); int r; @@ -1885,10 +1887,12 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, * we don't do anything in here that simple_write_end doesn't do * except adjust dirty page accounting */ -static int ceph_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, +static int ceph_write_end(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, + unsigned len, unsigned copied, struct folio *folio, void *fsdata) { + struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct ceph_client *cl = ceph_inode_to_client(inode); bool check_cap = false; @@ -2330,13 +2334,13 @@ static const struct vm_operations_struct ceph_vmops = { .page_mkwrite = ceph_page_mkwrite, }; -int ceph_mmap(struct file *file, struct vm_area_struct *vma) +int ceph_mmap_prepare(struct vm_area_desc *desc) { - struct address_space *mapping = file->f_mapping; + struct address_space *mapping = desc->file->f_mapping; if (!mapping->a_ops->read_folio) return -ENOEXEC; - vma->vm_ops = &ceph_vmops; + desc->vm_ops = &ceph_vmops; return 0; } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index a8d8b56cf9d2..b1a8ff612c41 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -4957,24 +4957,20 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry, cl = ceph_inode_to_client(dir); spin_lock(&dentry->d_lock); if (ret && di->lease_session && di->lease_session->s_mds == mds) { + int len = dentry->d_name.len; doutc(cl, "%p mds%d seq %d\n", dentry, mds, (int)di->lease_seq); rel->dname_seq = cpu_to_le32(di->lease_seq); __ceph_mdsc_drop_dentry_lease(dentry); + memcpy(*p, dentry->d_name.name, len); spin_unlock(&dentry->d_lock); if (IS_ENCRYPTED(dir) && fscrypt_has_encryption_key(dir)) { - int ret2 = ceph_encode_encrypted_fname(dir, dentry, *p); - - if (ret2 < 0) - return ret2; - - rel->dname_len = cpu_to_le32(ret2); - *p += ret2; - } else { - rel->dname_len = cpu_to_le32(dentry->d_name.len); - memcpy(*p, dentry->d_name.name, dentry->d_name.len); - *p += dentry->d_name.len; + len = ceph_encode_encrypted_dname(dir, *p, len); + if (len < 0) + return len; } + rel->dname_len = cpu_to_le32(len); + *p += len; } else { spin_unlock(&dentry->d_lock); } diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c index 3b3c4d8d401e..e312f52f48e4 100644 --- a/fs/ceph/crypto.c +++ b/fs/ceph/crypto.c @@ -215,35 +215,31 @@ static struct inode *parse_longname(const struct inode *parent, struct ceph_client *cl = ceph_inode_to_client(parent); struct inode *dir = NULL; struct ceph_vino vino = { .snap = CEPH_NOSNAP }; - char *inode_number; - char *name_end; - int orig_len = *name_len; + char *name_end, *inode_number; int ret = -EIO; - + /* NUL-terminate */ + char *str __free(kfree) = kmemdup_nul(name, *name_len, GFP_KERNEL); + if (!str) + return ERR_PTR(-ENOMEM); /* Skip initial '_' */ - name++; - name_end = strrchr(name, '_'); + str++; + name_end = strrchr(str, '_'); if (!name_end) { - doutc(cl, "failed to parse long snapshot name: %s\n", name); + doutc(cl, "failed to parse long snapshot name: %s\n", str); return ERR_PTR(-EIO); } - *name_len = (name_end - name); + *name_len = (name_end - str); if (*name_len <= 
0) { pr_err_client(cl, "failed to parse long snapshot name\n"); return ERR_PTR(-EIO); } /* Get the inode number */ - inode_number = kmemdup_nul(name_end + 1, - orig_len - *name_len - 2, - GFP_KERNEL); - if (!inode_number) - return ERR_PTR(-ENOMEM); + inode_number = name_end + 1; ret = kstrtou64(inode_number, 10, &vino.ino); if (ret) { - doutc(cl, "failed to parse inode number: %s\n", name); - dir = ERR_PTR(ret); - goto out; + doutc(cl, "failed to parse inode number: %s\n", str); + return ERR_PTR(ret); } /* And finally the inode */ @@ -254,42 +250,29 @@ static struct inode *parse_longname(const struct inode *parent, if (IS_ERR(dir)) doutc(cl, "can't find inode %s (%s)\n", inode_number, name); } - -out: - kfree(inode_number); return dir; } -int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name, - char *buf) +int ceph_encode_encrypted_dname(struct inode *parent, char *buf, int elen) { struct ceph_client *cl = ceph_inode_to_client(parent); struct inode *dir = parent; - struct qstr iname; + char *p = buf; u32 len; - int name_len; - int elen; + int name_len = elen; int ret; u8 *cryptbuf = NULL; - iname.name = d_name->name; - name_len = d_name->len; - /* Handle the special case of snapshot names that start with '_' */ - if ((ceph_snap(dir) == CEPH_SNAPDIR) && (name_len > 0) && - (iname.name[0] == '_')) { - dir = parse_longname(parent, iname.name, &name_len); + if (ceph_snap(dir) == CEPH_SNAPDIR && *p == '_') { + dir = parse_longname(parent, p, &name_len); if (IS_ERR(dir)) return PTR_ERR(dir); - iname.name++; /* skip initial '_' */ + p++; /* skip initial '_' */ } - iname.len = name_len; - if (!fscrypt_has_encryption_key(dir)) { - memcpy(buf, d_name->name, d_name->len); - elen = d_name->len; + if (!fscrypt_has_encryption_key(dir)) goto out; - } /* * Convert cleartext d_name to ciphertext. 
If result is longer than @@ -297,7 +280,7 @@ int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name, * * See: fscrypt_setup_filename */ - if (!fscrypt_fname_encrypted_size(dir, iname.len, NAME_MAX, &len)) { + if (!fscrypt_fname_encrypted_size(dir, name_len, NAME_MAX, &len)) { elen = -ENAMETOOLONG; goto out; } @@ -310,7 +293,9 @@ int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name, goto out; } - ret = fscrypt_fname_encrypt(dir, &iname, cryptbuf, len); + ret = fscrypt_fname_encrypt(dir, + &(struct qstr)QSTR_INIT(p, name_len), + cryptbuf, len); if (ret) { elen = ret; goto out; @@ -331,18 +316,13 @@ int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name, } /* base64 encode the encrypted name */ - elen = ceph_base64_encode(cryptbuf, len, buf); - doutc(cl, "base64-encoded ciphertext name = %.*s\n", elen, buf); + elen = ceph_base64_encode(cryptbuf, len, p); + doutc(cl, "base64-encoded ciphertext name = %.*s\n", elen, p); /* To understand the 240 limit, see CEPH_NOHASH_NAME_MAX comments */ WARN_ON(elen > 240); - if ((elen > 0) && (dir != parent)) { - char tmp_buf[NAME_MAX]; - - elen = snprintf(tmp_buf, sizeof(tmp_buf), "_%.*s_%ld", - elen, buf, dir->i_ino); - memcpy(buf, tmp_buf, elen); - } + if (dir != parent) // leading _ is already there; append _<inum> + elen += 1 + sprintf(p + elen, "_%ld", dir->i_ino); out: kfree(cryptbuf); @@ -355,14 +335,6 @@ out: return elen; } -int ceph_encode_encrypted_fname(struct inode *parent, struct dentry *dentry, - char *buf) -{ - WARN_ON_ONCE(!fscrypt_has_encryption_key(parent)); - - return ceph_encode_encrypted_dname(parent, &dentry->d_name, buf); -} - /** * ceph_fname_to_usr - convert a filename for userland presentation * @fname: ceph_fname to be converted diff --git a/fs/ceph/crypto.h b/fs/ceph/crypto.h index d0768239a1c9..f752bbb2eb06 100644 --- a/fs/ceph/crypto.h +++ b/fs/ceph/crypto.h @@ -102,10 +102,7 @@ int ceph_fscrypt_prepare_context(struct inode *dir, struct inode *inode, struct ceph_acl_sec_ctx *as); void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req, struct ceph_acl_sec_ctx *as); -int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name, - char *buf); -int ceph_encode_encrypted_fname(struct inode *parent, struct dentry *dentry, - char *buf); +int ceph_encode_encrypted_dname(struct inode *parent, char *buf, int len); static inline int ceph_fname_alloc_buffer(struct inode *parent, struct fscrypt_str *fname) @@ -194,17 +191,10 @@ static inline void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req, { } -static inline int ceph_encode_encrypted_dname(struct inode *parent, - struct qstr *d_name, char *buf) +static inline int ceph_encode_encrypted_dname(struct inode *parent, char *buf, + int len) { - memcpy(buf, d_name->name, d_name->len); - return d_name->len; -} - -static inline int ceph_encode_encrypted_fname(struct inode *parent, - struct dentry *dentry, char *buf) -{ - return -EOPNOTSUPP; + return len; } static inline int ceph_fname_alloc_buffer(struct inode *parent, diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index a321aa6d0ed2..8478e7e75df6 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -423,17 +423,16 @@ more: req->r_inode_drop = CEPH_CAP_FILE_EXCL; } if (dfi->last_name) { - struct qstr d_name = { .name = dfi->last_name, - .len = strlen(dfi->last_name) }; + int len = strlen(dfi->last_name); req->r_path2 = kzalloc(NAME_MAX + 1, GFP_KERNEL); if (!req->r_path2) { ceph_mdsc_put_request(req); return -ENOMEM; } + memcpy(req->r_path2, dfi->last_name, len); - err 
= ceph_encode_encrypted_dname(inode, &d_name, - req->r_path2); + err = ceph_encode_encrypted_dname(inode, req->r_path2, len); if (err < 0) { ceph_mdsc_put_request(req); return err; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index a7254cab44cc..8c06dc4655a8 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -3171,7 +3171,7 @@ const struct file_operations ceph_file_fops = { .llseek = ceph_llseek, .read_iter = ceph_read_iter, .write_iter = ceph_write_iter, - .mmap = ceph_mmap, + .mmap_prepare = ceph_mmap_prepare, .fsync = ceph_fsync, .lock = ceph_lock, .setlease = simple_nosetlease, diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 230e0c3f341f..0f497c39ff82 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2766,8 +2766,8 @@ retry: } if (fscrypt_has_encryption_key(d_inode(parent))) { - len = ceph_encode_encrypted_fname(d_inode(parent), - cur, buf); + len = ceph_encode_encrypted_dname(d_inode(parent), + buf, len); if (len < 0) { dput(parent); dput(cur); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 2b8438d8a324..c3eb651862c5 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -1219,7 +1219,7 @@ static int ceph_set_super(struct super_block *s, struct fs_context *fc) fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */ s->s_op = &ceph_super_ops; - s->s_d_op = &ceph_dentry_ops; + set_default_d_op(s, &ceph_dentry_ops); s->s_export_op = &ceph_export_ops; s->s_time_gran = 1; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index bb0db0cc8003..cf176aab0f82 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -1286,7 +1286,7 @@ extern void __ceph_touch_fmode(struct ceph_inode_info *ci, /* addr.c */ extern const struct address_space_operations ceph_aops; extern const struct netfs_request_ops ceph_netfs_ops; -extern int ceph_mmap(struct file *file, struct vm_area_struct *vma); +int ceph_mmap_prepare(struct vm_area_desc *desc); extern int ceph_uninline_data(struct file *file); extern int ceph_pool_perm_check(struct inode *inode, int need); extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc); diff --git a/fs/coda/dir.c b/fs/coda/dir.c index ab69d8f0cec2..ca9990017265 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -429,17 +429,9 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx) cfi = coda_ftoc(coda_file); host_file = cfi->cfi_container; - if (host_file->f_op->iterate_shared) { - struct inode *host_inode = file_inode(host_file); - ret = -ENOENT; - if (!IS_DEADDIR(host_inode)) { - inode_lock_shared(host_inode); - ret = host_file->f_op->iterate_shared(host_file, ctx); - file_accessed(host_file); - inode_unlock_shared(host_inode); - } + ret = iterate_dir(host_file, ctx); + if (ret != -ENOTDIR) return ret; - } /* Venus: we must read Venus dirents from a file */ return coda_venus_readdir(coda_file, ctx); } diff --git a/fs/coda/file.c b/fs/coda/file.c index 148856a582a9..a390b5d21196 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -160,7 +160,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) size_t count; int ret; - if (!host_file->f_op->mmap) + if (!can_mmap_file(host_file)) return -ENODEV; if (WARN_ON(coda_file != vma->vm_file)) @@ -199,10 +199,10 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) spin_unlock(&cii->c_lock); vma->vm_file = get_file(host_file); - ret = call_mmap(vma->vm_file, vma); + ret = vfs_mmap(vma->vm_file, vma); if (ret) { - /* if call_mmap fails, our caller will put host_file so we + /* if vfs_mmap fails, our caller will put host_file so we * should 
drop the reference to the coda_file that we got. */ fput(coda_file); diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 6896fce122e1..08450d006016 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -230,7 +230,7 @@ static int coda_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_blocksize_bits = 12; sb->s_magic = CODA_SUPER_MAGIC; sb->s_op = &coda_super_operations; - sb->s_d_op = &coda_dentry_operations; + set_default_d_op(sb, &coda_dentry_operations); sb->s_time_gran = 1; sb->s_time_min = S64_MIN; sb->s_time_max = S64_MAX; diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index ebf32822e29b..f327fbb9a0ca 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -67,7 +67,6 @@ static void configfs_d_iput(struct dentry * dentry, const struct dentry_operations configfs_dentry_ops = { .d_iput = configfs_d_iput, - .d_delete = always_delete_dentry, }; #ifdef CONFIG_LOCKDEP diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index c2d820063ec4..740f18b60c9d 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -92,7 +92,8 @@ static int configfs_fill_super(struct super_block *sb, struct fs_context *fc) configfs_root_group.cg_item.ci_dentry = root; root->d_fsdata = &configfs_root; sb->s_root = root; - sb->s_d_op = &configfs_dentry_ops; /* the rest get that */ + set_default_d_op(sb, &configfs_dentry_ops); /* the rest get that */ + sb->s_d_flags |= DCACHE_DONTCACHE; return 0; } diff --git a/fs/coredump.c b/fs/coredump.c index f217ebf2b3b6..fedbead956ed 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -51,6 +51,7 @@ #include <net/sock.h> #include <uapi/linux/pidfd.h> #include <uapi/linux/un.h> +#include <uapi/linux/coredump.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> @@ -81,17 +82,22 @@ static unsigned int core_sort_vma; static char core_pattern[CORENAME_MAX_SIZE] = "core"; static int core_name_size = CORENAME_MAX_SIZE; unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT; +static atomic_t core_pipe_count = ATOMIC_INIT(0); enum coredump_type_t { - COREDUMP_FILE = 1, - COREDUMP_PIPE = 2, - COREDUMP_SOCK = 3, + COREDUMP_FILE = 1, + COREDUMP_PIPE = 2, + COREDUMP_SOCK = 3, + COREDUMP_SOCK_REQ = 4, }; struct core_name { char *corename; int used, size; + unsigned int core_pipe_limit; + bool core_dumped; enum coredump_type_t core_type; + u64 mask; }; static int expand_corename(struct core_name *cn, int size) @@ -222,11 +228,12 @@ put_exe_file: return ret; } -/* format_corename will inspect the pattern parameter, and output a - * name into corename, which must have space for at least - * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. +/* + * coredump_parse will inspect the pattern parameter, and output a name + * into corename, which must have space for at least CORENAME_MAX_SIZE + * bytes plus one byte for the zero terminator. 
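For illustration only: the ceph conversion above moves mmap setup from ->mmap to the new ->mmap_prepare hook, which receives a struct vm_area_desc and sets desc->vm_ops instead of touching a vm_area_struct. A hypothetical filesystem following the same pattern (examplefs_* names are placeholders; the vm_operations_struct is assumed to exist) would be:

static int examplefs_mmap_prepare(struct vm_area_desc *desc)
{
	struct address_space *mapping = desc->file->f_mapping;

	if (!mapping->a_ops->read_folio)
		return -ENOEXEC;
	desc->vm_ops = &examplefs_vm_ops;
	return 0;
}

with .mmap_prepare = examplefs_mmap_prepare wired into its file_operations, as done for ceph_file_fops above.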
*/ -static int format_corename(struct core_name *cn, struct coredump_params *cprm, +static bool coredump_parse(struct core_name *cn, struct coredump_params *cprm, size_t **argv, int *argc) { const struct cred *cred = current_cred(); @@ -235,8 +242,13 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, int pid_in_pattern = 0; int err = 0; + cn->mask = COREDUMP_KERNEL; + if (core_pipe_limit) + cn->mask |= COREDUMP_WAIT; cn->used = 0; cn->corename = NULL; + cn->core_pipe_limit = 0; + cn->core_dumped = false; if (*pat_ptr == '|') cn->core_type = COREDUMP_PIPE; else if (*pat_ptr == '@') @@ -244,7 +256,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, else cn->core_type = COREDUMP_FILE; if (expand_corename(cn, core_name_size)) - return -ENOMEM; + return false; cn->corename[0] = '\0'; switch (cn->core_type) { @@ -252,26 +264,33 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, int argvs = sizeof(core_pattern) / 2; (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL); if (!(*argv)) - return -ENOMEM; + return false; (*argv)[(*argc)++] = 0; ++pat_ptr; if (!(*pat_ptr)) - return -ENOMEM; + return false; break; } case COREDUMP_SOCK: { /* skip the @ */ pat_ptr++; if (!(*pat_ptr)) - return -ENOMEM; + return false; + if (*pat_ptr == '@') { + pat_ptr++; + if (!(*pat_ptr)) + return false; + + cn->core_type = COREDUMP_SOCK_REQ; + } err = cn_printf(cn, "%s", pat_ptr); if (err) - return err; + return false; /* Require absolute paths. */ if (cn->corename[0] != '/') - return -EINVAL; + return false; /* * Ensure we can uses spaces to indicate additional @@ -279,7 +298,18 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, */ if (strchr(cn->corename, ' ')) { coredump_report_failure("Coredump socket may not %s contain spaces", cn->corename); - return -EINVAL; + return false; + } + + /* Must not contain ".." in the path. */ + if (name_contains_dotdot(cn->corename)) { + coredump_report_failure("Coredump socket may not %s contain '..' spaces", cn->corename); + return false; + } + + if (strlen(cn->corename) >= UNIX_PATH_MAX) { + coredump_report_failure("Coredump socket path %s too long", cn->corename); + return false; } /* @@ -289,13 +319,13 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, * via /proc/<pid>, using the SO_PEERPIDFD to guard * against pid recycling when opening /proc/<pid>. */ - return 0; + return true; } case COREDUMP_FILE: break; default: WARN_ON_ONCE(true); - return -EINVAL; + return false; } /* Repeat as long as we have more pattern to process and more output @@ -433,7 +463,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, } if (err) - return err; + return false; } out: @@ -443,9 +473,9 @@ out: * and core_uses_pid is set, then .%pid will be appended to * the filename. Do not do this for piped commands. 
*/ if (cn->core_type == COREDUMP_FILE && !pid_in_pattern && core_uses_pid) - return cn_printf(cn, ".%d", task_tgid_vnr(current)); + return cn_printf(cn, ".%d", task_tgid_vnr(current)) == 0; - return 0; + return true; } static int zap_process(struct signal_struct *signal, int exit_code) @@ -632,21 +662,440 @@ static int umh_coredump_setup(struct subprocess_info *info, struct cred *new) return 0; } -void do_coredump(const kernel_siginfo_t *siginfo) +#ifdef CONFIG_UNIX +static bool coredump_sock_connect(struct core_name *cn, struct coredump_params *cprm) +{ + struct file *file __free(fput) = NULL; + struct sockaddr_un addr = { + .sun_family = AF_UNIX, + }; + ssize_t addr_len; + int retval; + struct socket *socket; + + addr_len = strscpy(addr.sun_path, cn->corename); + if (addr_len < 0) + return false; + addr_len += offsetof(struct sockaddr_un, sun_path) + 1; + + /* + * It is possible that the userspace process which is supposed + * to handle the coredump and is listening on the AF_UNIX socket + * coredumps. Userspace should just mark itself non dumpable. + */ + + retval = sock_create_kern(&init_net, AF_UNIX, SOCK_STREAM, 0, &socket); + if (retval < 0) + return false; + + file = sock_alloc_file(socket, 0, NULL); + if (IS_ERR(file)) + return false; + + /* + * Set the thread-group leader pid which is used for the peer + * credentials during connect() below. Then immediately register + * it in pidfs... + */ + cprm->pid = task_tgid(current); + retval = pidfs_register_pid(cprm->pid); + if (retval) + return false; + + /* + * ... and set the coredump information so userspace has it + * available after connect()... + */ + pidfs_coredump(cprm); + + retval = kernel_connect(socket, (struct sockaddr *)(&addr), addr_len, + O_NONBLOCK | SOCK_COREDUMP); + + if (retval) { + if (retval == -EAGAIN) + coredump_report_failure("Coredump socket %s receive queue full", addr.sun_path); + else + coredump_report_failure("Coredump socket connection %s failed %d", addr.sun_path, retval); + return false; + } + + /* ... and validate that @sk_peer_pid matches @cprm.pid. */ + if (WARN_ON_ONCE(unix_peer(socket->sk)->sk_peer_pid != cprm->pid)) + return false; + + cprm->limit = RLIM_INFINITY; + cprm->file = no_free_ptr(file); + + return true; +} + +static inline bool coredump_sock_recv(struct file *file, struct coredump_ack *ack, size_t size, int flags) +{ + struct msghdr msg = {}; + struct kvec iov = { .iov_base = ack, .iov_len = size }; + ssize_t ret; + + memset(ack, 0, size); + ret = kernel_recvmsg(sock_from_file(file), &msg, &iov, 1, size, flags); + return ret == size; +} + +static inline bool coredump_sock_send(struct file *file, struct coredump_req *req) +{ + struct msghdr msg = { .msg_flags = MSG_NOSIGNAL }; + struct kvec iov = { .iov_base = req, .iov_len = sizeof(*req) }; + ssize_t ret; + + ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(*req)); + return ret == sizeof(*req); +} + +static_assert(sizeof(enum coredump_mark) == sizeof(__u32)); + +static inline bool coredump_sock_mark(struct file *file, enum coredump_mark mark) +{ + struct msghdr msg = { .msg_flags = MSG_NOSIGNAL }; + struct kvec iov = { .iov_base = &mark, .iov_len = sizeof(mark) }; + ssize_t ret; + + ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(mark)); + return ret == sizeof(mark); +} + +static inline void coredump_sock_wait(struct file *file) +{ + ssize_t n; + + /* + * We use a simple read to wait for the coredump processing to + * finish. Either the socket is closed or we get sent unexpected + * data. 
In both cases, we're done. + */ + n = __kernel_read(file, &(char){ 0 }, 1, NULL); + if (n > 0) + coredump_report_failure("Coredump socket had unexpected data"); + else if (n < 0) + coredump_report_failure("Coredump socket failed"); +} + +static inline void coredump_sock_shutdown(struct file *file) +{ + struct socket *socket; + + socket = sock_from_file(file); + if (!socket) + return; + + /* Let userspace know we're done processing the coredump. */ + kernel_sock_shutdown(socket, SHUT_WR); +} + +static bool coredump_sock_request(struct core_name *cn, struct coredump_params *cprm) +{ + struct coredump_req req = { + .size = sizeof(struct coredump_req), + .mask = COREDUMP_KERNEL | COREDUMP_USERSPACE | + COREDUMP_REJECT | COREDUMP_WAIT, + .size_ack = sizeof(struct coredump_ack), + }; + struct coredump_ack ack = {}; + ssize_t usize; + + if (cn->core_type != COREDUMP_SOCK_REQ) + return true; + + /* Let userspace know what we support. */ + if (!coredump_sock_send(cprm->file, &req)) + return false; + + /* Peek the size of the coredump_ack. */ + if (!coredump_sock_recv(cprm->file, &ack, sizeof(ack.size), + MSG_PEEK | MSG_WAITALL)) + return false; + + /* Refuse unknown coredump_ack sizes. */ + usize = ack.size; + if (usize < COREDUMP_ACK_SIZE_VER0) { + coredump_sock_mark(cprm->file, COREDUMP_MARK_MINSIZE); + return false; + } + + if (usize > sizeof(ack)) { + coredump_sock_mark(cprm->file, COREDUMP_MARK_MAXSIZE); + return false; + } + + /* Now retrieve the coredump_ack. */ + if (!coredump_sock_recv(cprm->file, &ack, usize, MSG_WAITALL)) + return false; + if (ack.size != usize) + return false; + + /* Refuse unknown coredump_ack flags. */ + if (ack.mask & ~req.mask) { + coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED); + return false; + } + + /* Refuse mutually exclusive options. */ + if (hweight64(ack.mask & (COREDUMP_USERSPACE | COREDUMP_KERNEL | + COREDUMP_REJECT)) != 1) { + coredump_sock_mark(cprm->file, COREDUMP_MARK_CONFLICTING); + return false; + } + + if (ack.spare) { + coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED); + return false; + } + + cn->mask = ack.mask; + return coredump_sock_mark(cprm->file, COREDUMP_MARK_REQACK); +} + +static bool coredump_socket(struct core_name *cn, struct coredump_params *cprm) +{ + if (!coredump_sock_connect(cn, cprm)) + return false; + + return coredump_sock_request(cn, cprm); +} +#else +static inline void coredump_sock_wait(struct file *file) { } +static inline void coredump_sock_shutdown(struct file *file) { } +static inline bool coredump_socket(struct core_name *cn, struct coredump_params *cprm) { return false; } +#endif + +/* cprm->mm_flags contains a stable snapshot of dumpability flags. */ +static inline bool coredump_force_suid_safe(const struct coredump_params *cprm) +{ + /* Require nonrelative corefile path and be extra careful. 
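A sketch of the other end of the handshake implemented by coredump_sock_request() above may help when reading it: for an '@@' core_pattern (COREDUMP_SOCK_REQ) the kernel connects, sends a struct coredump_req, waits for a struct coredump_ack, and confirms the ack with an enum coredump_mark before anything else happens on the socket. Roughly, the userspace side (error handling, the listener/accept code and the exact uapi header path are assumed; field and constant names are the ones used by the kernel code above):

#include <linux/coredump.h>
#include <linux/types.h>
#include <string.h>
#include <unistd.h>

/* fd: connected AF_UNIX socket accepted from the coredump listener */
static int handle_coredump_request(int fd)
{
	struct coredump_req req;
	struct coredump_ack ack;
	__u32 mark;

	if (read(fd, &req, sizeof(req)) != sizeof(req))
		return -1;

	memset(&ack, 0, sizeof(ack));
	ack.size = sizeof(ack);
	/* exactly one of COREDUMP_KERNEL/USERSPACE/REJECT, plus optional WAIT */
	ack.mask = COREDUMP_KERNEL | COREDUMP_WAIT;

	if (write(fd, &ack, sizeof(ack)) != sizeof(ack))
		return -1;

	/* the kernel acknowledges a valid ack with COREDUMP_MARK_REQACK */
	if (read(fd, &mark, sizeof(mark)) != sizeof(mark) ||
	    mark != COREDUMP_MARK_REQACK)
		return -1;

	return 0;	/* with COREDUMP_KERNEL set, the dump data follows */
}

With COREDUMP_WAIT in the ack, the kernel signals that it has finished writing by shutting down its side of the connection (coredump_sock_shutdown()) and then blocks in coredump_sock_wait() until userspace closes the socket.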
*/ + return __get_dumpable(cprm->mm_flags) == SUID_DUMP_ROOT; +} + +static bool coredump_file(struct core_name *cn, struct coredump_params *cprm, + const struct linux_binfmt *binfmt) +{ + struct mnt_idmap *idmap; + struct inode *inode; + struct file *file __free(fput) = NULL; + int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW | O_LARGEFILE | O_EXCL; + + if (cprm->limit < binfmt->min_coredump) + return false; + + if (coredump_force_suid_safe(cprm) && cn->corename[0] != '/') { + coredump_report_failure("this process can only dump core to a fully qualified path, skipping core dump"); + return false; + } + + /* + * Unlink the file if it exists unless this is a SUID + * binary - in that case, we're running around with root + * privs and don't want to unlink another user's coredump. + */ + if (!coredump_force_suid_safe(cprm)) { + /* + * If it doesn't exist, that's fine. If there's some + * other problem, we'll catch it at the filp_open(). + */ + do_unlinkat(AT_FDCWD, getname_kernel(cn->corename)); + } + + /* + * There is a race between unlinking and creating the + * file, but if that causes an EEXIST here, that's + * fine - another process raced with us while creating + * the corefile, and the other process won. To userspace, + * what matters is that at least one of the two processes + * writes its coredump successfully, not which one. + */ + if (coredump_force_suid_safe(cprm)) { + /* + * Using user namespaces, normal user tasks can change + * their current->fs->root to point to arbitrary + * directories. Since the intention of the "only dump + * with a fully qualified path" rule is to control where + * coredumps may be placed using root privileges, + * current->fs->root must not be used. Instead, use the + * root directory of init_task. + */ + struct path root; + + task_lock(&init_task); + get_fs_root(init_task.fs, &root); + task_unlock(&init_task); + file = file_open_root(&root, cn->corename, open_flags, 0600); + path_put(&root); + } else { + file = filp_open(cn->corename, open_flags, 0600); + } + if (IS_ERR(file)) + return false; + + inode = file_inode(file); + if (inode->i_nlink > 1) + return false; + if (d_unhashed(file->f_path.dentry)) + return false; + /* + * AK: actually i see no reason to not allow this for named + * pipes etc, but keep the previous behaviour for now. + */ + if (!S_ISREG(inode->i_mode)) + return false; + /* + * Don't dump core if the filesystem changed owner or mode + * of the file during file creation. This is an issue when + * a process dumps core while its cwd is e.g. on a vfat + * filesystem. + */ + idmap = file_mnt_idmap(file); + if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid())) { + coredump_report_failure("Core dump to %s aborted: cannot preserve file owner", cn->corename); + return false; + } + if ((inode->i_mode & 0677) != 0600) { + coredump_report_failure("Core dump to %s aborted: cannot preserve file permissions", cn->corename); + return false; + } + if (!(file->f_mode & FMODE_CAN_WRITE)) + return false; + if (do_truncate(idmap, file->f_path.dentry, 0, 0, file)) + return false; + + cprm->file = no_free_ptr(file); + return true; +} + +static bool coredump_pipe(struct core_name *cn, struct coredump_params *cprm, + size_t *argv, int argc) { + int argi; + char **helper_argv __free(kfree) = NULL; + struct subprocess_info *sub_info; + + if (cprm->limit == 1) { + /* See umh_coredump_setup() which sets RLIMIT_CORE = 1. 
+ * + * Normally core limits are irrelevant to pipes, since + * we're not writing to the file system, but we use + * cprm.limit of 1 here as a special value, this is a + * consistent way to catch recursive crashes. + * We can still crash if the core_pattern binary sets + * RLIM_CORE = !1, but it runs as root, and can do + * lots of stupid things. + * + * Note that we use task_tgid_vnr here to grab the pid + * of the process group leader. That way we get the + * right pid if a thread in a multi-threaded + * core_pattern process dies. + */ + coredump_report_failure("RLIMIT_CORE is set to 1, aborting core"); + return false; + } + cprm->limit = RLIM_INFINITY; + + cn->core_pipe_limit = atomic_inc_return(&core_pipe_count); + if (core_pipe_limit && (core_pipe_limit < cn->core_pipe_limit)) { + coredump_report_failure("over core_pipe_limit, skipping core dump"); + return false; + } + + helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), GFP_KERNEL); + if (!helper_argv) { + coredump_report_failure("%s failed to allocate memory", __func__); + return false; + } + for (argi = 0; argi < argc; argi++) + helper_argv[argi] = cn->corename + argv[argi]; + helper_argv[argi] = NULL; + + sub_info = call_usermodehelper_setup(helper_argv[0], helper_argv, NULL, + GFP_KERNEL, umh_coredump_setup, + NULL, cprm); + if (!sub_info) + return false; + + if (call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC)) { + coredump_report_failure("|%s pipe failed", cn->corename); + return false; + } + + /* + * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would + * have this set to NULL. + */ + if (!cprm->file) { + coredump_report_failure("Core dump to |%s disabled", cn->corename); + return false; + } + + return true; +} + +static bool coredump_write(struct core_name *cn, + struct coredump_params *cprm, + struct linux_binfmt *binfmt) +{ + + if (dump_interrupted()) + return true; + + if (!dump_vma_snapshot(cprm)) + return false; + + file_start_write(cprm->file); + cn->core_dumped = binfmt->core_dump(cprm); + /* + * Ensures that file size is big enough to contain the current + * file postion. This prevents gdb from complaining about + * a truncated file if the last "write" to the file was + * dump_skip. 
+ */ + if (cprm->to_skip) { + cprm->to_skip--; + dump_emit(cprm, "", 1); + } + file_end_write(cprm->file); + free_vma_snapshot(cprm); + return true; +} + +static void coredump_cleanup(struct core_name *cn, struct coredump_params *cprm) +{ + if (cprm->file) + filp_close(cprm->file, NULL); + if (cn->core_pipe_limit) { + VFS_WARN_ON_ONCE(cn->core_type != COREDUMP_PIPE); + atomic_dec(&core_pipe_count); + } + kfree(cn->corename); + coredump_finish(cn->core_dumped); +} + +static inline bool coredump_skip(const struct coredump_params *cprm, + const struct linux_binfmt *binfmt) +{ + if (!binfmt) + return true; + if (!binfmt->core_dump) + return true; + if (!__get_dumpable(cprm->mm_flags)) + return true; + return false; +} + +void vfs_coredump(const kernel_siginfo_t *siginfo) +{ + struct cred *cred __free(put_cred) = NULL; + size_t *argv __free(kfree) = NULL; struct core_state core_state; struct core_name cn; struct mm_struct *mm = current->mm; - struct linux_binfmt * binfmt; + struct linux_binfmt *binfmt = mm->binfmt; const struct cred *old_cred; - struct cred *cred; - int retval = 0; - size_t *argv = NULL; int argc = 0; - /* require nonrelative corefile path and be extra careful */ - bool need_suid_safe = false; - bool core_dumped = false; - static atomic_t core_dump_count = ATOMIC_INIT(0); struct coredump_params cprm = { .siginfo = siginfo, .limit = rlimit(RLIMIT_CORE), @@ -662,357 +1111,92 @@ void do_coredump(const kernel_siginfo_t *siginfo) audit_core_dumps(siginfo->si_signo); - binfmt = mm->binfmt; - if (!binfmt || !binfmt->core_dump) - goto fail; - if (!__get_dumpable(cprm.mm_flags)) - goto fail; + if (coredump_skip(&cprm, binfmt)) + return; cred = prepare_creds(); if (!cred) - goto fail; + return; /* * We cannot trust fsuid as being the "true" uid of the process * nor do we know its entire history. We only know it was tainted * so we dump it as root in mode 2, and only into a controlled * environment (pipe handler or fully qualified path). */ - if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) { - /* Setuid core dump mode */ - cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */ - need_suid_safe = true; - } + if (coredump_force_suid_safe(&cprm)) + cred->fsuid = GLOBAL_ROOT_UID; - retval = coredump_wait(siginfo->si_signo, &core_state); - if (retval < 0) - goto fail_creds; + if (coredump_wait(siginfo->si_signo, &core_state) < 0) + return; old_cred = override_creds(cred); - retval = format_corename(&cn, &cprm, &argv, &argc); - if (retval < 0) { + if (!coredump_parse(&cn, &cprm, &argv, &argc)) { coredump_report_failure("format_corename failed, aborting core"); - goto fail_unlock; + goto close_fail; } switch (cn.core_type) { - case COREDUMP_FILE: { - struct mnt_idmap *idmap; - struct inode *inode; - int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW | - O_LARGEFILE | O_EXCL; - - if (cprm.limit < binfmt->min_coredump) - goto fail_unlock; - - if (need_suid_safe && cn.corename[0] != '/') { - coredump_report_failure( - "this process can only dump core to a fully qualified path, skipping core dump"); - goto fail_unlock; - } - - /* - * Unlink the file if it exists unless this is a SUID - * binary - in that case, we're running around with root - * privs and don't want to unlink another user's coredump. - */ - if (!need_suid_safe) { - /* - * If it doesn't exist, that's fine. If there's some - * other problem, we'll catch it at the filp_open(). 
- */ - do_unlinkat(AT_FDCWD, getname_kernel(cn.corename)); - } - - /* - * There is a race between unlinking and creating the - * file, but if that causes an EEXIST here, that's - * fine - another process raced with us while creating - * the corefile, and the other process won. To userspace, - * what matters is that at least one of the two processes - * writes its coredump successfully, not which one. - */ - if (need_suid_safe) { - /* - * Using user namespaces, normal user tasks can change - * their current->fs->root to point to arbitrary - * directories. Since the intention of the "only dump - * with a fully qualified path" rule is to control where - * coredumps may be placed using root privileges, - * current->fs->root must not be used. Instead, use the - * root directory of init_task. - */ - struct path root; - - task_lock(&init_task); - get_fs_root(init_task.fs, &root); - task_unlock(&init_task); - cprm.file = file_open_root(&root, cn.corename, - open_flags, 0600); - path_put(&root); - } else { - cprm.file = filp_open(cn.corename, open_flags, 0600); - } - if (IS_ERR(cprm.file)) - goto fail_unlock; - - inode = file_inode(cprm.file); - if (inode->i_nlink > 1) - goto close_fail; - if (d_unhashed(cprm.file->f_path.dentry)) - goto close_fail; - /* - * AK: actually i see no reason to not allow this for named - * pipes etc, but keep the previous behaviour for now. - */ - if (!S_ISREG(inode->i_mode)) - goto close_fail; - /* - * Don't dump core if the filesystem changed owner or mode - * of the file during file creation. This is an issue when - * a process dumps core while its cwd is e.g. on a vfat - * filesystem. - */ - idmap = file_mnt_idmap(cprm.file); - if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), - current_fsuid())) { - coredump_report_failure("Core dump to %s aborted: " - "cannot preserve file owner", cn.corename); - goto close_fail; - } - if ((inode->i_mode & 0677) != 0600) { - coredump_report_failure("Core dump to %s aborted: " - "cannot preserve file permissions", cn.corename); - goto close_fail; - } - if (!(cprm.file->f_mode & FMODE_CAN_WRITE)) - goto close_fail; - if (do_truncate(idmap, cprm.file->f_path.dentry, - 0, 0, cprm.file)) + case COREDUMP_FILE: + if (!coredump_file(&cn, &cprm, binfmt)) goto close_fail; break; - } - case COREDUMP_PIPE: { - int argi; - int dump_count; - char **helper_argv; - struct subprocess_info *sub_info; - - if (cprm.limit == 1) { - /* See umh_coredump_setup() which sets RLIMIT_CORE = 1. - * - * Normally core limits are irrelevant to pipes, since - * we're not writing to the file system, but we use - * cprm.limit of 1 here as a special value, this is a - * consistent way to catch recursive crashes. - * We can still crash if the core_pattern binary sets - * RLIM_CORE = !1, but it runs as root, and can do - * lots of stupid things. - * - * Note that we use task_tgid_vnr here to grab the pid - * of the process group leader. That way we get the - * right pid if a thread in a multi-threaded - * core_pattern process dies. 
- */ - coredump_report_failure("RLIMIT_CORE is set to 1, aborting core"); - goto fail_unlock; - } - cprm.limit = RLIM_INFINITY; - - dump_count = atomic_inc_return(&core_dump_count); - if (core_pipe_limit && (core_pipe_limit < dump_count)) { - coredump_report_failure("over core_pipe_limit, skipping core dump"); - goto fail_dropcount; - } - - helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), - GFP_KERNEL); - if (!helper_argv) { - coredump_report_failure("%s failed to allocate memory", __func__); - goto fail_dropcount; - } - for (argi = 0; argi < argc; argi++) - helper_argv[argi] = cn.corename + argv[argi]; - helper_argv[argi] = NULL; - - retval = -ENOMEM; - sub_info = call_usermodehelper_setup(helper_argv[0], - helper_argv, NULL, GFP_KERNEL, - umh_coredump_setup, NULL, &cprm); - if (sub_info) - retval = call_usermodehelper_exec(sub_info, - UMH_WAIT_EXEC); - - kfree(helper_argv); - if (retval) { - coredump_report_failure("|%s pipe failed", cn.corename); + case COREDUMP_PIPE: + if (!coredump_pipe(&cn, &cprm, argv, argc)) goto close_fail; - } break; - } - case COREDUMP_SOCK: { -#ifdef CONFIG_UNIX - struct file *file __free(fput) = NULL; - struct sockaddr_un addr = { - .sun_family = AF_UNIX, - }; - ssize_t addr_len; - struct socket *socket; - - addr_len = strscpy(addr.sun_path, cn.corename); - if (addr_len < 0) - goto close_fail; - addr_len += offsetof(struct sockaddr_un, sun_path) + 1; - - /* - * It is possible that the userspace process which is - * supposed to handle the coredump and is listening on - * the AF_UNIX socket coredumps. Userspace should just - * mark itself non dumpable. - */ - - retval = sock_create_kern(&init_net, AF_UNIX, SOCK_STREAM, 0, &socket); - if (retval < 0) - goto close_fail; - - file = sock_alloc_file(socket, 0, NULL); - if (IS_ERR(file)) - goto close_fail; - - /* - * Set the thread-group leader pid which is used for the - * peer credentials during connect() below. Then - * immediately register it in pidfs... - */ - cprm.pid = task_tgid(current); - retval = pidfs_register_pid(cprm.pid); - if (retval) - goto close_fail; - - /* - * ... and set the coredump information so userspace - * has it available after connect()... - */ - pidfs_coredump(&cprm); - - retval = kernel_connect(socket, (struct sockaddr *)(&addr), - addr_len, O_NONBLOCK | SOCK_COREDUMP); - - /* - * ... Make sure to only put our reference after connect() took - * its own reference keeping the pidfs entry alive ... - */ - pidfs_put_pid(cprm.pid); - - if (retval) { - if (retval == -EAGAIN) - coredump_report_failure("Coredump socket %s receive queue full", addr.sun_path); - else - coredump_report_failure("Coredump socket connection %s failed %d", addr.sun_path, retval); + case COREDUMP_SOCK_REQ: + fallthrough; + case COREDUMP_SOCK: + if (!coredump_socket(&cn, &cprm)) goto close_fail; - } - - /* ... and validate that @sk_peer_pid matches @cprm.pid. */ - if (WARN_ON_ONCE(unix_peer(socket->sk)->sk_peer_pid != cprm.pid)) - goto close_fail; - - cprm.limit = RLIM_INFINITY; - cprm.file = no_free_ptr(file); -#else - coredump_report_failure("Core dump socket support %s disabled", cn.corename); - goto close_fail; -#endif break; - } default: WARN_ON_ONCE(true); goto close_fail; } + /* Don't even generate the coredump. 
*/ + if (cn.mask & COREDUMP_REJECT) + goto close_fail; + /* get us an unshared descriptor table; almost always a no-op */ /* The cell spufs coredump code reads the file descriptor tables */ - retval = unshare_files(); - if (retval) + if (unshare_files()) goto close_fail; - if (!dump_interrupted()) { - /* - * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would - * have this set to NULL. - */ - if (!cprm.file) { - coredump_report_failure("Core dump to |%s disabled", cn.corename); - goto close_fail; - } - if (!dump_vma_snapshot(&cprm)) - goto close_fail; - file_start_write(cprm.file); - core_dumped = binfmt->core_dump(&cprm); - /* - * Ensures that file size is big enough to contain the current - * file postion. This prevents gdb from complaining about - * a truncated file if the last "write" to the file was - * dump_skip. - */ - if (cprm.to_skip) { - cprm.to_skip--; - dump_emit(&cprm, "", 1); - } - file_end_write(cprm.file); - free_vma_snapshot(&cprm); - } + if ((cn.mask & COREDUMP_KERNEL) && !coredump_write(&cn, &cprm, binfmt)) + goto close_fail; -#ifdef CONFIG_UNIX - /* Let userspace know we're done processing the coredump. */ - if (sock_from_file(cprm.file)) - kernel_sock_shutdown(sock_from_file(cprm.file), SHUT_WR); -#endif + coredump_sock_shutdown(cprm.file); + + /* Let the parent know that a coredump was generated. */ + if (cn.mask & COREDUMP_USERSPACE) + cn.core_dumped = true; /* * When core_pipe_limit is set we wait for the coredump server * or usermodehelper to finish before exiting so it can e.g., * inspect /proc/<pid>. */ - if (core_pipe_limit) { + if (cn.mask & COREDUMP_WAIT) { switch (cn.core_type) { case COREDUMP_PIPE: wait_for_dump_helpers(cprm.file); break; -#ifdef CONFIG_UNIX - case COREDUMP_SOCK: { - ssize_t n; - - /* - * We use a simple read to wait for the coredump - * processing to finish. Either the socket is - * closed or we get sent unexpected data. In - * both cases, we're done. - */ - n = __kernel_read(cprm.file, &(char){ 0 }, 1, NULL); - if (n != 0) - coredump_report_failure("Unexpected data on coredump socket"); + case COREDUMP_SOCK_REQ: + fallthrough; + case COREDUMP_SOCK: + coredump_sock_wait(cprm.file); break; - } -#endif default: break; } } close_fail: - if (cprm.file) - filp_close(cprm.file, NULL); -fail_dropcount: - if (cn.core_type == COREDUMP_PIPE) - atomic_dec(&core_dump_count); -fail_unlock: - kfree(argv); - kfree(cn.corename); - coredump_finish(core_dumped); + coredump_cleanup(&cn, &cprm); revert_creds(old_cred); -fail_creds: - put_cred(cred); -fail: return; } @@ -1238,6 +1422,8 @@ void validate_coredump_safety(void) static inline bool check_coredump_socket(void) { + const char *p; + if (core_pattern[0] != '@') return true; @@ -1249,8 +1435,25 @@ static inline bool check_coredump_socket(void) if (current->nsproxy->mnt_ns != init_task.nsproxy->mnt_ns) return false; - /* Must be an absolute path. */ - if (*(core_pattern + 1) != '/') + /* Must be an absolute path... */ + if (core_pattern[1] != '/') { + /* ... or the socket request protocol... */ + if (core_pattern[1] != '@') + return false; + /* ... and if so must be an absolute path. */ + if (core_pattern[2] != '/') + return false; + p = &core_pattern[2]; + } else { + p = &core_pattern[1]; + } + + /* The path obviously cannot exceed UNIX_PATH_MAX. */ + if (strlen(p) >= UNIX_PATH_MAX) + return false; + + /* Must not contain ".." in the path. 
*/ + if (name_contains_dotdot(core_pattern)) return false; return true; diff --git a/fs/d_path.c b/fs/d_path.c index 5f4da5c8d5db..bb365511066b 100644 --- a/fs/d_path.c +++ b/fs/d_path.c @@ -241,9 +241,9 @@ static void get_fs_root_rcu(struct fs_struct *fs, struct path *root) unsigned seq; do { - seq = read_seqcount_begin(&fs->seq); + seq = read_seqbegin(&fs->seq); *root = fs->root; - } while (read_seqcount_retry(&fs->seq, seq)); + } while (read_seqretry(&fs->seq, seq)); } /** @@ -385,10 +385,10 @@ static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root, unsigned seq; do { - seq = read_seqcount_begin(&fs->seq); + seq = read_seqbegin(&fs->seq); *root = fs->root; *pwd = fs->pwd; - } while (read_seqcount_retry(&fs->seq, seq)); + } while (read_seqretry(&fs->seq, seq)); } /* diff --git a/fs/dcache.c b/fs/dcache.c index 03d58b2d4fa3..60046ae23d51 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1436,7 +1436,7 @@ int d_set_mounted(struct dentry *dentry) { struct dentry *p; int ret = -ENOENT; - write_seqlock(&rename_lock); + read_seqlock_excl(&rename_lock); for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { /* Need exclusion wrt. d_invalidate() */ spin_lock(&p->d_lock); @@ -1456,7 +1456,7 @@ int d_set_mounted(struct dentry *dentry) } spin_unlock(&dentry->d_lock); out: - write_sequnlock(&rename_lock); + read_sequnlock_excl(&rename_lock); return ret; } @@ -1731,14 +1731,14 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) dentry->d_inode = NULL; dentry->d_parent = dentry; dentry->d_sb = sb; - dentry->d_op = NULL; + dentry->d_op = sb->__s_d_op; + dentry->d_flags = sb->s_d_flags; dentry->d_fsdata = NULL; INIT_HLIST_BL_NODE(&dentry->d_hash); INIT_LIST_HEAD(&dentry->d_lru); INIT_HLIST_HEAD(&dentry->d_children); INIT_HLIST_NODE(&dentry->d_u.d_alias); INIT_HLIST_NODE(&dentry->d_sib); - d_set_d_op(dentry, dentry->d_sb->s_d_op); if (dentry->d_op && dentry->d_op->d_init) { err = dentry->d_op->d_init(dentry); @@ -1821,8 +1821,9 @@ struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) struct dentry *dentry = __d_alloc(sb, name); if (likely(dentry)) { dentry->d_flags |= DCACHE_NORCU; - if (!sb->s_d_op) - d_set_d_op(dentry, &anon_ops); + /* d_op_flags(&anon_ops) is 0 */ + if (!dentry->d_op) + dentry->d_op = &anon_ops; } return dentry; } @@ -1837,35 +1838,50 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name) } EXPORT_SYMBOL(d_alloc_name); -void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) +#define DCACHE_OP_FLAGS \ + (DCACHE_OP_HASH | DCACHE_OP_COMPARE | DCACHE_OP_REVALIDATE | \ + DCACHE_OP_WEAK_REVALIDATE | DCACHE_OP_DELETE | DCACHE_OP_PRUNE | \ + DCACHE_OP_REAL) + +static unsigned int d_op_flags(const struct dentry_operations *op) { + unsigned int flags = 0; + if (op) { + if (op->d_hash) + flags |= DCACHE_OP_HASH; + if (op->d_compare) + flags |= DCACHE_OP_COMPARE; + if (op->d_revalidate) + flags |= DCACHE_OP_REVALIDATE; + if (op->d_weak_revalidate) + flags |= DCACHE_OP_WEAK_REVALIDATE; + if (op->d_delete) + flags |= DCACHE_OP_DELETE; + if (op->d_prune) + flags |= DCACHE_OP_PRUNE; + if (op->d_real) + flags |= DCACHE_OP_REAL; + } + return flags; +} + +static void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) +{ + unsigned int flags = d_op_flags(op); WARN_ON_ONCE(dentry->d_op); - WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | - DCACHE_OP_COMPARE | - DCACHE_OP_REVALIDATE | - DCACHE_OP_WEAK_REVALIDATE | - DCACHE_OP_DELETE | - DCACHE_OP_REAL)); + 
WARN_ON_ONCE(dentry->d_flags & DCACHE_OP_FLAGS); dentry->d_op = op; - if (!op) - return; - if (op->d_hash) - dentry->d_flags |= DCACHE_OP_HASH; - if (op->d_compare) - dentry->d_flags |= DCACHE_OP_COMPARE; - if (op->d_revalidate) - dentry->d_flags |= DCACHE_OP_REVALIDATE; - if (op->d_weak_revalidate) - dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE; - if (op->d_delete) - dentry->d_flags |= DCACHE_OP_DELETE; - if (op->d_prune) - dentry->d_flags |= DCACHE_OP_PRUNE; - if (op->d_real) - dentry->d_flags |= DCACHE_OP_REAL; - -} -EXPORT_SYMBOL(d_set_d_op); + if (flags) + dentry->d_flags |= flags; +} + +void set_default_d_op(struct super_block *s, const struct dentry_operations *ops) +{ + unsigned int flags = d_op_flags(ops); + s->__s_d_op = ops; + s->s_d_flags = (s->s_d_flags & ~DCACHE_OP_FLAGS) | flags; +} +EXPORT_SYMBOL(set_default_d_op); static unsigned d_flags_for_inode(struct inode *inode) { @@ -2530,13 +2546,19 @@ struct dentry *d_alloc_parallel(struct dentry *parent, unsigned int hash = name->hash; struct hlist_bl_head *b = in_lookup_hash(parent, hash); struct hlist_bl_node *node; - struct dentry *new = d_alloc(parent, name); + struct dentry *new = __d_alloc(parent->d_sb, name); struct dentry *dentry; unsigned seq, r_seq, d_seq; if (unlikely(!new)) return ERR_PTR(-ENOMEM); + new->d_flags |= DCACHE_PAR_LOOKUP; + spin_lock(&parent->d_lock); + new->d_parent = dget_dlock(parent); + hlist_add_head(&new->d_sib, &parent->d_children); + spin_unlock(&parent->d_lock); + retry: rcu_read_lock(); seq = smp_load_acquire(&parent->d_inode->i_dir_seq); @@ -2620,8 +2642,6 @@ retry: return dentry; } rcu_read_unlock(); - /* we can't take ->d_lock here; it's OK, though. */ - new->d_flags |= DCACHE_PAR_LOOKUP; new->d_wait = wq; hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b); hlist_bl_unlock(b); @@ -2667,7 +2687,8 @@ EXPORT_SYMBOL(__d_lookup_unhash_wake); /* inode->i_lock held if inode is non-NULL */ -static inline void __d_add(struct dentry *dentry, struct inode *inode) +static inline void __d_add(struct dentry *dentry, struct inode *inode, + const struct dentry_operations *ops) { wait_queue_head_t *d_wait; struct inode *dir = NULL; @@ -2678,6 +2699,8 @@ static inline void __d_add(struct dentry *dentry, struct inode *inode) n = start_dir_add(dir); d_wait = __d_lookup_unhash(dentry); } + if (unlikely(ops)) + d_set_d_op(dentry, ops); if (inode) { unsigned add_flags = d_flags_for_inode(inode); hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); @@ -2709,7 +2732,7 @@ void d_add(struct dentry *entry, struct inode *inode) security_d_instantiate(entry, inode); spin_lock(&inode->i_lock); } - __d_add(entry, inode); + __d_add(entry, inode, NULL); } EXPORT_SYMBOL(d_add); @@ -2774,10 +2797,10 @@ static void copy_name(struct dentry *dentry, struct dentry *target) * @target: new dentry * @exchange: exchange the two dentries * - * Update the dcache to reflect the move of a file name. Negative - * dcache entries should not be moved in this way. Caller must hold - * rename_lock, the i_mutex of the source and target directories, - * and the sb->s_vfs_rename_mutex if they differ. See lock_rename(). + * Update the dcache to reflect the move of a file name. Negative dcache + * entries should not be moved in this way. Caller must hold rename_lock, the + * i_rwsem of the source and target directories (exclusively), and the sb-> + * s_vfs_rename_mutex if they differ. See lock_rename(). 
*/ static void __d_move(struct dentry *dentry, struct dentry *target, bool exchange) @@ -2923,7 +2946,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) * This helper attempts to cope with remotely renamed directories * * It assumes that the caller is already holding - * dentry->d_parent->d_inode->i_mutex, and rename_lock + * dentry->d_parent->d_inode->i_rwsem, and rename_lock * * Note: If ever the locking in lock_rename() changes, then please * remember to update this too... @@ -2961,30 +2984,8 @@ out_err: return ret; } -/** - * d_splice_alias - splice a disconnected dentry into the tree if one exists - * @inode: the inode which may have a disconnected dentry - * @dentry: a negative dentry which we want to point to the inode. - * - * If inode is a directory and has an IS_ROOT alias, then d_move that in - * place of the given dentry and return it, else simply d_add the inode - * to the dentry and return NULL. - * - * If a non-IS_ROOT directory is found, the filesystem is corrupt, and - * we should error out: directories can't have multiple aliases. - * - * This is needed in the lookup routine of any filesystem that is exportable - * (via knfsd) so that we can build dcache paths to directories effectively. - * - * If a dentry was found and moved, then it is returned. Otherwise NULL - * is returned. This matches the expected return value of ->lookup. - * - * Cluster filesystems may call this function with a negative, hashed dentry. - * In that case, we know that the inode will be a regular file, and also this - * will only occur during atomic_open. So we need to check for the dentry - * being already hashed only in the final case. - */ -struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) +struct dentry *d_splice_alias_ops(struct inode *inode, struct dentry *dentry, + const struct dentry_operations *ops) { if (IS_ERR(inode)) return ERR_CAST(inode); @@ -3030,9 +3031,37 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) } } out: - __d_add(dentry, inode); + __d_add(dentry, inode, ops); return NULL; } + +/** + * d_splice_alias - splice a disconnected dentry into the tree if one exists + * @inode: the inode which may have a disconnected dentry + * @dentry: a negative dentry which we want to point to the inode. + * + * If inode is a directory and has an IS_ROOT alias, then d_move that in + * place of the given dentry and return it, else simply d_add the inode + * to the dentry and return NULL. + * + * If a non-IS_ROOT directory is found, the filesystem is corrupt, and + * we should error out: directories can't have multiple aliases. + * + * This is needed in the lookup routine of any filesystem that is exportable + * (via knfsd) so that we can build dcache paths to directories effectively. + * + * If a dentry was found and moved, then it is returned. Otherwise NULL + * is returned. This matches the expected return value of ->lookup. + * + * Cluster filesystems may call this function with a negative, hashed dentry. + * In that case, we know that the inode will be a regular file, and also this + * will only occur during atomic_open. So we need to check for the dentry + * being already hashed only in the final case. 
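As a rough illustration of the contract documented above (not part of this patch; the examplefs_* names are hypothetical), an exportable filesystem's ->lookup typically hands whatever it found straight to d_splice_alias() and returns the result:

static struct dentry *examplefs_lookup(struct inode *dir, struct dentry *dentry,
				       unsigned int flags)
{
	/* may come back as a fresh inode, NULL (negative entry) or an ERR_PTR() */
	struct inode *inode = examplefs_iget(dir->i_sb, dentry);

	/*
	 * d_splice_alias() copes with all three cases and with reconnecting a
	 * disconnected directory alias, as described in the comment above.
	 */
	return d_splice_alias(inode, dentry);
}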
+ */ +struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) +{ + return d_splice_alias_ops(inode, dentry, NULL); +} EXPORT_SYMBOL(d_splice_alias); /* diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 30c4944e1862..6677991c7e4b 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -258,7 +258,6 @@ static struct vfsmount *debugfs_automount(struct path *path) } static const struct dentry_operations debugfs_dops = { - .d_delete = always_delete_dentry, .d_release = debugfs_release_dentry, .d_automount = debugfs_automount, }; @@ -273,7 +272,8 @@ static int debugfs_fill_super(struct super_block *sb, struct fs_context *fc) return err; sb->s_op = &debugfs_super_operations; - sb->s_d_op = &debugfs_dops; + set_default_d_op(sb, &debugfs_dops); + sb->s_d_flags |= DCACHE_DONTCACHE; debugfs_apply_options(sb); @@ -384,27 +384,12 @@ static struct dentry *start_creating(const char *name, struct dentry *parent) if (!parent) parent = debugfs_mount->mnt_root; - inode_lock(d_inode(parent)); - if (unlikely(IS_DEADDIR(d_inode(parent)))) - dentry = ERR_PTR(-ENOENT); - else - dentry = lookup_noperm(&QSTR(name), parent); - if (!IS_ERR(dentry) && d_really_is_positive(dentry)) { - if (d_is_dir(dentry)) - pr_err("Directory '%s' with parent '%s' already present!\n", - name, parent->d_name.name); - else - pr_err("File '%s' in directory '%s' already present!\n", - name, parent->d_name.name); - dput(dentry); - dentry = ERR_PTR(-EEXIST); - } - + dentry = simple_start_creating(parent, name); if (IS_ERR(dentry)) { - inode_unlock(d_inode(parent)); + if (dentry == ERR_PTR(-EEXIST)) + pr_err("'%s' already exists in '%pd'\n", name, parent); simple_release_fs(&debugfs_mount, &debugfs_mount_count); } - return dentry; } diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 9c20d78e41f6..fdf22264a8e9 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -381,7 +381,7 @@ static int devpts_fill_super(struct super_block *s, struct fs_context *fc) s->s_blocksize_bits = 10; s->s_magic = DEVPTS_SUPER_MAGIC; s->s_op = &devpts_sops; - s->s_d_op = &simple_dentry_operations; + s->s_d_flags = DCACHE_DONTCACHE; s->s_time_gran = 1; fsi->sb = s; diff --git a/fs/direct-io.c b/fs/direct-io.c index bbd05f1a2145..1694ee9a9382 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -1083,8 +1083,8 @@ static inline int drop_refcount(struct dio *dio) * The locking rules are governed by the flags parameter: * - if the flags value contains DIO_LOCKING we use a fancy locking * scheme for dumb filesystems. - * For writes this function is called under i_mutex and returns with - * i_mutex held, for reads, i_mutex is not held on entry, but it is + * For writes this function is called under i_rwsem and returns with + * i_rwsem held, for reads, i_rwsem is not held on entry, but it is * taken and dropped again before returning. * - if the flags value does NOT contain DIO_LOCKING we don't use any * internal locking but rather rely on the filesystem to synchronize @@ -1094,7 +1094,7 @@ static inline int drop_refcount(struct dio *dio) * counter before starting direct I/O, and decrement it once we are done. * Truncate can wait for it to reach zero to provide exclusion. It is * expected that filesystem provide exclusion between new direct I/O - * and truncates. For DIO_LOCKING filesystems this is done by i_mutex, + * and truncates. For DIO_LOCKING filesystems this is done by i_rwsem, * but other filesystems need to take care of this on their own. 
* * NOTE: if you pass "sdio" to anything by pointer make sure that function @@ -1279,7 +1279,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, /* * All block lookups have been performed. For READ requests - * we can let i_mutex go now that its achieved its purpose + * we can let i_rwsem go now that its achieved its purpose * of protecting us from looking up uninitialized blocks. */ if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index ce0a3c5ed0ca..5f8f96da09fe 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -193,7 +193,7 @@ static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma) * natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs * allows recursive mounting, this will need to be extended. */ - if (!lower_file->f_op->mmap) + if (!can_mmap_file(lower_file)) return -ENODEV; return generic_file_mmap(file, vma); } diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 493d7f194956..72fbe1316ab8 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -635,10 +635,10 @@ ecryptfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, } rd.old_mnt_idmap = &nop_mnt_idmap; - rd.old_dir = d_inode(lower_old_dir_dentry); + rd.old_parent = lower_old_dir_dentry; rd.old_dentry = lower_old_dentry; rd.new_mnt_idmap = &nop_mnt_idmap; - rd.new_dir = d_inode(lower_new_dir_dentry); + rd.new_parent = lower_new_dir_dentry; rd.new_dentry = lower_new_dentry; rc = vfs_rename(&rd); if (rc) @@ -1124,13 +1124,13 @@ out: return rc; } -static int ecryptfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) +static int ecryptfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { return vfs_fileattr_get(ecryptfs_dentry_to_lower(dentry), fa); } static int ecryptfs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); int rc; diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 8dd1d7189c3b..eab1beb846d3 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -20,6 +20,7 @@ #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/fs_stack.h> +#include <linux/sysfs.h> #include <linux/slab.h> #include <linux/magic.h> #include "ecryptfs_kernel.h" @@ -471,7 +472,7 @@ static int ecryptfs_get_tree(struct fs_context *fc) sbi = NULL; s->s_op = &ecryptfs_sops; s->s_xattr = ecryptfs_xattr_handlers; - s->s_d_op = &ecryptfs_dops; + set_default_d_op(s, &ecryptfs_dops); err = "Reading sb failed"; rc = kern_path(fc->source, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); @@ -764,7 +765,7 @@ static struct kobject *ecryptfs_kobj; static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buff) { - return snprintf(buff, PAGE_SIZE, "%d\n", ECRYPTFS_VERSIONING_MASK); + return sysfs_emit(buff, "%d\n", ECRYPTFS_VERSIONING_MASK); } static struct kobj_attribute version_attr = __ATTR_RO(version); diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 60f0ac8744b5..2c2b12fedeae 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -228,7 +228,7 @@ out: /** * ecryptfs_write_begin - * @file: The eCryptfs file + * @iocb: I/O control block for the eCryptfs file * @mapping: The eCryptfs object * @pos: The file offset at which to start writing * @len: Length of the write @@ -239,7 +239,7 @@ out: * * Returns zero on success; non-zero otherwise */ -static int ecryptfs_write_begin(struct file *file, +static int 
ecryptfs_write_begin(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) @@ -322,7 +322,7 @@ static int ecryptfs_write_begin(struct file *file, * Note, this will increase i_size. */ if (index != 0) { if (prev_page_end_size > i_size_read(mapping->host)) { - rc = ecryptfs_truncate(file->f_path.dentry, + rc = ecryptfs_truncate(iocb->ki_filp->f_path.dentry, prev_page_end_size); if (rc) { printk(KERN_ERR "%s: Error on attempt to " @@ -429,7 +429,7 @@ int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode) /** * ecryptfs_write_end - * @file: The eCryptfs file object + * @iocb: I/O control block for the eCryptfs file * @mapping: The eCryptfs object * @pos: The file position * @len: The length of the data (unused) @@ -437,7 +437,7 @@ int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode) * @folio: The eCryptfs folio * @fsdata: The fsdata (unused) */ -static int ecryptfs_write_end(struct file *file, +static int ecryptfs_write_end(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c index 98a7299a9ee9..2891614abf8d 100644 --- a/fs/efivarfs/inode.c +++ b/fs/efivarfs/inode.c @@ -138,7 +138,7 @@ const struct inode_operations efivarfs_dir_inode_operations = { }; static int -efivarfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) +efivarfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { unsigned int i_flags; unsigned int flags = 0; @@ -154,7 +154,7 @@ efivarfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) static int efivarfs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { unsigned int i_flags = 0; diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 284d6dbba2ec..c4a139911356 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -183,7 +183,6 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr) static const struct dentry_operations efivarfs_d_ops = { .d_compare = efivarfs_d_compare, .d_hash = efivarfs_d_hash, - .d_delete = always_delete_dentry, }; static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name) @@ -350,7 +349,8 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = EFIVARFS_MAGIC; sb->s_op = &efivarfs_ops; - sb->s_d_op = &efivarfs_d_ops; + set_default_d_op(sb, &efivarfs_d_ops); + sb->s_d_flags |= DCACHE_DONTCACHE; sb->s_time_gran = 1; if (!efivar_supports_writes()) diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index 6beeb7063871..7b26efc271ee 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -147,6 +147,8 @@ config EROFS_FS_ZIP_ZSTD config EROFS_FS_ZIP_ACCEL bool "EROFS hardware decompression support" depends on EROFS_FS_ZIP + select CRYPTO + select CRYPTO_DEFLATE help Saying Y here includes hardware accelerator support for reading EROFS file systems containing compressed data. 
It gives better diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 16e4a6bd9b97..3b1ba571c728 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -49,11 +49,18 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap) return buf->base + (offset & ~PAGE_MASK); } -void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) +int erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb, + bool in_metabox) { struct erofs_sb_info *sbi = EROFS_SB(sb); buf->file = NULL; + if (in_metabox) { + if (unlikely(!sbi->metabox_inode)) + return -EFSCORRUPTED; + buf->mapping = sbi->metabox_inode->i_mapping; + return 0; + } buf->off = sbi->dif0.fsoff; if (erofs_is_fileio_mode(sbi)) { buf->file = sbi->dif0.file; /* some fs like FUSE needs it */ @@ -62,13 +69,18 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) buf->mapping = sbi->dif0.fscache->inode->i_mapping; else buf->mapping = sb->s_bdev->bd_mapping; + return 0; } void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, - erofs_off_t offset, bool need_kmap) + erofs_off_t offset, bool in_metabox) { - erofs_init_metabuf(buf, sb); - return erofs_bread(buf, offset, need_kmap); + int err; + + err = erofs_init_metabuf(buf, sb, in_metabox); + if (err) + return ERR_PTR(err); + return erofs_bread(buf, offset, true); } int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) @@ -118,7 +130,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, unit) + unit * chunknr; - idx = erofs_read_metabuf(&buf, sb, pos, true); + idx = erofs_read_metabuf(&buf, sb, pos, erofs_inode_in_metabox(inode)); if (IS_ERR(idx)) { err = PTR_ERR(idx); goto out; @@ -264,51 +276,51 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, map.m_la = offset; map.m_llen = length; - ret = erofs_map_blocks(inode, &map); if (ret < 0) return ret; - mdev = (struct erofs_map_dev) { - .m_deviceid = map.m_deviceid, - .m_pa = map.m_pa, - }; - ret = erofs_map_dev(sb, &mdev); - if (ret) - return ret; - iomap->offset = map.m_la; - if (flags & IOMAP_DAX) - iomap->dax_dev = mdev.m_dif->dax_dev; - else - iomap->bdev = mdev.m_bdev; iomap->length = map.m_llen; iomap->flags = 0; iomap->private = NULL; - + iomap->addr = IOMAP_NULL_ADDR; if (!(map.m_flags & EROFS_MAP_MAPPED)) { iomap->type = IOMAP_HOLE; - iomap->addr = IOMAP_NULL_ADDR; - if (!iomap->length) - iomap->length = length; return 0; } + if (!(map.m_flags & EROFS_MAP_META) || !erofs_inode_in_metabox(inode)) { + mdev = (struct erofs_map_dev) { + .m_deviceid = map.m_deviceid, + .m_pa = map.m_pa, + }; + ret = erofs_map_dev(sb, &mdev); + if (ret) + return ret; + + if (flags & IOMAP_DAX) + iomap->dax_dev = mdev.m_dif->dax_dev; + else + iomap->bdev = mdev.m_bdev; + iomap->addr = mdev.m_dif->fsoff + mdev.m_pa; + if (flags & IOMAP_DAX) + iomap->addr += mdev.m_dif->dax_part_off; + } + if (map.m_flags & EROFS_MAP_META) { void *ptr; struct erofs_buf buf = __EROFS_BUF_INITIALIZER; iomap->type = IOMAP_INLINE; - ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, true); + ptr = erofs_read_metabuf(&buf, sb, map.m_pa, + erofs_inode_in_metabox(inode)); if (IS_ERR(ptr)) return PTR_ERR(ptr); iomap->inline_data = ptr; iomap->private = buf.base; } else { iomap->type = IOMAP_MAPPED; - iomap->addr = mdev.m_dif->fsoff + mdev.m_pa; - if (flags & IOMAP_DAX) - iomap->addr += mdev.m_dif->dax_part_off; } return 0; } @@ -420,20 +432,20 @@ static const struct vm_operations_struct 
erofs_dax_vm_ops = { .huge_fault = erofs_dax_huge_fault, }; -static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int erofs_file_mmap_prepare(struct vm_area_desc *desc) { - if (!IS_DAX(file_inode(file))) - return generic_file_readonly_mmap(file, vma); + if (!IS_DAX(file_inode(desc->file))) + return generic_file_readonly_mmap_prepare(desc); - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) + if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE)) return -EINVAL; - vma->vm_ops = &erofs_dax_vm_ops; - vm_flags_set(vma, VM_HUGEPAGE); + desc->vm_ops = &erofs_dax_vm_ops; + desc->vm_flags |= VM_HUGEPAGE; return 0; } #else -#define erofs_file_mmap generic_file_readonly_mmap +#define erofs_file_mmap_prepare generic_file_readonly_mmap_prepare #endif static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence) @@ -463,7 +475,7 @@ static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence) const struct file_operations erofs_file_fops = { .llseek = erofs_file_llseek, .read_iter = erofs_file_read_iter, - .mmap = erofs_file_mmap, + .mmap_prepare = erofs_file_mmap_prepare, .get_unmapped_area = thp_get_unmapped_area, .splice_read = filemap_splice_read, }; diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 358061d7b660..354762c9723f 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -467,7 +467,7 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb) return -EOPNOTSUPP; } - erofs_init_metabuf(&buf, sb); + (void)erofs_init_metabuf(&buf, sb, false); offset = EROFS_SUPER_OFFSET + sbi->sb_size; alg = 0; for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) { diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c index 3e4b38bec0aa..debf469ad6bd 100644 --- a/fs/erofs/dir.c +++ b/fs/erofs/dir.c @@ -34,7 +34,8 @@ static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, } if (!dir_emit(ctx, de_name, de_namelen, - le64_to_cpu(de->nid), d_type)) + erofs_nid_to_ino64(EROFS_SB(dir->i_sb), + le64_to_cpu(de->nid)), d_type)) return 1; ++de; ctx->pos += sizeof(struct erofs_dirent); @@ -47,8 +48,12 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) struct inode *dir = file_inode(f); struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct super_block *sb = dir->i_sb; + struct file_ra_state *ra = &f->f_ra; unsigned long bsz = sb->s_blocksize; unsigned int ofs = erofs_blkoff(sb, ctx->pos); + pgoff_t ra_pages = DIV_ROUND_UP_POW2( + EROFS_I_SB(dir)->dir_ra_bytes, PAGE_SIZE); + pgoff_t nr_pages = DIV_ROUND_UP_POW2(dir->i_size, PAGE_SIZE); int err = 0; bool initial = true; @@ -63,6 +68,16 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) break; } + /* readahead blocks to enhance performance for large directories */ + if (ra_pages) { + pgoff_t idx = DIV_ROUND_UP_POW2(ctx->pos, PAGE_SIZE); + pgoff_t pages = min(nr_pages - idx, ra_pages); + + if (pages > 1 && !ra_has_index(ra, idx)) + page_cache_sync_readahead(dir->i_mapping, ra, + f, idx, pages); + } + de = erofs_bread(&buf, dbstart, true); if (IS_ERR(de)) { erofs_err(sb, "failed to readdir of logical block %llu of nid %llu", diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index 767fb4acdc93..377ee12b8b96 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -15,6 +15,7 @@ #define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001 #define EROFS_FEATURE_COMPAT_MTIME 0x00000002 #define EROFS_FEATURE_COMPAT_XATTR_FILTER 0x00000004 +#define 
EROFS_FEATURE_COMPAT_SHARED_EA_IN_METABOX 0x00000008 /* * Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should @@ -31,8 +32,9 @@ #define EROFS_FEATURE_INCOMPAT_DEDUPE 0x00000020 #define EROFS_FEATURE_INCOMPAT_XATTR_PREFIXES 0x00000040 #define EROFS_FEATURE_INCOMPAT_48BIT 0x00000080 +#define EROFS_FEATURE_INCOMPAT_METABOX 0x00000100 #define EROFS_ALL_FEATURE_INCOMPAT \ - ((EROFS_FEATURE_INCOMPAT_48BIT << 1) - 1) + ((EROFS_FEATURE_INCOMPAT_METABOX << 1) - 1) #define EROFS_SB_EXTSLOT_SIZE 16 @@ -46,7 +48,7 @@ struct erofs_deviceslot { }; #define EROFS_DEVT_SLOT_SIZE sizeof(struct erofs_deviceslot) -/* erofs on-disk super block (currently 128 bytes) */ +/* erofs on-disk super block (currently 144 bytes at maximum) */ struct erofs_super_block { __le32 magic; /* file system magic number */ __le32 checksum; /* crc32c to avoid unexpected on-disk overlap */ @@ -82,7 +84,9 @@ struct erofs_super_block { __u8 reserved[3]; __le32 build_time; /* seconds added to epoch for mkfs time */ __le64 rootnid_8b; /* (48BIT on) nid of root directory */ - __u8 reserved2[8]; + __le64 reserved2; + __le64 metabox_nid; /* (METABOX on) nid of the metabox inode */ + __le64 reserved3; /* [align to extslot 1] */ }; /* @@ -267,6 +271,9 @@ struct erofs_inode_chunk_index { __le32 startblk_lo; /* starting block number of this chunk */ }; +#define EROFS_DIRENT_NID_METABOX_BIT 63 +#define EROFS_DIRENT_NID_MASK (BIT_ULL(EROFS_DIRENT_NID_METABOX_BIT) - 1) + /* dirent sorts in alphabet order, thus we can do binary search */ struct erofs_dirent { __le64 nid; /* node number */ @@ -434,7 +441,7 @@ static inline void erofs_check_ondisk_layout_definitions(void) .h_clusterbits = 1 << Z_EROFS_FRAGMENT_INODE_BIT }; - BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128); + BUILD_BUG_ON(sizeof(struct erofs_super_block) != 144); BUILD_BUG_ON(sizeof(struct erofs_inode_compact) != 32); BUILD_BUG_ON(sizeof(struct erofs_inode_extended) != 64); BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12); diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c index 91781718199e..b7b3432a9882 100644 --- a/fs/erofs/fileio.c +++ b/fs/erofs/fileio.c @@ -115,7 +115,7 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio) void *src; src = erofs_read_metabuf(&buf, inode->i_sb, - map->m_pa + ofs, true); + map->m_pa + ofs, erofs_inode_in_metabox(inode)); if (IS_ERR(src)) { err = PTR_ERR(src); break; diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index 34517ca9df91..362acf828279 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -274,7 +274,8 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req) size_t size = map.m_llen; void *src; - src = erofs_read_metabuf(&buf, sb, map.m_pa, true); + src = erofs_read_metabuf(&buf, sb, map.m_pa, + erofs_inode_in_metabox(inode)); if (IS_ERR(src)) return PTR_ERR(src); diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index a0ae0b4f7b01..9a2f59721522 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -29,6 +29,7 @@ static int erofs_read_inode(struct inode *inode) struct super_block *sb = inode->i_sb; erofs_blk_t blkaddr = erofs_blknr(sb, erofs_iloc(inode)); unsigned int ofs = erofs_blkoff(sb, erofs_iloc(inode)); + bool in_mbox = erofs_inode_in_metabox(inode); struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct erofs_sb_info *sbi = EROFS_SB(sb); erofs_blk_t addrmask = BIT_ULL(48) - 1; @@ -39,10 +40,10 @@ static int erofs_read_inode(struct inode *inode) void *ptr; int err = 0; - ptr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), true); + ptr = 
erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), in_mbox); if (IS_ERR(ptr)) { err = PTR_ERR(ptr); - erofs_err(sb, "failed to get inode (nid: %llu) page, err %d", + erofs_err(sb, "failed to read inode meta block (nid: %llu): %d", vi->nid, err); goto err_out; } @@ -78,10 +79,10 @@ static int erofs_read_inode(struct inode *inode) memcpy(&copied, dic, gotten); ptr = erofs_read_metabuf(&buf, sb, - erofs_pos(sb, blkaddr + 1), true); + erofs_pos(sb, blkaddr + 1), in_mbox); if (IS_ERR(ptr)) { err = PTR_ERR(ptr); - erofs_err(sb, "failed to get inode payload block (nid: %llu), err %d", + erofs_err(sb, "failed to read inode payload block (nid: %llu): %d", vi->nid, err); goto err_out; } @@ -264,13 +265,13 @@ static int erofs_fill_inode(struct inode *inode) * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down * so that it will fit. */ -static ino_t erofs_squash_ino(erofs_nid_t nid) +static ino_t erofs_squash_ino(struct super_block *sb, erofs_nid_t nid) { - ino_t ino = (ino_t)nid; + u64 ino64 = erofs_nid_to_ino64(EROFS_SB(sb), nid); if (sizeof(ino_t) < sizeof(erofs_nid_t)) - ino ^= nid >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8; - return ino; + ino64 ^= ino64 >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8; + return (ino_t)ino64; } static int erofs_iget5_eq(struct inode *inode, void *opaque) @@ -282,7 +283,7 @@ static int erofs_iget5_set(struct inode *inode, void *opaque) { const erofs_nid_t nid = *(erofs_nid_t *)opaque; - inode->i_ino = erofs_squash_ino(nid); + inode->i_ino = erofs_squash_ino(inode->i_sb, nid); EROFS_I(inode)->nid = nid; return 0; } @@ -291,7 +292,7 @@ struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid) { struct inode *inode; - inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq, + inode = iget5_locked(sb, erofs_squash_ino(sb, nid), erofs_iget5_eq, erofs_iget5_set, &nid); if (!inode) return ERR_PTR(-ENOMEM); diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 06b867d2fc3b..4ccc5f0ee8df 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -125,6 +125,7 @@ struct erofs_sb_info { struct erofs_sb_lz4_info lz4; #endif /* CONFIG_EROFS_FS_ZIP */ struct inode *packed_inode; + struct inode *metabox_inode; struct erofs_dev_context *devs; u64 total_blocks; @@ -148,6 +149,7 @@ struct erofs_sb_info { /* what we really care is nid, rather than ino.. */ erofs_nid_t root_nid; erofs_nid_t packed_nid; + erofs_nid_t metabox_nid; /* used for statfs, f_files - f_favail */ u64 inos; @@ -157,6 +159,7 @@ struct erofs_sb_info { /* sysfs support */ struct kobject s_kobj; /* /sys/fs/erofs/<devname> */ struct completion s_kobj_unregister; + erofs_off_t dir_ra_bytes; /* fscache support */ struct fscache_volume *volume; @@ -227,8 +230,27 @@ EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS) EROFS_FEATURE_FUNCS(dedupe, incompat, INCOMPAT_DEDUPE) EROFS_FEATURE_FUNCS(xattr_prefixes, incompat, INCOMPAT_XATTR_PREFIXES) EROFS_FEATURE_FUNCS(48bit, incompat, INCOMPAT_48BIT) +EROFS_FEATURE_FUNCS(metabox, incompat, INCOMPAT_METABOX) EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM) EROFS_FEATURE_FUNCS(xattr_filter, compat, COMPAT_XATTR_FILTER) +EROFS_FEATURE_FUNCS(shared_ea_in_metabox, compat, COMPAT_SHARED_EA_IN_METABOX) + +static inline u64 erofs_nid_to_ino64(struct erofs_sb_info *sbi, erofs_nid_t nid) +{ + if (!erofs_sb_has_metabox(sbi)) + return nid; + + /* + * When metadata compression is enabled, avoid generating excessively + * large inode numbers for metadata-compressed inodes. 
Shift NIDs in + * the 31-62 bit range left by one and move the metabox flag to bit 31. + * + * Note: on-disk NIDs remain unchanged as they are primarily used for + * compatibility with non-LFS 32-bit applications. + */ + return ((nid << 1) & GENMASK_ULL(63, 32)) | (nid & GENMASK(30, 0)) | + ((nid >> EROFS_DIRENT_NID_METABOX_BIT) << 31); +} /* atomic flag definitions */ #define EROFS_I_EA_INITED_BIT 0 @@ -238,6 +260,9 @@ EROFS_FEATURE_FUNCS(xattr_filter, compat, COMPAT_XATTR_FILTER) #define EROFS_I_BL_XATTR_BIT (BITS_PER_LONG - 1) #define EROFS_I_BL_Z_BIT (BITS_PER_LONG - 2) +/* default readahead size of directories */ +#define EROFS_DIR_RA_BYTES 16384 + struct erofs_inode { erofs_nid_t nid; @@ -279,12 +304,20 @@ struct erofs_inode { #define EROFS_I(ptr) container_of(ptr, struct erofs_inode, vfs_inode) +static inline bool erofs_inode_in_metabox(struct inode *inode) +{ + return EROFS_I(inode)->nid & BIT_ULL(EROFS_DIRENT_NID_METABOX_BIT); +} + static inline erofs_off_t erofs_iloc(struct inode *inode) { struct erofs_sb_info *sbi = EROFS_I_SB(inode); + erofs_nid_t nid_lo = EROFS_I(inode)->nid & EROFS_DIRENT_NID_MASK; + if (erofs_inode_in_metabox(inode)) + return nid_lo << sbi->islotbits; return erofs_pos(inode->i_sb, sbi->meta_blkaddr) + - (EROFS_I(inode)->nid << sbi->islotbits); + (nid_lo << sbi->islotbits); } static inline unsigned int erofs_inode_version(unsigned int ifmt) @@ -383,9 +416,10 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, void erofs_unmap_metabuf(struct erofs_buf *buf); void erofs_put_metabuf(struct erofs_buf *buf); void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap); -void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb); +int erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb, + bool in_metabox); void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, - erofs_off_t offset, bool need_kmap); + erofs_off_t offset, bool in_metabox); int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev); int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); diff --git a/fs/erofs/super.c b/fs/erofs/super.c index e1e9f06e8342..e1020aa60771 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -141,7 +141,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, struct erofs_deviceslot *dis; struct file *file; - dis = erofs_read_metabuf(buf, sb, *pos, true); + dis = erofs_read_metabuf(buf, sb, *pos, false); if (IS_ERR(dis)) return PTR_ERR(dis); @@ -258,7 +258,7 @@ static int erofs_read_superblock(struct super_block *sb) void *data; int ret; - data = erofs_read_metabuf(&buf, sb, 0, true); + data = erofs_read_metabuf(&buf, sb, 0, false); if (IS_ERR(data)) { erofs_err(sb, "cannot read erofs superblock"); return PTR_ERR(data); @@ -319,6 +319,14 @@ static int erofs_read_superblock(struct super_block *sb) sbi->root_nid = le16_to_cpu(dsb->rb.rootnid_2b); } sbi->packed_nid = le64_to_cpu(dsb->packed_nid); + if (erofs_sb_has_metabox(sbi)) { + if (sbi->sb_size <= offsetof(struct erofs_super_block, + metabox_nid)) + return -EFSCORRUPTED; + sbi->metabox_nid = le64_to_cpu(dsb->metabox_nid); + if (sbi->metabox_nid & BIT_ULL(EROFS_DIRENT_NID_METABOX_BIT)) + return -EFSCORRUPTED; /* self-loop detection */ + } sbi->inos = le64_to_cpu(dsb->inos); sbi->epoch = (s64)le64_to_cpu(dsb->epoch); @@ -335,6 +343,8 @@ static int erofs_read_superblock(struct super_block *sb) if (erofs_sb_has_48bit(sbi)) erofs_info(sb, "EXPERIMENTAL 48-bit layout 
support in use. Use at your own risk!"); + if (erofs_sb_has_metabox(sbi)) + erofs_info(sb, "EXPERIMENTAL metadata compression support in use. Use at your own risk!"); if (erofs_is_fscache_mode(sb)) erofs_info(sb, "[deprecated] fscache-based on-demand read feature in use. Use at your own risk!"); out: @@ -690,6 +700,12 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) return PTR_ERR(inode); sbi->packed_inode = inode; } + if (erofs_sb_has_metabox(sbi)) { + inode = erofs_iget(sb, sbi->metabox_nid); + if (IS_ERR(inode)) + return PTR_ERR(inode); + sbi->metabox_inode = inode; + } inode = erofs_iget(sb, sbi->root_nid); if (IS_ERR(inode)) @@ -715,6 +731,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) if (err) return err; + sbi->dir_ra_bytes = EROFS_DIR_RA_BYTES; erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid); return 0; } @@ -845,6 +862,8 @@ static void erofs_drop_internal_inodes(struct erofs_sb_info *sbi) { iput(sbi->packed_inode); sbi->packed_inode = NULL; + iput(sbi->metabox_inode); + sbi->metabox_inode = NULL; #ifdef CONFIG_EROFS_FS_ZIP iput(sbi->managed_cache); sbi->managed_cache = NULL; diff --git a/fs/erofs/sysfs.c b/fs/erofs/sysfs.c index eed8797a193f..1e0658a1d95b 100644 --- a/fs/erofs/sysfs.c +++ b/fs/erofs/sysfs.c @@ -65,12 +65,14 @@ EROFS_ATTR_FUNC(drop_caches, 0200); #ifdef CONFIG_EROFS_FS_ZIP_ACCEL EROFS_ATTR_FUNC(accel, 0644); #endif +EROFS_ATTR_RW_UI(dir_ra_bytes, erofs_sb_info); static struct attribute *erofs_sb_attrs[] = { #ifdef CONFIG_EROFS_FS_ZIP ATTR_LIST(sync_decompress), ATTR_LIST(drop_caches), #endif + ATTR_LIST(dir_ra_bytes), NULL, }; ATTRIBUTE_GROUPS(erofs_sb); @@ -95,6 +97,7 @@ EROFS_ATTR_FEATURE(ztailpacking); EROFS_ATTR_FEATURE(fragments); EROFS_ATTR_FEATURE(dedupe); EROFS_ATTR_FEATURE(48bit); +EROFS_ATTR_FEATURE(metabox); static struct attribute *erofs_feat_attrs[] = { ATTR_LIST(zero_padding), @@ -108,6 +111,7 @@ static struct attribute *erofs_feat_attrs[] = { ATTR_LIST(fragments), ATTR_LIST(dedupe), ATTR_LIST(48bit), + ATTR_LIST(metabox), NULL, }; ATTRIBUTE_GROUPS(erofs_feat); diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index 9cf84717a92e..eaa9efd766ee 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -72,12 +72,14 @@ static int erofs_init_inode_xattrs(struct inode *inode) ret = -EFSCORRUPTED; goto out_unlock; /* xattr ondisk layout error */ } - ret = -ENOATTR; + ret = -ENODATA; goto out_unlock; } it.buf = __EROFS_BUF_INITIALIZER; - erofs_init_metabuf(&it.buf, sb); + ret = erofs_init_metabuf(&it.buf, sb, erofs_inode_in_metabox(inode)); + if (ret) + goto out_unlock; it.pos = erofs_iloc(inode) + vi->inode_isize; /* read in shared xattr array (non-atomic, see kmalloc below) */ @@ -266,20 +268,20 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it) (entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK); if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count) - return -ENOATTR; + return -ENODATA; if (it->index != pf->prefix->base_index || it->name.len != entry.e_name_len + pf->infix_len) - return -ENOATTR; + return -ENODATA; if (memcmp(it->name.name, pf->prefix->infix, pf->infix_len)) - return -ENOATTR; + return -ENODATA; it->infix_len = pf->infix_len; } else { if (it->index != entry.e_name_index || it->name.len != entry.e_name_len) - return -ENOATTR; + return -ENODATA; it->infix_len = 0; } @@ -295,7 +297,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it) entry.e_name_len - processed); if (memcmp(it->name.name + it->infix_len + processed, 
it->kaddr, slice)) - return -ENOATTR; + return -ENODATA; it->pos += slice; } @@ -323,9 +325,12 @@ static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it, sizeof(u32) * vi->xattr_shared_count; if (xattr_header_sz >= vi->xattr_isize) { DBG_BUGON(xattr_header_sz > vi->xattr_isize); - return -ENOATTR; + return -ENODATA; } + ret = erofs_init_metabuf(&it->buf, it->sb, erofs_inode_in_metabox(inode)); + if (ret) + return ret; remaining = vi->xattr_isize - xattr_header_sz; it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz; @@ -347,7 +352,7 @@ static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it, ret = erofs_getxattr_foreach(it); else ret = erofs_listxattr_foreach(it); - if ((getxattr && ret != -ENOATTR) || (!getxattr && ret)) + if ((getxattr && ret != -ENODATA) || (!getxattr && ret)) break; it->pos = next_pos; @@ -361,12 +366,17 @@ static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it, struct erofs_inode *const vi = EROFS_I(inode); struct super_block *const sb = it->sb; struct erofs_sb_info *sbi = EROFS_SB(sb); - unsigned int i; - int ret = -ENOATTR; + unsigned int i = 0; + int ret; - for (i = 0; i < vi->xattr_shared_count; ++i) { + ret = erofs_init_metabuf(&it->buf, sb, + erofs_sb_has_shared_ea_in_metabox(sbi)); + if (ret) + return ret; + + while (i < vi->xattr_shared_count) { it->pos = erofs_pos(sb, sbi->xattr_blkaddr) + - vi->xattr_shared_xattrs[i] * sizeof(__le32); + vi->xattr_shared_xattrs[i++] * sizeof(__le32); it->kaddr = erofs_bread(&it->buf, it->pos, true); if (IS_ERR(it->kaddr)) return PTR_ERR(it->kaddr); @@ -375,10 +385,10 @@ static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it, ret = erofs_getxattr_foreach(it); else ret = erofs_listxattr_foreach(it); - if ((getxattr && ret != -ENOATTR) || (!getxattr && ret)) + if ((getxattr && ret != -ENODATA) || (!getxattr && ret)) break; } - return ret; + return i ? ret : -ENODATA; } int erofs_getxattr(struct inode *inode, int index, const char *name, @@ -403,7 +413,7 @@ int erofs_getxattr(struct inode *inode, int index, const char *name, EROFS_XATTR_FILTER_SEED + index); hashbit &= EROFS_XATTR_FILTER_BITS - 1; if (vi->xattr_name_filter & (1U << hashbit)) - return -ENOATTR; + return -ENODATA; } it.index = index; @@ -413,13 +423,12 @@ int erofs_getxattr(struct inode *inode, int index, const char *name, it.sb = inode->i_sb; it.buf = __EROFS_BUF_INITIALIZER; - erofs_init_metabuf(&it.buf, it.sb); it.buffer = buffer; it.buffer_size = buffer_size; it.buffer_ofs = 0; ret = erofs_xattr_iter_inline(&it, inode, true); - if (ret == -ENOATTR) + if (ret == -ENODATA) ret = erofs_xattr_iter_shared(&it, inode, true); erofs_put_metabuf(&it.buf); return ret ? ret : it.buffer_ofs; @@ -432,23 +441,22 @@ ssize_t erofs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) struct inode *inode = d_inode(dentry); ret = erofs_init_inode_xattrs(inode); - if (ret == -ENOATTR) + if (ret == -ENODATA) return 0; if (ret) return ret; it.sb = dentry->d_sb; it.buf = __EROFS_BUF_INITIALIZER; - erofs_init_metabuf(&it.buf, it.sb); it.dentry = dentry; it.buffer = buffer; it.buffer_size = buffer_size; it.buffer_ofs = 0; ret = erofs_xattr_iter_inline(&it, inode, false); - if (!ret || ret == -ENOATTR) + if (!ret || ret == -ENODATA) ret = erofs_xattr_iter_shared(&it, inode, false); - if (ret == -ENOATTR) + if (ret == -ENODATA) ret = 0; erofs_put_metabuf(&it.buf); return ret ? 
ret : it.buffer_ofs; @@ -485,7 +493,7 @@ int erofs_xattr_prefixes_init(struct super_block *sb) if (sbi->packed_inode) buf.mapping = sbi->packed_inode->i_mapping; else - erofs_init_metabuf(&buf, sb); + (void)erofs_init_metabuf(&buf, sb, false); for (i = 0; i < sbi->xattr_prefix_count; i++) { void *ptr = erofs_read_metadata(sb, &buf, &pos, &len); @@ -539,7 +547,7 @@ struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu) rc = erofs_getxattr(inode, prefix, "", value, rc); } - if (rc == -ENOATTR) + if (rc == -ENODATA) acl = NULL; else if (rc < 0) acl = ERR_PTR(rc); diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h index b246cd0e135e..6317caa8413e 100644 --- a/fs/erofs/xattr.h +++ b/fs/erofs/xattr.h @@ -10,9 +10,6 @@ #include <linux/posix_acl_xattr.h> #include <linux/xattr.h> -/* Attribute not found */ -#define ENOATTR ENODATA - #ifdef CONFIG_EROFS_FS_XATTR extern const struct xattr_handler erofs_xattr_user_handler; extern const struct xattr_handler erofs_xattr_trusted_handler; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index e3f28a1bb945..792f20888a8f 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -805,6 +805,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe) struct erofs_map_blocks *map = &fe->map; struct super_block *sb = fe->inode->i_sb; struct z_erofs_pcluster *pcl = NULL; + void *ptr; int ret; DBG_BUGON(fe->pcl); @@ -854,15 +855,17 @@ static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe) /* bind cache first when cached decompression is preferred */ z_erofs_bind_cache(fe); } else { - void *mptr; - - mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, false); - if (IS_ERR(mptr)) { - ret = PTR_ERR(mptr); - erofs_err(sb, "failed to get inline data %d", ret); + ret = erofs_init_metabuf(&map->buf, sb, + erofs_inode_in_metabox(fe->inode)); + if (ret) + return ret; + ptr = erofs_bread(&map->buf, map->m_pa, false); + if (IS_ERR(ptr)) { + ret = PTR_ERR(ptr); + erofs_err(sb, "failed to get inline folio %d", ret); return ret; } - get_page(map->buf.page); + folio_get(page_folio(map->buf.page)); WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page); fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; @@ -1325,9 +1328,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err) /* must handle all compressed pages before actual file pages */ if (pcl->from_meta) { - page = pcl->compressed_bvecs[0].page; + folio_put(page_folio(pcl->compressed_bvecs[0].page)); WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL); - put_page(page); } else { /* managed folios are still left in compressed_bvecs[] */ for (i = 0; i < pclusterpages; ++i) { diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index f1a15ff22147..a93efd95c555 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -17,7 +17,7 @@ struct z_erofs_maprecorder { u16 delta[2]; erofs_blk_t pblk, compressedblks; erofs_off_t nextpackoff; - bool partialref; + bool partialref, in_mbox; }; static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, @@ -31,7 +31,7 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, struct z_erofs_lcluster_index *di; unsigned int advise; - di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, true); + di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox); if (IS_ERR(di)) return PTR_ERR(di); m->lcn = lcn; @@ -146,7 +146,7 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m, else return -EOPNOTSUPP; - in = erofs_read_metabuf(&m->map->buf, m->inode->i_sb, 
pos, true); + in = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox); if (IS_ERR(in)) return PTR_ERR(in); @@ -240,6 +240,13 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m, static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m, unsigned int lcn, bool lookahead) { + if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) { + erofs_err(m->inode->i_sb, "unknown type %u @ lcn %u of nid %llu", + m->type, lcn, EROFS_I(m->inode)->nid); + DBG_BUGON(1); + return -EOPNOTSUPP; + } + switch (EROFS_I(m->inode)->datalayout) { case EROFS_INODE_COMPRESSED_FULL: return z_erofs_load_full_lcluster(m, lcn); @@ -265,12 +272,7 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m, if (err) return err; - if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) { - erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu", - m->type, lcn, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; - } else if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { + if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { lookback_distance = m->delta[0]; if (!lookback_distance) break; @@ -325,25 +327,18 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m, DBG_BUGON(lcn == initial_lcn && m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD); - if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { - if (m->delta[0] != 1) { - erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - if (m->compressedblks) - goto out; - } else if (m->type < Z_EROFS_LCLUSTER_TYPE_MAX) { - /* - * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type - * rather than CBLKCNT, it's a 1 block-sized pcluster. - */ - m->compressedblks = 1; - goto out; + if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD && m->delta[0] != 1) { + erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; } - erofs_err(sb, "cannot found CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; + + /* + * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type rather + * than CBLKCNT, it's a 1 block-sized pcluster. + */ + if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD || !m->compressedblks) + m->compressedblks = 1; out: m->map->m_plen = erofs_pos(sb, m->compressedblks); return 0; @@ -379,11 +374,6 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m) if (lcn != headlcn) break; /* ends at the next HEAD lcluster */ m->delta[1] = 1; - } else { - erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu", - m->type, lcn, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; } lcn += m->delta[1]; } @@ -402,6 +392,7 @@ static int z_erofs_map_blocks_fo(struct inode *inode, struct z_erofs_maprecorder m = { .inode = inode, .map = map, + .in_mbox = erofs_inode_in_metabox(inode), }; int err = 0; unsigned int endoff, afmt; @@ -428,44 +419,33 @@ static int z_erofs_map_blocks_fo(struct inode *inode, map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED; end = (m.lcn + 1ULL) << lclusterbits; - switch (m.type) { - case Z_EROFS_LCLUSTER_TYPE_PLAIN: - case Z_EROFS_LCLUSTER_TYPE_HEAD1: - case Z_EROFS_LCLUSTER_TYPE_HEAD2: - if (endoff >= m.clusterofs) { - m.headtype = m.type; - map->m_la = (m.lcn << lclusterbits) | m.clusterofs; - /* - * For ztailpacking files, in order to inline data more - * effectively, special EOF lclusters are now supported - * which can have three parts at most. 
- */ - if (ztailpacking && end > inode->i_size) - end = inode->i_size; - break; - } - /* m.lcn should be >= 1 if endoff < m.clusterofs */ - if (!m.lcn) { - erofs_err(sb, "invalid logical cluster 0 at nid %llu", - vi->nid); - err = -EFSCORRUPTED; - goto unmap_out; + if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD && endoff >= m.clusterofs) { + m.headtype = m.type; + map->m_la = (m.lcn << lclusterbits) | m.clusterofs; + /* + * For ztailpacking files, in order to inline data more + * effectively, special EOF lclusters are now supported + * which can have three parts at most. + */ + if (ztailpacking && end > inode->i_size) + end = inode->i_size; + } else { + if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) { + /* m.lcn should be >= 1 if endoff < m.clusterofs */ + if (!m.lcn) { + erofs_err(sb, "invalid logical cluster 0 at nid %llu", + vi->nid); + err = -EFSCORRUPTED; + goto unmap_out; + } + end = (m.lcn << lclusterbits) | m.clusterofs; + map->m_flags |= EROFS_MAP_FULL_MAPPED; + m.delta[0] = 1; } - end = (m.lcn << lclusterbits) | m.clusterofs; - map->m_flags |= EROFS_MAP_FULL_MAPPED; - m.delta[0] = 1; - fallthrough; - case Z_EROFS_LCLUSTER_TYPE_NONHEAD: /* get the corresponding first chunk */ err = z_erofs_extent_lookback(&m, m.delta[0]); if (err) goto unmap_out; - break; - default: - erofs_err(sb, "unknown type %u @ offset %llu of nid %llu", - m.type, ofs, vi->nid); - err = -EOPNOTSUPP; - goto unmap_out; } if (m.partialref) map->m_flags |= EROFS_MAP_PARTIAL_REF; @@ -542,6 +522,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode, unsigned int recsz = z_erofs_extent_recsize(vi->z_advise); erofs_off_t pos = round_up(Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize), recsz); + bool in_mbox = erofs_inode_in_metabox(inode); erofs_off_t lend = inode->i_size; erofs_off_t l, r, mid, pa, la, lstart; struct z_erofs_extent *ext; @@ -551,7 +532,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode, map->m_flags = 0; if (recsz <= offsetof(struct z_erofs_extent, pstart_hi)) { if (recsz <= offsetof(struct z_erofs_extent, pstart_lo)) { - ext = erofs_read_metabuf(&map->buf, sb, pos, true); + ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox); if (IS_ERR(ext)) return PTR_ERR(ext); pa = le64_to_cpu(*(__le64 *)ext); @@ -564,7 +545,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode, } for (; lstart <= map->m_la; lstart += 1 << vi->z_lclusterbits) { - ext = erofs_read_metabuf(&map->buf, sb, pos, true); + ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox); if (IS_ERR(ext)) return PTR_ERR(ext); map->m_plen = le32_to_cpu(ext->plen); @@ -584,7 +565,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode, for (l = 0, r = vi->z_extents; l < r; ) { mid = l + (r - l) / 2; ext = erofs_read_metabuf(&map->buf, sb, - pos + mid * recsz, true); + pos + mid * recsz, in_mbox); if (IS_ERR(ext)) return PTR_ERR(ext); @@ -641,13 +622,12 @@ static int z_erofs_map_blocks_ext(struct inode *inode, return 0; } -static int z_erofs_fill_inode_lazy(struct inode *inode) +static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map) { struct erofs_inode *const vi = EROFS_I(inode); struct super_block *const sb = inode->i_sb; int err, headnr; erofs_off_t pos; - struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct z_erofs_map_header *h; if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) { @@ -667,7 +647,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) goto out_unlock; pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8); - h = erofs_read_metabuf(&buf, 
sb, pos, true); + h = erofs_read_metabuf(&map->buf, sb, pos, erofs_inode_in_metabox(inode)); if (IS_ERR(h)) { err = PTR_ERR(h); goto out_unlock; @@ -705,7 +685,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel", headnr + 1, vi->z_algorithmtype[headnr], vi->nid); err = -EOPNOTSUPP; - goto out_put_metabuf; + goto out_unlock; } if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) && @@ -714,7 +694,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu", vi->nid); err = -EFSCORRUPTED; - goto out_put_metabuf; + goto out_unlock; } if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT && !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^ @@ -722,27 +702,25 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu", vi->nid); err = -EFSCORRUPTED; - goto out_put_metabuf; + goto out_unlock; } if (vi->z_idata_size || (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) { - struct erofs_map_blocks map = { + struct erofs_map_blocks tm = { .buf = __EROFS_BUF_INITIALIZER }; - err = z_erofs_map_blocks_fo(inode, &map, + err = z_erofs_map_blocks_fo(inode, &tm, EROFS_GET_BLOCKS_FINDTAIL); - erofs_put_metabuf(&map.buf); + erofs_put_metabuf(&tm.buf); if (err < 0) - goto out_put_metabuf; + goto out_unlock; } done: /* paired with smp_mb() at the beginning of the function */ smp_mb(); set_bit(EROFS_I_Z_INITED_BIT, &vi->flags); -out_put_metabuf: - erofs_put_metabuf(&buf); out_unlock: clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags); return err; @@ -760,7 +738,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, map->m_la = inode->i_size; map->m_flags = 0; } else { - err = z_erofs_fill_inode_lazy(inode); + err = z_erofs_fill_inode(inode, map); if (!err) { if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL && (vi->z_advise & Z_EROFS_ADVISE_EXTENTS)) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 0fbf5dfedb24..b22d6f819f78 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -218,6 +218,7 @@ struct eventpoll { /* used to optimize loop detection check */ u64 gen; struct hlist_head refs; + u8 loop_check_depth; /* * usage count, used together with epitem->dying to @@ -2140,23 +2141,24 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, } /** - * ep_loop_check_proc - verify that adding an epoll file inside another - * epoll structure does not violate the constraints, in - * terms of closed loops, or too deep chains (which can - * result in excessive stack usage). + * ep_loop_check_proc - verify that adding an epoll file @ep inside another + * epoll file does not create closed loops, and + * determine the depth of the subtree starting at @ep * * @ep: the &struct eventpoll to be currently checked. * @depth: Current depth of the path being checked. * - * Return: %zero if adding the epoll @file inside current epoll - * structure @ep does not violate the constraints, or %-1 otherwise. + * Return: depth of the subtree, or INT_MAX if we found a loop or went too deep. 
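A minimal userspace sketch (toy graph and made-up names, not kernel code) of the generation-tagged depth caching that the reworked loop check below relies on:

#include <stdio.h>

#define EP_MAX_NESTS	4
#define NNODES		8

/* toy model: each "epoll file" watches at most one other epoll file */
static int child[NNODES], has_child[NNODES];
static int gen[NNODES], cached_depth[NNODES];
static int loop_check_gen;

static int subtree_depth(int node)
{
	int depth = 0;

	if (gen[node] == loop_check_gen)	/* analogous to ep->loop_check_depth */
		return cached_depth[node];
	gen[node] = loop_check_gen;
	if (has_child[node])
		depth = subtree_depth(child[node]) + 1;
	cached_depth[node] = depth;
	return depth;
}

int main(void)
{
	int i;

	/* chain 0 -> 1 -> 2 -> 3: nesting depth below node 0 is 3 */
	for (i = 0; i < 3; i++) {
		child[i] = i + 1;
		has_child[i] = 1;
	}
	loop_check_gen++;
	printf("depth below 0: %d (limit %d)\n", subtree_depth(0), EP_MAX_NESTS);
	return 0;
}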
*/ static int ep_loop_check_proc(struct eventpoll *ep, int depth) { - int error = 0; + int result = 0; struct rb_node *rbp; struct epitem *epi; + if (ep->gen == loop_check_gen) + return ep->loop_check_depth; + mutex_lock_nested(&ep->mtx, depth + 1); ep->gen = loop_check_gen; for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { @@ -2164,13 +2166,11 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth) if (unlikely(is_file_epoll(epi->ffd.file))) { struct eventpoll *ep_tovisit; ep_tovisit = epi->ffd.file->private_data; - if (ep_tovisit->gen == loop_check_gen) - continue; if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS) - error = -1; + result = INT_MAX; else - error = ep_loop_check_proc(ep_tovisit, depth + 1); - if (error != 0) + result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1); + if (result > EP_MAX_NESTS) break; } else { /* @@ -2184,9 +2184,25 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth) list_file(epi->ffd.file); } } + ep->loop_check_depth = result; mutex_unlock(&ep->mtx); - return error; + return result; +} + +/* ep_get_upwards_depth_proc - determine depth of @ep when traversed upwards */ +static int ep_get_upwards_depth_proc(struct eventpoll *ep, int depth) +{ + int result = 0; + struct epitem *epi; + + if (ep->gen == loop_check_gen) + return ep->loop_check_depth; + hlist_for_each_entry_rcu(epi, &ep->refs, fllink) + result = max(result, ep_get_upwards_depth_proc(epi->ep, depth + 1) + 1); + ep->gen = loop_check_gen; + ep->loop_check_depth = result; + return result; } /** @@ -2202,8 +2218,22 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth) */ static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to) { + int depth, upwards_depth; + inserting_into = ep; - return ep_loop_check_proc(to, 0); + /* + * Check how deep down we can get from @to, and whether it is possible + * to loop up to @ep. + */ + depth = ep_loop_check_proc(to, 0); + if (depth > EP_MAX_NESTS) + return -1; + /* Check how far up we can go from @ep. */ + rcu_read_lock(); + upwards_depth = ep_get_upwards_depth_proc(ep, 0); + rcu_read_unlock(); + + return (depth+1+upwards_depth > EP_MAX_NESTS) ? -1 : 0; } static void clear_tfile_check_list(void) diff --git a/fs/exec.c b/fs/exec.c index ba400aafd640..fe895e47f1dd 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1515,7 +1515,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm) * state is protected by cred_guard_mutex we hold. 
*/ n_fs = 1; - spin_lock(&p->fs->lock); + read_seqlock_excl(&p->fs->seq); rcu_read_lock(); for_other_threads(p, t) { if (t->fs == p->fs) @@ -1528,7 +1528,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm) bprm->unsafe |= LSM_UNSAFE_SHARE; else p->fs->in_exec = 1; - spin_unlock(&p->fs->lock); + read_sequnlock_excl(&p->fs->seq); } static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file) diff --git a/fs/exfat/file.c b/fs/exfat/file.c index 841a5b18e3df..6b82497572b4 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -532,11 +532,10 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) return blkdev_issue_flush(inode->i_sb->s_bdev); } -static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size) +static int exfat_extend_valid_size(struct inode *inode, loff_t new_valid_size) { int err; loff_t pos; - struct inode *inode = file_inode(file); struct exfat_inode_info *ei = EXFAT_I(inode); struct address_space *mapping = inode->i_mapping; const struct address_space_operations *ops = mapping->a_ops; @@ -551,14 +550,14 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size) if (pos + len > new_valid_size) len = new_valid_size - pos; - err = ops->write_begin(file, mapping, pos, len, &folio, NULL); + err = ops->write_begin(NULL, mapping, pos, len, &folio, NULL); if (err) goto out; off = offset_in_folio(folio, pos); folio_zero_new_buffers(folio, off, off + len); - err = ops->write_end(file, mapping, pos, len, len, folio, NULL); + err = ops->write_end(NULL, mapping, pos, len, len, folio, NULL); if (err < 0) goto out; pos += len; @@ -604,7 +603,7 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter) } if (pos > valid_size) { - ret = exfat_extend_valid_size(file, pos); + ret = exfat_extend_valid_size(inode, pos); if (ret < 0 && ret != -ENOSPC) { exfat_err(inode->i_sb, "write: fail to zero from %llu to %llu(%zd)", @@ -665,7 +664,7 @@ static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf) start + vma->vm_end - vma->vm_start); if (ei->valid_size < end) { - err = exfat_extend_valid_size(file, end); + err = exfat_extend_valid_size(inode, end); if (err < 0) { inode_unlock(inode); return vmf_fs_error(err); @@ -683,13 +682,15 @@ static const struct vm_operations_struct exfat_file_vm_ops = { .page_mkwrite = exfat_page_mkwrite, }; -static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma) +static int exfat_file_mmap_prepare(struct vm_area_desc *desc) { - if (unlikely(exfat_forced_shutdown(file_inode(file)->i_sb))) + struct file *file = desc->file; + + if (unlikely(exfat_forced_shutdown(file_inode(desc->file)->i_sb))) return -EIO; file_accessed(file); - vma->vm_ops = &exfat_file_vm_ops; + desc->vm_ops = &exfat_file_vm_ops; return 0; } @@ -710,7 +711,7 @@ const struct file_operations exfat_file_operations = { #ifdef CONFIG_COMPAT .compat_ioctl = exfat_compat_ioctl, #endif - .mmap = exfat_file_mmap, + .mmap_prepare = exfat_file_mmap_prepare, .fsync = exfat_file_fsync, .splice_read = exfat_splice_read, .splice_write = iter_file_splice_write, diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index b22c02d6000f..c10844e1e16c 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -446,9 +446,10 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to) } } -static int exfat_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned int len, - struct folio **foliop, void **fsdata) +static int exfat_write_begin(const struct kiocb *iocb, + 
struct address_space *mapping, + loff_t pos, unsigned int len, + struct folio **foliop, void **fsdata) { int ret; @@ -463,15 +464,16 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping, return ret; } -static int exfat_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned int len, unsigned int copied, - struct folio *folio, void *fsdata) +static int exfat_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int copied, + struct folio *folio, void *fsdata) { struct inode *inode = mapping->host; struct exfat_inode_info *ei = EXFAT_I(inode); int err; - err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); + err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata); if (err < len) exfat_write_failed(mapping, pos+len); diff --git a/fs/exfat/super.c b/fs/exfat/super.c index 7ed858937d45..ea5c1334a214 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -667,9 +667,9 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) } if (sbi->options.utf8) - sb->s_d_op = &exfat_utf8_dentry_ops; + set_default_d_op(sb, &exfat_utf8_dentry_ops); else - sb->s_d_op = &exfat_dentry_ops; + set_default_d_op(sb, &exfat_dentry_ops); root_inode = new_inode(sb); if (!root_inode) { diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index cdefea17986a..d3e55de4a2a2 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -549,15 +549,13 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len, goto err_result; } - inode_lock(target_dir->d_inode); - nresult = lookup_one(mnt_idmap(mnt), &QSTR(nbuf), target_dir); + nresult = lookup_one_unlocked(mnt_idmap(mnt), &QSTR(nbuf), target_dir); if (!IS_ERR(nresult)) { if (unlikely(nresult->d_inode != result->d_inode)) { dput(nresult); nresult = ERR_PTR(-ESTALE); } } - inode_unlock(target_dir->d_inode); /* * At this point we are done with the parent, but it's pinned * by the child dentry anyway. 
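
The fs/exportfs/expfs.c hunk above replaces an explicit inode_lock()/inode_unlock() pair around lookup_one() with a single call to lookup_one_unlocked(), which takes and releases the parent's inode lock internally. A minimal before/after sketch, using only the calls visible in the hunk (the lookup_child_*() wrapper names are invented for illustration and are not part of the patch):

	/* Old pattern: the caller serializes the lookup on the parent inode. */
	static struct dentry *lookup_child_locked(struct vfsmount *mnt,
						  struct dentry *parent,
						  const char *name)
	{
		struct dentry *child;

		inode_lock(parent->d_inode);
		child = lookup_one(mnt_idmap(mnt), &QSTR(name), parent);
		inode_unlock(parent->d_inode);
		return child;
	}

	/* New pattern: lookup_one_unlocked() handles the parent lock itself. */
	static struct dentry *lookup_child_unlocked(struct vfsmount *mnt,
						    struct dentry *parent,
						    const char *name)
	{
		return lookup_one_unlocked(mnt_idmap(mnt), &QSTR(name), parent);
	}

In both cases the caller still checks the result with IS_ERR() and eventually dput()s it, as exportfs_decode_fh_raw() does above; only the locking moves into the helper.
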
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 402fecf90a44..b07b3b369710 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -87,7 +87,7 @@ static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len) struct inode *dir = mapping->host; inode_inc_iversion(dir); - block_write_end(NULL, mapping, pos, len, len, folio, NULL); + block_write_end(pos, len, len, folio); if (pos+len > dir->i_size) { i_size_write(dir, pos+len); diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 4025f875252a..cf97b76e9fd3 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -750,9 +750,9 @@ extern int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); /* ioctl.c */ -extern int ext2_fileattr_get(struct dentry *dentry, struct fileattr *fa); +extern int ext2_fileattr_get(struct dentry *dentry, struct file_kattr *fa); extern int ext2_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); extern long ext2_ioctl(struct file *, unsigned int, unsigned long); extern long ext2_compat_ioctl(struct file *, unsigned int, unsigned long); diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 10b061ac5bc0..76bddce462fc 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -122,17 +122,19 @@ static const struct vm_operations_struct ext2_dax_vm_ops = { .pfn_mkwrite = ext2_dax_fault, }; -static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma) +static int ext2_file_mmap_prepare(struct vm_area_desc *desc) { + struct file *file = desc->file; + if (!IS_DAX(file_inode(file))) - return generic_file_mmap(file, vma); + return generic_file_mmap_prepare(desc); file_accessed(file); - vma->vm_ops = &ext2_dax_vm_ops; + desc->vm_ops = &ext2_dax_vm_ops; return 0; } #else -#define ext2_file_mmap generic_file_mmap +#define ext2_file_mmap_prepare generic_file_mmap_prepare #endif /* @@ -316,7 +318,7 @@ const struct file_operations ext2_file_operations = { #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, #endif - .mmap = ext2_file_mmap, + .mmap_prepare = ext2_file_mmap_prepare, .open = ext2_file_open, .release = ext2_release_file, .fsync = ext2_fsync, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 30f8201c155f..e10c376843d7 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -895,9 +895,19 @@ int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { int ret; + loff_t i_size; inode_lock(inode); - len = min_t(u64, len, i_size_read(inode)); + i_size = i_size_read(inode); + /* + * iomap_fiemap() returns EINVAL for 0 length. Make sure we don't trim + * length to 0 but still trim the range as much as possible since + * ext2_get_blocks() iterates unmapped space block by block which is + * slow. 
+ */ + if (i_size == 0) + i_size = 1; + len = min_t(u64, len, i_size); ret = iomap_fiemap(inode, fieinfo, start, len, &ext2_iomap_ops); inode_unlock(inode); @@ -915,7 +925,7 @@ static void ext2_readahead(struct readahead_control *rac) } static int -ext2_write_begin(struct file *file, struct address_space *mapping, +ext2_write_begin(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { int ret; @@ -926,13 +936,14 @@ ext2_write_begin(struct file *file, struct address_space *mapping, return ret; } -static int ext2_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata) +static int ext2_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { int ret; - ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); + ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata); if (ret < len) ext2_write_failed(mapping, pos + len); return ret; } diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c index 44e04484e570..c3fea55b8efa 100644 --- a/fs/ext2/ioctl.c +++ b/fs/ext2/ioctl.c @@ -18,7 +18,7 @@ #include <linux/uaccess.h> #include <linux/fileattr.h> -int ext2_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int ext2_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct ext2_inode_info *ei = EXT2_I(d_inode(dentry)); @@ -28,7 +28,7 @@ int ext2_fileattr_get(struct dentry *dentry, struct fileattr *fa) } int ext2_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct ext2_inode_info *ei = EXT2_I(inode); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 18373de980f2..7d962e7f388a 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -3103,8 +3103,8 @@ extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode, extern long ext4_ioctl(struct file *, unsigned int, unsigned long); extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); int ext4_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); -int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); +int ext4_fileattr_get(struct dentry *dentry, struct file_kattr *fa); extern void ext4_reset_inode_seed(struct inode *inode); int ext4_update_overhead(struct super_block *sb, bool force); int ext4_force_shutdown(struct super_block *sb, u32 flags); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index b543a46fc809..b43aa82c1b39 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4501,6 +4501,8 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, struct ext4_map_blocks map; unsigned int credits; loff_t epos, old_size = i_size_read(inode); + unsigned int blkbits = inode->i_blkbits; + bool alloc_zero = false; BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)); map.m_lblk = offset; @@ -4514,6 +4516,17 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; /* + * Doing the actual write zeroing during a running journal transaction + * costs a lot. First allocate an unwritten extent and then + * convert it to written after zeroing it out.
+ */ + if (flags & EXT4_GET_BLOCKS_ZERO) { + flags &= ~EXT4_GET_BLOCKS_ZERO; + flags |= EXT4_GET_BLOCKS_UNWRIT_EXT; + alloc_zero = true; + } + + /* * credits to insert 1 extent into extent tree */ credits = ext4_chunk_trans_blocks(inode, len); @@ -4549,9 +4562,7 @@ retry: * allow a full retry cycle for any remaining allocations */ retries = 0; - map.m_lblk += ret; - map.m_len = len = len - ret; - epos = (loff_t)map.m_lblk << inode->i_blkbits; + epos = (loff_t)(map.m_lblk + ret) << blkbits; inode_set_ctime_current(inode); if (new_size) { if (epos > new_size) @@ -4571,6 +4582,21 @@ retry: ret2 = ret3 ? ret3 : ret2; if (unlikely(ret2)) break; + + if (alloc_zero && + (map.m_flags & (EXT4_MAP_MAPPED | EXT4_MAP_UNWRITTEN))) { + ret2 = ext4_issue_zeroout(inode, map.m_lblk, map.m_pblk, + map.m_len); + if (likely(!ret2)) + ret2 = ext4_convert_unwritten_extents(NULL, + inode, (loff_t)map.m_lblk << blkbits, + (loff_t)map.m_len << blkbits); + if (ret2) + break; + } + + map.m_lblk += ret; + map.m_len = len = len - ret; } if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; @@ -4636,7 +4662,11 @@ static long ext4_zero_range(struct file *file, loff_t offset, if (end_lblk > start_lblk) { ext4_lblk_t zero_blks = end_lblk - start_lblk; - flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | EXT4_EX_NOCACHE); + if (mode & FALLOC_FL_WRITE_ZEROES) + flags = EXT4_GET_BLOCKS_CREATE_ZERO | EXT4_EX_NOCACHE; + else + flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | + EXT4_EX_NOCACHE); ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks, new_size, flags); if (ret) @@ -4745,11 +4775,18 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (IS_ENCRYPTED(inode) && (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE))) return -EOPNOTSUPP; + /* + * Don't allow writing zeroes if the underlying device does not + * enable the unmap write zeroes operation. 
+ */ + if ((mode & FALLOC_FL_WRITE_ZEROES) && + !bdev_write_zeroes_unmap_sectors(inode->i_sb->s_bdev)) + return -EOPNOTSUPP; /* Return error if mode is not supported */ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | - FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | - FALLOC_FL_INSERT_RANGE)) + FALLOC_FL_ZERO_RANGE | FALLOC_FL_COLLAPSE_RANGE | + FALLOC_FL_INSERT_RANGE | FALLOC_FL_WRITE_ZEROES)) return -EOPNOTSUPP; inode_lock(inode); @@ -4780,16 +4817,23 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (ret) goto out_invalidate_lock; - if (mode & FALLOC_FL_PUNCH_HOLE) + switch (mode & FALLOC_FL_MODE_MASK) { + case FALLOC_FL_PUNCH_HOLE: ret = ext4_punch_hole(file, offset, len); - else if (mode & FALLOC_FL_COLLAPSE_RANGE) + break; + case FALLOC_FL_COLLAPSE_RANGE: ret = ext4_collapse_range(file, offset, len); - else if (mode & FALLOC_FL_INSERT_RANGE) + break; + case FALLOC_FL_INSERT_RANGE: ret = ext4_insert_range(file, offset, len); - else if (mode & FALLOC_FL_ZERO_RANGE) + break; + case FALLOC_FL_ZERO_RANGE: + case FALLOC_FL_WRITE_ZEROES: ret = ext4_zero_range(file, offset, len, mode); - else + break; + default: ret = -EOPNOTSUPP; + } out_invalidate_lock: filemap_invalidate_unlock(mapping); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 21df81347147..48908ce0c3ea 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -804,9 +804,10 @@ static const struct vm_operations_struct ext4_file_vm_ops = { .page_mkwrite = ext4_page_mkwrite, }; -static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) +static int ext4_file_mmap_prepare(struct vm_area_desc *desc) { int ret; + struct file *file = desc->file; struct inode *inode = file->f_mapping->host; struct dax_device *dax_dev = EXT4_SB(inode->i_sb)->s_daxdev; @@ -821,15 +822,15 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) * We don't support synchronous mappings for non-DAX files and * for DAX files if underneath dax_device is not synchronous. */ - if (!daxdev_mapping_supported(vma, dax_dev)) + if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), dax_dev)) return -EOPNOTSUPP; file_accessed(file); if (IS_DAX(file_inode(file))) { - vma->vm_ops = &ext4_dax_vm_ops; - vm_flags_set(vma, VM_HUGEPAGE); + desc->vm_ops = &ext4_dax_vm_ops; + desc->vm_flags |= VM_HUGEPAGE; } else { - vma->vm_ops = &ext4_file_vm_ops; + desc->vm_ops = &ext4_file_vm_ops; } return 0; } @@ -968,7 +969,7 @@ const struct file_operations ext4_file_operations = { #ifdef CONFIG_COMPAT .compat_ioctl = ext4_compat_ioctl, #endif - .mmap = ext4_file_mmap, + .mmap_prepare = ext4_file_mmap_prepare, .open = ext4_file_open, .release = ext4_release_file, .fsync = ext4_sync_file, @@ -977,7 +978,8 @@ const struct file_operations ext4_file_operations = { .splice_write = iter_file_splice_write, .fallocate = ext4_fallocate, .fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC | - FOP_DIO_PARALLEL_WRITE, + FOP_DIO_PARALLEL_WRITE | + FOP_DONTCACHE, }; const struct inode_operations ext4_file_inode_operations = { diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index be9a4cba35fd..5c7024051f1e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1252,7 +1252,8 @@ int ext4_block_write_begin(handle_t *handle, struct folio *folio, * and the ext4_write_end(). So doing the jbd2_journal_start at the start of * ext4_write_begin() is the right place. 
*/ -static int ext4_write_begin(struct file *file, struct address_space *mapping, +static int ext4_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { @@ -1263,7 +1264,6 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, struct folio *folio; pgoff_t index; unsigned from, to; - fgf_t fgp = FGP_WRITEBEGIN; ret = ext4_emergency_state(inode->i_sb); if (unlikely(ret)) @@ -1287,16 +1287,14 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, } /* - * __filemap_get_folio() can take a long time if the + * write_begin_get_folio() can take a long time if the * system is thrashing due to memory pressure, or if the folio * is being written back. So grab it first before we start * the transaction handle. This also allows us to allocate * the folio (if needed) without using GFP_NOFS. */ retry_grab: - fgp |= fgf_set_order(len); - folio = __filemap_get_folio(mapping, index, fgp, - mapping_gfp_mask(mapping)); + folio = write_begin_get_folio(iocb, mapping, index, len); if (IS_ERR(folio)) return PTR_ERR(folio); @@ -1400,12 +1398,12 @@ static int write_end_fn(handle_t *handle, struct inode *inode, /* * We need to pick up the new inode size which generic_commit_write gave us - * `file' can be NULL - eg, when called from page_symlink(). + * `iocb` can be NULL - eg, when called from page_symlink(). * * ext4 never places buffers on inode->i_mapping->i_private_list. metadata * buffers are managed internally. */ -static int ext4_write_end(struct file *file, +static int ext4_write_end(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) @@ -1424,7 +1422,7 @@ static int ext4_write_end(struct file *file, return ext4_write_inline_data_end(inode, pos, len, copied, folio); - copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata); + copied = block_write_end(pos, len, copied, folio); /* * it's important to update i_size while still holding folio lock: * page writeout could otherwise come in and zero beyond i_size. 
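
These ext4_write_begin()/ext4_write_end() hunks follow the conversion that runs through this whole diff: the address_space write_begin/write_end hooks now take a const struct kiocb * instead of a struct file *, generic_write_end() is passed the kiocb, and the raw block_write_end() helper is reduced to (pos, len, copied, folio). For a simple filesystem the converted ->write_end() ends up looking roughly like the ext2/exfat/fat hunks elsewhere in this diff; the sketch below uses invented foofs_* names, with foofs_write_failed() standing in for whatever truncate-on-short-write cleanup the filesystem already has:

	static int foofs_write_end(const struct kiocb *iocb,
				   struct address_space *mapping,
				   loff_t pos, unsigned len, unsigned copied,
				   struct folio *folio, void *fsdata)
	{
		int ret;

		/* generic_write_end() now takes the kiocb rather than a file. */
		ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
		if (ret < len)
			foofs_write_failed(mapping, pos + len);
		return ret;
	}

Callers that previously reached the file through the write_begin/write_end arguments can still get it via iocb->ki_filp when the write comes from a real I/O context; as the comment above notes, the kiocb may be NULL for internal callers such as page_symlink().
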
@@ -1510,7 +1508,7 @@ static void ext4_journalled_zero_new_buffers(handle_t *handle, } while (bh != head); } -static int ext4_journalled_write_end(struct file *file, +static int ext4_journalled_write_end(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) @@ -3036,7 +3034,8 @@ static int ext4_nonda_switch(struct super_block *sb) return 0; } -static int ext4_da_write_begin(struct file *file, struct address_space *mapping, +static int ext4_da_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { @@ -3044,7 +3043,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, struct folio *folio; pgoff_t index; struct inode *inode = mapping->host; - fgf_t fgp = FGP_WRITEBEGIN; ret = ext4_emergency_state(inode->i_sb); if (unlikely(ret)) @@ -3054,7 +3052,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) { *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; - return ext4_write_begin(file, mapping, pos, + return ext4_write_begin(iocb, mapping, pos, len, foliop, fsdata); } *fsdata = (void *)0; @@ -3070,9 +3068,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, } retry: - fgp |= fgf_set_order(len); - folio = __filemap_get_folio(mapping, index, fgp, - mapping_gfp_mask(mapping)); + folio = write_begin_get_folio(iocb, mapping, index, len); if (IS_ERR(folio)) return PTR_ERR(folio); @@ -3144,8 +3140,7 @@ static int ext4_da_do_write_end(struct address_space *mapping, * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES * flag, which is all that's needed to trigger page writeback.
*/ - copied = block_write_end(NULL, mapping, pos, len, copied, - folio, NULL); + copied = block_write_end(pos, len, copied, folio); new_i_size = pos + copied; /* @@ -3196,7 +3191,7 @@ static int ext4_da_do_write_end(struct address_space *mapping, return copied; } -static int ext4_da_write_end(struct file *file, +static int ext4_da_write_end(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) @@ -3205,7 +3200,7 @@ static int ext4_da_write_end(struct file *file, int write_mode = (int)(unsigned long)fsdata; if (write_mode == FALL_BACK_TO_NONDELALLOC) - return ext4_write_end(file, mapping, pos, + return ext4_write_end(iocb, mapping, pos, len, copied, folio, fsdata); trace_ext4_da_write_end(inode, pos, len, copied); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 5668a17458ae..84e3c73952d7 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -980,7 +980,7 @@ group_add_out: return err; } -int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int ext4_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct ext4_inode_info *ei = EXT4_I(inode); @@ -997,7 +997,7 @@ int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa) } int ext4_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); u32 flags = fa->flags; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 31e892842625..711ad80b38d0 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -3519,8 +3519,10 @@ reserve_block: return 0; } -static int f2fs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, struct folio **foliop, void **fsdata) +static int f2fs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, struct folio **foliop, + void **fsdata) { struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -3656,7 +3658,7 @@ fail: return err; } -static int f2fs_write_end(struct file *file, +static int f2fs_write_end(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 9333a22b9a01..c78464792ceb 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3615,9 +3615,9 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, bool readonly, bool need_lock); int f2fs_precache_extents(struct inode *inode); -int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int f2fs_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int f2fs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 696131e655ed..c677230699fd 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -548,8 +548,9 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence) return -EINVAL; } -static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int f2fs_file_mmap_prepare(struct vm_area_desc 
*desc) { + struct file *file = desc->file; struct inode *inode = file_inode(file); if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) @@ -559,7 +560,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) return -EOPNOTSUPP; file_accessed(file); - vma->vm_ops = &f2fs_file_vm_ops; + desc->vm_ops = &f2fs_file_vm_ops; f2fs_down_read(&F2FS_I(inode)->i_sem); set_inode_flag(inode, FI_MMAP_FILE); @@ -3390,7 +3391,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid) } #endif -int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int f2fs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct f2fs_inode_info *fi = F2FS_I(inode); @@ -3414,7 +3415,7 @@ int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa) } int f2fs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL; @@ -5414,7 +5415,7 @@ const struct file_operations f2fs_file_operations = { .iopoll = iocb_bio_iopoll, .open = f2fs_file_open, .release = f2fs_release_file, - .mmap = f2fs_file_mmap, + .mmap_prepare = f2fs_file_mmap_prepare, .flush = f2fs_file_flush, .fsync = f2fs_sync_file, .fallocate = f2fs_fallocate, diff --git a/fs/fat/file.c b/fs/fat/file.c index e887e9ab7472..4fc49a614fb8 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -204,7 +204,7 @@ const struct file_operations fat_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .release = fat_file_release, .unlocked_ioctl = fat_generic_ioctl, .compat_ioctl = compat_ptr_ioctl, diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 3852bb66358c..9648ed097816 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -219,13 +219,14 @@ static void fat_write_failed(struct address_space *mapping, loff_t to) } } -static int fat_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int fat_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { int err; - err = cont_write_begin(file, mapping, pos, len, + err = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata, fat_get_block, &MSDOS_I(mapping->host)->mmu_private); if (err < 0) @@ -233,13 +234,14 @@ static int fat_write_begin(struct file *file, struct address_space *mapping, return err; } -static int fat_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata) +static int fat_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { struct inode *inode = mapping->host; int err; - err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); + err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata); if (err < len) fat_write_failed(mapping, pos + len); if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) { diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c index 23e9b9371ec3..0b920ee40a7f 100644 --- a/fs/fat/namei_msdos.c +++ b/fs/fat/namei_msdos.c @@ -646,7 +646,7 @@ static const struct inode_operations msdos_dir_inode_operations = { static void 
setup(struct super_block *sb) { MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations; - sb->s_d_op = &msdos_dentry_operations; + set_default_d_op(sb, &msdos_dentry_operations); sb->s_flags |= SB_NOATIME; } diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index dd910edd2404..5dbc4cbb8fce 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -1187,9 +1187,9 @@ static void setup(struct super_block *sb) { MSDOS_SB(sb)->dir_ops = &vfat_dir_inode_operations; if (MSDOS_SB(sb)->options.name_check != 's') - sb->s_d_op = &vfat_ci_dentry_ops; + set_default_d_op(sb, &vfat_ci_dentry_ops); else - sb->s_d_op = &vfat_dentry_ops; + set_default_d_op(sb, &vfat_dentry_ops); } static int vfat_fill_super(struct super_block *sb, struct fs_context *fc) diff --git a/fs/fhandle.c b/fs/fhandle.c index 3e092ae6d142..7c236f64cdea 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c @@ -88,7 +88,7 @@ static long do_sys_name_to_handle(const struct path *path, if (fh_flags & EXPORT_FH_CONNECTABLE) { handle->handle_type |= FILEID_IS_CONNECTABLE; if (d_is_dir(path->dentry)) - fh_flags |= FILEID_IS_DIR; + handle->handle_type |= FILEID_IS_DIR; } retval = 0; } @@ -168,23 +168,28 @@ SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name, return err; } -static int get_path_from_fd(int fd, struct path *root) +static int get_path_anchor(int fd, struct path *root) { - if (fd == AT_FDCWD) { - struct fs_struct *fs = current->fs; - spin_lock(&fs->lock); - *root = fs->pwd; - path_get(root); - spin_unlock(&fs->lock); - } else { + if (fd >= 0) { CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; *root = fd_file(f)->f_path; path_get(root); + return 0; } - return 0; + if (fd == AT_FDCWD) { + get_fs_pwd(current->fs, root); + return 0; + } + + if (fd == FD_PIDFS_ROOT) { + pidfs_get_root(root); + return 0; + } + + return -EBADF; } static int vfs_dentry_acceptable(void *context, struct dentry *dentry) @@ -323,13 +328,24 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, { int retval = 0; struct file_handle f_handle; - struct file_handle *handle = NULL; + struct file_handle *handle __free(kfree) = NULL; struct handle_to_path_ctx ctx = {}; const struct export_operations *eops; - retval = get_path_from_fd(mountdirfd, &ctx.root); + if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) + return -EFAULT; + + if ((f_handle.handle_bytes > MAX_HANDLE_SZ) || + (f_handle.handle_bytes == 0)) + return -EINVAL; + + if (f_handle.handle_type < 0 || + FILEID_USER_FLAGS(f_handle.handle_type) & ~FILEID_VALID_USER_FLAGS) + return -EINVAL; + + retval = get_path_anchor(mountdirfd, &ctx.root); if (retval) - goto out_err; + return retval; eops = ctx.root.mnt->mnt_sb->s_export_op; if (eops && eops->permission) @@ -339,21 +355,6 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, if (retval) goto out_path; - if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) { - retval = -EFAULT; - goto out_path; - } - if ((f_handle.handle_bytes > MAX_HANDLE_SZ) || - (f_handle.handle_bytes == 0)) { - retval = -EINVAL; - goto out_path; - } - if (f_handle.handle_type < 0 || - FILEID_USER_FLAGS(f_handle.handle_type) & ~FILEID_VALID_USER_FLAGS) { - retval = -EINVAL; - goto out_path; - } - handle = kmalloc(struct_size(handle, f_handle, f_handle.handle_bytes), GFP_KERNEL); if (!handle) { @@ -366,7 +367,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, &ufh->f_handle, f_handle.handle_bytes)) { retval = -EFAULT; - goto out_handle; + goto out_path; } /* @@ -384,11 +385,8 
@@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, handle->handle_type &= ~FILEID_USER_FLAGS_MASK; retval = do_handle_to_path(handle, path, &ctx); -out_handle: - kfree(handle); out_path: path_put(&ctx.root); -out_err: return retval; } diff --git a/fs/file.c b/fs/file.c index b6db031545e6..6d2275c3be9c 100644 --- a/fs/file.c +++ b/fs/file.c @@ -197,6 +197,21 @@ static struct fdtable *alloc_fdtable(unsigned int slots_wanted) return ERR_PTR(-EMFILE); } + /* + * Check if the allocation size would exceed INT_MAX. kvmalloc_array() + * and kvmalloc() will warn if the allocation size is greater than + * INT_MAX, as filp_cachep objects are not __GFP_NOWARN. + * + * This can happen when sysctl_nr_open is set to a very high value and + * a process tries to use a file descriptor near that limit. For example, + * if sysctl_nr_open is set to 1073741816 (0x3ffffff8) - which is what + * systemd typically sets it to - then trying to use a file descriptor + * close to that value will require allocating a file descriptor table + * that exceeds 8GB in size. + */ + if (unlikely(nr > INT_MAX / sizeof(struct file *))) + return ERR_PTR(-EMFILE); + fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT); if (!fdt) goto out; diff --git a/fs/file_attr.c b/fs/file_attr.c new file mode 100644 index 000000000000..12424d4945d0 --- /dev/null +++ b/fs/file_attr.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/fs.h> +#include <linux/security.h> +#include <linux/fscrypt.h> +#include <linux/fileattr.h> +#include <linux/export.h> +#include <linux/syscalls.h> +#include <linux/namei.h> + +#include "internal.h" + +/** + * fileattr_fill_xflags - initialize fileattr with xflags + * @fa: fileattr pointer + * @xflags: FS_XFLAG_* flags + * + * Set ->fsx_xflags, ->fsx_valid and ->flags (translated xflags). All + * other fields are zeroed. + */ +void fileattr_fill_xflags(struct file_kattr *fa, u32 xflags) +{ + memset(fa, 0, sizeof(*fa)); + fa->fsx_valid = true; + fa->fsx_xflags = xflags; + if (fa->fsx_xflags & FS_XFLAG_IMMUTABLE) + fa->flags |= FS_IMMUTABLE_FL; + if (fa->fsx_xflags & FS_XFLAG_APPEND) + fa->flags |= FS_APPEND_FL; + if (fa->fsx_xflags & FS_XFLAG_SYNC) + fa->flags |= FS_SYNC_FL; + if (fa->fsx_xflags & FS_XFLAG_NOATIME) + fa->flags |= FS_NOATIME_FL; + if (fa->fsx_xflags & FS_XFLAG_NODUMP) + fa->flags |= FS_NODUMP_FL; + if (fa->fsx_xflags & FS_XFLAG_DAX) + fa->flags |= FS_DAX_FL; + if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT) + fa->flags |= FS_PROJINHERIT_FL; +} +EXPORT_SYMBOL(fileattr_fill_xflags); + +/** + * fileattr_fill_flags - initialize fileattr with flags + * @fa: fileattr pointer + * @flags: FS_*_FL flags + * + * Set ->flags, ->flags_valid and ->fsx_xflags (translated flags). + * All other fields are zeroed. 
+ */ +void fileattr_fill_flags(struct file_kattr *fa, u32 flags) +{ + memset(fa, 0, sizeof(*fa)); + fa->flags_valid = true; + fa->flags = flags; + if (fa->flags & FS_SYNC_FL) + fa->fsx_xflags |= FS_XFLAG_SYNC; + if (fa->flags & FS_IMMUTABLE_FL) + fa->fsx_xflags |= FS_XFLAG_IMMUTABLE; + if (fa->flags & FS_APPEND_FL) + fa->fsx_xflags |= FS_XFLAG_APPEND; + if (fa->flags & FS_NODUMP_FL) + fa->fsx_xflags |= FS_XFLAG_NODUMP; + if (fa->flags & FS_NOATIME_FL) + fa->fsx_xflags |= FS_XFLAG_NOATIME; + if (fa->flags & FS_DAX_FL) + fa->fsx_xflags |= FS_XFLAG_DAX; + if (fa->flags & FS_PROJINHERIT_FL) + fa->fsx_xflags |= FS_XFLAG_PROJINHERIT; +} +EXPORT_SYMBOL(fileattr_fill_flags); + +/** + * vfs_fileattr_get - retrieve miscellaneous file attributes + * @dentry: the object to retrieve from + * @fa: fileattr pointer + * + * Call i_op->fileattr_get() callback, if exists. + * + * Return: 0 on success, or a negative error on failure. + */ +int vfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) +{ + struct inode *inode = d_inode(dentry); + int error; + + if (!inode->i_op->fileattr_get) + return -EOPNOTSUPP; + + error = security_inode_file_getattr(dentry, fa); + if (error) + return error; + + return inode->i_op->fileattr_get(dentry, fa); +} +EXPORT_SYMBOL(vfs_fileattr_get); + +static void fileattr_to_file_attr(const struct file_kattr *fa, + struct file_attr *fattr) +{ + __u32 mask = FS_XFLAGS_MASK; + + memset(fattr, 0, sizeof(struct file_attr)); + fattr->fa_xflags = fa->fsx_xflags & mask; + fattr->fa_extsize = fa->fsx_extsize; + fattr->fa_nextents = fa->fsx_nextents; + fattr->fa_projid = fa->fsx_projid; + fattr->fa_cowextsize = fa->fsx_cowextsize; +} + +/** + * copy_fsxattr_to_user - copy fsxattr to userspace. + * @fa: fileattr pointer + * @ufa: fsxattr user pointer + * + * Return: 0 on success, or -EFAULT on failure. + */ +int copy_fsxattr_to_user(const struct file_kattr *fa, struct fsxattr __user *ufa) +{ + struct fsxattr xfa; + __u32 mask = FS_XFLAGS_MASK; + + memset(&xfa, 0, sizeof(xfa)); + xfa.fsx_xflags = fa->fsx_xflags & mask; + xfa.fsx_extsize = fa->fsx_extsize; + xfa.fsx_nextents = fa->fsx_nextents; + xfa.fsx_projid = fa->fsx_projid; + xfa.fsx_cowextsize = fa->fsx_cowextsize; + + if (copy_to_user(ufa, &xfa, sizeof(xfa))) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL(copy_fsxattr_to_user); + +static int file_attr_to_fileattr(const struct file_attr *fattr, + struct file_kattr *fa) +{ + __u64 mask = FS_XFLAGS_MASK; + + if (fattr->fa_xflags & ~mask) + return -EINVAL; + + fileattr_fill_xflags(fa, fattr->fa_xflags); + fa->fsx_xflags &= ~FS_XFLAG_RDONLY_MASK; + fa->fsx_extsize = fattr->fa_extsize; + fa->fsx_projid = fattr->fa_projid; + fa->fsx_cowextsize = fattr->fa_cowextsize; + + return 0; +} + +static int copy_fsxattr_from_user(struct file_kattr *fa, + struct fsxattr __user *ufa) +{ + struct fsxattr xfa; + __u32 mask = FS_XFLAGS_MASK; + + if (copy_from_user(&xfa, ufa, sizeof(xfa))) + return -EFAULT; + + if (xfa.fsx_xflags & ~mask) + return -EOPNOTSUPP; + + fileattr_fill_xflags(fa, xfa.fsx_xflags); + fa->fsx_xflags &= ~FS_XFLAG_RDONLY_MASK; + fa->fsx_extsize = xfa.fsx_extsize; + fa->fsx_nextents = xfa.fsx_nextents; + fa->fsx_projid = xfa.fsx_projid; + fa->fsx_cowextsize = xfa.fsx_cowextsize; + + return 0; +} + +/* + * Generic function to check FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS values and reject + * any invalid configurations. + * + * Note: must be called with inode lock held. 
+ */ +static int fileattr_set_prepare(struct inode *inode, + const struct file_kattr *old_ma, + struct file_kattr *fa) +{ + int err; + + /* + * The IMMUTABLE and APPEND_ONLY flags can only be changed by + * the relevant capability. + */ + if ((fa->flags ^ old_ma->flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) && + !capable(CAP_LINUX_IMMUTABLE)) + return -EPERM; + + err = fscrypt_prepare_setflags(inode, old_ma->flags, fa->flags); + if (err) + return err; + + /* + * Project Quota ID state is only allowed to change from within the init + * namespace. Enforce that restriction only if we are trying to change + * the quota ID state. Everything else is allowed in user namespaces. + */ + if (current_user_ns() != &init_user_ns) { + if (old_ma->fsx_projid != fa->fsx_projid) + return -EINVAL; + if ((old_ma->fsx_xflags ^ fa->fsx_xflags) & + FS_XFLAG_PROJINHERIT) + return -EINVAL; + } else { + /* + * Caller is allowed to change the project ID. If it is being + * changed, make sure that the new value is valid. + */ + if (old_ma->fsx_projid != fa->fsx_projid && + !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid))) + return -EINVAL; + } + + /* Check extent size hints. */ + if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode)) + return -EINVAL; + + if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) && + !S_ISDIR(inode->i_mode)) + return -EINVAL; + + if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) && + !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) + return -EINVAL; + + /* + * It is only valid to set the DAX flag on regular files and + * directories on filesystems. + */ + if ((fa->fsx_xflags & FS_XFLAG_DAX) && + !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) + return -EINVAL; + + /* Extent size hints of zero turn off the flags. */ + if (fa->fsx_extsize == 0) + fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT); + if (fa->fsx_cowextsize == 0) + fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE; + + return 0; +} + +/** + * vfs_fileattr_set - change miscellaneous file attributes + * @idmap: idmap of the mount + * @dentry: the object to change + * @fa: fileattr pointer + * + * After verifying permissions, call i_op->fileattr_set() callback, if it + * exists. + * + * Verifying attributes involves retrieving current attributes with + * i_op->fileattr_get(); this also allows initializing attributes that have + * not been set by the caller to current values. Inode lock is held + * throughout to prevent racing with another instance. + * + * Return: 0 on success, or a negative error on failure. 
+ */ +int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, + struct file_kattr *fa) +{ + struct inode *inode = d_inode(dentry); + struct file_kattr old_ma = {}; + int err; + + if (!inode->i_op->fileattr_set) + return -EOPNOTSUPP; + + if (!inode_owner_or_capable(idmap, inode)) + return -EPERM; + + inode_lock(inode); + err = vfs_fileattr_get(dentry, &old_ma); + if (!err) { + /* initialize missing bits from old_ma */ + if (fa->flags_valid) { + fa->fsx_xflags |= old_ma.fsx_xflags & ~FS_XFLAG_COMMON; + fa->fsx_extsize = old_ma.fsx_extsize; + fa->fsx_nextents = old_ma.fsx_nextents; + fa->fsx_projid = old_ma.fsx_projid; + fa->fsx_cowextsize = old_ma.fsx_cowextsize; + } else { + fa->flags |= old_ma.flags & ~FS_COMMON_FL; + } + + err = fileattr_set_prepare(inode, &old_ma, fa); + if (err) + goto out; + err = security_inode_file_setattr(dentry, fa); + if (err) + goto out; + err = inode->i_op->fileattr_set(idmap, dentry, fa); + if (err) + goto out; + } + +out: + inode_unlock(inode); + return err; +} +EXPORT_SYMBOL(vfs_fileattr_set); + +int ioctl_getflags(struct file *file, unsigned int __user *argp) +{ + struct file_kattr fa = { .flags_valid = true }; /* hint only */ + int err; + + err = vfs_fileattr_get(file->f_path.dentry, &fa); + if (err == -EOPNOTSUPP) + err = -ENOIOCTLCMD; + if (!err) + err = put_user(fa.flags, argp); + return err; +} +EXPORT_SYMBOL(ioctl_getflags); + +int ioctl_setflags(struct file *file, unsigned int __user *argp) +{ + struct mnt_idmap *idmap = file_mnt_idmap(file); + struct dentry *dentry = file->f_path.dentry; + struct file_kattr fa; + unsigned int flags; + int err; + + err = get_user(flags, argp); + if (!err) { + err = mnt_want_write_file(file); + if (!err) { + fileattr_fill_flags(&fa, flags); + err = vfs_fileattr_set(idmap, dentry, &fa); + mnt_drop_write_file(file); + if (err == -EOPNOTSUPP) + err = -ENOIOCTLCMD; + } + } + return err; +} +EXPORT_SYMBOL(ioctl_setflags); + +int ioctl_fsgetxattr(struct file *file, void __user *argp) +{ + struct file_kattr fa = { .fsx_valid = true }; /* hint only */ + int err; + + err = vfs_fileattr_get(file->f_path.dentry, &fa); + if (err == -EOPNOTSUPP) + err = -ENOIOCTLCMD; + if (!err) + err = copy_fsxattr_to_user(&fa, argp); + + return err; +} +EXPORT_SYMBOL(ioctl_fsgetxattr); + +int ioctl_fssetxattr(struct file *file, void __user *argp) +{ + struct mnt_idmap *idmap = file_mnt_idmap(file); + struct dentry *dentry = file->f_path.dentry; + struct file_kattr fa; + int err; + + err = copy_fsxattr_from_user(&fa, argp); + if (!err) { + err = mnt_want_write_file(file); + if (!err) { + err = vfs_fileattr_set(idmap, dentry, &fa); + mnt_drop_write_file(file); + if (err == -EOPNOTSUPP) + err = -ENOIOCTLCMD; + } + } + return err; +} +EXPORT_SYMBOL(ioctl_fssetxattr); + +SYSCALL_DEFINE5(file_getattr, int, dfd, const char __user *, filename, + struct file_attr __user *, ufattr, size_t, usize, + unsigned int, at_flags) +{ + struct path filepath __free(path_put) = {}; + struct filename *name __free(putname) = NULL; + unsigned int lookup_flags = 0; + struct file_attr fattr; + struct file_kattr fa; + int error; + + BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0); + BUILD_BUG_ON(sizeof(struct file_attr) != FILE_ATTR_SIZE_LATEST); + + if ((at_flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0) + return -EINVAL; + + if (!(at_flags & AT_SYMLINK_NOFOLLOW)) + lookup_flags |= LOOKUP_FOLLOW; + + if (usize > PAGE_SIZE) + return -E2BIG; + + if (usize < FILE_ATTR_SIZE_VER0) + return -EINVAL; + + name = getname_maybe_null(filename, 
at_flags); + if (IS_ERR(name)) + return PTR_ERR(name); + + if (!name && dfd >= 0) { + CLASS(fd, f)(dfd); + if (fd_empty(f)) + return -EBADF; + + filepath = fd_file(f)->f_path; + path_get(&filepath); + } else { + error = filename_lookup(dfd, name, lookup_flags, &filepath, + NULL); + if (error) + return error; + } + + error = vfs_fileattr_get(filepath.dentry, &fa); + if (error) + return error; + + fileattr_to_file_attr(&fa, &fattr); + error = copy_struct_to_user(ufattr, usize, &fattr, + sizeof(struct file_attr), NULL); + + return error; +} + +SYSCALL_DEFINE5(file_setattr, int, dfd, const char __user *, filename, + struct file_attr __user *, ufattr, size_t, usize, + unsigned int, at_flags) +{ + struct path filepath __free(path_put) = {}; + struct filename *name __free(putname) = NULL; + unsigned int lookup_flags = 0; + struct file_attr fattr; + struct file_kattr fa; + int error; + + BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0); + BUILD_BUG_ON(sizeof(struct file_attr) != FILE_ATTR_SIZE_LATEST); + + if ((at_flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0) + return -EINVAL; + + if (!(at_flags & AT_SYMLINK_NOFOLLOW)) + lookup_flags |= LOOKUP_FOLLOW; + + if (usize > PAGE_SIZE) + return -E2BIG; + + if (usize < FILE_ATTR_SIZE_VER0) + return -EINVAL; + + error = copy_struct_from_user(&fattr, sizeof(struct file_attr), ufattr, + usize); + if (error) + return error; + + error = file_attr_to_fileattr(&fattr, &fa); + if (error) + return error; + + name = getname_maybe_null(filename, at_flags); + if (IS_ERR(name)) + return PTR_ERR(name); + + if (!name && dfd >= 0) { + CLASS(fd, f)(dfd); + if (fd_empty(f)) + return -EBADF; + + filepath = fd_file(f)->f_path; + path_get(&filepath); + } else { + error = filename_lookup(dfd, name, lookup_flags, &filepath, + NULL); + if (error) + return error; + } + + error = mnt_want_write(filepath.mnt); + if (!error) { + error = vfs_fileattr_set(mnt_idmap(filepath.mnt), + filepath.dentry, &fa); + mnt_drop_write(filepath.mnt); + } + + return error; +} diff --git a/fs/file_table.c b/fs/file_table.c index 138114d64307..f09d79a98111 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -52,17 +52,20 @@ struct backing_file { }; }; -static inline struct backing_file *backing_file(struct file *f) -{ - return container_of(f, struct backing_file, file); -} +#define backing_file(f) container_of(f, struct backing_file, file) -struct path *backing_file_user_path(struct file *f) +struct path *backing_file_user_path(const struct file *f) { return &backing_file(f)->user_path; } EXPORT_SYMBOL_GPL(backing_file_user_path); +void backing_file_set_user_path(struct file *f, const struct path *path) +{ + backing_file(f)->user_path = *path; +} +EXPORT_SYMBOL_GPL(backing_file_set_user_path); + static inline void file_free(struct file *f) { security_file_free(f); diff --git a/fs/fs_struct.c b/fs/fs_struct.c index 64c2d0814ed6..28be762ac1c6 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c @@ -17,12 +17,10 @@ void set_fs_root(struct fs_struct *fs, const struct path *path) struct path old_root; path_get(path); - spin_lock(&fs->lock); - write_seqcount_begin(&fs->seq); + write_seqlock(&fs->seq); old_root = fs->root; fs->root = *path; - write_seqcount_end(&fs->seq); - spin_unlock(&fs->lock); + write_sequnlock(&fs->seq); if (old_root.dentry) path_put(&old_root); } @@ -36,12 +34,10 @@ void set_fs_pwd(struct fs_struct *fs, const struct path *path) struct path old_pwd; path_get(path); - spin_lock(&fs->lock); - write_seqcount_begin(&fs->seq); + write_seqlock(&fs->seq); old_pwd = fs->pwd; 
fs->pwd = *path; - write_seqcount_end(&fs->seq); - spin_unlock(&fs->lock); + write_sequnlock(&fs->seq); if (old_pwd.dentry) path_put(&old_pwd); @@ -67,16 +63,14 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root) fs = p->fs; if (fs) { int hits = 0; - spin_lock(&fs->lock); - write_seqcount_begin(&fs->seq); + write_seqlock(&fs->seq); hits += replace_path(&fs->root, old_root, new_root); hits += replace_path(&fs->pwd, old_root, new_root); - write_seqcount_end(&fs->seq); while (hits--) { count++; path_get(new_root); } - spin_unlock(&fs->lock); + write_sequnlock(&fs->seq); } task_unlock(p); } @@ -99,10 +93,10 @@ void exit_fs(struct task_struct *tsk) if (fs) { int kill; task_lock(tsk); - spin_lock(&fs->lock); + read_seqlock_excl(&fs->seq); tsk->fs = NULL; kill = !--fs->users; - spin_unlock(&fs->lock); + read_sequnlock_excl(&fs->seq); task_unlock(tsk); if (kill) free_fs_struct(fs); @@ -116,16 +110,15 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) if (fs) { fs->users = 1; fs->in_exec = 0; - spin_lock_init(&fs->lock); - seqcount_spinlock_init(&fs->seq, &fs->lock); + seqlock_init(&fs->seq); fs->umask = old->umask; - spin_lock(&old->lock); + read_seqlock_excl(&old->seq); fs->root = old->root; path_get(&fs->root); fs->pwd = old->pwd; path_get(&fs->pwd); - spin_unlock(&old->lock); + read_sequnlock_excl(&old->seq); } return fs; } @@ -140,10 +133,10 @@ int unshare_fs_struct(void) return -ENOMEM; task_lock(current); - spin_lock(&fs->lock); + read_seqlock_excl(&fs->seq); kill = !--fs->users; current->fs = new_fs; - spin_unlock(&fs->lock); + read_sequnlock_excl(&fs->seq); task_unlock(current); if (kill) @@ -162,7 +155,6 @@ EXPORT_SYMBOL(current_umask); /* to be mentioned only in INIT_TASK */ struct fs_struct init_fs = { .users = 1, - .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock), - .seq = SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock), + .seq = __SEQLOCK_UNLOCKED(init_fs.seq), .umask = 0022, }; diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig index ca215a3cba3e..a774166264de 100644 --- a/fs/fuse/Kconfig +++ b/fs/fuse/Kconfig @@ -2,6 +2,7 @@ config FUSE_FS tristate "FUSE (Filesystem in Userspace) support" select FS_POSIX_ACL + select FS_IOMAP help With FUSE it is possible to implement a fully functional filesystem in a userspace program. diff --git a/fs/fuse/control.c b/fs/fuse/control.c index 2a730d88cc3b..bb407705603c 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -11,6 +11,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/fs_context.h> +#include <linux/namei.h> #define FUSE_CTL_SUPER_MAGIC 0x65735543 @@ -212,7 +213,6 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent, struct dentry *dentry; struct inode *inode; - BUG_ON(fc->ctl_ndents >= FUSE_CTL_NUM_DENTRIES); dentry = d_alloc_name(parent, name); if (!dentry) return NULL; @@ -236,8 +236,6 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent, inode->i_private = fc; d_add(dentry, inode); - fc->ctl_dentry[fc->ctl_ndents++] = dentry; - return dentry; } @@ -280,27 +278,29 @@ int fuse_ctl_add_conn(struct fuse_conn *fc) return -ENOMEM; } +static void remove_one(struct dentry *dentry) +{ + d_inode(dentry)->i_private = NULL; +} + /* * Remove a connection from the control filesystem (if it exists). 
* Caller must hold fuse_mutex */ void fuse_ctl_remove_conn(struct fuse_conn *fc) { - int i; + struct dentry *dentry; + char name[32]; if (!fuse_control_sb || fc->no_control) return; - for (i = fc->ctl_ndents - 1; i >= 0; i--) { - struct dentry *dentry = fc->ctl_dentry[i]; - d_inode(dentry)->i_private = NULL; - if (!i) { - /* Get rid of submounts: */ - d_invalidate(dentry); - } - dput(dentry); + sprintf(name, "%u", fc->dev); + dentry = lookup_noperm_positive_unlocked(&QSTR(name), fuse_control_sb->s_root); + if (!IS_ERR(dentry)) { + simple_recursive_removal(dentry, remove_one); + dput(dentry); // paired with lookup_noperm_positive_unlocked() } - drop_nlink(d_inode(fuse_control_sb->s_root)); } static int fuse_ctl_fill_super(struct super_block *sb, struct fs_context *fsc) @@ -346,12 +346,8 @@ static int fuse_ctl_init_fs_context(struct fs_context *fsc) static void fuse_ctl_kill_sb(struct super_block *sb) { - struct fuse_conn *fc; - mutex_lock(&fuse_mutex); fuse_control_sb = NULL; - list_for_each_entry(fc, &fuse_conn_list, entry) - fc->ctl_ndents = 0; mutex_unlock(&fuse_mutex); kill_litter_super(sb); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 45b4c3cc1396..2d817d7cab26 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -338,13 +338,6 @@ const struct dentry_operations fuse_dentry_operations = { .d_automount = fuse_dentry_automount, }; -const struct dentry_operations fuse_root_dentry_operations = { -#if BITS_PER_LONG < 64 - .d_init = fuse_dentry_init, - .d_release = fuse_dentry_release, -#endif -}; - int fuse_valid_type(int m) { return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 47006d0753f1..95275a1e2f54 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -21,6 +21,7 @@ #include <linux/filelock.h> #include <linux/splice.h> #include <linux/task_io_accounting_ops.h> +#include <linux/iomap.h> static int fuse_send_open(struct fuse_mount *fm, u64 nodeid, unsigned int open_flags, int opcode, @@ -788,12 +789,16 @@ static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read, } } -static int fuse_do_readfolio(struct file *file, struct folio *folio) +static int fuse_do_readfolio(struct file *file, struct folio *folio, + size_t off, size_t len) { struct inode *inode = folio->mapping->host; struct fuse_mount *fm = get_fuse_mount(inode); - loff_t pos = folio_pos(folio); - struct fuse_folio_desc desc = { .length = folio_size(folio) }; + loff_t pos = folio_pos(folio) + off; + struct fuse_folio_desc desc = { + .offset = off, + .length = len, + }; struct fuse_io_args ia = { .ap.args.page_zeroing = true, .ap.args.out_pages = true, @@ -820,8 +825,6 @@ static int fuse_do_readfolio(struct file *file, struct folio *folio) if (res < desc.length) fuse_short_read(inode, attr_ver, res, &ia.ap); - folio_mark_uptodate(folio); - return 0; } @@ -834,13 +837,26 @@ static int fuse_read_folio(struct file *file, struct folio *folio) if (fuse_is_bad(inode)) goto out; - err = fuse_do_readfolio(file, folio); + err = fuse_do_readfolio(file, folio, 0, folio_size(folio)); + if (!err) + folio_mark_uptodate(folio); + fuse_invalidate_atime(inode); out: folio_unlock(folio); return err; } +static int fuse_iomap_read_folio_range(const struct iomap_iter *iter, + struct folio *folio, loff_t pos, + size_t len) +{ + struct file *file = iter->private; + size_t off = offset_in_folio(folio, pos); + + return fuse_do_readfolio(file, folio, off, len); +} + static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, int err) { @@ -1374,6 +1390,24 @@ 
static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive) } } +static const struct iomap_write_ops fuse_iomap_write_ops = { + .read_folio_range = fuse_iomap_read_folio_range, +}; + +static int fuse_iomap_begin(struct inode *inode, loff_t offset, loff_t length, + unsigned int flags, struct iomap *iomap, + struct iomap *srcmap) +{ + iomap->type = IOMAP_MAPPED; + iomap->length = length; + iomap->offset = offset; + return 0; +} + +static const struct iomap_ops fuse_iomap_ops = { + .iomap_begin = fuse_iomap_begin, +}; + static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; @@ -1383,6 +1417,7 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = mapping->host; ssize_t err, count; struct fuse_conn *fc = get_fuse_conn(inode); + bool writeback = false; if (fc->writeback_cache) { /* Update size (EOF optimization) and mode (SUID clearing) */ @@ -1391,16 +1426,11 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) if (err) return err; - if (fc->handle_killpriv_v2 && - setattr_should_drop_suidgid(idmap, - file_inode(file))) { - goto writethrough; - } - - return generic_file_write_iter(iocb, from); + if (!fc->handle_killpriv_v2 || + !setattr_should_drop_suidgid(idmap, file_inode(file))) + writeback = true; } -writethrough: inode_lock(inode); err = count = generic_write_checks(iocb, from); @@ -1419,6 +1449,15 @@ writethrough: goto out; written = direct_write_fallback(iocb, from, written, fuse_perform_write(iocb, from)); + } else if (writeback) { + /* + * Use iomap so that we can do granular uptodate reads + * and granular dirty tracking for large folios. + */ + written = iomap_file_buffered_write(iocb, from, + &fuse_iomap_ops, + &fuse_iomap_write_ops, + file); } else { written = fuse_perform_write(iocb, from); } @@ -1793,7 +1832,7 @@ static void fuse_writepage_finish(struct fuse_writepage_args *wpa) * scope of the fi->lock alleviates xarray lock * contention and noticeably improves performance. 
*/ - folio_end_writeback(ap->folios[i]); + iomap_finish_folio_write(inode, ap->folios[i], 1); dec_wb_stat(&bdi->wb, WB_WRITEBACK); wb_writeout_inc(&bdi->wb); } @@ -1980,19 +2019,20 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc, } static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio, - uint32_t folio_index) + uint32_t folio_index, loff_t offset, unsigned len) { struct inode *inode = folio->mapping->host; struct fuse_args_pages *ap = &wpa->ia.ap; ap->folios[folio_index] = folio; - ap->descs[folio_index].offset = 0; - ap->descs[folio_index].length = folio_size(folio); + ap->descs[folio_index].offset = offset; + ap->descs[folio_index].length = len; inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); } static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio, + size_t offset, struct fuse_file *ff) { struct inode *inode = folio->mapping->host; @@ -2005,7 +2045,7 @@ static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio return NULL; fuse_writepage_add_to_bucket(fc, wpa); - fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio), 0); + fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio) + offset, 0); wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; wpa->inode = inode; wpa->ia.ff = ff; @@ -2017,63 +2057,28 @@ static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio return wpa; } -static int fuse_writepage_locked(struct folio *folio) -{ - struct address_space *mapping = folio->mapping; - struct inode *inode = mapping->host; - struct fuse_inode *fi = get_fuse_inode(inode); - struct fuse_writepage_args *wpa; - struct fuse_args_pages *ap; - struct fuse_file *ff; - int error = -EIO; - - ff = fuse_write_file_get(fi); - if (!ff) - goto err; - - wpa = fuse_writepage_args_setup(folio, ff); - error = -ENOMEM; - if (!wpa) - goto err_writepage_args; - - ap = &wpa->ia.ap; - ap->num_folios = 1; - - folio_start_writeback(folio); - fuse_writepage_args_page_fill(wpa, folio, 0); - - spin_lock(&fi->lock); - list_add_tail(&wpa->queue_entry, &fi->queued_writes); - fuse_flush_writepages(inode); - spin_unlock(&fi->lock); - - return 0; - -err_writepage_args: - fuse_file_put(ff, false); -err: - mapping_set_error(folio->mapping, error); - return error; -} - struct fuse_fill_wb_data { struct fuse_writepage_args *wpa; struct fuse_file *ff; - struct inode *inode; unsigned int max_folios; - unsigned int nr_pages; + /* + * nr_bytes won't overflow since fuse_writepage_need_send() caps + * wb requests to never exceed fc->max_pages (which has an upper bound + * of U16_MAX). 
+ */ + unsigned int nr_bytes; }; -static bool fuse_pages_realloc(struct fuse_fill_wb_data *data) +static bool fuse_pages_realloc(struct fuse_fill_wb_data *data, + unsigned int max_pages) { struct fuse_args_pages *ap = &data->wpa->ia.ap; - struct fuse_conn *fc = get_fuse_conn(data->inode); struct folio **folios; struct fuse_folio_desc *descs; unsigned int nfolios = min_t(unsigned int, max_t(unsigned int, data->max_folios * 2, FUSE_DEFAULT_MAX_PAGES_PER_REQ), - fc->max_pages); + max_pages); WARN_ON(nfolios <= data->max_folios); folios = fuse_folios_alloc(nfolios, GFP_NOFS, &descs); @@ -2090,10 +2095,10 @@ static bool fuse_pages_realloc(struct fuse_fill_wb_data *data) return true; } -static void fuse_writepages_send(struct fuse_fill_wb_data *data) +static void fuse_writepages_send(struct inode *inode, + struct fuse_fill_wb_data *data) { struct fuse_writepage_args *wpa = data->wpa; - struct inode *inode = data->inode; struct fuse_inode *fi = get_fuse_inode(inode); spin_lock(&fi->lock); @@ -2102,195 +2107,148 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data) spin_unlock(&fi->lock); } -static bool fuse_writepage_need_send(struct fuse_conn *fc, struct folio *folio, - struct fuse_args_pages *ap, +static bool fuse_writepage_need_send(struct fuse_conn *fc, loff_t pos, + unsigned len, struct fuse_args_pages *ap, struct fuse_fill_wb_data *data) { + struct folio *prev_folio; + struct fuse_folio_desc prev_desc; + unsigned bytes = data->nr_bytes + len; + loff_t prev_pos; + WARN_ON(!ap->num_folios); /* Reached max pages */ - if (data->nr_pages + folio_nr_pages(folio) > fc->max_pages) + if ((bytes + PAGE_SIZE - 1) >> PAGE_SHIFT > fc->max_pages) return true; /* Reached max write bytes */ - if ((data->nr_pages * PAGE_SIZE) + folio_size(folio) > fc->max_write) + if (bytes > fc->max_write) return true; /* Discontinuity */ - if (folio_next_index(ap->folios[ap->num_folios - 1]) != folio->index) + prev_folio = ap->folios[ap->num_folios - 1]; + prev_desc = ap->descs[ap->num_folios - 1]; + prev_pos = folio_pos(prev_folio) + prev_desc.offset + prev_desc.length; + if (prev_pos != pos) return true; /* Need to grow the pages array? If so, did the expansion fail? 
*/ - if (ap->num_folios == data->max_folios && !fuse_pages_realloc(data)) + if (ap->num_folios == data->max_folios && + !fuse_pages_realloc(data, fc->max_pages)) return true; return false; } -static int fuse_writepages_fill(struct folio *folio, - struct writeback_control *wbc, void *_data) +static ssize_t fuse_iomap_writeback_range(struct iomap_writepage_ctx *wpc, + struct folio *folio, u64 pos, + unsigned len, u64 end_pos) { - struct fuse_fill_wb_data *data = _data; + struct fuse_fill_wb_data *data = wpc->wb_ctx; struct fuse_writepage_args *wpa = data->wpa; struct fuse_args_pages *ap = &wpa->ia.ap; - struct inode *inode = data->inode; + struct inode *inode = wpc->inode; struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); - int err; + loff_t offset = offset_in_folio(folio, pos); + + WARN_ON_ONCE(!data); if (!data->ff) { - err = -EIO; data->ff = fuse_write_file_get(fi); if (!data->ff) - goto out_unlock; + return -EIO; } - if (wpa && fuse_writepage_need_send(fc, folio, ap, data)) { - fuse_writepages_send(data); + if (wpa && fuse_writepage_need_send(fc, pos, len, ap, data)) { + fuse_writepages_send(inode, data); data->wpa = NULL; - data->nr_pages = 0; + data->nr_bytes = 0; } if (data->wpa == NULL) { - err = -ENOMEM; - wpa = fuse_writepage_args_setup(folio, data->ff); + wpa = fuse_writepage_args_setup(folio, offset, data->ff); if (!wpa) - goto out_unlock; + return -ENOMEM; fuse_file_get(wpa->ia.ff); data->max_folios = 1; ap = &wpa->ia.ap; } - folio_start_writeback(folio); - fuse_writepage_args_page_fill(wpa, folio, ap->num_folios); - data->nr_pages += folio_nr_pages(folio); + iomap_start_folio_write(inode, folio, 1); + fuse_writepage_args_page_fill(wpa, folio, ap->num_folios, + offset, len); + data->nr_bytes += len; - err = 0; ap->num_folios++; if (!data->wpa) data->wpa = wpa; -out_unlock: - folio_unlock(folio); - return err; + return len; } -static int fuse_writepages(struct address_space *mapping, - struct writeback_control *wbc) +static int fuse_iomap_writeback_submit(struct iomap_writepage_ctx *wpc, + int error) { - struct inode *inode = mapping->host; - struct fuse_conn *fc = get_fuse_conn(inode); - struct fuse_fill_wb_data data; - int err; + struct fuse_fill_wb_data *data = wpc->wb_ctx; - err = -EIO; - if (fuse_is_bad(inode)) - goto out; - - if (wbc->sync_mode == WB_SYNC_NONE && - fc->num_background >= fc->congestion_threshold) - return 0; + WARN_ON_ONCE(!data); - data.inode = inode; - data.wpa = NULL; - data.ff = NULL; - data.nr_pages = 0; - - err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); - if (data.wpa) { - WARN_ON(!data.wpa->ia.ap.num_folios); - fuse_writepages_send(&data); + if (data->wpa) { + WARN_ON(!data->wpa->ia.ap.num_folios); + fuse_writepages_send(wpc->inode, data); } - if (data.ff) - fuse_file_put(data.ff, false); -out: - return err; -} - -/* - * It's worthy to make sure that space is reserved on disk for the write, - * but how to implement it without killing performance need more thinking. 
- */ -static int fuse_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, struct folio **foliop, void **fsdata) -{ - pgoff_t index = pos >> PAGE_SHIFT; - struct fuse_conn *fc = get_fuse_conn(file_inode(file)); - struct folio *folio; - loff_t fsize; - int err = -ENOMEM; - - WARN_ON(!fc->writeback_cache); - - folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, - mapping_gfp_mask(mapping)); - if (IS_ERR(folio)) - goto error; - - if (folio_test_uptodate(folio) || len >= folio_size(folio)) - goto success; - /* - * Check if the start of this folio comes after the end of file, - * in which case the readpage can be optimized away. - */ - fsize = i_size_read(mapping->host); - if (fsize <= folio_pos(folio)) { - size_t off = offset_in_folio(folio, pos); - if (off) - folio_zero_segment(folio, 0, off); - goto success; - } - err = fuse_do_readfolio(file, folio); - if (err) - goto cleanup; -success: - *foliop = folio; - return 0; + if (data->ff) + fuse_file_put(data->ff, false); -cleanup: - folio_unlock(folio); - folio_put(folio); -error: - return err; + return error; } -static int fuse_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata) -{ - struct inode *inode = folio->mapping->host; - - /* Haven't copied anything? Skip zeroing, size extending, dirtying. */ - if (!copied) - goto unlock; - - pos += copied; - if (!folio_test_uptodate(folio)) { - /* Zero any unwritten bytes at the end of the page */ - size_t endoff = pos & ~PAGE_MASK; - if (endoff) - folio_zero_segment(folio, endoff, PAGE_SIZE); - folio_mark_uptodate(folio); - } +static const struct iomap_writeback_ops fuse_writeback_ops = { + .writeback_range = fuse_iomap_writeback_range, + .writeback_submit = fuse_iomap_writeback_submit, +}; - if (pos > inode->i_size) - i_size_write(inode, pos); +static int fuse_writepages(struct address_space *mapping, + struct writeback_control *wbc) +{ + struct inode *inode = mapping->host; + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_fill_wb_data data = {}; + struct iomap_writepage_ctx wpc = { + .inode = inode, + .iomap.type = IOMAP_MAPPED, + .wbc = wbc, + .ops = &fuse_writeback_ops, + .wb_ctx = &data, + }; - folio_mark_dirty(folio); + if (fuse_is_bad(inode)) + return -EIO; -unlock: - folio_unlock(folio); - folio_put(folio); + if (wbc->sync_mode == WB_SYNC_NONE && + fc->num_background >= fc->congestion_threshold) + return 0; - return copied; + return iomap_writepages(&wpc); } static int fuse_launder_folio(struct folio *folio) { int err = 0; + struct fuse_fill_wb_data data = {}; + struct iomap_writepage_ctx wpc = { + .inode = folio->mapping->host, + .iomap.type = IOMAP_MAPPED, + .ops = &fuse_writeback_ops, + .wb_ctx = &data, + }; + if (folio_clear_dirty_for_io(folio)) { - err = fuse_writepage_locked(folio); + err = iomap_writeback_folio(&wpc, folio); + err = fuse_iomap_writeback_submit(&wpc, err); if (!err) folio_wait_writeback(folio); } @@ -3143,12 +3101,13 @@ static const struct address_space_operations fuse_file_aops = { .readahead = fuse_readahead, .writepages = fuse_writepages, .launder_folio = fuse_launder_folio, - .dirty_folio = filemap_dirty_folio, + .dirty_folio = iomap_dirty_folio, + .release_folio = iomap_release_folio, + .invalidate_folio = iomap_invalidate_folio, + .is_partially_uptodate = iomap_is_partially_uptodate, .migrate_folio = filemap_migrate_folio, .bmap = fuse_bmap, .direct_IO = fuse_direct_IO, - .write_begin = fuse_write_begin, - .write_end = 
fuse_write_end, }; void fuse_init_file_inode(struct inode *inode, unsigned int flags) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index b54f4f57789f..ec248d13c8bf 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -913,12 +913,6 @@ struct fuse_conn { /** Device ID from the root super block */ dev_t dev; - /** Dentries in the control filesystem */ - struct dentry *ctl_dentry[FUSE_CTL_NUM_DENTRIES]; - - /** number of dentries used in the above array */ - int ctl_ndents; - /** Key for lock owner ID scrambling */ u32 scramble_key[4]; @@ -1109,7 +1103,6 @@ static inline void fuse_sync_bucket_dec(struct fuse_sync_bucket *bucket) extern const struct file_operations fuse_dev_operations; extern const struct dentry_operations fuse_dentry_operations; -extern const struct dentry_operations fuse_root_dentry_operations; /** * Get a filled in inode @@ -1486,9 +1479,9 @@ void fuse_dax_cancel_work(struct fuse_conn *fc); long fuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg); long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int fuse_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int fuse_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); /* iomode.c */ int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff); diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 9572bdef49ee..ecb869e895ab 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1719,7 +1719,7 @@ static int fuse_fill_super_submount(struct super_block *sb, fi = get_fuse_inode(root); fi->nlookup--; - sb->s_d_op = &fuse_dentry_operations; + set_default_d_op(sb, &fuse_dentry_operations); sb->s_root = d_make_root(root); if (!sb->s_root) return -ENOMEM; @@ -1854,12 +1854,10 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx) err = -ENOMEM; root = fuse_get_root_inode(sb, ctx->rootmode); - sb->s_d_op = &fuse_root_dentry_operations; + set_default_d_op(sb, &fuse_dentry_operations); root_dentry = d_make_root(root); if (!root_dentry) goto err_dev_free; - /* Root dentry doesn't have .d_revalidate */ - sb->s_d_op = &fuse_dentry_operations; mutex_lock(&fuse_mutex); err = -EINVAL; diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c index 2d9abf48828f..57032eadca6c 100644 --- a/fs/fuse/ioctl.c +++ b/fs/fuse/ioctl.c @@ -502,7 +502,7 @@ static void fuse_priv_ioctl_cleanup(struct inode *inode, struct fuse_file *ff) fuse_file_release(inode, ff, O_RDONLY, NULL, S_ISDIR(inode->i_mode)); } -int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int fuse_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct fuse_file *ff; @@ -536,11 +536,13 @@ int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa) cleanup: fuse_priv_ioctl_cleanup(inode, ff); + if (err == -ENOTTY) + err = -EOPNOTSUPP; return err; } int fuse_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct fuse_file *ff; @@ -572,5 +574,7 @@ int fuse_fileattr_set(struct mnt_idmap *idmap, cleanup: fuse_priv_ioctl_cleanup(inode, ff); + if (err == -ENOTTY) + err = -EOPNOTSUPP; return err; } diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 14f204cd5a82..47d74afd63ac 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -159,7 +159,11 @@ static int 
gfs2_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); - struct iomap_writepage_ctx wpc = { }; + struct iomap_writepage_ctx wpc = { + .inode = mapping->host, + .wbc = wbc, + .ops = &gfs2_writeback_ops, + }; int ret; /* @@ -168,7 +172,7 @@ static int gfs2_writepages(struct address_space *mapping, * want balance_dirty_pages() to loop indefinitely trying to write out * pages held in the ail that it can't find. */ - ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops); + ret = iomap_writepages(&wpc); if (ret == 0 && wbc->nr_to_write > 0) set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags); return ret; diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 7703d0471139..131091520de6 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -963,12 +963,16 @@ static struct folio * gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len) { struct inode *inode = iter->inode; + struct gfs2_inode *ip = GFS2_I(inode); unsigned int blockmask = i_blocksize(inode) - 1; struct gfs2_sbd *sdp = GFS2_SB(inode); unsigned int blocks; struct folio *folio; int status; + if (!gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip)) + return iomap_get_folio(iter, pos, len); + blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits; status = gfs2_trans_begin(sdp, RES_DINODE + blocks, 0); if (status) @@ -987,7 +991,7 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos, struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); - if (!gfs2_is_stuffed(ip)) + if (gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip)) gfs2_trans_add_databufs(ip->i_gl, folio, offset_in_folio(folio, pos), copied); @@ -995,13 +999,14 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos, folio_unlock(folio); folio_put(folio); - if (tr->tr_num_buf_new) - __mark_inode_dirty(inode, I_DIRTY_DATASYNC); - - gfs2_trans_end(sdp); + if (gfs2_is_jdata(ip) || gfs2_is_stuffed(ip)) { + if (tr->tr_num_buf_new) + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); + gfs2_trans_end(sdp); + } } -static const struct iomap_folio_ops gfs2_iomap_folio_ops = { +const struct iomap_write_ops gfs2_iomap_write_ops = { .get_folio = gfs2_iomap_get_folio, .put_folio = gfs2_iomap_put_folio, }; @@ -1078,8 +1083,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, gfs2_trans_end(sdp); } - if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip)) - iomap->folio_ops = &gfs2_iomap_folio_ops; return 0; out_trans_end: @@ -1304,7 +1307,7 @@ static int gfs2_block_zero_range(struct inode *inode, loff_t from, loff_t length return 0; length = min(length, inode->i_size - from); return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops, - NULL); + &gfs2_iomap_write_ops, NULL); } #define GFS2_JTRUNC_REVOKES 8192 @@ -2469,23 +2472,26 @@ out: return error; } -static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode, - loff_t offset, unsigned int len) +static ssize_t gfs2_writeback_range(struct iomap_writepage_ctx *wpc, + struct folio *folio, u64 offset, unsigned int len, u64 end_pos) { - int ret; - - if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode)))) + if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(wpc->inode)))) return -EIO; - if (offset >= wpc->iomap.offset && - offset < wpc->iomap.offset + wpc->iomap.length) - return 0; + if (offset < wpc->iomap.offset || + offset >= wpc->iomap.offset + wpc->iomap.length) { + int ret; - memset(&wpc->iomap, 0, sizeof(wpc->iomap)); - ret = gfs2_iomap_get(inode, offset, INT_MAX, &wpc->iomap); - return ret; + 
memset(&wpc->iomap, 0, sizeof(wpc->iomap)); + ret = gfs2_iomap_get(wpc->inode, offset, INT_MAX, &wpc->iomap); + if (ret) + return ret; + } + + return iomap_add_to_ioend(wpc, folio, offset, end_pos, len); } const struct iomap_writeback_ops gfs2_writeback_ops = { - .map_blocks = gfs2_map_blocks, + .writeback_range = gfs2_writeback_range, + .writeback_submit = iomap_ioend_writeback_submit, }; diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h index 4e8b1e8ebdf3..6cdc72dd55a3 100644 --- a/fs/gfs2/bmap.h +++ b/fs/gfs2/bmap.h @@ -44,6 +44,7 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip, } extern const struct iomap_ops gfs2_iomap_ops; +extern const struct iomap_write_ops gfs2_iomap_write_ops; extern const struct iomap_writeback_ops gfs2_writeback_ops; int gfs2_unstuff_dinode(struct gfs2_inode *ip); diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index dbf1aede744c..509e2f0d97e7 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -60,6 +60,7 @@ #include <linux/crc32.h> #include <linux/vmalloc.h> #include <linux/bio.h> +#include <linux/log2.h> #include "gfs2.h" #include "incore.h" @@ -912,7 +913,6 @@ static int dir_make_exhash(struct inode *inode) struct qstr args; struct buffer_head *bh, *dibh; struct gfs2_leaf *leaf; - int y; u32 x; __be64 *lp; u64 bn; @@ -979,9 +979,7 @@ static int dir_make_exhash(struct inode *inode) i_size_write(inode, sdp->sd_sb.sb_bsize / 2); gfs2_add_inode_blocks(&dip->i_inode, 1); dip->i_diskflags |= GFS2_DIF_EXHASH; - - for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ; - dip->i_depth = y; + dip->i_depth = ilog2(sdp->sd_hash_ptrs); gfs2_dinode_out(dip, dibh->b_data); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index fd1147aa3891..72d95185a39f 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -155,7 +155,7 @@ static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags) return fsflags; } -int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int gfs2_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct gfs2_inode *ip = GFS2_I(inode); @@ -276,7 +276,7 @@ out: } int gfs2_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); u32 fsflags = fa->flags, gfsflags = 0; @@ -1058,7 +1058,8 @@ retry: } pagefault_disable(); - ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL); + ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, + &gfs2_iomap_write_ops, NULL); pagefault_enable(); if (ret > 0) written += ret; diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index ba25b884169e..b6fd1cb17de7 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -590,35 +590,31 @@ static void gfs2_demote_wake(struct gfs2_glock *gl) static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) { const struct gfs2_glock_operations *glops = gl->gl_ops; - struct gfs2_holder *gh; - unsigned state = ret & LM_OUT_ST_MASK; - trace_gfs2_glock_state_change(gl, state); - state_change(gl, state); - gh = find_first_waiter(gl); + if (!(ret & ~LM_OUT_ST_MASK)) { + unsigned state = ret & LM_OUT_ST_MASK; + + trace_gfs2_glock_state_change(gl, state); + state_change(gl, state); + } + /* Demote to UN request arrived during demote to SH or DF */ if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && - state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) + gl->gl_state != LM_ST_UNLOCKED && + gl->gl_demote_state == LM_ST_UNLOCKED) gl->gl_target = LM_ST_UNLOCKED; /* 
Check for state != intended state */ - if (unlikely(state != gl->gl_target)) { - if (gh && (ret & LM_OUT_CANCELED)) - gfs2_holder_wake(gh); + if (unlikely(gl->gl_state != gl->gl_target)) { + struct gfs2_holder *gh = find_first_waiter(gl); + if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { if (ret & LM_OUT_CANCELED) { list_del_init(&gh->gh_list); trace_gfs2_glock_queue(gh, 0); + gfs2_holder_wake(gh); gl->gl_target = gl->gl_state; - gh = find_first_waiter(gl); - if (gh) { - gl->gl_target = gh->gh_state; - if (do_promote(gl)) - goto out; - do_xmote(gl, gh, gl->gl_target); - return; - } goto out; } /* Some error or failed "try lock" - report it */ @@ -629,7 +625,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) goto out; } } - switch(state) { + switch(gl->gl_state) { /* Unlocked due to conversion deadlock, try again */ case LM_ST_UNLOCKED: do_xmote(gl, gh, gl->gl_target); @@ -640,8 +636,10 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) do_xmote(gl, gh, LM_ST_UNLOCKED); break; default: /* Everything else */ - fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", - gl->gl_target, state); + fs_err(gl->gl_name.ln_sbd, + "glock %u:%llu requested=%u ret=%u\n", + gl->gl_name.ln_type, gl->gl_name.ln_number, + gl->gl_req, ret); GLOCK_BUG_ON(gl, 1); } return; @@ -650,7 +648,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) /* Fast path - we got what we asked for */ if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) gfs2_demote_wake(gl); - if (state != LM_ST_UNLOCKED) { + if (gl->gl_state != LM_ST_UNLOCKED) { if (glops->go_xmote_bh) { int rv; @@ -802,7 +800,8 @@ skip_inval: * We skip telling dlm to do the locking, so we won't get a * reply that would otherwise clear GLF_LOCK. So we clear it here. */ - clear_bit(GLF_LOCK, &gl->gl_flags); + if (!test_bit(GLF_CANCELING, &gl->gl_flags)) + clear_bit(GLF_LOCK, &gl->gl_flags); clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); return; diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index c171f745650f..9339a3bff6ee 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -92,12 +92,22 @@ enum { * LM_OUT_ST_MASK * Masks the lower two bits of lock state in the returned value. * + * LM_OUT_TRY_AGAIN + * The trylock request failed. + * + * LM_OUT_DEADLOCK + * The lock request failed because it would deadlock. + * * LM_OUT_CANCELED * The lock request was canceled. * + * LM_OUT_ERROR + * The lock request timed out or failed. 
*/ #define LM_OUT_ST_MASK 0x00000003 +#define LM_OUT_TRY_AGAIN 0x00000020 +#define LM_OUT_DEADLOCK 0x00000010 #define LM_OUT_CANCELED 0x00000008 #define LM_OUT_ERROR 0x00000004 diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index cebd66b22694..fe0faad4892f 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -11,6 +11,7 @@ #include <linux/bio.h> #include <linux/posix_acl.h> #include <linux/security.h> +#include <linux/log2.h> #include "gfs2.h" #include "incore.h" @@ -450,6 +451,11 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) gfs2_consist_inode(ip); return -EIO; } + if ((ip->i_diskflags & GFS2_DIF_EXHASH) && + depth < ilog2(sdp->sd_hash_ptrs)) { + gfs2_consist_inode(ip); + return -EIO; + } ip->i_depth = (u8)depth; ip->i_entries = be32_to_cpu(str->di_entries); diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 0a41c4e76b32..d4ad82f47eee 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -375,7 +375,6 @@ struct gfs2_glock { enum { GIF_QD_LOCKED = 1, - GIF_ALLOC_FAILED = 2, GIF_SW_PAGED = 3, GIF_FREE_VFS_INODE = 5, GIF_GLOP_PENDING = 6, diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 187d789a8f1e..8760e7e20c9d 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -444,11 +444,9 @@ static void gfs2_final_release_pages(struct gfs2_inode *ip) struct inode *inode = &ip->i_inode; struct gfs2_glock *gl = ip->i_gl; - if (unlikely(!gl)) { - /* This can only happen during incomplete inode creation. */ - BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)); + /* This can only happen during incomplete inode creation. */ + if (unlikely(!gl)) return; - } truncate_inode_pages(gfs2_glock2aspace(gl), 0); truncate_inode_pages(&inode->i_data, 0); @@ -902,7 +900,6 @@ fail_gunlock3: fail_gunlock2: gfs2_glock_put(io_gl); fail_dealloc_inode: - set_bit(GIF_ALLOC_FAILED, &ip->i_flags); dealloc_error = 0; if (ip->i_eattr) dealloc_error = gfs2_ea_dealloc(ip, xattr_initialized); diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index eafe123617e6..e43f08eb26e7 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h @@ -44,17 +44,17 @@ static inline int gfs2_is_dir(const struct gfs2_inode *ip) static inline void gfs2_set_inode_blocks(struct inode *inode, u64 blocks) { - inode->i_blocks = blocks << (inode->i_blkbits - 9); + inode->i_blocks = blocks << (inode->i_blkbits - SECTOR_SHIFT); } static inline u64 gfs2_get_inode_blocks(const struct inode *inode) { - return inode->i_blocks >> (inode->i_blkbits - 9); + return inode->i_blocks >> (inode->i_blkbits - SECTOR_SHIFT); } static inline void gfs2_add_inode_blocks(struct inode *inode, s64 change) { - change <<= inode->i_blkbits - 9; + change <<= inode->i_blkbits - SECTOR_SHIFT; gfs2_assert(GFS2_SB(inode), (change >= 0 || inode->i_blocks >= -change)); inode->i_blocks += change; } @@ -107,9 +107,9 @@ loff_t gfs2_seek_hole(struct file *file, loff_t offset); extern const struct file_operations gfs2_file_fops_nolock; extern const struct file_operations gfs2_dir_fops_nolock; -int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int gfs2_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int gfs2_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); void gfs2_set_inode_flags(struct inode *inode); #ifdef CONFIG_GFS2_FS_LOCKING_DLM diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 7cb9d216d8bb..cee5d199d2d8 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -119,7 +119,7 @@ static inline void gfs2_update_request_times(struct gfs2_glock 
*gl) static void gdlm_ast(void *arg) { struct gfs2_glock *gl = arg; - unsigned ret = gl->gl_state; + unsigned ret; /* If the glock is dead, we only react to a dlm_unlock() reply. */ if (__lockref_is_dead(&gl->gl_lockref) && @@ -139,13 +139,16 @@ static void gdlm_ast(void *arg) gfs2_glock_free(gl); return; case -DLM_ECANCEL: /* Cancel while getting lock */ - ret |= LM_OUT_CANCELED; + ret = LM_OUT_CANCELED; goto out; case -EAGAIN: /* Try lock fails */ + ret = LM_OUT_TRY_AGAIN; + goto out; case -EDEADLK: /* Deadlock detected */ + ret = LM_OUT_DEADLOCK; goto out; case -ETIMEDOUT: /* Canceled due to timeout */ - ret |= LM_OUT_ERROR; + ret = LM_OUT_ERROR; goto out; case 0: /* Success */ break; diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 9dc8885c95d0..7fb11ff71b5a 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -103,6 +103,7 @@ const struct address_space_operations gfs2_meta_aops = { .invalidate_folio = block_invalidate_folio, .writepages = gfs2_aspace_writepages, .release_folio = gfs2_release_folio, + .migrate_folio = buffer_migrate_folio_norefs, }; const struct address_space_operations gfs2_rgrp_aops = { @@ -110,6 +111,7 @@ const struct address_space_operations gfs2_rgrp_aops = { .invalidate_folio = block_invalidate_folio, .writepages = gfs2_aspace_writepages, .release_folio = gfs2_release_folio, + .migrate_folio = buffer_migrate_folio_norefs, }; /** @@ -228,7 +230,7 @@ static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num) struct bio *bio; bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO); - bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); + bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> SECTOR_SHIFT); while (num > 0) { bh = *bhs; if (!bio_add_folio(bio, bh->b_folio, bh->b_size, bh_offset(bh))) { @@ -443,11 +445,9 @@ void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen) struct buffer_head *bh; int ty; - if (!ip->i_gl) { - /* This can only happen during incomplete inode creation. */ - BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)); + /* This can only happen during incomplete inode creation. 
*/ + if (!ip->i_gl) return; - } gfs2_ail1_wipe(sdp, bstart, blen); while (blen) { diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 85c491fcf1a3..efe99b732551 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -163,7 +163,7 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent) return -EINVAL; } - if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE || + if (sb->sb_bsize < SECTOR_SIZE || sb->sb_bsize > PAGE_SIZE || (sb->sb_bsize & (sb->sb_bsize - 1))) { pr_warn("Invalid block size\n"); return -EINVAL; @@ -224,8 +224,8 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) if (unlikely(!sb)) return -ENOMEM; err = bdev_rw_virt(sdp->sd_vfs->s_bdev, - sector * (sdp->sd_vfs->s_blocksize >> 9), sb, PAGE_SIZE, - REQ_OP_READ | REQ_META); + sector << (sdp->sd_vfs->s_blocksize_bits - SECTOR_SHIFT), + sb, PAGE_SIZE, REQ_OP_READ | REQ_META); if (err) { pr_warn("error %d reading superblock\n", err); kfree(sb); @@ -257,7 +257,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent) return error; } - sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9; + sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - SECTOR_SHIFT; sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift); sdp->sd_diptrs = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) / sizeof(u64); @@ -1145,7 +1145,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_magic = GFS2_MAGIC; sb->s_op = &gfs2_super_ops; - sb->s_d_op = &gfs2_dops; + set_default_d_op(sb, &gfs2_dops); sb->s_export_op = &gfs2_export_ops; sb->s_qcop = &gfs2_quotactl_ops; sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; @@ -1155,12 +1155,12 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) /* Set up the buffer cache and fill in some fake block size values to allow us to read-in the on-disk superblock. */ - sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, 512); + sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, SECTOR_SIZE); error = -EINVAL; if (!sdp->sd_sb.sb_bsize) goto fail_free; sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits; - sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9; + sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - SECTOR_SHIFT; sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift); sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit; diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 7c518c4ff638..b42e2110084b 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -487,11 +487,9 @@ static void gfs2_dirty_inode(struct inode *inode, int flags) int need_endtrans = 0; int ret; - if (unlikely(!ip->i_gl)) { - /* This can only happen during incomplete inode creation. */ - BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)); + /* This can only happen during incomplete inode creation. */ + if (unlikely(!ip->i_gl)) return; - } if (gfs2_withdrawing_or_withdrawn(sdp)) return; diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index d5a1e63fa257..24864a66074b 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c @@ -232,32 +232,23 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp) */ ret = gfs2_glock_nq(&sdp->sd_live_gh); + gfs2_glock_put(live_gl); /* drop extra reference we acquired */ + clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags); + /* * If we actually got the "live" lock in EX mode, there are no other - * nodes available to replay our journal. So we try to replay it - * ourselves. We hold the "live" glock to prevent other mounters - * during recovery, then just dequeue it and reacquire it in our - * normal SH mode. 
Just in case the problem that caused us to - * withdraw prevents us from recovering our journal (e.g. io errors - * and such) we still check if the journal is clean before proceeding - * but we may wait forever until another mounter does the recovery. + * nodes available to replay our journal. */ if (ret == 0) { - fs_warn(sdp, "No other mounters found. Trying to recover our " - "own journal jid %d.\n", sdp->sd_lockstruct.ls_jid); - if (gfs2_recover_journal(sdp->sd_jdesc, 1)) - fs_warn(sdp, "Unable to recover our journal jid %d.\n", - sdp->sd_lockstruct.ls_jid); - gfs2_glock_dq_wait(&sdp->sd_live_gh); - gfs2_holder_reinit(LM_ST_SHARED, - LM_FLAG_NOEXP | GL_EXACT | GL_NOPID, - &sdp->sd_live_gh); - gfs2_glock_nq(&sdp->sd_live_gh); + fs_warn(sdp, "No other mounters found.\n"); + /* + * We are about to release the lockspace. By keeping live_gl + * locked here, we ensure that the next mounter coming along + * will be a "first" mounter which will perform recovery. + */ + goto skip_recovery; } - gfs2_glock_put(live_gl); /* drop extra reference we acquired */ - clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags); - /* * At this point our journal is evicted, so we need to get a new inode * for it. Once done, we need to call gfs2_find_jhead which diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c index ef9498a6e88a..34e9804e0f36 100644 --- a/fs/hfs/bfind.c +++ b/fs/hfs/bfind.c @@ -16,6 +16,9 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) { void *ptr; + if (!tree || !fd) + return -EINVAL; + fd->tree = tree; fd->bnode = NULL; ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL); diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index cb823a8a6ba9..e8cd1a31f247 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -15,6 +15,48 @@ #include "btree.h" +static inline +bool is_bnode_offset_valid(struct hfs_bnode *node, int off) +{ + bool is_valid = off < node->tree->node_size; + + if (!is_valid) { + pr_err("requested invalid offset: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d\n", + node->this, node->type, node->height, + node->tree->node_size, off); + } + + return is_valid; +} + +static inline +int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len) +{ + unsigned int node_size; + + if (!is_bnode_offset_valid(node, off)) + return 0; + + node_size = node->tree->node_size; + + if ((off + len) > node_size) { + int new_len = (int)node_size - off; + + pr_err("requested length has been corrected: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, " + "requested_len %d, corrected_len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len, new_len); + + return new_len; + } + + return len; +} + void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) { struct page *page; @@ -22,6 +64,20 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) int bytes_read; int bytes_to_read; + if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; pagenum = off >> PAGE_SHIFT; off &= ~PAGE_MASK; /* compute page offset for the first page */ @@ -80,6 +136,20 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) { struct page *page; + if (!is_bnode_offset_valid(node, off)) + 
return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; page = node->page[0]; @@ -104,6 +174,20 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) { struct page *page; + if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; page = node->page[0]; @@ -119,6 +203,10 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); if (!len) return; + + len = check_and_correct_requested_length(src_node, src, len); + len = check_and_correct_requested_length(dst_node, dst, len); + src += src_node->page_offset; dst += dst_node->page_offset; src_page = src_node->page[0]; @@ -136,6 +224,10 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); if (!len) return; + + len = check_and_correct_requested_length(node, src, len); + len = check_and_correct_requested_length(node, dst, len); + src += node->page_offset; dst += node->page_offset; page = node->page[0]; @@ -482,6 +574,7 @@ void hfs_bnode_put(struct hfs_bnode *node) if (test_bit(HFS_BNODE_DELETED, &node->flags)) { hfs_bnode_unhash(node); spin_unlock(&tree->hash_lock); + hfs_bnode_clear(node, 0, tree->node_size); hfs_bmap_free(node); hfs_bnode_free(node); return; diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c index 2fa4b1f8cc7f..e86e1e235658 100644 --- a/fs/hfs/btree.c +++ b/fs/hfs/btree.c @@ -21,8 +21,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke struct hfs_btree *tree; struct hfs_btree_header_rec *head; struct address_space *mapping; - struct page *page; + struct folio *folio; + struct buffer_head *bh; unsigned int size; + u16 dblock; + sector_t start_block; + loff_t offset; tree = kzalloc(sizeof(*tree), GFP_KERNEL); if (!tree) @@ -75,12 +79,40 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke unlock_new_inode(tree->inode); mapping = tree->inode->i_mapping; - page = read_mapping_page(mapping, 0, NULL); - if (IS_ERR(page)) + folio = filemap_grab_folio(mapping, 0); + if (IS_ERR(folio)) goto free_inode; + folio_zero_range(folio, 0, folio_size(folio)); + + dblock = hfs_ext_find_block(HFS_I(tree->inode)->first_extents, 0); + start_block = HFS_SB(sb)->fs_start + (dblock * HFS_SB(sb)->fs_div); + + size = folio_size(folio); + offset = 0; + while (size > 0) { + size_t len; + + bh = sb_bread(sb, start_block); + if (!bh) { + pr_err("unable to read tree header\n"); + goto put_folio; + } + + len = min_t(size_t, folio_size(folio), sb->s_blocksize); + memcpy_to_folio(folio, offset, bh->b_data, sb->s_blocksize); + + brelse(bh); + + start_block++; + offset += len; + size -= len; + } + + folio_mark_uptodate(folio); + /* Load the header */ - head = (struct hfs_btree_header_rec *)(kmap_local_page(page) + + head = (struct hfs_btree_header_rec *)(kmap_local_folio(folio, 0) + sizeof(struct hfs_bnode_desc)); tree->root = be32_to_cpu(head->root); tree->leaf_count = 
be32_to_cpu(head->leaf_count); @@ -95,22 +127,22 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke size = tree->node_size; if (!is_power_of_2(size)) - goto fail_page; + goto fail_folio; if (!tree->node_count) - goto fail_page; + goto fail_folio; switch (id) { case HFS_EXT_CNID: if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) { pr_err("invalid extent max_key_len %d\n", tree->max_key_len); - goto fail_page; + goto fail_folio; } break; case HFS_CAT_CNID: if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) { pr_err("invalid catalog max_key_len %d\n", tree->max_key_len); - goto fail_page; + goto fail_folio; } break; default: @@ -121,12 +153,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT; kunmap_local(head); - put_page(page); + folio_unlock(folio); + folio_put(folio); return tree; -fail_page: +fail_folio: kunmap_local(head); - put_page(page); +put_folio: + folio_unlock(folio); + folio_put(folio); free_inode: tree->inode->i_mapping->a_ops = &hfs_aops; iput(tree->inode); diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c index 4a0ce131e233..580c62981dbd 100644 --- a/fs/hfs/extent.c +++ b/fs/hfs/extent.c @@ -71,7 +71,7 @@ int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2) * * Find a block within an extent record */ -static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off) +u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off) { int i; u16 count; diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h index a0c7cb0f79fc..7c5a7ecfa246 100644 --- a/fs/hfs/hfs_fs.h +++ b/fs/hfs/hfs_fs.h @@ -190,6 +190,7 @@ extern const struct inode_operations hfs_dir_inode_operations; /* extent.c */ extern int hfs_ext_keycmp(const btree_key *, const btree_key *); +extern u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off); extern int hfs_free_fork(struct super_block *, struct hfs_cat_file *, int); extern int hfs_ext_write_extent(struct inode *); extern int hfs_extend_file(struct inode *); @@ -201,7 +202,7 @@ extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int); extern const struct address_space_operations hfs_aops; extern const struct address_space_operations hfs_btree_aops; -int hfs_write_begin(struct file *file, struct address_space *mapping, +int hfs_write_begin(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata); extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t); extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *); diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index a81ce7a740b9..bf4cb7e78396 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -44,12 +44,12 @@ static void hfs_write_failed(struct address_space *mapping, loff_t to) } } -int hfs_write_begin(struct file *file, struct address_space *mapping, +int hfs_write_begin(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { int ret; - ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata, + ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata, hfs_get_block, &HFS_I(mapping->host)->phys_size); if (unlikely(ret)) @@ -690,8 +690,9 @@ static const struct file_operations hfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, 
.splice_read = filemap_splice_read, + .splice_write = iter_file_splice_write, .fsync = hfs_file_fsync, .open = hfs_file_open, .release = hfs_file_release, diff --git a/fs/hfs/super.c b/fs/hfs/super.c index fe09c2093a93..388a318297ec 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -365,7 +365,7 @@ static int hfs_fill_super(struct super_block *sb, struct fs_context *fc) if (!root_inode) goto bail_no_root; - sb->s_d_op = &hfs_dentry_operations; + set_default_d_op(sb, &hfs_dentry_operations); res = -ENOMEM; sb->s_root = d_make_root(root_inode); if (!sb->s_root) diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c index 079ea80534f7..14f4995588ff 100644 --- a/fs/hfsplus/bnode.c +++ b/fs/hfsplus/bnode.c @@ -18,12 +18,68 @@ #include "hfsplus_fs.h" #include "hfsplus_raw.h" +static inline +bool is_bnode_offset_valid(struct hfs_bnode *node, int off) +{ + bool is_valid = off < node->tree->node_size; + + if (!is_valid) { + pr_err("requested invalid offset: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d\n", + node->this, node->type, node->height, + node->tree->node_size, off); + } + + return is_valid; +} + +static inline +int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len) +{ + unsigned int node_size; + + if (!is_bnode_offset_valid(node, off)) + return 0; + + node_size = node->tree->node_size; + + if ((off + len) > node_size) { + int new_len = (int)node_size - off; + + pr_err("requested length has been corrected: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, " + "requested_len %d, corrected_len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len, new_len); + + return new_len; + } + + return len; +} + /* Copy a specified range of bytes from the raw data of a node */ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) { struct page **pagep; int l; + if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); off &= ~PAGE_MASK; @@ -81,6 +137,20 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) struct page **pagep; int l; + if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); off &= ~PAGE_MASK; @@ -109,6 +179,20 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) struct page **pagep; int l; + if (!is_bnode_offset_valid(node, off)) + return; + + if (len == 0) { + pr_err("requested zero length: " + "NODE: id %u, type %#x, height %u, " + "node_size %u, offset %d, len %d\n", + node->this, node->type, node->height, + node->tree->node_size, off, len); + return; + } + + len = check_and_correct_requested_length(node, off, len); + off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); off &= ~PAGE_MASK; @@ -133,6 +217,10 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); if (!len) return; 
+ + len = check_and_correct_requested_length(src_node, src, len); + len = check_and_correct_requested_length(dst_node, dst, len); + src += src_node->page_offset; dst += dst_node->page_offset; src_page = src_node->page + (src >> PAGE_SHIFT); @@ -187,6 +275,10 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); if (!len) return; + + len = check_and_correct_requested_length(node, src, len); + len = check_and_correct_requested_length(node, dst, len); + src += node->page_offset; dst += node->page_offset; if (dst > src) { diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index a6d61685ae79..b1699b3c246a 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -342,9 +342,6 @@ static int hfsplus_free_extents(struct super_block *sb, int i; int err = 0; - /* Mapping the allocation file may lock the extent tree */ - WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock)); - hfsplus_dump_extent(extent); for (i = 0; i < 8; extent++, i++) { count = be32_to_cpu(extent->block_count); diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 2f089bff0095..96a5c24813dd 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -473,8 +473,10 @@ extern const struct address_space_operations hfsplus_aops; extern const struct address_space_operations hfsplus_btree_aops; extern const struct dentry_operations hfsplus_dentry_operations; -int hfsplus_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, struct folio **foliop, void **fsdata); +int hfsplus_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, struct folio **foliop, + void **fsdata); struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir, umode_t mode); void hfsplus_delete_inode(struct inode *inode); @@ -489,9 +491,9 @@ int hfsplus_getattr(struct mnt_idmap *idmap, const struct path *path, unsigned int query_flags); int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, int datasync); -int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int hfsplus_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int hfsplus_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); /* ioctl.c */ long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index f331e9574217..b51a411ecd23 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -38,12 +38,14 @@ static void hfsplus_write_failed(struct address_space *mapping, loff_t to) } } -int hfsplus_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, struct folio **foliop, void **fsdata) +int hfsplus_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, + unsigned len, struct folio **foliop, + void **fsdata) { int ret; - ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata, + ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata, hfsplus_get_block, &HFSPLUS_I(mapping->host)->phys_size); if (unlikely(ret)) @@ -366,8 +368,9 @@ static const struct file_operations hfsplus_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .splice_read = filemap_splice_read, + .splice_write = 
iter_file_splice_write, .fsync = hfsplus_file_fsync, .open = hfsplus_file_open, .release = hfsplus_file_release, @@ -654,7 +657,7 @@ out: return res; } -int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int hfsplus_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct hfsplus_inode_info *hip = HFSPLUS_I(inode); @@ -673,7 +676,7 @@ int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa) } int hfsplus_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct hfsplus_inode_info *hip = HFSPLUS_I(inode); diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 948b8aaee33e..86351bdc8985 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -222,8 +222,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait) error2 = hfsplus_submit_bio(sb, sbi->part_start + HFSPLUS_VOLHEAD_SECTOR, - sbi->s_vhdr_buf, NULL, REQ_OP_WRITE | - REQ_SYNC); + sbi->s_vhdr_buf, NULL, REQ_OP_WRITE); if (!error) error = error2; if (!write_backup) @@ -231,8 +230,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait) error2 = hfsplus_submit_bio(sb, sbi->part_start + sbi->sect_count - 2, - sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE | - REQ_SYNC); + sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE); if (!error) error2 = error; out: @@ -508,7 +506,7 @@ static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc) goto out_put_alloc_file; } - sb->s_d_op = &hfsplus_dentry_operations; + set_default_d_op(sb, &hfsplus_dentry_operations); sb->s_root = d_make_root(root); if (!sb->s_root) { err = -ENOMEM; diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c index 73342c925a4b..36b6cf2a3abb 100644 --- a/fs/hfsplus/unicode.c +++ b/fs/hfsplus/unicode.c @@ -132,7 +132,14 @@ int hfsplus_uni2asc(struct super_block *sb, op = astr; ip = ustr->unicode; + ustrlen = be16_to_cpu(ustr->length); + if (ustrlen > HFSPLUS_MAX_STRLEN) { + ustrlen = HFSPLUS_MAX_STRLEN; + pr_err("invalid length %u has been corrected to %d\n", + be16_to_cpu(ustr->length), ustrlen); + } + len = *len_p; ce1 = NULL; compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c index 9a1a93e3888b..18dc3d254d21 100644 --- a/fs/hfsplus/xattr.c +++ b/fs/hfsplus/xattr.c @@ -172,7 +172,11 @@ check_attr_tree_state_again: return PTR_ERR(attr_file); } - BUG_ON(i_size_read(attr_file) != 0); + if (i_size_read(attr_file) != 0) { + err = -EIO; + pr_err("detected inconsistent attributes file, running fsck.hfsplus is recommended.\n"); + goto end_attr_file_creation; + } hip = HFSPLUS_I(attr_file); diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 702c41317589..01e516175bcd 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -382,7 +382,7 @@ static const struct file_operations hostfs_file_fops = { .splice_write = iter_file_splice_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .open = hostfs_open, .release = hostfs_file_release, .fsync = hostfs_fsync, @@ -445,7 +445,8 @@ static int hostfs_read_folio(struct file *file, struct folio *folio) return ret; } -static int hostfs_write_begin(struct file *file, struct address_space *mapping, +static int hostfs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, 
struct folio **foliop, void **fsdata) { @@ -458,7 +459,8 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping, return 0; } -static int hostfs_write_end(struct file *file, struct address_space *mapping, +static int hostfs_write_end(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { @@ -468,7 +470,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping, int err; buffer = kmap_local_folio(folio, from); - err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer, copied); + err = write_file(FILE_HOSTFS_I(iocb->ki_filp)->fd, &pos, buffer, copied); kunmap_local(buffer); if (!folio_test_uptodate(folio) && err == folio_size(folio)) @@ -933,7 +935,7 @@ static int hostfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_blocksize_bits = 10; sb->s_magic = HOSTFS_SUPER_MAGIC; sb->s_op = &hostfs_sbops; - sb->s_d_op = &simple_dentry_operations; + sb->s_d_flags = DCACHE_DONTCACHE; sb->s_maxbytes = MAX_LFS_FILESIZE; err = super_setup_bdi(sb); if (err) diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 449a3fc1b8d9..263b5bbe1849 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -188,13 +188,14 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to) hpfs_unlock(inode->i_sb); } -static int hpfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int hpfs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { int ret; - ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata, + ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata, hpfs_get_block, &hpfs_i(mapping->host)->mmu_private); if (unlikely(ret)) @@ -203,13 +204,14 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping, return ret; } -static int hpfs_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata) +static int hpfs_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { struct inode *inode = mapping->host; int err; - err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); + err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata); if (err < len) hpfs_write_failed(mapping, pos + len); if (!(err < 0)) { @@ -255,7 +257,7 @@ const struct file_operations hpfs_file_ops = .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .release = hpfs_file_release, .fsync = hpfs_file_fsync, .splice_read = filemap_splice_read, diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 27567920abe4..42b779b4d87f 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -554,7 +554,7 @@ static int hpfs_fill_super(struct super_block *s, struct fs_context *fc) /* Fill superblock stuff */ s->s_magic = HPFS_SUPER_MAGIC; s->s_op = &hpfs_sops; - s->s_d_op = &hpfs_dentry_operations; + set_default_d_op(s, &hpfs_dentry_operations); s->s_time_min = local_to_gmt(s, 0); s->s_time_max = local_to_gmt(s, U32_MAX); diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index e4de5425838d..9ddd67da0eeb 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -311,7 
+311,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) return retval; } -static int hugetlbfs_write_begin(struct file *file, +static int hugetlbfs_write_begin(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) @@ -319,9 +319,10 @@ static int hugetlbfs_write_begin(struct file *file, return -EINVAL; } -static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata) +static int hugetlbfs_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { BUG(); return -EINVAL; @@ -1433,6 +1434,7 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_blocksize_bits = huge_page_shift(ctx->hstate); sb->s_magic = HUGETLBFS_MAGIC; sb->s_op = &hugetlbfs_ops; + sb->s_d_flags = DCACHE_DONTCACHE; sb->s_time_gran = 1; /* @@ -1587,7 +1589,7 @@ static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h) } else { struct hugetlbfs_fs_context *ctx = fc->fs_private; ctx->hstate = h; - mnt = fc_mount(fc); + mnt = fc_mount_longterm(fc); put_fs_context(fc); } if (IS_ERR(mnt)) diff --git a/fs/inode.c b/fs/inode.c index 99318b157a9a..01ebdc40021e 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -865,12 +865,12 @@ static void dispose_list(struct list_head *head) */ void evict_inodes(struct super_block *sb) { - struct inode *inode, *next; + struct inode *inode; LIST_HEAD(dispose); again: spin_lock(&sb->s_inode_list_lock); - list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { if (atomic_read(&inode->i_count)) continue; @@ -1158,9 +1158,8 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode) /* Set new key only if filesystem hasn't already changed it */ if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) { /* - * ensure nobody is actually holding i_mutex + * ensure nobody is actually holding i_rwsem */ - // mutex_destroy(&inode->i_mutex); init_rwsem(&inode->i_rwsem); lockdep_set_class(&inode->i_rwsem, &type->i_mutex_dir_key); @@ -2615,7 +2614,7 @@ EXPORT_SYMBOL(inode_dio_finished); * proceed with a truncate or equivalent operation. * * Must be called under a lock that serializes taking new references - * to i_dio_count, usually by inode->i_mutex. + * to i_dio_count, usually by inode->i_rwsem. */ void inode_dio_wait(struct inode *inode) { @@ -2633,7 +2632,7 @@ EXPORT_SYMBOL(inode_dio_wait_interruptible); /* * inode_set_flags - atomically set some inode flags * - * Note: the caller should be holding i_mutex, or else be sure that + * Note: the caller should be holding i_rwsem exclusively, or else be sure that * they have exclusive access to the inode structure (i.e., while the * inode is being instantiated). The reason for the cmpxchg() loop * --- which wouldn't be necessary if all code paths which modify @@ -2641,7 +2640,7 @@ EXPORT_SYMBOL(inode_dio_wait_interruptible); * code path which doesn't today so we use cmpxchg() out of an abundance * of caution. * - * In the long run, i_mutex is overkill, and we should probably look + * In the long run, i_rwsem is overkill, and we should probably look * at using the i_lock spinlock to protect i_flags, and then make sure * it is so documented in include/linux/fs.h and that all code follows * the locking convention!! 
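As background for the i_mutex to i_rwsem comment updates in the fs/inode.c hunks above, here is a minimal illustrative sketch of the convention those comments describe; example_shrink_inode() is a hypothetical caller and not part of this series, the helpers it uses are the long-standing VFS ones:

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative only: hold i_rwsem exclusively so that no new direct I/O
 * references to i_dio_count can be taken while inode_dio_wait() drains
 * the in-flight ones, as the reworded comments in fs/inode.c describe.
 */
static int example_shrink_inode(struct inode *inode, loff_t newsize)
{
	inode_lock(inode);		/* i_rwsem, exclusive */
	inode_dio_wait(inode);		/* drain direct I/O under the lock */
	truncate_setsize(inode, newsize);
	inode_unlock(inode);
	return 0;
}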
diff --git a/fs/internal.h b/fs/internal.h index 393f6c5c24f6..38e8aab27bbd 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -101,6 +101,7 @@ extern void chroot_fs_refs(const struct path *, const struct path *); struct file *alloc_empty_file(int flags, const struct cred *cred); struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred); struct file *alloc_empty_backing_file(int flags, const struct cred *cred); +void backing_file_set_user_path(struct file *f, const struct path *path); static inline void file_put_write_access(struct file *file) { @@ -322,12 +323,15 @@ struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns); struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap); void mnt_idmap_put(struct mnt_idmap *idmap); struct stashed_operations { + struct dentry *(*stash_dentry)(struct dentry **stashed, + struct dentry *dentry); void (*put_data)(void *data); int (*init_inode)(struct inode *inode, void *data); }; int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data, struct path *path); void stashed_dentry_prune(struct dentry *dentry); +struct dentry *stash_dentry(struct dentry **stashed, struct dentry *dentry); struct dentry *stashed_dentry_get(struct dentry **stashed); /** * path_mounted - check whether path is mounted @@ -350,3 +354,4 @@ int anon_inode_getattr(struct mnt_idmap *idmap, const struct path *path, unsigned int query_flags); int anon_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr); +void pidfs_get_root(struct path *path); diff --git a/fs/ioctl.c b/fs/ioctl.c index 69107a245b4c..0248cb8db2d3 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -453,315 +453,6 @@ out: return ret; } -/** - * fileattr_fill_xflags - initialize fileattr with xflags - * @fa: fileattr pointer - * @xflags: FS_XFLAG_* flags - * - * Set ->fsx_xflags, ->fsx_valid and ->flags (translated xflags). All - * other fields are zeroed. - */ -void fileattr_fill_xflags(struct fileattr *fa, u32 xflags) -{ - memset(fa, 0, sizeof(*fa)); - fa->fsx_valid = true; - fa->fsx_xflags = xflags; - if (fa->fsx_xflags & FS_XFLAG_IMMUTABLE) - fa->flags |= FS_IMMUTABLE_FL; - if (fa->fsx_xflags & FS_XFLAG_APPEND) - fa->flags |= FS_APPEND_FL; - if (fa->fsx_xflags & FS_XFLAG_SYNC) - fa->flags |= FS_SYNC_FL; - if (fa->fsx_xflags & FS_XFLAG_NOATIME) - fa->flags |= FS_NOATIME_FL; - if (fa->fsx_xflags & FS_XFLAG_NODUMP) - fa->flags |= FS_NODUMP_FL; - if (fa->fsx_xflags & FS_XFLAG_DAX) - fa->flags |= FS_DAX_FL; - if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT) - fa->flags |= FS_PROJINHERIT_FL; -} -EXPORT_SYMBOL(fileattr_fill_xflags); - -/** - * fileattr_fill_flags - initialize fileattr with flags - * @fa: fileattr pointer - * @flags: FS_*_FL flags - * - * Set ->flags, ->flags_valid and ->fsx_xflags (translated flags). - * All other fields are zeroed. 
- */ -void fileattr_fill_flags(struct fileattr *fa, u32 flags) -{ - memset(fa, 0, sizeof(*fa)); - fa->flags_valid = true; - fa->flags = flags; - if (fa->flags & FS_SYNC_FL) - fa->fsx_xflags |= FS_XFLAG_SYNC; - if (fa->flags & FS_IMMUTABLE_FL) - fa->fsx_xflags |= FS_XFLAG_IMMUTABLE; - if (fa->flags & FS_APPEND_FL) - fa->fsx_xflags |= FS_XFLAG_APPEND; - if (fa->flags & FS_NODUMP_FL) - fa->fsx_xflags |= FS_XFLAG_NODUMP; - if (fa->flags & FS_NOATIME_FL) - fa->fsx_xflags |= FS_XFLAG_NOATIME; - if (fa->flags & FS_DAX_FL) - fa->fsx_xflags |= FS_XFLAG_DAX; - if (fa->flags & FS_PROJINHERIT_FL) - fa->fsx_xflags |= FS_XFLAG_PROJINHERIT; -} -EXPORT_SYMBOL(fileattr_fill_flags); - -/** - * vfs_fileattr_get - retrieve miscellaneous file attributes - * @dentry: the object to retrieve from - * @fa: fileattr pointer - * - * Call i_op->fileattr_get() callback, if exists. - * - * Return: 0 on success, or a negative error on failure. - */ -int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) -{ - struct inode *inode = d_inode(dentry); - - if (!inode->i_op->fileattr_get) - return -ENOIOCTLCMD; - - return inode->i_op->fileattr_get(dentry, fa); -} -EXPORT_SYMBOL(vfs_fileattr_get); - -/** - * copy_fsxattr_to_user - copy fsxattr to userspace. - * @fa: fileattr pointer - * @ufa: fsxattr user pointer - * - * Return: 0 on success, or -EFAULT on failure. - */ -int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa) -{ - struct fsxattr xfa; - - memset(&xfa, 0, sizeof(xfa)); - xfa.fsx_xflags = fa->fsx_xflags; - xfa.fsx_extsize = fa->fsx_extsize; - xfa.fsx_nextents = fa->fsx_nextents; - xfa.fsx_projid = fa->fsx_projid; - xfa.fsx_cowextsize = fa->fsx_cowextsize; - - if (copy_to_user(ufa, &xfa, sizeof(xfa))) - return -EFAULT; - - return 0; -} -EXPORT_SYMBOL(copy_fsxattr_to_user); - -static int copy_fsxattr_from_user(struct fileattr *fa, - struct fsxattr __user *ufa) -{ - struct fsxattr xfa; - - if (copy_from_user(&xfa, ufa, sizeof(xfa))) - return -EFAULT; - - fileattr_fill_xflags(fa, xfa.fsx_xflags); - fa->fsx_extsize = xfa.fsx_extsize; - fa->fsx_nextents = xfa.fsx_nextents; - fa->fsx_projid = xfa.fsx_projid; - fa->fsx_cowextsize = xfa.fsx_cowextsize; - - return 0; -} - -/* - * Generic function to check FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS values and reject - * any invalid configurations. - * - * Note: must be called with inode lock held. - */ -static int fileattr_set_prepare(struct inode *inode, - const struct fileattr *old_ma, - struct fileattr *fa) -{ - int err; - - /* - * The IMMUTABLE and APPEND_ONLY flags can only be changed by - * the relevant capability. - */ - if ((fa->flags ^ old_ma->flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) && - !capable(CAP_LINUX_IMMUTABLE)) - return -EPERM; - - err = fscrypt_prepare_setflags(inode, old_ma->flags, fa->flags); - if (err) - return err; - - /* - * Project Quota ID state is only allowed to change from within the init - * namespace. Enforce that restriction only if we are trying to change - * the quota ID state. Everything else is allowed in user namespaces. - */ - if (current_user_ns() != &init_user_ns) { - if (old_ma->fsx_projid != fa->fsx_projid) - return -EINVAL; - if ((old_ma->fsx_xflags ^ fa->fsx_xflags) & - FS_XFLAG_PROJINHERIT) - return -EINVAL; - } else { - /* - * Caller is allowed to change the project ID. If it is being - * changed, make sure that the new value is valid. - */ - if (old_ma->fsx_projid != fa->fsx_projid && - !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid))) - return -EINVAL; - } - - /* Check extent size hints. 
*/ - if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode)) - return -EINVAL; - - if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) && - !S_ISDIR(inode->i_mode)) - return -EINVAL; - - if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) && - !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) - return -EINVAL; - - /* - * It is only valid to set the DAX flag on regular files and - * directories on filesystems. - */ - if ((fa->fsx_xflags & FS_XFLAG_DAX) && - !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) - return -EINVAL; - - /* Extent size hints of zero turn off the flags. */ - if (fa->fsx_extsize == 0) - fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT); - if (fa->fsx_cowextsize == 0) - fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE; - - return 0; -} - -/** - * vfs_fileattr_set - change miscellaneous file attributes - * @idmap: idmap of the mount - * @dentry: the object to change - * @fa: fileattr pointer - * - * After verifying permissions, call i_op->fileattr_set() callback, if - * exists. - * - * Verifying attributes involves retrieving current attributes with - * i_op->fileattr_get(), this also allows initializing attributes that have - * not been set by the caller to current values. Inode lock is held - * thoughout to prevent racing with another instance. - * - * Return: 0 on success, or a negative error on failure. - */ -int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, - struct fileattr *fa) -{ - struct inode *inode = d_inode(dentry); - struct fileattr old_ma = {}; - int err; - - if (!inode->i_op->fileattr_set) - return -ENOIOCTLCMD; - - if (!inode_owner_or_capable(idmap, inode)) - return -EPERM; - - inode_lock(inode); - err = vfs_fileattr_get(dentry, &old_ma); - if (!err) { - /* initialize missing bits from old_ma */ - if (fa->flags_valid) { - fa->fsx_xflags |= old_ma.fsx_xflags & ~FS_XFLAG_COMMON; - fa->fsx_extsize = old_ma.fsx_extsize; - fa->fsx_nextents = old_ma.fsx_nextents; - fa->fsx_projid = old_ma.fsx_projid; - fa->fsx_cowextsize = old_ma.fsx_cowextsize; - } else { - fa->flags |= old_ma.flags & ~FS_COMMON_FL; - } - err = fileattr_set_prepare(inode, &old_ma, fa); - if (!err) - err = inode->i_op->fileattr_set(idmap, dentry, fa); - } - inode_unlock(inode); - - return err; -} -EXPORT_SYMBOL(vfs_fileattr_set); - -static int ioctl_getflags(struct file *file, unsigned int __user *argp) -{ - struct fileattr fa = { .flags_valid = true }; /* hint only */ - int err; - - err = vfs_fileattr_get(file->f_path.dentry, &fa); - if (!err) - err = put_user(fa.flags, argp); - return err; -} - -static int ioctl_setflags(struct file *file, unsigned int __user *argp) -{ - struct mnt_idmap *idmap = file_mnt_idmap(file); - struct dentry *dentry = file->f_path.dentry; - struct fileattr fa; - unsigned int flags; - int err; - - err = get_user(flags, argp); - if (!err) { - err = mnt_want_write_file(file); - if (!err) { - fileattr_fill_flags(&fa, flags); - err = vfs_fileattr_set(idmap, dentry, &fa); - mnt_drop_write_file(file); - } - } - return err; -} - -static int ioctl_fsgetxattr(struct file *file, void __user *argp) -{ - struct fileattr fa = { .fsx_valid = true }; /* hint only */ - int err; - - err = vfs_fileattr_get(file->f_path.dentry, &fa); - if (!err) - err = copy_fsxattr_to_user(&fa, argp); - - return err; -} - -static int ioctl_fssetxattr(struct file *file, void __user *argp) -{ - struct mnt_idmap *idmap = file_mnt_idmap(file); - struct dentry *dentry = file->f_path.dentry; - struct fileattr fa; - int err; - - err = copy_fsxattr_from_user(&fa, argp); - if (!err) { - 
err = mnt_want_write_file(file); - if (!err) { - err = vfs_fileattr_set(idmap, dentry, &fa); - mnt_drop_write_file(file); - } - } - return err; -} - static int ioctl_getfsuuid(struct file *file, void __user *argp) { struct super_block *sb = file_inode(file)->i_sb; diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile index 69e8ebb41302..f7e1c8534c46 100644 --- a/fs/iomap/Makefile +++ b/fs/iomap/Makefile @@ -9,9 +9,9 @@ ccflags-y += -I $(src) # needed for trace events obj-$(CONFIG_FS_IOMAP) += iomap.o iomap-y += trace.o \ - iter.o -iomap-$(CONFIG_BLOCK) += buffered-io.o \ - direct-io.o \ + iter.o \ + buffered-io.o +iomap-$(CONFIG_BLOCK) += direct-io.o \ ioend.o \ fiemap.o \ seek.o diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index fb4519158f3a..fd827398afd2 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -3,20 +3,11 @@ * Copyright (C) 2010 Red Hat, Inc. * Copyright (C) 2016-2023 Christoph Hellwig. */ -#include <linux/module.h> -#include <linux/compiler.h> -#include <linux/fs.h> #include <linux/iomap.h> -#include <linux/pagemap.h> -#include <linux/uio.h> #include <linux/buffer_head.h> -#include <linux/dax.h> #include <linux/writeback.h> #include <linux/swap.h> -#include <linux/bio.h> -#include <linux/sched/signal.h> #include <linux/migrate.h> -#include "internal.h" #include "trace.h" #include "../internal.h" @@ -287,6 +278,46 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, *lenp = plen; } +static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, + loff_t pos) +{ + const struct iomap *srcmap = iomap_iter_srcmap(iter); + + return srcmap->type != IOMAP_MAPPED || + (srcmap->flags & IOMAP_F_NEW) || + pos >= i_size_read(iter->inode); +} + +/** + * iomap_read_inline_data - copy inline data into the page cache + * @iter: iteration structure + * @folio: folio to copy to + * + * Copy the inline data in @iter into @folio and zero out the rest of the folio. + * Only a single IOMAP_INLINE extent is allowed at the end of each file. + * Returns zero for success to complete the read, or the usual negative errno. + */ +static int iomap_read_inline_data(const struct iomap_iter *iter, + struct folio *folio) +{ + const struct iomap *iomap = iomap_iter_srcmap(iter); + size_t size = i_size_read(iter->inode) - iomap->offset; + size_t offset = offset_in_folio(folio, iomap->offset); + + if (folio_test_uptodate(folio)) + return 0; + + if (WARN_ON_ONCE(size > iomap->length)) + return -EIO; + if (offset > 0) + ifs_alloc(iter->inode, folio, iter->flags); + + folio_fill_tail(folio, offset, iomap->inline_data, size); + iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset); + return 0; +} + +#ifdef CONFIG_BLOCK static void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len, int error) { @@ -326,45 +357,6 @@ struct iomap_readpage_ctx { struct readahead_control *rac; }; -/** - * iomap_read_inline_data - copy inline data into the page cache - * @iter: iteration structure - * @folio: folio to copy to - * - * Copy the inline data in @iter into @folio and zero out the rest of the folio. - * Only a single IOMAP_INLINE extent is allowed at the end of each file. - * Returns zero for success to complete the read, or the usual negative errno. 
- */ -static int iomap_read_inline_data(const struct iomap_iter *iter, - struct folio *folio) -{ - const struct iomap *iomap = iomap_iter_srcmap(iter); - size_t size = i_size_read(iter->inode) - iomap->offset; - size_t offset = offset_in_folio(folio, iomap->offset); - - if (folio_test_uptodate(folio)) - return 0; - - if (WARN_ON_ONCE(size > iomap->length)) - return -EIO; - if (offset > 0) - ifs_alloc(iter->inode, folio, iter->flags); - - folio_fill_tail(folio, offset, iomap->inline_data, size); - iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset); - return 0; -} - -static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, - loff_t pos) -{ - const struct iomap *srcmap = iomap_iter_srcmap(iter); - - return srcmap->type != IOMAP_MAPPED || - (srcmap->flags & IOMAP_F_NEW) || - pos >= i_size_read(iter->inode); -} - static int iomap_readpage_iter(struct iomap_iter *iter, struct iomap_readpage_ctx *ctx) { @@ -557,6 +549,27 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) } EXPORT_SYMBOL_GPL(iomap_readahead); +static int iomap_read_folio_range(const struct iomap_iter *iter, + struct folio *folio, loff_t pos, size_t len) +{ + const struct iomap *srcmap = iomap_iter_srcmap(iter); + struct bio_vec bvec; + struct bio bio; + + bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ); + bio.bi_iter.bi_sector = iomap_sector(srcmap, pos); + bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos)); + return submit_bio_wait(&bio); +} +#else +static int iomap_read_folio_range(const struct iomap_iter *iter, + struct folio *folio, loff_t pos, size_t len) +{ + WARN_ON_ONCE(1); + return -EIO; +} +#endif /* CONFIG_BLOCK */ + /* * iomap_is_partially_uptodate checks whether blocks within a folio are * uptodate or not. 
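As background: the new iomap_read_folio_range() above is the CONFIG_BLOCK default for synchronously filling a folio sub-range before a partial write, and the following hunks let a filesystem override it through a ->read_folio_range hook on the new write ops. A rough sketch of what a non-block filesystem's hook might look like, matching the signature shown above; examplefs_read_folio_range() and examplefs_fetch() are made-up names, not part of this series:

#include <linux/highmem.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical hook: fill folio bytes [pos, pos + len) from the
 * filesystem's own transport instead of building a bio.
 */
static int examplefs_read_folio_range(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	void *buf;
	int err;

	buf = kmalloc(len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	/* examplefs_fetch() stands in for a real wire/RPC read */
	err = examplefs_fetch(iter->inode, pos, buf, len);
	if (!err)
		memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, len);
	kfree(buf);
	return err;
}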
@@ -670,22 +683,10 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) pos + len - 1); } -static int iomap_read_folio_sync(loff_t block_start, struct folio *folio, - size_t poff, size_t plen, const struct iomap *iomap) -{ - struct bio_vec bvec; - struct bio bio; - - bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ); - bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); - bio_add_folio_nofail(&bio, folio, plen, poff); - return submit_bio_wait(&bio); -} - -static int __iomap_write_begin(const struct iomap_iter *iter, size_t len, +static int __iomap_write_begin(const struct iomap_iter *iter, + const struct iomap_write_ops *write_ops, size_t len, struct folio *folio) { - const struct iomap *srcmap = iomap_iter_srcmap(iter); struct iomap_folio_state *ifs; loff_t pos = iter->pos; loff_t block_size = i_blocksize(iter->inode); @@ -734,8 +735,12 @@ static int __iomap_write_begin(const struct iomap_iter *iter, size_t len, if (iter->flags & IOMAP_NOWAIT) return -EAGAIN; - status = iomap_read_folio_sync(block_start, folio, - poff, plen, srcmap); + if (write_ops && write_ops->read_folio_range) + status = write_ops->read_folio_range(iter, + folio, block_start, plen); + else + status = iomap_read_folio_range(iter, + folio, block_start, plen); if (status) return status; } @@ -745,28 +750,27 @@ static int __iomap_write_begin(const struct iomap_iter *iter, size_t len, return 0; } -static struct folio *__iomap_get_folio(struct iomap_iter *iter, size_t len) +static struct folio *__iomap_get_folio(struct iomap_iter *iter, + const struct iomap_write_ops *write_ops, size_t len) { - const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; loff_t pos = iter->pos; if (!mapping_large_folio_support(iter->inode->i_mapping)) len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos)); - if (folio_ops && folio_ops->get_folio) - return folio_ops->get_folio(iter, pos, len); - else - return iomap_get_folio(iter, pos, len); + if (write_ops && write_ops->get_folio) + return write_ops->get_folio(iter, pos, len); + return iomap_get_folio(iter, pos, len); } -static void __iomap_put_folio(struct iomap_iter *iter, size_t ret, +static void __iomap_put_folio(struct iomap_iter *iter, + const struct iomap_write_ops *write_ops, size_t ret, struct folio *folio) { - const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; loff_t pos = iter->pos; - if (folio_ops && folio_ops->put_folio) { - folio_ops->put_folio(iter->inode, pos, ret, folio); + if (write_ops && write_ops->put_folio) { + write_ops->put_folio(iter->inode, pos, ret, folio); } else { folio_unlock(folio); folio_put(folio); @@ -803,10 +807,10 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter, * offset, and length. Callers can optionally pass a max length *plen, * otherwise init to zero. 
*/ -static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop, +static int iomap_write_begin(struct iomap_iter *iter, + const struct iomap_write_ops *write_ops, struct folio **foliop, size_t *poffset, u64 *plen) { - const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t pos = iter->pos; u64 len = min_t(u64, SIZE_MAX, iomap_length(iter)); @@ -821,7 +825,7 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop, if (fatal_signal_pending(current)) return -EINTR; - folio = __iomap_get_folio(iter, len); + folio = __iomap_get_folio(iter, write_ops, len); if (IS_ERR(folio)) return PTR_ERR(folio); @@ -835,8 +839,8 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop, * could do the wrong thing here (zero a page range incorrectly or fail * to zero) and corrupt data. */ - if (folio_ops && folio_ops->iomap_valid) { - bool iomap_valid = folio_ops->iomap_valid(iter->inode, + if (write_ops && write_ops->iomap_valid) { + bool iomap_valid = write_ops->iomap_valid(iter->inode, &iter->iomap); if (!iomap_valid) { iter->iomap.flags |= IOMAP_F_STALE; @@ -852,7 +856,7 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop, else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) status = __block_write_begin_int(folio, pos, len, NULL, srcmap); else - status = __iomap_write_begin(iter, len, folio); + status = __iomap_write_begin(iter, write_ops, len, folio); if (unlikely(status)) goto out_unlock; @@ -862,8 +866,7 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop, return 0; out_unlock: - __iomap_put_folio(iter, 0, folio); - + __iomap_put_folio(iter, write_ops, 0, folio); return status; } @@ -926,8 +929,7 @@ static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied, if (srcmap->flags & IOMAP_F_BUFFER_HEAD) { size_t bh_written; - bh_written = block_write_end(NULL, iter->inode->i_mapping, pos, - len, copied, folio, NULL); + bh_written = block_write_end(pos, len, copied, folio); WARN_ON_ONCE(bh_written != copied && bh_written != 0); return bh_written == copied; } @@ -935,7 +937,8 @@ static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied, return __iomap_write_end(iter->inode, pos, len, copied, folio); } -static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) +static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i, + const struct iomap_write_ops *write_ops) { ssize_t total_written = 0; int status = 0; @@ -979,7 +982,8 @@ retry: break; } - status = iomap_write_begin(iter, &folio, &offset, &bytes); + status = iomap_write_begin(iter, write_ops, &folio, &offset, + &bytes); if (unlikely(status)) { iomap_write_failed(iter->inode, iter->pos, bytes); break; @@ -1008,7 +1012,7 @@ retry: i_size_write(iter->inode, pos + written); iter->iomap.flags |= IOMAP_F_SIZE_CHANGED; } - __iomap_put_folio(iter, written, folio); + __iomap_put_folio(iter, write_ops, written, folio); if (old_size < pos) pagecache_isize_extended(iter->inode, old_size, pos); @@ -1041,7 +1045,8 @@ retry: ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i, - const struct iomap_ops *ops, void *private) + const struct iomap_ops *ops, + const struct iomap_write_ops *write_ops, void *private) { struct iomap_iter iter = { .inode = iocb->ki_filp->f_mapping->host, @@ -1058,7 +1063,7 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i, iter.flags |= IOMAP_DONTCACHE; while ((ret = 
iomap_iter(&iter, ops)) > 0) - iter.status = iomap_write_iter(&iter, i); + iter.status = iomap_write_iter(&iter, i, write_ops); if (unlikely(iter.pos == iocb->ki_pos)) return ret; @@ -1292,7 +1297,8 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte, } EXPORT_SYMBOL_GPL(iomap_write_delalloc_release); -static int iomap_unshare_iter(struct iomap_iter *iter) +static int iomap_unshare_iter(struct iomap_iter *iter, + const struct iomap_write_ops *write_ops) { struct iomap *iomap = &iter->iomap; u64 bytes = iomap_length(iter); @@ -1307,14 +1313,15 @@ static int iomap_unshare_iter(struct iomap_iter *iter) bool ret; bytes = min_t(u64, SIZE_MAX, bytes); - status = iomap_write_begin(iter, &folio, &offset, &bytes); + status = iomap_write_begin(iter, write_ops, &folio, &offset, + &bytes); if (unlikely(status)) return status; if (iomap->flags & IOMAP_F_STALE) break; ret = iomap_write_end(iter, bytes, bytes, folio); - __iomap_put_folio(iter, bytes, folio); + __iomap_put_folio(iter, write_ops, bytes, folio); if (WARN_ON_ONCE(!ret)) return -EIO; @@ -1332,7 +1339,8 @@ static int iomap_unshare_iter(struct iomap_iter *iter) int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, - const struct iomap_ops *ops) + const struct iomap_ops *ops, + const struct iomap_write_ops *write_ops) { struct iomap_iter iter = { .inode = inode, @@ -1347,7 +1355,7 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, iter.len = min(len, size - pos); while ((ret = iomap_iter(&iter, ops)) > 0) - iter.status = iomap_unshare_iter(&iter); + iter.status = iomap_unshare_iter(&iter, write_ops); return ret; } EXPORT_SYMBOL_GPL(iomap_file_unshare); @@ -1366,7 +1374,8 @@ static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i) return filemap_write_and_wait_range(mapping, i->pos, end); } -static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) +static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero, + const struct iomap_write_ops *write_ops) { u64 bytes = iomap_length(iter); int status; @@ -1377,7 +1386,8 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) bool ret; bytes = min_t(u64, SIZE_MAX, bytes); - status = iomap_write_begin(iter, &folio, &offset, &bytes); + status = iomap_write_begin(iter, write_ops, &folio, &offset, + &bytes); if (status) return status; if (iter->iomap.flags & IOMAP_F_STALE) @@ -1390,7 +1400,7 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) folio_mark_accessed(folio); ret = iomap_write_end(iter, bytes, bytes, folio); - __iomap_put_folio(iter, bytes, folio); + __iomap_put_folio(iter, write_ops, bytes, folio); if (WARN_ON_ONCE(!ret)) return -EIO; @@ -1406,7 +1416,8 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, - const struct iomap_ops *ops, void *private) + const struct iomap_ops *ops, + const struct iomap_write_ops *write_ops, void *private) { struct iomap_iter iter = { .inode = inode, @@ -1436,7 +1447,8 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) { iter.len = plen; while ((ret = iomap_iter(&iter, ops)) > 0) - iter.status = iomap_zero_iter(&iter, did_zero); + iter.status = iomap_zero_iter(&iter, did_zero, + write_ops); iter.len = len - (iter.pos - pos); if (ret || !iter.len) @@ -1467,7 +1479,7 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, continue; } - 
iter.status = iomap_zero_iter(&iter, did_zero); + iter.status = iomap_zero_iter(&iter, did_zero, write_ops); } return ret; } @@ -1475,7 +1487,8 @@ EXPORT_SYMBOL_GPL(iomap_zero_range); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, - const struct iomap_ops *ops, void *private) + const struct iomap_ops *ops, + const struct iomap_write_ops *write_ops, void *private) { unsigned int blocksize = i_blocksize(inode); unsigned int off = pos & (blocksize - 1); @@ -1484,7 +1497,7 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, if (!off) return 0; return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops, - private); + write_ops, private); } EXPORT_SYMBOL_GPL(iomap_truncate_page); @@ -1538,280 +1551,54 @@ out_unlock: } EXPORT_SYMBOL_GPL(iomap_page_mkwrite); -static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, +void iomap_start_folio_write(struct inode *inode, struct folio *folio, size_t len) { struct iomap_folio_state *ifs = folio->private; WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs); - WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0); - - if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending)) - folio_end_writeback(folio); -} - -/* - * We're now finished for good with this ioend structure. Update the page - * state, release holds on bios, and finally free up memory. Do not use the - * ioend after this. - */ -u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend) -{ - struct inode *inode = ioend->io_inode; - struct bio *bio = &ioend->io_bio; - struct folio_iter fi; - u32 folio_count = 0; - - if (ioend->io_error) { - mapping_set_error(inode->i_mapping, ioend->io_error); - if (!bio_flagged(bio, BIO_QUIET)) { - pr_err_ratelimited( -"%s: writeback error on inode %lu, offset %lld, sector %llu", - inode->i_sb->s_id, inode->i_ino, - ioend->io_offset, ioend->io_sector); - } - } - - /* walk all folios in bio, ending page IO on them */ - bio_for_each_folio_all(fi, bio) { - iomap_finish_folio_write(inode, fi.folio, fi.length); - folio_count++; - } - - bio_put(bio); /* frees the ioend */ - return folio_count; -} - -static void iomap_writepage_end_bio(struct bio *bio) -{ - struct iomap_ioend *ioend = iomap_ioend_from_bio(bio); - - ioend->io_error = blk_status_to_errno(bio->bi_status); - iomap_finish_ioend_buffered(ioend); -} - -/* - * Submit an ioend. - * - * If @error is non-zero, it means that we have a situation where some part of - * the submission process has failed after we've marked pages for writeback. - * We cannot cancel ioend directly in that case, so call the bio end I/O handler - * with the error status here to run the normal I/O completion handler to clear - * the writeback bit and let the file system proess the errors. - */ -static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error) -{ - if (!wpc->ioend) - return error; - - /* - * Let the file systems prepare the I/O submission and hook in an I/O - * comletion handler. This also needs to happen in case after a - * failure happened so that the file system end I/O handler gets called - * to clean up. 
- */ - if (wpc->ops->submit_ioend) { - error = wpc->ops->submit_ioend(wpc, error); - } else { - if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE)) - error = -EIO; - if (!error) - submit_bio(&wpc->ioend->io_bio); - } - - if (error) { - wpc->ioend->io_bio.bi_status = errno_to_blk_status(error); - bio_endio(&wpc->ioend->io_bio); - } - - wpc->ioend = NULL; - return error; -} - -static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc, - struct writeback_control *wbc, struct inode *inode, loff_t pos, - u16 ioend_flags) -{ - struct bio *bio; - - bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, - REQ_OP_WRITE | wbc_to_write_flags(wbc), - GFP_NOFS, &iomap_ioend_bioset); - bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos); - bio->bi_end_io = iomap_writepage_end_bio; - bio->bi_write_hint = inode->i_write_hint; - wbc_init_bio(wbc, bio); - wpc->nr_folios = 0; - return iomap_init_ioend(inode, bio, pos, ioend_flags); -} - -static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos, - u16 ioend_flags) -{ - if (ioend_flags & IOMAP_IOEND_BOUNDARY) - return false; - if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) != - (wpc->ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS)) - return false; - if (pos != wpc->ioend->io_offset + wpc->ioend->io_size) - return false; - if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) && - iomap_sector(&wpc->iomap, pos) != - bio_end_sector(&wpc->ioend->io_bio)) - return false; - /* - * Limit ioend bio chain lengths to minimise IO completion latency. This - * also prevents long tight loops ending page writeback on all the - * folios in the ioend. - */ - if (wpc->nr_folios >= IOEND_BATCH_SIZE) - return false; - return true; + if (ifs) + atomic_add(len, &ifs->write_bytes_pending); } +EXPORT_SYMBOL_GPL(iomap_start_folio_write); -/* - * Test to see if we have an existing ioend structure that we could append to - * first; otherwise finish off the current ioend and start another. - * - * If a new ioend is created and cached, the old ioend is submitted to the block - * layer instantly. Batching optimisations are provided by higher level block - * plugging. - * - * At the end of a writeback pass, there will be a cached ioend remaining on the - * writepage context that the caller will need to submit. - */ -static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, - struct writeback_control *wbc, struct folio *folio, - struct inode *inode, loff_t pos, loff_t end_pos, - unsigned len) +void iomap_finish_folio_write(struct inode *inode, struct folio *folio, + size_t len) { struct iomap_folio_state *ifs = folio->private; - size_t poff = offset_in_folio(folio, pos); - unsigned int ioend_flags = 0; - int error; - - if (wpc->iomap.type == IOMAP_UNWRITTEN) - ioend_flags |= IOMAP_IOEND_UNWRITTEN; - if (wpc->iomap.flags & IOMAP_F_SHARED) - ioend_flags |= IOMAP_IOEND_SHARED; - if (folio_test_dropbehind(folio)) - ioend_flags |= IOMAP_IOEND_DONTCACHE; - if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY)) - ioend_flags |= IOMAP_IOEND_BOUNDARY; - - if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) { -new_ioend: - error = iomap_submit_ioend(wpc, 0); - if (error) - return error; - wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos, - ioend_flags); - } - if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff)) - goto new_ioend; - - if (ifs) - atomic_add(len, &ifs->write_bytes_pending); - - /* - * Clamp io_offset and io_size to the incore EOF so that ondisk - * file size updates in the ioend completion are byte-accurate. 
- * This avoids recovering files with zeroed tail regions when - * writeback races with appending writes: - * - * Thread 1: Thread 2: - * ------------ ----------- - * write [A, A+B] - * update inode size to A+B - * submit I/O [A, A+BS] - * write [A+B, A+B+C] - * update inode size to A+B+C - * <I/O completes, updates disk size to min(A+B+C, A+BS)> - * <power failure> - * - * After reboot: - * 1) with A+B+C < A+BS, the file has zero padding in range - * [A+B, A+B+C] - * - * |< Block Size (BS) >| - * |DDDDDDDDDDDD0000000000000| - * ^ ^ ^ - * A A+B A+B+C - * (EOF) - * - * 2) with A+B+C > A+BS, the file has zero padding in range - * [A+B, A+BS] - * - * |< Block Size (BS) >|< Block Size (BS) >| - * |DDDDDDDDDDDD0000000000000|00000000000000000000000000| - * ^ ^ ^ ^ - * A A+B A+BS A+B+C - * (EOF) - * - * D = Valid Data - * 0 = Zero Padding - * - * Note that this defeats the ability to chain the ioends of - * appending writes. - */ - wpc->ioend->io_size += len; - if (wpc->ioend->io_offset + wpc->ioend->io_size > end_pos) - wpc->ioend->io_size = end_pos - wpc->ioend->io_offset; + WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs); + WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0); - wbc_account_cgroup_owner(wbc, folio, len); - return 0; + if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending)) + folio_end_writeback(folio); } +EXPORT_SYMBOL_GPL(iomap_finish_folio_write); -static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc, - struct writeback_control *wbc, struct folio *folio, - struct inode *inode, u64 pos, u64 end_pos, - unsigned dirty_len, unsigned *count) +static int iomap_writeback_range(struct iomap_writepage_ctx *wpc, + struct folio *folio, u64 pos, u32 rlen, u64 end_pos, + bool *wb_pending) { - int error; - do { - unsigned map_len; + ssize_t ret; - error = wpc->ops->map_blocks(wpc, inode, pos, dirty_len); - if (error) - break; - trace_iomap_writepage_map(inode, pos, dirty_len, &wpc->iomap); - - map_len = min_t(u64, dirty_len, - wpc->iomap.offset + wpc->iomap.length - pos); - WARN_ON_ONCE(!folio->private && map_len < dirty_len); + ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos); + if (WARN_ON_ONCE(ret == 0 || ret > rlen)) + return -EIO; + if (ret < 0) + return ret; + rlen -= ret; + pos += ret; - switch (wpc->iomap.type) { - case IOMAP_INLINE: - WARN_ON_ONCE(1); - error = -EIO; - break; - case IOMAP_HOLE: - break; - default: - error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos, - end_pos, map_len); - if (!error) - (*count)++; - break; - } - dirty_len -= map_len; - pos += map_len; - } while (dirty_len && !error); + /* + * Holes are not written back by ->writeback_range, so track + * if we did handle anything that is not a hole here. + */ + if (wpc->iomap.type != IOMAP_HOLE) + *wb_pending = true; + } while (rlen); - /* - * We cannot cancel the ioend directly here on error. We may have - * already set other pages under writeback and hence we have to run I/O - * completion to mark the error state of the pages under writeback - * appropriately. - * - * Just let the file system know what portion of the folio failed to - * map. - */ - if (error && wpc->ops->discard_folio) - wpc->ops->discard_folio(folio, pos); - return error; + return 0; } /* @@ -1820,7 +1607,7 @@ static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc, * If the folio is entirely beyond i_size, return false. If it straddles * i_size, adjust end_pos and zero all data beyond i_size. 
*/ -static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode, +static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode, u64 *end_pos) { u64 isize = i_size_read(inode); @@ -1872,15 +1659,14 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode, return true; } -static int iomap_writepage_map(struct iomap_writepage_ctx *wpc, - struct writeback_control *wbc, struct folio *folio) +int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio) { struct iomap_folio_state *ifs = folio->private; - struct inode *inode = folio->mapping->host; + struct inode *inode = wpc->inode; u64 pos = folio_pos(folio); u64 end_pos = pos + folio_size(folio); u64 end_aligned = 0; - unsigned count = 0; + bool wb_pending = false; int error = 0; u32 rlen; @@ -1888,12 +1674,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc, WARN_ON_ONCE(folio_test_dirty(folio)); WARN_ON_ONCE(folio_test_writeback(folio)); - trace_iomap_writepage(inode, pos, folio_size(folio)); + trace_iomap_writeback_folio(inode, pos, folio_size(folio)); - if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) { - folio_unlock(folio); + if (!iomap_writeback_handle_eof(folio, inode, &end_pos)) return 0; - } WARN_ON_ONCE(end_pos <= pos); if (i_blocks_per_folio(inode, folio) > 1) { @@ -1909,7 +1693,7 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc, * all blocks. */ WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0); - atomic_inc(&ifs->write_bytes_pending); + iomap_start_folio_write(inode, folio, 1); } /* @@ -1923,14 +1707,14 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc, */ end_aligned = round_up(end_pos, i_blocksize(inode)); while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) { - error = iomap_writepage_map_blocks(wpc, wbc, folio, inode, - pos, end_pos, rlen, &count); + error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos, + &wb_pending); if (error) break; pos += rlen; } - if (count) + if (wb_pending) wpc->nr_folios++; /* @@ -1947,23 +1731,22 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc, * already at this point. In that case we need to clear the writeback * bit ourselves right after unlocking the page. */ - folio_unlock(folio); if (ifs) { if (atomic_dec_and_test(&ifs->write_bytes_pending)) folio_end_writeback(folio); } else { - if (!count) + if (!wb_pending) folio_end_writeback(folio); } mapping_set_error(inode->i_mapping, error); return error; } +EXPORT_SYMBOL_GPL(iomap_writeback_folio); int -iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, - struct iomap_writepage_ctx *wpc, - const struct iomap_writeback_ops *ops) +iomap_writepages(struct iomap_writepage_ctx *wpc) { + struct address_space *mapping = wpc->inode->i_mapping; struct folio *folio = NULL; int error; @@ -1975,9 +1758,22 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, PF_MEMALLOC)) return -EIO; - wpc->ops = ops; - while ((folio = writeback_iter(mapping, wbc, folio, &error))) - error = iomap_writepage_map(wpc, wbc, folio); - return iomap_submit_ioend(wpc, error); + while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) { + error = iomap_writeback_folio(wpc, folio); + folio_unlock(folio); + } + + /* + * If @error is non-zero, it means that we have a situation where some + * part of the submission process has failed after we've marked pages + * for writeback. 
+ * + * We cannot cancel the writeback directly in that case, so always call + * ->writeback_submit to run the I/O completion handler to clear the + * writeback bit and let the file system process the errors. + */ + if (wpc->wb_ctx) + return wpc->ops->writeback_submit(wpc, error); + return error; } EXPORT_SYMBOL_GPL(iomap_writepages); diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 844261a31156..6f25d4cfea9f 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -3,14 +3,9 @@ * Copyright (C) 2010 Red Hat, Inc. * Copyright (c) 2016-2025 Christoph Hellwig. */ -#include <linux/module.h> -#include <linux/compiler.h> -#include <linux/fs.h> #include <linux/fscrypt.h> #include <linux/pagemap.h> #include <linux/iomap.h> -#include <linux/backing-dev.h> -#include <linux/uio.h> #include <linux/task_io_accounting_ops.h> #include "internal.h" #include "trace.h" diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c index 80675c42e94e..d11dadff8286 100644 --- a/fs/iomap/fiemap.c +++ b/fs/iomap/fiemap.c @@ -2,9 +2,6 @@ /* * Copyright (c) 2016-2021 Christoph Hellwig. */ -#include <linux/module.h> -#include <linux/compiler.h> -#include <linux/fs.h> #include <linux/iomap.h> #include <linux/fiemap.h> #include <linux/pagemap.h> diff --git a/fs/iomap/internal.h b/fs/iomap/internal.h index f6992a3bf66a..d05cb3aed96e 100644 --- a/fs/iomap/internal.h +++ b/fs/iomap/internal.h @@ -4,7 +4,6 @@ #define IOEND_BATCH_SIZE 4096 -u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend); u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend); #endif /* _IOMAP_INTERNAL_H */ diff --git a/fs/iomap/ioend.c b/fs/iomap/ioend.c index 18894ebba6db..b49fa75eab26 100644 --- a/fs/iomap/ioend.c +++ b/fs/iomap/ioend.c @@ -1,10 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2024-2025 Christoph Hellwig. + * Copyright (c) 2016-2025 Christoph Hellwig. */ #include <linux/iomap.h> #include <linux/list_sort.h> +#include <linux/pagemap.h> +#include <linux/writeback.h> #include "internal.h" +#include "trace.h" struct bio_set iomap_ioend_bioset; EXPORT_SYMBOL_GPL(iomap_ioend_bioset); @@ -28,6 +31,221 @@ struct iomap_ioend *iomap_init_ioend(struct inode *inode, } EXPORT_SYMBOL_GPL(iomap_init_ioend); +/* + * We're now finished for good with this ioend structure. Update the folio + * state, release holds on bios, and finally free up memory. Do not use the + * ioend after this. + */ +static u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend) +{ + struct inode *inode = ioend->io_inode; + struct bio *bio = &ioend->io_bio; + struct folio_iter fi; + u32 folio_count = 0; + + if (ioend->io_error) { + mapping_set_error(inode->i_mapping, ioend->io_error); + if (!bio_flagged(bio, BIO_QUIET)) { + pr_err_ratelimited( +"%s: writeback error on inode %lu, offset %lld, sector %llu", + inode->i_sb->s_id, inode->i_ino, + ioend->io_offset, ioend->io_sector); + } + } + + /* walk all folios in bio, ending page IO on them */ + bio_for_each_folio_all(fi, bio) { + iomap_finish_folio_write(inode, fi.folio, fi.length); + folio_count++; + } + + bio_put(bio); /* frees the ioend */ + return folio_count; +} + +static void ioend_writeback_end_bio(struct bio *bio) +{ + struct iomap_ioend *ioend = iomap_ioend_from_bio(bio); + + ioend->io_error = blk_status_to_errno(bio->bi_status); + iomap_finish_ioend_buffered(ioend); +} + +/* + * We cannot cancel the ioend directly in case of an error, so call the bio end + * I/O handler with the error status here to run the normal I/O completion + * handler. 
+ */ +int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error) +{ + struct iomap_ioend *ioend = wpc->wb_ctx; + + if (!ioend->io_bio.bi_end_io) + ioend->io_bio.bi_end_io = ioend_writeback_end_bio; + + if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE)) + error = -EIO; + + if (error) { + ioend->io_bio.bi_status = errno_to_blk_status(error); + bio_endio(&ioend->io_bio); + return error; + } + + submit_bio(&ioend->io_bio); + return 0; +} +EXPORT_SYMBOL_GPL(iomap_ioend_writeback_submit); + +static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc, + loff_t pos, u16 ioend_flags) +{ + struct bio *bio; + + bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, + REQ_OP_WRITE | wbc_to_write_flags(wpc->wbc), + GFP_NOFS, &iomap_ioend_bioset); + bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos); + bio->bi_write_hint = wpc->inode->i_write_hint; + wbc_init_bio(wpc->wbc, bio); + wpc->nr_folios = 0; + return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags); +} + +static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos, + u16 ioend_flags) +{ + struct iomap_ioend *ioend = wpc->wb_ctx; + + if (ioend_flags & IOMAP_IOEND_BOUNDARY) + return false; + if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) != + (ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS)) + return false; + if (pos != ioend->io_offset + ioend->io_size) + return false; + if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) && + iomap_sector(&wpc->iomap, pos) != bio_end_sector(&ioend->io_bio)) + return false; + /* + * Limit ioend bio chain lengths to minimise IO completion latency. This + * also prevents long tight loops ending page writeback on all the + * folios in the ioend. + */ + if (wpc->nr_folios >= IOEND_BATCH_SIZE) + return false; + return true; +} + +/* + * Test to see if we have an existing ioend structure that we could append to + * first; otherwise finish off the current ioend and start another. + * + * If a new ioend is created and cached, the old ioend is submitted to the block + * layer instantly. Batching optimisations are provided by higher level block + * plugging. + * + * At the end of a writeback pass, there will be a cached ioend remaining on the + * writepage context that the caller will need to submit. 
+ */ +ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio, + loff_t pos, loff_t end_pos, unsigned int dirty_len) +{ + struct iomap_ioend *ioend = wpc->wb_ctx; + size_t poff = offset_in_folio(folio, pos); + unsigned int ioend_flags = 0; + unsigned int map_len = min_t(u64, dirty_len, + wpc->iomap.offset + wpc->iomap.length - pos); + int error; + + trace_iomap_add_to_ioend(wpc->inode, pos, dirty_len, &wpc->iomap); + + WARN_ON_ONCE(!folio->private && map_len < dirty_len); + + switch (wpc->iomap.type) { + case IOMAP_INLINE: + WARN_ON_ONCE(1); + return -EIO; + case IOMAP_HOLE: + return map_len; + default: + break; + } + + if (wpc->iomap.type == IOMAP_UNWRITTEN) + ioend_flags |= IOMAP_IOEND_UNWRITTEN; + if (wpc->iomap.flags & IOMAP_F_SHARED) + ioend_flags |= IOMAP_IOEND_SHARED; + if (folio_test_dropbehind(folio)) + ioend_flags |= IOMAP_IOEND_DONTCACHE; + if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY)) + ioend_flags |= IOMAP_IOEND_BOUNDARY; + + if (!ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) { +new_ioend: + if (ioend) { + error = wpc->ops->writeback_submit(wpc, 0); + if (error) + return error; + } + wpc->wb_ctx = ioend = iomap_alloc_ioend(wpc, pos, ioend_flags); + } + + if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff)) + goto new_ioend; + + iomap_start_folio_write(wpc->inode, folio, map_len); + + /* + * Clamp io_offset and io_size to the incore EOF so that ondisk + * file size updates in the ioend completion are byte-accurate. + * This avoids recovering files with zeroed tail regions when + * writeback races with appending writes: + * + * Thread 1: Thread 2: + * ------------ ----------- + * write [A, A+B] + * update inode size to A+B + * submit I/O [A, A+BS] + * write [A+B, A+B+C] + * update inode size to A+B+C + * <I/O completes, updates disk size to min(A+B+C, A+BS)> + * <power failure> + * + * After reboot: + * 1) with A+B+C < A+BS, the file has zero padding in range + * [A+B, A+B+C] + * + * |< Block Size (BS) >| + * |DDDDDDDDDDDD0000000000000| + * ^ ^ ^ + * A A+B A+B+C + * (EOF) + * + * 2) with A+B+C > A+BS, the file has zero padding in range + * [A+B, A+BS] + * + * |< Block Size (BS) >|< Block Size (BS) >| + * |DDDDDDDDDDDD0000000000000|00000000000000000000000000| + * ^ ^ ^ ^ + * A A+B A+BS A+B+C + * (EOF) + * + * D = Valid Data + * 0 = Zero Padding + * + * Note that this defeats the ability to chain the ioends of + * appending writes. + */ + ioend->io_size += map_len; + if (ioend->io_offset + ioend->io_size > end_pos) + ioend->io_size = end_pos - ioend->io_offset; + + wbc_account_cgroup_owner(wpc->wbc, folio, map_len); + return map_len; +} +EXPORT_SYMBOL_GPL(iomap_add_to_ioend); + static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error) { if (ioend->io_parent) { diff --git a/fs/iomap/iter.c b/fs/iomap/iter.c index 6ffc6a7b9ba5..cef77ca0c20b 100644 --- a/fs/iomap/iter.c +++ b/fs/iomap/iter.c @@ -3,7 +3,6 @@ * Copyright (C) 2010 Red Hat, Inc. * Copyright (c) 2016-2021 Christoph Hellwig. */ -#include <linux/fs.h> #include <linux/iomap.h> #include "trace.h" diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c index 04d7919636c1..56db2dd4b10d 100644 --- a/fs/iomap/seek.c +++ b/fs/iomap/seek.c @@ -3,12 +3,8 @@ * Copyright (C) 2017 Red Hat, Inc. * Copyright (c) 2018-2021 Christoph Hellwig. 
*/ -#include <linux/module.h> -#include <linux/compiler.h> -#include <linux/fs.h> #include <linux/iomap.h> #include <linux/pagemap.h> -#include <linux/pagevec.h> static int iomap_seek_hole_iter(struct iomap_iter *iter, loff_t *hole_pos) diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c index c1a762c10ce4..0db77c449467 100644 --- a/fs/iomap/swapfile.c +++ b/fs/iomap/swapfile.c @@ -3,9 +3,6 @@ * Copyright (C) 2018 Oracle. All Rights Reserved. * Author: Darrick J. Wong <darrick.wong@oracle.com> */ -#include <linux/module.h> -#include <linux/compiler.h> -#include <linux/fs.h> #include <linux/iomap.h> #include <linux/swap.h> diff --git a/fs/iomap/trace.c b/fs/iomap/trace.c index 728d5443daf5..da217246b1a9 100644 --- a/fs/iomap/trace.c +++ b/fs/iomap/trace.c @@ -3,7 +3,6 @@ * Copyright (c) 2019 Christoph Hellwig */ #include <linux/iomap.h> -#include <linux/uio.h> /* * We include this last to have the helpers above available for the trace diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h index 455cc6f90be0..6ad66e6ba653 100644 --- a/fs/iomap/trace.h +++ b/fs/iomap/trace.h @@ -79,7 +79,7 @@ DECLARE_EVENT_CLASS(iomap_range_class, DEFINE_EVENT(iomap_range_class, name, \ TP_PROTO(struct inode *inode, loff_t off, u64 len),\ TP_ARGS(inode, off, len)) -DEFINE_RANGE_EVENT(iomap_writepage); +DEFINE_RANGE_EVENT(iomap_writeback_folio); DEFINE_RANGE_EVENT(iomap_release_folio); DEFINE_RANGE_EVENT(iomap_invalidate_folio); DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail); @@ -169,7 +169,7 @@ DEFINE_EVENT(iomap_class, name, \ DEFINE_IOMAP_EVENT(iomap_iter_dstmap); DEFINE_IOMAP_EVENT(iomap_iter_srcmap); -TRACE_EVENT(iomap_writepage_map, +TRACE_EVENT(iomap_add_to_ioend, TP_PROTO(struct inode *inode, u64 pos, unsigned int dirty_len, struct iomap *iomap), TP_ARGS(inode, pos, dirty_len, iomap), diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 33e6a620c103..6f0e6b19383c 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -939,7 +939,7 @@ root_found: sbi->s_check = opt->check; if (table) - s->s_d_op = &isofs_dentry_ops[table - 1]; + set_default_d_op(s, &isofs_dentry_ops[table - 1]); /* get the root dentry */ s->s_root = d_make_root(inode); diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 13c18ccc13b0..dd3dff95cb24 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -21,12 +21,14 @@ #include <linux/jffs2.h> #include "nodelist.h" -static int jffs2_write_end(struct file *filp, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata); -static int jffs2_write_begin(struct file *filp, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata); +static int jffs2_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata); +static int jffs2_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata); static int jffs2_read_folio(struct file *filp, struct folio *folio); int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync) @@ -54,7 +56,7 @@ const struct file_operations jffs2_file_operations = .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .unlocked_ioctl=jffs2_ioctl, - .mmap = generic_file_readonly_mmap, + .mmap_prepare = generic_file_readonly_mmap_prepare, .fsync = jffs2_fsync, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, @@ -121,9 +123,10 @@ static int 
jffs2_read_folio(struct file *file, struct folio *folio) return ret; } -static int jffs2_write_begin(struct file *filp, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int jffs2_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { struct folio *folio; struct inode *inode = mapping->host; @@ -235,9 +238,10 @@ out_err: return ret; } -static int jffs2_write_end(struct file *filp, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata) +static int jffs2_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { /* Actually commit the write from the page cache page we're looking at. * For now, we write the full page out each time. It sucks, but it's simple diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 01b6912e60f8..5e47951db630 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -143,7 +143,7 @@ const struct file_operations jfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .fsync = jfs_fsync, diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 60fc92dee24d..083e7fa54709 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -290,9 +290,10 @@ static void jfs_write_failed(struct address_space *mapping, loff_t to) } } -static int jfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int jfs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { int ret; @@ -303,13 +304,14 @@ static int jfs_write_begin(struct file *file, struct address_space *mapping, return ret; } -static int jfs_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, struct folio *folio, - void *fsdata) +static int jfs_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { int ret; - ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); + ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata); if (ret < len) jfs_write_failed(mapping, pos + len); return ret; diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c index f7bd7e8f5be4..563f148be8af 100644 --- a/fs/jfs/ioctl.c +++ b/fs/jfs/ioctl.c @@ -57,7 +57,7 @@ static long jfs_map_ext2(unsigned long flags, int from) return mapped; } -int jfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int jfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct jfs_inode_info *jfs_inode = JFS_IP(d_inode(dentry)); unsigned int flags = jfs_inode->mode2 & JFS_FL_USER_VISIBLE; @@ -71,7 +71,7 @@ int jfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) } int jfs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct jfs_inode_info *jfs_inode = JFS_IP(inode); diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index ea80661597ac..2c6c81c8cb9f 100644 --- a/fs/jfs/jfs_inode.h +++ 
b/fs/jfs/jfs_inode.h @@ -9,9 +9,9 @@ struct fid; extern struct inode *ialloc(struct inode *, umode_t); extern int jfs_fsync(struct file *, loff_t, loff_t, int); -extern int jfs_fileattr_get(struct dentry *dentry, struct fileattr *fa); +extern int jfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa); extern int jfs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); extern long jfs_ioctl(struct file *, unsigned int, unsigned long); extern struct inode *jfs_iget(struct super_block *, unsigned long); extern int jfs_commit_inode(struct inode *, int); diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 10368c188c5e..3cfb86c5a36e 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -542,7 +542,7 @@ static int jfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_magic = JFS_SUPER_MAGIC; if (sbi->mntflag & JFS_OS2) - sb->s_d_op = &jfs_ci_dentry_operations; + set_default_d_op(sb, &jfs_ci_dentry_operations); inode = jfs_iget(sb, ROOT_I); if (IS_ERR(inode)) { diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index b83054da68b3..457f91c412d4 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -24,45 +24,46 @@ static const struct inode_operations kernfs_iops = { .listxattr = kernfs_iop_listxattr, }; -static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, int alloc) +static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, bool alloc) { - static DEFINE_MUTEX(iattr_mutex); - struct kernfs_iattrs *ret; + struct kernfs_iattrs *ret __free(kfree) = NULL; + struct kernfs_iattrs *attr; - mutex_lock(&iattr_mutex); + attr = READ_ONCE(kn->iattr); + if (attr || !alloc) + return attr; - if (kn->iattr || !alloc) - goto out_unlock; - - kn->iattr = kmem_cache_zalloc(kernfs_iattrs_cache, GFP_KERNEL); - if (!kn->iattr) - goto out_unlock; + ret = kmem_cache_zalloc(kernfs_iattrs_cache, GFP_KERNEL); + if (!ret) + return NULL; /* assign default attributes */ - kn->iattr->ia_uid = GLOBAL_ROOT_UID; - kn->iattr->ia_gid = GLOBAL_ROOT_GID; - - ktime_get_real_ts64(&kn->iattr->ia_atime); - kn->iattr->ia_mtime = kn->iattr->ia_atime; - kn->iattr->ia_ctime = kn->iattr->ia_atime; - - simple_xattrs_init(&kn->iattr->xattrs); - atomic_set(&kn->iattr->nr_user_xattrs, 0); - atomic_set(&kn->iattr->user_xattr_size, 0); -out_unlock: - ret = kn->iattr; - mutex_unlock(&iattr_mutex); - return ret; + ret->ia_uid = GLOBAL_ROOT_UID; + ret->ia_gid = GLOBAL_ROOT_GID; + + ktime_get_real_ts64(&ret->ia_atime); + ret->ia_mtime = ret->ia_atime; + ret->ia_ctime = ret->ia_atime; + + simple_xattrs_init(&ret->xattrs); + atomic_set(&ret->nr_user_xattrs, 0); + atomic_set(&ret->user_xattr_size, 0); + + /* If someone raced us, recognize it. 
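[Aside: the rewritten __kernfs_iattrs() below drops the global iattr_mutex in favour of a lock-free publish; whoever loses the try_cmpxchg() race frees its own copy and uses the winner's. A minimal user-space sketch of the same lazy-init pattern, assuming C11 atomics (all names invented):]

#include <stdatomic.h>
#include <stdlib.h>

struct attrs { unsigned int uid, gid; };
struct node  { _Atomic(struct attrs *) attrs; };

static struct attrs *lazy_attrs(struct node *n)
{
	struct attrs *cur = atomic_load(&n->attrs);
	struct attrs *fresh;

	if (cur)			/* fast path: already allocated */
		return cur;

	fresh = calloc(1, sizeof(*fresh));
	if (!fresh)
		return NULL;
	fresh->uid = 0;			/* fill in defaults before publishing */
	fresh->gid = 0;

	/* publish; on failure 'cur' is updated to the winner's pointer */
	if (!atomic_compare_exchange_strong(&n->attrs, &cur, fresh)) {
		free(fresh);		/* lost the race: use the other copy */
		return cur;
	}
	return fresh;
}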
*/ + if (!try_cmpxchg(&kn->iattr, &attr, ret)) + return READ_ONCE(kn->iattr); + + return no_free_ptr(ret); } static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn) { - return __kernfs_iattrs(kn, 1); + return __kernfs_iattrs(kn, true); } static struct kernfs_iattrs *kernfs_iattrs_noalloc(struct kernfs_node *kn) { - return __kernfs_iattrs(kn, 0); + return __kernfs_iattrs(kn, false); } int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr) @@ -166,9 +167,10 @@ static inline void set_inode_attr(struct inode *inode, static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode) { - struct kernfs_iattrs *attrs = kn->iattr; + struct kernfs_iattrs *attrs; inode->i_mode = kn->mode; + attrs = kernfs_iattrs_noalloc(kn); if (attrs) /* * kernfs_node has non-default attributes get them from @@ -306,7 +308,9 @@ int kernfs_xattr_set(struct kernfs_node *kn, const char *name, const void *value, size_t size, int flags) { struct simple_xattr *old_xattr; - struct kernfs_iattrs *attrs = kernfs_iattrs(kn); + struct kernfs_iattrs *attrs; + + attrs = kernfs_iattrs(kn); if (!attrs) return -ENOMEM; @@ -345,8 +349,9 @@ static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn, struct simple_xattrs *xattrs, const void *value, size_t size, int flags) { - atomic_t *sz = &kn->iattr->user_xattr_size; - atomic_t *nr = &kn->iattr->nr_user_xattrs; + struct kernfs_iattrs *attr = kernfs_iattrs_noalloc(kn); + atomic_t *sz = &attr->user_xattr_size; + atomic_t *nr = &attr->nr_user_xattrs; struct simple_xattr *old_xattr; int ret; @@ -384,8 +389,9 @@ static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn, struct simple_xattrs *xattrs, const void *value, size_t size, int flags) { - atomic_t *sz = &kn->iattr->user_xattr_size; - atomic_t *nr = &kn->iattr->nr_user_xattrs; + struct kernfs_iattrs *attr = kernfs_iattrs_noalloc(kn); + atomic_t *sz = &attr->user_xattr_size; + atomic_t *nr = &attr->nr_user_xattrs; struct simple_xattr *old_xattr; old_xattr = simple_xattr_set(xattrs, full_name, value, size, flags); diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index c1719b5778a1..e384a69fbece 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -318,7 +318,7 @@ static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *k return -ENOMEM; } sb->s_root = root; - sb->s_d_op = &kernfs_dops; + set_default_d_op(sb, &kernfs_dops); return 0; } diff --git a/fs/libfs.c b/fs/libfs.c index 6f487fc6be34..ce8c496a6940 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -62,11 +62,6 @@ int always_delete_dentry(const struct dentry *dentry) } EXPORT_SYMBOL(always_delete_dentry); -const struct dentry_operations simple_dentry_operations = { - .d_delete = always_delete_dentry, -}; -EXPORT_SYMBOL(simple_dentry_operations); - /* * Lookup the data. This is trivial - if the dentry didn't already * exist, we know it is negative. Set d_op to delete negative dentries. 
@@ -75,9 +70,11 @@ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned { if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); - if (!dentry->d_sb->s_d_op) - d_set_d_op(dentry, &simple_dentry_operations); - + if (!dentry->d_op && !(dentry->d_flags & DCACHE_DONTCACHE)) { + spin_lock(&dentry->d_lock); + dentry->d_flags |= DCACHE_DONTCACHE; + spin_unlock(&dentry->d_lock); + } if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir)) return NULL; @@ -605,15 +602,16 @@ struct dentry *find_next_child(struct dentry *parent, struct dentry *prev) } EXPORT_SYMBOL(find_next_child); -void simple_recursive_removal(struct dentry *dentry, - void (*callback)(struct dentry *)) +static void __simple_recursive_removal(struct dentry *dentry, + void (*callback)(struct dentry *), + bool locked) { struct dentry *this = dget(dentry); while (true) { struct dentry *victim = NULL, *child; struct inode *inode = this->d_inode; - inode_lock(inode); + inode_lock_nested(inode, I_MUTEX_CHILD); if (d_is_dir(this)) inode->i_flags |= S_DEAD; while ((child = find_next_child(this, victim)) == NULL) { @@ -625,15 +623,13 @@ void simple_recursive_removal(struct dentry *dentry, victim = this; this = this->d_parent; inode = this->d_inode; - inode_lock(inode); + if (!locked || victim != dentry) + inode_lock_nested(inode, I_MUTEX_CHILD); if (simple_positive(victim)) { d_invalidate(victim); // avoid lost mounts - if (d_is_dir(victim)) - fsnotify_rmdir(inode, victim); - else - fsnotify_unlink(inode, victim); if (callback) callback(victim); + fsnotify_delete(inode, d_inode(victim), victim); dput(victim); // unpin it } if (victim == dentry) { @@ -641,7 +637,8 @@ void simple_recursive_removal(struct dentry *dentry, inode_set_ctime_current(inode)); if (d_is_dir(dentry)) drop_nlink(inode); - inode_unlock(inode); + if (!locked) + inode_unlock(inode); dput(dentry); return; } @@ -650,8 +647,22 @@ void simple_recursive_removal(struct dentry *dentry, this = child; } } + +void simple_recursive_removal(struct dentry *dentry, + void (*callback)(struct dentry *)) +{ + return __simple_recursive_removal(dentry, callback, false); +} EXPORT_SYMBOL(simple_recursive_removal); +/* caller holds parent directory with I_MUTEX_PARENT */ +void locked_recursive_removal(struct dentry *dentry, + void (*callback)(struct dentry *)) +{ + return __simple_recursive_removal(dentry, callback, true); +} +EXPORT_SYMBOL(locked_recursive_removal); + static const struct super_operations simple_super_operations = { .statfs = simple_statfs, }; @@ -684,7 +695,7 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc) s->s_root = d_make_root(root); if (!s->s_root) return -ENOMEM; - s->s_d_op = ctx->dops; + set_default_d_op(s, ctx->dops); return 0; } @@ -910,7 +921,7 @@ static int simple_read_folio(struct file *file, struct folio *folio) return 0; } -int simple_write_begin(struct file *file, struct address_space *mapping, +int simple_write_begin(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { @@ -935,7 +946,7 @@ EXPORT_SYMBOL(simple_write_begin); /** * simple_write_end - .write_end helper for non-block-device FSes - * @file: See .write_end of address_space_operations + * @iocb: kernel I/O control block * @mapping: " * @pos: " * @len: " @@ -946,7 +957,8 @@ EXPORT_SYMBOL(simple_write_begin); * simple_write_end does the minimum needed for updating a folio after * writing is done. 
It has the same API signature as the .write_end of * address_space_operations vector. So it can just be set onto .write_end for - * FSes that don't need any other processing. i_mutex is assumed to be held. + * FSes that don't need any other processing. i_rwsem is assumed to be held + * exclusively. * Block based filesystems should use generic_write_end(). * NOTE: Even though i_size might get updated by this function, mark_inode_dirty * is not called, so a filesystem that actually does store data in .write_inode @@ -955,9 +967,10 @@ EXPORT_SYMBOL(simple_write_begin); * * Use *ONLY* with simple_read_folio() */ -static int simple_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata) +static int simple_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { struct inode *inode = folio->mapping->host; loff_t last_pos = pos + copied; @@ -973,7 +986,7 @@ static int simple_write_end(struct file *file, struct address_space *mapping, } /* * No need to use i_size_read() here, the i_size - * cannot change under us because we hold the i_mutex. + * cannot change under us because we hold the i_rwsem. */ if (last_pos > inode->i_size) i_size_write(inode, last_pos); @@ -1583,13 +1596,17 @@ EXPORT_SYMBOL(generic_file_fsync); int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks) { u64 last_fs_block = num_blocks - 1; - u64 last_fs_page = - last_fs_block >> (PAGE_SHIFT - blocksize_bits); + u64 last_fs_page, max_bytes; + + if (check_shl_overflow(num_blocks, blocksize_bits, &max_bytes)) + return -EFBIG; + + last_fs_page = (max_bytes >> PAGE_SHIFT) - 1; if (unlikely(num_blocks == 0)) return 0; - if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT)) + if (blocksize_bits < 9) return -EINVAL; if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) || @@ -1948,22 +1965,22 @@ static const struct dentry_operations generic_encrypted_dentry_ops = { * @sb: superblock to be configured * * Filesystems supporting casefolding and/or fscrypt can call this - * helper at mount-time to configure sb->s_d_op to best set of dentry - * operations required for the enabled features. The helper must be - * called after these have been configured, but before the root dentry - * is created. + * helper at mount-time to configure default dentry_operations to the + * best set of dentry operations required for the enabled features. + * The helper must be called after these have been configured, but + * before the root dentry is created. */ void generic_set_sb_d_ops(struct super_block *sb) { #if IS_ENABLED(CONFIG_UNICODE) if (sb->s_encoding) { - sb->s_d_op = &generic_ci_dentry_ops; + set_default_d_op(sb, &generic_ci_dentry_ops); return; } #endif #ifdef CONFIG_FS_ENCRYPTION if (sb->s_cop) { - sb->s_d_op = &generic_encrypted_dentry_ops; + set_default_d_op(sb, &generic_encrypted_dentry_ops); return; } #endif @@ -2126,6 +2143,8 @@ struct dentry *stashed_dentry_get(struct dentry **stashed) dentry = rcu_dereference(*stashed); if (!dentry) return NULL; + if (IS_ERR(dentry)) + return dentry; if (!lockref_get_not_dead(&dentry->d_lockref)) return NULL; return dentry; @@ -2158,7 +2177,6 @@ static struct dentry *prepare_anon_dentry(struct dentry **stashed, /* Notice when this is changed. 
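[Aside: the reworked generic_check_addressable() above computes the byte size with an overflow-checked shift instead of shifting the block count down. A small self-contained analogue of such a check and the page computation it feeds; the helper name and a 4 KiB PAGE_SHIFT are assumptions of the sketch, not the kernel helper itself:]

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12	/* assumed 4 KiB pages for illustration */

/* true if (v << shift) does not fit in 64 bits, otherwise store it in *out */
static bool shl_overflows(uint64_t v, unsigned int shift, uint64_t *out)
{
	if (v == 0) {
		*out = 0;
		return false;
	}
	if (shift >= 64 || (shift && (v >> (64 - shift))))
		return true;
	*out = v << shift;
	return false;
}

static int64_t last_page(uint64_t num_blocks, unsigned int blocksize_bits)
{
	uint64_t max_bytes;

	if (num_blocks == 0)
		return -1;		/* nothing to address */
	if (shl_overflows(num_blocks, blocksize_bits, &max_bytes))
		return -1;		/* byte size does not fit in 64 bits */
	return (int64_t)((max_bytes >> SKETCH_PAGE_SHIFT) - 1);
}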
*/ WARN_ON_ONCE(!S_ISREG(inode->i_mode)); - WARN_ON_ONCE(!IS_IMMUTABLE(inode)); dentry = d_alloc_anon(sb); if (!dentry) { @@ -2174,8 +2192,7 @@ static struct dentry *prepare_anon_dentry(struct dentry **stashed, return dentry; } -static struct dentry *stash_dentry(struct dentry **stashed, - struct dentry *dentry) +struct dentry *stash_dentry(struct dentry **stashed, struct dentry *dentry) { guard(rcu)(); for (;;) { @@ -2216,14 +2233,16 @@ static struct dentry *stash_dentry(struct dentry **stashed, int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data, struct path *path) { - struct dentry *dentry; + struct dentry *dentry, *res; const struct stashed_operations *sops = mnt->mnt_sb->s_fs_info; /* See if dentry can be reused. */ - path->dentry = stashed_dentry_get(stashed); - if (path->dentry) { + res = stashed_dentry_get(stashed); + if (IS_ERR(res)) + return PTR_ERR(res); + if (res) { sops->put_data(data); - goto out_path; + goto make_path; } /* Allocate a new dentry. */ @@ -2232,14 +2251,22 @@ int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data, return PTR_ERR(dentry); /* Added a new dentry. @data is now owned by the filesystem. */ - path->dentry = stash_dentry(stashed, dentry); - if (path->dentry != dentry) + if (sops->stash_dentry) + res = sops->stash_dentry(stashed, dentry); + else + res = stash_dentry(stashed, dentry); + if (IS_ERR(res)) { + dput(dentry); + return PTR_ERR(res); + } + if (res != dentry) dput(dentry); -out_path: - WARN_ON_ONCE(path->dentry->d_fsdata != stashed); - WARN_ON_ONCE(d_inode(path->dentry)->i_private != data); +make_path: + path->dentry = res; path->mnt = mntget(mnt); + VFS_WARN_ON_ONCE(path->dentry->d_fsdata != stashed); + VFS_WARN_ON_ONCE(d_inode(path->dentry)->i_private != data); return 0; } @@ -2261,3 +2288,28 @@ void stashed_dentry_prune(struct dentry *dentry) */ cmpxchg(stashed, dentry, NULL); } + +/* parent must be held exclusive */ +struct dentry *simple_start_creating(struct dentry *parent, const char *name) +{ + struct dentry *dentry; + struct inode *dir = d_inode(parent); + + inode_lock(dir); + if (unlikely(IS_DEADDIR(dir))) { + inode_unlock(dir); + return ERR_PTR(-ENOENT); + } + dentry = lookup_noperm(&QSTR(name), parent); + if (IS_ERR(dentry)) { + inode_unlock(dir); + return dentry; + } + if (dentry->d_inode) { + dput(dentry); + inode_unlock(dir); + return ERR_PTR(-EEXIST); + } + return dentry; +} +EXPORT_SYMBOL(simple_start_creating); diff --git a/fs/locks.c b/fs/locks.c index 1619cddfa7a4..559f02aa4172 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -712,7 +712,7 @@ static void __locks_wake_up_blocks(struct file_lock_core *blocker) fl->fl_lmops && fl->fl_lmops->lm_notify) fl->fl_lmops->lm_notify(fl); else - locks_wake_up(fl); + locks_wake_up_waiter(waiter); /* * The setting of flc_blocker to NULL marks the "done" @@ -1794,7 +1794,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr /* * In the delegation case we need mutual exclusion with - * a number of operations that take the i_mutex. We trylock + * a number of operations that take the i_rwsem. 
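[Aside: on success the new simple_start_creating() above returns a negative dentry with the parent directory still locked, so the caller instantiates and then unlocks. A hedged sketch of how a simple in-memory filesystem might use it; the function name, the "example" file name, mode, and the omission of i_op/i_fop setup are all assumptions of the sketch:]

static int create_example_file(struct dentry *parent)
{
	struct dentry *dentry;
	struct inode *inode;

	dentry = simple_start_creating(parent, "example");
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);	/* parent already unlocked on error */

	inode = new_inode(parent->d_sb);
	if (!inode) {
		dput(dentry);
		inode_unlock(d_inode(parent));
		return -ENOMEM;
	}
	inode->i_mode = S_IFREG | 0444;

	d_instantiate(dentry, inode);		/* dentry goes positive */
	inode_unlock(d_inode(parent));		/* matches the lock taken by the helper */
	return 0;
}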
We trylock * because delegations are an optional optimization, and if * there's some chance of a conflict--we'd rather not * bother, maybe that's a sign this just isn't a good file to diff --git a/fs/minix/dir.c b/fs/minix/dir.c index dd2a425b41f0..19052fc47e9e 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -45,7 +45,7 @@ static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len) struct address_space *mapping = folio->mapping; struct inode *dir = mapping->host; - block_write_end(NULL, mapping, pos, len, len, folio, NULL); + block_write_end(pos, len, len, folio); if (pos+len > dir->i_size) { i_size_write(dir, pos+len); diff --git a/fs/minix/file.c b/fs/minix/file.c index 906d192ab7f3..dca7ac71f049 100644 --- a/fs/minix/file.c +++ b/fs/minix/file.c @@ -17,7 +17,7 @@ const struct file_operations minix_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .fsync = generic_file_fsync, .splice_read = filemap_splice_read, }; diff --git a/fs/minix/inode.c b/fs/minix/inode.c index f007e389d5d2..df9d11479caf 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -442,9 +442,10 @@ static void minix_write_failed(struct address_space *mapping, loff_t to) } } -static int minix_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int minix_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { int ret; diff --git a/fs/mount.h b/fs/mount.h index ad7173037924..97737051a8b9 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -44,7 +44,6 @@ struct mountpoint { struct hlist_node m_hash; struct dentry *m_dentry; struct hlist_head m_list; - int m_count; }; struct mount { @@ -70,8 +69,8 @@ struct mount { struct list_head mnt_list; struct list_head mnt_expire; /* link in fs-specific expiry list */ struct list_head mnt_share; /* circular list of shared mounts */ - struct list_head mnt_slave_list;/* list of slave mounts */ - struct list_head mnt_slave; /* slave list entry */ + struct hlist_head mnt_slave_list;/* list of slave mounts */ + struct hlist_node mnt_slave; /* slave list entry */ struct mount *mnt_master; /* slave is on master->mnt_slave_list */ struct mnt_namespace *mnt_ns; /* containing namespace */ struct mountpoint *mnt_mp; /* where is it mounted */ @@ -79,21 +78,38 @@ struct mount { struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */ struct hlist_node mnt_umount; }; - struct list_head mnt_umounting; /* list entry for umount propagation */ #ifdef CONFIG_FSNOTIFY struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks; __u32 mnt_fsnotify_mask; struct list_head to_notify; /* need to queue notification */ struct mnt_namespace *prev_ns; /* previous namespace (NULL if none) */ #endif + int mnt_t_flags; /* namespace_sem-protected flags */ int mnt_id; /* mount identifier, reused */ u64 mnt_id_unique; /* mount ID unique until reboot */ int mnt_group_id; /* peer group identifier */ int mnt_expiry_mark; /* true if marked for expiry */ struct hlist_head mnt_pins; struct hlist_head mnt_stuck_children; + struct mount *overmount; /* mounted on ->mnt_root */ } __randomize_layout; +enum { + T_SHARED = 1, /* mount is shared */ + T_UNBINDABLE = 2, /* mount is unbindable */ + T_MARKED = 4, /* internal mark for propagate_... 
*/ + T_UMOUNT_CANDIDATE = 8, /* for propagate_umount */ + + /* + * T_SHARED_MASK is the set of flags that should be cleared when a + * mount becomes shared. Currently, this is only the flag that says a + * mount cannot be bind mounted, since this is how we create a mount + * that shares events with another mount. If you add a new T_* + * flag, consider how it interacts with shared mounts. + */ + T_SHARED_MASK = T_UNBINDABLE, +}; + #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */ static inline struct mount *real_mount(struct vfsmount *mnt) @@ -101,7 +117,7 @@ static inline struct mount *real_mount(struct vfsmount *mnt) return container_of(mnt, struct mount, mnt); } -static inline int mnt_has_parent(struct mount *mnt) +static inline int mnt_has_parent(const struct mount *mnt) { return mnt != mnt->mnt_parent; } @@ -146,8 +162,8 @@ struct proc_mounts { extern const struct seq_operations mounts_op; -extern bool __is_local_mountpoint(struct dentry *dentry); -static inline bool is_local_mountpoint(struct dentry *dentry) +extern bool __is_local_mountpoint(const struct dentry *dentry); +static inline bool is_local_mountpoint(const struct dentry *dentry) { if (!d_mountpoint(dentry)) return false; @@ -160,6 +176,13 @@ static inline bool is_anon_ns(struct mnt_namespace *ns) return ns->seq == 0; } +static inline bool anon_ns_root(const struct mount *m) +{ + struct mnt_namespace *ns = READ_ONCE(m->mnt_ns); + + return !IS_ERR_OR_NULL(ns) && is_anon_ns(ns) && m == ns->root; +} + static inline bool mnt_ns_attached(const struct mount *mnt) { return !RB_EMPTY_NODE(&mnt->mnt_node); @@ -170,7 +193,7 @@ static inline bool mnt_ns_empty(const struct mnt_namespace *ns) return RB_EMPTY_ROOT(&ns->mounts); } -static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list) +static inline void move_from_ns(struct mount *mnt) { struct mnt_namespace *ns = mnt->mnt_ns; WARN_ON(!mnt_ns_attached(mnt)); @@ -180,7 +203,6 @@ static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list) ns->mnt_first_node = rb_next(&mnt->mnt_node); rb_erase(&mnt->mnt_node, &ns->mounts); RB_CLEAR_NODE(&mnt->mnt_node); - list_add_tail(&mnt->mnt_list, dt_list); } bool has_locked_children(struct mount *mnt, struct dentry *dentry); diff --git a/fs/namei.c b/fs/namei.c index c26a7ee42184..cd43ff89fbaa 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1012,10 +1012,10 @@ static int set_root(struct nameidata *nd) unsigned seq; do { - seq = read_seqcount_begin(&fs->seq); + seq = read_seqbegin(&fs->seq); nd->root = fs->root; nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq); - } while (read_seqcount_retry(&fs->seq, seq)); + } while (read_seqretry(&fs->seq, seq)); } else { get_fs_root(fs, &nd->root); nd->state |= ND_ROOT_GRABBED; @@ -1469,7 +1469,7 @@ static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped, int ret = 0; while (flags & DCACHE_MANAGED_DENTRY) { - /* Allow the filesystem to manage the transit without i_mutex + /* Allow the filesystem to manage the transit without i_rwsem * being held. */ if (flags & DCACHE_MANAGE_TRANSIT) { ret = path->dentry->d_op->d_manage(path, false); @@ -1665,9 +1665,17 @@ static struct dentry *lookup_dcache(const struct qstr *name, return dentry; } -static struct dentry *lookup_one_qstr_excl_raw(const struct qstr *name, - struct dentry *base, - unsigned int flags) +/* + * Parent directory has inode locked exclusive. 
This is one + * and only case when ->lookup() gets called on non in-lookup + * dentries - as the matter of fact, this only gets called + * when directory is guaranteed to have no in-lookup children + * at all. + * Will return -ENOENT if name isn't found and LOOKUP_CREATE wasn't passed. + * Will return -EEXIST if name is found and LOOKUP_EXCL was passed. + */ +struct dentry *lookup_one_qstr_excl(const struct qstr *name, + struct dentry *base, unsigned int flags) { struct dentry *dentry; struct dentry *old; @@ -1675,7 +1683,7 @@ static struct dentry *lookup_one_qstr_excl_raw(const struct qstr *name, dentry = lookup_dcache(name, base, flags); if (dentry) - return dentry; + goto found; /* Don't create child dentry for a dead directory. */ dir = base->d_inode; @@ -1691,24 +1699,7 @@ static struct dentry *lookup_one_qstr_excl_raw(const struct qstr *name, dput(dentry); dentry = old; } - return dentry; -} - -/* - * Parent directory has inode locked exclusive. This is one - * and only case when ->lookup() gets called on non in-lookup - * dentries - as the matter of fact, this only gets called - * when directory is guaranteed to have no in-lookup children - * at all. - * Will return -ENOENT if name isn't found and LOOKUP_CREATE wasn't passed. - * Will return -EEXIST if name is found and LOOKUP_EXCL was passed. - */ -struct dentry *lookup_one_qstr_excl(const struct qstr *name, - struct dentry *base, unsigned int flags) -{ - struct dentry *dentry; - - dentry = lookup_one_qstr_excl_raw(name, base, flags); +found: if (IS_ERR(dentry)) return dentry; if (d_is_negative(dentry) && !(flags & LOOKUP_CREATE)) { @@ -2580,11 +2571,11 @@ static const char *path_init(struct nameidata *nd, unsigned flags) unsigned seq; do { - seq = read_seqcount_begin(&fs->seq); + seq = read_seqbegin(&fs->seq); nd->path = fs->pwd; nd->inode = nd->path.dentry->d_inode; nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); - } while (read_seqcount_retry(&fs->seq, seq)); + } while (read_seqretry(&fs->seq, seq)); } else { get_fs_pwd(current->fs, &nd->path); nd->inode = nd->path.dentry->d_inode; @@ -2790,7 +2781,7 @@ struct dentry *kern_path_locked_negative(const char *name, struct path *path) if (unlikely(type != LAST_NORM)) return ERR_PTR(-EINVAL); inode_lock_nested(parent_path.dentry->d_inode, I_MUTEX_PARENT); - d = lookup_one_qstr_excl_raw(&last, parent_path.dentry, 0); + d = lookup_one_qstr_excl(&last, parent_path.dentry, LOOKUP_CREATE); if (IS_ERR(d)) { inode_unlock(parent_path.dentry->d_inode); return d; @@ -2946,7 +2937,7 @@ EXPORT_SYMBOL(try_lookup_noperm); * Note that this routine is purely a helper for filesystem usage and should * not be called by generic code. It does no permission checking. * - * The caller must hold base->i_mutex. + * The caller must hold base->i_rwsem. */ struct dentry *lookup_noperm(struct qstr *name, struct dentry *base) { @@ -2972,7 +2963,7 @@ EXPORT_SYMBOL(lookup_noperm); * * This can be used for in-kernel filesystem clients such as file servers. * - * The caller must hold base->i_mutex. + * The caller must hold base->i_rwsem. */ struct dentry *lookup_one(struct mnt_idmap *idmap, struct qstr *name, struct dentry *base) @@ -4551,13 +4542,13 @@ SYSCALL_DEFINE1(rmdir, const char __user *, pathname) * @dentry: victim * @delegated_inode: returns victim inode, if the inode is delegated. * - * The caller must hold dir->i_mutex. + * The caller must hold dir->i_rwsem exclusively. * * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and * return a reference to the inode in delegated_inode. 
The caller * should then break the delegation on that inode and retry. Because * breaking a delegation may take a long time, the caller should drop - * dir->i_mutex before doing so. + * dir->i_rwsem before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not @@ -4616,7 +4607,7 @@ EXPORT_SYMBOL(vfs_unlink); /* * Make sure that the actual truncation of the file will occur outside its - * directory's i_mutex. Truncate can take a long time if there is a lot of + * directory's i_rwsem. Truncate can take a long time if there is a lot of * writeout happening, and we don't want to prevent access to the directory * while waiting on the I/O. */ @@ -4794,13 +4785,13 @@ SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newn * @new_dentry: where to create the new link * @delegated_inode: returns inode needing a delegation break * - * The caller must hold dir->i_mutex + * The caller must hold dir->i_rwsem exclusively. * * If vfs_link discovers a delegation on the to-be-linked file in need * of breaking, it will return -EWOULDBLOCK and return a reference to the * inode in delegated_inode. The caller should then break the delegation * and retry. Because breaking a delegation may take a long time, the - * caller should drop the i_mutex before doing so. + * caller should drop the i_rwsem before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not @@ -4996,7 +4987,7 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname * c) we may have to lock up to _four_ objects - parents and victim (if it exists), * and source (if it's a non-directory or a subdirectory that moves to * different parent). - * And that - after we got ->i_mutex on parents (until then we don't know + * And that - after we got ->i_rwsem on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. We rely on the fact that tree topology may change * only under ->s_vfs_rename_mutex _and_ that parent of the object we @@ -5008,15 +4999,16 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. * d) conversion from fhandle to dentry may come in the wrong moment - when - * we are removing the target. Solution: we will have to grab ->i_mutex + * we are removing the target. Solution: we will have to grab ->i_rwsem * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on - * ->i_mutex on parents, which works but leads to some truly excessive + * ->i_rwsem on parents, which works but leads to some truly excessive * locking]. 
*/ int vfs_rename(struct renamedata *rd) { int error; - struct inode *old_dir = rd->old_dir, *new_dir = rd->new_dir; + struct inode *old_dir = d_inode(rd->old_parent); + struct inode *new_dir = d_inode(rd->new_parent); struct dentry *old_dentry = rd->old_dentry; struct dentry *new_dentry = rd->new_dentry; struct inode **delegated_inode = rd->delegated_inode; @@ -5275,10 +5267,10 @@ retry_deleg: if (error) goto exit5; - rd.old_dir = old_path.dentry->d_inode; + rd.old_parent = old_path.dentry; rd.old_dentry = old_dentry; rd.old_mnt_idmap = mnt_idmap(old_path.mnt); - rd.new_dir = new_path.dentry->d_inode; + rd.new_parent = new_path.dentry; rd.new_dentry = new_dentry; rd.new_mnt_idmap = mnt_idmap(new_path.mnt); rd.delegated_inode = &delegated_inode; diff --git a/fs/namespace.c b/fs/namespace.c index 54c59e091919..ddfd4457d338 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -79,6 +79,7 @@ static struct kmem_cache *mnt_cache __ro_after_init; static DECLARE_RWSEM(namespace_sem); static HLIST_HEAD(unmounted); /* protected by namespace_sem */ static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */ +static struct mnt_namespace *emptied_ns; /* protected by namespace_sem */ static DEFINE_SEQLOCK(mnt_ns_tree_lock); #ifdef CONFIG_FSNOTIFY @@ -380,10 +381,9 @@ static struct mount *alloc_vfsmnt(const char *name) INIT_LIST_HEAD(&mnt->mnt_list); INIT_LIST_HEAD(&mnt->mnt_expire); INIT_LIST_HEAD(&mnt->mnt_share); - INIT_LIST_HEAD(&mnt->mnt_slave_list); - INIT_LIST_HEAD(&mnt->mnt_slave); + INIT_HLIST_HEAD(&mnt->mnt_slave_list); + INIT_HLIST_NODE(&mnt->mnt_slave); INIT_HLIST_NODE(&mnt->mnt_mp_list); - INIT_LIST_HEAD(&mnt->mnt_umounting); INIT_HLIST_HEAD(&mnt->mnt_stuck_children); RB_CLEAR_NODE(&mnt->mnt_node); mnt->mnt.mnt_idmap = &nop_mnt_idmap; @@ -894,7 +894,7 @@ struct vfsmount *lookup_mnt(const struct path *path) * namespace not just a mount that happens to have some specified * parent mount. 
*/ -bool __is_local_mountpoint(struct dentry *dentry) +bool __is_local_mountpoint(const struct dentry *dentry) { struct mnt_namespace *ns = current->nsproxy->mnt_ns; struct mount *mnt, *n; @@ -911,42 +911,48 @@ bool __is_local_mountpoint(struct dentry *dentry) return is_covered; } -static struct mountpoint *lookup_mountpoint(struct dentry *dentry) +struct pinned_mountpoint { + struct hlist_node node; + struct mountpoint *mp; +}; + +static bool lookup_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m) { struct hlist_head *chain = mp_hash(dentry); struct mountpoint *mp; hlist_for_each_entry(mp, chain, m_hash) { if (mp->m_dentry == dentry) { - mp->m_count++; - return mp; + hlist_add_head(&m->node, &mp->m_list); + m->mp = mp; + return true; } } - return NULL; + return false; } -static struct mountpoint *get_mountpoint(struct dentry *dentry) +static int get_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m) { - struct mountpoint *mp, *new = NULL; + struct mountpoint *mp __free(kfree) = NULL; + bool found; int ret; if (d_mountpoint(dentry)) { /* might be worth a WARN_ON() */ if (d_unlinked(dentry)) - return ERR_PTR(-ENOENT); + return -ENOENT; mountpoint: read_seqlock_excl(&mount_lock); - mp = lookup_mountpoint(dentry); + found = lookup_mountpoint(dentry, m); read_sequnlock_excl(&mount_lock); - if (mp) - goto done; + if (found) + return 0; } - if (!new) - new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); - if (!new) - return ERR_PTR(-ENOMEM); - + if (!mp) + mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); + if (!mp) + return -ENOMEM; /* Exactly one processes may set d_mounted */ ret = d_set_mounted(dentry); @@ -956,34 +962,28 @@ mountpoint: goto mountpoint; /* The dentry is not available as a mountpoint? */ - mp = ERR_PTR(ret); if (ret) - goto done; + return ret; /* Add the new mountpoint to the hash table */ read_seqlock_excl(&mount_lock); - new->m_dentry = dget(dentry); - new->m_count = 1; - hlist_add_head(&new->m_hash, mp_hash(dentry)); - INIT_HLIST_HEAD(&new->m_list); + mp->m_dentry = dget(dentry); + hlist_add_head(&mp->m_hash, mp_hash(dentry)); + INIT_HLIST_HEAD(&mp->m_list); + hlist_add_head(&m->node, &mp->m_list); + m->mp = no_free_ptr(mp); read_sequnlock_excl(&mount_lock); - - mp = new; - new = NULL; -done: - kfree(new); - return mp; + return 0; } /* * vfsmount lock must be held. Additionally, the caller is responsible * for serializing calls for given disposal list. 
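[Aside: struct mountpoint loses its m_count above; a "pin" is now simply membership of a pinned_mountpoint node on mp->m_list, and the object may be released once that list goes empty. A self-contained sketch of refcount-by-membership with an hlist-style list, all names invented:]

#include <stdlib.h>

struct pin   { struct pin *next, **pprev; };
struct thing { struct pin *pins; };	/* NULL once the last pin is dropped */

static void pin(struct thing *t, struct pin *p)
{
	p->next = t->pins;
	if (t->pins)
		t->pins->pprev = &p->next;
	t->pins = p;
	p->pprev = &t->pins;
}

static void unpin(struct thing *t, struct pin *p)
{
	*p->pprev = p->next;		/* unlink from whatever precedes us */
	if (p->next)
		p->next->pprev = p->pprev;
	if (!t->pins)			/* list empty: no pins left, release it */
		free(t);
}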
*/ -static void __put_mountpoint(struct mountpoint *mp, struct list_head *list) +static void maybe_free_mountpoint(struct mountpoint *mp, struct list_head *list) { - if (!--mp->m_count) { + if (hlist_empty(&mp->m_list)) { struct dentry *dentry = mp->m_dentry; - BUG_ON(!hlist_empty(&mp->m_list)); spin_lock(&dentry->d_lock); dentry->d_flags &= ~DCACHE_MOUNTED; spin_unlock(&dentry->d_lock); @@ -993,10 +993,15 @@ static void __put_mountpoint(struct mountpoint *mp, struct list_head *list) } } -/* called with namespace_lock and vfsmount lock */ -static void put_mountpoint(struct mountpoint *mp) +/* + * locks: mount_lock [read_seqlock_excl], namespace_sem [excl] + */ +static void unpin_mountpoint(struct pinned_mountpoint *m) { - __put_mountpoint(mp, &ex_mountpoints); + if (m->mp) { + hlist_del(&m->node); + maybe_free_mountpoint(m->mp, &ex_mountpoints); + } } static inline int check_mnt(struct mount *mnt) @@ -1038,11 +1043,14 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns) } /* - * vfsmount lock must be held for write + * locks: mount_lock[write_seqlock] */ -static struct mountpoint *unhash_mnt(struct mount *mnt) +static void __umount_mnt(struct mount *mnt, struct list_head *shrink_list) { struct mountpoint *mp; + struct mount *parent = mnt->mnt_parent; + if (unlikely(parent->overmount == mnt)) + parent->overmount = NULL; mnt->mnt_parent = mnt; mnt->mnt_mountpoint = mnt->mnt.mnt_root; list_del_init(&mnt->mnt_child); @@ -1050,15 +1058,15 @@ static struct mountpoint *unhash_mnt(struct mount *mnt) hlist_del_init(&mnt->mnt_mp_list); mp = mnt->mnt_mp; mnt->mnt_mp = NULL; - return mp; + maybe_free_mountpoint(mp, shrink_list); } /* - * vfsmount lock must be held for write + * locks: mount_lock[write_seqlock], namespace_sem[excl] (for ex_mountpoints) */ static void umount_mnt(struct mount *mnt) { - put_mountpoint(unhash_mnt(mnt)); + __umount_mnt(mnt, &ex_mountpoints); } /* @@ -1068,43 +1076,17 @@ void mnt_set_mountpoint(struct mount *mnt, struct mountpoint *mp, struct mount *child_mnt) { - mp->m_count++; - mnt_add_count(mnt, 1); /* essentially, that's mntget */ child_mnt->mnt_mountpoint = mp->m_dentry; child_mnt->mnt_parent = mnt; child_mnt->mnt_mp = mp; hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list); } -/** - * mnt_set_mountpoint_beneath - mount a mount beneath another one - * - * @new_parent: the source mount - * @top_mnt: the mount beneath which @new_parent is mounted - * @new_mp: the new mountpoint of @top_mnt on @new_parent - * - * Remove @top_mnt from its current mountpoint @top_mnt->mnt_mp and - * parent @top_mnt->mnt_parent and mount it on top of @new_parent at - * @new_mp. And mount @new_parent on the old parent and old - * mountpoint of @top_mnt. - * - * Context: This function expects namespace_lock() and lock_mount_hash() - * to have been acquired in that order. 
- */ -static void mnt_set_mountpoint_beneath(struct mount *new_parent, - struct mount *top_mnt, - struct mountpoint *new_mp) -{ - struct mount *old_top_parent = top_mnt->mnt_parent; - struct mountpoint *old_top_mp = top_mnt->mnt_mp; - - mnt_set_mountpoint(old_top_parent, old_top_mp, new_parent); - mnt_change_mountpoint(new_parent, new_mp, top_mnt); -} - - -static void __attach_mnt(struct mount *mnt, struct mount *parent) +static void make_visible(struct mount *mnt) { + struct mount *parent = mnt->mnt_parent; + if (unlikely(mnt->mnt_mountpoint == parent->mnt.mnt_root)) + parent->overmount = mnt; hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mnt->mnt_mountpoint)); list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); @@ -1116,51 +1098,34 @@ static void __attach_mnt(struct mount *mnt, struct mount *parent) * @parent: the parent * @mnt: the new mount * @mp: the new mountpoint - * @beneath: whether to mount @mnt beneath or on top of @parent * - * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt + * Mount @mnt at @mp on @parent. Then attach @mnt * to @parent's child mount list and to @mount_hashtable. * - * If @beneath is true, remove @mnt from its current parent and - * mountpoint and mount it on @mp on @parent, and mount @parent on the - * old parent and old mountpoint of @mnt. Finally, attach @parent to - * @mnt_hashtable and @parent->mnt_parent->mnt_mounts. - * - * Note, when __attach_mnt() is called @mnt->mnt_parent already points + * Note, when make_visible() is called @mnt->mnt_parent already points * to the correct parent. * * Context: This function expects namespace_lock() and lock_mount_hash() * to have been acquired in that order. */ static void attach_mnt(struct mount *mnt, struct mount *parent, - struct mountpoint *mp, bool beneath) + struct mountpoint *mp) { - if (beneath) - mnt_set_mountpoint_beneath(mnt, parent, mp); - else - mnt_set_mountpoint(parent, mp, mnt); - /* - * Note, @mnt->mnt_parent has to be used. If @mnt was mounted - * beneath @parent then @mnt will need to be attached to - * @parent's old parent, not @parent. IOW, @mnt->mnt_parent - * isn't the same mount as @parent. 
- */ - __attach_mnt(mnt, mnt->mnt_parent); + mnt_set_mountpoint(parent, mp, mnt); + make_visible(mnt); } void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt) { struct mountpoint *old_mp = mnt->mnt_mp; - struct mount *old_parent = mnt->mnt_parent; list_del_init(&mnt->mnt_child); hlist_del_init(&mnt->mnt_mp_list); hlist_del_init_rcu(&mnt->mnt_hash); - attach_mnt(mnt, parent, mp, false); + attach_mnt(mnt, parent, mp); - put_mountpoint(old_mp); - mnt_add_count(old_parent, -1); + maybe_free_mountpoint(old_mp, &ex_mountpoints); } static inline struct mount *node_to_mount(struct rb_node *node) @@ -1197,32 +1162,6 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt) mnt_notify_add(mnt); } -/* - * vfsmount lock must be held for write - */ -static void commit_tree(struct mount *mnt) -{ - struct mount *parent = mnt->mnt_parent; - struct mount *m; - LIST_HEAD(head); - struct mnt_namespace *n = parent->mnt_ns; - - BUG_ON(parent == mnt); - - list_add_tail(&head, &mnt->mnt_list); - while (!list_empty(&head)) { - m = list_first_entry(&head, typeof(*m), mnt_list); - list_del(&m->mnt_list); - - mnt_add_to_ns(n, m); - } - n->nr_mounts += n->pending_mounts; - n->pending_mounts = 0; - - __attach_mnt(mnt, parent); - touch_mnt_namespace(n); -} - static struct mount *next_mnt(struct mount *p, struct mount *root) { struct list_head *next = p->mnt_mounts.next; @@ -1249,6 +1188,27 @@ static struct mount *skip_mnt_tree(struct mount *p) return p; } +/* + * vfsmount lock must be held for write + */ +static void commit_tree(struct mount *mnt) +{ + struct mnt_namespace *n = mnt->mnt_parent->mnt_ns; + + if (!mnt_ns_attached(mnt)) { + for (struct mount *m = mnt; m; m = next_mnt(m, mnt)) + if (unlikely(mnt_ns_attached(m))) + m = skip_mnt_tree(m); + else + mnt_add_to_ns(n, m); + n->nr_mounts += n->pending_mounts; + n->pending_mounts = 0; + } + + make_visible(mnt); + touch_mnt_namespace(n); +} + /** * vfs_create_mount - Create a mount for a configured superblock * @fc: The configuration context with the superblock attached @@ -1296,6 +1256,15 @@ struct vfsmount *fc_mount(struct fs_context *fc) } EXPORT_SYMBOL(fc_mount); +struct vfsmount *fc_mount_longterm(struct fs_context *fc) +{ + struct vfsmount *mnt = fc_mount(fc); + if (!IS_ERR(mnt)) + real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; + return mnt; +} +EXPORT_SYMBOL(fc_mount_longterm); + struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data) @@ -1337,7 +1306,10 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, if (!mnt) return ERR_PTR(-ENOMEM); - if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE)) + mnt->mnt.mnt_flags = READ_ONCE(old->mnt.mnt_flags) & + ~MNT_INTERNAL_FLAGS; + + if (flag & (CL_SLAVE | CL_PRIVATE)) mnt->mnt_group_id = 0; /* not a peer of original */ else mnt->mnt_group_id = old->mnt_group_id; @@ -1348,8 +1320,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, goto out_free; } - mnt->mnt.mnt_flags = old->mnt.mnt_flags; - mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL); + if (mnt->mnt_group_id) + set_mnt_shared(mnt); atomic_inc(&sb->s_active); mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt)); @@ -1362,30 +1334,19 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, list_add_tail(&mnt->mnt_instance, &sb->s_mounts); unlock_mount_hash(); - if ((flag & CL_SLAVE) || - ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) { - list_add(&mnt->mnt_slave, 
&old->mnt_slave_list); + if (flag & CL_PRIVATE) // we are done with it + return mnt; + + if (peers(mnt, old)) + list_add(&mnt->mnt_share, &old->mnt_share); + + if ((flag & CL_SLAVE) && old->mnt_group_id) { + hlist_add_head(&mnt->mnt_slave, &old->mnt_slave_list); mnt->mnt_master = old; - CLEAR_MNT_SHARED(mnt); - } else if (!(flag & CL_PRIVATE)) { - if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) - list_add(&mnt->mnt_share, &old->mnt_share); - if (IS_MNT_SLAVE(old)) - list_add(&mnt->mnt_slave, &old->mnt_slave); + } else if (IS_MNT_SLAVE(old)) { + hlist_add_behind(&mnt->mnt_slave, &old->mnt_slave); mnt->mnt_master = old->mnt_master; - } else { - CLEAR_MNT_SHARED(mnt); - } - if (flag & CL_MAKE_SHARED) - set_mnt_shared(mnt); - - /* stick the duplicate mount on the same expiry list - * as the original if that was on one */ - if (flag & CL_EXPIRE) { - if (!list_empty(&old->mnt_expire)) - list_add(&mnt->mnt_expire, &old->mnt_expire); } - return mnt; out_free: @@ -1478,11 +1439,13 @@ static void mntput_no_expire(struct mount *mnt) rcu_read_unlock(); list_del(&mnt->mnt_instance); + if (unlikely(!list_empty(&mnt->mnt_expire))) + list_del(&mnt->mnt_expire); if (unlikely(!list_empty(&mnt->mnt_mounts))) { struct mount *p, *tmp; list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { - __put_mountpoint(unhash_mnt(p), &list); + __umount_mnt(p, &list); hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children); } } @@ -1679,23 +1642,19 @@ const struct seq_operations mounts_op = { int may_umount_tree(struct vfsmount *m) { struct mount *mnt = real_mount(m); - int actual_refs = 0; - int minimum_refs = 0; - struct mount *p; - BUG_ON(!m); + bool busy = false; /* write lock needed for mnt_get_count */ lock_mount_hash(); - for (p = mnt; p; p = next_mnt(p, mnt)) { - actual_refs += mnt_get_count(p); - minimum_refs += 2; + for (struct mount *p = mnt; p; p = next_mnt(p, mnt)) { + if (mnt_get_count(p) > (p == mnt ? 2 : 1)) { + busy = true; + break; + } } unlock_mount_hash(); - if (actual_refs > minimum_refs) - return 0; - - return 1; + return !busy; } EXPORT_SYMBOL(may_umount_tree); @@ -1771,15 +1730,18 @@ static bool need_notify_mnt_list(void) } #endif +static void free_mnt_ns(struct mnt_namespace *); static void namespace_unlock(void) { struct hlist_head head; struct hlist_node *p; struct mount *m; + struct mnt_namespace *ns = emptied_ns; LIST_HEAD(list); hlist_move_list(&unmounted, &head); list_splice_init(&ex_mountpoints, &list); + emptied_ns = NULL; if (need_notify_mnt_list()) { /* @@ -1793,6 +1755,11 @@ static void namespace_unlock(void) } else { up_write(&namespace_sem); } + if (unlikely(ns)) { + /* Make sure we notice when we leak mounts. */ + VFS_WARN_ON_ONCE(!mnt_ns_empty(ns)); + free_mnt_ns(ns); + } shrink_dentry_list(&list); @@ -1865,9 +1832,8 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) for (p = mnt; p; p = next_mnt(p, mnt)) { p->mnt.mnt_flags |= MNT_UMOUNT; if (mnt_ns_attached(p)) - move_from_ns(p, &tmp_list); - else - list_move(&p->mnt_list, &tmp_list); + move_from_ns(p); + list_add_tail(&p->mnt_list, &tmp_list); } /* Hide the mounts from mnt_mounts */ @@ -1896,7 +1862,6 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) disconnect = disconnect_mount(p, how); if (mnt_has_parent(p)) { - mnt_add_count(p->mnt_parent, -1); if (!disconnect) { /* Don't forget about p */ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); @@ -1973,7 +1938,7 @@ static int do_umount(struct mount *mnt, int flags) * all race cases, but it's a slowpath. 
*/ lock_mount_hash(); - if (mnt_get_count(mnt) != 2) { + if (!list_empty(&mnt->mnt_mounts) || mnt_get_count(mnt) != 2) { unlock_mount_hash(); return -EBUSY; } @@ -2019,23 +1984,27 @@ static int do_umount(struct mount *mnt, int flags) namespace_lock(); lock_mount_hash(); - /* Recheck MNT_LOCKED with the locks held */ + /* Repeat the earlier racy checks, now that we are holding the locks */ retval = -EINVAL; + if (!check_mnt(mnt)) + goto out; + if (mnt->mnt.mnt_flags & MNT_LOCKED) goto out; + if (!mnt_has_parent(mnt)) /* not the absolute root */ + goto out; + event++; if (flags & MNT_DETACH) { - if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list)) - umount_tree(mnt, UMOUNT_PROPAGATE); + umount_tree(mnt, UMOUNT_PROPAGATE); retval = 0; } else { smp_mb(); // paired with __legitimize_mnt() shrink_submounts(mnt); retval = -EBUSY; if (!propagate_mount_busy(mnt, 2)) { - if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list)) - umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); + umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); retval = 0; } } @@ -2053,29 +2022,28 @@ out: * detach_mounts allows lazily unmounting those mounts instead of * leaking them. * - * The caller may hold dentry->d_inode->i_mutex. + * The caller may hold dentry->d_inode->i_rwsem. */ void __detach_mounts(struct dentry *dentry) { - struct mountpoint *mp; + struct pinned_mountpoint mp = {}; struct mount *mnt; namespace_lock(); lock_mount_hash(); - mp = lookup_mountpoint(dentry); - if (!mp) + if (!lookup_mountpoint(dentry, &mp)) goto out_unlock; event++; - while (!hlist_empty(&mp->m_list)) { - mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); + while (mp.node.next) { + mnt = hlist_entry(mp.node.next, struct mount, mnt_mp_list); if (mnt->mnt.mnt_flags & MNT_UMOUNT) { umount_mnt(mnt); hlist_add_head(&mnt->mnt_umount, &unmounted); } else umount_tree(mnt, UMOUNT_CONNECTED); } - put_mountpoint(mp); + unpin_mountpoint(&mp); out_unlock: unlock_mount_hash(); namespace_unlock(); @@ -2259,7 +2227,6 @@ struct mount *copy_tree(struct mount *src_root, struct dentry *dentry, return dst_mnt; src_parent = src_root; - dst_mnt->mnt_mountpoint = src_root->mnt_mountpoint; list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) { if (!is_subdir(src_root_child->mnt_mountpoint, dentry)) @@ -2294,8 +2261,16 @@ struct mount *copy_tree(struct mount *src_root, struct dentry *dentry, if (IS_ERR(dst_mnt)) goto out; lock_mount_hash(); - list_add_tail(&dst_mnt->mnt_list, &res->mnt_list); - attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp, false); + if (src_mnt->mnt.mnt_flags & MNT_LOCKED) + dst_mnt->mnt.mnt_flags |= MNT_LOCKED; + if (unlikely(flag & CL_EXPIRE)) { + /* stick the duplicate mount on the same expiry + * list as the original if that was on one */ + if (!list_empty(&src_mnt->mnt_expire)) + list_add(&dst_mnt->mnt_expire, + &src_mnt->mnt_expire); + } + attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp); unlock_mount_hash(); } } @@ -2368,78 +2343,36 @@ void drop_collected_paths(struct path *paths, struct path *prealloc) kfree(paths); } -static void free_mnt_ns(struct mnt_namespace *); static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool); -static inline bool must_dissolve(struct mnt_namespace *mnt_ns) -{ - /* - * This mount belonged to an anonymous mount namespace - * but was moved to a non-anonymous mount namespace and - * then unmounted. 
- */ - if (unlikely(!mnt_ns)) - return false; - - /* - * This mount belongs to a non-anonymous mount namespace - * and we know that such a mount can never transition to - * an anonymous mount namespace again. - */ - if (!is_anon_ns(mnt_ns)) { - /* - * A detached mount either belongs to an anonymous mount - * namespace or a non-anonymous mount namespace. It - * should never belong to something purely internal. - */ - VFS_WARN_ON_ONCE(mnt_ns == MNT_NS_INTERNAL); - return false; - } - - return true; -} - void dissolve_on_fput(struct vfsmount *mnt) { - struct mnt_namespace *ns; struct mount *m = real_mount(mnt); + /* + * m used to be the root of anon namespace; if it still is one, + * we need to dissolve the mount tree and free that namespace. + * Let's try to avoid taking namespace_sem if we can determine + * that there's nothing to do without it - rcu_read_lock() is + * enough to make anon_ns_root() memory-safe and once m has + * left its namespace, it's no longer our concern, since it will + * never become a root of anon ns again. + */ + scoped_guard(rcu) { - if (!must_dissolve(READ_ONCE(m->mnt_ns))) + if (!anon_ns_root(m)) return; } scoped_guard(namespace_lock, &namespace_sem) { - ns = m->mnt_ns; - if (!must_dissolve(ns)) - return; - - /* - * After must_dissolve() we know that this is a detached - * mount in an anonymous mount namespace. - * - * Now when mnt_has_parent() reports that this mount - * tree has a parent, we know that this anonymous mount - * tree has been moved to another anonymous mount - * namespace. - * - * So when closing this file we cannot unmount the mount - * tree. This will be done when the file referring to - * the root of the anonymous mount namespace will be - * closed (It could already be closed but it would sync - * on @namespace_sem and wait for us to finish.). - */ - if (mnt_has_parent(m)) + if (!anon_ns_root(m)) return; + emptied_ns = m->mnt_ns; lock_mount_hash(); umount_tree(m, UMOUNT_CONNECTED); unlock_mount_hash(); } - - /* Make sure we notice when we leak mounts. */ - VFS_WARN_ON_ONCE(!mnt_ns_empty(ns)); - free_mnt_ns(ns); } static bool __has_locked_children(struct mount *mnt, struct dentry *dentry) @@ -2518,9 +2451,7 @@ struct vfsmount *clone_private_mount(const struct path *path) * loops get created. */ if (!check_mnt(old_mnt)) { - if (!is_mounted(&old_mnt->mnt) || - !is_anon_ns(old_mnt->mnt_ns) || - mnt_has_parent(old_mnt)) + if (!anon_ns_root(old_mnt)) return ERR_PTR(-EINVAL); if (!check_for_nsfs_mounts(old_mnt)) @@ -2564,7 +2495,7 @@ static void lock_mnt_tree(struct mount *mnt) if (flags & MNT_NOEXEC) flags |= MNT_LOCK_NOEXEC; /* Don't allow unprivileged users to reveal what is under a mount */ - if (list_empty(&p->mnt_expire)) + if (list_empty(&p->mnt_expire) && p != mnt) flags |= MNT_LOCKED; p->mnt.mnt_flags = flags; } @@ -2585,7 +2516,7 @@ static int invent_group_ids(struct mount *mnt, bool recurse) struct mount *p; for (p = mnt; p; p = recurse ? 
next_mnt(p, mnt) : NULL) { - if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { + if (!p->mnt_group_id) { int err = mnt_alloc_group_id(p); if (err) { cleanup_group_ids(mnt, p); @@ -2621,17 +2552,15 @@ int count_mounts(struct mnt_namespace *ns, struct mount *mnt) } enum mnt_tree_flags_t { - MNT_TREE_MOVE = BIT(0), - MNT_TREE_BENEATH = BIT(1), - MNT_TREE_PROPAGATION = BIT(2), + MNT_TREE_BENEATH = BIT(0), + MNT_TREE_PROPAGATION = BIT(1), }; /** * attach_recursive_mnt - attach a source mount tree * @source_mnt: mount tree to be attached - * @top_mnt: mount that @source_mnt will be mounted on or mounted beneath + * @dest_mnt: mount that @source_mnt will be mounted on * @dest_mp: the mountpoint @source_mnt will be mounted at - * @flags: modify how @source_mnt is supposed to be attached * * NOTE: in the table below explains the semantics when a source mount * of a given type is attached to a destination mount of a given type. @@ -2694,26 +2623,31 @@ enum mnt_tree_flags_t { * Otherwise a negative error code is returned. */ static int attach_recursive_mnt(struct mount *source_mnt, - struct mount *top_mnt, - struct mountpoint *dest_mp, - enum mnt_tree_flags_t flags) + struct mount *dest_mnt, + struct mountpoint *dest_mp) { struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; HLIST_HEAD(tree_list); - struct mnt_namespace *ns = top_mnt->mnt_ns; - struct mountpoint *smp; - struct mount *child, *dest_mnt, *p; + struct mnt_namespace *ns = dest_mnt->mnt_ns; + struct pinned_mountpoint root = {}; + struct mountpoint *shorter = NULL; + struct mount *child, *p; + struct mount *top; struct hlist_node *n; int err = 0; - bool moving = flags & MNT_TREE_MOVE, beneath = flags & MNT_TREE_BENEATH; + bool moving = mnt_has_parent(source_mnt); /* * Preallocate a mountpoint in case the new mounts need to be * mounted beneath mounts on the same mountpoint. */ - smp = get_mountpoint(source_mnt->mnt.mnt_root); - if (IS_ERR(smp)) - return PTR_ERR(smp); + for (top = source_mnt; unlikely(top->overmount); top = top->overmount) { + if (!shorter && is_mnt_ns_file(top->mnt.mnt_root)) + shorter = top->mnt_mp; + } + err = get_mountpoint(top->mnt.mnt_root, &root); + if (err) + return err; /* Is there space to add these mounts to the mount namespace? 
*/ if (!moving) { @@ -2722,11 +2656,6 @@ static int attach_recursive_mnt(struct mount *source_mnt, goto out; } - if (beneath) - dest_mnt = top_mnt->mnt_parent; - else - dest_mnt = top_mnt; - if (IS_MNT_SHARED(dest_mnt)) { err = invent_group_ids(source_mnt, true); if (err) @@ -2743,42 +2672,50 @@ static int attach_recursive_mnt(struct mount *source_mnt, } if (moving) { - if (beneath) - dest_mp = smp; - unhash_mnt(source_mnt); - attach_mnt(source_mnt, top_mnt, dest_mp, beneath); + umount_mnt(source_mnt); mnt_notify_add(source_mnt); - touch_mnt_namespace(source_mnt->mnt_ns); + /* if the mount is moved, it should no longer be expired + * automatically */ + list_del_init(&source_mnt->mnt_expire); } else { if (source_mnt->mnt_ns) { - LIST_HEAD(head); - /* move from anon - the caller will destroy */ + emptied_ns = source_mnt->mnt_ns; for (p = source_mnt; p; p = next_mnt(p, source_mnt)) - move_from_ns(p, &head); - list_del_init(&head); + move_from_ns(p); } - if (beneath) - mnt_set_mountpoint_beneath(source_mnt, top_mnt, smp); - else - mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); - commit_tree(source_mnt); } + mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); + /* + * Now the original copy is in the same state as the secondaries - + * its root attached to mountpoint, but not hashed and all mounts + * in it are either in our namespace or in no namespace at all. + * Add the original to the list of copies and deal with the + * rest of work for all of them uniformly. + */ + hlist_add_head(&source_mnt->mnt_hash, &tree_list); + hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { struct mount *q; hlist_del_init(&child->mnt_hash); /* Notice when we are propagating across user namespaces */ if (child->mnt_parent->mnt_ns->user_ns != user_ns) lock_mnt_tree(child); - child->mnt.mnt_flags &= ~MNT_LOCKED; q = __lookup_mnt(&child->mnt_parent->mnt, child->mnt_mountpoint); - if (q) - mnt_change_mountpoint(child, smp, q); + if (q) { + struct mountpoint *mp = root.mp; + struct mount *r = child; + while (unlikely(r->overmount)) + r = r->overmount; + if (unlikely(shorter) && child != source_mnt) + mp = shorter; + mnt_change_mountpoint(r, mp, q); + } commit_tree(child); } - put_mountpoint(smp); + unpin_mountpoint(&root); unlock_mount_hash(); return 0; @@ -2795,7 +2732,7 @@ static int attach_recursive_mnt(struct mount *source_mnt, ns->pending_mounts = 0; read_seqlock_excl(&mount_lock); - put_mountpoint(smp); + unpin_mountpoint(&root); read_sequnlock_excl(&mount_lock); return err; @@ -2835,12 +2772,12 @@ static int attach_recursive_mnt(struct mount *source_mnt, * Return: Either the target mountpoint on the top mount or the top * mount's mountpoint. 
*/ -static struct mountpoint *do_lock_mount(struct path *path, bool beneath) +static int do_lock_mount(struct path *path, struct pinned_mountpoint *pinned, bool beneath) { struct vfsmount *mnt = path->mnt; struct dentry *dentry; - struct mountpoint *mp = ERR_PTR(-ENOENT); struct path under = {}; + int err = -ENOENT; for (;;) { struct mount *m = real_mount(mnt); @@ -2878,8 +2815,8 @@ static struct mountpoint *do_lock_mount(struct path *path, bool beneath) path->dentry = dget(mnt->mnt_root); continue; // got overmounted } - mp = get_mountpoint(dentry); - if (IS_ERR(mp)) + err = get_mountpoint(dentry, pinned); + if (err) break; if (beneath) { /* @@ -2890,25 +2827,25 @@ static struct mountpoint *do_lock_mount(struct path *path, bool beneath) */ path_put(&under); } - return mp; + return 0; } namespace_unlock(); inode_unlock(dentry->d_inode); if (beneath) path_put(&under); - return mp; + return err; } -static inline struct mountpoint *lock_mount(struct path *path) +static inline int lock_mount(struct path *path, struct pinned_mountpoint *m) { - return do_lock_mount(path, false); + return do_lock_mount(path, m, false); } -static void unlock_mount(struct mountpoint *where) +static void unlock_mount(struct pinned_mountpoint *m) { - inode_unlock(where->m_dentry->d_inode); + inode_unlock(m->mp->m_dentry->d_inode); read_seqlock_excl(&mount_lock); - put_mountpoint(where); + unpin_mountpoint(m); read_sequnlock_excl(&mount_lock); namespace_unlock(); } @@ -2922,7 +2859,7 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) d_is_dir(mnt->mnt.mnt_root)) return -ENOTDIR; - return attach_recursive_mnt(mnt, p, mp, 0); + return attach_recursive_mnt(mnt, p, mp); } /* @@ -2971,10 +2908,8 @@ static int do_change_type(struct path *path, int ms_flags) goto out_unlock; } - lock_mount_hash(); for (m = mnt; m; m = (recurse ? 
next_mnt(m, mnt) : NULL)) change_mnt_propagation(m, type); - unlock_mount_hash(); out_unlock: namespace_unlock(); @@ -3048,26 +2983,21 @@ static inline bool may_copy_tree(struct path *path) static struct mount *__do_loopback(struct path *old_path, int recurse) { - struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt); + struct mount *old = real_mount(old_path->mnt); if (IS_MNT_UNBINDABLE(old)) - return mnt; + return ERR_PTR(-EINVAL); if (!may_copy_tree(old_path)) - return mnt; + return ERR_PTR(-EINVAL); if (!recurse && __has_locked_children(old, old_path->dentry)) - return mnt; + return ERR_PTR(-EINVAL); if (recurse) - mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE); + return copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE); else - mnt = clone_mnt(old, old_path->dentry, 0); - - if (!IS_ERR(mnt)) - mnt->mnt.mnt_flags &= ~MNT_LOCKED; - - return mnt; + return clone_mnt(old, old_path->dentry, 0); } /* @@ -3078,7 +3008,7 @@ static int do_loopback(struct path *path, const char *old_name, { struct path old_path; struct mount *mnt = NULL, *parent; - struct mountpoint *mp; + struct pinned_mountpoint mp = {}; int err; if (!old_name || !*old_name) return -EINVAL; @@ -3090,11 +3020,9 @@ static int do_loopback(struct path *path, const char *old_name, if (mnt_ns_loop(old_path.dentry)) goto out; - mp = lock_mount(path); - if (IS_ERR(mp)) { - err = PTR_ERR(mp); + err = lock_mount(path, &mp); + if (err) goto out; - } parent = real_mount(path->mnt); if (!check_mnt(parent)) @@ -3106,14 +3034,14 @@ static int do_loopback(struct path *path, const char *old_name, goto out2; } - err = graft_tree(mnt, parent, mp); + err = graft_tree(mnt, parent, mp.mp); if (err) { lock_mount_hash(); umount_tree(mnt, UMOUNT_SYNC); unlock_mount_hash(); } out2: - unlock_mount(mp); + unlock_mount(&mp); out: path_put(&old_path); return err; @@ -3461,18 +3389,14 @@ static int do_set_group(struct path *from_path, struct path *to_path) goto out; if (IS_MNT_SLAVE(from)) { - struct mount *m = from->mnt_master; - - list_add(&to->mnt_slave, &from->mnt_slave); - to->mnt_master = m; + hlist_add_behind(&to->mnt_slave, &from->mnt_slave); + to->mnt_master = from->mnt_master; } if (IS_MNT_SHARED(from)) { to->mnt_group_id = from->mnt_group_id; list_add(&to->mnt_share, &from->mnt_share); - lock_mount_hash(); set_mnt_shared(to); - unlock_mount_hash(); } err = 0; @@ -3509,6 +3433,17 @@ static inline bool path_overmounted(const struct path *path) return unlikely(!no_child); } +/* + * Check if there is a possibly empty chain of descent from p1 to p2. + * Locks: namespace_sem (shared) or mount_lock (read_seqlock_excl). 
+ */ +static bool mount_is_ancestor(const struct mount *p1, const struct mount *p2) +{ + while (p2 != p1 && mnt_has_parent(p2)) + p2 = p2->mnt_parent; + return p2 == p1; +} + /** * can_move_mount_beneath - check that we can mount beneath the top mount * @from: mount to mount beneath @@ -3560,9 +3495,8 @@ static int can_move_mount_beneath(const struct path *from, if (parent_mnt_to == current->nsproxy->mnt_ns->root) return -EINVAL; - for (struct mount *p = mnt_from; mnt_has_parent(p); p = p->mnt_parent) - if (p == mnt_to) - return -EINVAL; + if (mount_is_ancestor(mnt_to, mnt_from)) + return -EINVAL; /* * If the parent mount propagates to the child mount this would @@ -3647,27 +3581,20 @@ static int do_move_mount(struct path *old_path, struct mount *p; struct mount *old; struct mount *parent; - struct mountpoint *mp, *old_mp; + struct pinned_mountpoint mp; int err; - bool attached, beneath = flags & MNT_TREE_BENEATH; + bool beneath = flags & MNT_TREE_BENEATH; - mp = do_lock_mount(new_path, beneath); - if (IS_ERR(mp)) - return PTR_ERR(mp); + err = do_lock_mount(new_path, &mp, beneath); + if (err) + return err; old = real_mount(old_path->mnt); p = real_mount(new_path->mnt); parent = old->mnt_parent; - attached = mnt_has_parent(old); - if (attached) - flags |= MNT_TREE_MOVE; - old_mp = old->mnt_mp; ns = old->mnt_ns; err = -EINVAL; - /* The thing moved must be mounted... */ - if (!is_mounted(&old->mnt)) - goto out; if (check_mnt(old)) { /* if the source is in our namespace... */ @@ -3677,13 +3604,14 @@ static int do_move_mount(struct path *old_path, /* ... and the target should be in our namespace */ if (!check_mnt(p)) goto out; + /* parent of the source should not be shared */ + if (IS_MNT_SHARED(parent)) + goto out; } else { /* * otherwise the source must be the root of some anon namespace. - * AV: check for mount being root of an anon namespace is worth - * an inlined predicate... */ - if (!is_anon_ns(ns) || mnt_has_parent(old)) + if (!anon_ns_root(old)) goto out; /* * Bail out early if the target is within the same namespace - @@ -3706,20 +3634,14 @@ static int do_move_mount(struct path *old_path, if (d_is_dir(new_path->dentry) != d_is_dir(old_path->dentry)) goto out; - /* - * Don't move a mount residing in a shared parent. - */ - if (attached && IS_MNT_SHARED(parent)) - goto out; if (beneath) { - err = can_move_mount_beneath(old_path, new_path, mp); + err = can_move_mount_beneath(old_path, new_path, mp.mp); if (err) goto out; err = -EINVAL; p = p->mnt_parent; - flags |= MNT_TREE_BENEATH; } /* @@ -3731,30 +3653,12 @@ static int do_move_mount(struct path *old_path, err = -ELOOP; if (!check_for_nsfs_mounts(old)) goto out; - for (; mnt_has_parent(p); p = p->mnt_parent) - if (p == old) - goto out; - - err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, flags); - if (err) + if (mount_is_ancestor(old, p)) goto out; - /* if the mount is moved, it should no longer be expire - * automatically */ - list_del_init(&old->mnt_expire); - if (attached) - put_mountpoint(old_mp); + err = attach_recursive_mnt(old, p, mp.mp); out: - unlock_mount(mp); - if (!err) { - if (attached) { - mntput_no_expire(parent); - } else { - /* Make sure we notice when we leak mounts. 
*/ - VFS_WARN_ON_ONCE(!mnt_ns_empty(ns)); - free_mnt_ns(ns); - } - } + unlock_mount(&mp); return err; } @@ -3815,7 +3719,7 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint, unsigned int mnt_flags) { struct vfsmount *mnt; - struct mountpoint *mp; + struct pinned_mountpoint mp = {}; struct super_block *sb = fc->root->d_sb; int error; @@ -3836,13 +3740,12 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint, mnt_warn_timestamp_expiry(mountpoint, mnt); - mp = lock_mount(mountpoint); - if (IS_ERR(mp)) { - mntput(mnt); - return PTR_ERR(mp); + error = lock_mount(mountpoint, &mp); + if (!error) { + error = do_add_mount(real_mount(mnt), mp.mp, + mountpoint, mnt_flags); + unlock_mount(&mp); } - error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags); - unlock_mount(mp); if (error < 0) mntput(mnt); return error; @@ -3910,7 +3813,7 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags, int finish_automount(struct vfsmount *m, const struct path *path) { struct dentry *dentry = path->dentry; - struct mountpoint *mp; + struct pinned_mountpoint mp = {}; struct mount *mnt; int err; @@ -3942,14 +3845,13 @@ int finish_automount(struct vfsmount *m, const struct path *path) err = 0; goto discard_locked; } - mp = get_mountpoint(dentry); - if (IS_ERR(mp)) { - err = PTR_ERR(mp); + err = get_mountpoint(dentry, &mp); + if (err) goto discard_locked; - } - err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE); - unlock_mount(mp); + err = do_add_mount(mnt, mp.mp, path, + path->mnt->mnt_flags | MNT_SHRINKABLE); + unlock_mount(&mp); if (unlikely(err)) goto discard; return 0; @@ -3958,12 +3860,6 @@ discard_locked: namespace_unlock(); inode_unlock(dentry->d_inode); discard: - /* remove m from any expiration list it may be on */ - if (!list_empty(&mnt->mnt_expire)) { - namespace_lock(); - list_del_init(&mnt->mnt_expire); - namespace_unlock(); - } mntput(m); return err; } @@ -3975,11 +3871,9 @@ discard: */ void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) { - namespace_lock(); - + read_seqlock_excl(&mount_lock); list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); - - namespace_unlock(); + read_sequnlock_excl(&mount_lock); } EXPORT_SYMBOL(mnt_set_expiry); @@ -4333,7 +4227,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, /* First pass: copy the tree topology */ copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; if (user_ns != ns->user_ns) - copy_flags |= CL_SHARED_TO_SLAVE; + copy_flags |= CL_SLAVE; new = copy_tree(old, old->mnt.mnt_root, copy_flags); if (IS_ERR(new)) { namespace_unlock(); @@ -4758,7 +4652,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, { struct path new, old, root; struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent; - struct mountpoint *old_mp, *root_mp; + struct pinned_mountpoint old_mp = {}; int error; if (!may_mount()) @@ -4779,9 +4673,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, goto out2; get_fs_root(current->fs, &root); - old_mp = lock_mount(&old); - error = PTR_ERR(old_mp); - if (IS_ERR(old_mp)) + error = lock_mount(&old, &old_mp); + if (error) goto out3; error = -EINVAL; @@ -4808,11 +4701,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, if (!path_mounted(&root)) goto out4; /* not a mountpoint */ if (!mnt_has_parent(root_mnt)) - goto out4; /* not attached */ + goto out4; /* absolute root */ if (!path_mounted(&new)) goto out4; /* not a mountpoint */ if 
(!mnt_has_parent(new_mnt)) - goto out4; /* not attached */ + goto out4; /* absolute root */ /* make sure we can reach put_old from new_root */ if (!is_path_reachable(old_mnt, old.dentry, &new)) goto out4; @@ -4821,29 +4714,25 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, goto out4; lock_mount_hash(); umount_mnt(new_mnt); - root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */ if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { new_mnt->mnt.mnt_flags |= MNT_LOCKED; root_mnt->mnt.mnt_flags &= ~MNT_LOCKED; } - /* mount old root on put_old */ - attach_mnt(root_mnt, old_mnt, old_mp, false); /* mount new_root on / */ - attach_mnt(new_mnt, root_parent, root_mp, false); - mnt_add_count(root_parent, -1); + attach_mnt(new_mnt, root_parent, root_mnt->mnt_mp); + umount_mnt(root_mnt); + /* mount old root on put_old */ + attach_mnt(root_mnt, old_mnt, old_mp.mp); touch_mnt_namespace(current->nsproxy->mnt_ns); /* A moved mount should not expire automatically */ list_del_init(&new_mnt->mnt_expire); - put_mountpoint(root_mp); unlock_mount_hash(); mnt_notify_add(root_mnt); mnt_notify_add(new_mnt); chroot_fs_refs(&root, &new); error = 0; out4: - unlock_mount(old_mp); - if (!error) - mntput_no_expire(ex_parent); + unlock_mount(&old_mp); out3: path_put(&root); out2: @@ -5045,22 +4934,7 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr) err = -EINVAL; lock_mount_hash(); - /* Ensure that this isn't anything purely vfs internal. */ - if (!is_mounted(&mnt->mnt)) - goto out; - - /* - * If this is an attached mount make sure it's located in the callers - * mount namespace. If it's not don't let the caller interact with it. - * - * If this mount doesn't have a parent it's most often simply a - * detached mount with an anonymous mount namespace. IOW, something - * that's simply not attached yet. But there are apparently also users - * that do change mount properties on the rootfs itself. That obviously - * neither has a parent nor is it a detached mount so we cannot - * unconditionally check for detached mounts. - */ - if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt)) + if (!anon_ns_root(mnt) && !check_mnt(mnt)) goto out; /* @@ -5424,7 +5298,7 @@ static void statmount_mnt_basic(struct kstatmount *s) s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id; s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt); s->sm.mnt_propagation = mnt_to_propagation_flags(m); - s->sm.mnt_peer_group = IS_MNT_SHARED(m) ? m->mnt_group_id : 0; + s->sm.mnt_peer_group = m->mnt_group_id; s->sm.mnt_master = IS_MNT_SLAVE(m) ? 
m->mnt_master->mnt_group_id : 0; } @@ -6216,9 +6090,11 @@ static void __init init_mount_tree(void) if (IS_ERR(mnt)) panic("Can't create rootfs"); - ns = alloc_mnt_ns(&init_user_ns, false); + ns = alloc_mnt_ns(&init_user_ns, true); if (IS_ERR(ns)) panic("Can't allocate initial namespace"); + ns->seq = atomic64_inc_return(&mnt_ns_seq); + ns->ns.inum = PROC_MNT_INIT_INO; m = real_mount(mnt); ns->root = m; ns->nr_mounts = 1; @@ -6228,7 +6104,6 @@ static void __init init_mount_tree(void) root.mnt = mnt; root.dentry = mnt->mnt_root; - mnt->mnt_flags |= MNT_LOCKED; set_fs_pwd(current->fs, &root); set_fs_root(current->fs, &root); @@ -6276,11 +6151,11 @@ void put_mnt_ns(struct mnt_namespace *ns) if (!refcount_dec_and_test(&ns->ns.count)) return; namespace_lock(); + emptied_ns = ns; lock_mount_hash(); umount_tree(ns->root, 0); unlock_mount_hash(); namespace_unlock(); - free_mnt_ns(ns); } struct vfsmount *kern_mount(struct file_system_type *type) diff --git a/fs/nfs/blocklayout/rpc_pipefs.c b/fs/nfs/blocklayout/rpc_pipefs.c index d8d50a88de04..d526f5ba7887 100644 --- a/fs/nfs/blocklayout/rpc_pipefs.c +++ b/fs/nfs/blocklayout/rpc_pipefs.c @@ -141,24 +141,18 @@ static const struct rpc_pipe_ops bl_upcall_ops = { .destroy_msg = bl_pipe_destroy_msg, }; -static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb, +static int nfs4blocklayout_register_sb(struct super_block *sb, struct rpc_pipe *pipe) { - struct dentry *dir, *dentry; + struct dentry *dir; + int err; dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME); if (dir == NULL) - return ERR_PTR(-ENOENT); - dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe); + return -ENOENT; + err = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe); dput(dir); - return dentry; -} - -static void nfs4blocklayout_unregister_sb(struct super_block *sb, - struct rpc_pipe *pipe) -{ - if (pipe->dentry) - rpc_unlink(pipe->dentry); + return err; } static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, @@ -167,7 +161,6 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, struct super_block *sb = ptr; struct net *net = sb->s_fs_info; struct nfs_net *nn = net_generic(net, nfs_net_id); - struct dentry *dentry; int ret = 0; if (!try_module_get(THIS_MODULE)) @@ -180,16 +173,10 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, switch (event) { case RPC_PIPEFS_MOUNT: - dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe); - if (IS_ERR(dentry)) { - ret = PTR_ERR(dentry); - break; - } - nn->bl_device_pipe->dentry = dentry; + ret = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe); break; case RPC_PIPEFS_UMOUNT: - if (nn->bl_device_pipe->dentry) - nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe); + rpc_unlink(nn->bl_device_pipe); break; default: ret = -ENOTSUPP; @@ -203,18 +190,17 @@ static struct notifier_block nfs4blocklayout_block = { .notifier_call = rpc_pipefs_event, }; -static struct dentry *nfs4blocklayout_register_net(struct net *net, - struct rpc_pipe *pipe) +static int nfs4blocklayout_register_net(struct net *net, struct rpc_pipe *pipe) { struct super_block *pipefs_sb; - struct dentry *dentry; + int ret; pipefs_sb = rpc_get_sb_net(net); if (!pipefs_sb) - return NULL; - dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe); + return 0; + ret = nfs4blocklayout_register_sb(pipefs_sb, pipe); rpc_put_sb_net(net); - return dentry; + return ret; } static void nfs4blocklayout_unregister_net(struct net *net, @@ -224,7 +210,7 @@ static void nfs4blocklayout_unregister_net(struct net 
*net, pipefs_sb = rpc_get_sb_net(net); if (pipefs_sb) { - nfs4blocklayout_unregister_sb(pipefs_sb, pipe); + rpc_unlink(pipe); rpc_put_sb_net(net); } } @@ -232,20 +218,17 @@ static void nfs4blocklayout_unregister_net(struct net *net, static int nfs4blocklayout_net_init(struct net *net) { struct nfs_net *nn = net_generic(net, nfs_net_id); - struct dentry *dentry; + int err; mutex_init(&nn->bl_mutex); init_waitqueue_head(&nn->bl_wq); nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); if (IS_ERR(nn->bl_device_pipe)) return PTR_ERR(nn->bl_device_pipe); - dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe); - if (IS_ERR(dentry)) { + err = nfs4blocklayout_register_net(net, nn->bl_device_pipe); + if (unlikely(err)) rpc_destroy_pipe_data(nn->bl_device_pipe); - return PTR_ERR(dentry); - } - nn->bl_device_pipe->dentry = dentry; - return 0; + return err; } static void nfs4blocklayout_net_exit(struct net *net) diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 033feeab8c34..86e36c630f09 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -207,24 +207,25 @@ nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe EXPORT_SYMBOL_GPL(nfs_file_splice_read); int -nfs_file_mmap(struct file *file, struct vm_area_struct *vma) +nfs_file_mmap_prepare(struct vm_area_desc *desc) { + struct file *file = desc->file; struct inode *inode = file_inode(file); int status; dprintk("NFS: mmap(%pD2)\n", file); - /* Note: generic_file_mmap() returns ENOSYS on nommu systems + /* Note: generic_file_mmap_prepare() returns ENOSYS on nommu systems * so we call that before revalidating the mapping */ - status = generic_file_mmap(file, vma); + status = generic_file_mmap_prepare(desc); if (!status) { - vma->vm_ops = &nfs_file_vm_ops; + desc->vm_ops = &nfs_file_vm_ops; status = nfs_revalidate_mapping(inode, file->f_mapping); } return status; } -EXPORT_SYMBOL_GPL(nfs_file_mmap); +EXPORT_SYMBOL_GPL(nfs_file_mmap_prepare); /* * Flush any dirty pages for this process, and check for write errors. @@ -342,12 +343,14 @@ static bool nfs_want_read_modify_write(struct file *file, struct folio *folio, * If the writer ends up delaying the write, the writer needs to * increment the page use counts until he is done with the page. 
*/ -static int nfs_write_begin(struct file *file, struct address_space *mapping, +static int nfs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { fgf_t fgp = FGP_WRITEBEGIN; struct folio *folio; + struct file *file = iocb->ki_filp; int once_thru = 0; int ret; @@ -377,10 +380,12 @@ start: return ret; } -static int nfs_write_end(struct file *file, struct address_space *mapping, +static int nfs_write_end(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { + struct file *file = iocb->ki_filp; struct nfs_open_context *ctx = nfs_file_open_context(file); unsigned offset = offset_in_folio(folio, pos); int status; @@ -899,7 +904,7 @@ const struct file_operations nfs_file_operations = { .llseek = nfs_file_llseek, .read_iter = nfs_file_read, .write_iter = nfs_file_write, - .mmap = nfs_file_mmap, + .mmap_prepare = nfs_file_mmap_prepare, .open = nfs_file_open, .flush = nfs_file_flush, .release = nfs_file_release, diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 69c2c10ee658..26551ff09a52 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -432,7 +432,7 @@ loff_t nfs_file_llseek(struct file *, loff_t, int); ssize_t nfs_file_read(struct kiocb *, struct iov_iter *); ssize_t nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); -int nfs_file_mmap(struct file *, struct vm_area_struct *); +int nfs_file_mmap_prepare(struct vm_area_desc *); ssize_t nfs_file_write(struct kiocb *, struct iov_iter *); int nfs_file_release(struct inode *, struct file *); int nfs_lock(struct file *, int, struct file_lock *); diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 5e9d66f3466c..5c749b6117bb 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -456,7 +456,7 @@ static int nfs4_setlease(struct file *file, int arg, struct file_lease **lease, const struct file_operations nfs4_file_operations = { .read_iter = nfs_file_read, .write_iter = nfs_file_write, - .mmap = nfs_file_mmap, + .mmap_prepare = nfs_file_mmap_prepare, .open = nfs4_file_open, .flush = nfs4_file_flush, .release = nfs_file_release, diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 25a7c771cfd8..00932500fce4 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -424,26 +424,16 @@ static void nfs_idmap_pipe_destroy(struct dentry *dir, struct rpc_pipe_dir_object *pdo) { struct idmap *idmap = pdo->pdo_data; - struct rpc_pipe *pipe = idmap->idmap_pipe; - if (pipe->dentry) { - rpc_unlink(pipe->dentry); - pipe->dentry = NULL; - } + rpc_unlink(idmap->idmap_pipe); } static int nfs_idmap_pipe_create(struct dentry *dir, struct rpc_pipe_dir_object *pdo) { struct idmap *idmap = pdo->pdo_data; - struct rpc_pipe *pipe = idmap->idmap_pipe; - struct dentry *dentry; - dentry = rpc_mkpipe_dentry(dir, "idmap", idmap, pipe); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - pipe->dentry = dentry; - return 0; + return rpc_mkpipe_dentry(dir, "idmap", idmap, idmap->idmap_pipe); } static const struct rpc_pipe_dir_object_ops nfs_idmap_pipe_dir_object_ops = { diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 91b5503b6f74..72dee6f3050e 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1183,7 +1183,7 @@ static int nfs_set_super(struct super_block *s, struct fs_context *fc) struct nfs_server *server = fc->s_fs_info; int ret; - s->s_d_op = server->nfs_client->rpc_ops->dentry_ops; + set_default_d_op(s, 
server->nfs_client->rpc_ops->dentry_ops); ret = set_anon_super(s, server); if (ret == 0) server->s_dev = s->s_dev; diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index 08a20e5bcf7f..19078a043e85 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -178,11 +178,13 @@ nfsd4_block_proc_layoutcommit(struct inode *inode, { struct iomap *iomaps; int nr_iomaps; + __be32 nfserr; - nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout, - lcp->lc_up_len, &iomaps, i_blocksize(inode)); - if (nr_iomaps < 0) - return nfserrno(nr_iomaps); + nfserr = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout, + lcp->lc_up_len, &iomaps, &nr_iomaps, + i_blocksize(inode)); + if (nfserr != nfs_ok) + return nfserr; return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps); } @@ -316,11 +318,13 @@ nfsd4_scsi_proc_layoutcommit(struct inode *inode, { struct iomap *iomaps; int nr_iomaps; + __be32 nfserr; - nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout, - lcp->lc_up_len, &iomaps, i_blocksize(inode)); - if (nr_iomaps < 0) - return nfserrno(nr_iomaps); + nfserr = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout, + lcp->lc_up_len, &iomaps, &nr_iomaps, + i_blocksize(inode)); + if (nfserr != nfs_ok) + return nfserr; return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps); } diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c index ce78f74715ee..bcf21fde9120 100644 --- a/fs/nfsd/blocklayoutxdr.c +++ b/fs/nfsd/blocklayoutxdr.c @@ -112,35 +112,46 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr, return 0; } -int +/** + * nfsd4_block_decode_layoutupdate - decode the block layout extent array + * @p: pointer to the xdr data + * @len: number of bytes to decode + * @iomapp: pointer to store the decoded extent array + * @nr_iomapsp: pointer to store the number of extents + * @block_size: alignment of extent offset and length + * + * This function decodes the opaque field of the layoutupdate4 structure + * in a layoutcommit request for the block layout driver. The field is + * actually an array of extents sent by the client. It also checks that + * the file offset, storage offset and length of each extent are aligned + * by @block_size. 
+ * + * Return values: + * %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid + * %nfserr_bad_xdr: The encoded array in @p is invalid + * %nfserr_inval: An unaligned extent found + * %nfserr_delay: Failed to allocate memory for @iomapp + */ +__be32 nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, - u32 block_size) + int *nr_iomapsp, u32 block_size) { struct iomap *iomaps; u32 nr_iomaps, i; - if (len < sizeof(u32)) { - dprintk("%s: extent array too small: %u\n", __func__, len); - return -EINVAL; - } + if (len < sizeof(u32)) + return nfserr_bad_xdr; len -= sizeof(u32); - if (len % PNFS_BLOCK_EXTENT_SIZE) { - dprintk("%s: extent array invalid: %u\n", __func__, len); - return -EINVAL; - } + if (len % PNFS_BLOCK_EXTENT_SIZE) + return nfserr_bad_xdr; nr_iomaps = be32_to_cpup(p++); - if (nr_iomaps != len / PNFS_BLOCK_EXTENT_SIZE) { - dprintk("%s: extent array size mismatch: %u/%u\n", - __func__, len, nr_iomaps); - return -EINVAL; - } + if (nr_iomaps != len / PNFS_BLOCK_EXTENT_SIZE) + return nfserr_bad_xdr; iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL); - if (!iomaps) { - dprintk("%s: failed to allocate extent array\n", __func__); - return -ENOMEM; - } + if (!iomaps) + return nfserr_delay; for (i = 0; i < nr_iomaps; i++) { struct pnfs_block_extent bex; @@ -150,26 +161,18 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, p = xdr_decode_hyper(p, &bex.foff); if (bex.foff & (block_size - 1)) { - dprintk("%s: unaligned offset 0x%llx\n", - __func__, bex.foff); goto fail; } p = xdr_decode_hyper(p, &bex.len); if (bex.len & (block_size - 1)) { - dprintk("%s: unaligned length 0x%llx\n", - __func__, bex.foff); goto fail; } p = xdr_decode_hyper(p, &bex.soff); if (bex.soff & (block_size - 1)) { - dprintk("%s: unaligned disk offset 0x%llx\n", - __func__, bex.soff); goto fail; } bex.es = be32_to_cpup(p++); if (bex.es != PNFS_BLOCK_READWRITE_DATA) { - dprintk("%s: incorrect extent state %d\n", - __func__, bex.es); goto fail; } @@ -178,59 +181,71 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, } *iomapp = iomaps; - return nr_iomaps; + *nr_iomapsp = nr_iomaps; + return nfs_ok; fail: kfree(iomaps); - return -EINVAL; + return nfserr_inval; } -int +/** + * nfsd4_scsi_decode_layoutupdate - decode the scsi layout extent array + * @p: pointer to the xdr data + * @len: number of bytes to decode + * @iomapp: pointer to store the decoded extent array + * @nr_iomapsp: pointer to store the number of extents + * @block_size: alignment of extent offset and length + * + * This function decodes the opaque field of the layoutupdate4 structure + * in a layoutcommit request for the scsi layout driver. The field is + * actually an array of extents sent by the client. It also checks that + * the offset and length of each extent are aligned by @block_size. 
+ * + * Return values: + * %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid + * %nfserr_bad_xdr: The encoded array in @p is invalid + * %nfserr_inval: An unaligned extent found + * %nfserr_delay: Failed to allocate memory for @iomapp + */ +__be32 nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, - u32 block_size) + int *nr_iomapsp, u32 block_size) { struct iomap *iomaps; u32 nr_iomaps, expected, i; - if (len < sizeof(u32)) { - dprintk("%s: extent array too small: %u\n", __func__, len); - return -EINVAL; - } + if (len < sizeof(u32)) + return nfserr_bad_xdr; nr_iomaps = be32_to_cpup(p++); expected = sizeof(__be32) + nr_iomaps * PNFS_SCSI_RANGE_SIZE; - if (len != expected) { - dprintk("%s: extent array size mismatch: %u/%u\n", - __func__, len, expected); - return -EINVAL; - } + if (len != expected) + return nfserr_bad_xdr; iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL); - if (!iomaps) { - dprintk("%s: failed to allocate extent array\n", __func__); - return -ENOMEM; - } + if (!iomaps) + return nfserr_delay; for (i = 0; i < nr_iomaps; i++) { u64 val; p = xdr_decode_hyper(p, &val); if (val & (block_size - 1)) { - dprintk("%s: unaligned offset 0x%llx\n", __func__, val); goto fail; } iomaps[i].offset = val; p = xdr_decode_hyper(p, &val); if (val & (block_size - 1)) { - dprintk("%s: unaligned length 0x%llx\n", __func__, val); goto fail; } iomaps[i].length = val; } *iomapp = iomaps; - return nr_iomaps; + *nr_iomapsp = nr_iomaps; + return nfs_ok; fail: kfree(iomaps); - return -EINVAL; + return nfserr_inval; } diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h index 4e28ac8f1127..15b3569f3d9a 100644 --- a/fs/nfsd/blocklayoutxdr.h +++ b/fs/nfsd/blocklayoutxdr.h @@ -54,9 +54,9 @@ __be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr, const struct nfsd4_getdeviceinfo *gdp); __be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr, const struct nfsd4_layoutget *lgp); -int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, - u32 block_size); -int nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, - u32 block_size); +__be32 nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, + struct iomap **iomapp, int *nr_iomapsp, u32 block_size); +__be32 nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, + struct iomap **iomapp, int *nr_iomapsp, u32 block_size); #endif /* _NFSD_BLOCKLAYOUTXDR_H */ diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 88ae410b4113..cadfc2bae60e 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -82,8 +82,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) int len; struct auth_domain *dom = NULL; int err; - int fsidtype; - char *ep; + u8 fsidtype; struct svc_expkey key; struct svc_expkey *ek = NULL; @@ -109,10 +108,9 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) err = -EINVAL; if (qword_get(&mesg, buf, PAGE_SIZE) <= 0) goto out; - fsidtype = simple_strtoul(buf, &ep, 10); - if (*ep) + if (kstrtou8(buf, 10, &fsidtype)) goto out; - dprintk("found fsidtype %d\n", fsidtype); + dprintk("found fsidtype %u\n", fsidtype); if (key_len(fsidtype)==0) /* invalid type */ goto out; if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0) diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h index 4d92b99c1ffd..b9c0adb3ce09 100644 --- a/fs/nfsd/export.h +++ b/fs/nfsd/export.h @@ -88,7 +88,7 @@ struct svc_expkey { struct cache_head h; struct auth_domain * ek_client; - int ek_fsidtype; + u8 ek_fsidtype; u32 ek_fsid[6]; struct path ek_path; diff 
--git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index e108b6c705b4..732abf6b92a5 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -372,7 +372,7 @@ nfsd_file_put(struct nfsd_file *nf) /** * nfsd_file_put_local - put nfsd_file reference and arm nfsd_net_put in caller - * @nf: nfsd_file of which to put the reference + * @pnf: nfsd_file of which to put the reference * * First save the associated net to return to caller, then put * the reference of the nfsd_file. diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c index 80d9ff6608a7..4f6468eb2adf 100644 --- a/fs/nfsd/localio.c +++ b/fs/nfsd/localio.c @@ -32,7 +32,7 @@ * @rpc_clnt: rpc_clnt that the client established * @cred: cred that the client established * @nfs_fh: filehandle to lookup - * @nfp: place to find the nfsd_file, or store it if it was non-NULL + * @pnf: place to find the nfsd_file, or store it if it was non-NULL * @fmode: fmode_t to use for open * * This function maps a local fh to a path on a local filesystem. diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index a817d8485d21..b6d03e1ef5f7 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -561,7 +561,7 @@ static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp, buf->pages = rqstp->rq_next_page; rqstp->rq_next_page += (buf->buflen + PAGE_SIZE - 1) >> PAGE_SHIFT; - xdr_init_encode_pages(xdr, buf, buf->pages, NULL); + xdr_init_encode_pages(xdr, buf); } /* diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 290271ac4245..aea905fcaf87 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -65,7 +65,7 @@ nfsd4_alloc_devid_map(const struct svc_fh *fhp) return; map->fsid_type = fh->fh_fsid_type; - memcpy(&map->fsid, fh->fh_fsid, fsid_len); + memcpy(&map->fsid, fh_fsid(fh), fsid_len); spin_lock(&nfsd_devid_lock); if (fhp->fh_export->ex_devid_map) @@ -75,7 +75,7 @@ nfsd4_alloc_devid_map(const struct svc_fh *fhp) list_for_each_entry(old, &nfsd_devid_hash[i], hash) { if (old->fsid_type != fh->fh_fsid_type) continue; - if (memcmp(old->fsid, fh->fh_fsid, + if (memcmp(old->fsid, fh_fsid(fh), key_len(old->fsid_type))) continue; diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index f13abbb13b38..71b428efcbb5 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1917,13 +1917,6 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd42_write_res *result; __be32 status; - /* - * Currently, async COPY is not reliable. Force all COPY - * requests to be synchronous to avoid client application - * hangs waiting for COPY completion. - */ - nfsd4_copy_set_sync(copy, true); - result = ©->cp_res; nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn); @@ -2842,20 +2835,10 @@ nfsd4_proc_compound(struct svc_rqst *rqstp) rqstp->rq_lease_breaker = (void **)&cstate->clp; - trace_nfsd_compound(rqstp, args->tag, args->taglen, args->client_opcnt); + trace_nfsd_compound(rqstp, args->tag, args->taglen, args->opcnt); while (!status && resp->opcnt < args->opcnt) { op = &args->ops[resp->opcnt++]; - if (unlikely(resp->opcnt == NFSD_MAX_OPS_PER_COMPOUND)) { - /* If there are still more operations to process, - * stop here and report NFS4ERR_RESOURCE. 
*/ - if (cstate->minorversion == 0 && - args->client_opcnt > resp->opcnt) { - op->status = nfserr_resource; - goto encode_op; - } - } - /* * The XDR decode routines may have pre-set op->status; * for example, if there is a miscellaneous XDR error @@ -2932,7 +2915,7 @@ encode_op: status = op->status; } - trace_nfsd_compound_status(args->client_opcnt, resp->opcnt, + trace_nfsd_compound_status(args->opcnt, resp->opcnt, status, nfsd4_op_name(op->opnum)); nfsd4_cstate_clear_replay(cstate); diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 82785db730d9..2231192ec33f 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -950,38 +950,32 @@ static const struct rpc_pipe_ops cld_upcall_ops = { .destroy_msg = cld_pipe_destroy_msg, }; -static struct dentry * +static int nfsd4_cld_register_sb(struct super_block *sb, struct rpc_pipe *pipe) { - struct dentry *dir, *dentry; + struct dentry *dir; + int err; dir = rpc_d_lookup_sb(sb, NFSD_PIPE_DIR); if (dir == NULL) - return ERR_PTR(-ENOENT); - dentry = rpc_mkpipe_dentry(dir, NFSD_CLD_PIPE, NULL, pipe); + return -ENOENT; + err = rpc_mkpipe_dentry(dir, NFSD_CLD_PIPE, NULL, pipe); dput(dir); - return dentry; + return err; } -static void -nfsd4_cld_unregister_sb(struct rpc_pipe *pipe) -{ - if (pipe->dentry) - rpc_unlink(pipe->dentry); -} - -static struct dentry * +static int nfsd4_cld_register_net(struct net *net, struct rpc_pipe *pipe) { struct super_block *sb; - struct dentry *dentry; + int err; sb = rpc_get_sb_net(net); if (!sb) - return NULL; - dentry = nfsd4_cld_register_sb(sb, pipe); + return 0; + err = nfsd4_cld_register_sb(sb, pipe); rpc_put_sb_net(net); - return dentry; + return err; } static void @@ -991,7 +985,7 @@ nfsd4_cld_unregister_net(struct net *net, struct rpc_pipe *pipe) sb = rpc_get_sb_net(net); if (sb) { - nfsd4_cld_unregister_sb(pipe); + rpc_unlink(pipe); rpc_put_sb_net(net); } } @@ -1001,7 +995,6 @@ static int __nfsd4_init_cld_pipe(struct net *net) { int ret; - struct dentry *dentry; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct cld_net *cn; @@ -1022,13 +1015,10 @@ __nfsd4_init_cld_pipe(struct net *net) spin_lock_init(&cn->cn_lock); INIT_LIST_HEAD(&cn->cn_list); - dentry = nfsd4_cld_register_net(net, cn->cn_pipe); - if (IS_ERR(dentry)) { - ret = PTR_ERR(dentry); + ret = nfsd4_cld_register_net(net, cn->cn_pipe); + if (unlikely(ret)) goto err_destroy_data; - } - cn->cn_pipe->dentry = dentry; #ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING cn->cn_has_legacy = false; #endif @@ -2121,7 +2111,6 @@ rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr) struct net *net = sb->s_fs_info; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct cld_net *cn = nn->cld_net; - struct dentry *dentry; int ret = 0; if (!try_module_get(THIS_MODULE)) @@ -2134,16 +2123,10 @@ rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr) switch (event) { case RPC_PIPEFS_MOUNT: - dentry = nfsd4_cld_register_sb(sb, cn->cn_pipe); - if (IS_ERR(dentry)) { - ret = PTR_ERR(dentry); - break; - } - cn->cn_pipe->dentry = dentry; + ret = nfsd4_cld_register_sb(sb, cn->cn_pipe); break; case RPC_PIPEFS_UMOUNT: - if (cn->cn_pipe->dentry) - nfsd4_cld_unregister_sb(cn->cn_pipe); + rpc_unlink(cn->cn_pipe); break; default: ret = -ENOTSUPP; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d5694987f86f..88c347957da5 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -633,18 +633,6 @@ find_readable_file(struct nfs4_file *f) return ret; } -static struct nfsd_file * -find_rw_file(struct 
nfs4_file *f) -{ - struct nfsd_file *ret; - - spin_lock(&f->fi_lock); - ret = nfsd_file_get(f->fi_fds[O_RDWR]); - spin_unlock(&f->fi_lock); - - return ret; -} - struct nfsd_file * find_any_file(struct nfs4_file *f) { @@ -1218,15 +1206,20 @@ nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid) static void put_deleg_file(struct nfs4_file *fp) { + struct nfsd_file *rnf = NULL; struct nfsd_file *nf = NULL; spin_lock(&fp->fi_lock); - if (--fp->fi_delegees == 0) + if (--fp->fi_delegees == 0) { swap(nf, fp->fi_deleg_file); + swap(rnf, fp->fi_rdeleg_file); + } spin_unlock(&fp->fi_lock); if (nf) nfsd_file_put(nf); + if (rnf) + nfs4_file_put_access(fp, NFS4_SHARE_ACCESS_READ); } static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp) @@ -3872,7 +3865,6 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs ca->headerpadsz = 0; ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); - ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); ca->maxresp_cached = min_t(u32, ca->maxresp_cached, NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); @@ -4697,10 +4689,16 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, } status = nfs_ok; if (conf) { - old = unconf; - unhash_client_locked(old); - nfsd4_change_callback(conf, &unconf->cl_cb_conn); - } else { + if (get_client_locked(conf) == nfs_ok) { + old = unconf; + unhash_client_locked(old); + nfsd4_change_callback(conf, &unconf->cl_cb_conn); + } else { + conf = NULL; + } + } + + if (!conf) { old = find_confirmed_client_by_name(&unconf->cl_name, nn); if (old) { status = nfserr_clid_inuse; @@ -4717,10 +4715,14 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, } trace_nfsd_clid_replaced(&old->cl_clientid); } + status = get_client_locked(unconf); + if (status != nfs_ok) { + old = NULL; + goto out; + } move_to_confirmed(unconf); conf = unconf; } - get_client_locked(conf); spin_unlock(&nn->client_lock); if (conf == unconf) fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); @@ -4750,6 +4752,7 @@ static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp) INIT_LIST_HEAD(&fp->fi_clnt_odstate); fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle); fp->fi_deleg_file = NULL; + fp->fi_rdeleg_file = NULL; fp->fi_had_conflict = false; fp->fi_share_deny = 0; memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); @@ -6000,14 +6003,19 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, * "An OPEN_DELEGATE_WRITE delegation allows the client to handle, * on its own, all opens." * - * Furthermore the client can use a write delegation for most READ - * operations as well, so we require a O_RDWR file here. + * Furthermore, section 9.1.2 says: + * + * "In the case of READ, the server may perform the corresponding + * check on the access mode, or it may choose to allow READ for + * OPEN4_SHARE_ACCESS_WRITE, to accommodate clients whose WRITE + * implementation may unavoidably do reads (e.g., due to buffer + * cache constraints)." * - * Offer a write delegation in the case of a BOTH open, and ensure - * we get the O_RDWR descriptor. + * We choose to offer a write delegation for OPEN with the + * OPEN4_SHARE_ACCESS_WRITE access mode to accommodate such clients. */ - if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) { - nf = find_rw_file(fp); + if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { + nf = find_writeable_file(fp); dl_type = deleg_ts ? 
OPEN_DELEGATE_WRITE_ATTRS_DELEG : OPEN_DELEGATE_WRITE; } @@ -6138,7 +6146,7 @@ static bool nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh, struct kstat *stat) { - struct nfsd_file *nf = find_rw_file(dp->dl_stid.sc_file); + struct nfsd_file *nf = find_writeable_file(dp->dl_stid.sc_file); struct path path; int rc; @@ -6157,6 +6165,34 @@ nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh, } /* + * Add NFS4_SHARE_ACCESS_READ to the write delegation granted on OPEN + * with NFS4_SHARE_ACCESS_WRITE by allocating separate nfsd_file and + * struct file to be used for read with delegation stateid. + * + */ +static bool +nfsd4_add_rdaccess_to_wrdeleg(struct svc_rqst *rqstp, struct nfsd4_open *open, + struct svc_fh *fh, struct nfs4_ol_stateid *stp) +{ + struct nfs4_file *fp; + struct nfsd_file *nf = NULL; + + if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == + NFS4_SHARE_ACCESS_WRITE) { + if (nfsd_file_acquire_opened(rqstp, fh, NFSD_MAY_READ, NULL, &nf)) + return (false); + fp = stp->st_stid.sc_file; + spin_lock(&fp->fi_lock); + __nfs4_file_get_access(fp, NFS4_SHARE_ACCESS_READ); + fp = stp->st_stid.sc_file; + fp->fi_fds[O_RDONLY] = nf; + fp->fi_rdeleg_file = nf; + spin_unlock(&fp->fi_lock); + } + return true; +} + +/* * The Linux NFS server does not offer write delegations to NFSv4.0 * clients in order to avoid conflicts between write delegations and * GETATTRs requesting CHANGE or SIZE attributes. @@ -6181,8 +6217,9 @@ nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh, * open or lock state. */ static void -nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, - struct svc_fh *currentfh) +nfs4_open_delegation(struct svc_rqst *rqstp, struct nfsd4_open *open, + struct nfs4_ol_stateid *stp, struct svc_fh *currentfh, + struct svc_fh *fh) { struct nfs4_openowner *oo = openowner(stp->st_stateowner); bool deleg_ts = nfsd4_want_deleg_timestamps(open); @@ -6227,7 +6264,8 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { - if (!nfs4_delegation_stat(dp, currentfh, &stat)) { + if (!nfsd4_add_rdaccess_to_wrdeleg(rqstp, open, fh, stp) || + !nfs4_delegation_stat(dp, currentfh, &stat)) { nfs4_put_stid(&dp->dl_stid); destroy_delegation(dp); goto out_no_deleg; @@ -6322,6 +6360,20 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf status = nfs4_check_deleg(cl, open, &dp); if (status) goto out; + if (dp && nfsd4_is_deleg_cur(open) && + (dp->dl_stid.sc_file != fp)) { + /* + * RFC8881 section 8.2.4 mandates the server to return + * NFS4ERR_BAD_STATEID if the selected table entry does + * not match the current filehandle. However returning + * NFS4ERR_BAD_STATEID in the OPEN can cause the client + * to repeatedly retry the operation with the same + * stateid, since the stateid itself is valid. To avoid + * this situation NFSD returns NFS4ERR_INVAL instead. + */ + status = nfserr_inval; + goto out; + } stp = nfsd4_find_and_lock_existing_open(fp, open); } else { open->op_file = NULL; @@ -6383,7 +6435,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf * Attempt to hand out a delegation. No error return, because the * OPEN succeeds even if we fail. 
*/ - nfs4_open_delegation(open, stp, &resp->cstate.current_fh); + nfs4_open_delegation(rqstp, open, stp, + &resp->cstate.current_fh, current_fh); /* * If there is an existing open stateid, it must be updated and @@ -7076,7 +7129,7 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, return_revoked = true; if (typemask & SC_TYPE_DELEG) /* Always allow REVOKED for DELEG so we can - * retturn the appropriate error. + * return the appropriate error. */ statusmask |= SC_STATUS_REVOKED; @@ -7119,10 +7172,6 @@ nfs4_find_file(struct nfs4_stid *s, int flags) switch (s->sc_type) { case SC_TYPE_DELEG: - spin_lock(&s->sc_file->fi_lock); - ret = nfsd_file_get(s->sc_file->fi_deleg_file); - spin_unlock(&s->sc_file->fi_lock); - break; case SC_TYPE_OPEN: case SC_TYPE_LOCK: if (flags & RD_STATE) diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 3afcdbed6e14..ea91bad4eee2 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2500,10 +2500,8 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) if (xdr_stream_decode_u32(argp->xdr, &argp->minorversion) < 0) return false; - if (xdr_stream_decode_u32(argp->xdr, &argp->client_opcnt) < 0) + if (xdr_stream_decode_u32(argp->xdr, &argp->opcnt) < 0) return false; - argp->opcnt = min_t(u32, argp->client_opcnt, - NFSD_MAX_OPS_PER_COMPOUND); if (argp->opcnt > ARRAY_SIZE(argp->iops)) { argp->ops = vcalloc(argp->opcnt, sizeof(*argp->ops)); diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 6a42cc7a845a..2909d70de559 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1436,7 +1436,7 @@ unsigned int nfsd_net_id; static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb, struct netlink_callback *cb, - struct nfsd_genl_rqstp *rqstp) + struct nfsd_genl_rqstp *genl_rqstp) { void *hdr; u32 i; @@ -1446,22 +1446,22 @@ static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb, if (!hdr) return -ENOBUFS; - if (nla_put_be32(skb, NFSD_A_RPC_STATUS_XID, rqstp->rq_xid) || - nla_put_u32(skb, NFSD_A_RPC_STATUS_FLAGS, rqstp->rq_flags) || - nla_put_u32(skb, NFSD_A_RPC_STATUS_PROG, rqstp->rq_prog) || - nla_put_u32(skb, NFSD_A_RPC_STATUS_PROC, rqstp->rq_proc) || - nla_put_u8(skb, NFSD_A_RPC_STATUS_VERSION, rqstp->rq_vers) || + if (nla_put_be32(skb, NFSD_A_RPC_STATUS_XID, genl_rqstp->rq_xid) || + nla_put_u32(skb, NFSD_A_RPC_STATUS_FLAGS, genl_rqstp->rq_flags) || + nla_put_u32(skb, NFSD_A_RPC_STATUS_PROG, genl_rqstp->rq_prog) || + nla_put_u32(skb, NFSD_A_RPC_STATUS_PROC, genl_rqstp->rq_proc) || + nla_put_u8(skb, NFSD_A_RPC_STATUS_VERSION, genl_rqstp->rq_vers) || nla_put_s64(skb, NFSD_A_RPC_STATUS_SERVICE_TIME, - ktime_to_us(rqstp->rq_stime), + ktime_to_us(genl_rqstp->rq_stime), NFSD_A_RPC_STATUS_PAD)) return -ENOBUFS; - switch (rqstp->rq_saddr.sa_family) { + switch (genl_rqstp->rq_saddr.sa_family) { case AF_INET: { const struct sockaddr_in *s_in, *d_in; - s_in = (const struct sockaddr_in *)&rqstp->rq_saddr; - d_in = (const struct sockaddr_in *)&rqstp->rq_daddr; + s_in = (const struct sockaddr_in *)&genl_rqstp->rq_saddr; + d_in = (const struct sockaddr_in *)&genl_rqstp->rq_daddr; if (nla_put_in_addr(skb, NFSD_A_RPC_STATUS_SADDR4, s_in->sin_addr.s_addr) || nla_put_in_addr(skb, NFSD_A_RPC_STATUS_DADDR4, @@ -1476,8 +1476,8 @@ static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb, case AF_INET6: { const struct sockaddr_in6 *s_in, *d_in; - s_in = (const struct sockaddr_in6 *)&rqstp->rq_saddr; - d_in = (const struct sockaddr_in6 *)&rqstp->rq_daddr; + s_in = (const struct sockaddr_in6 *)&genl_rqstp->rq_saddr; + d_in = (const struct sockaddr_in6 
*)&genl_rqstp->rq_daddr; if (nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_SADDR6, &s_in->sin6_addr) || nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_DADDR6, @@ -1491,9 +1491,9 @@ static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb, } } - for (i = 0; i < rqstp->rq_opcnt; i++) + for (i = 0; i < genl_rqstp->rq_opcnt; i++) if (nla_put_u32(skb, NFSD_A_RPC_STATUS_COMPOUND_OPS, - rqstp->rq_opnum[i])) + genl_rqstp->rq_opnum[i])) return -ENOBUFS; genlmsg_end(skb, hdr); @@ -1569,7 +1569,8 @@ int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb, int j; args = rqstp->rq_argp; - genl_rqstp.rq_opcnt = args->opcnt; + genl_rqstp.rq_opcnt = min_t(u32, args->opcnt, + ARRAY_SIZE(genl_rqstp.rq_opnum)); for (j = 0; j < genl_rqstp.rq_opcnt; j++) genl_rqstp.rq_opnum[j] = args->ops[j].opnum; diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 1bfd0b4e9af7..1cd0bed57bc2 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -57,9 +57,6 @@ struct readdir_cd { __be32 err; /* 0, nfserr, or nfserr_eof */ }; -/* Maximum number of operations per session compound */ -#define NFSD_MAX_OPS_PER_COMPOUND 50 - struct nfsd_genl_rqstp { struct sockaddr rq_daddr; struct sockaddr rq_saddr; @@ -72,7 +69,7 @@ struct nfsd_genl_rqstp { /* NFSv4 compound */ u32 rq_opcnt; - u32 rq_opnum[NFSD_MAX_OPS_PER_COMPOUND]; + u32 rq_opnum[16]; }; extern struct svc_program nfsd_programs[]; @@ -283,6 +280,7 @@ void nfsd_lockd_shutdown(void); #define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN) #define nfserr_locked cpu_to_be32(NFSERR_LOCKED) #define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC) +#define nfserr_delay cpu_to_be32(NFS4ERR_DELAY) #define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE) #define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT) #define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST) diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index aef474f1b84b..74cf1f4de174 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -172,6 +172,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net, if (len == 0) return error; if (fh->fh_fsid_type == FSID_MAJOR_MINOR) { + u32 *fsid = fh_fsid(fh); + /* deprecated, convert to type 3 */ len = key_len(FSID_ENCODE_DEV)/4; fh->fh_fsid_type = FSID_ENCODE_DEV; @@ -181,17 +183,17 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net, * confuses sparse, so we must use __force here to * keep it from complaining. */ - fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl((__force __be32)fh->fh_fsid[0]), - ntohl((__force __be32)fh->fh_fsid[1]))); - fh->fh_fsid[1] = fh->fh_fsid[2]; + fsid[0] = new_encode_dev(MKDEV(ntohl((__force __be32)fsid[0]), + ntohl((__force __be32)fsid[1]))); + fsid[1] = fsid[2]; } data_left -= len; if (data_left < 0) return error; exp = rqst_exp_find(rqstp ? &rqstp->rq_chandle : NULL, net, client, gssclient, - fh->fh_fsid_type, fh->fh_fsid); - fid = (struct fid *)(fh->fh_fsid + len); + fh->fh_fsid_type, fh_fsid(fh)); + fid = (struct fid *)(fh_fsid(fh) + len); error = nfserr_stale; if (IS_ERR(exp)) { @@ -463,7 +465,7 @@ static void _fh_update(struct svc_fh *fhp, struct svc_export *exp, { if (dentry != exp->ex_path.dentry) { struct fid *fid = (struct fid *) - (fhp->fh_handle.fh_fsid + fhp->fh_handle.fh_size/4 - 1); + (fh_fsid(&fhp->fh_handle) + fhp->fh_handle.fh_size/4 - 1); int maxsize = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4; int fh_flags = (exp->ex_flags & NFSEXP_NOSUBTREECHECK) ? 
0 : EXPORT_FH_CONNECTABLE; @@ -614,7 +616,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, fhp->fh_handle.fh_auth_type = 0; mk_fsid(fhp->fh_handle.fh_fsid_type, - fhp->fh_handle.fh_fsid, + fh_fsid(&fhp->fh_handle), ex_dev, d_inode(exp->ex_path.dentry)->i_ino, exp->ex_fsid, exp->ex_uuid); diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h index 5103c2f4d225..1cf979722521 100644 --- a/fs/nfsd/nfsfh.h +++ b/fs/nfsd/nfsfh.h @@ -49,18 +49,19 @@ struct knfsd_fh { * Points to the current size while * building a new file handle. */ - union { - char fh_raw[NFS4_FHSIZE]; - struct { - u8 fh_version; /* == 1 */ - u8 fh_auth_type; /* deprecated */ - u8 fh_fsid_type; - u8 fh_fileid_type; - u32 fh_fsid[]; /* flexible-array member */ - }; - }; + u8 fh_raw[NFS4_FHSIZE]; }; +#define fh_version fh_raw[0] +#define fh_auth_type fh_raw[1] +#define fh_fsid_type fh_raw[2] +#define fh_fileid_type fh_raw[3] + +static inline u32 *fh_fsid(const struct knfsd_fh *fh) +{ + return (u32 *)&fh->fh_raw[4]; +} + static inline __u32 ino_t_to_u32(ino_t ino) { return (__u32) ino; @@ -260,9 +261,12 @@ static inline bool fh_match(const struct knfsd_fh *fh1, static inline bool fh_fsid_match(const struct knfsd_fh *fh1, const struct knfsd_fh *fh2) { + u32 *fsid1 = fh_fsid(fh1); + u32 *fsid2 = fh_fsid(fh2); + if (fh1->fh_fsid_type != fh2->fh_fsid_type) return false; - if (memcmp(fh1->fh_fsid, fh2->fh_fsid, key_len(fh1->fh_fsid_type)) != 0) + if (memcmp(fsid1, fsid2, key_len(fh1->fh_fsid_type)) != 0) return false; return true; } diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index c10fa8128a8a..8f71f5748c75 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -575,7 +575,7 @@ static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp, buf->pages = rqstp->rq_next_page; rqstp->rq_next_page++; - xdr_init_encode_pages(xdr, buf, buf->pages, NULL); + xdr_init_encode_pages(xdr, buf); } /* diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 1995bca158b8..8adc2550129e 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -665,6 +665,7 @@ struct nfs4_file { atomic_t fi_access[2]; u32 fi_share_deny; struct nfsd_file *fi_deleg_file; + struct nfsd_file *fi_rdeleg_file; int fi_delegees; struct knfsd_fh fi_fhandle; bool fi_had_conflict; diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h index 3c5505ef5e3a..a664fdf1161e 100644 --- a/fs/nfsd/trace.h +++ b/fs/nfsd/trace.h @@ -344,7 +344,7 @@ TRACE_EVENT(nfsd_exp_find_key, int status), TP_ARGS(key, status), TP_STRUCT__entry( - __field(int, fsidtype) + __field(u8, fsidtype) __array(u32, fsid, 6) __string(auth_domain, key->ek_client->name) __field(int, status) @@ -367,7 +367,7 @@ TRACE_EVENT(nfsd_expkey_update, TP_PROTO(const struct svc_expkey *key, const char *exp_path), TP_ARGS(key, exp_path), TP_STRUCT__entry( - __field(int, fsidtype) + __field(u8, fsidtype) __array(u32, fsid, 6) __string(auth_domain, key->ek_client->name) __string(path, exp_path) @@ -1108,7 +1108,6 @@ DEFINE_NFSD_FILE_EVENT(nfsd_file_free); DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash); DEFINE_NFSD_FILE_EVENT(nfsd_file_put); DEFINE_NFSD_FILE_EVENT(nfsd_file_closing); -DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash_and_queue); TRACE_EVENT(nfsd_file_alloc, TP_PROTO( @@ -1344,9 +1343,7 @@ DEFINE_EVENT(nfsd_file_gc_class, name, \ TP_ARGS(nf)) DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add); -DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add_disposed); DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del); -DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del_disposed); DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use); 
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback); DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced); @@ -1380,7 +1377,6 @@ DEFINE_EVENT(nfsd_file_lruwalk_class, name, \ TP_ARGS(removed, remaining)) DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_gc_removed); -DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_gc_recent); DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_shrinker_removed); TRACE_EVENT(nfsd_file_close, @@ -2103,25 +2099,6 @@ TRACE_EVENT(nfsd_ctl_maxblksize, ) ); -TRACE_EVENT(nfsd_ctl_maxconn, - TP_PROTO( - const struct net *net, - int maxconn - ), - TP_ARGS(net, maxconn), - TP_STRUCT__entry( - __field(unsigned int, netns_ino) - __field(int, maxconn) - ), - TP_fast_assign( - __entry->netns_ino = net->ns.inum; - __entry->maxconn = maxconn; - ), - TP_printk("maxconn=%d", - __entry->maxconn - ) -); - TRACE_EVENT(nfsd_ctl_time, TP_PROTO( const struct net *net, diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index cd689df2ca5d..98ab55ba3ced 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1086,10 +1086,13 @@ __be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp, { unsigned long v, total; struct iov_iter iter; - loff_t ppos = offset; + struct kiocb kiocb; ssize_t host_err; size_t len; + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = offset; + v = 0; total = *count; while (total) { @@ -1104,7 +1107,7 @@ __be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp, trace_nfsd_read_vector(rqstp, fhp, offset, *count); iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v, *count); - host_err = vfs_iter_read(file, &iter, &ppos, 0); + host_err = vfs_iocb_iter_read(file, &kiocb, &iter); return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err); } @@ -1170,15 +1173,14 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct file *file = nf->nf_file; struct super_block *sb = file_inode(file)->i_sb; + struct kiocb kiocb; struct svc_export *exp; struct iov_iter iter; errseq_t since; __be32 nfserr; int host_err; - loff_t pos = offset; unsigned long exp_op_flags = 0; unsigned int pflags = current->flags; - rwf_t flags = 0; bool restore_flags = false; unsigned int nvecs; @@ -1204,16 +1206,17 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, if (!EX_ISSYNC(exp)) stable = NFS_UNSTABLE; - + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = offset; if (stable && !fhp->fh_use_wgather) - flags |= RWF_SYNC; + kiocb.ki_flags |= IOCB_DSYNC; nvecs = xdr_buf_to_bvec(rqstp->rq_bvec, rqstp->rq_maxpages, payload); iov_iter_bvec(&iter, ITER_SOURCE, rqstp->rq_bvec, nvecs, *cnt); since = READ_ONCE(file->f_wb_err); if (verf) nfsd_copy_write_verifier(verf, nn); - host_err = vfs_iter_write(file, &iter, &pos, flags); + host_err = vfs_iocb_iter_write(file, &kiocb, &iter); if (host_err < 0) { commit_reset_write_verifier(nn, rqstp, host_err); goto out_nfserr; @@ -1864,7 +1867,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, struct svc_fh *tfhp, char *tname, int tlen) { struct dentry *fdentry, *tdentry, *odentry, *ndentry, *trap; - struct inode *fdir, *tdir; int type = S_IFDIR; __be32 err; int host_err; @@ -1880,10 +1882,8 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, goto out; fdentry = ffhp->fh_dentry; - fdir = d_inode(fdentry); tdentry = tfhp->fh_dentry; - tdir = d_inode(tdentry); err = nfserr_perm; if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen)) @@ -1944,10 +1944,10 @@ retry: } else { struct renamedata rd = { .old_mnt_idmap = 
&nop_mnt_idmap, - .old_dir = fdir, + .old_parent = fdentry, .old_dentry = odentry, .new_mnt_idmap = &nop_mnt_idmap, - .new_dir = tdir, + .new_parent = tdentry, .new_dentry = ndentry, }; int retries; diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index aa2a356da784..a23bc56051ca 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -870,7 +870,6 @@ struct nfsd4_compoundargs { char * tag; u32 taglen; u32 minorversion; - u32 client_opcnt; u32 opcnt; bool splice_ok; struct nfsd4_op *ops; diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 9b7f8e9655a2..6ca3d74be1e1 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -96,7 +96,7 @@ static void nilfs_commit_chunk(struct folio *folio, int err; nr_dirty = nilfs_page_count_clean_buffers(folio, from, to); - copied = block_write_end(NULL, mapping, pos, len, len, folio, NULL); + copied = block_write_end(pos, len, len, folio); if (pos + copied > dir->i_size) i_size_write(dir, pos + copied); if (IS_DIRSYNC(dir)) diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index 0e3fc5ba33c7..1b8d754db44d 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -125,10 +125,10 @@ static const struct vm_operations_struct nilfs_file_vm_ops = { .page_mkwrite = nilfs_page_mkwrite, }; -static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int nilfs_file_mmap_prepare(struct vm_area_desc *desc) { - file_accessed(file); - vma->vm_ops = &nilfs_file_vm_ops; + file_accessed(desc->file); + desc->vm_ops = &nilfs_file_vm_ops; return 0; } @@ -144,7 +144,7 @@ const struct file_operations nilfs_file_operations = { #ifdef CONFIG_COMPAT .compat_ioctl = nilfs_compat_ioctl, #endif /* CONFIG_COMPAT */ - .mmap = nilfs_file_mmap, + .mmap_prepare = nilfs_file_mmap_prepare, .open = generic_file_open, /* .release = nilfs_release_file, */ .fsync = nilfs_sync_file, diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 5cf7328d5360..87ddde159f0c 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -218,7 +218,8 @@ void nilfs_write_failed(struct address_space *mapping, loff_t to) } } -static int nilfs_write_begin(struct file *file, struct address_space *mapping, +static int nilfs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) @@ -237,7 +238,8 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping, return err; } -static int nilfs_write_end(struct file *file, struct address_space *mapping, +static int nilfs_write_end(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { @@ -248,7 +250,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping, nr_dirty = nilfs_page_count_clean_buffers(folio, start, start + copied); - copied = generic_write_end(file, mapping, pos, len, copied, folio, + copied = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata); nilfs_set_file_dirty(inode, nr_dirty); err = nilfs_transaction_commit(inode->i_sb); diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index a66d62a51f77..3288c3b4be9e 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -118,7 +118,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, * * Return: always 0 as success. 
*/ -int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int nilfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); @@ -136,7 +136,7 @@ int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) * Return: 0 on success, or a negative error code on failure. */ int nilfs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct nilfs_transaction_info ti; diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index cb6ed54accd7..f466daa39440 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -268,9 +268,9 @@ int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, extern int nilfs_sync_file(struct file *, loff_t, loff_t, int); /* ioctl.c */ -int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *m); +int nilfs_fileattr_get(struct dentry *dentry, struct file_kattr *m); int nilfs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); long nilfs_ioctl(struct file *, unsigned int, unsigned long); long nilfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *, struct nilfs_argv *, diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 22aecf6e2344..a9c61d0492cb 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -560,8 +560,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, if (unlikely(err)) goto failed_folio; - block_write_end(NULL, inode->i_mapping, pos, blocksize, - blocksize, folio, NULL); + block_write_end(pos, blocksize, blocksize, folio); folio_unlock(folio); folio_put(folio); diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c index b6da80c69ca6..1b5c865a0339 100644 --- a/fs/ntfs3/dir.c +++ b/fs/ntfs3/dir.c @@ -304,6 +304,9 @@ static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi, if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN)) return true; + if (fname->name_len + sizeof(struct NTFS_DE) > le16_to_cpu(e->size)) + return true; + name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name, PATH_MAX); if (name_len <= 0) { @@ -329,8 +332,7 @@ static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi, * It does additional locks/reads just to get the type of name. * Should we use additional mount option to enable branch below? */ - if (((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) || - fname->dup.ea_size) && + if (fname->dup.extend_data && ino != ni->mi.rno) { struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL); if (!IS_ERR_OR_NULL(inode)) { diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index 1e99a35691cd..c1ece707b195 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -57,6 +57,10 @@ long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg) struct inode *inode = file_inode(filp); struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ntfs_i(inode)))) + return -EINVAL; + switch (cmd) { case FITRIM: return ntfs_ioctl_fitrim(sbi, arg); @@ -81,6 +85,10 @@ int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path, struct inode *inode = d_inode(path->dentry); struct ntfs_inode *ni = ntfs_i(inode); + /* Avoid any operation if inode is bad. 
*/ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + stat->result_mask |= STATX_BTIME; stat->btime = ni->i_crtime; stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */ @@ -154,13 +162,13 @@ static int ntfs_extend_initialized_size(struct file *file, if (pos + len > new_valid) len = new_valid - pos; - err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL); + err = ntfs_write_begin(NULL, mapping, pos, len, &folio, NULL); if (err) goto out; folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom); - err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL); + err = ntfs_write_end(NULL, mapping, pos, len, len, folio, NULL); if (err < 0) goto out; pos += len; @@ -261,16 +269,21 @@ out: } /* - * ntfs_file_mmap - file_operations::mmap + * ntfs_file_mmap_prepare - file_operations::mmap_prepare */ -static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int ntfs_file_mmap_prepare(struct vm_area_desc *desc) { + struct file *file = desc->file; struct inode *inode = file_inode(file); struct ntfs_inode *ni = ntfs_i(inode); - u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT); - bool rw = vma->vm_flags & VM_WRITE; + u64 from = ((u64)desc->pgoff << PAGE_SHIFT); + bool rw = desc->vm_flags & VM_WRITE; int err; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; @@ -291,7 +304,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma) if (rw) { u64 to = min_t(loff_t, i_size_read(inode), - from + vma->vm_end - vma->vm_start); + from + desc->end - desc->start); if (is_sparsed(ni)) { /* Allocate clusters for rw map. */ @@ -310,7 +323,10 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma) } if (ni->i_valid < to) { - inode_lock(inode); + if (!inode_trylock(inode)) { + err = -EAGAIN; + goto out; + } err = ntfs_extend_initialized_size(file, ni, ni->i_valid, to); inode_unlock(inode); @@ -319,7 +335,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma) } } - err = generic_file_mmap(file, vma); + err = generic_file_mmap_prepare(desc); out: return err; } @@ -735,6 +751,10 @@ int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, umode_t mode = inode->i_mode; int err; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; @@ -795,6 +815,10 @@ static int check_read_restriction(struct inode *inode) { struct ntfs_inode *ni = ntfs_i(inode); + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; @@ -1130,6 +1154,10 @@ static int check_write_restriction(struct inode *inode) { struct ntfs_inode *ni = ntfs_i(inode); + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; @@ -1212,6 +1240,10 @@ int ntfs_file_open(struct inode *inode, struct file *file) { struct ntfs_inode *ni = ntfs_i(inode); + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; @@ -1281,6 +1313,10 @@ int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, int err; struct ntfs_inode *ni = ntfs_i(inode); + /* Avoid any operation if inode is bad. 
*/ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR); if (err) return err; @@ -1331,7 +1367,7 @@ const struct file_operations ntfs_file_operations = { #endif .splice_read = ntfs_file_splice_read, .splice_write = ntfs_file_splice_write, - .mmap = ntfs_file_mmap, + .mmap_prepare = ntfs_file_mmap_prepare, .open = ntfs_file_open, .fsync = generic_file_fsync, .fallocate = ntfs_fallocate, diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index 756e1306fe6c..8f9fe1d7a690 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -3003,8 +3003,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni, * ni_rename - Remove one name and insert new name. */ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni, - struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de, - bool *is_bad) + struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de) { int err; struct NTFS_DE *de2 = NULL; @@ -3027,8 +3026,8 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni, err = ni_add_name(new_dir_ni, ni, new_de); if (!err) { err = ni_remove_name(dir_ni, ni, de, &de2, &undo); - if (err && ni_remove_name(new_dir_ni, ni, new_de, &de2, &undo)) - *is_bad = true; + WARN_ON(err && ni_remove_name(new_dir_ni, ni, new_de, &de2, + &undo)); } /* @@ -3119,11 +3118,21 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup, } } - /* TODO: Fill reparse info. */ - dup->reparse = 0; - dup->ea_size = 0; + dup->extend_data = 0; - if (ni->ni_flags & NI_FLAG_EA) { + if (dup->fa & FILE_ATTRIBUTE_REPARSE_POINT) { + attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, + NULL); + + if (attr) { + const struct REPARSE_POINT *rp; + + rp = resident_data_ex(attr, sizeof(struct REPARSE_POINT)); + /* If ATTR_REPARSE exists 'rp' can't be NULL. */ + if (rp) + dup->extend_data = rp->ReparseTag; + } + } else if (ni->ni_flags & NI_FLAG_EA) { attr = ni_find_attr(ni, attr, &le, ATTR_EA_INFO, NULL, 0, NULL, NULL); if (attr) { @@ -3132,7 +3141,7 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup, info = resident_data_ex(attr, sizeof(struct EA_INFO)); /* If ATTR_EA_INFO exists 'info' can't be NULL. */ if (info) - dup->ea_size = info->size_pack; + dup->extend_data = info->size; } } @@ -3199,6 +3208,10 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint) if (is_bad_inode(inode) || sb_rdonly(sb)) return 0; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(sb))) return -EIO; diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c index df81f1f7330c..c7a2f191254d 100644 --- a/fs/ntfs3/fsntfs.c +++ b/fs/ntfs3/fsntfs.c @@ -905,9 +905,13 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait) void ntfs_bad_inode(struct inode *inode, const char *hint) { struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; + struct ntfs_inode *ni = ntfs_i(inode); ntfs_inode_err(inode, "%s", hint); - make_bad_inode(inode); + + /* Do not call make_bad_inode()! */ + ni->ni_bad = true; + /* Avoid recursion if bad inode is $Volume. 
*/ if (inode->i_ino != MFT_REC_VOL && !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) { diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index 0f0d27d4644a..37cbbee7fa58 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -878,6 +878,10 @@ static int ntfs_resident_writepage(struct folio *folio, struct ntfs_inode *ni = ntfs_i(inode); int ret; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; @@ -896,6 +900,10 @@ static int ntfs_writepages(struct address_space *mapping, { struct inode *inode = mapping->host; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ntfs_i(inode)))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; @@ -912,13 +920,17 @@ static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn, bh_result, create, GET_BLOCK_WRITE_BEGIN); } -int ntfs_write_begin(struct file *file, struct address_space *mapping, +int ntfs_write_begin(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, u32 len, struct folio **foliop, void **fsdata) { int err; struct inode *inode = mapping->host; struct ntfs_inode *ni = ntfs_i(inode); + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; @@ -957,7 +969,8 @@ out: /* * ntfs_write_end - Address_space_operations::write_end. */ -int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, +int ntfs_write_end(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, u32 len, u32 copied, struct folio *folio, void *fsdata) { struct inode *inode = mapping->host; @@ -989,7 +1002,7 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, folio_unlock(folio); folio_put(folio); } else { - err = generic_write_end(file, mapping, pos, len, copied, folio, + err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata); } @@ -1062,10 +1075,10 @@ int inode_read_data(struct inode *inode, void *data, size_t bytes) * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK) * for unicode string of @uni_len length. */ -static inline u32 ntfs_reparse_bytes(u32 uni_len) +static inline u32 ntfs_reparse_bytes(u32 uni_len, bool is_absolute) { /* Header + unicode string + decorated unicode string. */ - return sizeof(short) * (2 * uni_len + 4) + + return sizeof(short) * (2 * uni_len + (is_absolute ? 4 : 0)) + offsetof(struct REPARSE_DATA_BUFFER, SymbolicLinkReparseBuffer.PathBuffer); } @@ -1078,8 +1091,11 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, struct REPARSE_DATA_BUFFER *rp; __le16 *rp_name; typeof(rp->SymbolicLinkReparseBuffer) *rs; + bool is_absolute; - rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS); + is_absolute = (strlen(symname) > 1 && symname[1] == ':'); + + rp = kzalloc(ntfs_reparse_bytes(2 * size + 2, is_absolute), GFP_NOFS); if (!rp) return ERR_PTR(-ENOMEM); @@ -1094,7 +1110,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, goto out; /* err = the length of unicode name of symlink. */ - *nsize = ntfs_reparse_bytes(err); + *nsize = ntfs_reparse_bytes(err, is_absolute); if (*nsize > sbi->reparse.max_size) { err = -EFBIG; @@ -1114,7 +1130,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, /* PrintName + SubstituteName. 
*/ rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err); - rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8); + rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + (is_absolute ? 8 : 0)); rs->PrintNameLength = rs->SubstituteNameOffset; /* @@ -1122,16 +1138,18 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, * parse this path. * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE). */ - rs->Flags = 0; + rs->Flags = cpu_to_le32(is_absolute ? 0 : SYMLINK_FLAG_RELATIVE); - memmove(rp_name + err + 4, rp_name, sizeof(short) * err); + memmove(rp_name + err + (is_absolute ? 4 : 0), rp_name, sizeof(short) * err); - /* Decorate SubstituteName. */ - rp_name += err; - rp_name[0] = cpu_to_le16('\\'); - rp_name[1] = cpu_to_le16('?'); - rp_name[2] = cpu_to_le16('?'); - rp_name[3] = cpu_to_le16('\\'); + if (is_absolute) { + /* Decorate SubstituteName. */ + rp_name += err; + rp_name[0] = cpu_to_le16('\\'); + rp_name[1] = cpu_to_le16('?'); + rp_name[2] = cpu_to_le16('?'); + rp_name[3] = cpu_to_le16('\\'); + } return rp; out: @@ -1260,6 +1278,12 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir, goto out1; } + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(dir_ni))) { + err = -EINVAL; + goto out2; + } + if (unlikely(ntfs3_forced_shutdown(sb))) { err = -EIO; goto out2; @@ -1350,7 +1374,7 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir, fname->dup.a_time = std5->cr_time; fname->dup.alloc_size = fname->dup.data_size = 0; fname->dup.fa = std5->fa; - fname->dup.ea_size = fname->dup.reparse = 0; + fname->dup.extend_data = S_ISLNK(mode) ? IO_REPARSE_TAG_SYMLINK : 0; dsize = le16_to_cpu(new_de->key_size); asize = ALIGN(SIZEOF_RESIDENT + dsize, 8); @@ -1590,27 +1614,29 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir, inode->i_flags |= S_NOSEC; } - /* - * ntfs_init_acl and ntfs_save_wsl_perm update extended attribute. - * The packed size of extended attribute is stored in direntry too. - * 'fname' here points to inside new_de. - */ - err = ntfs_save_wsl_perm(inode, &fname->dup.ea_size); - if (err) - goto out6; + if (!S_ISLNK(mode)) { + /* + * ntfs_init_acl and ntfs_save_wsl_perm update extended attribute. + * The packed size of extended attribute is stored in direntry too. + * 'fname' here points to inside new_de. + */ + err = ntfs_save_wsl_perm(inode, &fname->dup.extend_data); + if (err) + goto out6; - /* - * update ea_size in file_name attribute too. - * Use ni_find_attr cause layout of MFT record may be changed - * in ntfs_init_acl and ntfs_save_wsl_perm. - */ - attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL, NULL); - if (attr) { - struct ATTR_FILE_NAME *fn; + /* + * update ea_size in file_name attribute too. + * Use ni_find_attr cause layout of MFT record may be changed + * in ntfs_init_acl and ntfs_save_wsl_perm. 
+ */ + attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL, NULL); + if (attr) { + struct ATTR_FILE_NAME *fn; - fn = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME); - if (fn) - fn->dup.ea_size = fname->dup.ea_size; + fn = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME); + if (fn) + fn->dup.extend_data = fname->dup.extend_data; + } } /* We do not need to update parent directory later */ diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c index b807744fc6a9..82c8ae56beee 100644 --- a/fs/ntfs3/namei.c +++ b/fs/ntfs3/namei.c @@ -171,6 +171,10 @@ static int ntfs_unlink(struct inode *dir, struct dentry *dentry) struct ntfs_inode *ni = ntfs_i(dir); int err; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(dir->i_sb))) return -EIO; @@ -191,6 +195,10 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir, { u32 size = strlen(symname); + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ntfs_i(dir)))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(dir->i_sb))) return -EIO; @@ -216,6 +224,10 @@ static int ntfs_rmdir(struct inode *dir, struct dentry *dentry) struct ntfs_inode *ni = ntfs_i(dir); int err; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(dir->i_sb))) return -EIO; @@ -244,7 +256,7 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir, struct ntfs_inode *ni = ntfs_i(inode); struct inode *new_inode = d_inode(new_dentry); struct NTFS_DE *de, *new_de; - bool is_same, is_bad; + bool is_same; /* * de - memory of PATH_MAX bytes: * [0-1024) - original name (dentry->d_name) @@ -256,6 +268,10 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir, 1024); static_assert(PATH_MAX >= 4 * 1024); + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(sb))) return -EIO; @@ -313,12 +329,8 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir, if (dir_ni != new_dir_ni) ni_lock_dir2(new_dir_ni); - is_bad = false; - err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de, &is_bad); - if (is_bad) { - /* Restore after failed rename failed too. */ - _ntfs_bad_inode(inode); - } else if (!err) { + err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de); + if (!err) { simple_rename_timestamp(dir, dentry, new_dir, new_dentry); mark_inode_dirty(inode); mark_inode_dirty(dir); diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h index 1ff13b6f9613..552b97905813 100644 --- a/fs/ntfs3/ntfs.h +++ b/fs/ntfs3/ntfs.h @@ -561,8 +561,7 @@ struct NTFS_DUP_INFO { __le64 alloc_size; // 0x20: Data attribute allocated size, multiple of cluster size. __le64 data_size; // 0x28: Data attribute size <= Dataalloc_size. enum FILE_ATTRIBUTE fa; // 0x30: Standard DOS attributes & more. - __le16 ea_size; // 0x34: Packed EAs. - __le16 reparse; // 0x36: Used by Reparse. + __le32 extend_data; // 0x34: Extended data. }; // 0x38 diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h index 36b8052660d5..1296e6fcc779 100644 --- a/fs/ntfs3/ntfs_fs.h +++ b/fs/ntfs3/ntfs_fs.h @@ -377,6 +377,13 @@ struct ntfs_inode { */ u8 mi_loaded; + /* + * Use this field to avoid any write(s). 
+ * If inode is bad during initialization - use make_bad_inode + * If inode is bad during operations - use this field + */ + u8 ni_bad; + union { struct ntfs_index dir; struct { @@ -577,8 +584,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni, struct NTFS_DE *de); int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni, - struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de, - bool *is_bad); + struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de); bool ni_is_dirty(struct inode *inode); @@ -702,10 +708,12 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref, int ntfs_set_size(struct inode *inode, u64 new_size); int ntfs_get_block(struct inode *inode, sector_t vbn, struct buffer_head *bh_result, int create); -int ntfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, u32 len, struct folio **foliop, void **fsdata); -int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, - u32 len, u32 copied, struct folio *folio, void *fsdata); +int ntfs_write_begin(const struct kiocb *iocb, struct address_space *mapping, + loff_t pos, u32 len, struct folio **foliop, + void **fsdata); +int ntfs_write_end(const struct kiocb *iocb, struct address_space *mapping, + loff_t pos, u32 len, u32 copied, struct folio *folio, + void *fsdata); int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc); int ntfs_sync_inode(struct inode *inode); int inode_read_data(struct inode *inode, void *data, size_t bytes); @@ -874,7 +882,7 @@ int ntfs_acl_chmod(struct mnt_idmap *idmap, struct dentry *dentry); ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size); extern const struct xattr_handler *const ntfs_xattr_handlers[]; -int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size); +int ntfs_save_wsl_perm(struct inode *inode, __le32 *ea_size); void ntfs_get_wsl_perm(struct inode *inode); /* globals from lznt.c */ @@ -1025,6 +1033,11 @@ static inline bool is_compressed(const struct ntfs_inode *ni) (ni->ni_flags & NI_FLAG_COMPRESSED_MASK); } +static inline bool is_bad_ni(const struct ntfs_inode *ni) +{ + return ni->ni_bad; +} + static inline int ni_ext_compress_bits(const struct ntfs_inode *ni) { return 0xb + (ni->ni_flags & NI_FLAG_COMPRESSED_MASK); diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c index 920a1ab47b63..ddff94c091b8 100644 --- a/fs/ntfs3/super.c +++ b/fs/ntfs3/super.c @@ -1223,7 +1223,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_export_op = &ntfs_export_ops; sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec sb->s_xattr = ntfs_xattr_handlers; - sb->s_d_op = options->nocase ? 
&ntfs_dentry_ops : NULL; + if (options->nocase) + set_default_d_op(sb, &ntfs_dentry_ops); options->nls = ntfs_load_nls(options->nls_name); if (IS_ERR(options->nls)) { diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c index e0055dcf8fe3..e519e21596a7 100644 --- a/fs/ntfs3/xattr.c +++ b/fs/ntfs3/xattr.c @@ -313,7 +313,7 @@ out: static noinline int ntfs_set_ea(struct inode *inode, const char *name, size_t name_len, const void *value, size_t val_size, int flags, bool locked, - __le16 *ea_size) + __le32 *ea_size) { struct ntfs_inode *ni = ntfs_i(inode); struct ntfs_sb_info *sbi = ni->mi.sbi; @@ -522,7 +522,7 @@ update_ea: if (ea_info.size_pack != size_pack) ni->ni_flags |= NI_FLAG_UPDATE_PARENT; if (ea_size) - *ea_size = ea_info.size_pack; + *ea_size = ea_info.size; mark_inode_dirty(&ni->vfs_inode); out: @@ -552,6 +552,10 @@ struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap, struct dentry *dentry, int err; void *buf; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return ERR_PTR(-EINVAL); + /* Allocate PATH_MAX bytes. */ buf = __getname(); if (!buf) @@ -600,6 +604,10 @@ static noinline int ntfs_set_acl_ex(struct mnt_idmap *idmap, int flags; umode_t mode; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ntfs_i(inode)))) + return -EINVAL; + if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; @@ -730,6 +738,10 @@ ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size) struct ntfs_inode *ni = ntfs_i(inode); ssize_t ret; + /* Avoid any operation if inode is bad. */ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (!(ni->ni_flags & NI_FLAG_EA)) { /* no xattr in file */ return 0; @@ -751,6 +763,10 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de, int err; struct ntfs_inode *ni = ntfs_i(inode); + /* Avoid any operation if inode is bad. 
*/ + if (unlikely(is_bad_ni(ni))) + return -EINVAL; + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; @@ -950,7 +966,7 @@ out: * * save uid/gid/mode in xattr */ -int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size) +int ntfs_save_wsl_perm(struct inode *inode, __le32 *ea_size) { int err; __le32 value; diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 40b6bce12951..2203438738f6 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1856,7 +1856,8 @@ out: return ret; } -static int ocfs2_write_begin(struct file *file, struct address_space *mapping, +static int ocfs2_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { @@ -2047,7 +2048,8 @@ out: return copied; } -static int ocfs2_write_end(struct file *file, struct address_space *mapping, +static int ocfs2_write_end(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 2056cf08ac1e..21d797ccccd0 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2800,7 +2800,7 @@ const struct inode_operations ocfs2_special_file_iops = { */ const struct file_operations ocfs2_fops = { .llseek = ocfs2_file_llseek, - .mmap = ocfs2_mmap, + .mmap_prepare = ocfs2_mmap_prepare, .fsync = ocfs2_sync_file, .release = ocfs2_file_release, .open = ocfs2_file_open, @@ -2850,7 +2850,7 @@ const struct file_operations ocfs2_dops = { */ const struct file_operations ocfs2_fops_no_plocks = { .llseek = ocfs2_file_llseek, - .mmap = ocfs2_mmap, + .mmap_prepare = ocfs2_mmap_prepare, .fsync = ocfs2_sync_file, .release = ocfs2_file_release, .open = ocfs2_file_open, diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 7ae96fb8807a..db14c92302a1 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -62,7 +62,7 @@ static inline int o2info_coherent(struct ocfs2_info_request *req) return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT)); } -int ocfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int ocfs2_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); unsigned int flags; @@ -83,7 +83,7 @@ int ocfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa) } int ocfs2_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); unsigned int flags = fa->flags; diff --git a/fs/ocfs2/ioctl.h b/fs/ocfs2/ioctl.h index 48a5fdfe87a1..4a1c2313b429 100644 --- a/fs/ocfs2/ioctl.h +++ b/fs/ocfs2/ioctl.h @@ -11,9 +11,9 @@ #ifndef OCFS2_IOCTL_PROTO_H #define OCFS2_IOCTL_PROTO_H -int ocfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int ocfs2_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int ocfs2_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg); diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 6a314e9f2b49..50e2faf64c19 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -159,8 +159,9 @@ static const struct vm_operations_struct ocfs2_file_vm_ops = { .page_mkwrite = ocfs2_page_mkwrite, }; -int ocfs2_mmap(struct file *file, struct vm_area_struct *vma) +int ocfs2_mmap_prepare(struct vm_area_desc *desc) { + struct file *file 
= desc->file; int ret = 0, lock_level = 0; ret = ocfs2_inode_lock_atime(file_inode(file), @@ -171,7 +172,7 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma) } ocfs2_inode_unlock(file_inode(file), lock_level); out: - vma->vm_ops = &ocfs2_file_vm_ops; + desc->vm_ops = &ocfs2_file_vm_ops; return 0; } diff --git a/fs/ocfs2/mmap.h b/fs/ocfs2/mmap.h index 1051507cc684..d21c30de6b8c 100644 --- a/fs/ocfs2/mmap.h +++ b/fs/ocfs2/mmap.h @@ -2,6 +2,6 @@ #ifndef OCFS2_MMAP_H #define OCFS2_MMAP_H -int ocfs2_mmap(struct file *file, struct vm_area_struct *vma); +int ocfs2_mmap_prepare(struct vm_area_desc *desc); #endif /* OCFS2_MMAP_H */ diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 3d2533950bae..53daa4482406 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1962,7 +1962,7 @@ static int ocfs2_initialize_super(struct super_block *sb, sb->s_fs_info = osb; sb->s_op = &ocfs2_sops; - sb->s_d_op = &ocfs2_dentry_ops; + set_default_d_op(sb, &ocfs2_dentry_ops); sb->s_export_op = &ocfs2_export_ops; sb->s_qcop = &dquot_quotactl_sysfile_ops; sb->dq_op = &ocfs2_quota_operations; diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 98358d405b6a..49a1de5a827f 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -310,9 +310,10 @@ static void omfs_write_failed(struct address_space *mapping, loff_t to) } } -static int omfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int omfs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { int ret; @@ -332,7 +333,7 @@ const struct file_operations omfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .fsync = generic_file_fsync, .splice_read = filemap_splice_read, }; diff --git a/fs/open.c b/fs/open.c index 7828234a7caa..feb21e8cf5ff 100644 --- a/fs/open.c +++ b/fs/open.c @@ -281,6 +281,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) break; case FALLOC_FL_COLLAPSE_RANGE: case FALLOC_FL_INSERT_RANGE: + case FALLOC_FL_WRITE_ZEROES: if (mode & FALLOC_FL_KEEP_SIZE) return -EOPNOTSUPP; break; @@ -1204,14 +1205,11 @@ struct file *kernel_file_open(const struct path *path, int flags, if (IS_ERR(f)) return f; - f->f_path = *path; - error = do_dentry_open(f, NULL); + error = vfs_open(path, f); if (error) { fput(f); return ERR_PTR(error); } - - fsnotify_open(f); return f; } EXPORT_SYMBOL_GPL(kernel_file_open); diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c index 90c49c0de243..919f99b16834 100644 --- a/fs/orangefs/file.c +++ b/fs/orangefs/file.c @@ -398,8 +398,9 @@ static const struct vm_operations_struct orangefs_file_vm_ops = { /* * Memory map a region of a file. 
*/ -static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int orangefs_file_mmap_prepare(struct vm_area_desc *desc) { + struct file *file = desc->file; int ret; ret = orangefs_revalidate_mapping(file_inode(file)); @@ -410,10 +411,11 @@ static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma) "orangefs_file_mmap: called on %pD\n", file); /* set the sequential readahead hint */ - vm_flags_mod(vma, VM_SEQ_READ, VM_RAND_READ); + desc->vm_flags |= VM_SEQ_READ; + desc->vm_flags &= ~VM_RAND_READ; file_accessed(file); - vma->vm_ops = &orangefs_file_vm_ops; + desc->vm_ops = &orangefs_file_vm_ops; return 0; } @@ -574,7 +576,7 @@ const struct file_operations orangefs_file_operations = { .read_iter = orangefs_file_read_iter, .write_iter = orangefs_file_write_iter, .lock = orangefs_lock, - .mmap = orangefs_file_mmap, + .mmap_prepare = orangefs_file_mmap_prepare, .open = generic_file_open, .splice_read = orangefs_file_splice_read, .splice_write = iter_file_splice_write, diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 08a6f372a352..a01400cd41fd 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -285,9 +285,10 @@ static int orangefs_read_folio(struct file *file, struct folio *folio) return ret; } -static int orangefs_write_begin(struct file *file, - struct address_space *mapping, loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int orangefs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, + unsigned len, struct folio **foliop, + void **fsdata) { struct orangefs_write_range *wr; struct folio *folio; @@ -340,9 +341,10 @@ okay: return 0; } -static int orangefs_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, struct folio *folio, - void *fsdata) +static int orangefs_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { struct inode *inode = folio->mapping->host; loff_t last_pos = pos + copied; @@ -372,7 +374,7 @@ static int orangefs_write_end(struct file *file, struct address_space *mapping, folio_unlock(folio); folio_put(folio); - mark_inode_dirty_sync(file_inode(file)); + mark_inode_dirty_sync(file_inode(iocb->ki_filp)); return copied; } @@ -887,7 +889,7 @@ int orangefs_update_time(struct inode *inode, int flags) return __orangefs_setattr(inode, &iattr); } -static int orangefs_fileattr_get(struct dentry *dentry, struct fileattr *fa) +static int orangefs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { u64 val = 0; int ret; @@ -908,7 +910,7 @@ static int orangefs_fileattr_get(struct dentry *dentry, struct fileattr *fa) } static int orangefs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { u64 val = 0; diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index 64ca9498f550..f3da840758e7 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -416,7 +416,7 @@ static int orangefs_fill_sb(struct super_block *sb, sb->s_xattr = orangefs_xattr_handlers; sb->s_magic = ORANGEFS_SUPER_MAGIC; sb->s_op = &orangefs_s_ops; - sb->s_d_op = &orangefs_dentry_operations; + set_default_d_op(sb, &orangefs_dentry_operations); sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index d7310fcf3888..27396fe63f6d 100644 --- a/fs/overlayfs/copy_up.c +++ 
b/fs/overlayfs/copy_up.c @@ -171,14 +171,14 @@ out: static int ovl_copy_fileattr(struct inode *inode, const struct path *old, const struct path *new) { - struct fileattr oldfa = { .flags_valid = true }; - struct fileattr newfa = { .flags_valid = true }; + struct file_kattr oldfa = { .flags_valid = true }; + struct file_kattr newfa = { .flags_valid = true }; int err; err = ovl_real_fileattr_get(old, &oldfa); if (err) { /* Ntfs-3g returns -EINVAL for "no fileattr support" */ - if (err == -ENOTTY || err == -EINVAL) + if (err == -EOPNOTSUPP || err == -EINVAL) return 0; pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n", old->dentry, err); @@ -517,15 +517,12 @@ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper, /* * Create and install index entry. - * - * Caller must hold i_mutex on indexdir. */ static int ovl_create_index(struct dentry *dentry, const struct ovl_fh *fh, struct dentry *upper) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *indexdir = ovl_indexdir(dentry->d_sb); - struct inode *dir = d_inode(indexdir); struct dentry *index = NULL; struct dentry *temp = NULL; struct qstr name = { }; @@ -559,16 +556,20 @@ static int ovl_create_index(struct dentry *dentry, const struct ovl_fh *fh, if (err) goto out; + err = ovl_parent_lock(indexdir, temp); + if (err) + goto out; index = ovl_lookup_upper(ofs, name.name, indexdir, name.len); if (IS_ERR(index)) { err = PTR_ERR(index); } else { - err = ovl_do_rename(ofs, dir, temp, dir, index, 0); + err = ovl_do_rename(ofs, indexdir, temp, indexdir, index, 0); dput(index); } + ovl_parent_unlock(indexdir); out: if (err) - ovl_cleanup(ofs, dir, temp); + ovl_cleanup(ofs, indexdir, temp); dput(temp); free_name: kfree(name.name); @@ -762,7 +763,6 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *inode; - struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir); struct path path = { .mnt = ovl_upper_mnt(ofs) }; struct dentry *temp, *upper, *trap; struct ovl_cu_creds cc; @@ -779,9 +779,7 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c) return err; ovl_start_write(c->dentry); - inode_lock(wdir); temp = ovl_create_temp(ofs, c->workdir, &cattr); - inode_unlock(wdir); ovl_end_write(c->dentry); ovl_revert_cu_creds(&cc); @@ -794,45 +792,47 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c) */ path.dentry = temp; err = ovl_copy_up_data(c, &path); + ovl_start_write(c->dentry); + if (err) + goto cleanup_unlocked; + + if (S_ISDIR(c->stat.mode) && c->indexed) { + err = ovl_create_index(c->dentry, c->origin_fh, temp); + if (err) + goto cleanup_unlocked; + } + /* * We cannot hold lock_rename() throughout this helper, because of * lock ordering with sb_writers, which shouldn't be held when calling * ovl_copy_up_data(), so lock workdir and destdir and make sure that * temp wasn't moved before copy up completion or cleanup. */ - ovl_start_write(c->dentry); trap = lock_rename(c->workdir, c->destdir); if (trap || temp->d_parent != c->workdir) { /* temp or workdir moved underneath us? 
abort without cleanup */ dput(temp); err = -EIO; - if (IS_ERR(trap)) - goto out; - goto unlock; - } else if (err) { - goto cleanup; + if (!IS_ERR(trap)) + unlock_rename(c->workdir, c->destdir); + goto out; } err = ovl_copy_up_metadata(c, temp); if (err) goto cleanup; - if (S_ISDIR(c->stat.mode) && c->indexed) { - err = ovl_create_index(c->dentry, c->origin_fh, temp); - if (err) - goto cleanup; - } - upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir, c->destname.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto cleanup; - err = ovl_do_rename(ofs, wdir, temp, udir, upper, 0); + err = ovl_do_rename(ofs, c->workdir, temp, c->destdir, upper, 0); + unlock_rename(c->workdir, c->destdir); dput(upper); if (err) - goto cleanup; + goto cleanup_unlocked; inode = d_inode(c->dentry); if (c->metacopy_digest) @@ -846,17 +846,17 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c) ovl_inode_update(inode, temp); if (S_ISDIR(inode->i_mode)) ovl_set_flag(OVL_WHITEOUTS, inode); -unlock: - unlock_rename(c->workdir, c->destdir); out: ovl_end_write(c->dentry); return err; cleanup: - ovl_cleanup(ofs, wdir, temp); + unlock_rename(c->workdir, c->destdir); +cleanup_unlocked: + ovl_cleanup(ofs, c->workdir, temp); dput(temp); - goto unlock; + goto out; } /* Copyup using O_TMPFILE which does not require cross dir locking */ diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index fe493f3ed6b6..70b8687dc45e 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -24,7 +24,8 @@ MODULE_PARM_DESC(redirect_max, static int ovl_set_redirect(struct dentry *dentry, bool samedir); -int ovl_cleanup(struct ovl_fs *ofs, struct inode *wdir, struct dentry *wdentry) +static int ovl_cleanup_locked(struct ovl_fs *ofs, struct inode *wdir, + struct dentry *wdentry) { int err; @@ -43,6 +44,21 @@ int ovl_cleanup(struct ovl_fs *ofs, struct inode *wdir, struct dentry *wdentry) return err; } +int ovl_cleanup(struct ovl_fs *ofs, struct dentry *workdir, + struct dentry *wdentry) +{ + int err; + + err = ovl_parent_lock(workdir, wdentry); + if (err) + return err; + + ovl_cleanup_locked(ofs, workdir->d_inode, wdentry); + ovl_parent_unlock(workdir); + + return 0; +} + struct dentry *ovl_lookup_temp(struct ovl_fs *ofs, struct dentry *workdir) { struct dentry *temp; @@ -62,7 +78,6 @@ struct dentry *ovl_lookup_temp(struct ovl_fs *ofs, struct dentry *workdir) return temp; } -/* caller holds i_mutex on workdir */ static struct dentry *ovl_whiteout(struct ovl_fs *ofs) { int err; @@ -70,47 +85,52 @@ static struct dentry *ovl_whiteout(struct ovl_fs *ofs) struct dentry *workdir = ofs->workdir; struct inode *wdir = workdir->d_inode; + guard(mutex)(&ofs->whiteout_lock); + if (!ofs->whiteout) { + inode_lock_nested(wdir, I_MUTEX_PARENT); whiteout = ovl_lookup_temp(ofs, workdir); - if (IS_ERR(whiteout)) - goto out; - - err = ovl_do_whiteout(ofs, wdir, whiteout); - if (err) { - dput(whiteout); - whiteout = ERR_PTR(err); - goto out; + if (!IS_ERR(whiteout)) { + err = ovl_do_whiteout(ofs, wdir, whiteout); + if (err) { + dput(whiteout); + whiteout = ERR_PTR(err); + } } + inode_unlock(wdir); + if (IS_ERR(whiteout)) + return whiteout; ofs->whiteout = whiteout; } if (!ofs->no_shared_whiteout) { + inode_lock_nested(wdir, I_MUTEX_PARENT); whiteout = ovl_lookup_temp(ofs, workdir); - if (IS_ERR(whiteout)) - goto out; - - err = ovl_do_link(ofs, ofs->whiteout, wdir, whiteout); - if (!err) - goto out; - - if (err != -EMLINK) { - pr_warn("Failed to link whiteout - disabling whiteout inode sharing(nlink=%u, err=%i)\n", - ofs->whiteout->d_inode->i_nlink, 
err); + if (!IS_ERR(whiteout)) { + err = ovl_do_link(ofs, ofs->whiteout, wdir, whiteout); + if (err) { + dput(whiteout); + whiteout = ERR_PTR(err); + } + } + inode_unlock(wdir); + if (!IS_ERR(whiteout)) + return whiteout; + if (PTR_ERR(whiteout) != -EMLINK) { + pr_warn("Failed to link whiteout - disabling whiteout inode sharing(nlink=%u, err=%lu)\n", + ofs->whiteout->d_inode->i_nlink, + PTR_ERR(whiteout)); ofs->no_shared_whiteout = true; } - dput(whiteout); } whiteout = ofs->whiteout; ofs->whiteout = NULL; -out: return whiteout; } -/* Caller must hold i_mutex on both workdir and dir */ -int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir, +int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct dentry *dir, struct dentry *dentry) { - struct inode *wdir = ofs->workdir->d_inode; struct dentry *whiteout; int err; int flags = 0; @@ -123,24 +143,29 @@ int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir, if (d_is_dir(dentry)) flags = RENAME_EXCHANGE; - err = ovl_do_rename(ofs, wdir, whiteout, dir, dentry, flags); + err = ovl_lock_rename_workdir(ofs->workdir, whiteout, dir, dentry); + if (!err) { + err = ovl_do_rename(ofs, ofs->workdir, whiteout, dir, dentry, flags); + unlock_rename(ofs->workdir, dir); + } if (err) goto kill_whiteout; if (flags) - ovl_cleanup(ofs, wdir, dentry); + ovl_cleanup(ofs, ofs->workdir, dentry); out: dput(whiteout); return err; kill_whiteout: - ovl_cleanup(ofs, wdir, whiteout); + ovl_cleanup(ofs, ofs->workdir, whiteout); goto out; } -struct dentry *ovl_create_real(struct ovl_fs *ofs, struct inode *dir, +struct dentry *ovl_create_real(struct ovl_fs *ofs, struct dentry *parent, struct dentry *newdentry, struct ovl_cattr *attr) { + struct inode *dir = parent->d_inode; int err; if (IS_ERR(newdentry)) @@ -199,8 +224,12 @@ out: struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir, struct ovl_cattr *attr) { - return ovl_create_real(ofs, d_inode(workdir), - ovl_lookup_temp(ofs, workdir), attr); + struct dentry *ret; + inode_lock(workdir->d_inode); + ret = ovl_create_real(ofs, workdir, + ovl_lookup_temp(ofs, workdir), attr); + inode_unlock(workdir->d_inode); + return ret; } static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper, @@ -303,13 +332,13 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode, int err; inode_lock_nested(udir, I_MUTEX_PARENT); - newdentry = ovl_create_real(ofs, udir, + newdentry = ovl_create_real(ofs, upperdir, ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, dentry->d_name.len), attr); - err = PTR_ERR(newdentry); + inode_unlock(udir); if (IS_ERR(newdentry)) - goto out_unlock; + return PTR_ERR(newdentry); if (ovl_type_merge(dentry->d_parent) && d_is_dir(newdentry) && !ovl_allow_offline_changes(ofs)) { @@ -321,14 +350,12 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode, err = ovl_instantiate(dentry, inode, newdentry, !!attr->hardlink, NULL); if (err) goto out_cleanup; -out_unlock: - inode_unlock(udir); - return err; + return 0; out_cleanup: - ovl_cleanup(ofs, udir, newdentry); + ovl_cleanup(ofs, upperdir, newdentry); dput(newdentry); - goto out_unlock; + return err; } static struct dentry *ovl_clear_empty(struct dentry *dentry, @@ -336,9 +363,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry, { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *workdir = ovl_workdir(dentry); - struct inode *wdir = workdir->d_inode; struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); - struct inode *udir = upperdir->d_inode; 
struct path upperpath; struct dentry *upper; struct dentry *opaquedir; @@ -348,27 +373,25 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry, if (WARN_ON(!workdir)) return ERR_PTR(-EROFS); - err = ovl_lock_rename_workdir(workdir, upperdir); - if (err) - goto out; - ovl_path_upper(dentry, &upperpath); err = vfs_getattr(&upperpath, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); if (err) - goto out_unlock; + goto out; err = -ESTALE; if (!S_ISDIR(stat.mode)) - goto out_unlock; + goto out; upper = upperpath.dentry; - if (upper->d_parent->d_inode != udir) - goto out_unlock; opaquedir = ovl_create_temp(ofs, workdir, OVL_CATTR(stat.mode)); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) - goto out_unlock; + goto out; + + err = ovl_lock_rename_workdir(workdir, opaquedir, upperdir, upper); + if (err) + goto out_cleanup_unlocked; err = ovl_copy_xattr(dentry->d_sb, &upperpath, opaquedir); if (err) @@ -384,13 +407,13 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry, if (err) goto out_cleanup; - err = ovl_do_rename(ofs, wdir, opaquedir, udir, upper, RENAME_EXCHANGE); + err = ovl_do_rename(ofs, workdir, opaquedir, upperdir, upper, RENAME_EXCHANGE); + unlock_rename(workdir, upperdir); if (err) - goto out_cleanup; + goto out_cleanup_unlocked; ovl_cleanup_whiteouts(ofs, upper, list); - ovl_cleanup(ofs, wdir, upper); - unlock_rename(workdir, upperdir); + ovl_cleanup(ofs, workdir, upper); /* dentry's upper doesn't match now, get rid of it */ d_drop(dentry); @@ -398,10 +421,10 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry, return opaquedir; out_cleanup: - ovl_cleanup(ofs, wdir, opaquedir); - dput(opaquedir); -out_unlock: unlock_rename(workdir, upperdir); +out_cleanup_unlocked: + ovl_cleanup(ofs, workdir, opaquedir); + dput(opaquedir); out: return ERR_PTR(err); } @@ -420,9 +443,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *workdir = ovl_workdir(dentry); - struct inode *wdir = workdir->d_inode; struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); - struct inode *udir = upperdir->d_inode; struct dentry *upper; struct dentry *newdentry; int err; @@ -439,15 +460,11 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, return err; } - err = ovl_lock_rename_workdir(workdir, upperdir); - if (err) - goto out; - - upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, - dentry->d_name.len); + upper = ovl_lookup_upper_unlocked(ofs, dentry->d_name.name, upperdir, + dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) - goto out_unlock; + goto out; err = -ESTALE; if (d_is_negative(upper) || !ovl_upper_is_whiteout(ofs, upper)) @@ -458,6 +475,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, if (IS_ERR(newdentry)) goto out_dput; + err = ovl_lock_rename_workdir(workdir, newdentry, upperdir, upper); + if (err) + goto out_cleanup_unlocked; + /* * mode could have been mutilated due to umask (e.g. 
sgid directory) */ @@ -491,27 +512,27 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, if (err) goto out_cleanup; - err = ovl_do_rename(ofs, wdir, newdentry, udir, upper, + err = ovl_do_rename(ofs, workdir, newdentry, upperdir, upper, RENAME_EXCHANGE); + unlock_rename(workdir, upperdir); if (err) - goto out_cleanup; + goto out_cleanup_unlocked; - ovl_cleanup(ofs, wdir, upper); + ovl_cleanup(ofs, workdir, upper); } else { - err = ovl_do_rename(ofs, wdir, newdentry, udir, upper, 0); + err = ovl_do_rename(ofs, workdir, newdentry, upperdir, upper, 0); + unlock_rename(workdir, upperdir); if (err) - goto out_cleanup; + goto out_cleanup_unlocked; } ovl_dir_modified(dentry->d_parent, false); err = ovl_instantiate(dentry, inode, newdentry, hardlink, NULL); if (err) { - ovl_cleanup(ofs, udir, newdentry); + ovl_cleanup(ofs, upperdir, newdentry); dput(newdentry); } out_dput: dput(upper); -out_unlock: - unlock_rename(workdir, upperdir); out: if (!hardlink) { posix_acl_release(acl); @@ -520,7 +541,9 @@ out: return err; out_cleanup: - ovl_cleanup(ofs, wdir, newdentry); + unlock_rename(workdir, upperdir); +out_cleanup_unlocked: + ovl_cleanup(ofs, workdir, newdentry); dput(newdentry); goto out_dput; } @@ -757,15 +780,11 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, goto out; } - err = ovl_lock_rename_workdir(workdir, upperdir); - if (err) - goto out_dput; - - upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, - dentry->d_name.len); + upper = ovl_lookup_upper_unlocked(ofs, dentry->d_name.name, upperdir, + dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) - goto out_unlock; + goto out_dput; err = -ESTALE; if ((opaquedir && upper != opaquedir) || @@ -774,17 +793,13 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, goto out_dput_upper; } - err = ovl_cleanup_and_whiteout(ofs, d_inode(upperdir), upper); - if (err) - goto out_d_drop; + err = ovl_cleanup_and_whiteout(ofs, upperdir, upper); + if (!err) + ovl_dir_modified(dentry->d_parent, true); - ovl_dir_modified(dentry->d_parent, true); -out_d_drop: d_drop(dentry); out_dput_upper: dput(upper); -out_unlock: - unlock_rename(workdir, upperdir); out_dput: dput(opaquedir); out: @@ -1069,9 +1084,9 @@ static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir, int err; struct dentry *old_upperdir; struct dentry *new_upperdir; - struct dentry *olddentry; - struct dentry *newdentry; - struct dentry *trap; + struct dentry *olddentry = NULL; + struct dentry *newdentry = NULL; + struct dentry *trap, *de; bool old_opaque; bool new_opaque; bool cleanup_whiteout = false; @@ -1184,21 +1199,23 @@ static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir, goto out_revert_creds; } - olddentry = ovl_lookup_upper(ofs, old->d_name.name, old_upperdir, - old->d_name.len); - err = PTR_ERR(olddentry); - if (IS_ERR(olddentry)) + de = ovl_lookup_upper(ofs, old->d_name.name, old_upperdir, + old->d_name.len); + err = PTR_ERR(de); + if (IS_ERR(de)) goto out_unlock; + olddentry = de; err = -ESTALE; if (!ovl_matches_upper(old, olddentry)) - goto out_dput_old; + goto out_unlock; - newdentry = ovl_lookup_upper(ofs, new->d_name.name, new_upperdir, - new->d_name.len); - err = PTR_ERR(newdentry); - if (IS_ERR(newdentry)) - goto out_dput_old; + de = ovl_lookup_upper(ofs, new->d_name.name, new_upperdir, + new->d_name.len); + err = PTR_ERR(de); + if (IS_ERR(de)) + goto out_unlock; + newdentry = de; old_opaque = ovl_dentry_is_opaque(old); new_opaque = ovl_dentry_is_opaque(new); @@ -1207,28 +1224,28 @@ 
static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir, if (d_inode(new) && ovl_dentry_upper(new)) { if (opaquedir) { if (newdentry != opaquedir) - goto out_dput; + goto out_unlock; } else { if (!ovl_matches_upper(new, newdentry)) - goto out_dput; + goto out_unlock; } } else { if (!d_is_negative(newdentry)) { if (!new_opaque || !ovl_upper_is_whiteout(ofs, newdentry)) - goto out_dput; + goto out_unlock; } else { if (flags & RENAME_EXCHANGE) - goto out_dput; + goto out_unlock; } } if (olddentry == trap) - goto out_dput; + goto out_unlock; if (newdentry == trap) - goto out_dput; + goto out_unlock; if (olddentry->d_inode == newdentry->d_inode) - goto out_dput; + goto out_unlock; err = 0; if (ovl_type_merge_or_lower(old)) @@ -1236,7 +1253,7 @@ static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir, else if (is_dir && !old_opaque && ovl_type_merge(new->d_parent)) err = ovl_set_opaque_xerr(old, olddentry, -EXDEV); if (err) - goto out_dput; + goto out_unlock; if (!overwrite && ovl_type_merge_or_lower(new)) err = ovl_set_redirect(new, samedir); @@ -1244,15 +1261,16 @@ static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir, ovl_type_merge(old->d_parent)) err = ovl_set_opaque_xerr(new, newdentry, -EXDEV); if (err) - goto out_dput; + goto out_unlock; - err = ovl_do_rename(ofs, old_upperdir->d_inode, olddentry, - new_upperdir->d_inode, newdentry, flags); + err = ovl_do_rename(ofs, old_upperdir, olddentry, + new_upperdir, newdentry, flags); + unlock_rename(new_upperdir, old_upperdir); if (err) - goto out_dput; + goto out_revert_creds; if (cleanup_whiteout) - ovl_cleanup(ofs, old_upperdir->d_inode, newdentry); + ovl_cleanup(ofs, old_upperdir, newdentry); if (overwrite && d_inode(new)) { if (new_is_dir) @@ -1271,12 +1289,6 @@ static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir, if (d_inode(new) && ovl_dentry_upper(new)) ovl_copyattr(d_inode(new)); -out_dput: - dput(newdentry); -out_dput_old: - dput(olddentry); -out_unlock: - unlock_rename(new_upperdir, old_upperdir); out_revert_creds: ovl_revert_creds(old_cred); if (update_nlink) @@ -1284,9 +1296,15 @@ out_revert_creds: else ovl_drop_write(old); out: + dput(newdentry); + dput(olddentry); dput(opaquedir); ovl_cache_free(&list); return err; + +out_unlock: + unlock_rename(new_upperdir, old_upperdir); + goto out_revert_creds; } static int ovl_create_tmpfile(struct file *file, struct dentry *dentry, diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index dfea7bd800cb..f5b8877d5fe2 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -48,7 +48,7 @@ static struct file *ovl_open_realfile(const struct file *file, if (!inode_owner_or_capable(real_idmap, realinode)) flags &= ~O_NOATIME; - realfile = backing_file_open(file_user_path((struct file *) file), + realfile = backing_file_open(file_user_path(file), flags, realpath, current_cred()); } ovl_revert_creds(old_cred); diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 6f0e15f86c21..ecb9f2019395 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -610,7 +610,7 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, * Introducing security_inode_fileattr_get/set() hooks would solve this issue * properly. 
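The overlayfs dir.c changes above replace "take workdir/upperdir locks, then look up" with "look up (and create scratch dentries) without the parent locks, then take lock_rename() and re-check that each dentry still sits under the expected parent". The following is a minimal userspace sketch of that revalidate-after-lock idea; the struct and helper names are made up and a plain mutex stands in for the VFS locking helpers, so this is only a model of the pattern, not kernel code.

/* Toy model: resolve first, lock later, then confirm nothing moved. */
#include <pthread.h>
#include <stdio.h>

struct dir { pthread_mutex_t lock; };
struct entry { struct dir *parent; const char *name; };

/* Lock the directory, then confirm the entry resolved earlier still
 * belongs to it; if it was moved in the meantime, back off. */
static int lock_dir_and_check(struct dir *dir, struct entry *e)
{
	pthread_mutex_lock(&dir->lock);
	if (!e || e->parent == dir)
		return 0;		/* still attached where we expect it */
	pthread_mutex_unlock(&dir->lock);
	return -1;			/* raced with a rename: caller retries or fails */
}

int main(void)
{
	struct dir d = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct entry e = { .parent = &d, .name = "tmpfile" };

	if (lock_dir_and_check(&d, &e) == 0) {
		/* ... do the rename/cleanup work under the lock ... */
		pthread_mutex_unlock(&d.lock);
		puts("entry still under expected parent");
	}
	return 0;
}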
*/ -static int ovl_security_fileattr(const struct path *realpath, struct fileattr *fa, +static int ovl_security_fileattr(const struct path *realpath, struct file_kattr *fa, bool set) { struct file *file; @@ -637,7 +637,7 @@ static int ovl_security_fileattr(const struct path *realpath, struct fileattr *f return err; } -int ovl_real_fileattr_set(const struct path *realpath, struct fileattr *fa) +int ovl_real_fileattr_set(const struct path *realpath, struct file_kattr *fa) { int err; @@ -649,7 +649,7 @@ int ovl_real_fileattr_set(const struct path *realpath, struct fileattr *fa) } int ovl_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct path upperpath; @@ -697,7 +697,7 @@ out: } /* Convert inode protection flags to fileattr flags */ -static void ovl_fileattr_prot_flags(struct inode *inode, struct fileattr *fa) +static void ovl_fileattr_prot_flags(struct inode *inode, struct file_kattr *fa) { BUILD_BUG_ON(OVL_PROT_FS_FLAGS_MASK & ~FS_COMMON_FL); BUILD_BUG_ON(OVL_PROT_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON); @@ -712,7 +712,7 @@ static void ovl_fileattr_prot_flags(struct inode *inode, struct fileattr *fa) } } -int ovl_real_fileattr_get(const struct path *realpath, struct fileattr *fa) +int ovl_real_fileattr_get(const struct path *realpath, struct file_kattr *fa) { int err; @@ -720,13 +720,10 @@ int ovl_real_fileattr_get(const struct path *realpath, struct fileattr *fa) if (err) return err; - err = vfs_fileattr_get(realpath->dentry, fa); - if (err == -ENOIOCTLCMD) - err = -ENOTTY; - return err; + return vfs_fileattr_get(realpath->dentry, fa); } -int ovl_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int ovl_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct path realpath; diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 2043f0369059..76d6248b625e 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -230,13 +230,26 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, struct dentry **ret, bool drop_negative) { struct ovl_fs *ofs = OVL_FS(d->sb); - struct dentry *this; + struct dentry *this = NULL; + const char *warn; struct path path; int err; bool last_element = !post[0]; bool is_upper = d->layer->idx == 0; char val; + /* + * We allow filesystems that are case-folding capable but deny composing + * ovl stack from case-folded directories. If someone has enabled case + * folding on a directory on underlying layer, the warranty of the ovl + * stack is voided. 
+ */ + if (ovl_dentry_casefolded(base)) { + warn = "case folded parent"; + err = -ESTALE; + goto out_warn; + } + this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative); if (IS_ERR(this)) { err = PTR_ERR(this); @@ -246,10 +259,17 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, goto out_err; } + if (ovl_dentry_casefolded(this)) { + warn = "case folded child"; + err = -EREMOTE; + goto out_warn; + } + if (ovl_dentry_weird(this)) { /* Don't support traversing automounts and other weirdness */ + warn = "unsupported object type"; err = -EREMOTE; - goto out_err; + goto out_warn; } path.dentry = this; @@ -283,8 +303,9 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, } else { if (ovl_lookup_trap_inode(d->sb, this)) { /* Caught in a trap of overlapping layers */ + warn = "overlapping layers"; err = -ELOOP; - goto out_err; + goto out_warn; } if (last_element) @@ -316,6 +337,10 @@ put_and_out: this = NULL; goto out; +out_warn: + pr_warn_ratelimited("failed lookup in %s (%pd2, name='%.*s', err=%i): %s\n", + is_upper ? "upper" : "lower", base, + namelen, name, err, warn); out_err: dput(this); return err; diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 497323128e5f..bb0d7ded8e76 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -355,19 +355,19 @@ static inline int ovl_do_remove_acl(struct ovl_fs *ofs, struct dentry *dentry, return vfs_remove_acl(ovl_upper_mnt_idmap(ofs), dentry, acl_name); } -static inline int ovl_do_rename(struct ovl_fs *ofs, struct inode *olddir, - struct dentry *olddentry, struct inode *newdir, +static inline int ovl_do_rename(struct ovl_fs *ofs, struct dentry *olddir, + struct dentry *olddentry, struct dentry *newdir, struct dentry *newdentry, unsigned int flags) { int err; struct renamedata rd = { .old_mnt_idmap = ovl_upper_mnt_idmap(ofs), - .old_dir = olddir, - .old_dentry = olddentry, + .old_parent = olddir, + .old_dentry = olddentry, .new_mnt_idmap = ovl_upper_mnt_idmap(ofs), - .new_dir = newdir, - .new_dentry = newdentry, - .flags = flags, + .new_parent = newdir, + .new_dentry = newdentry, + .flags = flags, }; pr_debug("rename(%pd2, %pd2, 0x%x)\n", olddentry, newdentry, flags); @@ -407,6 +407,15 @@ static inline struct dentry *ovl_lookup_upper(struct ovl_fs *ofs, return lookup_one(ovl_upper_mnt_idmap(ofs), &QSTR_LEN(name, len), base); } +static inline struct dentry *ovl_lookup_upper_unlocked(struct ovl_fs *ofs, + const char *name, + struct dentry *base, + int len) +{ + return lookup_one_unlocked(ovl_upper_mnt_idmap(ofs), + &QSTR_LEN(name, len), base); +} + static inline bool ovl_open_flags_need_copy_up(int flags) { if (!flags) @@ -416,6 +425,11 @@ static inline bool ovl_open_flags_need_copy_up(int flags) } /* util.c */ +int ovl_parent_lock(struct dentry *parent, struct dentry *child); +static inline void ovl_parent_unlock(struct dentry *parent) +{ + inode_unlock(parent->d_inode); +} int ovl_get_write_access(struct dentry *dentry); void ovl_put_write_access(struct dentry *dentry); void ovl_start_write(struct dentry *dentry); @@ -448,6 +462,12 @@ void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry, void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry, struct ovl_entry *oe, unsigned int mask); bool ovl_dentry_weird(struct dentry *dentry); + +static inline bool ovl_dentry_casefolded(struct dentry *dentry) +{ + return sb_has_encoding(dentry->d_sb) && IS_CASEFOLDED(d_inode(dentry)); +} + enum ovl_path_type 
ovl_path_type(struct dentry *dentry); void ovl_path_upper(struct dentry *dentry, struct path *path); void ovl_path_lower(struct dentry *dentry, struct path *path); @@ -535,7 +555,8 @@ bool ovl_is_inuse(struct dentry *dentry); bool ovl_need_index(struct dentry *dentry); int ovl_nlink_start(struct dentry *dentry); void ovl_nlink_end(struct dentry *dentry); -int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir); +int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *work, + struct dentry *upperdir, struct dentry *upper); int ovl_check_metacopy_xattr(struct ovl_fs *ofs, const struct path *path, struct ovl_metacopy *data); int ovl_set_metacopy_xattr(struct ovl_fs *ofs, struct dentry *d, @@ -723,7 +744,7 @@ void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper, void ovl_cache_free(struct list_head *list); void ovl_dir_cache_free(struct inode *inode); int ovl_check_d_type_supported(const struct path *realpath); -int ovl_workdir_cleanup(struct ovl_fs *ofs, struct inode *dir, +int ovl_workdir_cleanup(struct ovl_fs *ofs, struct dentry *parent, struct vfsmount *mnt, struct dentry *dentry, int level); int ovl_indexdir_cleanup(struct ovl_fs *ofs); @@ -817,7 +838,7 @@ void ovl_copyattr(struct inode *to); void ovl_check_protattr(struct inode *inode, struct dentry *upper); int ovl_set_protattr(struct inode *inode, struct dentry *upper, - struct fileattr *fa); + struct file_kattr *fa); static inline void ovl_copyflags(struct inode *from, struct inode *to) { @@ -828,7 +849,7 @@ static inline void ovl_copyflags(struct inode *from, struct inode *to) /* dir.c */ extern const struct inode_operations ovl_dir_inode_operations; -int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir, +int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct dentry *dir, struct dentry *dentry); struct ovl_cattr { dev_t rdev; @@ -840,20 +861,20 @@ struct ovl_cattr { #define OVL_CATTR(m) (&(struct ovl_cattr) { .mode = (m) }) struct dentry *ovl_create_real(struct ovl_fs *ofs, - struct inode *dir, struct dentry *newdentry, + struct dentry *parent, struct dentry *newdentry, struct ovl_cattr *attr); -int ovl_cleanup(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry); +int ovl_cleanup(struct ovl_fs *ofs, struct dentry *workdir, struct dentry *dentry); struct dentry *ovl_lookup_temp(struct ovl_fs *ofs, struct dentry *workdir); struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir, struct ovl_cattr *attr); /* file.c */ extern const struct file_operations ovl_file_operations; -int ovl_real_fileattr_get(const struct path *realpath, struct fileattr *fa); -int ovl_real_fileattr_set(const struct path *realpath, struct fileattr *fa); -int ovl_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int ovl_real_fileattr_get(const struct path *realpath, struct file_kattr *fa); +int ovl_real_fileattr_set(const struct path *realpath, struct file_kattr *fa); +int ovl_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int ovl_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); struct ovl_file; struct ovl_file *ovl_file_alloc(struct file *realfile); void ovl_file_free(struct ovl_file *of); diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index afb7762f873f..4c1bae935ced 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -88,6 +88,7 @@ struct ovl_fs { /* Shared whiteout cache */ struct dentry *whiteout; bool no_shared_whiteout; + 
struct mutex whiteout_lock; /* r/o snapshot of upperdir sb's only taken on volatile mounts */ errseq_t errseq; }; diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c index f42488c01957..f4e7fff909ac 100644 --- a/fs/overlayfs/params.c +++ b/fs/overlayfs/params.c @@ -282,13 +282,11 @@ static int ovl_mount_dir_check(struct fs_context *fc, const struct path *path, return invalfc(fc, "%s is not a directory", name); /* - * Root dentries of case-insensitive capable filesystems might - * not have the dentry operations set, but still be incompatible - * with overlayfs. Check explicitly to prevent post-mount - * failures. + * Allow filesystems that are case-folding capable but deny composing + * ovl stack from case-folded directories. */ - if (sb_has_encoding(path->mnt->mnt_sb)) - return invalfc(fc, "case-insensitive capable filesystem on %s not supported", name); + if (ovl_dentry_casefolded(path->dentry)) + return invalfc(fc, "case-insensitive directory on %s not supported", name); if (ovl_dentry_weird(path->dentry)) return invalfc(fc, "filesystem on %s not supported", name); @@ -797,6 +795,8 @@ int ovl_init_fs_context(struct fs_context *fc) fc->s_fs_info = ofs; fc->fs_private = ctx; fc->ops = &ovl_context_ops; + + mutex_init(&ofs->whiteout_lock); return 0; out_err: diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 474c80d210d1..b65cdfce31ce 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -1034,14 +1034,13 @@ void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper, { struct ovl_cache_entry *p; - inode_lock_nested(upper->d_inode, I_MUTEX_CHILD); list_for_each_entry(p, list, l_node) { struct dentry *dentry; if (WARN_ON(!p->is_whiteout || !p->is_upper)) continue; - dentry = ovl_lookup_upper(ofs, p->name, upper, p->len); + dentry = ovl_lookup_upper_unlocked(ofs, p->name, upper, p->len); if (IS_ERR(dentry)) { pr_err("lookup '%s/%.*s' failed (%i)\n", upper->d_name.name, p->len, p->name, @@ -1049,10 +1048,9 @@ void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper, continue; } if (dentry->d_inode) - ovl_cleanup(ofs, upper->d_inode, dentry); + ovl_cleanup(ofs, upper, dentry); dput(dentry); } - inode_unlock(upper->d_inode); } static bool ovl_check_d_type(struct dir_context *ctx, const char *name, @@ -1098,7 +1096,6 @@ static int ovl_workdir_cleanup_recurse(struct ovl_fs *ofs, const struct path *pa int level) { int err; - struct inode *dir = path->dentry->d_inode; LIST_HEAD(list); struct ovl_cache_entry *p; struct ovl_readdir_data rdd = { @@ -1124,7 +1121,6 @@ static int ovl_workdir_cleanup_recurse(struct ovl_fs *ofs, const struct path *pa if (err) goto out; - inode_lock_nested(dir, I_MUTEX_PARENT); list_for_each_entry(p, &list, l_node) { struct dentry *dentry; @@ -1139,39 +1135,40 @@ static int ovl_workdir_cleanup_recurse(struct ovl_fs *ofs, const struct path *pa err = -EINVAL; break; } - dentry = ovl_lookup_upper(ofs, p->name, path->dentry, p->len); + dentry = ovl_lookup_upper_unlocked(ofs, p->name, path->dentry, p->len); if (IS_ERR(dentry)) continue; if (dentry->d_inode) - err = ovl_workdir_cleanup(ofs, dir, path->mnt, dentry, level); + err = ovl_workdir_cleanup(ofs, path->dentry, path->mnt, + dentry, level); dput(dentry); if (err) break; } - inode_unlock(dir); out: ovl_cache_free(&list); return err; } -int ovl_workdir_cleanup(struct ovl_fs *ofs, struct inode *dir, +int ovl_workdir_cleanup(struct ovl_fs *ofs, struct dentry *parent, struct vfsmount *mnt, struct dentry *dentry, int level) { int err; - if (!d_is_dir(dentry) || level > 1) { 
- return ovl_cleanup(ofs, dir, dentry); - } + if (!d_is_dir(dentry) || level > 1) + return ovl_cleanup(ofs, parent, dentry); - err = ovl_do_rmdir(ofs, dir, dentry); + err = ovl_parent_lock(parent, dentry); + if (err) + return err; + err = ovl_do_rmdir(ofs, parent->d_inode, dentry); + ovl_parent_unlock(parent); if (err) { struct path path = { .mnt = mnt, .dentry = dentry }; - inode_unlock(dir); err = ovl_workdir_cleanup_recurse(ofs, &path, level + 1); - inode_lock_nested(dir, I_MUTEX_PARENT); if (!err) - err = ovl_cleanup(ofs, dir, dentry); + err = ovl_cleanup(ofs, parent, dentry); } return err; @@ -1182,7 +1179,6 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs) int err; struct dentry *indexdir = ofs->workdir; struct dentry *index = NULL; - struct inode *dir = indexdir->d_inode; struct path path = { .mnt = ovl_upper_mnt(ofs), .dentry = indexdir }; LIST_HEAD(list); struct ovl_cache_entry *p; @@ -1196,7 +1192,6 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs) if (err) goto out; - inode_lock_nested(dir, I_MUTEX_PARENT); list_for_each_entry(p, &list, l_node) { if (p->name[0] == '.') { if (p->len == 1) @@ -1204,7 +1199,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs) if (p->len == 2 && p->name[1] == '.') continue; } - index = ovl_lookup_upper(ofs, p->name, indexdir, p->len); + index = ovl_lookup_upper_unlocked(ofs, p->name, indexdir, p->len); if (IS_ERR(index)) { err = PTR_ERR(index); index = NULL; @@ -1212,7 +1207,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs) } /* Cleanup leftover from index create/cleanup attempt */ if (index->d_name.name[0] == '#') { - err = ovl_workdir_cleanup(ofs, dir, path.mnt, index, 1); + err = ovl_workdir_cleanup(ofs, indexdir, path.mnt, index, 1); if (err) break; goto next; @@ -1222,7 +1217,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs) goto next; } else if (err == -ESTALE) { /* Cleanup stale index entries */ - err = ovl_cleanup(ofs, dir, index); + err = ovl_cleanup(ofs, indexdir, index); } else if (err != -ENOENT) { /* * Abort mount to avoid corrupting the index if @@ -1235,10 +1230,10 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs) * Whiteout orphan index to block future open by * handle after overlay nlink dropped to zero. 
*/ - err = ovl_cleanup_and_whiteout(ofs, dir, index); + err = ovl_cleanup_and_whiteout(ofs, indexdir, index); } else { /* Cleanup orphan index entries */ - err = ovl_cleanup(ofs, dir, index); + err = ovl_cleanup(ofs, indexdir, index); } if (err) @@ -1249,7 +1244,6 @@ next: index = NULL; } dput(index); - inode_unlock(dir); out: ovl_cache_free(&list); if (err) diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index e19940d649ca..df85a76597e9 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -299,8 +299,8 @@ static struct dentry *ovl_workdir_create(struct ovl_fs *ofs, int err; bool retried = false; - inode_lock_nested(dir, I_MUTEX_PARENT); retry: + inode_lock_nested(dir, I_MUTEX_PARENT); work = ovl_lookup_upper(ofs, name, ofs->workbasedir, strlen(name)); if (!IS_ERR(work)) { @@ -311,23 +311,24 @@ retry: if (work->d_inode) { err = -EEXIST; + inode_unlock(dir); if (retried) goto out_dput; if (persist) - goto out_unlock; + return work; retried = true; - err = ovl_workdir_cleanup(ofs, dir, mnt, work, 0); + err = ovl_workdir_cleanup(ofs, ofs->workbasedir, mnt, work, 0); dput(work); - if (err == -EINVAL) { - work = ERR_PTR(err); - goto out_unlock; - } + if (err == -EINVAL) + return ERR_PTR(err); + goto retry; } work = ovl_do_mkdir(ofs, dir, work, attr.ia_mode); + inode_unlock(dir); err = PTR_ERR(work); if (IS_ERR(work)) goto out_err; @@ -365,11 +366,10 @@ retry: if (err) goto out_dput; } else { + inode_unlock(dir); err = PTR_ERR(work); goto out_err; } -out_unlock: - inode_unlock(dir); return work; out_dput: @@ -377,8 +377,7 @@ out_dput: out_err: pr_warn("failed to create directory %s/%s (errno: %i); mounting read-only\n", ofs->config.workdir, name, -err); - work = NULL; - goto out_unlock; + return NULL; } static int ovl_check_namelen(const struct path *path, struct ovl_fs *ofs, @@ -557,37 +556,42 @@ out: static int ovl_check_rename_whiteout(struct ovl_fs *ofs) { struct dentry *workdir = ofs->workdir; - struct inode *dir = d_inode(workdir); struct dentry *temp; struct dentry *dest; struct dentry *whiteout; struct name_snapshot name; int err; - inode_lock_nested(dir, I_MUTEX_PARENT); - temp = ovl_create_temp(ofs, workdir, OVL_CATTR(S_IFREG | 0)); err = PTR_ERR(temp); if (IS_ERR(temp)) - goto out_unlock; + return err; + err = ovl_parent_lock(workdir, temp); + if (err) { + dput(temp); + return err; + } dest = ovl_lookup_temp(ofs, workdir); err = PTR_ERR(dest); if (IS_ERR(dest)) { dput(temp); - goto out_unlock; + ovl_parent_unlock(workdir); + return err; } /* Name is inline and stable - using snapshot as a copy helper */ take_dentry_name_snapshot(&name, temp); - err = ovl_do_rename(ofs, dir, temp, dir, dest, RENAME_WHITEOUT); + err = ovl_do_rename(ofs, workdir, temp, workdir, dest, RENAME_WHITEOUT); + ovl_parent_unlock(workdir); if (err) { if (err == -EINVAL) err = 0; goto cleanup_temp; } - whiteout = ovl_lookup_upper(ofs, name.name.name, workdir, name.name.len); + whiteout = ovl_lookup_upper_unlocked(ofs, name.name.name, + workdir, name.name.len); err = PTR_ERR(whiteout); if (IS_ERR(whiteout)) goto cleanup_temp; @@ -596,18 +600,15 @@ static int ovl_check_rename_whiteout(struct ovl_fs *ofs) /* Best effort cleanup of whiteout and temp file */ if (err) - ovl_cleanup(ofs, dir, whiteout); + ovl_cleanup(ofs, workdir, whiteout); dput(whiteout); cleanup_temp: - ovl_cleanup(ofs, dir, temp); + ovl_cleanup(ofs, workdir, temp); release_dentry_name_snapshot(&name); dput(temp); dput(dest); -out_unlock: - inode_unlock(dir); - return err; } @@ -621,8 +622,7 @@ static struct dentry 
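ovl_check_rename_whiteout() above probes the upper filesystem for RENAME_WHITEOUT support at mount time; loosely the same probe can be made from userspace with renameat2(2). A sketch under the assumption that glibc (2.28+) declares renameat2() and RENAME_WHITEOUT in <stdio.h>; the /tmp paths are placeholders and CAP_MKNOD is needed for the whiteout to be created.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *src = "/tmp/probe-src", *dst = "/tmp/probe-dst";

	if (close(open(src, O_CREAT | O_WRONLY, 0600)) < 0)
		return 1;

	if (renameat2(AT_FDCWD, src, AT_FDCWD, dst, RENAME_WHITEOUT) == 0)
		puts("RENAME_WHITEOUT supported");
	else if (errno == EINVAL)
		puts("RENAME_WHITEOUT not supported here");
	else
		perror("renameat2");

	unlink(src);	/* on success a whiteout was left in place of src */
	unlink(dst);
	return 0;
}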
*ovl_lookup_or_create(struct ovl_fs *ofs, inode_lock_nested(parent->d_inode, I_MUTEX_PARENT); child = ovl_lookup_upper(ofs, name, parent, len); if (!IS_ERR(child) && !child->d_inode) - child = ovl_create_real(ofs, parent->d_inode, child, - OVL_CATTR(mode)); + child = ovl_create_real(ofs, parent, child, OVL_CATTR(mode)); inode_unlock(parent->d_inode); dput(parent); @@ -1322,7 +1322,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc) if (WARN_ON(fc->user_ns != current_user_ns())) goto out_err; - sb->s_d_op = &ovl_dentry_operations; + set_default_d_op(sb, &ovl_dentry_operations); err = -ENOMEM; if (!ofs->creator_cred) diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index dcccb4b4a66c..a33115e7384c 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -206,10 +206,17 @@ bool ovl_dentry_weird(struct dentry *dentry) if (!d_can_lookup(dentry) && !d_is_file(dentry) && !d_is_symlink(dentry)) return true; - return dentry->d_flags & (DCACHE_NEED_AUTOMOUNT | - DCACHE_MANAGE_TRANSIT | - DCACHE_OP_HASH | - DCACHE_OP_COMPARE); + if (dentry->d_flags & (DCACHE_NEED_AUTOMOUNT | DCACHE_MANAGE_TRANSIT)) + return true; + + /* + * Allow filesystems that are case-folding capable but deny composing + * ovl stack from case-folded directories. + */ + if (sb_has_encoding(dentry->d_sb)) + return IS_CASEFOLDED(d_inode(dentry)); + + return dentry->d_flags & (DCACHE_OP_HASH | DCACHE_OP_COMPARE); } enum ovl_path_type ovl_path_type(struct dentry *dentry) @@ -959,7 +966,7 @@ void ovl_check_protattr(struct inode *inode, struct dentry *upper) } int ovl_set_protattr(struct inode *inode, struct dentry *upper, - struct fileattr *fa) + struct file_kattr *fa) { struct ovl_fs *ofs = OVL_FS(inode->i_sb); char buf[OVL_PROTATTR_MAX]; @@ -1071,7 +1078,6 @@ static void ovl_cleanup_index(struct dentry *dentry) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *indexdir = ovl_indexdir(dentry->d_sb); - struct inode *dir = indexdir->d_inode; struct dentry *lowerdentry = ovl_dentry_lower(dentry); struct dentry *upperdentry = ovl_dentry_upper(dentry); struct dentry *index = NULL; @@ -1107,21 +1113,18 @@ static void ovl_cleanup_index(struct dentry *dentry) goto out; } - inode_lock_nested(dir, I_MUTEX_PARENT); - index = ovl_lookup_upper(ofs, name.name, indexdir, name.len); + index = ovl_lookup_upper_unlocked(ofs, name.name, indexdir, name.len); err = PTR_ERR(index); if (IS_ERR(index)) { index = NULL; } else if (ovl_index_all(dentry->d_sb)) { /* Whiteout orphan index to block future open by handle */ err = ovl_cleanup_and_whiteout(OVL_FS(dentry->d_sb), - dir, index); + indexdir, index); } else { /* Cleanup orphan index entries */ - err = ovl_cleanup(ofs, dir, index); + err = ovl_cleanup(ofs, indexdir, index); } - - inode_unlock(dir); if (err) goto fail; @@ -1220,20 +1223,21 @@ void ovl_nlink_end(struct dentry *dentry) ovl_inode_unlock(inode); } -int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir) +int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *work, + struct dentry *upperdir, struct dentry *upper) { struct dentry *trap; - /* Workdir should not be the same as upperdir */ - if (workdir == upperdir) - goto err; - /* Workdir should not be subdir of upperdir and vice versa */ trap = lock_rename(workdir, upperdir); if (IS_ERR(trap)) goto err; if (trap) goto err_unlock; + if (work && work->d_parent != workdir) + goto err_unlock; + if (upper && upper->d_parent != upperdir) + goto err_unlock; return 0; @@ -1544,3 +1548,13 @@ void ovl_copyattr(struct inode *inode) 
i_size_write(inode, i_size_read(realinode)); spin_unlock(&inode->i_lock); } + +int ovl_parent_lock(struct dentry *parent, struct dentry *child) +{ + inode_lock_nested(parent->d_inode, I_MUTEX_PARENT); + if (!child || child->d_parent == parent) + return 0; + + inode_unlock(parent->d_inode); + return -EINVAL; +} diff --git a/fs/pidfs.c b/fs/pidfs.c index 4625e097e3a0..edc35522d75c 100644 --- a/fs/pidfs.c +++ b/fs/pidfs.c @@ -21,11 +21,23 @@ #include <linux/utsname.h> #include <net/net_namespace.h> #include <linux/coredump.h> +#include <linux/xattr.h> #include "internal.h" #include "mount.h" -static struct kmem_cache *pidfs_cachep __ro_after_init; +#define PIDFS_PID_DEAD ERR_PTR(-ESRCH) + +static struct kmem_cache *pidfs_attr_cachep __ro_after_init; +static struct kmem_cache *pidfs_xattr_cachep __ro_after_init; + +static struct path pidfs_root_path = {}; + +void pidfs_get_root(struct path *path) +{ + *path = pidfs_root_path; + path_get(path); +} /* * Stashes information that userspace needs to access even after the @@ -37,17 +49,12 @@ struct pidfs_exit_info { __u32 coredump_mask; }; -struct pidfs_inode { +struct pidfs_attr { + struct simple_xattrs *xattrs; struct pidfs_exit_info __pei; struct pidfs_exit_info *exit_info; - struct inode vfs_inode; }; -static inline struct pidfs_inode *pidfs_i(struct inode *inode) -{ - return container_of(inode, struct pidfs_inode, vfs_inode); -} - static struct rb_root pidfs_ino_tree = RB_ROOT; #if BITS_PER_LONG == 32 @@ -125,6 +132,7 @@ void pidfs_add_pid(struct pid *pid) pid->ino = pidfs_ino_nr; pid->stashed = NULL; + pid->attr = NULL; pidfs_ino_nr++; write_seqcount_begin(&pidmap_lock_seq); @@ -139,6 +147,33 @@ void pidfs_remove_pid(struct pid *pid) write_seqcount_end(&pidmap_lock_seq); } +void pidfs_free_pid(struct pid *pid) +{ + struct pidfs_attr *attr __free(kfree) = no_free_ptr(pid->attr); + struct simple_xattrs *xattrs __free(kfree) = NULL; + + /* + * Any dentry must've been wiped from the pid by now. + * Otherwise there's a reference count bug. + */ + VFS_WARN_ON_ONCE(pid->stashed); + + /* + * This happens if an error occurred during e.g., task creation that + * causes us to never go through the exit path. + */ + if (unlikely(!attr)) + return; + + /* This never had a pidfd created.
*/ + if (IS_ERR(attr)) + return; + + xattrs = no_free_ptr(attr->xattrs); + if (xattrs) + simple_xattrs_free(xattrs, NULL); +} + #ifdef CONFIG_PROC_FS /** * pidfd_show_fdinfo - print information about a pidfd @@ -261,13 +296,13 @@ static __u32 pidfs_coredump_mask(unsigned long mm_flags) static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg) { struct pidfd_info __user *uinfo = (struct pidfd_info __user *)arg; - struct inode *inode = file_inode(file); struct pid *pid = pidfd_pid(file); size_t usize = _IOC_SIZE(cmd); struct pidfd_info kinfo = {}; struct pidfs_exit_info *exit_info; struct user_namespace *user_ns; struct task_struct *task; + struct pidfs_attr *attr; const struct cred *c; __u64 mask; @@ -286,8 +321,9 @@ static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg) if (!pid_in_current_pidns(pid)) return -ESRCH; + attr = READ_ONCE(pid->attr); if (mask & PIDFD_INFO_EXIT) { - exit_info = READ_ONCE(pidfs_i(inode)->exit_info); + exit_info = READ_ONCE(attr->exit_info); if (exit_info) { kinfo.mask |= PIDFD_INFO_EXIT; #ifdef CONFIG_CGROUPS @@ -300,7 +336,7 @@ static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg) if (mask & PIDFD_INFO_COREDUMP) { kinfo.mask |= PIDFD_INFO_COREDUMP; - kinfo.coredump_mask = READ_ONCE(pidfs_i(inode)->__pei.coredump_mask); + kinfo.coredump_mask = READ_ONCE(attr->__pei.coredump_mask); } task = get_pid_task(pid, PIDTYPE_PID); @@ -552,41 +588,61 @@ struct pid *pidfd_pid(const struct file *file) * task has been reaped which cannot happen until we're out of * release_task(). * - * If this struct pid is referred to by a pidfd then - * stashed_dentry_get() will return the dentry and inode for that struct - * pid. Since we've taken a reference on it there's now an additional - * reference from the exit path on it. Which is fine. We're going to put - * it again in a second and we know that the pid is kept alive anyway. + * If this struct pid has at least once been referred to by a pidfd then + * pid->attr will be allocated. If not we mark the struct pid as dead so + * anyone who is trying to register it with pidfs will fail to do so. + * Otherwise we would hand out pidfs for reaped tasks without having + * exit information available. * - * Worst case is that we've filled in the info and immediately free the - * dentry and inode afterwards since the pidfd has been closed. Since + * Worst case is that we've filled in the info and the pid gets freed + * right away in free_pid() when no one holds a pidfd anymore. Since * pidfs_exit() currently is placed after exit_task_work() we know that - * it cannot be us aka the exiting task holding a pidfd to ourselves. + * it cannot be us aka the exiting task holding a pidfd to itself. */ void pidfs_exit(struct task_struct *tsk) { - struct dentry *dentry; + struct pid *pid = task_pid(tsk); + struct pidfs_attr *attr; + struct pidfs_exit_info *exit_info; +#ifdef CONFIG_CGROUPS + struct cgroup *cgrp; +#endif might_sleep(); - dentry = stashed_dentry_get(&task_pid(tsk)->stashed); - if (dentry) { - struct inode *inode = d_inode(dentry); - struct pidfs_exit_info *exit_info = &pidfs_i(inode)->__pei; -#ifdef CONFIG_CGROUPS - struct cgroup *cgrp; + guard(spinlock_irq)(&pid->wait_pidfd.lock); + attr = pid->attr; + if (!attr) { + /* + * No one ever held a pidfd for this struct pid. + * Mark it as dead so no one can add a pidfs + * entry anymore. We're about to be reaped and + * so no exit information would be available. 
+ */ + pid->attr = PIDFS_PID_DEAD; + return; + } - rcu_read_lock(); - cgrp = task_dfl_cgroup(tsk); - exit_info->cgroupid = cgroup_id(cgrp); - rcu_read_unlock(); + /* + * If @pid->attr is set someone might still legitimately hold a + * pidfd to @pid or someone might concurrently still be getting + * a reference to an already stashed dentry from @pid->stashed. + * So defer cleaning @pid->attr until the last reference to @pid + * is put + */ + + exit_info = &attr->__pei; + +#ifdef CONFIG_CGROUPS + rcu_read_lock(); + cgrp = task_dfl_cgroup(tsk); + exit_info->cgroupid = cgroup_id(cgrp); + rcu_read_unlock(); #endif - exit_info->exit_code = tsk->exit_code; + exit_info->exit_code = tsk->exit_code; - /* Ensure that PIDFD_GET_INFO sees either all or nothing. */ - smp_store_release(&pidfs_i(inode)->exit_info, &pidfs_i(inode)->__pei); - dput(dentry); - } + /* Ensure that PIDFD_GET_INFO sees either all or nothing. */ + smp_store_release(&attr->exit_info, &attr->__pei); } #ifdef CONFIG_COREDUMP @@ -594,16 +650,15 @@ void pidfs_coredump(const struct coredump_params *cprm) { struct pid *pid = cprm->pid; struct pidfs_exit_info *exit_info; - struct dentry *dentry; - struct inode *inode; + struct pidfs_attr *attr; __u32 coredump_mask = 0; - dentry = pid->stashed; - if (WARN_ON_ONCE(!dentry)) - return; + attr = READ_ONCE(pid->attr); - inode = d_inode(dentry); - exit_info = &pidfs_i(inode)->__pei; + VFS_WARN_ON_ONCE(!attr); + VFS_WARN_ON_ONCE(attr == PIDFS_PID_DEAD); + + exit_info = &attr->__pei; /* Note how we were coredumped. */ coredump_mask = pidfs_coredump_mask(cprm->mm_flags); /* Note that we actually did coredump. */ @@ -634,9 +689,24 @@ static int pidfs_getattr(struct mnt_idmap *idmap, const struct path *path, return anon_inode_getattr(idmap, path, stat, request_mask, query_flags); } +static ssize_t pidfs_listxattr(struct dentry *dentry, char *buf, size_t size) +{ + struct inode *inode = d_inode(dentry); + struct pid *pid = inode->i_private; + struct pidfs_attr *attr = pid->attr; + struct simple_xattrs *xattrs; + + xattrs = READ_ONCE(attr->xattrs); + if (!xattrs) + return 0; + + return simple_xattr_list(inode, xattrs, buf, size); +} + static const struct inode_operations pidfs_inode_operations = { - .getattr = pidfs_getattr, - .setattr = pidfs_setattr, + .getattr = pidfs_getattr, + .setattr = pidfs_setattr, + .listxattr = pidfs_listxattr, }; static void pidfs_evict_inode(struct inode *inode) @@ -647,30 +717,9 @@ static void pidfs_evict_inode(struct inode *inode) put_pid(pid); } -static struct inode *pidfs_alloc_inode(struct super_block *sb) -{ - struct pidfs_inode *pi; - - pi = alloc_inode_sb(sb, pidfs_cachep, GFP_KERNEL); - if (!pi) - return NULL; - - memset(&pi->__pei, 0, sizeof(pi->__pei)); - pi->exit_info = NULL; - - return &pi->vfs_inode; -} - -static void pidfs_free_inode(struct inode *inode) -{ - kmem_cache_free(pidfs_cachep, pidfs_i(inode)); -} - static const struct super_operations pidfs_sops = { - .alloc_inode = pidfs_alloc_inode, .drop_inode = generic_delete_inode, .evict_inode = pidfs_evict_inode, - .free_inode = pidfs_free_inode, .statfs = simple_statfs, }; @@ -770,6 +819,8 @@ static struct dentry *pidfs_fh_to_dentry(struct super_block *sb, if (ret < 0) return ERR_PTR(ret); + VFS_WARN_ON_ONCE(!pid->attr); + mntput(path.mnt); return path.dentry; } @@ -796,53 +847,8 @@ static int pidfs_export_permission(struct handle_to_path_ctx *ctx, return 0; } -static inline bool pidfs_pid_valid(struct pid *pid, const struct path *path, - unsigned int flags) -{ - enum pid_type type; - - if (flags & 
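The exit-information handling above now lives in pid->attr rather than in a pidfs inode, which is what keeps PIDFD_GET_INFO usable after the task has been reaped. A rough userspace sketch of querying that path, assuming kernel UAPI headers that provide PIDFD_GET_INFO and PIDFD_INFO_EXIT in <linux/pidfd.h>; error handling is mostly omitted.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <linux/pidfd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid == 0)
		_exit(7);

	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	waitpid(pid, NULL, 0);			/* child is reaped here ... */

	struct pidfd_info info = { .mask = PIDFD_INFO_EXIT };
	if (pidfd >= 0 && ioctl(pidfd, PIDFD_GET_INFO, &info) == 0 &&
	    (info.mask & PIDFD_INFO_EXIT))
		/* ... but its exit status stays readable via the pidfd */
		printf("exit status: %d\n", WEXITSTATUS(info.exit_code));
	return 0;
}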
PIDFD_STALE) - return true; - - /* - * Make sure that if a pidfd is created PIDFD_INFO_EXIT - * information will be available. So after an inode for the - * pidfd has been allocated perform another check that the pid - * is still alive. If it is exit information is available even - * if the task gets reaped before the pidfd is returned to - * userspace. The only exception are indicated by PIDFD_STALE: - * - * (1) The kernel is in the middle of task creation and thus no - * task linkage has been established yet. - * (2) The caller knows @pid has been registered in pidfs at a - * time when the task was still alive. - * - * In both cases exit information will have been reported. - */ - if (flags & PIDFD_THREAD) - type = PIDTYPE_PID; - else - type = PIDTYPE_TGID; - - /* - * Since pidfs_exit() is called before struct pid's task linkage - * is removed the case where the task got reaped but a dentry - * was already attached to struct pid and exit information was - * recorded and published can be handled correctly. - */ - if (unlikely(!pid_has_task(pid, type))) { - struct inode *inode = d_inode(path->dentry); - return !!READ_ONCE(pidfs_i(inode)->exit_info); - } - - return true; -} - static struct file *pidfs_export_open(struct path *path, unsigned int oflags) { - if (!pidfs_pid_valid(d_inode(path->dentry)->i_private, path, oflags)) - return ERR_PTR(-ESRCH); - /* * Clear O_LARGEFILE as open_by_handle_at() forces it and raise * O_RDWR as pidfds always are. @@ -864,6 +870,8 @@ static int pidfs_init_inode(struct inode *inode, void *data) inode->i_private = data; inode->i_flags |= S_PRIVATE | S_ANON_INODE; + /* We allow to set xattrs. */ + inode->i_flags &= ~S_IMMUTABLE; inode->i_mode |= S_IRWXU; inode->i_op = &pidfs_inode_operations; inode->i_fop = &pidfs_file_operations; @@ -878,9 +886,127 @@ static void pidfs_put_data(void *data) put_pid(pid); } +/** + * pidfs_register_pid - register a struct pid in pidfs + * @pid: pid to pin + * + * Register a struct pid in pidfs. + * + * Return: On success zero, on error a negative error code is returned. + */ +int pidfs_register_pid(struct pid *pid) +{ + struct pidfs_attr *new_attr __free(kfree) = NULL; + struct pidfs_attr *attr; + + might_sleep(); + + if (!pid) + return 0; + + attr = READ_ONCE(pid->attr); + if (unlikely(attr == PIDFS_PID_DEAD)) + return PTR_ERR(PIDFS_PID_DEAD); + if (attr) + return 0; + + new_attr = kmem_cache_zalloc(pidfs_attr_cachep, GFP_KERNEL); + if (!new_attr) + return -ENOMEM; + + /* Synchronize with pidfs_exit(). 
*/ + guard(spinlock_irq)(&pid->wait_pidfd.lock); + + attr = pid->attr; + if (unlikely(attr == PIDFS_PID_DEAD)) + return PTR_ERR(PIDFS_PID_DEAD); + if (unlikely(attr)) + return 0; + + pid->attr = no_free_ptr(new_attr); + return 0; +} + +static struct dentry *pidfs_stash_dentry(struct dentry **stashed, + struct dentry *dentry) +{ + int ret; + struct pid *pid = d_inode(dentry)->i_private; + + VFS_WARN_ON_ONCE(stashed != &pid->stashed); + + ret = pidfs_register_pid(pid); + if (ret) + return ERR_PTR(ret); + + return stash_dentry(stashed, dentry); +} + static const struct stashed_operations pidfs_stashed_ops = { - .init_inode = pidfs_init_inode, - .put_data = pidfs_put_data, + .stash_dentry = pidfs_stash_dentry, + .init_inode = pidfs_init_inode, + .put_data = pidfs_put_data, +}; + +static int pidfs_xattr_get(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *suffix, void *value, size_t size) +{ + struct pid *pid = inode->i_private; + struct pidfs_attr *attr = pid->attr; + const char *name; + struct simple_xattrs *xattrs; + + xattrs = READ_ONCE(attr->xattrs); + if (!xattrs) + return 0; + + name = xattr_full_name(handler, suffix); + return simple_xattr_get(xattrs, name, value, size); +} + +static int pidfs_xattr_set(const struct xattr_handler *handler, + struct mnt_idmap *idmap, struct dentry *unused, + struct inode *inode, const char *suffix, + const void *value, size_t size, int flags) +{ + struct pid *pid = inode->i_private; + struct pidfs_attr *attr = pid->attr; + const char *name; + struct simple_xattrs *xattrs; + struct simple_xattr *old_xattr; + + /* Ensure we're the only one to set @attr->xattrs. */ + WARN_ON_ONCE(!inode_is_locked(inode)); + + xattrs = READ_ONCE(attr->xattrs); + if (!xattrs) { + xattrs = kmem_cache_zalloc(pidfs_xattr_cachep, GFP_KERNEL); + if (!xattrs) + return -ENOMEM; + + simple_xattrs_init(xattrs); + smp_store_release(&pid->attr->xattrs, xattrs); + } + + name = xattr_full_name(handler, suffix); + old_xattr = simple_xattr_set(xattrs, name, value, size, flags); + if (IS_ERR(old_xattr)) + return PTR_ERR(old_xattr); + + simple_xattr_free(old_xattr); + return 0; +} + +static const struct xattr_handler pidfs_trusted_xattr_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .get = pidfs_xattr_get, + .set = pidfs_xattr_set, +}; + +static const struct xattr_handler *const pidfs_xattr_handlers[] = { + &pidfs_trusted_xattr_handler, + NULL }; static int pidfs_init_fs_context(struct fs_context *fc) @@ -891,9 +1017,12 @@ static int pidfs_init_fs_context(struct fs_context *fc) if (!ctx) return -ENOMEM; + fc->s_iflags |= SB_I_NOEXEC; + fc->s_iflags |= SB_I_NODEV; ctx->ops = &pidfs_sops; ctx->eops = &pidfs_export_operations; ctx->dops = &pidfs_dentry_operations; + ctx->xattr = pidfs_xattr_handlers; fc->s_fs_info = (void *)&pidfs_stashed_ops; return 0; } @@ -921,8 +1050,7 @@ struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags) if (ret < 0) return ERR_PTR(ret); - if (!pidfs_pid_valid(pid, &path, flags)) - return ERR_PTR(-ESRCH); + VFS_WARN_ON_ONCE(!pid->attr); flags &= ~PIDFD_STALE; flags |= O_RDWR; @@ -934,79 +1062,21 @@ struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags) return pidfd_file; } -/** - * pidfs_register_pid - register a struct pid in pidfs - * @pid: pid to pin - * - * Register a struct pid in pidfs. Needs to be paired with - * pidfs_put_pid() to not risk leaking the pidfs dentry and inode. - * - * Return: On success zero, on error a negative error code is returned. 
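With simple_xattrs wired up and a trusted.* handler registered above, a sufficiently privileged process can attach metadata to a pidfd. A hedged sketch follows: the attribute name "trusted.example" and its value are placeholders rather than any established convention, and the trusted namespace needs CAP_SYS_ADMIN, so expect EPERM when run unprivileged.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/xattr.h>
#include <unistd.h>

int main(void)
{
	int pidfd = syscall(SYS_pidfd_open, getpid(), 0);
	const char *val = "build-42";		/* example payload only */
	char buf[64];

	if (fsetxattr(pidfd, "trusted.example", val, strlen(val), 0) == 0) {
		ssize_t n = fgetxattr(pidfd, "trusted.example", buf, sizeof(buf) - 1);
		if (n >= 0) {
			buf[n] = '\0';
			printf("trusted.example = %s\n", buf);
		}
	} else {
		perror("fsetxattr");
	}
	return 0;
}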
- */ -int pidfs_register_pid(struct pid *pid) -{ - struct path path __free(path_put) = {}; - int ret; - - might_sleep(); - - if (!pid) - return 0; - - ret = path_from_stashed(&pid->stashed, pidfs_mnt, get_pid(pid), &path); - if (unlikely(ret)) - return ret; - /* Keep the dentry and only put the reference to the mount. */ - path.dentry = NULL; - return 0; -} - -/** - * pidfs_get_pid - pin a struct pid through pidfs - * @pid: pid to pin - * - * Similar to pidfs_register_pid() but only valid if the caller knows - * there's a reference to the @pid through a dentry already that can't - * go away. - */ -void pidfs_get_pid(struct pid *pid) -{ - if (!pid) - return; - WARN_ON_ONCE(!stashed_dentry_get(&pid->stashed)); -} - -/** - * pidfs_put_pid - drop a pidfs reference - * @pid: pid to drop - * - * Drop a reference to @pid via pidfs. This is only safe if the - * reference has been taken via pidfs_get_pid(). - */ -void pidfs_put_pid(struct pid *pid) -{ - might_sleep(); - - if (!pid) - return; - VFS_WARN_ON_ONCE(!pid->stashed); - dput(pid->stashed); -} - -static void pidfs_inode_init_once(void *data) -{ - struct pidfs_inode *pi = data; - - inode_init_once(&pi->vfs_inode); -} - void __init pidfs_init(void) { - pidfs_cachep = kmem_cache_create("pidfs_cache", sizeof(struct pidfs_inode), 0, + pidfs_attr_cachep = kmem_cache_create("pidfs_attr_cache", sizeof(struct pidfs_attr), 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | - SLAB_ACCOUNT | SLAB_PANIC), - pidfs_inode_init_once); + SLAB_ACCOUNT | SLAB_PANIC), NULL); + + pidfs_xattr_cachep = kmem_cache_create("pidfs_xattr_cache", + sizeof(struct simple_xattrs), 0, + (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | + SLAB_ACCOUNT | SLAB_PANIC), NULL); + pidfs_mnt = kern_mount(&pidfs_type); if (IS_ERR(pidfs_mnt)) panic("Failed to mount pidfs pseudo filesystem"); + + pidfs_root_path.mnt = pidfs_mnt; + pidfs_root_path.dentry = pidfs_mnt->mnt_root; } diff --git a/fs/pipe.c b/fs/pipe.c index 45077c37bad1..731622d0738d 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -963,6 +963,11 @@ int create_pipe_files(struct file **res, int flags) res[1] = f; stream_open(inode, res[0]); stream_open(inode, res[1]); + + /* pipe groks IOCB_NOWAIT */ + res[0]->f_mode |= FMODE_NOWAIT; + res[1]->f_mode |= FMODE_NOWAIT; + /* * Disable permission and pre-content events, but enable legacy * inotify events for legacy users. 
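The pipe.c hunk here (together with the matching removal just below) sets FMODE_NOWAIT in create_pipe_files() itself, so every pipe file accepts IOCB_NOWAIT rather than only those created through the pipe(2)/pipe2(2) path. A small userspace illustration, assuming a glibc that exposes preadv2() and RWF_NOWAIT via <sys/uio.h>; on kernels with this behaviour an empty-pipe read returns EAGAIN instead of blocking (kernels without FMODE_NOWAIT on the fd would return EOPNOTSUPP).

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char c;
	struct iovec iov = { .iov_base = &c, .iov_len = 1 };

	if (pipe(fds) < 0)
		return 1;

	/* Empty pipe: a RWF_NOWAIT read refuses to block. */
	ssize_t n = preadv2(fds[0], &iov, 1, -1, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN)
		puts("nonblocking read refused as expected");
	return 0;
}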
@@ -997,9 +1002,6 @@ static int __do_pipe_flags(int *fd, struct file **files, int flags) audit_fd_pair(fdr, fdw); fd[0] = fdr; fd[1] = fdw; - /* pipe groks IOCB_NOWAIT */ - files[0]->f_mode |= FMODE_NOWAIT; - files[1]->f_mode |= FMODE_NOWAIT; return 0; err_fdr: diff --git a/fs/pnode.c b/fs/pnode.c index ffd429b760d5..81f7599bdac4 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -21,17 +21,12 @@ static inline struct mount *next_peer(struct mount *p) static inline struct mount *first_slave(struct mount *p) { - return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave); -} - -static inline struct mount *last_slave(struct mount *p) -{ - return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave); + return hlist_entry(p->mnt_slave_list.first, struct mount, mnt_slave); } static inline struct mount *next_slave(struct mount *p) { - return list_entry(p->mnt_slave.next, struct mount, mnt_slave); + return hlist_entry(p->mnt_slave.next, struct mount, mnt_slave); } static struct mount *get_peer_under_root(struct mount *mnt, @@ -70,69 +65,90 @@ int get_dominating_id(struct mount *mnt, const struct path *root) return 0; } -static int do_make_slave(struct mount *mnt) +static inline bool will_be_unmounted(struct mount *m) { - struct mount *master, *slave_mnt; + return m->mnt.mnt_flags & MNT_UMOUNT; +} - if (list_empty(&mnt->mnt_share)) { - if (IS_MNT_SHARED(mnt)) { - mnt_release_group_id(mnt); - CLEAR_MNT_SHARED(mnt); - } - master = mnt->mnt_master; - if (!master) { - struct list_head *p = &mnt->mnt_slave_list; - while (!list_empty(p)) { - slave_mnt = list_first_entry(p, - struct mount, mnt_slave); - list_del_init(&slave_mnt->mnt_slave); - slave_mnt->mnt_master = NULL; - } - return 0; - } - } else { +static struct mount *propagation_source(struct mount *mnt) +{ + do { struct mount *m; - /* - * slave 'mnt' to a peer mount that has the - * same root dentry. If none is available then - * slave it to anything that is available. 
- */ - for (m = master = next_peer(mnt); m != mnt; m = next_peer(m)) { - if (m->mnt.mnt_root == mnt->mnt.mnt_root) { - master = m; - break; - } + for (m = next_peer(mnt); m != mnt; m = next_peer(m)) { + if (!will_be_unmounted(m)) + return m; } - list_del_init(&mnt->mnt_share); - mnt->mnt_group_id = 0; - CLEAR_MNT_SHARED(mnt); + mnt = mnt->mnt_master; + } while (mnt && will_be_unmounted(mnt)); + return mnt; +} + +static void transfer_propagation(struct mount *mnt, struct mount *to) +{ + struct hlist_node *p = NULL, *n; + struct mount *m; + + hlist_for_each_entry_safe(m, n, &mnt->mnt_slave_list, mnt_slave) { + m->mnt_master = to; + if (!to) + hlist_del_init(&m->mnt_slave); + else + p = &m->mnt_slave; } - list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave) - slave_mnt->mnt_master = master; - list_move(&mnt->mnt_slave, &master->mnt_slave_list); - list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev); - INIT_LIST_HEAD(&mnt->mnt_slave_list); - mnt->mnt_master = master; - return 0; + if (p) + hlist_splice_init(&mnt->mnt_slave_list, p, &to->mnt_slave_list); } /* - * vfsmount lock must be held for write + * EXCL[namespace_sem] */ void change_mnt_propagation(struct mount *mnt, int type) { + struct mount *m = mnt->mnt_master; + if (type == MS_SHARED) { set_mnt_shared(mnt); return; } - do_make_slave(mnt); - if (type != MS_SLAVE) { - list_del_init(&mnt->mnt_slave); + if (IS_MNT_SHARED(mnt)) { + m = propagation_source(mnt); + if (list_empty(&mnt->mnt_share)) { + mnt_release_group_id(mnt); + } else { + list_del_init(&mnt->mnt_share); + mnt->mnt_group_id = 0; + } + CLEAR_MNT_SHARED(mnt); + transfer_propagation(mnt, m); + } + hlist_del_init(&mnt->mnt_slave); + if (type == MS_SLAVE) { + mnt->mnt_master = m; + if (m) + hlist_add_head(&mnt->mnt_slave, &m->mnt_slave_list); + } else { mnt->mnt_master = NULL; if (type == MS_UNBINDABLE) - mnt->mnt.mnt_flags |= MNT_UNBINDABLE; + mnt->mnt_t_flags |= T_UNBINDABLE; else - mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE; + mnt->mnt_t_flags &= ~T_UNBINDABLE; + } +} + +static struct mount *__propagation_next(struct mount *m, + struct mount *origin) +{ + while (1) { + struct mount *master = m->mnt_master; + + if (master == origin->mnt_master) { + struct mount *next = next_peer(m); + return (next == origin) ? NULL : next; + } else if (m->mnt_slave.next) + return next_slave(m); + + /* back at master */ + m = master; } } @@ -150,34 +166,24 @@ static struct mount *propagation_next(struct mount *m, struct mount *origin) { /* are there any slaves of this mount? */ - if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) + if (!IS_MNT_NEW(m) && !hlist_empty(&m->mnt_slave_list)) return first_slave(m); - while (1) { - struct mount *master = m->mnt_master; - - if (master == origin->mnt_master) { - struct mount *next = next_peer(m); - return (next == origin) ? NULL : next; - } else if (m->mnt_slave.next != &master->mnt_slave_list) - return next_slave(m); - - /* back at master */ - m = master; - } + return __propagation_next(m, origin); } static struct mount *skip_propagation_subtree(struct mount *m, struct mount *origin) { /* - * Advance m such that propagation_next will not return - * the slaves of m. + * Advance m past everything that gets propagation from it. 
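change_mnt_propagation() above is the kernel side of the propagation types that userspace selects with mount(2). A minimal illustration of flipping an existing mount point between them; "/mnt/scratch" is a placeholder that must already be a mount point, and CAP_SYS_ADMIN is required.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	const char *mp = "/mnt/scratch";	/* placeholder mount point */

	if (mount(NULL, mp, NULL, MS_SHARED, NULL) == 0 &&
	    mount(NULL, mp, NULL, MS_SLAVE, NULL) == 0 &&
	    mount(NULL, mp, NULL, MS_PRIVATE, NULL) == 0)
		puts("propagation changed: shared -> slave -> private");
	else
		perror("mount");
	return 0;
}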
*/ - if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) - m = last_slave(m); + struct mount *p = __propagation_next(m, origin); + + while (p && peers(m, p)) + p = __propagation_next(p, origin); - return m; + return p; } static struct mount *next_group(struct mount *m, struct mount *origin) @@ -185,7 +191,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin) while (1) { while (1) { struct mount *next; - if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) + if (!IS_MNT_NEW(m) && !hlist_empty(&m->mnt_slave_list)) return first_slave(m); next = next_peer(m); if (m->mnt_group_id == origin->mnt_group_id) { @@ -198,7 +204,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin) /* m is the last peer */ while (1) { struct mount *master = m->mnt_master; - if (m->mnt_slave.next != &master->mnt_slave_list) + if (m->mnt_slave.next) return next_slave(m); m = next_peer(master); if (master->mnt_group_id == origin->mnt_group_id) @@ -212,142 +218,113 @@ static struct mount *next_group(struct mount *m, struct mount *origin) } } -/* all accesses are serialized by namespace_sem */ -static struct mount *last_dest, *first_source, *last_source, *dest_master; -static struct hlist_head *list; - -static inline bool peers(const struct mount *m1, const struct mount *m2) -{ - return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id; -} - -static int propagate_one(struct mount *m, struct mountpoint *dest_mp) +static bool need_secondary(struct mount *m, struct mountpoint *dest_mp) { - struct mount *child; - int type; /* skip ones added by this propagate_mnt() */ if (IS_MNT_NEW(m)) - return 0; + return false; /* skip if mountpoint isn't visible in m */ if (!is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) - return 0; + return false; /* skip if m is in the anon_ns */ if (is_anon_ns(m->mnt_ns)) - return 0; + return false; + return true; +} - if (peers(m, last_dest)) { - type = CL_MAKE_SHARED; - } else { - struct mount *n, *p; - bool done; - for (n = m; ; n = p) { - p = n->mnt_master; - if (p == dest_master || IS_MNT_MARKED(p)) - break; +static struct mount *find_master(struct mount *m, + struct mount *last_copy, + struct mount *original) +{ + struct mount *p; + + // ascend until there's a copy for something with the same master + for (;;) { + p = m->mnt_master; + if (!p || IS_MNT_MARKED(p)) + break; + m = p; + } + while (!peers(last_copy, original)) { + struct mount *parent = last_copy->mnt_parent; + if (parent->mnt_master == p) { + if (!peers(parent, m)) + last_copy = last_copy->mnt_master; + break; } - do { - struct mount *parent = last_source->mnt_parent; - if (peers(last_source, first_source)) - break; - done = parent->mnt_master == p; - if (done && peers(n, parent)) - break; - last_source = last_source->mnt_master; - } while (!done); - - type = CL_SLAVE; - /* beginning of peer group among the slaves? */ - if (IS_MNT_SHARED(m)) - type |= CL_MAKE_SHARED; + last_copy = last_copy->mnt_master; } - - child = copy_tree(last_source, last_source->mnt.mnt_root, type); - if (IS_ERR(child)) - return PTR_ERR(child); - read_seqlock_excl(&mount_lock); - mnt_set_mountpoint(m, dest_mp, child); - if (m->mnt_master != dest_master) - SET_MNT_MARK(m->mnt_master); - read_sequnlock_excl(&mount_lock); - last_dest = m; - last_source = child; - hlist_add_head(&child->mnt_hash, list); - return count_mounts(m->mnt_ns, child); + return last_copy; } -/* - * mount 'source_mnt' under the destination 'dest_mnt' at - * dentry 'dest_dentry'. 
And propagate that mount to - * all the peer and slave mounts of 'dest_mnt'. - * Link all the new mounts into a propagation tree headed at - * source_mnt. Also link all the new mounts using ->mnt_list - * headed at source_mnt's ->mnt_list +/** + * propagate_mnt() - create secondary copies for tree attachment + * @dest_mnt: destination mount. + * @dest_mp: destination mountpoint. + * @source_mnt: source mount. + * @tree_list: list of secondaries to be attached. * - * @dest_mnt: destination mount. - * @dest_dentry: destination dentry. - * @source_mnt: source mount. - * @tree_list : list of heads of trees to be attached. + * Create secondary copies for attaching a tree with root @source_mnt + * at mount @dest_mnt with mountpoint @dest_mp. Link all new mounts + * into a propagation graph. Set mountpoints for all secondaries, + * link their roots into @tree_list via ->mnt_hash. */ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, - struct mount *source_mnt, struct hlist_head *tree_list) + struct mount *source_mnt, struct hlist_head *tree_list) { - struct mount *m, *n; - int ret = 0; - - /* - * we don't want to bother passing tons of arguments to - * propagate_one(); everything is serialized by namespace_sem, - * so globals will do just fine. - */ - last_dest = dest_mnt; - first_source = source_mnt; - last_source = source_mnt; - list = tree_list; - dest_master = dest_mnt->mnt_master; - - /* all peers of dest_mnt, except dest_mnt itself */ - for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) { - ret = propagate_one(n, dest_mp); - if (ret) - goto out; - } - - /* all slave groups */ - for (m = next_group(dest_mnt, dest_mnt); m; - m = next_group(m, dest_mnt)) { - /* everything in that slave group */ - n = m; + struct mount *m, *n, *copy, *this; + int err = 0, type; + + if (dest_mnt->mnt_master) + SET_MNT_MARK(dest_mnt->mnt_master); + + /* iterate over peer groups, depth first */ + for (m = dest_mnt; m && !err; m = next_group(m, dest_mnt)) { + if (m == dest_mnt) { // have one for dest_mnt itself + copy = source_mnt; + type = CL_MAKE_SHARED; + n = next_peer(m); + if (n == m) + continue; + } else { + type = CL_SLAVE; + /* beginning of peer group among the slaves? */ + if (IS_MNT_SHARED(m)) + type |= CL_MAKE_SHARED; + n = m; + } do { - ret = propagate_one(n, dest_mp); - if (ret) - goto out; - n = next_peer(n); - } while (n != m); + if (!need_secondary(n, dest_mp)) + continue; + if (type & CL_SLAVE) // first in this peer group + copy = find_master(n, copy, source_mnt); + this = copy_tree(copy, copy->mnt.mnt_root, type); + if (IS_ERR(this)) { + err = PTR_ERR(this); + break; + } + read_seqlock_excl(&mount_lock); + mnt_set_mountpoint(n, dest_mp, this); + read_sequnlock_excl(&mount_lock); + if (n->mnt_master) + SET_MNT_MARK(n->mnt_master); + copy = this; + hlist_add_head(&this->mnt_hash, tree_list); + err = count_mounts(n->mnt_ns, this); + if (err) + break; + type = CL_MAKE_SHARED; + } while ((n = next_peer(n)) != m); } -out: - read_seqlock_excl(&mount_lock); + hlist_for_each_entry(n, tree_list, mnt_hash) { m = n->mnt_parent; - if (m->mnt_master != dest_mnt->mnt_master) + if (m->mnt_master) CLEAR_MNT_MARK(m->mnt_master); } - read_sequnlock_excl(&mount_lock); - return ret; -} - -static struct mount *find_topper(struct mount *mnt) -{ - /* If there is exactly one mount covering mnt completely return it. 
*/ - struct mount *child; - - if (!list_is_singular(&mnt->mnt_mounts)) - return NULL; - - child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child); - if (child->mnt_mountpoint != mnt->mnt.mnt_root) - return NULL; - - return child; + if (dest_mnt->mnt_master) + CLEAR_MNT_MARK(dest_mnt->mnt_master); + return err; } /* @@ -407,12 +384,8 @@ bool propagation_would_overmount(const struct mount *from, */ int propagate_mount_busy(struct mount *mnt, int refcnt) { - struct mount *m, *child, *topper; struct mount *parent = mnt->mnt_parent; - if (mnt == parent) - return do_refcount_check(mnt, refcnt); - /* * quickly check if the current mount can be unmounted. * If not, we don't have to go checking for all other @@ -421,23 +394,27 @@ int propagate_mount_busy(struct mount *mnt, int refcnt) if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt)) return 1; - for (m = propagation_next(parent, parent); m; + if (mnt == parent) + return 0; + + for (struct mount *m = propagation_next(parent, parent); m; m = propagation_next(m, parent)) { - int count = 1; - child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint); - if (!child) - continue; + struct list_head *head; + struct mount *child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint); - /* Is there exactly one mount on the child that covers - * it completely whose reference should be ignored? - */ - topper = find_topper(child); - if (topper) - count += 1; - else if (!list_empty(&child->mnt_mounts)) + if (!child) continue; - if (do_refcount_check(child, count)) + head = &child->mnt_mounts; + if (!list_empty(head)) { + /* + * a mount that covers child completely wouldn't prevent + * it being pulled out; any other would. + */ + if (!list_is_singular(head) || !child->overmount) + continue; + } + if (do_refcount_check(child, 1)) return 1; } return 0; @@ -463,181 +440,209 @@ void propagate_mount_unlock(struct mount *mnt) } } -static void umount_one(struct mount *mnt, struct list_head *to_umount) +static inline bool is_candidate(struct mount *m) { - CLEAR_MNT_MARK(mnt); - mnt->mnt.mnt_flags |= MNT_UMOUNT; - list_del_init(&mnt->mnt_child); - list_del_init(&mnt->mnt_umounting); - move_from_ns(mnt, to_umount); + return m->mnt_t_flags & T_UMOUNT_CANDIDATE; } -/* - * NOTE: unmounting 'mnt' naturally propagates to all other mounts its - * parent propagates to. - */ -static bool __propagate_umount(struct mount *mnt, - struct list_head *to_umount, - struct list_head *to_restore) +static void umount_one(struct mount *m, struct list_head *to_umount) { - bool progress = false; - struct mount *child; - - /* - * The state of the parent won't change if this mount is - * already unmounted or marked as without children. - */ - if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED)) - goto out; + m->mnt.mnt_flags |= MNT_UMOUNT; + list_del_init(&m->mnt_child); + move_from_ns(m); + list_add_tail(&m->mnt_list, to_umount); +} - /* Verify topper is the only grandchild that has not been - * speculatively unmounted. 
- */ - list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { - if (child->mnt_mountpoint == mnt->mnt.mnt_root) - continue; - if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child)) - continue; - /* Found a mounted child */ - goto children; - } +static void remove_from_candidate_list(struct mount *m) +{ + m->mnt_t_flags &= ~(T_MARKED | T_UMOUNT_CANDIDATE); + list_del_init(&m->mnt_list); +} - /* Mark mounts that can be unmounted if not locked */ - SET_MNT_MARK(mnt); - progress = true; +static void gather_candidates(struct list_head *set, + struct list_head *candidates) +{ + struct mount *m, *p, *q; - /* If a mount is without children and not locked umount it. */ - if (!IS_MNT_LOCKED(mnt)) { - umount_one(mnt, to_umount); - } else { -children: - list_move_tail(&mnt->mnt_umounting, to_restore); + list_for_each_entry(m, set, mnt_list) { + if (is_candidate(m)) + continue; + m->mnt_t_flags |= T_UMOUNT_CANDIDATE; + p = m->mnt_parent; + q = propagation_next(p, p); + while (q) { + struct mount *child = __lookup_mnt(&q->mnt, + m->mnt_mountpoint); + if (child) { + /* + * We might've already run into this one. That + * must've happened on earlier iteration of the + * outer loop; in that case we can skip those + * parents that get propagation from q - there + * will be nothing new on those as well. + */ + if (is_candidate(child)) { + q = skip_propagation_subtree(q, p); + continue; + } + child->mnt_t_flags |= T_UMOUNT_CANDIDATE; + if (!will_be_unmounted(child)) + list_add(&child->mnt_list, candidates); + } + q = propagation_next(q, p); + } } -out: - return progress; + list_for_each_entry(m, set, mnt_list) + m->mnt_t_flags &= ~T_UMOUNT_CANDIDATE; } -static void umount_list(struct list_head *to_umount, - struct list_head *to_restore) +/* + * We know that some child of @m can't be unmounted. In all places where the + * chain of descent of @m has child not overmounting the root of parent, + * the parent can't be unmounted either. + */ +static void trim_ancestors(struct mount *m) { - struct mount *mnt, *child, *tmp; - list_for_each_entry(mnt, to_umount, mnt_list) { - list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) { - /* topper? */ - if (child->mnt_mountpoint == mnt->mnt.mnt_root) - list_move_tail(&child->mnt_umounting, to_restore); - else - umount_one(child, to_umount); - } + struct mount *p; + + for (p = m->mnt_parent; is_candidate(p); m = p, p = p->mnt_parent) { + if (IS_MNT_MARKED(m)) // all candidates beneath are overmounts + return; + SET_MNT_MARK(m); + if (m != p->overmount) + p->mnt_t_flags &= ~T_UMOUNT_CANDIDATE; } } -static void restore_mounts(struct list_head *to_restore) +/* + * Find and exclude all umount candidates forbidden by @m + * (see Documentation/filesystems/propagate_umount.txt) + * If we can immediately tell that @m is OK to unmount (unlocked + * and all children are already committed to unmounting) commit + * to unmounting it. + * Only @m itself might be taken from the candidates list; + * anything found by trim_ancestors() is marked non-candidate + * and left on the list. + */ +static void trim_one(struct mount *m, struct list_head *to_umount) { - /* Restore mounts to a clean working state */ - while (!list_empty(to_restore)) { - struct mount *mnt, *parent; - struct mountpoint *mp; - - mnt = list_first_entry(to_restore, struct mount, mnt_umounting); - CLEAR_MNT_MARK(mnt); - list_del_init(&mnt->mnt_umounting); - - /* Should this mount be reparented? 
*/ - mp = mnt->mnt_mp; - parent = mnt->mnt_parent; - while (parent->mnt.mnt_flags & MNT_UMOUNT) { - mp = parent->mnt_mp; - parent = parent->mnt_parent; - } - if (parent != mnt->mnt_parent) { - mnt_change_mountpoint(parent, mp, mnt); - mnt_notify_add(mnt); + bool remove_this = false, found = false, umount_this = false; + struct mount *n; + + if (!is_candidate(m)) { // trim_ancestors() left it on list + remove_from_candidate_list(m); + return; + } + + list_for_each_entry(n, &m->mnt_mounts, mnt_child) { + if (!is_candidate(n)) { + found = true; + if (n != m->overmount) { + remove_this = true; + break; + } } } + if (found) { + trim_ancestors(m); + } else if (!IS_MNT_LOCKED(m) && list_empty(&m->mnt_mounts)) { + remove_this = true; + umount_this = true; + } + if (remove_this) { + remove_from_candidate_list(m); + if (umount_this) + umount_one(m, to_umount); + } } -static void cleanup_umount_visitations(struct list_head *visited) +static void handle_locked(struct mount *m, struct list_head *to_umount) { - while (!list_empty(visited)) { - struct mount *mnt = - list_first_entry(visited, struct mount, mnt_umounting); - list_del_init(&mnt->mnt_umounting); + struct mount *cutoff = m, *p; + + if (!is_candidate(m)) { // trim_ancestors() left it on list + remove_from_candidate_list(m); + return; + } + for (p = m; is_candidate(p); p = p->mnt_parent) { + remove_from_candidate_list(p); + if (!IS_MNT_LOCKED(p)) + cutoff = p->mnt_parent; + } + if (will_be_unmounted(p)) + cutoff = p; + while (m != cutoff) { + umount_one(m, to_umount); + m = m->mnt_parent; } } /* - * collect all mounts that receive propagation from the mount in @list, - * and return these additional mounts in the same list. - * @list: the list of mounts to be unmounted. + * @m is not to going away, and it overmounts the top of a stack of mounts + * that are going away. We know that all of those are fully overmounted + * by the one above (@m being the topmost of the chain), so @m can be slid + * in place where the bottom of the stack is attached. * - * vfsmount lock must be held for write + * NOTE: here we temporarily violate a constraint - two mounts end up with + * the same parent and mountpoint; that will be remedied as soon as we + * return from propagate_umount() - its caller (umount_tree()) will detach + * the stack from the parent it (and now @m) is attached to. umount_tree() + * might choose to keep unmounted pieces stuck to each other, but it always + * detaches them from the mounts that remain in the tree. */ -int propagate_umount(struct list_head *list) +static void reparent(struct mount *m) { - struct mount *mnt; - LIST_HEAD(to_restore); - LIST_HEAD(to_umount); - LIST_HEAD(visited); - - /* Find candidates for unmounting */ - list_for_each_entry_reverse(mnt, list, mnt_list) { - struct mount *parent = mnt->mnt_parent; - struct mount *m; + struct mount *p = m; + struct mountpoint *mp; - /* - * If this mount has already been visited it is known that it's - * entire peer group and all of their slaves in the propagation - * tree for the mountpoint has already been visited and there is - * no need to visit them again. 
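The cutoff calculation in handle_locked() above is easiest to follow on a concrete chain. Below is a minimal userspace sketch that mirrors only that walk; toy structures and booleans stand in for struct mount, MNT_LOCKED, the candidate flag and will_be_unmounted(), and the real function additionally handles the leftover-from-trim_ancestors case first.

#include <stdbool.h>
#include <stdio.h>

struct toy_mount {
        struct toy_mount *parent;
        const char *name;
        bool candidate;         /* stands in for T_UMOUNT_CANDIDATE */
        bool locked;            /* stands in for MNT_LOCKED */
        bool going_away;        /* stands in for will_be_unmounted() */
};

/* mirrors the cutoff walk in handle_locked() */
static void toy_handle_locked(struct toy_mount *m)
{
        struct toy_mount *cutoff = m, *p;

        for (p = m; p->candidate; p = p->parent) {
                p->candidate = false;           /* remove_from_candidate_list() */
                if (!p->locked)
                        cutoff = p->parent;
        }
        if (p->going_away)
                cutoff = p;
        for (; m != cutoff; m = m->parent)
                printf("umount %s\n", m->name);
}

int main(void)
{
        /* P <- M1 <- M2 (locked) <- M3; M1..M3 are umount candidates */
        struct toy_mount P  = { .name = "P" };
        struct toy_mount M1 = { .parent = &P,  .name = "M1", .candidate = true };
        struct toy_mount M2 = { .parent = &M1, .name = "M2", .candidate = true,
                                .locked = true };
        struct toy_mount M3 = { .parent = &M2, .name = "M3", .candidate = true };

        /* prints M3, M2, M1: unlocked M1 goes, taking locked M2 with it.
         * If M1 were locked too, cutoff would stay at M2 and only M3 would go. */
        toy_handle_locked(&M3);
        return 0;
}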
- */ - if (!list_empty(&mnt->mnt_umounting)) - continue; + do { + mp = p->mnt_mp; + p = p->mnt_parent; + } while (will_be_unmounted(p)); - list_add_tail(&mnt->mnt_umounting, &visited); - for (m = propagation_next(parent, parent); m; - m = propagation_next(m, parent)) { - struct mount *child = __lookup_mnt(&m->mnt, - mnt->mnt_mountpoint); - if (!child) - continue; + mnt_change_mountpoint(p, mp, m); + mnt_notify_add(m); +} - if (!list_empty(&child->mnt_umounting)) { - /* - * If the child has already been visited it is - * know that it's entire peer group and all of - * their slaves in the propgation tree for the - * mountpoint has already been visited and there - * is no need to visit this subtree again. - */ - m = skip_propagation_subtree(m, parent); - continue; - } else if (child->mnt.mnt_flags & MNT_UMOUNT) { - /* - * We have come across a partially unmounted - * mount in a list that has not been visited - * yet. Remember it has been visited and - * continue about our merry way. - */ - list_add_tail(&child->mnt_umounting, &visited); - continue; - } +/** + * propagate_umount - apply propagation rules to the set of mounts for umount() + * @set: the list of mounts to be unmounted. + * + * Collect all mounts that receive propagation from the mount in @set and have + * no obstacles to being unmounted. Add these additional mounts to the set. + * + * See Documentation/filesystems/propagate_umount.txt if you do anything in + * this area. + * + * Locks held: + * mount_lock (write_seqlock), namespace_sem (exclusive). + */ +void propagate_umount(struct list_head *set) +{ + struct mount *m, *p; + LIST_HEAD(to_umount); // committed to unmounting + LIST_HEAD(candidates); // undecided umount candidates - /* Check the child and parents while progress is made */ - while (__propagate_umount(child, - &to_umount, &to_restore)) { - /* Is the parent a umount candidate? */ - child = child->mnt_parent; - if (list_empty(&child->mnt_umounting)) - break; - } - } + // collect all candidates + gather_candidates(set, &candidates); + + // reduce the set until it's non-shifting + list_for_each_entry_safe(m, p, &candidates, mnt_list) + trim_one(m, &to_umount); + + // ... 
and non-revealing + while (!list_empty(&candidates)) { + m = list_first_entry(&candidates,struct mount, mnt_list); + handle_locked(m, &to_umount); } - umount_list(&to_umount, &to_restore); - restore_mounts(&to_restore); - cleanup_umount_visitations(&visited); - list_splice_tail(&to_umount, list); + // now to_umount consists of all acceptable candidates + // deal with reparenting of remaining overmounts on those + list_for_each_entry(m, &to_umount, mnt_list) { + if (m->overmount) + reparent(m->overmount); + } - return 0; + // and fold them into the set + list_splice_tail_init(&to_umount, set); } diff --git a/fs/pnode.h b/fs/pnode.h index 2d026fb98b18..00ab153e3e9d 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -10,14 +10,14 @@ #include <linux/list.h> #include "mount.h" -#define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED) +#define IS_MNT_SHARED(m) ((m)->mnt_t_flags & T_SHARED) #define IS_MNT_SLAVE(m) ((m)->mnt_master) #define IS_MNT_NEW(m) (!(m)->mnt_ns) -#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED) -#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE) -#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED) -#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) -#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) +#define CLEAR_MNT_SHARED(m) ((m)->mnt_t_flags &= ~T_SHARED) +#define IS_MNT_UNBINDABLE(m) ((m)->mnt_t_flags & T_UNBINDABLE) +#define IS_MNT_MARKED(m) ((m)->mnt_t_flags & T_MARKED) +#define SET_MNT_MARK(m) ((m)->mnt_t_flags |= T_MARKED) +#define CLEAR_MNT_MARK(m) ((m)->mnt_t_flags &= ~T_MARKED) #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) #define CL_EXPIRE 0x01 @@ -25,19 +25,26 @@ #define CL_COPY_UNBINDABLE 0x04 #define CL_MAKE_SHARED 0x08 #define CL_PRIVATE 0x10 -#define CL_SHARED_TO_SLAVE 0x20 #define CL_COPY_MNT_NS_FILE 0x40 +/* + * EXCL[namespace_sem] + */ static inline void set_mnt_shared(struct mount *mnt) { - mnt->mnt.mnt_flags &= ~MNT_SHARED_MASK; - mnt->mnt.mnt_flags |= MNT_SHARED; + mnt->mnt_t_flags &= ~T_SHARED_MASK; + mnt->mnt_t_flags |= T_SHARED; +} + +static inline bool peers(const struct mount *m1, const struct mount *m2) +{ + return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id; } void change_mnt_propagation(struct mount *, int); int propagate_mnt(struct mount *, struct mountpoint *, struct mount *, struct hlist_head *); -int propagate_umount(struct list_head *); +void propagate_umount(struct list_head *); int propagate_mount_busy(struct mount *, int); void propagate_mount_unlock(struct mount *); void mnt_release_group_id(struct mount *); diff --git a/fs/proc/base.c b/fs/proc/base.c index c667702dc69b..e93149a01341 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2704,8 +2704,7 @@ static struct dentry *proc_pident_instantiate(struct dentry *dentry, inode->i_fop = p->fop; ei->op = p->op; pid_update_inode(task, inode); - d_set_d_op(dentry, &pid_dentry_operations); - return d_splice_alias(inode, dentry); + return d_splice_alias_ops(inode, dentry, &pid_dentry_operations); } static struct dentry *proc_pident_lookup(struct inode *dir, @@ -3501,8 +3500,7 @@ static struct dentry *proc_pid_instantiate(struct dentry * dentry, set_nlink(inode, nlink_tgid); pid_update_inode(task, inode); - d_set_d_op(dentry, &pid_dentry_operations); - return d_splice_alias(inode, dentry); + return d_splice_alias_ops(inode, dentry, &pid_dentry_operations); } struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags) @@ -3804,8 +3802,7 @@ static struct dentry *proc_task_instantiate(struct dentry 
*dentry, set_nlink(inode, nlink_tid); pid_update_inode(task, inode); - d_set_d_op(dentry, &pid_dentry_operations); - return d_splice_alias(inode, dentry); + return d_splice_alias_ops(inode, dentry, &pid_dentry_operations); } static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 37aa778d1af7..9eeccff49b2a 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c @@ -352,18 +352,9 @@ static int proc_fd_getattr(struct mnt_idmap *idmap, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); - int rv = 0; generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); - - /* If it's a directory, put the number of open fds there */ - if (S_ISDIR(inode->i_mode)) { - rv = proc_readfd_count(inode, &stat->size); - if (rv < 0) - return rv; - } - - return rv; + return proc_readfd_count(inode, &stat->size); } const struct inode_operations proc_fd_inode_operations = { diff --git a/fs/proc/generic.c b/fs/proc/generic.c index a3e22803cddf..5635453cd476 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -254,8 +254,11 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry, inode = proc_get_inode(dir->i_sb, de); if (!inode) return ERR_PTR(-ENOMEM); - d_set_d_op(dentry, de->proc_dops); - return d_splice_alias(inode, dentry); + if (de->flags & PROC_ENTRY_FORCE_LOOKUP) + return d_splice_alias_ops(inode, dentry, + &proc_net_dentry_ops); + return d_splice_alias_ops(inode, dentry, + &proc_misc_dentry_ops); } read_unlock(&proc_subdir_lock); return ERR_PTR(-ENOENT); @@ -448,9 +451,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, INIT_LIST_HEAD(&ent->pde_openers); proc_set_user(ent, (*parent)->uid, (*parent)->gid); - ent->proc_dops = &proc_misc_dentry_ops; /* Revalidate everything under /proc/${pid}/net */ - if ((*parent)->proc_dops == &proc_net_dentry_ops) + if ((*parent)->flags & PROC_ENTRY_FORCE_LOOKUP) pde_force_lookup(ent); out: diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 96122e91c645..520c4742101d 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -44,7 +44,6 @@ struct proc_dir_entry { const struct proc_ops *proc_ops; const struct file_operations *proc_dir_ops; }; - const struct dentry_operations *proc_dops; union { const struct seq_operations *seq_ops; int (*single_show)(struct seq_file *, void *); @@ -403,7 +402,7 @@ extern const struct dentry_operations proc_net_dentry_ops; static inline void pde_force_lookup(struct proc_dir_entry *pde) { /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */ - pde->proc_dops = &proc_net_dentry_ops; + pde->flags |= PROC_ENTRY_FORCE_LOOKUP; } /* @@ -414,7 +413,6 @@ static inline void pde_force_lookup(struct proc_dir_entry *pde) static inline struct dentry *proc_splice_unmountable(struct inode *inode, struct dentry *dentry, const struct dentry_operations *d_ops) { - d_set_d_op(dentry, d_ops); dont_mount(dentry); - return d_splice_alias(inode, dentry); + return d_splice_alias_ops(inode, dentry, d_ops); } diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index c610224faf10..4403a2e20c16 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c @@ -111,8 +111,7 @@ static struct dentry *proc_ns_instantiate(struct dentry *dentry, ei->ns_ops = ns_ops; pid_update_inode(task, inode); - d_set_d_op(dentry, &pid_dentry_operations); - return d_splice_alias(inode, dentry); + return d_splice_alias_ops(inode, dentry, &pid_dentry_operations); } static int 
proc_ns_dir_readdir(struct file *file, struct dir_context *ctx) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 08b78150cdde..49ab74e0bfde 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -540,9 +540,8 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, goto out; } - d_set_d_op(dentry, &proc_sys_dentry_operations); inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p); - err = d_splice_alias(inode, dentry); + err = d_splice_alias_ops(inode, dentry, &proc_sys_dentry_operations); out: if (h) @@ -699,9 +698,9 @@ static bool proc_sys_fill_cache(struct file *file, return false; if (d_in_lookup(child)) { struct dentry *res; - d_set_d_op(child, &proc_sys_dentry_operations); inode = proc_sys_make_inode(dir->d_sb, head, table); - res = d_splice_alias(inode, child); + res = d_splice_alias_ops(inode, child, + &proc_sys_dentry_operations); d_lookup_done(child); if (unlikely(res)) { dput(child); diff --git a/fs/proc/root.c b/fs/proc/root.c index 06a297a27ba3..ed86ac710384 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -363,12 +363,12 @@ static const struct inode_operations proc_root_inode_operations = { * This is the root "inode" in the /proc tree.. */ struct proc_dir_entry proc_root = { - .low_ino = PROC_ROOT_INO, - .namelen = 5, - .mode = S_IFDIR | S_IRUGO | S_IXUGO, - .nlink = 2, + .low_ino = PROCFS_ROOT_INO, + .namelen = 5, + .mode = S_IFDIR | S_IRUGO | S_IXUGO, + .nlink = 2, .refcnt = REFCOUNT_INIT(1), - .proc_iops = &proc_root_inode_operations, + .proc_iops = &proc_root_inode_operations, .proc_dir_ops = &proc_root_operations, .parent = &proc_root, .subdir = RB_ROOT, diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index bb3b769edc71..1a2e1185426c 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -300,7 +300,7 @@ static struct dentry *psinfo_lock_root(void) return NULL; root = pstore_sb->s_root; - inode_lock(d_inode(root)); + inode_lock_nested(d_inode(root), I_MUTEX_PARENT); return root; } @@ -318,8 +318,7 @@ int pstore_put_backend_records(struct pstore_info *psi) list_for_each_entry_safe(pos, tmp, &records_list, list) { if (pos->record->psi == psi) { list_del_init(&pos->list); - d_invalidate(pos->dentry); - simple_unlink(d_inode(root), pos->dentry); + locked_recursive_removal(pos->dentry, NULL); pos->dentry = NULL; } } diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index b45c7edc3225..b11f5b20b78b 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c @@ -41,7 +41,7 @@ static unsigned long ramfs_mmu_get_unmapped_area(struct file *file, const struct file_operations ramfs_file_operations = { .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .fsync = noop_fsync, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 7a6d980e614d..77b8ca2757e0 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -28,7 +28,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, unsigned long len, unsigned long pgoff, unsigned long flags); -static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); +static int ramfs_nommu_mmap_prepare(struct vm_area_desc *desc); static unsigned ramfs_mmap_capabilities(struct file *file) { @@ -38,7 +38,7 @@ static unsigned ramfs_mmap_capabilities(struct file *file) const struct file_operations ramfs_file_operations = { .mmap_capabilities = ramfs_mmap_capabilities, 
- .mmap = ramfs_nommu_mmap, + .mmap_prepare = ramfs_nommu_mmap_prepare, .get_unmapped_area = ramfs_nommu_get_unmapped_area, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, @@ -262,12 +262,12 @@ out: /* * set up a mapping for shared memory segments */ -static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma) +static int ramfs_nommu_mmap_prepare(struct vm_area_desc *desc) { - if (!is_nommu_shared_mapping(vma->vm_flags)) + if (!is_nommu_shared_mapping(desc->vm_flags)) return -ENOSYS; - file_accessed(file); - vma->vm_ops = &generic_file_vm_ops; + file_accessed(desc->file); + desc->vm_ops = &generic_file_vm_ops; return 0; } diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index 775fa905fda0..f8874c3b8c1e 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -269,6 +269,7 @@ static int ramfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = RAMFS_MAGIC; sb->s_op = &ramfs_ops; + sb->s_d_flags = DCACHE_DONTCACHE; sb->s_time_gran = 1; inode = ramfs_get_inode(sb, NULL, S_IFDIR | fsi->mount_opts.mode, 0); diff --git a/fs/read_write.c b/fs/read_write.c index 0ef70e128c4a..c5b6265d984b 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -28,7 +28,7 @@ const struct file_operations generic_ro_fops = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, - .mmap = generic_file_readonly_mmap, + .mmap_prepare = generic_file_readonly_mmap_prepare, .splice_read = filemap_splice_read, }; @@ -237,7 +237,7 @@ EXPORT_SYMBOL(generic_llseek_cookie); * @offset: file offset to seek to * @whence: type of seek * - * This is a generic implemenation of ->llseek useable for all normal local + * This is a generic implementation of ->llseek useable for all normal local * filesystems. It just updates the file offset to the value specified by * @offset and @whence. */ diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c index 4520ca413867..4b77c6dc4418 100644 --- a/fs/romfs/mmap-nommu.c +++ b/fs/romfs/mmap-nommu.c @@ -61,9 +61,9 @@ static unsigned long romfs_get_unmapped_area(struct file *file, * permit a R/O mapping to be made directly through onto an MTD device if * possible */ -static int romfs_mmap(struct file *file, struct vm_area_struct *vma) +static int romfs_mmap_prepare(struct vm_area_desc *desc) { - return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -ENOSYS; + return is_nommu_shared_mapping(desc->vm_flags) ? 0 : -ENOSYS; } static unsigned romfs_mmap_capabilities(struct file *file) @@ -79,7 +79,7 @@ const struct file_operations romfs_ro_fops = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .splice_read = filemap_splice_read, - .mmap = romfs_mmap, + .mmap_prepare = romfs_mmap_prepare, .get_unmapped_area = romfs_get_unmapped_area, .mmap_capabilities = romfs_mmap_capabilities, }; diff --git a/fs/select.c b/fs/select.c index 9fb650d03d52..082cf60c7e23 100644 --- a/fs/select.c +++ b/fs/select.c @@ -192,7 +192,7 @@ static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *k * and is paired with smp_store_mb() in poll_schedule_timeout. 
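The ramfs, romfs and generic_ro_fops hunks above all make the same mechanical change: the ->mmap handler becomes ->mmap_prepare, receives a struct vm_area_desc, and sets desc->vm_ops instead of poking a struct vm_area_struct directly. A minimal sketch of the pattern for a hypothetical filesystem "foofs", using only the desc fields exercised by this series:

#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct foofs_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
};

/* what used to be foofs_file_mmap(struct file *, struct vm_area_struct *) */
static int foofs_file_mmap_prepare(struct vm_area_desc *desc)
{
        int err;

        err = generic_file_mmap_prepare(desc);  /* replaces generic_file_mmap() */
        if (err)
                return err;

        desc->vm_ops = &foofs_file_vm_ops;      /* instead of vma->vm_ops = ... */
        file_accessed(desc->file);              /* the file comes via desc->file */
        return 0;
}

static const struct file_operations foofs_file_operations = {
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap_prepare   = foofs_file_mmap_prepare,      /* replaces .mmap */
};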
*/ smp_wmb(); - pwq->triggered = 1; + WRITE_ONCE(pwq->triggered, 1); /* * Perform the default wake up operation using a dummy @@ -237,7 +237,7 @@ static int poll_schedule_timeout(struct poll_wqueues *pwq, int state, int rc = -EINTR; set_current_state(state); - if (!pwq->triggered) + if (!READ_ONCE(pwq->triggered)) rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS); __set_current_state(TASK_RUNNING); diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index 0a5266ecfd15..0fdadd668a81 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -260,9 +260,9 @@ cifs_read_super(struct super_block *sb) } if (tcon->nocase) - sb->s_d_op = &cifs_ci_dentry_ops; + set_default_d_op(sb, &cifs_ci_dentry_ops); else - sb->s_d_op = &cifs_dentry_ops; + set_default_d_op(sb, &cifs_dentry_ops); sb->s_root = d_make_root(inode); if (!sb->s_root) { @@ -1525,7 +1525,7 @@ const struct file_operations cifs_file_ops = { .flock = cifs_flock, .fsync = cifs_fsync, .flush = cifs_flush, - .mmap = cifs_file_mmap, + .mmap_prepare = cifs_file_mmap_prepare, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .llseek = cifs_llseek, @@ -1545,7 +1545,7 @@ const struct file_operations cifs_file_strict_ops = { .flock = cifs_flock, .fsync = cifs_strict_fsync, .flush = cifs_flush, - .mmap = cifs_file_strict_mmap, + .mmap_prepare = cifs_file_strict_mmap_prepare, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .llseek = cifs_llseek, @@ -1565,7 +1565,7 @@ const struct file_operations cifs_file_direct_ops = { .flock = cifs_flock, .fsync = cifs_fsync, .flush = cifs_flush, - .mmap = cifs_file_mmap, + .mmap_prepare = cifs_file_mmap_prepare, .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, .unlocked_ioctl = cifs_ioctl, @@ -1583,7 +1583,7 @@ const struct file_operations cifs_file_nobrl_ops = { .release = cifs_close, .fsync = cifs_fsync, .flush = cifs_flush, - .mmap = cifs_file_mmap, + .mmap_prepare = cifs_file_mmap_prepare, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .llseek = cifs_llseek, @@ -1601,7 +1601,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = { .release = cifs_close, .fsync = cifs_strict_fsync, .flush = cifs_flush, - .mmap = cifs_file_strict_mmap, + .mmap_prepare = cifs_file_strict_mmap_prepare, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .llseek = cifs_llseek, @@ -1619,7 +1619,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { .release = cifs_close, .fsync = cifs_fsync, .flush = cifs_flush, - .mmap = cifs_file_mmap, + .mmap_prepare = cifs_file_mmap_prepare, .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, .unlocked_ioctl = cifs_ioctl, diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h index b9ec9fe16a98..487f39cff77e 100644 --- a/fs/smb/client/cifsfs.h +++ b/fs/smb/client/cifsfs.h @@ -103,8 +103,8 @@ extern int cifs_lock(struct file *, int, struct file_lock *); extern int cifs_fsync(struct file *, loff_t, loff_t, int); extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int); extern int cifs_flush(struct file *, fl_owner_t id); -extern int cifs_file_mmap(struct file *file, struct vm_area_struct *vma); -extern int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma); +int cifs_file_mmap_prepare(struct vm_area_desc *desc); +int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc); extern const struct file_operations cifs_dir_ops; extern int cifs_readdir(struct file *file, 
struct dir_context *ctx); diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index 1421bde045c2..186e061068be 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -2999,38 +2999,38 @@ static const struct vm_operations_struct cifs_file_vm_ops = { .page_mkwrite = cifs_page_mkwrite, }; -int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) +int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc) { int xid, rc = 0; - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(desc->file); xid = get_xid(); if (!CIFS_CACHE_READ(CIFS_I(inode))) rc = cifs_zap_mapping(inode); if (!rc) - rc = generic_file_mmap(file, vma); + rc = generic_file_mmap_prepare(desc); if (!rc) - vma->vm_ops = &cifs_file_vm_ops; + desc->vm_ops = &cifs_file_vm_ops; free_xid(xid); return rc; } -int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) +int cifs_file_mmap_prepare(struct vm_area_desc *desc) { int rc, xid; xid = get_xid(); - rc = cifs_revalidate_file(file); + rc = cifs_revalidate_file(desc->file); if (rc) cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", rc); if (!rc) - rc = generic_file_mmap(file, vma); + rc = generic_file_mmap_prepare(desc); if (!rc) - vma->vm_ops = &cifs_file_vm_ops; + desc->vm_ops = &cifs_file_vm_ops; free_xid(xid); return rc; diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 63d17cea2e95..6fc6ad63d004 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -1594,7 +1594,7 @@ static int krb5_authenticate(struct ksmbd_work *work, struct ksmbd_conn *conn = work->conn; struct ksmbd_session *sess = work->sess; char *in_blob, *out_blob; - struct channel *chann = NULL; + struct channel *chann = NULL, *old; u64 prev_sess_id; int in_len, out_len; int retval; @@ -1621,11 +1621,24 @@ static int krb5_authenticate(struct ksmbd_work *work, rsp->SecurityBufferLength = cpu_to_le16(out_len); - if ((conn->sign || server_conf.enforced_signing) || + /* + * If session state is SMB2_SESSION_VALID, We can assume + * that it is reauthentication. And the user/password + * has been verified, so return it here. 
+ */ + if (sess->state == SMB2_SESSION_VALID) { + if (conn->binding) + goto binding_session; + return 0; + } + + if ((rsp->SessionFlags != SMB2_SESSION_FLAG_IS_GUEST_LE && + (conn->sign || server_conf.enforced_signing)) || (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED)) sess->sign = true; - if (smb3_encryption_negotiated(conn)) { + if (smb3_encryption_negotiated(conn) && + !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) { retval = conn->ops->generate_encryptionkey(conn, sess); if (retval) { ksmbd_debug(SMB, @@ -1638,6 +1651,7 @@ static int krb5_authenticate(struct ksmbd_work *work, sess->sign = false; } +binding_session: if (conn->dialect >= SMB30_PROT_ID) { chann = lookup_chann_list(sess, conn); if (!chann) { @@ -1646,7 +1660,12 @@ static int krb5_authenticate(struct ksmbd_work *work, return -ENOMEM; chann->conn = conn; - xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP); + old = xa_store(&sess->ksmbd_chann_list, (long)conn, + chann, KSMBD_DEFAULT_GFP); + if (xa_is_err(old)) { + kfree(chann); + return xa_err(old); + } } } @@ -1833,8 +1852,6 @@ int smb2_sess_setup(struct ksmbd_work *work) ksmbd_conn_set_good(conn); sess->state = SMB2_SESSION_VALID; } - kfree(sess->Preauth_HashValue); - sess->Preauth_HashValue = NULL; } else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) { if (negblob->MessageType == NtLmNegotiate) { rc = ntlm_negotiate(work, negblob, negblob_len, rsp); @@ -1861,8 +1878,6 @@ int smb2_sess_setup(struct ksmbd_work *work) kfree(preauth_sess); } } - kfree(sess->Preauth_HashValue); - sess->Preauth_HashValue = NULL; } else { pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n", le32_to_cpu(negblob->MessageType)); @@ -2581,7 +2596,7 @@ static void smb2_update_xattrs(struct ksmbd_tree_connect *tcon, } } -static int smb2_creat(struct ksmbd_work *work, struct path *parent_path, +static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name, int open_flags, umode_t posix_mode, bool is_dir) { @@ -2610,7 +2625,7 @@ static int smb2_creat(struct ksmbd_work *work, struct path *parent_path, return rc; } - rc = ksmbd_vfs_kern_path_locked(work, name, 0, parent_path, path, 0); + rc = ksmbd_vfs_kern_path(work, name, 0, path, 0); if (rc) { pr_err("cannot get linux path (%s), err = %d\n", name, rc); @@ -2860,7 +2875,7 @@ int smb2_open(struct ksmbd_work *work) struct ksmbd_tree_connect *tcon = work->tcon; struct smb2_create_req *req; struct smb2_create_rsp *rsp; - struct path path, parent_path; + struct path path; struct ksmbd_share_config *share = tcon->share_conf; struct ksmbd_file *fp = NULL; struct file *filp = NULL; @@ -3116,8 +3131,8 @@ int smb2_open(struct ksmbd_work *work) goto err_out2; } - rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS, - &parent_path, &path, 1); + rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, + &path, 1); if (!rc) { file_present = true; @@ -3238,7 +3253,7 @@ int smb2_open(struct ksmbd_work *work) /*create file if not present */ if (!file_present) { - rc = smb2_creat(work, &parent_path, &path, name, open_flags, + rc = smb2_creat(work, &path, name, open_flags, posix_mode, req->CreateOptions & FILE_DIRECTORY_FILE_LE); if (rc) { @@ -3443,7 +3458,7 @@ int smb2_open(struct ksmbd_work *work) } if (file_present || created) - ksmbd_vfs_kern_path_unlock(&parent_path, &path); + path_put(&path); if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC && !fp->attrib_only && !stream_name) { @@ -3724,7 +3739,7 @@ reconnected_fp: err_out: if (rc && (file_present || created)) - 
ksmbd_vfs_kern_path_unlock(&parent_path, &path); + path_put(&path); err_out1: ksmbd_revert_fsids(work); @@ -4108,20 +4123,6 @@ struct smb2_query_dir_private { int info_level; }; -static void lock_dir(struct ksmbd_file *dir_fp) -{ - struct dentry *dir = dir_fp->filp->f_path.dentry; - - inode_lock_nested(d_inode(dir), I_MUTEX_PARENT); -} - -static void unlock_dir(struct ksmbd_file *dir_fp) -{ - struct dentry *dir = dir_fp->filp->f_path.dentry; - - inode_unlock(d_inode(dir)); -} - static int process_query_dir_entries(struct smb2_query_dir_private *priv) { struct mnt_idmap *idmap = file_mnt_idmap(priv->dir_fp->filp); @@ -4136,12 +4137,10 @@ static int process_query_dir_entries(struct smb2_query_dir_private *priv) if (dentry_name(priv->d_info, priv->info_level)) return -EINVAL; - lock_dir(priv->dir_fp); - dent = lookup_one(idmap, - &QSTR_LEN(priv->d_info->name, - priv->d_info->name_len), - priv->dir_fp->filp->f_path.dentry); - unlock_dir(priv->dir_fp); + dent = lookup_one_unlocked(idmap, + &QSTR_LEN(priv->d_info->name, + priv->d_info->name_len), + priv->dir_fp->filp->f_path.dentry); if (IS_ERR(dent)) { ksmbd_debug(SMB, "Cannot lookup `%s' [%ld]\n", @@ -6052,8 +6051,7 @@ static int smb2_create_link(struct ksmbd_work *work, struct nls_table *local_nls) { char *link_name = NULL, *target_name = NULL, *pathname = NULL; - struct path path, parent_path; - bool file_present = false; + struct path path; int rc; if (buf_len < (u64)sizeof(struct smb2_file_link_info) + @@ -6082,15 +6080,12 @@ static int smb2_create_link(struct ksmbd_work *work, ksmbd_debug(SMB, "target name is %s\n", target_name); rc = ksmbd_vfs_kern_path_locked(work, link_name, LOOKUP_NO_SYMLINKS, - &parent_path, &path, 0); + &path, 0); if (rc) { if (rc != -ENOENT) goto out; - } else - file_present = true; - - if (file_info->ReplaceIfExists) { - if (file_present) { + } else { + if (file_info->ReplaceIfExists) { rc = ksmbd_vfs_remove_file(work, &path); if (rc) { rc = -EINVAL; @@ -6098,21 +6093,17 @@ static int smb2_create_link(struct ksmbd_work *work, link_name); goto out; } - } - } else { - if (file_present) { + } else { rc = -EEXIST; ksmbd_debug(SMB, "link already exists\n"); goto out; } + ksmbd_vfs_kern_path_unlock(&path); } - rc = ksmbd_vfs_link(work, target_name, link_name); if (rc) rc = -EINVAL; out: - if (file_present) - ksmbd_vfs_kern_path_unlock(&parent_path, &path); if (!IS_ERR(link_name)) kfree(link_name); diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index d3437f6644e3..04539037108c 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -66,13 +66,12 @@ int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child) return 0; } -static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf, - char *pathname, unsigned int flags, - struct path *parent_path, - struct path *path) +static int ksmbd_vfs_path_lookup(struct ksmbd_share_config *share_conf, + char *pathname, unsigned int flags, + struct path *path, bool do_lock) { struct qstr last; - struct filename *filename; + struct filename *filename __free(putname) = NULL; struct path *root_share_path = &share_conf->vfs_path; int err, type; struct dentry *d; @@ -89,51 +88,57 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf, return PTR_ERR(filename); err = vfs_path_parent_lookup(filename, flags, - parent_path, &last, &type, + path, &last, &type, root_share_path); - if (err) { - putname(filename); + if (err) return err; - } if (unlikely(type != LAST_NORM)) { - path_put(parent_path); - putname(filename); + 
path_put(path); return -ENOENT; } - err = mnt_want_write(parent_path->mnt); - if (err) { - path_put(parent_path); - putname(filename); + if (do_lock) { + err = mnt_want_write(path->mnt); + if (err) { + path_put(path); + return -ENOENT; + } + + inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT); + d = lookup_one_qstr_excl(&last, path->dentry, 0); + + if (!IS_ERR(d)) { + dput(path->dentry); + path->dentry = d; + return 0; + } + inode_unlock(path->dentry->d_inode); + mnt_drop_write(path->mnt); + path_put(path); return -ENOENT; } - inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT); - d = lookup_one_qstr_excl(&last, parent_path->dentry, 0); - if (IS_ERR(d)) - goto err_out; - + d = lookup_noperm_unlocked(&last, path->dentry); + if (!IS_ERR(d) && d_is_negative(d)) { + dput(d); + d = ERR_PTR(-ENOENT); + } + if (IS_ERR(d)) { + path_put(path); + return -ENOENT; + } + dput(path->dentry); path->dentry = d; - path->mnt = mntget(parent_path->mnt); if (test_share_config_flag(share_conf, KSMBD_SHARE_FLAG_CROSSMNT)) { err = follow_down(path, 0); if (err < 0) { path_put(path); - goto err_out; + return -ENOENT; } } - - putname(filename); return 0; - -err_out: - inode_unlock(d_inode(parent_path->dentry)); - mnt_drop_write(parent_path->mnt); - path_put(parent_path); - putname(filename); - return -ENOENT; } void ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap, @@ -548,7 +553,8 @@ int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat) { int err; - err = vfs_getattr(path, stat, STATX_BTIME, AT_STATX_SYNC_AS_STAT); + err = vfs_getattr(path, stat, STATX_BASIC_STATS | STATX_BTIME, + AT_STATX_SYNC_AS_STAT); if (err) pr_err("getattr failed, err %d\n", err); return err; @@ -765,10 +771,10 @@ retry: } rd.old_mnt_idmap = mnt_idmap(old_path->mnt), - rd.old_dir = d_inode(old_parent), + rd.old_parent = old_parent, rd.old_dentry = old_child, rd.new_mnt_idmap = mnt_idmap(new_path.mnt), - rd.new_dir = new_path.dentry->d_inode, + rd.new_parent = new_path.dentry, rd.new_dentry = new_dentry, rd.flags = flags, rd.delegated_inode = NULL, @@ -1198,104 +1204,114 @@ static int ksmbd_vfs_lookup_in_dir(const struct path *dir, char *name, return ret; } -/** - * ksmbd_vfs_kern_path_locked() - lookup a file and get path info - * @work: work - * @name: file path that is relative to share - * @flags: lookup flags - * @parent_path: if lookup succeed, return parent_path info - * @path: if lookup succeed, return path info - * @caseless: caseless filename lookup - * - * Return: 0 on success, otherwise error - */ -int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, - unsigned int flags, struct path *parent_path, - struct path *path, bool caseless) +static +int __ksmbd_vfs_kern_path(struct ksmbd_work *work, char *filepath, + unsigned int flags, + struct path *path, bool caseless, bool do_lock) { struct ksmbd_share_config *share_conf = work->tcon->share_conf; + struct path parent_path; + size_t path_len, remain_len; int err; - err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, parent_path, - path); - if (!err) - return 0; - - if (caseless) { - char *filepath; - size_t path_len, remain_len; - - filepath = name; - path_len = strlen(filepath); - remain_len = path_len; - - *parent_path = share_conf->vfs_path; - path_get(parent_path); +retry: + err = ksmbd_vfs_path_lookup(share_conf, filepath, flags, path, do_lock); + if (!err || !caseless) + return err; - while (d_can_lookup(parent_path->dentry)) { - char *filename = filepath + path_len - remain_len; - char *next = strchrnul(filename, 
'/'); - size_t filename_len = next - filename; - bool is_last = !next[0]; + path_len = strlen(filepath); + remain_len = path_len; - if (filename_len == 0) - break; + parent_path = share_conf->vfs_path; + path_get(&parent_path); - err = ksmbd_vfs_lookup_in_dir(parent_path, filename, - filename_len, - work->conn->um); - if (err) - goto out2; + while (d_can_lookup(parent_path.dentry)) { + char *filename = filepath + path_len - remain_len; + char *next = strchrnul(filename, '/'); + size_t filename_len = next - filename; + bool is_last = !next[0]; - next[0] = '\0'; - - err = vfs_path_lookup(share_conf->vfs_path.dentry, - share_conf->vfs_path.mnt, - filepath, - flags, - path); - if (!is_last) - next[0] = '/'; - if (err) - goto out2; - else if (is_last) - goto out1; - path_put(parent_path); - *parent_path = *path; + if (filename_len == 0) + break; - remain_len -= filename_len + 1; + err = ksmbd_vfs_lookup_in_dir(&parent_path, filename, + filename_len, + work->conn->um); + path_put(&parent_path); + if (err) + goto out; + if (is_last) { + caseless = false; + goto retry; } + next[0] = '\0'; + + err = vfs_path_lookup(share_conf->vfs_path.dentry, + share_conf->vfs_path.mnt, + filepath, + flags, + &parent_path); + next[0] = '/'; + if (err) + goto out; - err = -EINVAL; -out2: - path_put(parent_path); + remain_len -= filename_len + 1; } -out1: - if (!err) { - err = mnt_want_write(parent_path->mnt); - if (err) { - path_put(path); - path_put(parent_path); - return err; - } - - err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry); - if (err) { - mnt_drop_write(parent_path->mnt); - path_put(path); - path_put(parent_path); - } - } + err = -EINVAL; + path_put(&parent_path); +out: return err; } -void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path) +/** + * ksmbd_vfs_kern_path() - lookup a file and get path info + * @work: work + * @filepath: file path that is relative to share + * @flags: lookup flags + * @path: if lookup succeed, return path info + * @caseless: caseless filename lookup + * + * Perform the lookup, possibly crossing over any mount point. + * On return no locks will be held and write-access to filesystem + * won't have been checked. + * Return: 0 if file was found, otherwise error + */ +int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *filepath, + unsigned int flags, + struct path *path, bool caseless) +{ + return __ksmbd_vfs_kern_path(work, filepath, flags, path, + caseless, false); +} + +/** + * ksmbd_vfs_kern_path_locked() - lookup a file and get path info + * @work: work + * @filepath: file path that is relative to share + * @flags: lookup flags + * @path: if lookup succeed, return path info + * @caseless: caseless filename lookup + * + * Perform the lookup, but don't cross over any mount point. + * On return the parent of path->dentry will be locked and write-access to + * filesystem will have been gained. 
+ * Return: 0 on if file was found, otherwise error + */ +int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *filepath, + unsigned int flags, + struct path *path, bool caseless) { - inode_unlock(d_inode(parent_path->dentry)); - mnt_drop_write(parent_path->mnt); + return __ksmbd_vfs_kern_path(work, filepath, flags, path, + caseless, true); +} + +void ksmbd_vfs_kern_path_unlock(struct path *path) +{ + /* While lock is still held, ->d_parent is safe */ + inode_unlock(d_inode(path->dentry->d_parent)); + mnt_drop_write(path->mnt); path_put(path); - path_put(parent_path); } struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work, diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h index 2893f59803a6..d47472f3e30b 100644 --- a/fs/smb/server/vfs.h +++ b/fs/smb/server/vfs.h @@ -117,10 +117,13 @@ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name, int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap, const struct path *path, char *attr_name, bool get_write); +int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name, + unsigned int flags, + struct path *path, bool caseless); int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, - unsigned int flags, struct path *parent_path, + unsigned int flags, struct path *path, bool caseless); -void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path); +void ksmbd_vfs_kern_path_unlock(struct path *path); struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work, const char *name, unsigned int flags, diff --git a/fs/stack.c b/fs/stack.c index f18920119944..d8c782e064e3 100644 --- a/fs/stack.c +++ b/fs/stack.c @@ -3,7 +3,7 @@ #include <linux/fs.h> #include <linux/fs_stack.h> -/* does _NOT_ require i_mutex to be held. +/* does _NOT_ require i_rwsem to be held. * * This function cannot be inlined since i_size_{read,write} is rather * heavy-weight on 32-bit systems @@ -41,7 +41,7 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src) * If CONFIG_SMP or CONFIG_PREEMPTION on 32-bit, it's vital for * fsstack_copy_inode_size() to hold some lock around * i_size_write(), otherwise i_size_read() may spin forever (see - * include/linux/fs.h). We don't necessarily hold i_mutex when this + * include/linux/fs.h). We don't necessarily hold i_rwsem when this * is called, so take i_lock for that case. * * And if on 32-bit, continue our effort to keep the two halves of diff --git a/fs/super.c b/fs/super.c index 80418ca8e215..7f876f32343a 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1459,6 +1459,17 @@ static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise) if (!sb) return; + if (sb->s_op->remove_bdev) { + int ret; + + ret = sb->s_op->remove_bdev(sb, bdev); + if (!ret) { + super_unlock_shared(sb); + return; + } + /* Fallback to shutdown. */ + } + if (!surprise) sync_filesystem(sb); shrink_dcache_sb(sb); diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index a3fd3cc591bd..0c023941a316 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -465,9 +465,20 @@ static int tracefs_d_revalidate(struct inode *inode, const struct qstr *name, return !(ei && ei->is_freed); } +static int tracefs_d_delete(const struct dentry *dentry) +{ + /* + * We want to keep eventfs dentries around but not tracefs + * ones. eventfs dentries have content in d_fsdata. + * Use d_fsdata to determine if it's a eventfs dentry or not. 
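With the split above, ksmbd callers choose between the two lookup helpers by whether they need the parent held: ksmbd_vfs_kern_path() hands back a plain referenced path (dropped with path_put()), while ksmbd_vfs_kern_path_locked() additionally leaves the parent inode locked and write access to the mount taken, undone by ksmbd_vfs_kern_path_unlock(). A condensed, hypothetical caller showing the two patterns used by the smb2pdu.c hunks, with error handling trimmed:

/* condensed illustration only; names and flow follow the smb2pdu.c callers */
static int example_lookups(struct ksmbd_work *work, char *name, char *link_name)
{
        struct path path;
        int rc;

        /* read-mostly user (cf. smb2_open()): lookup, use, path_put() */
        rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, 1);
        if (!rc) {
                /* ... inspect or open path ... */
                path_put(&path);
        }

        /* user that modifies the parent (cf. smb2_create_link()):
         * on success the parent is locked and mnt_want_write() is held */
        rc = ksmbd_vfs_kern_path_locked(work, link_name, LOOKUP_NO_SYMLINKS,
                                        &path, 0);
        if (!rc) {
                /* ... e.g. ksmbd_vfs_remove_file(work, &path) ... */
                ksmbd_vfs_kern_path_unlock(&path);
        }
        return rc;
}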
+ */ + return dentry->d_fsdata == NULL; +} + static const struct dentry_operations tracefs_dentry_operations = { .d_revalidate = tracefs_d_revalidate, .d_release = tracefs_d_release, + .d_delete = tracefs_d_delete, }; static int tracefs_fill_super(struct super_block *sb, struct fs_context *fc) @@ -480,7 +491,7 @@ static int tracefs_fill_super(struct super_block *sb, struct fs_context *fc) return err; sb->s_op = &tracefs_super_operations; - sb->s_d_op = &tracefs_dentry_operations; + set_default_d_op(sb, &tracefs_dentry_operations); return 0; } @@ -551,20 +562,9 @@ struct dentry *tracefs_start_creating(const char *name, struct dentry *parent) if (!parent) parent = tracefs_mount->mnt_root; - inode_lock(d_inode(parent)); - if (unlikely(IS_DEADDIR(d_inode(parent)))) - dentry = ERR_PTR(-ENOENT); - else - dentry = lookup_noperm(&QSTR(name), parent); - if (!IS_ERR(dentry) && d_inode(dentry)) { - dput(dentry); - dentry = ERR_PTR(-EEXIST); - } - - if (IS_ERR(dentry)) { - inode_unlock(d_inode(parent)); + dentry = simple_start_creating(parent, name); + if (IS_ERR(dentry)) simple_release_fs(&tracefs_mount, &tracefs_mount_count); - } return dentry; } diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index bf311c38d9a8..04dae67c0266 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -404,7 +404,8 @@ static int allocate_budget(struct ubifs_info *c, struct folio *folio, * there is a plenty of flash space and the budget will be acquired quickly, * without forcing write-back. The slow path does not make this assumption. */ -static int ubifs_write_begin(struct file *file, struct address_space *mapping, +static int ubifs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { @@ -514,8 +515,9 @@ static void cancel_budget(struct ubifs_info *c, struct folio *folio, } } -static int ubifs_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, +static int ubifs_write_end(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, + unsigned len, unsigned copied, struct folio *folio, void *fsdata) { struct inode *inode = mapping->host; @@ -1579,17 +1581,17 @@ static const struct vm_operations_struct ubifs_file_vm_ops = { .page_mkwrite = ubifs_vm_page_mkwrite, }; -static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int ubifs_file_mmap_prepare(struct vm_area_desc *desc) { int err; - err = generic_file_mmap(file, vma); + err = generic_file_mmap_prepare(desc); if (err) return err; - vma->vm_ops = &ubifs_file_vm_ops; + desc->vm_ops = &ubifs_file_vm_ops; if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) - file_accessed(file); + file_accessed(desc->file); return 0; } @@ -1652,7 +1654,7 @@ const struct file_operations ubifs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = ubifs_write_iter, - .mmap = ubifs_file_mmap, + .mmap_prepare = ubifs_file_mmap_prepare, .fsync = ubifs_fsync, .unlocked_ioctl = ubifs_ioctl, .splice_read = filemap_splice_read, diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c index 2c99349cf537..79536b2e3d7a 100644 --- a/fs/ubifs/ioctl.c +++ b/fs/ubifs/ioctl.c @@ -130,7 +130,7 @@ static int setflags(struct inode *inode, int flags) return err; } -int ubifs_fileattr_get(struct dentry *dentry, struct fileattr *fa) +int ubifs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); int flags = ubifs2ioctl(ubifs_inode(inode)->flags); @@ -145,7 +145,7 @@ 
int ubifs_fileattr_get(struct dentry *dentry, struct fileattr *fa) } int ubifs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa) + struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); int flags = fa->flags; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 256dbaeeb0de..5db45c9e26ee 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -2073,9 +2073,9 @@ int ubifs_recover_size(struct ubifs_info *c, bool in_place); void ubifs_destroy_size_tree(struct ubifs_info *c); /* ioctl.c */ -int ubifs_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int ubifs_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int ubifs_fileattr_set(struct mnt_idmap *idmap, - struct dentry *dentry, struct fileattr *fa); + struct dentry *dentry, struct file_kattr *fa); long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); void ubifs_set_inode_flags(struct inode *inode); #ifdef CONFIG_COMPAT diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 4386dd845e40..f24aa98e6869 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -181,19 +181,23 @@ static void udf_write_failed(struct address_space *mapping, loff_t to) } } -static int udf_adinicb_writepage(struct folio *folio, - struct writeback_control *wbc, void *data) +static int udf_adinicb_writepages(struct address_space *mapping, + struct writeback_control *wbc) { - struct inode *inode = folio->mapping->host; + struct inode *inode = mapping->host; struct udf_inode_info *iinfo = UDF_I(inode); + struct folio *folio = NULL; + int error = 0; + + while ((folio = writeback_iter(mapping, wbc, folio, &error))) { + BUG_ON(!folio_test_locked(folio)); + BUG_ON(folio->index != 0); + memcpy_from_file_folio(iinfo->i_data + iinfo->i_lenEAttr, folio, + 0, i_size_read(inode)); + folio_unlock(folio); + } - BUG_ON(!folio_test_locked(folio)); - BUG_ON(folio->index != 0); - memcpy_from_file_folio(iinfo->i_data + iinfo->i_lenEAttr, folio, 0, - i_size_read(inode)); - folio_unlock(folio); mark_inode_dirty(inode); - return 0; } @@ -203,9 +207,9 @@ static int udf_writepages(struct address_space *mapping, struct inode *inode = mapping->host; struct udf_inode_info *iinfo = UDF_I(inode); - if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) - return mpage_writepages(mapping, wbc, udf_get_block_wb); - return write_cache_pages(mapping, wbc, udf_adinicb_writepage, NULL); + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) + return udf_adinicb_writepages(mapping, wbc); + return mpage_writepages(mapping, wbc, udf_get_block_wb); } static void udf_adinicb_read_folio(struct folio *folio) @@ -244,10 +248,12 @@ static void udf_readahead(struct readahead_control *rac) mpage_readahead(rac, udf_get_block); } -static int udf_write_begin(struct file *file, struct address_space *mapping, +static int udf_write_begin(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { + struct file *file = iocb->ki_filp; struct udf_inode_info *iinfo = UDF_I(file_inode(file)); struct folio *folio; int ret; @@ -271,15 +277,16 @@ static int udf_write_begin(struct file *file, struct address_space *mapping, return 0; } -static int udf_write_end(struct file *file, struct address_space *mapping, +static int udf_write_end(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(iocb->ki_filp); loff_t last_pos; 
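The ubifs and udf hunks here (and the ufs and vboxsf ones further down) share another mechanical conversion: ->write_begin and ->write_end now take a const struct kiocb * instead of a struct file *, with the file recovered from iocb->ki_filp only where it is still needed. A minimal sketch of the new prototypes for a hypothetical filesystem built on the generic helpers; foofs_get_block and the foofs_* names are placeholders:

static int foofs_write_begin(const struct kiocb *iocb,
                             struct address_space *mapping,
                             loff_t pos, unsigned len,
                             struct folio **foliop, void **fsdata)
{
        /* if the file is needed: struct file *file = iocb->ki_filp; */
        return block_write_begin(mapping, pos, len, foliop, foofs_get_block);
}

static int foofs_write_end(const struct kiocb *iocb,
                           struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct folio *folio, void *fsdata)
{
        return generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
}

static const struct address_space_operations foofs_aops = {
        .write_begin    = foofs_write_begin,
        .write_end      = foofs_write_end,
};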
if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) - return generic_write_end(file, mapping, pos, len, copied, folio, + return generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata); last_pos = pos + copied; if (last_pos > inode->i_size) diff --git a/fs/udf/super.c b/fs/udf/super.c index 1c8a736b3309..b2f168b0a0d1 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1440,7 +1440,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, struct genericPartitionMap *gpm; uint16_t ident; struct buffer_head *bh; - unsigned int table_len; + unsigned int table_len, part_map_count; int ret; bh = udf_read_tagged(sb, block, block, &ident); @@ -1461,7 +1461,16 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, "logical volume"); if (ret) goto out_bh; - ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); + + part_map_count = le32_to_cpu(lvd->numPartitionMaps); + if (part_map_count > table_len / sizeof(struct genericPartitionMap1)) { + udf_err(sb, "error loading logical volume descriptor: " + "Too many partition maps (%u > %u)\n", part_map_count, + table_len / (unsigned)sizeof(struct genericPartitionMap1)); + ret = -EIO; + goto out_bh; + } + ret = udf_sb_alloc_partition_maps(sb, part_map_count); if (ret) goto out_bh; diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 88d0062cfdb9..0388a1bae326 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -48,7 +48,7 @@ static void ufs_commit_chunk(struct folio *folio, loff_t pos, unsigned len) struct inode *dir = mapping->host; inode_inc_iversion(dir); - block_write_end(NULL, mapping, pos, len, len, folio, NULL); + block_write_end(pos, len, len, folio); if (pos+len > dir->i_size) { i_size_write(dir, pos+len); mark_inode_dirty(dir); diff --git a/fs/ufs/file.c b/fs/ufs/file.c index 487ad1fc2de6..c2a391c17df7 100644 --- a/fs/ufs/file.c +++ b/fs/ufs/file.c @@ -38,7 +38,7 @@ const struct file_operations ufs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = generic_file_mmap, + .mmap_prepare = generic_file_mmap_prepare, .open = generic_file_open, .fsync = generic_file_fsync, .splice_read = filemap_splice_read, diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 7dc38fdef2ea..8361c00e8fa6 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -474,9 +474,10 @@ static void ufs_write_failed(struct address_space *mapping, loff_t to) } } -static int ufs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, - struct folio **foliop, void **fsdata) +static int ufs_write_begin(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { int ret; @@ -487,13 +488,14 @@ static int ufs_write_begin(struct file *file, struct address_space *mapping, return ret; } -static int ufs_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct folio *folio, void *fsdata) +static int ufs_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { int ret; - ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); + ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata); if (ret < len) ufs_write_failed(mapping, pos + len); return ret; diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c index b492794f8e9a..4bebd947314a 100644 --- a/fs/vboxsf/file.c +++ 
b/fs/vboxsf/file.c @@ -165,13 +165,13 @@ static const struct vm_operations_struct vboxsf_file_vm_ops = { .map_pages = filemap_map_pages, }; -static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma) +static int vboxsf_file_mmap_prepare(struct vm_area_desc *desc) { int err; - err = generic_file_mmap(file, vma); + err = generic_file_mmap_prepare(desc); if (!err) - vma->vm_ops = &vboxsf_file_vm_ops; + desc->vm_ops = &vboxsf_file_vm_ops; return err; } @@ -213,7 +213,7 @@ const struct file_operations vboxsf_reg_fops = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, - .mmap = vboxsf_file_mmap, + .mmap_prepare = vboxsf_file_mmap_prepare, .open = vboxsf_file_open, .release = vboxsf_file_release, .fsync = noop_fsync, @@ -300,12 +300,13 @@ static int vboxsf_writepages(struct address_space *mapping, return error; } -static int vboxsf_write_end(struct file *file, struct address_space *mapping, +static int vboxsf_write_end(const struct kiocb *iocb, + struct address_space *mapping, loff_t pos, unsigned int len, unsigned int copied, struct folio *folio, void *fsdata) { struct inode *inode = mapping->host; - struct vboxsf_handle *sf_handle = file->private_data; + struct vboxsf_handle *sf_handle = iocb->ki_filp->private_data; size_t from = offset_in_folio(folio, pos); u32 nwritten = len; u8 *buf; diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c index 0bc96ab6580b..241647b060ee 100644 --- a/fs/vboxsf/super.c +++ b/fs/vboxsf/super.c @@ -189,7 +189,7 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_blocksize = 1024; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_op = &vboxsf_super_ops; - sb->s_d_op = &vboxsf_dentry_ops; + set_default_d_op(sb, &vboxsf_dentry_ops); iroot = iget_locked(sb, 0); if (!iroot) { diff --git a/fs/xattr.c b/fs/xattr.c index 600ae97969cf..8851a5ef34f5 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -215,7 +215,7 @@ EXPORT_SYMBOL(__vfs_setxattr); * * returns the result of the internal setxattr or setsecurity operations. * - * This function requires the caller to lock the inode's i_mutex before it + * This function requires the caller to lock the inode's i_rwsem before it * is executed. It also assumes that the caller will make the appropriate * permission checks. 
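The cifs, tracefs and vboxsf hunks above replace the direct sb->s_d_op assignment with set_default_d_op(), and ramfs additionally seeds default dentry flags through sb->s_d_flags. A minimal fill_super-time sketch for a hypothetical filesystem; foofs_super_ops and foofs_get_root_inode are placeholders:

static const struct dentry_operations foofs_dentry_ops = {
        /* .d_revalidate and friends for the hypothetical foofs */
};

static int foofs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        sb->s_op = &foofs_super_ops;

        set_default_d_op(sb, &foofs_dentry_ops);        /* was: sb->s_d_op = ... */
        sb->s_d_flags |= DCACHE_DONTCACHE;              /* optional, cf. ramfs */

        sb->s_root = d_make_root(foofs_get_root_inode(sb));
        return sb->s_root ? 0 : -ENOMEM;
}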
*/ diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c index a4ac37ba5d51..fa1f03c1331e 100644 --- a/fs/xfs/libxfs/xfs_alloc_btree.c +++ b/fs/xfs/libxfs/xfs_alloc_btree.c @@ -186,35 +186,32 @@ xfs_allocbt_init_ptr_from_cur( ptr->s = agf->agf_cnt_root; } -STATIC int64_t -xfs_bnobt_key_diff( +STATIC int +xfs_bnobt_cmp_key_with_cur( struct xfs_btree_cur *cur, const union xfs_btree_key *key) { struct xfs_alloc_rec_incore *rec = &cur->bc_rec.a; const struct xfs_alloc_rec *kp = &key->alloc; - return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock; + return cmp_int(be32_to_cpu(kp->ar_startblock), + rec->ar_startblock); } -STATIC int64_t -xfs_cntbt_key_diff( +STATIC int +xfs_cntbt_cmp_key_with_cur( struct xfs_btree_cur *cur, const union xfs_btree_key *key) { struct xfs_alloc_rec_incore *rec = &cur->bc_rec.a; const struct xfs_alloc_rec *kp = &key->alloc; - int64_t diff; - diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount; - if (diff) - return diff; - - return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock; + return cmp_int(be32_to_cpu(kp->ar_blockcount), rec->ar_blockcount) ?: + cmp_int(be32_to_cpu(kp->ar_startblock), rec->ar_startblock); } -STATIC int64_t -xfs_bnobt_diff_two_keys( +STATIC int +xfs_bnobt_cmp_two_keys( struct xfs_btree_cur *cur, const union xfs_btree_key *k1, const union xfs_btree_key *k2, @@ -222,29 +219,24 @@ xfs_bnobt_diff_two_keys( { ASSERT(!mask || mask->alloc.ar_startblock); - return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) - - be32_to_cpu(k2->alloc.ar_startblock); + return cmp_int(be32_to_cpu(k1->alloc.ar_startblock), + be32_to_cpu(k2->alloc.ar_startblock)); } -STATIC int64_t -xfs_cntbt_diff_two_keys( +STATIC int +xfs_cntbt_cmp_two_keys( struct xfs_btree_cur *cur, const union xfs_btree_key *k1, const union xfs_btree_key *k2, const union xfs_btree_key *mask) { - int64_t diff; - ASSERT(!mask || (mask->alloc.ar_blockcount && mask->alloc.ar_startblock)); - diff = be32_to_cpu(k1->alloc.ar_blockcount) - - be32_to_cpu(k2->alloc.ar_blockcount); - if (diff) - return diff; - - return be32_to_cpu(k1->alloc.ar_startblock) - - be32_to_cpu(k2->alloc.ar_startblock); + return cmp_int(be32_to_cpu(k1->alloc.ar_blockcount), + be32_to_cpu(k2->alloc.ar_blockcount)) ?: + cmp_int(be32_to_cpu(k1->alloc.ar_startblock), + be32_to_cpu(k2->alloc.ar_startblock)); } static xfs_failaddr_t @@ -438,9 +430,9 @@ const struct xfs_btree_ops xfs_bnobt_ops = { .init_high_key_from_rec = xfs_bnobt_init_high_key_from_rec, .init_rec_from_cur = xfs_allocbt_init_rec_from_cur, .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur, - .key_diff = xfs_bnobt_key_diff, + .cmp_key_with_cur = xfs_bnobt_cmp_key_with_cur, .buf_ops = &xfs_bnobt_buf_ops, - .diff_two_keys = xfs_bnobt_diff_two_keys, + .cmp_two_keys = xfs_bnobt_cmp_two_keys, .keys_inorder = xfs_bnobt_keys_inorder, .recs_inorder = xfs_bnobt_recs_inorder, .keys_contiguous = xfs_allocbt_keys_contiguous, @@ -468,9 +460,9 @@ const struct xfs_btree_ops xfs_cntbt_ops = { .init_high_key_from_rec = xfs_cntbt_init_high_key_from_rec, .init_rec_from_cur = xfs_allocbt_init_rec_from_cur, .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur, - .key_diff = xfs_cntbt_key_diff, + .cmp_key_with_cur = xfs_cntbt_cmp_key_with_cur, .buf_ops = &xfs_cntbt_buf_ops, - .diff_two_keys = xfs_cntbt_diff_two_keys, + .cmp_two_keys = xfs_cntbt_cmp_two_keys, .keys_inorder = xfs_cntbt_keys_inorder, .recs_inorder = xfs_cntbt_recs_inorder, .keys_contiguous = NULL, /* not needed right now */ diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c 
b/fs/xfs/libxfs/xfs_bmap_btree.c index 908d7b050e9c..188feac04b60 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.c +++ b/fs/xfs/libxfs/xfs_bmap_btree.c @@ -369,38 +369,26 @@ xfs_bmbt_init_rec_from_cur( xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b); } -STATIC int64_t -xfs_bmbt_key_diff( +STATIC int +xfs_bmbt_cmp_key_with_cur( struct xfs_btree_cur *cur, const union xfs_btree_key *key) { - return (int64_t)be64_to_cpu(key->bmbt.br_startoff) - - cur->bc_rec.b.br_startoff; + return cmp_int(be64_to_cpu(key->bmbt.br_startoff), + cur->bc_rec.b.br_startoff); } -STATIC int64_t -xfs_bmbt_diff_two_keys( +STATIC int +xfs_bmbt_cmp_two_keys( struct xfs_btree_cur *cur, const union xfs_btree_key *k1, const union xfs_btree_key *k2, const union xfs_btree_key *mask) { - uint64_t a = be64_to_cpu(k1->bmbt.br_startoff); - uint64_t b = be64_to_cpu(k2->bmbt.br_startoff); - ASSERT(!mask || mask->bmbt.br_startoff); - /* - * Note: This routine previously casted a and b to int64 and subtracted - * them to generate a result. This lead to problems if b was the - * "maximum" key value (all ones) being signed incorrectly, hence this - * somewhat less efficient version. - */ - if (a > b) - return 1; - if (b > a) - return -1; - return 0; + return cmp_int(be64_to_cpu(k1->bmbt.br_startoff), + be64_to_cpu(k2->bmbt.br_startoff)); } static xfs_failaddr_t @@ -647,8 +635,8 @@ const struct xfs_btree_ops xfs_bmbt_ops = { .init_key_from_rec = xfs_bmbt_init_key_from_rec, .init_high_key_from_rec = xfs_bmbt_init_high_key_from_rec, .init_rec_from_cur = xfs_bmbt_init_rec_from_cur, - .key_diff = xfs_bmbt_key_diff, - .diff_two_keys = xfs_bmbt_diff_two_keys, + .cmp_key_with_cur = xfs_bmbt_cmp_key_with_cur, + .cmp_two_keys = xfs_bmbt_cmp_two_keys, .buf_ops = &xfs_bmbt_buf_ops, .keys_inorder = xfs_bmbt_keys_inorder, .recs_inorder = xfs_bmbt_recs_inorder, diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 299ce7fd11b0..a61211d253f1 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c @@ -1985,7 +1985,7 @@ xfs_btree_lookup( int *stat) /* success/failure */ { struct xfs_btree_block *block; /* current btree block */ - int64_t diff; /* difference for the current key */ + int cmp_r; /* current key comparison result */ int error; /* error return value */ int keyno; /* current key number */ int level; /* level in the btree */ @@ -2013,13 +2013,13 @@ xfs_btree_lookup( * on the lookup record, then follow the corresponding block * pointer down to the next level. */ - for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { + for (level = cur->bc_nlevels - 1, cmp_r = 1; level >= 0; level--) { /* Get the block we need to do the lookup on. */ error = xfs_btree_lookup_get_block(cur, level, pp, &block); if (error) goto error0; - if (diff == 0) { + if (cmp_r == 0) { /* * If we already had a key match at a higher level, we * know we need to use the first entry in this block. @@ -2065,15 +2065,16 @@ xfs_btree_lookup( keyno, block, &key); /* - * Compute difference to get next direction: + * Compute comparison result to get next + * direction: * - less than, move right * - greater than, move left * - equal, we're done */ - diff = cur->bc_ops->key_diff(cur, kp); - if (diff < 0) + cmp_r = cur->bc_ops->cmp_key_with_cur(cur, kp); + if (cmp_r < 0) low = keyno + 1; - else if (diff > 0) + else if (cmp_r > 0) high = keyno - 1; else break; @@ -2089,7 +2090,7 @@ xfs_btree_lookup( * If we moved left, need the previous key number, * unless there isn't one. 
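The *_key_diff/*_diff_two_keys to *_cmp_key_with_cur/*_cmp_two_keys conversions above all follow one recipe: replace the widening subtraction with cmp_int(), which yields -1/0/1 and behaves correctly even when operands span the full unsigned range (the failure mode the deleted xfs_bmbt comment describes), and chain the tie-break on secondary keys with the gcc ?: extension. A standalone sketch of the pattern, assuming a cmp_int() of the usual ((a) > (b)) - ((a) < (b)) form:

#include <stdint.h>

#define cmp_int(a, b)	(((a) > (b)) - ((a) < (b)))	/* assumed definition */

struct key { uint64_t primary; uint64_t secondary; };

/* Fragile variant: the subtraction wraps for large keys and truncates to int. */
static int cmp_by_subtraction(const struct key *a, const struct key *b)
{
	return (int64_t)(a->primary - b->primary);
}

/* cmp_int() variant: always -1/0/1; secondary key consulted only on a tie. */
static int cmp_keys(const struct key *a, const struct key *b)
{
	return cmp_int(a->primary, b->primary) ?:
	       cmp_int(a->secondary, b->secondary);
}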
*/ - if (diff > 0 && --keyno < 1) + if (cmp_r > 0 && --keyno < 1) keyno = 1; pp = xfs_btree_ptr_addr(cur, keyno, block); @@ -2102,7 +2103,7 @@ xfs_btree_lookup( } /* Done with the search. See if we need to adjust the results. */ - if (dir != XFS_LOOKUP_LE && diff < 0) { + if (dir != XFS_LOOKUP_LE && cmp_r < 0) { keyno++; /* * If ge search and we went off the end of the block, but it's @@ -2125,14 +2126,14 @@ xfs_btree_lookup( *stat = 1; return 0; } - } else if (dir == XFS_LOOKUP_LE && diff > 0) + } else if (dir == XFS_LOOKUP_LE && cmp_r > 0) keyno--; cur->bc_levels[0].ptr = keyno; /* Return if we succeeded or not. */ if (keyno == 0 || keyno > xfs_btree_get_numrecs(block)) *stat = 0; - else if (dir != XFS_LOOKUP_EQ || diff == 0) + else if (dir != XFS_LOOKUP_EQ || cmp_r == 0) *stat = 1; else *stat = 0; @@ -5058,7 +5059,7 @@ xfs_btree_simple_query_range( int error; ASSERT(cur->bc_ops->init_high_key_from_rec); - ASSERT(cur->bc_ops->diff_two_keys); + ASSERT(cur->bc_ops->cmp_two_keys); /* * Find the leftmost record. The btree cursor must be set @@ -5352,15 +5353,15 @@ xfs_btree_count_blocks( } /* Compare two btree pointers. */ -int64_t -xfs_btree_diff_two_ptrs( +int +xfs_btree_cmp_two_ptrs( struct xfs_btree_cur *cur, const union xfs_btree_ptr *a, const union xfs_btree_ptr *b) { if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) - return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l); - return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s); + return cmp_int(be64_to_cpu(a->l), be64_to_cpu(b->l)); + return cmp_int(be32_to_cpu(a->s), be32_to_cpu(b->s)); } struct xfs_btree_has_records { diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h index 355b304696e6..60e78572e725 100644 --- a/fs/xfs/libxfs/xfs_btree.h +++ b/fs/xfs/libxfs/xfs_btree.h @@ -171,20 +171,23 @@ struct xfs_btree_ops { void (*init_high_key_from_rec)(union xfs_btree_key *key, const union xfs_btree_rec *rec); - /* difference between key value and cursor value */ - int64_t (*key_diff)(struct xfs_btree_cur *cur, - const union xfs_btree_key *key); + /* + * Compare key value and cursor value -- positive if key > cur, + * negative if key < cur, and zero if equal. + */ + int (*cmp_key_with_cur)(struct xfs_btree_cur *cur, + const union xfs_btree_key *key); /* - * Difference between key2 and key1 -- positive if key1 > key2, - * negative if key1 < key2, and zero if equal. If the @mask parameter - * is non NULL, each key field to be used in the comparison must - * contain a nonzero value. + * Compare key1 and key2 -- positive if key1 > key2, negative if + * key1 < key2, and zero if equal. If the @mask parameter is non NULL, + * each key field to be used in the comparison must contain a nonzero + * value. 
*/ - int64_t (*diff_two_keys)(struct xfs_btree_cur *cur, - const union xfs_btree_key *key1, - const union xfs_btree_key *key2, - const union xfs_btree_key *mask); + int (*cmp_two_keys)(struct xfs_btree_cur *cur, + const union xfs_btree_key *key1, + const union xfs_btree_key *key2, + const union xfs_btree_key *mask); const struct xfs_buf_ops *buf_ops; @@ -516,9 +519,9 @@ struct xfs_btree_block *xfs_btree_get_block(struct xfs_btree_cur *cur, int level, struct xfs_buf **bpp); bool xfs_btree_ptr_is_null(struct xfs_btree_cur *cur, const union xfs_btree_ptr *ptr); -int64_t xfs_btree_diff_two_ptrs(struct xfs_btree_cur *cur, - const union xfs_btree_ptr *a, - const union xfs_btree_ptr *b); +int xfs_btree_cmp_two_ptrs(struct xfs_btree_cur *cur, + const union xfs_btree_ptr *a, + const union xfs_btree_ptr *b); void xfs_btree_get_sibling(struct xfs_btree_cur *cur, struct xfs_btree_block *block, union xfs_btree_ptr *ptr, int lr); @@ -546,7 +549,7 @@ xfs_btree_keycmp_lt( const union xfs_btree_key *key1, const union xfs_btree_key *key2) { - return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) < 0; + return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) < 0; } static inline bool @@ -555,7 +558,7 @@ xfs_btree_keycmp_gt( const union xfs_btree_key *key1, const union xfs_btree_key *key2) { - return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) > 0; + return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) > 0; } static inline bool @@ -564,7 +567,7 @@ xfs_btree_keycmp_eq( const union xfs_btree_key *key1, const union xfs_btree_key *key2) { - return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) == 0; + return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) == 0; } static inline bool @@ -602,7 +605,7 @@ xfs_btree_masked_keycmp_lt( const union xfs_btree_key *key2, const union xfs_btree_key *mask) { - return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) < 0; + return cur->bc_ops->cmp_two_keys(cur, key1, key2, mask) < 0; } static inline bool @@ -612,7 +615,7 @@ xfs_btree_masked_keycmp_gt( const union xfs_btree_key *key2, const union xfs_btree_key *mask) { - return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) > 0; + return cur->bc_ops->cmp_two_keys(cur, key1, key2, mask) > 0; } static inline bool diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index 9566a7623365..779dac59b1f3 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -112,7 +112,7 @@ typedef struct xfs_sb { uint16_t sb_sectsize; /* volume sector size, bytes */ uint16_t sb_inodesize; /* inode size, bytes */ uint16_t sb_inopblock; /* inodes per block */ - char sb_fname[XFSLABEL_MAX]; /* file system name */ + char sb_fname[XFSLABEL_MAX] __nonstring; /* file system name */ uint8_t sb_blocklog; /* log2 of sb_blocksize */ uint8_t sb_sectlog; /* log2 of sb_sectsize */ uint8_t sb_inodelog; /* log2 of sb_inodesize */ diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c index 20ad7c309489..792f76d2e2a0 100644 --- a/fs/xfs/libxfs/xfs_group.c +++ b/fs/xfs/libxfs/xfs_group.c @@ -172,7 +172,8 @@ xfs_group_free( /* drop the mount's active reference */ xfs_group_rele(xg); - XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) != 0); + XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) > 0); + XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) < 0); kfree_rcu_mightsleep(xg); } diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index 6f270d8f4270..100afdd66cdd 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c @@ -265,17 +265,17 @@ 
xfs_finobt_init_ptr_from_cur( ptr->s = agi->agi_free_root; } -STATIC int64_t -xfs_inobt_key_diff( +STATIC int +xfs_inobt_cmp_key_with_cur( struct xfs_btree_cur *cur, const union xfs_btree_key *key) { - return (int64_t)be32_to_cpu(key->inobt.ir_startino) - - cur->bc_rec.i.ir_startino; + return cmp_int(be32_to_cpu(key->inobt.ir_startino), + cur->bc_rec.i.ir_startino); } -STATIC int64_t -xfs_inobt_diff_two_keys( +STATIC int +xfs_inobt_cmp_two_keys( struct xfs_btree_cur *cur, const union xfs_btree_key *k1, const union xfs_btree_key *k2, @@ -283,8 +283,8 @@ xfs_inobt_diff_two_keys( { ASSERT(!mask || mask->inobt.ir_startino); - return (int64_t)be32_to_cpu(k1->inobt.ir_startino) - - be32_to_cpu(k2->inobt.ir_startino); + return cmp_int(be32_to_cpu(k1->inobt.ir_startino), + be32_to_cpu(k2->inobt.ir_startino)); } static xfs_failaddr_t @@ -430,9 +430,9 @@ const struct xfs_btree_ops xfs_inobt_ops = { .init_high_key_from_rec = xfs_inobt_init_high_key_from_rec, .init_rec_from_cur = xfs_inobt_init_rec_from_cur, .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, - .key_diff = xfs_inobt_key_diff, + .cmp_key_with_cur = xfs_inobt_cmp_key_with_cur, .buf_ops = &xfs_inobt_buf_ops, - .diff_two_keys = xfs_inobt_diff_two_keys, + .cmp_two_keys = xfs_inobt_cmp_two_keys, .keys_inorder = xfs_inobt_keys_inorder, .recs_inorder = xfs_inobt_recs_inorder, .keys_contiguous = xfs_inobt_keys_contiguous, @@ -460,9 +460,9 @@ const struct xfs_btree_ops xfs_finobt_ops = { .init_high_key_from_rec = xfs_inobt_init_high_key_from_rec, .init_rec_from_cur = xfs_inobt_init_rec_from_cur, .init_ptr_from_cur = xfs_finobt_init_ptr_from_cur, - .key_diff = xfs_inobt_key_diff, + .cmp_key_with_cur = xfs_inobt_cmp_key_with_cur, .buf_ops = &xfs_finobt_buf_ops, - .diff_two_keys = xfs_inobt_diff_two_keys, + .cmp_two_keys = xfs_inobt_cmp_two_keys, .keys_inorder = xfs_inobt_keys_inorder, .recs_inorder = xfs_inobt_recs_inorder, .keys_contiguous = xfs_inobt_keys_contiguous, diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h index 66c7916fb5cd..95de23095030 100644 --- a/fs/xfs/libxfs/xfs_log_recover.h +++ b/fs/xfs/libxfs/xfs_log_recover.h @@ -104,7 +104,7 @@ struct xlog_recover_item { struct list_head ri_list; int ri_cnt; /* count of regions found */ int ri_total; /* total regions */ - struct xfs_log_iovec *ri_buf; /* ptr to regions buffer */ + struct kvec *ri_buf; /* ptr to regions buffer */ const struct xlog_recover_item_ops *ri_ops; }; @@ -117,7 +117,7 @@ struct xlog_recover { struct list_head r_itemq; /* q for items */ }; -#define ITEM_TYPE(i) (*(unsigned short *)(i)->ri_buf[0].i_addr) +#define ITEM_TYPE(i) (*(unsigned short *)(i)->ri_buf[0].iov_base) #define XLOG_RECOVER_CRCPASS 0 #define XLOG_RECOVER_PASS1 1 diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c index cebe83f7842a..897784037483 100644 --- a/fs/xfs/libxfs/xfs_refcount.c +++ b/fs/xfs/libxfs/xfs_refcount.c @@ -2099,9 +2099,7 @@ xfs_refcount_recover_cow_leftovers( * recording the CoW debris we cancel the (empty) transaction * and everything goes away cleanly. 
*/ - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; + tp = xfs_trans_alloc_empty(mp); if (isrt) { xfs_rtgroup_lock(to_rtg(xg), XFS_RTGLOCK_REFCOUNT); diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c index 54505fee1852..06da3ca14727 100644 --- a/fs/xfs/libxfs/xfs_refcount_btree.c +++ b/fs/xfs/libxfs/xfs_refcount_btree.c @@ -174,8 +174,8 @@ xfs_refcountbt_init_ptr_from_cur( ptr->s = agf->agf_refcount_root; } -STATIC int64_t -xfs_refcountbt_key_diff( +STATIC int +xfs_refcountbt_cmp_key_with_cur( struct xfs_btree_cur *cur, const union xfs_btree_key *key) { @@ -185,11 +185,11 @@ xfs_refcountbt_key_diff( start = xfs_refcount_encode_startblock(irec->rc_startblock, irec->rc_domain); - return (int64_t)be32_to_cpu(kp->rc_startblock) - start; + return cmp_int(be32_to_cpu(kp->rc_startblock), start); } -STATIC int64_t -xfs_refcountbt_diff_two_keys( +STATIC int +xfs_refcountbt_cmp_two_keys( struct xfs_btree_cur *cur, const union xfs_btree_key *k1, const union xfs_btree_key *k2, @@ -197,8 +197,8 @@ xfs_refcountbt_diff_two_keys( { ASSERT(!mask || mask->refc.rc_startblock); - return (int64_t)be32_to_cpu(k1->refc.rc_startblock) - - be32_to_cpu(k2->refc.rc_startblock); + return cmp_int(be32_to_cpu(k1->refc.rc_startblock), + be32_to_cpu(k2->refc.rc_startblock)); } STATIC xfs_failaddr_t @@ -339,9 +339,9 @@ const struct xfs_btree_ops xfs_refcountbt_ops = { .init_high_key_from_rec = xfs_refcountbt_init_high_key_from_rec, .init_rec_from_cur = xfs_refcountbt_init_rec_from_cur, .init_ptr_from_cur = xfs_refcountbt_init_ptr_from_cur, - .key_diff = xfs_refcountbt_key_diff, + .cmp_key_with_cur = xfs_refcountbt_cmp_key_with_cur, .buf_ops = &xfs_refcountbt_buf_ops, - .diff_two_keys = xfs_refcountbt_diff_two_keys, + .cmp_two_keys = xfs_refcountbt_cmp_two_keys, .keys_inorder = xfs_refcountbt_keys_inorder, .recs_inorder = xfs_refcountbt_recs_inorder, .keys_contiguous = xfs_refcountbt_keys_contiguous, diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c index 2cab694ac58a..bf16aee50d73 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.c +++ b/fs/xfs/libxfs/xfs_rmap_btree.c @@ -243,38 +243,22 @@ static inline uint64_t offset_keymask(uint64_t offset) return offset & ~XFS_RMAP_OFF_UNWRITTEN; } -STATIC int64_t -xfs_rmapbt_key_diff( +STATIC int +xfs_rmapbt_cmp_key_with_cur( struct xfs_btree_cur *cur, const union xfs_btree_key *key) { struct xfs_rmap_irec *rec = &cur->bc_rec.r; const struct xfs_rmap_key *kp = &key->rmap; - __u64 x, y; - int64_t d; - d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock; - if (d) - return d; - - x = be64_to_cpu(kp->rm_owner); - y = rec->rm_owner; - if (x > y) - return 1; - else if (y > x) - return -1; - - x = offset_keymask(be64_to_cpu(kp->rm_offset)); - y = offset_keymask(xfs_rmap_irec_offset_pack(rec)); - if (x > y) - return 1; - else if (y > x) - return -1; - return 0; + return cmp_int(be32_to_cpu(kp->rm_startblock), rec->rm_startblock) ?: + cmp_int(be64_to_cpu(kp->rm_owner), rec->rm_owner) ?: + cmp_int(offset_keymask(be64_to_cpu(kp->rm_offset)), + offset_keymask(xfs_rmap_irec_offset_pack(rec))); } -STATIC int64_t -xfs_rmapbt_diff_two_keys( +STATIC int +xfs_rmapbt_cmp_two_keys( struct xfs_btree_cur *cur, const union xfs_btree_key *k1, const union xfs_btree_key *k2, @@ -282,36 +266,31 @@ xfs_rmapbt_diff_two_keys( { const struct xfs_rmap_key *kp1 = &k1->rmap; const struct xfs_rmap_key *kp2 = &k2->rmap; - int64_t d; - __u64 x, y; + int d; /* Doesn't make sense to mask off the physical space part */ ASSERT(!mask || 
mask->rmap.rm_startblock); - d = (int64_t)be32_to_cpu(kp1->rm_startblock) - - be32_to_cpu(kp2->rm_startblock); + d = cmp_int(be32_to_cpu(kp1->rm_startblock), + be32_to_cpu(kp2->rm_startblock)); if (d) return d; if (!mask || mask->rmap.rm_owner) { - x = be64_to_cpu(kp1->rm_owner); - y = be64_to_cpu(kp2->rm_owner); - if (x > y) - return 1; - else if (y > x) - return -1; + d = cmp_int(be64_to_cpu(kp1->rm_owner), + be64_to_cpu(kp2->rm_owner)); + if (d) + return d; } if (!mask || mask->rmap.rm_offset) { /* Doesn't make sense to allow offset but not owner */ ASSERT(!mask || mask->rmap.rm_owner); - x = offset_keymask(be64_to_cpu(kp1->rm_offset)); - y = offset_keymask(be64_to_cpu(kp2->rm_offset)); - if (x > y) - return 1; - else if (y > x) - return -1; + d = cmp_int(offset_keymask(be64_to_cpu(kp1->rm_offset)), + offset_keymask(be64_to_cpu(kp2->rm_offset))); + if (d) + return d; } return 0; @@ -515,9 +494,9 @@ const struct xfs_btree_ops xfs_rmapbt_ops = { .init_high_key_from_rec = xfs_rmapbt_init_high_key_from_rec, .init_rec_from_cur = xfs_rmapbt_init_rec_from_cur, .init_ptr_from_cur = xfs_rmapbt_init_ptr_from_cur, - .key_diff = xfs_rmapbt_key_diff, + .cmp_key_with_cur = xfs_rmapbt_cmp_key_with_cur, .buf_ops = &xfs_rmapbt_buf_ops, - .diff_two_keys = xfs_rmapbt_diff_two_keys, + .cmp_two_keys = xfs_rmapbt_cmp_two_keys, .keys_inorder = xfs_rmapbt_keys_inorder, .recs_inorder = xfs_rmapbt_recs_inorder, .keys_contiguous = xfs_rmapbt_keys_contiguous, @@ -632,9 +611,9 @@ const struct xfs_btree_ops xfs_rmapbt_mem_ops = { .init_high_key_from_rec = xfs_rmapbt_init_high_key_from_rec, .init_rec_from_cur = xfs_rmapbt_init_rec_from_cur, .init_ptr_from_cur = xfbtree_init_ptr_from_cur, - .key_diff = xfs_rmapbt_key_diff, + .cmp_key_with_cur = xfs_rmapbt_cmp_key_with_cur, .buf_ops = &xfs_rmapbt_mem_buf_ops, - .diff_two_keys = xfs_rmapbt_diff_two_keys, + .cmp_two_keys = xfs_rmapbt_cmp_two_keys, .keys_inorder = xfs_rmapbt_keys_inorder, .recs_inorder = xfs_rmapbt_recs_inorder, .keys_contiguous = xfs_rmapbt_keys_contiguous, diff --git a/fs/xfs/libxfs/xfs_rtrefcount_btree.c b/fs/xfs/libxfs/xfs_rtrefcount_btree.c index 3db5e7a4a945..ac11e94b42ae 100644 --- a/fs/xfs/libxfs/xfs_rtrefcount_btree.c +++ b/fs/xfs/libxfs/xfs_rtrefcount_btree.c @@ -156,8 +156,8 @@ xfs_rtrefcountbt_init_ptr_from_cur( ptr->l = 0; } -STATIC int64_t -xfs_rtrefcountbt_key_diff( +STATIC int +xfs_rtrefcountbt_cmp_key_with_cur( struct xfs_btree_cur *cur, const union xfs_btree_key *key) { @@ -167,11 +167,11 @@ xfs_rtrefcountbt_key_diff( start = xfs_refcount_encode_startblock(irec->rc_startblock, irec->rc_domain); - return (int64_t)be32_to_cpu(kp->rc_startblock) - start; + return cmp_int(be32_to_cpu(kp->rc_startblock), start); } -STATIC int64_t -xfs_rtrefcountbt_diff_two_keys( +STATIC int +xfs_rtrefcountbt_cmp_two_keys( struct xfs_btree_cur *cur, const union xfs_btree_key *k1, const union xfs_btree_key *k2, @@ -179,8 +179,8 @@ xfs_rtrefcountbt_diff_two_keys( { ASSERT(!mask || mask->refc.rc_startblock); - return (int64_t)be32_to_cpu(k1->refc.rc_startblock) - - be32_to_cpu(k2->refc.rc_startblock); + return cmp_int(be32_to_cpu(k1->refc.rc_startblock), + be32_to_cpu(k2->refc.rc_startblock)); } static xfs_failaddr_t @@ -387,9 +387,9 @@ const struct xfs_btree_ops xfs_rtrefcountbt_ops = { .init_high_key_from_rec = xfs_rtrefcountbt_init_high_key_from_rec, .init_rec_from_cur = xfs_rtrefcountbt_init_rec_from_cur, .init_ptr_from_cur = xfs_rtrefcountbt_init_ptr_from_cur, - .key_diff = xfs_rtrefcountbt_key_diff, + .cmp_key_with_cur = xfs_rtrefcountbt_cmp_key_with_cur, 
.buf_ops = &xfs_rtrefcountbt_buf_ops, - .diff_two_keys = xfs_rtrefcountbt_diff_two_keys, + .cmp_two_keys = xfs_rtrefcountbt_cmp_two_keys, .keys_inorder = xfs_rtrefcountbt_keys_inorder, .recs_inorder = xfs_rtrefcountbt_recs_inorder, .keys_contiguous = xfs_rtrefcountbt_keys_contiguous, diff --git a/fs/xfs/libxfs/xfs_rtrmap_btree.c b/fs/xfs/libxfs/xfs_rtrmap_btree.c index 9bdc2cbfc113..55f903165769 100644 --- a/fs/xfs/libxfs/xfs_rtrmap_btree.c +++ b/fs/xfs/libxfs/xfs_rtrmap_btree.c @@ -185,38 +185,22 @@ static inline uint64_t offset_keymask(uint64_t offset) return offset & ~XFS_RMAP_OFF_UNWRITTEN; } -STATIC int64_t -xfs_rtrmapbt_key_diff( +STATIC int +xfs_rtrmapbt_cmp_key_with_cur( struct xfs_btree_cur *cur, const union xfs_btree_key *key) { struct xfs_rmap_irec *rec = &cur->bc_rec.r; const struct xfs_rmap_key *kp = &key->rmap; - __u64 x, y; - int64_t d; - d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock; - if (d) - return d; - - x = be64_to_cpu(kp->rm_owner); - y = rec->rm_owner; - if (x > y) - return 1; - else if (y > x) - return -1; - - x = offset_keymask(be64_to_cpu(kp->rm_offset)); - y = offset_keymask(xfs_rmap_irec_offset_pack(rec)); - if (x > y) - return 1; - else if (y > x) - return -1; - return 0; + return cmp_int(be32_to_cpu(kp->rm_startblock), rec->rm_startblock) ?: + cmp_int(be64_to_cpu(kp->rm_owner), rec->rm_owner) ?: + cmp_int(offset_keymask(be64_to_cpu(kp->rm_offset)), + offset_keymask(xfs_rmap_irec_offset_pack(rec))); } -STATIC int64_t -xfs_rtrmapbt_diff_two_keys( +STATIC int +xfs_rtrmapbt_cmp_two_keys( struct xfs_btree_cur *cur, const union xfs_btree_key *k1, const union xfs_btree_key *k2, @@ -224,36 +208,31 @@ xfs_rtrmapbt_diff_two_keys( { const struct xfs_rmap_key *kp1 = &k1->rmap; const struct xfs_rmap_key *kp2 = &k2->rmap; - int64_t d; - __u64 x, y; + int d; /* Doesn't make sense to mask off the physical space part */ ASSERT(!mask || mask->rmap.rm_startblock); - d = (int64_t)be32_to_cpu(kp1->rm_startblock) - - be32_to_cpu(kp2->rm_startblock); + d = cmp_int(be32_to_cpu(kp1->rm_startblock), + be32_to_cpu(kp2->rm_startblock)); if (d) return d; if (!mask || mask->rmap.rm_owner) { - x = be64_to_cpu(kp1->rm_owner); - y = be64_to_cpu(kp2->rm_owner); - if (x > y) - return 1; - else if (y > x) - return -1; + d = cmp_int(be64_to_cpu(kp1->rm_owner), + be64_to_cpu(kp2->rm_owner)); + if (d) + return d; } if (!mask || mask->rmap.rm_offset) { /* Doesn't make sense to allow offset but not owner */ ASSERT(!mask || mask->rmap.rm_owner); - x = offset_keymask(be64_to_cpu(kp1->rm_offset)); - y = offset_keymask(be64_to_cpu(kp2->rm_offset)); - if (x > y) - return 1; - else if (y > x) - return -1; + d = cmp_int(offset_keymask(be64_to_cpu(kp1->rm_offset)), + offset_keymask(be64_to_cpu(kp2->rm_offset))); + if (d) + return d; } return 0; @@ -511,9 +490,9 @@ const struct xfs_btree_ops xfs_rtrmapbt_ops = { .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec, .init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur, .init_ptr_from_cur = xfs_rtrmapbt_init_ptr_from_cur, - .key_diff = xfs_rtrmapbt_key_diff, + .cmp_key_with_cur = xfs_rtrmapbt_cmp_key_with_cur, .buf_ops = &xfs_rtrmapbt_buf_ops, - .diff_two_keys = xfs_rtrmapbt_diff_two_keys, + .cmp_two_keys = xfs_rtrmapbt_cmp_two_keys, .keys_inorder = xfs_rtrmapbt_keys_inorder, .recs_inorder = xfs_rtrmapbt_recs_inorder, .keys_contiguous = xfs_rtrmapbt_keys_contiguous, @@ -620,9 +599,9 @@ const struct xfs_btree_ops xfs_rtrmapbt_mem_ops = { .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec, .init_rec_from_cur = 
xfs_rtrmapbt_init_rec_from_cur, .init_ptr_from_cur = xfbtree_init_ptr_from_cur, - .key_diff = xfs_rtrmapbt_key_diff, + .cmp_key_with_cur = xfs_rtrmapbt_cmp_key_with_cur, .buf_ops = &xfs_rtrmapbt_mem_buf_ops, - .diff_two_keys = xfs_rtrmapbt_diff_two_keys, + .cmp_two_keys = xfs_rtrmapbt_cmp_two_keys, .keys_inorder = xfs_rtrmapbt_keys_inorder, .recs_inorder = xfs_rtrmapbt_recs_inorder, .keys_contiguous = xfs_rtrmapbt_keys_contiguous, diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c index fe678a0438bc..cd6f0ff382a7 100644 --- a/fs/xfs/scrub/btree.c +++ b/fs/xfs/scrub/btree.c @@ -306,7 +306,7 @@ xchk_btree_block_check_sibling( if (pbp) xchk_buffer_recheck(bs->sc, pbp); - if (xfs_btree_diff_two_ptrs(cur, pp, sibling)) + if (xfs_btree_cmp_two_ptrs(cur, pp, sibling)) xchk_btree_set_corrupt(bs->sc, cur, level); out: xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR); diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c index 28ad341df8ee..2ef7742be7d3 100644 --- a/fs/xfs/scrub/common.c +++ b/fs/xfs/scrub/common.c @@ -866,11 +866,11 @@ xchk_trans_cancel( sc->tp = NULL; } -int +void xchk_trans_alloc_empty( struct xfs_scrub *sc) { - return xfs_trans_alloc_empty(sc->mp, &sc->tp); + sc->tp = xfs_trans_alloc_empty(sc->mp); } /* @@ -892,7 +892,8 @@ xchk_trans_alloc( return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate, resblks, 0, 0, &sc->tp); - return xchk_trans_alloc_empty(sc); + xchk_trans_alloc_empty(sc); + return 0; } /* Set us up with a transaction and an empty context. */ diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h index 19877d99f255..ddbc065c798c 100644 --- a/fs/xfs/scrub/common.h +++ b/fs/xfs/scrub/common.h @@ -7,7 +7,7 @@ #define __XFS_SCRUB_COMMON_H__ int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks); -int xchk_trans_alloc_empty(struct xfs_scrub *sc); +void xchk_trans_alloc_empty(struct xfs_scrub *sc); void xchk_trans_cancel(struct xfs_scrub *sc); bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno, diff --git a/fs/xfs/scrub/dir_repair.c b/fs/xfs/scrub/dir_repair.c index 249313882108..8d3b550990b5 100644 --- a/fs/xfs/scrub/dir_repair.c +++ b/fs/xfs/scrub/dir_repair.c @@ -1289,9 +1289,7 @@ xrep_dir_scan_dirtree( if (sc->ilock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) xchk_iunlock(sc, sc->ilock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); - error = xchk_trans_alloc_empty(sc); - if (error) - return error; + xchk_trans_alloc_empty(sc); while ((error = xchk_iscan_iter(&rd->pscan.iscan, &ip)) == 1) { bool flush; @@ -1317,9 +1315,7 @@ xrep_dir_scan_dirtree( if (error) break; - error = xchk_trans_alloc_empty(sc); - if (error) - break; + xchk_trans_alloc_empty(sc); } if (xchk_should_terminate(sc, &error)) diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c index 9b598c5790ad..cebd0d526926 100644 --- a/fs/xfs/scrub/fscounters.c +++ b/fs/xfs/scrub/fscounters.c @@ -237,7 +237,8 @@ xchk_setup_fscounters( return error; } - return xchk_trans_alloc_empty(sc); + xchk_trans_alloc_empty(sc); + return 0; } /* diff --git a/fs/xfs/scrub/metapath.c b/fs/xfs/scrub/metapath.c index e21c16fbd15d..14939d7de349 100644 --- a/fs/xfs/scrub/metapath.c +++ b/fs/xfs/scrub/metapath.c @@ -318,9 +318,7 @@ xchk_metapath( return 0; } - error = xchk_trans_alloc_empty(sc); - if (error) - return error; + xchk_trans_alloc_empty(sc); error = xchk_metapath_ilock_both(mpath); if (error) diff --git a/fs/xfs/scrub/nlinks.c b/fs/xfs/scrub/nlinks.c index 4a47d0aabf73..26721fab5cab 100644 --- a/fs/xfs/scrub/nlinks.c +++ b/fs/xfs/scrub/nlinks.c @@ -555,9 +555,7 @@ 
xchk_nlinks_collect( * do not take sb_internal. */ xchk_trans_cancel(sc); - error = xchk_trans_alloc_empty(sc); - if (error) - return error; + xchk_trans_alloc_empty(sc); while ((error = xchk_iscan_iter(&xnc->collect_iscan, &ip)) == 1) { if (S_ISDIR(VFS_I(ip)->i_mode)) @@ -880,9 +878,7 @@ xchk_nlinks_compare( * inactivation workqueue. */ xchk_trans_cancel(sc); - error = xchk_trans_alloc_empty(sc); - if (error) - return error; + xchk_trans_alloc_empty(sc); /* * Use the inobt to walk all allocated inodes to compare the link diff --git a/fs/xfs/scrub/nlinks_repair.c b/fs/xfs/scrub/nlinks_repair.c index 4ebdee095428..6ef2ee9c3814 100644 --- a/fs/xfs/scrub/nlinks_repair.c +++ b/fs/xfs/scrub/nlinks_repair.c @@ -340,9 +340,7 @@ xrep_nlinks( * We can only push the inactivation workqueues with an empty * transaction. */ - error = xchk_trans_alloc_empty(sc); - if (error) - break; + xchk_trans_alloc_empty(sc); } xchk_iscan_iter_finish(&xnc->compare_iscan); xchk_iscan_teardown(&xnc->compare_iscan); diff --git a/fs/xfs/scrub/parent_repair.c b/fs/xfs/scrub/parent_repair.c index 31bfe10be22a..2949feda6271 100644 --- a/fs/xfs/scrub/parent_repair.c +++ b/fs/xfs/scrub/parent_repair.c @@ -569,9 +569,7 @@ xrep_parent_scan_dirtree( if (sc->ilock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) xchk_iunlock(sc, sc->ilock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); - error = xchk_trans_alloc_empty(sc); - if (error) - return error; + xchk_trans_alloc_empty(sc); while ((error = xchk_iscan_iter(&rp->pscan.iscan, &ip)) == 1) { bool flush; @@ -597,9 +595,7 @@ xrep_parent_scan_dirtree( if (error) break; - error = xchk_trans_alloc_empty(sc); - if (error) - break; + xchk_trans_alloc_empty(sc); } if (xchk_should_terminate(sc, &error)) @@ -1099,9 +1095,7 @@ xrep_parent_flush_xattrs( xrep_tempfile_iounlock(rp->sc); /* Recreate the empty transaction and relock the inode. */ - error = xchk_trans_alloc_empty(rp->sc); - if (error) - return error; + xchk_trans_alloc_empty(rp->sc); xchk_ilock(rp->sc, XFS_ILOCK_EXCL); return 0; } diff --git a/fs/xfs/scrub/quotacheck.c b/fs/xfs/scrub/quotacheck.c index dc4033b91e44..e4105aaafe84 100644 --- a/fs/xfs/scrub/quotacheck.c +++ b/fs/xfs/scrub/quotacheck.c @@ -505,9 +505,7 @@ xqcheck_collect_counts( * transactions do not take sb_internal. 
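The repeated removals of "error = xchk_trans_alloc_empty(sc); if (error) ..." above all stem from a single interface change: xfs_trans_alloc_empty() now hands back the transaction directly instead of an error code, so the scrub wrapper becomes void and each caller loses its failure branch. A hedged sketch of the resulting calling convention (kernel context assumed; examplefs_readonly_walk is a hypothetical helper standing in for the scrub iteration):

static int examplefs_walk_metadata(struct xfs_mount *mp)
{
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc_empty(mp);		/* cannot fail: no log or block reservation */
	error = examplefs_readonly_walk(mp, tp);	/* hypothetical read-only walker */
	xfs_trans_cancel(tp);			/* nothing was dirtied; cancel just frees it */
	return error;
}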
*/ xchk_trans_cancel(sc); - error = xchk_trans_alloc_empty(sc); - if (error) - return error; + xchk_trans_alloc_empty(sc); while ((error = xchk_iscan_iter(&xqc->iscan, &ip)) == 1) { error = xqcheck_collect_inode(xqc, ip); diff --git a/fs/xfs/scrub/rcbag_btree.c b/fs/xfs/scrub/rcbag_btree.c index 709356dc6256..9a4ef823c5a7 100644 --- a/fs/xfs/scrub/rcbag_btree.c +++ b/fs/xfs/scrub/rcbag_btree.c @@ -47,29 +47,20 @@ rcbagbt_init_rec_from_cur( bag_rec->rbg_refcount = bag_irec->rbg_refcount; } -STATIC int64_t -rcbagbt_key_diff( +STATIC int +rcbagbt_cmp_key_with_cur( struct xfs_btree_cur *cur, const union xfs_btree_key *key) { struct rcbag_rec *rec = (struct rcbag_rec *)&cur->bc_rec; const struct rcbag_key *kp = (const struct rcbag_key *)key; - if (kp->rbg_startblock > rec->rbg_startblock) - return 1; - if (kp->rbg_startblock < rec->rbg_startblock) - return -1; - - if (kp->rbg_blockcount > rec->rbg_blockcount) - return 1; - if (kp->rbg_blockcount < rec->rbg_blockcount) - return -1; - - return 0; + return cmp_int(kp->rbg_startblock, rec->rbg_startblock) ?: + cmp_int(kp->rbg_blockcount, rec->rbg_blockcount); } -STATIC int64_t -rcbagbt_diff_two_keys( +STATIC int +rcbagbt_cmp_two_keys( struct xfs_btree_cur *cur, const union xfs_btree_key *k1, const union xfs_btree_key *k2, @@ -80,17 +71,8 @@ rcbagbt_diff_two_keys( ASSERT(mask == NULL); - if (kp1->rbg_startblock > kp2->rbg_startblock) - return 1; - if (kp1->rbg_startblock < kp2->rbg_startblock) - return -1; - - if (kp1->rbg_blockcount > kp2->rbg_blockcount) - return 1; - if (kp1->rbg_blockcount < kp2->rbg_blockcount) - return -1; - - return 0; + return cmp_int(kp1->rbg_startblock, kp2->rbg_startblock) ?: + cmp_int(kp1->rbg_blockcount, kp2->rbg_blockcount); } STATIC int @@ -201,9 +183,9 @@ static const struct xfs_btree_ops rcbagbt_mem_ops = { .init_key_from_rec = rcbagbt_init_key_from_rec, .init_rec_from_cur = rcbagbt_init_rec_from_cur, .init_ptr_from_cur = xfbtree_init_ptr_from_cur, - .key_diff = rcbagbt_key_diff, + .cmp_key_with_cur = rcbagbt_cmp_key_with_cur, .buf_ops = &rcbagbt_mem_buf_ops, - .diff_two_keys = rcbagbt_diff_two_keys, + .cmp_two_keys = rcbagbt_cmp_two_keys, .keys_inorder = rcbagbt_keys_inorder, .recs_inorder = rcbagbt_recs_inorder, }; diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index f8f9ed30f56b..d00c18954a26 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -1269,42 +1269,6 @@ xrep_setup_xfbtree( } /* - * Create a dummy transaction for use in a live update hook function. This - * function MUST NOT be called from regular repair code because the current - * process' transaction is saved via the cookie. - */ -int -xrep_trans_alloc_hook_dummy( - struct xfs_mount *mp, - void **cookiep, - struct xfs_trans **tpp) -{ - int error; - - *cookiep = current->journal_info; - current->journal_info = NULL; - - error = xfs_trans_alloc_empty(mp, tpp); - if (!error) - return 0; - - current->journal_info = *cookiep; - *cookiep = NULL; - return error; -} - -/* Cancel a dummy transaction used by a live update hook function. */ -void -xrep_trans_cancel_hook_dummy( - void **cookiep, - struct xfs_trans *tp) -{ - xfs_trans_cancel(tp); - current->journal_info = *cookiep; - *cookiep = NULL; -} - -/* * See if this buffer can pass the given ->verify_struct() function. 
* * If the buffer already has ops attached and they're not the ones that were diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index af0a3a9e5ed9..9c04295742c8 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -180,10 +180,6 @@ int xrep_quotacheck(struct xfs_scrub *sc); int xrep_reinit_pagf(struct xfs_scrub *sc); int xrep_reinit_pagi(struct xfs_scrub *sc); -int xrep_trans_alloc_hook_dummy(struct xfs_mount *mp, void **cookiep, - struct xfs_trans **tpp); -void xrep_trans_cancel_hook_dummy(void **cookiep, struct xfs_trans *tp); - bool xrep_buf_verify_struct(struct xfs_buf *bp, const struct xfs_buf_ops *ops); void xrep_inode_set_nblocks(struct xfs_scrub *sc, int64_t new_blocks); int xrep_reset_metafile_resv(struct xfs_scrub *sc); diff --git a/fs/xfs/scrub/rmap_repair.c b/fs/xfs/scrub/rmap_repair.c index f5f73078ffe2..17d4a38d735c 100644 --- a/fs/xfs/scrub/rmap_repair.c +++ b/fs/xfs/scrub/rmap_repair.c @@ -951,9 +951,7 @@ end_agscan: sa->agf_bp = NULL; sa->agi_bp = NULL; xchk_trans_cancel(sc); - error = xchk_trans_alloc_empty(sc); - if (error) - return error; + xchk_trans_alloc_empty(sc); /* Iterate all AGs for inodes rmaps. */ while ((error = xchk_iscan_iter(&rr->iscan, &ip)) == 1) { @@ -1612,7 +1610,6 @@ xrep_rmapbt_live_update( struct xfs_mount *mp; struct xfs_btree_cur *mcur; struct xfs_trans *tp; - void *txcookie; int error; rr = container_of(nb, struct xrep_rmap, rhook.rmap_hook.nb); @@ -1623,9 +1620,7 @@ xrep_rmapbt_live_update( trace_xrep_rmap_live_update(pag_group(rr->sc->sa.pag), action, p); - error = xrep_trans_alloc_hook_dummy(mp, &txcookie, &tp); - if (error) - goto out_abort; + tp = xfs_trans_alloc_empty(mp); mutex_lock(&rr->lock); mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, tp, &rr->rmap_btree); @@ -1639,14 +1634,13 @@ xrep_rmapbt_live_update( if (error) goto out_cancel; - xrep_trans_cancel_hook_dummy(&txcookie, tp); + xfs_trans_cancel(tp); mutex_unlock(&rr->lock); return NOTIFY_DONE; out_cancel: xfbtree_trans_cancel(&rr->rmap_btree, tp); - xrep_trans_cancel_hook_dummy(&txcookie, tp); -out_abort: + xfs_trans_cancel(tp); mutex_unlock(&rr->lock); xchk_iscan_abort(&rr->iscan); out_unlock: diff --git a/fs/xfs/scrub/rtrmap_repair.c b/fs/xfs/scrub/rtrmap_repair.c index fc2592c53af5..7561941a337a 100644 --- a/fs/xfs/scrub/rtrmap_repair.c +++ b/fs/xfs/scrub/rtrmap_repair.c @@ -580,9 +580,7 @@ xrep_rtrmap_find_rmaps( */ xchk_trans_cancel(sc); xchk_rtgroup_unlock(&sc->sr); - error = xchk_trans_alloc_empty(sc); - if (error) - return error; + xchk_trans_alloc_empty(sc); while ((error = xchk_iscan_iter(&rr->iscan, &ip)) == 1) { error = xrep_rtrmap_scan_inode(rr, ip); @@ -846,7 +844,6 @@ xrep_rtrmapbt_live_update( struct xfs_mount *mp; struct xfs_btree_cur *mcur; struct xfs_trans *tp; - void *txcookie; int error; rr = container_of(nb, struct xrep_rtrmap, rhook.rmap_hook.nb); @@ -857,9 +854,7 @@ xrep_rtrmapbt_live_update( trace_xrep_rmap_live_update(rtg_group(rr->sc->sr.rtg), action, p); - error = xrep_trans_alloc_hook_dummy(mp, &txcookie, &tp); - if (error) - goto out_abort; + tp = xfs_trans_alloc_empty(mp); mutex_lock(&rr->lock); mcur = xfs_rtrmapbt_mem_cursor(rr->sc->sr.rtg, tp, &rr->rtrmap_btree); @@ -873,14 +868,13 @@ xrep_rtrmapbt_live_update( if (error) goto out_cancel; - xrep_trans_cancel_hook_dummy(&txcookie, tp); + xfs_trans_cancel(tp); mutex_unlock(&rr->lock); return NOTIFY_DONE; out_cancel: xfbtree_trans_cancel(&rr->rtrmap_btree, tp); - xrep_trans_cancel_hook_dummy(&txcookie, tp); -out_abort: + xfs_trans_cancel(tp); xchk_iscan_abort(&rr->iscan); 
mutex_unlock(&rr->lock); out_unlock: diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c index 76e24032e99a..3c3b0d25006f 100644 --- a/fs/xfs/scrub/scrub.c +++ b/fs/xfs/scrub/scrub.c @@ -876,10 +876,7 @@ xchk_scrubv_open_by_handle( struct xfs_inode *ip; int error; - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return NULL; - + tp = xfs_trans_alloc_empty(mp); error = xfs_iget(mp, tp, head->svh_ino, XCHK_IGET_FLAGS, 0, &ip); xfs_trans_cancel(tp); if (error) diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index d7c4ced47c15..1e6e9c10cea2 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -2996,7 +2996,7 @@ DEFINE_EVENT(xrep_pptr_salvage_class, name, \ DEFINE_XREP_PPTR_SALVAGE_EVENT(xrep_xattr_salvage_pptr); DEFINE_XREP_PPTR_SALVAGE_EVENT(xrep_xattr_insert_pptr); -TRACE_EVENT(xrep_xattr_class, +DECLARE_EVENT_CLASS(xrep_xattr_class, TP_PROTO(struct xfs_inode *ip, struct xfs_inode *arg_ip), TP_ARGS(ip, arg_ip), TP_STRUCT__entry( diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 63151feb9c3f..1ee4f835ac3c 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -234,6 +234,47 @@ xfs_end_bio( } /* + * We cannot cancel the ioend directly on error. We may have already set other + * pages under writeback and hence we have to run I/O completion to mark the + * error state of the pages under writeback appropriately. + * + * If the folio has delalloc blocks on it, the caller is asking us to punch them + * out. If we don't, we can leave a stale delalloc mapping covered by a clean + * page that needs to be dirtied again before the delalloc mapping can be + * converted. This stale delalloc mapping can trip up a later direct I/O read + * operation on the same region. + * + * We prevent this by truncating away the delalloc regions on the folio. Because + * they are delalloc, we can do this without needing a transaction. Indeed - if + * we get ENOSPC errors, we have to be able to do this truncation without a + * transaction as there is no space left for block reservation (typically why + * we see a ENOSPC in writeback). + */ +static void +xfs_discard_folio( + struct folio *folio, + loff_t pos) +{ + struct xfs_inode *ip = XFS_I(folio->mapping->host); + struct xfs_mount *mp = ip->i_mount; + + if (xfs_is_shutdown(mp)) + return; + + xfs_alert_ratelimited(mp, + "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.", + folio, ip->i_ino, pos); + + /* + * The end of the punch range is always the offset of the first + * byte of the next folio. Hence the end offset is only dependent on the + * folio itself and not the start offset that is passed in. + */ + xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos, + folio_pos(folio) + folio_size(folio), NULL); +} + +/* * Fast revalidation of the cached writeback mapping. Return true if the current * mapping is valid, false otherwise. 
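The one-line scrub/trace.h change above fixes a mismatched tracepoint definition: xrep_xattr_class is instantiated through DEFINE_EVENT(), so it must be declared with DECLARE_EVENT_CLASS() rather than TRACE_EVENT() (which defines a class plus a single event of the same name). The usual shape of the pairing, sketched with a hypothetical examplefs event and omitting the surrounding TRACE_SYSTEM boilerplate:

DECLARE_EVENT_CLASS(examplefs_inode_class,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
	),
	TP_fast_assign(
		__entry->ino = inode->i_ino;
	),
	TP_printk("ino 0x%lx", __entry->ino)
);

/* Each DEFINE_EVENT stamps out one tracepoint sharing the class layout. */
DEFINE_EVENT(examplefs_inode_class, examplefs_inode_touch,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);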
*/ @@ -278,13 +319,12 @@ xfs_imap_valid( static int xfs_map_blocks( struct iomap_writepage_ctx *wpc, - struct inode *inode, loff_t offset, unsigned int len) { - struct xfs_inode *ip = XFS_I(inode); + struct xfs_inode *ip = XFS_I(wpc->inode); struct xfs_mount *mp = ip->i_mount; - ssize_t count = i_blocksize(inode); + ssize_t count = i_blocksize(wpc->inode); xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count); xfs_fileoff_t cow_fsb; @@ -436,6 +476,24 @@ allocate_blocks: return 0; } +static ssize_t +xfs_writeback_range( + struct iomap_writepage_ctx *wpc, + struct folio *folio, + u64 offset, + unsigned int len, + u64 end_pos) +{ + ssize_t ret; + + ret = xfs_map_blocks(wpc, offset, len); + if (!ret) + ret = iomap_add_to_ioend(wpc, folio, offset, end_pos, len); + if (ret < 0) + xfs_discard_folio(folio, offset); + return ret; +} + static bool xfs_ioend_needs_wq_completion( struct iomap_ioend *ioend) @@ -456,79 +514,40 @@ xfs_ioend_needs_wq_completion( } static int -xfs_submit_ioend( - struct iomap_writepage_ctx *wpc, - int status) +xfs_writeback_submit( + struct iomap_writepage_ctx *wpc, + int error) { - struct iomap_ioend *ioend = wpc->ioend; - unsigned int nofs_flag; + struct iomap_ioend *ioend = wpc->wb_ctx; /* - * We can allocate memory here while doing writeback on behalf of - * memory reclaim. To avoid memory allocation deadlocks set the - * task-wide nofs context for the following operations. + * Convert CoW extents to regular. + * + * We can allocate memory here while doing writeback on behalf of memory + * reclaim. To avoid memory allocation deadlocks, set the task-wide + * nofs context. */ - nofs_flag = memalloc_nofs_save(); + if (!error && (ioend->io_flags & IOMAP_IOEND_SHARED)) { + unsigned int nofs_flag; - /* Convert CoW extents to regular */ - if (!status && (ioend->io_flags & IOMAP_IOEND_SHARED)) { - status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode), + nofs_flag = memalloc_nofs_save(); + error = xfs_reflink_convert_cow(XFS_I(ioend->io_inode), ioend->io_offset, ioend->io_size); + memalloc_nofs_restore(nofs_flag); } - memalloc_nofs_restore(nofs_flag); - - /* send ioends that might require a transaction to the completion wq */ + /* + * Send ioends that might require a transaction to the completion wq. + */ if (xfs_ioend_needs_wq_completion(ioend)) ioend->io_bio.bi_end_io = xfs_end_bio; - if (status) - return status; - submit_bio(&ioend->io_bio); - return 0; -} - -/* - * If the folio has delalloc blocks on it, the caller is asking us to punch them - * out. If we don't, we can leave a stale delalloc mapping covered by a clean - * page that needs to be dirtied again before the delalloc mapping can be - * converted. This stale delalloc mapping can trip up a later direct I/O read - * operation on the same region. - * - * We prevent this by truncating away the delalloc regions on the folio. Because - * they are delalloc, we can do this without needing a transaction. Indeed - if - * we get ENOSPC errors, we have to be able to do this truncation without a - * transaction as there is no space left for block reservation (typically why - * we see a ENOSPC in writeback). 
- */ -static void -xfs_discard_folio( - struct folio *folio, - loff_t pos) -{ - struct xfs_inode *ip = XFS_I(folio->mapping->host); - struct xfs_mount *mp = ip->i_mount; - - if (xfs_is_shutdown(mp)) - return; - - xfs_alert_ratelimited(mp, - "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.", - folio, ip->i_ino, pos); - - /* - * The end of the punch range is always the offset of the first - * byte of the next folio. Hence the end offset is only dependent on the - * folio itself and not the start offset that is passed in. - */ - xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos, - folio_pos(folio) + folio_size(folio), NULL); + return iomap_ioend_writeback_submit(wpc, error); } static const struct iomap_writeback_ops xfs_writeback_ops = { - .map_blocks = xfs_map_blocks, - .submit_ioend = xfs_submit_ioend, - .discard_folio = xfs_discard_folio, + .writeback_range = xfs_writeback_range, + .writeback_submit = xfs_writeback_submit, }; struct xfs_zoned_writepage_ctx { @@ -545,11 +564,10 @@ XFS_ZWPC(struct iomap_writepage_ctx *ctx) static int xfs_zoned_map_blocks( struct iomap_writepage_ctx *wpc, - struct inode *inode, loff_t offset, unsigned int len) { - struct xfs_inode *ip = XFS_I(inode); + struct xfs_inode *ip = XFS_I(wpc->inode); struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + len); @@ -608,22 +626,44 @@ xfs_zoned_map_blocks( return 0; } -static int -xfs_zoned_submit_ioend( +static ssize_t +xfs_zoned_writeback_range( struct iomap_writepage_ctx *wpc, - int status) + struct folio *folio, + u64 offset, + unsigned int len, + u64 end_pos) { - wpc->ioend->io_bio.bi_end_io = xfs_end_bio; - if (status) - return status; - xfs_zone_alloc_and_submit(wpc->ioend, &XFS_ZWPC(wpc)->open_zone); + ssize_t ret; + + ret = xfs_zoned_map_blocks(wpc, offset, len); + if (!ret) + ret = iomap_add_to_ioend(wpc, folio, offset, end_pos, len); + if (ret < 0) + xfs_discard_folio(folio, offset); + return ret; +} + +static int +xfs_zoned_writeback_submit( + struct iomap_writepage_ctx *wpc, + int error) +{ + struct iomap_ioend *ioend = wpc->wb_ctx; + + ioend->io_bio.bi_end_io = xfs_end_bio; + if (error) { + ioend->io_bio.bi_status = errno_to_blk_status(error); + bio_endio(&ioend->io_bio); + return error; + } + xfs_zone_alloc_and_submit(ioend, &XFS_ZWPC(wpc)->open_zone); return 0; } static const struct iomap_writeback_ops xfs_zoned_writeback_ops = { - .map_blocks = xfs_zoned_map_blocks, - .submit_ioend = xfs_zoned_submit_ioend, - .discard_folio = xfs_discard_folio, + .writeback_range = xfs_zoned_writeback_range, + .writeback_submit = xfs_zoned_writeback_submit, }; STATIC int @@ -636,19 +676,29 @@ xfs_vm_writepages( xfs_iflags_clear(ip, XFS_ITRUNCATED); if (xfs_is_zoned_inode(ip)) { - struct xfs_zoned_writepage_ctx xc = { }; + struct xfs_zoned_writepage_ctx xc = { + .ctx = { + .inode = mapping->host, + .wbc = wbc, + .ops = &xfs_zoned_writeback_ops + }, + }; int error; - error = iomap_writepages(mapping, wbc, &xc.ctx, - &xfs_zoned_writeback_ops); + error = iomap_writepages(&xc.ctx); if (xc.open_zone) xfs_open_zone_put(xc.open_zone); return error; } else { - struct xfs_writepage_ctx wpc = { }; - - return iomap_writepages(mapping, wbc, &wpc.ctx, - &xfs_writeback_ops); + struct xfs_writepage_ctx wpc = { + .ctx = { + .inode = mapping->host, + .wbc = wbc, + .ops = &xfs_writeback_ops + }, + }; + + return iomap_writepages(&wpc.ctx); } } diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c index f683b7a9323f..5eef3bc30bda 100644 
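The xfs_aops.c rework above tracks an iomap interface change: the caller now embeds inode, wbc, and ops in its iomap_writepage_ctx and passes that single context to iomap_writepages(), supplying ->writeback_range and ->writeback_submit in place of ->map_blocks, ->submit_ioend, and ->discard_folio. A reduced sketch of the calling shape (kernel context assumed; examplefs_* names and examplefs_map_blocks are hypothetical, and the iomap struct layout and helpers are taken from the hunks above, not from an independent source):

static ssize_t examplefs_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
{
	ssize_t ret;

	ret = examplefs_map_blocks(wpc, offset, len);	/* hypothetical: fill the mapping for this range */
	if (!ret)
		ret = iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
	return ret;
}

static const struct iomap_writeback_ops examplefs_writeback_ops = {
	.writeback_range	= examplefs_writeback_range,
	.writeback_submit	= iomap_ioend_writeback_submit,	/* plain submission, no per-fs fixups */
};

static int examplefs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode	= mapping->host,
		.wbc	= wbc,
		.ops	= &examplefs_writeback_ops,
	};

	return iomap_writepages(&wpc);
}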
--- a/fs/xfs/xfs_attr_item.c +++ b/fs/xfs/xfs_attr_item.c @@ -91,41 +91,37 @@ xfs_attri_log_nameval_alloc( name_len + new_name_len + value_len + new_value_len); - nv->name.i_addr = nv + 1; - nv->name.i_len = name_len; - nv->name.i_type = XLOG_REG_TYPE_ATTR_NAME; - memcpy(nv->name.i_addr, name, name_len); + nv->name.iov_base = nv + 1; + nv->name.iov_len = name_len; + memcpy(nv->name.iov_base, name, name_len); if (new_name_len) { - nv->new_name.i_addr = nv->name.i_addr + name_len; - nv->new_name.i_len = new_name_len; - memcpy(nv->new_name.i_addr, new_name, new_name_len); + nv->new_name.iov_base = nv->name.iov_base + name_len; + nv->new_name.iov_len = new_name_len; + memcpy(nv->new_name.iov_base, new_name, new_name_len); } else { - nv->new_name.i_addr = NULL; - nv->new_name.i_len = 0; + nv->new_name.iov_base = NULL; + nv->new_name.iov_len = 0; } - nv->new_name.i_type = XLOG_REG_TYPE_ATTR_NEWNAME; if (value_len) { - nv->value.i_addr = nv->name.i_addr + name_len + new_name_len; - nv->value.i_len = value_len; - memcpy(nv->value.i_addr, value, value_len); + nv->value.iov_base = nv->name.iov_base + name_len + new_name_len; + nv->value.iov_len = value_len; + memcpy(nv->value.iov_base, value, value_len); } else { - nv->value.i_addr = NULL; - nv->value.i_len = 0; + nv->value.iov_base = NULL; + nv->value.iov_len = 0; } - nv->value.i_type = XLOG_REG_TYPE_ATTR_VALUE; if (new_value_len) { - nv->new_value.i_addr = nv->name.i_addr + name_len + + nv->new_value.iov_base = nv->name.iov_base + name_len + new_name_len + value_len; - nv->new_value.i_len = new_value_len; - memcpy(nv->new_value.i_addr, new_value, new_value_len); + nv->new_value.iov_len = new_value_len; + memcpy(nv->new_value.iov_base, new_value, new_value_len); } else { - nv->new_value.i_addr = NULL; - nv->new_value.i_len = 0; + nv->new_value.iov_base = NULL; + nv->new_value.iov_len = 0; } - nv->new_value.i_type = XLOG_REG_TYPE_ATTR_NEWVALUE; refcount_set(&nv->refcount, 1); return nv; @@ -170,21 +166,21 @@ xfs_attri_item_size( *nvecs += 2; *nbytes += sizeof(struct xfs_attri_log_format) + - xlog_calc_iovec_len(nv->name.i_len); + xlog_calc_iovec_len(nv->name.iov_len); - if (nv->new_name.i_len) { + if (nv->new_name.iov_len) { *nvecs += 1; - *nbytes += xlog_calc_iovec_len(nv->new_name.i_len); + *nbytes += xlog_calc_iovec_len(nv->new_name.iov_len); } - if (nv->value.i_len) { + if (nv->value.iov_len) { *nvecs += 1; - *nbytes += xlog_calc_iovec_len(nv->value.i_len); + *nbytes += xlog_calc_iovec_len(nv->value.iov_len); } - if (nv->new_value.i_len) { + if (nv->new_value.iov_len) { *nvecs += 1; - *nbytes += xlog_calc_iovec_len(nv->new_value.i_len); + *nbytes += xlog_calc_iovec_len(nv->new_value.iov_len); } } @@ -212,31 +208,36 @@ xfs_attri_item_format( * the log recovery. 
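From xfs_log_recover.h onward, the recovery-side region buffers move from struct xfs_log_iovec (i_addr/i_len plus a per-region i_type) to plain struct kvec (iov_base/iov_len), with the log-region type now passed explicitly to xlog_copy_iovec() at format time, as the attr-item hunks show. A hedged sketch of what a pass-2 recovery handler looks like against the kvec-based ri_buf; the kvec fields and corruption-reporting helpers follow the hunks above, while examplefs_log_format and the function name are hypothetical:

/* Sketch, kernel context assumed; examplefs_log_format is hypothetical. */
STATIC int
examplefs_commit_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct examplefs_log_format	*fmt;

	/* Region 0 must be exactly one on-disk format structure. */
	if (item->ri_buf[0].iov_len != sizeof(*fmt)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].iov_base,
				item->ri_buf[0].iov_len);
		return -EFSCORRUPTED;
	}
	fmt = item->ri_buf[0].iov_base;

	/* ... replay the logged operation described by *fmt ... */
	return 0;
}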
*/ - ASSERT(nv->name.i_len > 0); + ASSERT(nv->name.iov_len > 0); attrip->attri_format.alfi_size++; - if (nv->new_name.i_len > 0) + if (nv->new_name.iov_len > 0) attrip->attri_format.alfi_size++; - if (nv->value.i_len > 0) + if (nv->value.iov_len > 0) attrip->attri_format.alfi_size++; - if (nv->new_value.i_len > 0) + if (nv->new_value.iov_len > 0) attrip->attri_format.alfi_size++; xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTRI_FORMAT, &attrip->attri_format, sizeof(struct xfs_attri_log_format)); - xlog_copy_from_iovec(lv, &vecp, &nv->name); - if (nv->new_name.i_len > 0) - xlog_copy_from_iovec(lv, &vecp, &nv->new_name); + xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTR_NAME, nv->name.iov_base, + nv->name.iov_len); - if (nv->value.i_len > 0) - xlog_copy_from_iovec(lv, &vecp, &nv->value); + if (nv->new_name.iov_len > 0) + xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTR_NEWNAME, + nv->new_name.iov_base, nv->new_name.iov_len); - if (nv->new_value.i_len > 0) - xlog_copy_from_iovec(lv, &vecp, &nv->new_value); + if (nv->value.iov_len > 0) + xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTR_VALUE, + nv->value.iov_base, nv->value.iov_len); + + if (nv->new_value.iov_len > 0) + xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTR_NEWVALUE, + nv->new_value.iov_base, nv->new_value.iov_len); } /* @@ -383,22 +384,22 @@ xfs_attr_log_item( attrp->alfi_ino = args->dp->i_ino; ASSERT(!(attr->xattri_op_flags & ~XFS_ATTRI_OP_FLAGS_TYPE_MASK)); attrp->alfi_op_flags = attr->xattri_op_flags; - attrp->alfi_value_len = nv->value.i_len; + attrp->alfi_value_len = nv->value.iov_len; switch (xfs_attr_log_item_op(attrp)) { case XFS_ATTRI_OP_FLAGS_PPTR_REPLACE: - ASSERT(nv->value.i_len == nv->new_value.i_len); + ASSERT(nv->value.iov_len == nv->new_value.iov_len); attrp->alfi_igen = VFS_I(args->dp)->i_generation; - attrp->alfi_old_name_len = nv->name.i_len; - attrp->alfi_new_name_len = nv->new_name.i_len; + attrp->alfi_old_name_len = nv->name.iov_len; + attrp->alfi_new_name_len = nv->new_name.iov_len; break; case XFS_ATTRI_OP_FLAGS_PPTR_REMOVE: case XFS_ATTRI_OP_FLAGS_PPTR_SET: attrp->alfi_igen = VFS_I(args->dp)->i_generation; fallthrough; default: - attrp->alfi_name_len = nv->name.i_len; + attrp->alfi_name_len = nv->name.iov_len; break; } @@ -616,10 +617,7 @@ xfs_attri_iread_extents( struct xfs_trans *tp; int error; - error = xfs_trans_alloc_empty(ip->i_mount, &tp); - if (error) - return error; - + tp = xfs_trans_alloc_empty(ip->i_mount); xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_iread_extents(tp, ip, XFS_ATTR_FORK); xfs_iunlock(ip, XFS_ILOCK_EXCL); @@ -690,14 +688,14 @@ xfs_attri_recover_work( args->dp = ip; args->geo = mp->m_attr_geo; args->whichfork = XFS_ATTR_FORK; - args->name = nv->name.i_addr; - args->namelen = nv->name.i_len; - args->new_name = nv->new_name.i_addr; - args->new_namelen = nv->new_name.i_len; - args->value = nv->value.i_addr; - args->valuelen = nv->value.i_len; - args->new_value = nv->new_value.i_addr; - args->new_valuelen = nv->new_value.i_len; + args->name = nv->name.iov_base; + args->namelen = nv->name.iov_len; + args->new_name = nv->new_name.iov_base; + args->new_namelen = nv->new_name.iov_len; + args->value = nv->value.iov_base; + args->valuelen = nv->value.iov_len; + args->new_value = nv->new_value.iov_base; + args->new_valuelen = nv->new_value.iov_len; args->attr_filter = attrp->alfi_attr_filter & XFS_ATTRI_FILTER_MASK; args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT | XFS_DA_OP_LOGGED; @@ -754,8 +752,8 @@ xfs_attr_recover_work( */ attrp = &attrip->attri_format; if (!xfs_attri_validate(mp, attrp) || - 
!xfs_attr_namecheck(attrp->alfi_attr_filter, nv->name.i_addr, - nv->name.i_len)) + !xfs_attr_namecheck(attrp->alfi_attr_filter, nv->name.iov_base, + nv->name.iov_len)) return -EFSCORRUPTED; attr = xfs_attri_recover_work(mp, dfp, attrp, &ip, nv); @@ -953,50 +951,50 @@ static inline void * xfs_attri_validate_name_iovec( struct xfs_mount *mp, struct xfs_attri_log_format *attri_formatp, - const struct xfs_log_iovec *iovec, + const struct kvec *iovec, unsigned int name_len) { - if (iovec->i_len != xlog_calc_iovec_len(name_len)) { + if (iovec->iov_len != xlog_calc_iovec_len(name_len)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, attri_formatp, sizeof(*attri_formatp)); return NULL; } - if (!xfs_attr_namecheck(attri_formatp->alfi_attr_filter, iovec->i_addr, + if (!xfs_attr_namecheck(attri_formatp->alfi_attr_filter, iovec->iov_base, name_len)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, attri_formatp, sizeof(*attri_formatp)); XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - iovec->i_addr, iovec->i_len); + iovec->iov_base, iovec->iov_len); return NULL; } - return iovec->i_addr; + return iovec->iov_base; } static inline void * xfs_attri_validate_value_iovec( struct xfs_mount *mp, struct xfs_attri_log_format *attri_formatp, - const struct xfs_log_iovec *iovec, + const struct kvec *iovec, unsigned int value_len) { - if (iovec->i_len != xlog_calc_iovec_len(value_len)) { + if (iovec->iov_len != xlog_calc_iovec_len(value_len)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, attri_formatp, sizeof(*attri_formatp)); return NULL; } if ((attri_formatp->alfi_attr_filter & XFS_ATTR_PARENT) && - !xfs_parent_valuecheck(mp, iovec->i_addr, value_len)) { + !xfs_parent_valuecheck(mp, iovec->iov_base, value_len)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, attri_formatp, sizeof(*attri_formatp)); XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - iovec->i_addr, iovec->i_len); + iovec->iov_base, iovec->iov_len); return NULL; } - return iovec->i_addr; + return iovec->iov_base; } STATIC int @@ -1023,13 +1021,13 @@ xlog_recover_attri_commit_pass2( /* Validate xfs_attri_log_format before the large memory allocation */ len = sizeof(struct xfs_attri_log_format); - if (item->ri_buf[i].i_len != len) { + if (item->ri_buf[i].iov_len != len) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } - attri_formatp = item->ri_buf[i].i_addr; + attri_formatp = item->ri_buf[i].iov_base; if (!xfs_attri_validate(mp, attri_formatp)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, attri_formatp, len); @@ -1218,10 +1216,10 @@ xlog_recover_attrd_commit_pass2( { struct xfs_attrd_log_format *attrd_formatp; - attrd_formatp = item->ri_buf[0].i_addr; - if (item->ri_buf[0].i_len != sizeof(struct xfs_attrd_log_format)) { + attrd_formatp = item->ri_buf[0].iov_base; + if (item->ri_buf[0].iov_len != sizeof(struct xfs_attrd_log_format)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } diff --git a/fs/xfs/xfs_attr_item.h b/fs/xfs/xfs_attr_item.h index e74128cbb722..d108a11b55ae 100644 --- a/fs/xfs/xfs_attr_item.h +++ b/fs/xfs/xfs_attr_item.h @@ -12,10 +12,10 @@ struct xfs_mount; struct kmem_zone; struct xfs_attri_log_nameval { - struct xfs_log_iovec name; - struct xfs_log_iovec new_name; /* PPTR_REPLACE only */ - struct xfs_log_iovec 
value; - struct xfs_log_iovec new_value; /* PPTR_REPLACE only */ + struct kvec name; + struct kvec new_name; /* PPTR_REPLACE only */ + struct kvec value; + struct kvec new_value; /* PPTR_REPLACE only */ refcount_t refcount; /* name and value follow the end of this struct */ diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c index 646c515ee355..80f0c4bcc483 100644 --- a/fs/xfs/xfs_bmap_item.c +++ b/fs/xfs/xfs_bmap_item.c @@ -654,24 +654,24 @@ xlog_recover_bui_commit_pass2( struct xfs_bui_log_format *bui_formatp; size_t len; - bui_formatp = item->ri_buf[0].i_addr; + bui_formatp = item->ri_buf[0].iov_base; - if (item->ri_buf[0].i_len < xfs_bui_log_format_sizeof(0)) { + if (item->ri_buf[0].iov_len < xfs_bui_log_format_sizeof(0)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } len = xfs_bui_log_format_sizeof(bui_formatp->bui_nextents); - if (item->ri_buf[0].i_len != len) { + if (item->ri_buf[0].iov_len != len) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } @@ -705,10 +705,10 @@ xlog_recover_bud_commit_pass2( { struct xfs_bud_log_format *bud_formatp; - bud_formatp = item->ri_buf[0].i_addr; - if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) { + bud_formatp = item->ri_buf[0].iov_base; + if (item->ri_buf[0].iov_len != sizeof(struct xfs_bud_log_format)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 7fc54725c5f6..8d85b5eee444 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -90,16 +90,16 @@ xfs_buf_item_relse( /* Is this log iovec plausibly large enough to contain the buffer log format? 
*/ bool xfs_buf_log_check_iovec( - struct xfs_log_iovec *iovec) + struct kvec *iovec) { - struct xfs_buf_log_format *blfp = iovec->i_addr; + struct xfs_buf_log_format *blfp = iovec->iov_base; char *bmp_end; char *item_end; - if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len) + if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->iov_len) return false; - item_end = (char *)iovec->i_addr + iovec->i_len; + item_end = (char *)iovec->iov_base + iovec->iov_len; bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size]; return bmp_end <= item_end; } diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index 416890b84f8c..3159325dd17b 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h @@ -61,7 +61,7 @@ static inline void xfs_buf_dquot_iodone(struct xfs_buf *bp) } #endif /* CONFIG_XFS_QUOTA */ void xfs_buf_iodone(struct xfs_buf *); -bool xfs_buf_log_check_iovec(struct xfs_log_iovec *iovec); +bool xfs_buf_log_check_iovec(struct kvec *iovec); unsigned int xfs_buf_inval_log_space(unsigned int map_count, unsigned int blocksize); diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c index d4c5cef5bc43..5d58e2ae4972 100644 --- a/fs/xfs/xfs_buf_item_recover.c +++ b/fs/xfs/xfs_buf_item_recover.c @@ -159,7 +159,7 @@ STATIC enum xlog_recover_reorder xlog_recover_buf_reorder( struct xlog_recover_item *item) { - struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr; + struct xfs_buf_log_format *buf_f = item->ri_buf[0].iov_base; if (buf_f->blf_flags & XFS_BLF_CANCEL) return XLOG_REORDER_CANCEL_LIST; @@ -173,7 +173,7 @@ xlog_recover_buf_ra_pass2( struct xlog *log, struct xlog_recover_item *item) { - struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr; + struct xfs_buf_log_format *buf_f = item->ri_buf[0].iov_base; xlog_buf_readahead(log, buf_f->blf_blkno, buf_f->blf_len, NULL); } @@ -187,11 +187,11 @@ xlog_recover_buf_commit_pass1( struct xlog *log, struct xlog_recover_item *item) { - struct xfs_buf_log_format *bf = item->ri_buf[0].i_addr; + struct xfs_buf_log_format *bf = item->ri_buf[0].iov_base; if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) { - xfs_err(log->l_mp, "bad buffer log item size (%d)", - item->ri_buf[0].i_len); + xfs_err(log->l_mp, "bad buffer log item size (%zd)", + item->ri_buf[0].iov_len); return -EFSCORRUPTED; } @@ -487,8 +487,8 @@ xlog_recover_do_reg_buffer( nbits = xfs_contig_bits(buf_f->blf_data_map, buf_f->blf_map_size, bit); ASSERT(nbits > 0); - ASSERT(item->ri_buf[i].i_addr != NULL); - ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0); + ASSERT(item->ri_buf[i].iov_base != NULL); + ASSERT(item->ri_buf[i].iov_len % XFS_BLF_CHUNK == 0); ASSERT(BBTOB(bp->b_length) >= ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT)); @@ -500,8 +500,8 @@ xlog_recover_do_reg_buffer( * the log. Hence we need to trim nbits back to the length of * the current region being copied out of the log. */ - if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT)) - nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT; + if (item->ri_buf[i].iov_len < (nbits << XFS_BLF_SHIFT)) + nbits = item->ri_buf[i].iov_len >> XFS_BLF_SHIFT; /* * Do a sanity check if this is a dquot buffer. 
Just checking @@ -511,18 +511,18 @@ xlog_recover_do_reg_buffer( fa = NULL; if (buf_f->blf_flags & (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { - if (item->ri_buf[i].i_addr == NULL) { + if (item->ri_buf[i].iov_base == NULL) { xfs_alert(mp, "XFS: NULL dquot in %s.", __func__); goto next; } - if (item->ri_buf[i].i_len < size_disk_dquot) { + if (item->ri_buf[i].iov_len < size_disk_dquot) { xfs_alert(mp, - "XFS: dquot too small (%d) in %s.", - item->ri_buf[i].i_len, __func__); + "XFS: dquot too small (%zd) in %s.", + item->ri_buf[i].iov_len, __func__); goto next; } - fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr, -1); + fa = xfs_dquot_verify(mp, item->ri_buf[i].iov_base, -1); if (fa) { xfs_alert(mp, "dquot corrupt at %pS trying to replay into block 0x%llx", @@ -533,7 +533,7 @@ xlog_recover_do_reg_buffer( memcpy(xfs_buf_offset(bp, (uint)bit << XFS_BLF_SHIFT), /* dest */ - item->ri_buf[i].i_addr, /* source */ + item->ri_buf[i].iov_base, /* source */ nbits<<XFS_BLF_SHIFT); /* length */ next: i++; @@ -669,8 +669,8 @@ xlog_recover_do_inode_buffer( if (next_unlinked_offset < reg_buf_offset) continue; - ASSERT(item->ri_buf[item_index].i_addr != NULL); - ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0); + ASSERT(item->ri_buf[item_index].iov_base != NULL); + ASSERT((item->ri_buf[item_index].iov_len % XFS_BLF_CHUNK) == 0); ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length)); /* @@ -678,7 +678,7 @@ xlog_recover_do_inode_buffer( * current di_next_unlinked field. Extract its value * and copy it to the buffer copy. */ - logged_nextp = item->ri_buf[item_index].i_addr + + logged_nextp = item->ri_buf[item_index].iov_base + next_unlinked_offset - reg_buf_offset; if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) { xfs_alert(mp, @@ -1002,7 +1002,7 @@ xlog_recover_buf_commit_pass2( struct xlog_recover_item *item, xfs_lsn_t current_lsn) { - struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr; + struct xfs_buf_log_format *buf_f = item->ri_buf[0].iov_base; struct xfs_mount *mp = log->l_mp; struct xfs_buf *bp; int error; diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index 603d51365645..ee49f20875af 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -189,9 +189,7 @@ xfs_trim_gather_extents( */ xfs_log_force(mp, XFS_LOG_SYNC); - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; + tp = xfs_trans_alloc_empty(mp); error = xfs_alloc_read_agf(pag, tp, 0, &agbp); if (error) @@ -583,9 +581,7 @@ xfs_trim_rtextents( struct xfs_trans *tp; int error; - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; + tp = xfs_trans_alloc_empty(mp); /* * Walk the free ranges between low and high. The query_range function @@ -701,9 +697,7 @@ xfs_trim_rtgroup_extents( struct xfs_trans *tp; int error; - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; + tp = xfs_trans_alloc_empty(mp); /* * Walk the free ranges between low and high. 
The query_range function diff --git a/fs/xfs/xfs_dquot_item_recover.c b/fs/xfs/xfs_dquot_item_recover.c index 2c2720ce6923..89bc9bcaf51e 100644 --- a/fs/xfs/xfs_dquot_item_recover.c +++ b/fs/xfs/xfs_dquot_item_recover.c @@ -34,10 +34,10 @@ xlog_recover_dquot_ra_pass2( if (mp->m_qflags == 0) return; - recddq = item->ri_buf[1].i_addr; + recddq = item->ri_buf[1].iov_base; if (recddq == NULL) return; - if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) + if (item->ri_buf[1].iov_len < sizeof(struct xfs_disk_dquot)) return; type = recddq->d_type & XFS_DQTYPE_REC_MASK; @@ -45,7 +45,7 @@ xlog_recover_dquot_ra_pass2( if (log->l_quotaoffs_flag & type) return; - dq_f = item->ri_buf[0].i_addr; + dq_f = item->ri_buf[0].iov_base; ASSERT(dq_f); ASSERT(dq_f->qlf_len == 1); @@ -79,14 +79,14 @@ xlog_recover_dquot_commit_pass2( if (mp->m_qflags == 0) return 0; - recddq = item->ri_buf[1].i_addr; + recddq = item->ri_buf[1].iov_base; if (recddq == NULL) { xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); return -EFSCORRUPTED; } - if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) { - xfs_alert(log->l_mp, "dquot too small (%d) in %s.", - item->ri_buf[1].i_len, __func__); + if (item->ri_buf[1].iov_len < sizeof(struct xfs_disk_dquot)) { + xfs_alert(log->l_mp, "dquot too small (%zd) in %s.", + item->ri_buf[1].iov_len, __func__); return -EFSCORRUPTED; } @@ -108,7 +108,7 @@ xlog_recover_dquot_commit_pass2( * The other possibility, of course, is that the quota subsystem was * removed since the last mount - ENOSYS. */ - dq_f = item->ri_buf[0].i_addr; + dq_f = item->ri_buf[0].iov_base; ASSERT(dq_f); fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id); if (fa) { @@ -147,7 +147,7 @@ xlog_recover_dquot_commit_pass2( } } - memcpy(ddq, recddq, item->ri_buf[1].i_len); + memcpy(ddq, recddq, item->ri_buf[1].iov_len); if (xfs_has_crc(mp)) { xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk), XFS_DQUOT_CRC_OFF); @@ -192,7 +192,7 @@ xlog_recover_quotaoff_commit_pass1( struct xlog *log, struct xlog_recover_item *item) { - struct xfs_qoff_logformat *qoff_f = item->ri_buf[0].i_addr; + struct xfs_qoff_logformat *qoff_f = item->ri_buf[0].iov_base; ASSERT(qoff_f); /* diff --git a/fs/xfs/xfs_exchmaps_item.c b/fs/xfs/xfs_exchmaps_item.c index 264a121c5e16..229cbe0adf17 100644 --- a/fs/xfs/xfs_exchmaps_item.c +++ b/fs/xfs/xfs_exchmaps_item.c @@ -558,12 +558,12 @@ xlog_recover_xmi_commit_pass2( size_t len; len = sizeof(struct xfs_xmi_log_format); - if (item->ri_buf[0].i_len != len) { + if (item->ri_buf[0].iov_len != len) { XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); return -EFSCORRUPTED; } - xmi_formatp = item->ri_buf[0].i_addr; + xmi_formatp = item->ri_buf[0].iov_base; if (xmi_formatp->__pad != 0) { XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); return -EFSCORRUPTED; @@ -598,8 +598,8 @@ xlog_recover_xmd_commit_pass2( { struct xfs_xmd_log_format *xmd_formatp; - xmd_formatp = item->ri_buf[0].i_addr; - if (item->ri_buf[0].i_len != sizeof(struct xfs_xmd_log_format)) { + xmd_formatp = item->ri_buf[0].iov_base; + if (item->ri_buf[0].iov_len != sizeof(struct xfs_xmd_log_format)) { XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); return -EFSCORRUPTED; } diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index d574f5f639fa..47ee598a9827 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -182,15 +182,18 @@ xfs_efi_init( * It will handle the conversion of formats if necessary. 
*/ STATIC int -xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) +xfs_efi_copy_format( + struct kvec *buf, + struct xfs_efi_log_format *dst_efi_fmt) { - xfs_efi_log_format_t *src_efi_fmt = buf->i_addr; - uint i; - uint len = xfs_efi_log_format_sizeof(src_efi_fmt->efi_nextents); - uint len32 = xfs_efi_log_format32_sizeof(src_efi_fmt->efi_nextents); - uint len64 = xfs_efi_log_format64_sizeof(src_efi_fmt->efi_nextents); + struct xfs_efi_log_format *src_efi_fmt = buf->iov_base; + uint len, len32, len64, i; - if (buf->i_len == len) { + len = xfs_efi_log_format_sizeof(src_efi_fmt->efi_nextents); + len32 = xfs_efi_log_format32_sizeof(src_efi_fmt->efi_nextents); + len64 = xfs_efi_log_format64_sizeof(src_efi_fmt->efi_nextents); + + if (buf->iov_len == len) { memcpy(dst_efi_fmt, src_efi_fmt, offsetof(struct xfs_efi_log_format, efi_extents)); for (i = 0; i < src_efi_fmt->efi_nextents; i++) @@ -198,8 +201,8 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) &src_efi_fmt->efi_extents[i], sizeof(struct xfs_extent)); return 0; - } else if (buf->i_len == len32) { - xfs_efi_log_format_32_t *src_efi_fmt_32 = buf->i_addr; + } else if (buf->iov_len == len32) { + xfs_efi_log_format_32_t *src_efi_fmt_32 = buf->iov_base; dst_efi_fmt->efi_type = src_efi_fmt_32->efi_type; dst_efi_fmt->efi_size = src_efi_fmt_32->efi_size; @@ -212,8 +215,8 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) src_efi_fmt_32->efi_extents[i].ext_len; } return 0; - } else if (buf->i_len == len64) { - xfs_efi_log_format_64_t *src_efi_fmt_64 = buf->i_addr; + } else if (buf->iov_len == len64) { + xfs_efi_log_format_64_t *src_efi_fmt_64 = buf->iov_base; dst_efi_fmt->efi_type = src_efi_fmt_64->efi_type; dst_efi_fmt->efi_size = src_efi_fmt_64->efi_size; @@ -227,8 +230,8 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) } return 0; } - XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, NULL, buf->i_addr, - buf->i_len); + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, NULL, buf->iov_base, + buf->iov_len); return -EFSCORRUPTED; } @@ -865,11 +868,11 @@ xlog_recover_efi_commit_pass2( struct xfs_efi_log_format *efi_formatp; int error; - efi_formatp = item->ri_buf[0].i_addr; + efi_formatp = item->ri_buf[0].iov_base; - if (item->ri_buf[0].i_len < xfs_efi_log_format_sizeof(0)) { + if (item->ri_buf[0].iov_len < xfs_efi_log_format_sizeof(0)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } @@ -904,11 +907,11 @@ xlog_recover_rtefi_commit_pass2( struct xfs_efi_log_format *efi_formatp; int error; - efi_formatp = item->ri_buf[0].i_addr; + efi_formatp = item->ri_buf[0].iov_base; - if (item->ri_buf[0].i_len < xfs_efi_log_format_sizeof(0)) { + if (item->ri_buf[0].iov_len < xfs_efi_log_format_sizeof(0)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } @@ -933,7 +936,7 @@ xlog_recover_rtefi_commit_pass2( xfs_lsn_t lsn) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } #endif @@ -958,9 +961,9 @@ xlog_recover_efd_commit_pass2( xfs_lsn_t lsn) { struct xfs_efd_log_format *efd_formatp; - int buflen = item->ri_buf[0].i_len; + int buflen = 
item->ri_buf[0].iov_len; - efd_formatp = item->ri_buf[0].i_addr; + efd_formatp = item->ri_buf[0].iov_base; if (buflen < sizeof(struct xfs_efd_log_format)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, @@ -968,9 +971,9 @@ xlog_recover_efd_commit_pass2( return -EFSCORRUPTED; } - if (item->ri_buf[0].i_len != xfs_efd_log_format32_sizeof( + if (item->ri_buf[0].iov_len != xfs_efd_log_format32_sizeof( efd_formatp->efd_nextents) && - item->ri_buf[0].i_len != xfs_efd_log_format64_sizeof( + item->ri_buf[0].iov_len != xfs_efd_log_format64_sizeof( efd_formatp->efd_nextents)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, efd_formatp, buflen); @@ -995,9 +998,9 @@ xlog_recover_rtefd_commit_pass2( xfs_lsn_t lsn) { struct xfs_efd_log_format *efd_formatp; - int buflen = item->ri_buf[0].i_len; + int buflen = item->ri_buf[0].iov_len; - efd_formatp = item->ri_buf[0].i_addr; + efd_formatp = item->ri_buf[0].iov_base; if (buflen < sizeof(struct xfs_efd_log_format)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, @@ -1005,9 +1008,9 @@ xlog_recover_rtefd_commit_pass2( return -EFSCORRUPTED; } - if (item->ri_buf[0].i_len != xfs_efd_log_format32_sizeof( + if (item->ri_buf[0].iov_len != xfs_efd_log_format32_sizeof( efd_formatp->efd_nextents) && - item->ri_buf[0].i_len != xfs_efd_log_format64_sizeof( + item->ri_buf[0].iov_len != xfs_efd_log_format64_sizeof( efd_formatp->efd_nextents)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, efd_formatp, buflen); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 38e365b16348..b04c59d87378 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -497,7 +497,7 @@ restart: static ssize_t xfs_zoned_write_space_reserve( - struct xfs_inode *ip, + struct xfs_mount *mp, struct kiocb *iocb, struct iov_iter *from, unsigned int flags, @@ -533,8 +533,8 @@ xfs_zoned_write_space_reserve( * * Any remaining block will be returned after the write. 
*/ - return xfs_zoned_space_reserve(ip, - XFS_B_TO_FSB(ip->i_mount, count) + 1 + 2, flags, ac); + return xfs_zoned_space_reserve(mp, XFS_B_TO_FSB(mp, count) + 1 + 2, + flags, ac); } static int @@ -718,13 +718,13 @@ xfs_file_dio_write_zoned( struct xfs_zone_alloc_ctx ac = { }; ssize_t ret; - ret = xfs_zoned_write_space_reserve(ip, iocb, from, 0, &ac); + ret = xfs_zoned_write_space_reserve(ip->i_mount, iocb, from, 0, &ac); if (ret < 0) return ret; ret = xfs_file_dio_write_aligned(ip, iocb, from, &xfs_zoned_direct_write_iomap_ops, &xfs_dio_zoned_write_ops, &ac); - xfs_zoned_space_unreserve(ip, &ac); + xfs_zoned_space_unreserve(ip->i_mount, &ac); return ret; } @@ -979,7 +979,8 @@ write_retry: trace_xfs_file_buffered_write(iocb, from); ret = iomap_file_buffered_write(iocb, from, - &xfs_buffered_write_iomap_ops, NULL); + &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops, + NULL); /* * If we hit a space limit, try to free up some lingering preallocated @@ -1032,7 +1033,7 @@ xfs_file_buffered_write_zoned( struct xfs_zone_alloc_ctx ac = { }; ssize_t ret; - ret = xfs_zoned_write_space_reserve(ip, iocb, from, XFS_ZR_GREEDY, &ac); + ret = xfs_zoned_write_space_reserve(mp, iocb, from, XFS_ZR_GREEDY, &ac); if (ret < 0) return ret; @@ -1059,7 +1060,8 @@ xfs_file_buffered_write_zoned( retry: trace_xfs_file_buffered_write(iocb, from); ret = iomap_file_buffered_write(iocb, from, - &xfs_buffered_write_iomap_ops, &ac); + &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops, + &ac); if (ret == -ENOSPC && !cleared_space) { /* * Kick off writeback to convert delalloc space and release the @@ -1073,7 +1075,7 @@ retry: out_unlock: xfs_iunlock(ip, iolock); out_unreserve: - xfs_zoned_space_unreserve(ip, &ac); + xfs_zoned_space_unreserve(ip->i_mount, &ac); if (ret > 0) { XFS_STATS_ADD(mp, xs_write_bytes, ret); ret = generic_write_sync(iocb, ret); @@ -1414,11 +1416,11 @@ xfs_file_zoned_fallocate( struct xfs_inode *ip = XFS_I(file_inode(file)); int error; - error = xfs_zoned_space_reserve(ip, 2, XFS_ZR_RESERVED, &ac); + error = xfs_zoned_space_reserve(ip->i_mount, 2, XFS_ZR_RESERVED, &ac); if (error) return error; error = __xfs_file_fallocate(file, mode, offset, len, &ac); - xfs_zoned_space_unreserve(ip, &ac); + xfs_zoned_space_unreserve(ip->i_mount, &ac); return error; } @@ -1828,12 +1830,12 @@ xfs_write_fault_zoned( * But as the overallocation is limited to less than a folio and will be * release instantly that's just fine. */ - error = xfs_zoned_space_reserve(ip, XFS_B_TO_FSB(ip->i_mount, len), 0, - &ac); + error = xfs_zoned_space_reserve(ip->i_mount, + XFS_B_TO_FSB(ip->i_mount, len), 0, &ac); if (error < 0) return vmf_fs_error(error); ret = __xfs_write_fault(vmf, order, &ac); - xfs_zoned_space_unreserve(ip, &ac); + xfs_zoned_space_unreserve(ip->i_mount, &ac); return ret; } @@ -1914,10 +1916,10 @@ static const struct vm_operations_struct xfs_file_vm_ops = { }; STATIC int -xfs_file_mmap( - struct file *file, - struct vm_area_struct *vma) +xfs_file_mmap_prepare( + struct vm_area_desc *desc) { + struct file *file = desc->file; struct inode *inode = file_inode(file); struct xfs_buftarg *target = xfs_inode_buftarg(XFS_I(inode)); @@ -1925,13 +1927,14 @@ xfs_file_mmap( * We don't support synchronous mappings for non-DAX files and * for DAX files if underneath dax_device is not synchronous. 
*/ - if (!daxdev_mapping_supported(vma, target->bt_daxdev)) + if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), + target->bt_daxdev)) return -EOPNOTSUPP; file_accessed(file); - vma->vm_ops = &xfs_file_vm_ops; + desc->vm_ops = &xfs_file_vm_ops; if (IS_DAX(inode)) - vm_flags_set(vma, VM_HUGEPAGE); + desc->vm_flags |= VM_HUGEPAGE; return 0; } @@ -1946,7 +1949,7 @@ const struct file_operations xfs_file_operations = { #ifdef CONFIG_COMPAT .compat_ioctl = xfs_file_compat_ioctl, #endif - .mmap = xfs_file_mmap, + .mmap_prepare = xfs_file_mmap_prepare, .open = xfs_file_open, .release = xfs_file_release, .fsync = xfs_file_fsync, diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index 414b27a86458..af68c7de8ee8 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -1270,9 +1270,7 @@ xfs_getfsmap( * buffer locking abilities to detect cycles in the rmapbt * without deadlocking. */ - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - break; + tp = xfs_trans_alloc_empty(mp); info.dev = handlers[i].dev; info.last = false; diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index bbc2f2973dcc..4cf7abe50143 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -893,10 +893,7 @@ xfs_metafile_iget( struct xfs_trans *tp; int error; - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; - + tp = xfs_trans_alloc_empty(mp); error = xfs_trans_metafile_iget(tp, ino, metafile_type, ipp); xfs_trans_cancel(tp); return error; diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c index 4345db501714..f83ec2bd0583 100644 --- a/fs/xfs/xfs_icreate_item.c +++ b/fs/xfs/xfs_icreate_item.c @@ -158,7 +158,7 @@ xlog_recover_icreate_commit_pass2( int nbufs; int i; - icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; + icl = (struct xfs_icreate_log *)item->ri_buf[0].iov_base; if (icl->icl_type != XFS_LI_ICREATE) { xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); return -EINVAL; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 761a996a857c..9c39251961a3 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2932,12 +2932,9 @@ xfs_inode_reload_unlinked( struct xfs_inode *ip) { struct xfs_trans *tp; - int error; - - error = xfs_trans_alloc_empty(ip->i_mount, &tp); - if (error) - return error; + int error = 0; + tp = xfs_trans_alloc_empty(ip->i_mount); xfs_ilock(ip, XFS_ILOCK_SHARED); if (xfs_inode_unlinked_incomplete(ip)) error = xfs_inode_reload_unlinked_bucket(tp, ip); diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 285e27ff89e2..829675700fcd 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -1182,12 +1182,12 @@ xfs_iflush_shutdown_abort( */ int xfs_inode_item_format_convert( - struct xfs_log_iovec *buf, + struct kvec *buf, struct xfs_inode_log_format *in_f) { - struct xfs_inode_log_format_32 *in_f32 = buf->i_addr; + struct xfs_inode_log_format_32 *in_f32 = buf->iov_base; - if (buf->i_len != sizeof(*in_f32)) { + if (buf->iov_len != sizeof(*in_f32)) { XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); return -EFSCORRUPTED; } diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index 377e06007804..ba92ce11a011 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -46,8 +46,8 @@ extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); extern void xfs_inode_item_destroy(struct xfs_inode *); extern void xfs_iflush_abort(struct xfs_inode *); extern void xfs_iflush_shutdown_abort(struct xfs_inode *); -extern int 
xfs_inode_item_format_convert(xfs_log_iovec_t *, - struct xfs_inode_log_format *); +int xfs_inode_item_format_convert(struct kvec *buf, + struct xfs_inode_log_format *in_f); extern struct kmem_cache *xfs_ili_cache; diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c index 7205fd14f6b3..9d1999d41be1 100644 --- a/fs/xfs/xfs_inode_item_recover.c +++ b/fs/xfs/xfs_inode_item_recover.c @@ -30,13 +30,13 @@ xlog_recover_inode_ra_pass2( struct xlog *log, struct xlog_recover_item *item) { - if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { - struct xfs_inode_log_format *ilfp = item->ri_buf[0].i_addr; + if (item->ri_buf[0].iov_len == sizeof(struct xfs_inode_log_format)) { + struct xfs_inode_log_format *ilfp = item->ri_buf[0].iov_base; xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len, &xfs_inode_buf_ra_ops); } else { - struct xfs_inode_log_format_32 *ilfp = item->ri_buf[0].i_addr; + struct xfs_inode_log_format_32 *ilfp = item->ri_buf[0].iov_base; xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len, &xfs_inode_buf_ra_ops); @@ -326,8 +326,8 @@ xlog_recover_inode_commit_pass2( int need_free = 0; xfs_failaddr_t fa; - if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { - in_f = item->ri_buf[0].i_addr; + if (item->ri_buf[0].iov_len == sizeof(struct xfs_inode_log_format)) { + in_f = item->ri_buf[0].iov_base; } else { in_f = kmalloc(sizeof(struct xfs_inode_log_format), GFP_KERNEL | __GFP_NOFAIL); @@ -366,7 +366,7 @@ xlog_recover_inode_commit_pass2( error = -EFSCORRUPTED; goto out_release; } - ldip = item->ri_buf[1].i_addr; + ldip = item->ri_buf[1].iov_base; if (XFS_IS_CORRUPT(mp, ldip->di_magic != XFS_DINODE_MAGIC)) { xfs_alert(mp, "%s: Bad inode log record, rec ptr "PTR_FMT", ino %lld", @@ -472,12 +472,12 @@ xlog_recover_inode_commit_pass2( goto out_release; } isize = xfs_log_dinode_size(mp); - if (unlikely(item->ri_buf[1].i_len > isize)) { + if (unlikely(item->ri_buf[1].iov_len > isize)) { XFS_CORRUPTION_ERROR("Bad log dinode size", XFS_ERRLEVEL_LOW, mp, ldip, sizeof(*ldip)); xfs_alert(mp, - "Bad inode 0x%llx log dinode size 0x%x", - in_f->ilf_ino, item->ri_buf[1].i_len); + "Bad inode 0x%llx log dinode size 0x%zx", + in_f->ilf_ino, item->ri_buf[1].iov_len); error = -EFSCORRUPTED; goto out_release; } @@ -500,8 +500,8 @@ xlog_recover_inode_commit_pass2( if (in_f->ilf_size == 2) goto out_owner_change; - len = item->ri_buf[2].i_len; - src = item->ri_buf[2].i_addr; + len = item->ri_buf[2].iov_len; + src = item->ri_buf[2].iov_base; ASSERT(in_f->ilf_size <= 4); ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK)); ASSERT(!(fields & XFS_ILOG_DFORK) || @@ -538,8 +538,8 @@ xlog_recover_inode_commit_pass2( } else { attr_index = 2; } - len = item->ri_buf[attr_index].i_len; - src = item->ri_buf[attr_index].i_addr; + len = item->ri_buf[attr_index].iov_len; + src = item->ri_buf[attr_index].iov_base; ASSERT(len == xlog_calc_iovec_len(in_f->ilf_asize)); switch (in_f->ilf_fields & XFS_ILOG_AFORK) { diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index d250f7f74e3b..fe1f74a3b6a3 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -444,7 +444,7 @@ static void xfs_fill_fsxattr( struct xfs_inode *ip, int whichfork, - struct fileattr *fa) + struct file_kattr *fa) { struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); @@ -496,7 +496,7 @@ xfs_ioc_fsgetxattra( xfs_inode_t *ip, void __user *arg) { - struct fileattr fa; + struct file_kattr fa; xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_fill_fsxattr(ip, XFS_ATTR_FORK, 
&fa); @@ -508,7 +508,7 @@ xfs_ioc_fsgetxattra( int xfs_fileattr_get( struct dentry *dentry, - struct fileattr *fa) + struct file_kattr *fa) { struct xfs_inode *ip = XFS_I(d_inode(dentry)); @@ -526,7 +526,7 @@ static int xfs_ioctl_setattr_xflags( struct xfs_trans *tp, struct xfs_inode *ip, - struct fileattr *fa) + struct file_kattr *fa) { struct xfs_mount *mp = ip->i_mount; bool rtflag = (fa->fsx_xflags & FS_XFLAG_REALTIME); @@ -582,7 +582,7 @@ xfs_ioctl_setattr_xflags( static void xfs_ioctl_setattr_prepare_dax( struct xfs_inode *ip, - struct fileattr *fa) + struct file_kattr *fa) { struct xfs_mount *mp = ip->i_mount; struct inode *inode = VFS_I(ip); @@ -642,7 +642,7 @@ out_error: static int xfs_ioctl_setattr_check_extsize( struct xfs_inode *ip, - struct fileattr *fa) + struct file_kattr *fa) { struct xfs_mount *mp = ip->i_mount; xfs_failaddr_t failaddr; @@ -684,7 +684,7 @@ xfs_ioctl_setattr_check_extsize( static int xfs_ioctl_setattr_check_cowextsize( struct xfs_inode *ip, - struct fileattr *fa) + struct file_kattr *fa) { struct xfs_mount *mp = ip->i_mount; xfs_failaddr_t failaddr; @@ -709,7 +709,7 @@ xfs_ioctl_setattr_check_cowextsize( static int xfs_ioctl_setattr_check_projid( struct xfs_inode *ip, - struct fileattr *fa) + struct file_kattr *fa) { if (!fa->fsx_valid) return 0; @@ -725,7 +725,7 @@ int xfs_fileattr_set( struct mnt_idmap *idmap, struct dentry *dentry, - struct fileattr *fa) + struct file_kattr *fa) { struct xfs_inode *ip = XFS_I(d_inode(dentry)); struct xfs_mount *mp = ip->i_mount; @@ -990,9 +990,8 @@ xfs_ioc_getlabel( BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX); /* 1 larger than sb_fname, so this ensures a trailing NUL char */ - memset(label, 0, sizeof(label)); spin_lock(&mp->m_sb_lock); - strncpy(label, sbp->sb_fname, XFSLABEL_MAX); + memtostr_pad(label, sbp->sb_fname); spin_unlock(&mp->m_sb_lock); if (copy_to_user(user_label, label, sizeof(label))) diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h index 12124946f347..f5ed5cf9d3df 100644 --- a/fs/xfs/xfs_ioctl.h +++ b/fs/xfs/xfs_ioctl.h @@ -17,13 +17,13 @@ xfs_ioc_swapext( extern int xfs_fileattr_get( struct dentry *dentry, - struct fileattr *fa); + struct file_kattr *fa); extern int xfs_fileattr_set( struct mnt_idmap *idmap, struct dentry *dentry, - struct fileattr *fa); + struct file_kattr *fa); extern long xfs_file_ioctl( diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index ec30b78bf5c4..2a74f2957341 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -79,6 +79,9 @@ xfs_iomap_valid( { struct xfs_inode *ip = XFS_I(inode); + if (iomap->type == IOMAP_HOLE) + return true; + if (iomap->validity_cookie != xfs_iomap_inode_sequence(ip, iomap->flags)) { trace_xfs_iomap_invalid(ip, iomap); @@ -89,7 +92,7 @@ xfs_iomap_valid( return true; } -static const struct iomap_folio_ops xfs_iomap_folio_ops = { +const struct iomap_write_ops xfs_iomap_write_ops = { .iomap_valid = xfs_iomap_valid, }; @@ -151,7 +154,6 @@ xfs_bmbt_to_iomap( iomap->flags |= IOMAP_F_DIRTY; iomap->validity_cookie = sequence_cookie; - iomap->folio_ops = &xfs_iomap_folio_ops; return 0; } @@ -2198,7 +2200,8 @@ xfs_zero_range( return dax_zero_range(inode, pos, len, did_zero, &xfs_dax_write_iomap_ops); return iomap_zero_range(inode, pos, len, did_zero, - &xfs_buffered_write_iomap_ops, ac); + &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops, + ac); } int @@ -2214,5 +2217,6 @@ xfs_truncate_page( return dax_truncate_page(inode, pos, did_zero, &xfs_dax_write_iomap_ops); return iomap_truncate_page(inode, pos, did_zero, - 
&xfs_buffered_write_iomap_ops, ac); + &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops, + ac); } diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index 674f8ac1b9bd..ebcce7d49446 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h @@ -57,5 +57,6 @@ extern const struct iomap_ops xfs_seek_iomap_ops; extern const struct iomap_ops xfs_xattr_iomap_ops; extern const struct iomap_ops xfs_dax_write_iomap_ops; extern const struct iomap_ops xfs_atomic_write_cow_iomap_ops; +extern const struct iomap_write_ops xfs_iomap_write_ops; #endif /* __XFS_IOMAP_H__*/ diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 01e597290eb5..149b5460fbfd 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -970,7 +970,7 @@ xfs_setattr_size( * change. */ if (xfs_is_zoned_inode(ip)) { - error = xfs_zoned_space_reserve(ip, 1, + error = xfs_zoned_space_reserve(mp, 1, XFS_ZR_NOWAIT | XFS_ZR_RESERVED, &ac); if (error) { if (error == -EAGAIN) @@ -998,7 +998,7 @@ xfs_setattr_size( } if (xfs_is_zoned_inode(ip)) - xfs_zoned_space_unreserve(ip, &ac); + xfs_zoned_space_unreserve(mp, &ac); if (error) return error; diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 1fa1c0564b0c..c8c9b8d8309f 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -239,14 +239,10 @@ xfs_bulkstat_one( * Grab an empty transaction so that we can use its recursive buffer * locking abilities to detect cycles in the inobt without deadlocking. */ - error = xfs_trans_alloc_empty(breq->mp, &tp); - if (error) - goto out; - + tp = xfs_trans_alloc_empty(breq->mp); error = xfs_bulkstat_one_int(breq->mp, breq->idmap, tp, breq->startino, &bc); xfs_trans_cancel(tp); -out: kfree(bc.buf); /* @@ -331,17 +327,13 @@ xfs_bulkstat( * Grab an empty transaction so that we can use its recursive buffer * locking abilities to detect cycles in the inobt without deadlocking. */ - error = xfs_trans_alloc_empty(breq->mp, &tp); - if (error) - goto out; - + tp = xfs_trans_alloc_empty(breq->mp); if (breq->flags & XFS_IBULK_SAME_AG) iwalk_flags |= XFS_IWALK_SAME_AG; error = xfs_iwalk(breq->mp, tp, breq->startino, iwalk_flags, xfs_bulkstat_iwalk, breq->icount, &bc); xfs_trans_cancel(tp); -out: kfree(bc.buf); /* @@ -464,14 +456,10 @@ xfs_inumbers( * Grab an empty transaction so that we can use its recursive buffer * locking abilities to detect cycles in the inobt without deadlocking. */ - error = xfs_trans_alloc_empty(breq->mp, &tp); - if (error) - goto out; - + tp = xfs_trans_alloc_empty(breq->mp); error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags, xfs_inumbers_walk, breq->icount, &ic); xfs_trans_cancel(tp); -out: /* * We found some inode groups, so clear the error status and return diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c index 7db3ece370b1..c1c31d1a8e21 100644 --- a/fs/xfs/xfs_iwalk.c +++ b/fs/xfs/xfs_iwalk.c @@ -377,11 +377,8 @@ xfs_iwalk_run_callbacks( if (!has_more) return 0; - if (iwag->drop_trans) { - error = xfs_trans_alloc_empty(mp, &iwag->tp); - if (error) - return error; - } + if (iwag->drop_trans) + iwag->tp = xfs_trans_alloc_empty(mp); /* ...and recreate the cursor just past where we left off. */ error = xfs_ialloc_read_agi(iwag->pag, iwag->tp, 0, agi_bpp); @@ -617,9 +614,7 @@ xfs_iwalk_ag_work( * Grab an empty transaction so that we can use its recursive buffer * locking abilities to detect cycles in the inobt without deadlocking. 
*/ - error = xfs_trans_alloc_empty(mp, &iwag->tp); - if (error) - goto out; + iwag->tp = xfs_trans_alloc_empty(mp); iwag->drop_trans = 1; error = xfs_iwalk_ag(iwag); diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 793468b4d30d..c8a57e21a1d3 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -109,14 +109,14 @@ xlog_prepare_iovec( vec = &lv->lv_iovecp[0]; } - len = lv->lv_buf_len + sizeof(struct xlog_op_header); + len = lv->lv_buf_used + sizeof(struct xlog_op_header); if (!IS_ALIGNED(len, sizeof(uint64_t))) { - lv->lv_buf_len = round_up(len, sizeof(uint64_t)) - + lv->lv_buf_used = round_up(len, sizeof(uint64_t)) - sizeof(struct xlog_op_header); } vec->i_type = type; - vec->i_addr = lv->lv_buf + lv->lv_buf_len; + vec->i_addr = lv->lv_buf + lv->lv_buf_used; oph = vec->i_addr; oph->oh_clientid = XFS_TRANSACTION; @@ -1931,9 +1931,9 @@ xlog_print_trans( if (!lv) continue; xfs_warn(mp, " niovecs = %d", lv->lv_niovecs); - xfs_warn(mp, " size = %d", lv->lv_size); + xfs_warn(mp, " alloc_size = %d", lv->lv_alloc_size); xfs_warn(mp, " bytes = %d", lv->lv_bytes); - xfs_warn(mp, " buf len = %d", lv->lv_buf_len); + xfs_warn(mp, " buf used= %d", lv->lv_buf_used); /* dump each iovec for the log item */ vec = lv->lv_iovecp; @@ -3092,16 +3092,16 @@ xfs_log_force_seq( */ void xfs_log_ticket_put( - xlog_ticket_t *ticket) + struct xlog_ticket *ticket) { ASSERT(atomic_read(&ticket->t_ref) > 0); if (atomic_dec_and_test(&ticket->t_ref)) kmem_cache_free(xfs_log_ticket_cache, ticket); } -xlog_ticket_t * +struct xlog_ticket * xfs_log_ticket_get( - xlog_ticket_t *ticket) + struct xlog_ticket *ticket) { ASSERT(atomic_read(&ticket->t_ref) > 0); atomic_inc(&ticket->t_ref); diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 13455854365f..af6daf4f6792 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -16,8 +16,8 @@ struct xfs_log_vec { struct xfs_log_item *lv_item; /* owner */ char *lv_buf; /* formatted buffer */ int lv_bytes; /* accounted space in buffer */ - int lv_buf_len; /* aligned size of buffer */ - int lv_size; /* size of allocated lv */ + int lv_buf_used; /* buffer space used so far */ + int lv_alloc_size; /* size of allocated lv */ }; #define XFS_LOG_VEC_ORDERED (-1) @@ -64,12 +64,13 @@ xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, oph->oh_len = cpu_to_be32(len); len += sizeof(struct xlog_op_header); - lv->lv_buf_len += len; + lv->lv_buf_used += len; lv->lv_bytes += len; vec->i_len = len; /* Catch buffer overruns */ - ASSERT((void *)lv->lv_buf + lv->lv_bytes <= (void *)lv + lv->lv_size); + ASSERT((void *)lv->lv_buf + lv->lv_bytes <= + (void *)lv + lv->lv_alloc_size); } /* @@ -87,13 +88,6 @@ xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp, return buf; } -static inline void * -xlog_copy_from_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp, - const struct xfs_log_iovec *src) -{ - return xlog_copy_iovec(lv, vecp, src->i_type, src->i_addr, src->i_len); -} - /* * By comparing each component, we don't have to worry about extra * endian issues in treating two 32 bit numbers as one 64 bit number diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index a80cb6b9969a..f443757e93c2 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -275,7 +275,7 @@ xlog_cil_alloc_shadow_bufs( struct xfs_log_vec *lv; int niovecs = 0; int nbytes = 0; - int buf_size; + int alloc_size; bool ordered = false; /* Skip items which aren't dirty in this transaction. 
*/ @@ -316,14 +316,14 @@ xlog_cil_alloc_shadow_bufs( * that space to ensure we can align it appropriately and not * overrun the buffer. */ - buf_size = nbytes + xlog_cil_iovec_space(niovecs); + alloc_size = nbytes + xlog_cil_iovec_space(niovecs); /* * if we have no shadow buffer, or it is too small, we need to * reallocate it. */ if (!lip->li_lv_shadow || - buf_size > lip->li_lv_shadow->lv_size) { + alloc_size > lip->li_lv_shadow->lv_alloc_size) { /* * We free and allocate here as a realloc would copy * unnecessary data. We don't use kvzalloc() for the @@ -332,15 +332,15 @@ xlog_cil_alloc_shadow_bufs( * storage. */ kvfree(lip->li_lv_shadow); - lv = xlog_kvmalloc(buf_size); + lv = xlog_kvmalloc(alloc_size); memset(lv, 0, xlog_cil_iovec_space(niovecs)); INIT_LIST_HEAD(&lv->lv_list); lv->lv_item = lip; - lv->lv_size = buf_size; + lv->lv_alloc_size = alloc_size; if (ordered) - lv->lv_buf_len = XFS_LOG_VEC_ORDERED; + lv->lv_buf_used = XFS_LOG_VEC_ORDERED; else lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1]; lip->li_lv_shadow = lv; @@ -348,9 +348,9 @@ xlog_cil_alloc_shadow_bufs( /* same or smaller, optimise common overwrite case */ lv = lip->li_lv_shadow; if (ordered) - lv->lv_buf_len = XFS_LOG_VEC_ORDERED; + lv->lv_buf_used = XFS_LOG_VEC_ORDERED; else - lv->lv_buf_len = 0; + lv->lv_buf_used = 0; lv->lv_bytes = 0; } @@ -370,30 +370,30 @@ xlog_cil_alloc_shadow_bufs( STATIC void xfs_cil_prepare_item( struct xlog *log, + struct xfs_log_item *lip, struct xfs_log_vec *lv, - struct xfs_log_vec *old_lv, int *diff_len) { /* Account for the new LV being passed in */ - if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) + if (lv->lv_buf_used != XFS_LOG_VEC_ORDERED) *diff_len += lv->lv_bytes; /* * If there is no old LV, this is the first time we've seen the item in * this CIL context and so we need to pin it. If we are replacing the - * old_lv, then remove the space it accounts for and make it the shadow + * old lv, then remove the space it accounts for and make it the shadow * buffer for later freeing. In both cases we are now switching to the * shadow buffer, so update the pointer to it appropriately. */ - if (!old_lv) { + if (!lip->li_lv) { if (lv->lv_item->li_ops->iop_pin) lv->lv_item->li_ops->iop_pin(lv->lv_item); lv->lv_item->li_lv_shadow = NULL; - } else if (old_lv != lv) { - ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED); + } else if (lip->li_lv != lv) { + ASSERT(lv->lv_buf_used != XFS_LOG_VEC_ORDERED); - *diff_len -= old_lv->lv_bytes; - lv->lv_item->li_lv_shadow = old_lv; + *diff_len -= lip->li_lv->lv_bytes; + lv->lv_item->li_lv_shadow = lip->li_lv; } /* attach new log vector to log item */ @@ -452,10 +452,8 @@ xlog_cil_insert_format_items( } list_for_each_entry(lip, &tp->t_items, li_trans) { - struct xfs_log_vec *lv; - struct xfs_log_vec *old_lv = NULL; - struct xfs_log_vec *shadow; - bool ordered = false; + struct xfs_log_vec *lv = lip->li_lv; + struct xfs_log_vec *shadow = lip->li_lv_shadow; /* Skip items which aren't dirty in this transaction. */ if (!test_bit(XFS_LI_DIRTY, &lip->li_flags)) @@ -465,22 +463,23 @@ xlog_cil_insert_format_items( * The formatting size information is already attached to * the shadow lv on the log item. 
*/ - shadow = lip->li_lv_shadow; - if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED) - ordered = true; + if (shadow->lv_buf_used == XFS_LOG_VEC_ORDERED) { + if (!lv) { + lv = shadow; + lv->lv_item = lip; + } + ASSERT(shadow->lv_alloc_size == lv->lv_alloc_size); + xfs_cil_prepare_item(log, lip, lv, diff_len); + continue; + } /* Skip items that do not have any vectors for writing */ - if (!shadow->lv_niovecs && !ordered) + if (!shadow->lv_niovecs) continue; /* compare to existing item size */ - old_lv = lip->li_lv; - if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) { + if (lv && shadow->lv_alloc_size <= lv->lv_alloc_size) { /* same or smaller, optimise common overwrite case */ - lv = lip->li_lv; - - if (ordered) - goto insert; /* * set the item up as though it is a new insertion so @@ -492,7 +491,7 @@ xlog_cil_insert_format_items( lv->lv_niovecs = shadow->lv_niovecs; /* reset the lv buffer information for new formatting */ - lv->lv_buf_len = 0; + lv->lv_buf_used = 0; lv->lv_bytes = 0; lv->lv_buf = (char *)lv + xlog_cil_iovec_space(lv->lv_niovecs); @@ -500,17 +499,11 @@ xlog_cil_insert_format_items( /* switch to shadow buffer! */ lv = shadow; lv->lv_item = lip; - if (ordered) { - /* track as an ordered logvec */ - ASSERT(lip->li_lv == NULL); - goto insert; - } } ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t))); lip->li_ops->iop_format(lip, lv); -insert: - xfs_cil_prepare_item(log, lv, old_lv, diff_len); + xfs_cil_prepare_item(log, lip, lv, diff_len); } } @@ -1245,7 +1238,7 @@ xlog_cil_build_lv_chain( lv->lv_order_id = item->li_order_id; /* we don't write ordered log vectors */ - if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) + if (lv->lv_buf_used != XFS_LOG_VEC_ORDERED) *num_bytes += lv->lv_bytes; *num_iovecs += lv->lv_niovecs; list_add_tail(&lv->lv_list, &ctx->lv_chain); diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 39a102cc1b43..a9a7a271c15b 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -144,7 +144,7 @@ enum xlog_iclog_state { #define XLOG_COVER_OPS 5 -typedef struct xlog_ticket { +struct xlog_ticket { struct list_head t_queue; /* reserve/write queue */ struct task_struct *t_task; /* task that owns this ticket */ xlog_tid_t t_tid; /* transaction identifier */ @@ -155,7 +155,7 @@ typedef struct xlog_ticket { char t_cnt; /* current unit count */ uint8_t t_flags; /* properties of reservation */ int t_iclog_hdrs; /* iclog hdrs in t_curr_res */ -} xlog_ticket_t; +}; /* * - A log record header is 512 bytes. 
There is plenty of room to grow the diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 2f76531842f8..e6ed9e09c027 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2131,15 +2131,15 @@ xlog_recover_add_to_cont_trans( item = list_entry(trans->r_itemq.prev, struct xlog_recover_item, ri_list); - old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; - old_len = item->ri_buf[item->ri_cnt-1].i_len; + old_ptr = item->ri_buf[item->ri_cnt-1].iov_base; + old_len = item->ri_buf[item->ri_cnt-1].iov_len; ptr = kvrealloc(old_ptr, len + old_len, GFP_KERNEL); if (!ptr) return -ENOMEM; memcpy(&ptr[old_len], dp, len); - item->ri_buf[item->ri_cnt-1].i_len += len; - item->ri_buf[item->ri_cnt-1].i_addr = ptr; + item->ri_buf[item->ri_cnt-1].iov_len += len; + item->ri_buf[item->ri_cnt-1].iov_base = ptr; trace_xfs_log_recover_item_add_cont(log, trans, item, 0); return 0; } @@ -2223,7 +2223,7 @@ xlog_recover_add_to_trans( } item->ri_total = in_f->ilf_size; - item->ri_buf = kzalloc(item->ri_total * sizeof(xfs_log_iovec_t), + item->ri_buf = kcalloc(item->ri_total, sizeof(*item->ri_buf), GFP_KERNEL | __GFP_NOFAIL); } @@ -2237,8 +2237,8 @@ xlog_recover_add_to_trans( } /* Description region is ri_buf[0] */ - item->ri_buf[item->ri_cnt].i_addr = ptr; - item->ri_buf[item->ri_cnt].i_len = len; + item->ri_buf[item->ri_cnt].iov_base = ptr; + item->ri_buf[item->ri_cnt].iov_len = len; item->ri_cnt++; trace_xfs_log_recover_item_add(log, trans, item, 0); return 0; @@ -2262,7 +2262,7 @@ xlog_recover_free_trans( /* Free the regions in the item. */ list_del(&item->ri_list); for (i = 0; i < item->ri_cnt; i++) - kvfree(item->ri_buf[i].i_addr); + kvfree(item->ri_buf[i].iov_base); /* Free the item itself */ kfree(item->ri_buf); kfree(item); diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c index 42e9c72b85c0..fbeddcac4792 100644 --- a/fs/xfs/xfs_notify_failure.c +++ b/fs/xfs/xfs_notify_failure.c @@ -279,10 +279,7 @@ xfs_dax_notify_dev_failure( kernel_frozen = xfs_dax_notify_failure_freeze(mp) == 0; } - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - goto out; - + tp = xfs_trans_alloc_empty(mp); start_gno = xfs_fsb_to_gno(mp, start_bno, type); end_gno = xfs_fsb_to_gno(mp, end_bno, type); while ((xg = xfs_group_next_range(mp, xg, start_gno, end_gno, type))) { @@ -353,7 +350,6 @@ xfs_dax_notify_dev_failure( error = -EFSCORRUPTED; } -out: /* Thaw the fs if it has been frozen before. 
*/ if (mf_flags & MF_MEM_PRE_REMOVE) xfs_dax_notify_failure_thaw(mp, kernel_frozen); diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index fa135ac26471..23ba84ec919a 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -660,10 +660,7 @@ xfs_qm_load_metadir_qinos( struct xfs_trans *tp; int error; - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; - + tp = xfs_trans_alloc_empty(mp); error = xfs_dqinode_load_parent(tp, &qi->qi_dirip); if (error == -ENOENT) { /* no quota dir directory, but we'll create one later */ @@ -1755,10 +1752,7 @@ xfs_qm_qino_load( struct xfs_inode *dp = NULL; int error; - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; - + tp = xfs_trans_alloc_empty(mp); if (xfs_has_metadir(mp)) { error = xfs_dqinode_load_parent(tp, &dp); if (error) diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index 076501123d89..3728234699a2 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c @@ -717,18 +717,18 @@ xlog_recover_cui_commit_pass2( struct xfs_cui_log_format *cui_formatp; size_t len; - cui_formatp = item->ri_buf[0].i_addr; + cui_formatp = item->ri_buf[0].iov_base; - if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) { + if (item->ri_buf[0].iov_len < xfs_cui_log_format_sizeof(0)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents); - if (item->ri_buf[0].i_len != len) { + if (item->ri_buf[0].iov_len != len) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } @@ -759,18 +759,18 @@ xlog_recover_rtcui_commit_pass2( struct xfs_cui_log_format *cui_formatp; size_t len; - cui_formatp = item->ri_buf[0].i_addr; + cui_formatp = item->ri_buf[0].iov_base; - if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) { + if (item->ri_buf[0].iov_len < xfs_cui_log_format_sizeof(0)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents); - if (item->ri_buf[0].i_len != len) { + if (item->ri_buf[0].iov_len != len) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } @@ -791,7 +791,7 @@ xlog_recover_rtcui_commit_pass2( xfs_lsn_t lsn) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } #endif @@ -817,10 +817,10 @@ xlog_recover_cud_commit_pass2( { struct xfs_cud_log_format *cud_formatp; - cud_formatp = item->ri_buf[0].i_addr; - if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) { + cud_formatp = item->ri_buf[0].iov_base; + if (item->ri_buf[0].iov_len != sizeof(struct xfs_cud_log_format)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } @@ -843,10 +843,10 @@ xlog_recover_rtcud_commit_pass2( { struct xfs_cud_log_format *cud_formatp; - cud_formatp = item->ri_buf[0].i_addr; - if (item->ri_buf[0].i_len != 
sizeof(struct xfs_cud_log_format)) { + cud_formatp = item->ri_buf[0].iov_base; + if (item->ri_buf[0].iov_len != sizeof(struct xfs_cud_log_format)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index ad3bcb76d805..3f177b4ec131 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1881,7 +1881,8 @@ xfs_reflink_unshare( &xfs_dax_write_iomap_ops); else error = iomap_file_unshare(inode, offset, len, - &xfs_buffered_write_iomap_ops); + &xfs_buffered_write_iomap_ops, + &xfs_iomap_write_ops); if (error) goto out; diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c index c99700318ec2..15f0903f6fd4 100644 --- a/fs/xfs/xfs_rmap_item.c +++ b/fs/xfs/xfs_rmap_item.c @@ -746,18 +746,18 @@ xlog_recover_rui_commit_pass2( struct xfs_rui_log_format *rui_formatp; size_t len; - rui_formatp = item->ri_buf[0].i_addr; + rui_formatp = item->ri_buf[0].iov_base; - if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) { + if (item->ri_buf[0].iov_len < xfs_rui_log_format_sizeof(0)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents); - if (item->ri_buf[0].i_len != len) { + if (item->ri_buf[0].iov_len != len) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } @@ -788,18 +788,18 @@ xlog_recover_rtrui_commit_pass2( struct xfs_rui_log_format *rui_formatp; size_t len; - rui_formatp = item->ri_buf[0].i_addr; + rui_formatp = item->ri_buf[0].iov_base; - if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) { + if (item->ri_buf[0].iov_len < xfs_rui_log_format_sizeof(0)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents); - if (item->ri_buf[0].i_len != len) { + if (item->ri_buf[0].iov_len != len) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } @@ -820,7 +820,7 @@ xlog_recover_rtrui_commit_pass2( xfs_lsn_t lsn) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + item->ri_buf[0].iov_base, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } #endif @@ -846,10 +846,10 @@ xlog_recover_rud_commit_pass2( { struct xfs_rud_log_format *rud_formatp; - rud_formatp = item->ri_buf[0].i_addr; - if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) { + rud_formatp = item->ri_buf[0].iov_base; + if (item->ri_buf[0].iov_len != sizeof(struct xfs_rud_log_format)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, - rud_formatp, item->ri_buf[0].i_len); + rud_formatp, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } @@ -872,10 +872,10 @@ xlog_recover_rtrud_commit_pass2( { struct xfs_rud_log_format *rud_formatp; - rud_formatp = item->ri_buf[0].i_addr; - if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) { + rud_formatp = item->ri_buf[0].iov_base; + if (item->ri_buf[0].iov_len != sizeof(struct 
xfs_rud_log_format)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, - rud_formatp, item->ri_buf[0].i_len); + rud_formatp, item->ri_buf[0].iov_len); return -EFSCORRUPTED; } diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 736eb0924573..6907e871fa15 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -729,9 +729,7 @@ xfs_rtginode_ensure( if (rtg->rtg_inodes[type]) return 0; - error = xfs_trans_alloc_empty(rtg_mount(rtg), &tp); - if (error) - return error; + tp = xfs_trans_alloc_empty(rtg_mount(rtg)); error = xfs_rtginode_load(rtg, type, tp); xfs_trans_cancel(tp); @@ -1305,9 +1303,7 @@ xfs_growfs_rt_prep_groups( if (!mp->m_rtdirip) { struct xfs_trans *tp; - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; + tp = xfs_trans_alloc_empty(mp); error = xfs_rtginode_load_parent(tp); xfs_trans_cancel(tp); @@ -1674,10 +1670,7 @@ xfs_rtmount_inodes( struct xfs_rtgroup *rtg = NULL; int error; - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; - + tp = xfs_trans_alloc_empty(mp); if (xfs_has_rtgroups(mp) && mp->m_sb.sb_rgcount > 0) { error = xfs_rtginode_load_parent(tp); if (error) diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 78be223b13b2..e1794e3e3156 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -425,8 +425,8 @@ DECLARE_EVENT_CLASS(xfs_zone_alloc_class, __field(dev_t, dev) __field(xfs_rgnumber_t, rgno) __field(xfs_rgblock_t, used) + __field(xfs_rgblock_t, allocated) __field(xfs_rgblock_t, written) - __field(xfs_rgblock_t, write_pointer) __field(xfs_rgblock_t, rgbno) __field(xfs_extlen_t, len) ), @@ -434,17 +434,17 @@ DECLARE_EVENT_CLASS(xfs_zone_alloc_class, __entry->dev = rtg_mount(oz->oz_rtg)->m_super->s_dev; __entry->rgno = rtg_rgno(oz->oz_rtg); __entry->used = rtg_rmap(oz->oz_rtg)->i_used_blocks; + __entry->allocated = oz->oz_allocated; __entry->written = oz->oz_written; - __entry->write_pointer = oz->oz_write_pointer; __entry->rgbno = rgbno; __entry->len = len; ), - TP_printk("dev %d:%d rgno 0x%x used 0x%x written 0x%x wp 0x%x rgbno 0x%x len 0x%x", + TP_printk("dev %d:%d rgno 0x%x used 0x%x alloced 0x%x written 0x%x rgbno 0x%x len 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rgno, __entry->used, + __entry->allocated, __entry->written, - __entry->write_pointer, __entry->rgbno, __entry->len) ); @@ -1079,7 +1079,9 @@ DEFINE_INODE_EVENT(xfs_get_acl); #endif DEFINE_INODE_EVENT(xfs_vm_bmap); DEFINE_INODE_EVENT(xfs_file_ioctl); +#ifdef CONFIG_COMPAT DEFINE_INODE_EVENT(xfs_file_compat_ioctl); +#endif DEFINE_INODE_EVENT(xfs_ioctl_setattr); DEFINE_INODE_EVENT(xfs_dir_fsync); DEFINE_INODE_EVENT(xfs_file_fsync); @@ -1394,7 +1396,6 @@ DEFINE_EVENT(xfs_dquot_class, name, \ TP_ARGS(dqp)) DEFINE_DQUOT_EVENT(xfs_dqadjust); DEFINE_DQUOT_EVENT(xfs_dqreclaim_want); -DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty); DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy); DEFINE_DQUOT_EVENT(xfs_dqreclaim_done); DEFINE_DQUOT_EVENT(xfs_dqattach_found); @@ -1599,7 +1600,6 @@ DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant); DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_sub); DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_exit); DEFINE_LOGGRANT_EVENT(xfs_log_cil_wait); -DEFINE_LOGGRANT_EVENT(xfs_log_cil_return); DECLARE_EVENT_CLASS(xfs_log_item_class, TP_PROTO(struct xfs_log_item *lip), @@ -1862,8 +1862,6 @@ DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert); DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize); DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof); DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write); 
-DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_unwritten); -DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_append); DEFINE_SIMPLE_IO_EVENT(xfs_file_splice_read); DEFINE_SIMPLE_IO_EVENT(xfs_zoned_map_blocks); @@ -1896,31 +1894,6 @@ DEFINE_EVENT(xfs_itrunc_class, name, \ DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_start); DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_end); -TRACE_EVENT(xfs_pagecache_inval, - TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish), - TP_ARGS(ip, start, finish), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(xfs_fsize_t, size) - __field(xfs_off_t, start) - __field(xfs_off_t, finish) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->size = ip->i_disk_size; - __entry->start = start; - __entry->finish = finish; - ), - TP_printk("dev %d:%d ino 0x%llx disize 0x%llx start 0x%llx finish 0x%llx", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->size, - __entry->start, - __entry->finish) -); - TRACE_EVENT(xfs_bunmap, TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t fileoff, xfs_filblks_t len, int flags, unsigned long caller_ip), @@ -2266,14 +2239,12 @@ DEFINE_EVENT(xfs_alloc_class, name, \ DEFINE_ALLOC_EVENT(xfs_alloc_exact_done); DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound); DEFINE_ALLOC_EVENT(xfs_alloc_exact_error); -DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft); DEFINE_ALLOC_EVENT(xfs_alloc_near_first); DEFINE_ALLOC_EVENT(xfs_alloc_cur); DEFINE_ALLOC_EVENT(xfs_alloc_cur_right); DEFINE_ALLOC_EVENT(xfs_alloc_cur_left); DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup); DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup_done); -DEFINE_ALLOC_EVENT(xfs_alloc_near_error); DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry); DEFINE_ALLOC_EVENT(xfs_alloc_near_busy); DEFINE_ALLOC_EVENT(xfs_alloc_size_neither); @@ -2468,13 +2439,8 @@ DEFINE_ATTR_EVENT(xfs_attr_leaf_toosmall); DEFINE_ATTR_EVENT(xfs_attr_node_addname); DEFINE_ATTR_EVENT(xfs_attr_node_get); DEFINE_ATTR_EVENT(xfs_attr_node_replace); -DEFINE_ATTR_EVENT(xfs_attr_node_removename); - -DEFINE_ATTR_EVENT(xfs_attr_fillstate); -DEFINE_ATTR_EVENT(xfs_attr_refillstate); DEFINE_ATTR_EVENT(xfs_attr_rmtval_get); -DEFINE_ATTR_EVENT(xfs_attr_rmtval_set); #define DEFINE_DA_EVENT(name) \ DEFINE_EVENT(xfs_da_class, name, \ @@ -2892,7 +2858,6 @@ DEFINE_EVENT(xfs_rtdiscard_class, name, \ TP_ARGS(mp, rtbno, len)) DEFINE_RTDISCARD_EVENT(xfs_discard_rtextent); DEFINE_RTDISCARD_EVENT(xfs_discard_rttoosmall); -DEFINE_RTDISCARD_EVENT(xfs_discard_rtrelax); DECLARE_EVENT_CLASS(xfs_btree_cur_class, TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp), @@ -4209,36 +4174,6 @@ DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_extent_error); DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_src); DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_dest); -/* dedupe tracepoints */ -DEFINE_DOUBLE_IO_EVENT(xfs_reflink_compare_extents); -DEFINE_INODE_ERROR_EVENT(xfs_reflink_compare_extents_error); - -/* ioctl tracepoints */ -TRACE_EVENT(xfs_ioctl_clone, - TP_PROTO(struct inode *src, struct inode *dest), - TP_ARGS(src, dest), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(unsigned long, src_ino) - __field(loff_t, src_isize) - __field(unsigned long, dest_ino) - __field(loff_t, dest_isize) - ), - TP_fast_assign( - __entry->dev = src->i_sb->s_dev; - __entry->src_ino = src->i_ino; - __entry->src_isize = i_size_read(src); - __entry->dest_ino = dest->i_ino; - __entry->dest_isize = i_size_read(dest); - ), - TP_printk("dev %d:%d ino 0x%lx isize 0x%llx -> ino 0x%lx isize 
0x%llx", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->src_ino, - __entry->src_isize, - __entry->dest_ino, - __entry->dest_isize) -); - /* unshare tracepoints */ DEFINE_SIMPLE_IO_EVENT(xfs_reflink_unshare); DEFINE_INODE_ERROR_EVENT(xfs_reflink_unshare_error); @@ -4246,7 +4181,6 @@ DEFINE_INODE_ERROR_EVENT(xfs_reflink_unshare_error); /* copy on write */ DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_around_shared); DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found); -DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc); DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow); DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range); @@ -5031,7 +4965,6 @@ DEFINE_ICLOG_EVENT(xlog_iclog_switch); DEFINE_ICLOG_EVENT(xlog_iclog_sync); DEFINE_ICLOG_EVENT(xlog_iclog_syncing); DEFINE_ICLOG_EVENT(xlog_iclog_sync_done); -DEFINE_ICLOG_EVENT(xlog_iclog_want_sync); DEFINE_ICLOG_EVENT(xlog_iclog_wait_on); DEFINE_ICLOG_EVENT(xlog_iclog_write); @@ -5080,7 +5013,6 @@ DEFINE_DAS_STATE_EVENT(xfs_attr_sf_addname_return); DEFINE_DAS_STATE_EVENT(xfs_attr_set_iter_return); DEFINE_DAS_STATE_EVENT(xfs_attr_leaf_addname_return); DEFINE_DAS_STATE_EVENT(xfs_attr_node_addname_return); -DEFINE_DAS_STATE_EVENT(xfs_attr_remove_iter_return); DEFINE_DAS_STATE_EVENT(xfs_attr_rmtval_alloc); DEFINE_DAS_STATE_EVENT(xfs_attr_rmtval_remove_return); DEFINE_DAS_STATE_EVENT(xfs_attr_defer_add); diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index b4a07af513ba..ece374d622b3 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -134,18 +134,14 @@ xfs_trans_dup( } /* - * This is called to reserve free disk blocks and log space for the - * given transaction. This must be done before allocating any resources - * within the transaction. + * This is called to reserve free disk blocks and log space for the given + * transaction before allocating any resources within the transaction. * * This will return ENOSPC if there are not enough blocks available. * It will sleep waiting for available log space. - * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which - * is used by long running transactions. If any one of the reservations - * fails then they will all be backed out. * - * This does not do quota reservations. That typically is done by the - * caller afterwards. + * This does not do quota reservations. That typically is done by the caller + * afterwards. */ static int xfs_trans_reserve( @@ -158,10 +154,12 @@ xfs_trans_reserve( int error = 0; bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; + ASSERT(resp->tr_logres > 0); + /* - * Attempt to reserve the needed disk blocks by decrementing - * the number needed from the number available. This will - * fail if the count would go below zero. + * Attempt to reserve the needed disk blocks by decrementing the number + * needed from the number available. This will fail if the count would + * go below zero. */ if (blocks > 0) { error = xfs_dec_fdblocks(mp, blocks, rsvd); @@ -173,42 +171,20 @@ xfs_trans_reserve( /* * Reserve the log space needed for this transaction. 
*/ - if (resp->tr_logres > 0) { - bool permanent = false; - - ASSERT(tp->t_log_res == 0 || - tp->t_log_res == resp->tr_logres); - ASSERT(tp->t_log_count == 0 || - tp->t_log_count == resp->tr_logcount); - - if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) { - tp->t_flags |= XFS_TRANS_PERM_LOG_RES; - permanent = true; - } else { - ASSERT(tp->t_ticket == NULL); - ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES)); - } - - if (tp->t_ticket != NULL) { - ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES); - error = xfs_log_regrant(mp, tp->t_ticket); - } else { - error = xfs_log_reserve(mp, resp->tr_logres, - resp->tr_logcount, - &tp->t_ticket, permanent); - } - - if (error) - goto undo_blocks; + if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) + tp->t_flags |= XFS_TRANS_PERM_LOG_RES; + error = xfs_log_reserve(mp, resp->tr_logres, resp->tr_logcount, + &tp->t_ticket, (tp->t_flags & XFS_TRANS_PERM_LOG_RES)); + if (error) + goto undo_blocks; - tp->t_log_res = resp->tr_logres; - tp->t_log_count = resp->tr_logcount; - } + tp->t_log_res = resp->tr_logres; + tp->t_log_count = resp->tr_logcount; /* - * Attempt to reserve the needed realtime extents by decrementing - * the number needed from the number available. This will - * fail if the count would go below zero. + * Attempt to reserve the needed realtime extents by decrementing the + * number needed from the number available. This will fail if the + * count would go below zero. */ if (rtextents > 0) { error = xfs_dec_frextents(mp, rtextents); @@ -221,18 +197,11 @@ xfs_trans_reserve( return 0; - /* - * Error cases jump to one of these labels to undo any - * reservations which have already been performed. - */ undo_log: - if (resp->tr_logres > 0) { - xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket); - tp->t_ticket = NULL; - tp->t_log_res = 0; - tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES; - } - + xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket); + tp->t_ticket = NULL; + tp->t_log_res = 0; + tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES; undo_blocks: if (blocks > 0) { xfs_add_fdblocks(mp, blocks); @@ -241,6 +210,28 @@ undo_blocks: return error; } +static struct xfs_trans * +__xfs_trans_alloc( + struct xfs_mount *mp, + uint flags) +{ + struct xfs_trans *tp; + + ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) || xfs_has_lazysbcount(mp)); + + tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL); + if (!(flags & XFS_TRANS_NO_WRITECOUNT)) + sb_start_intwrite(mp->m_super); + xfs_trans_set_context(tp); + tp->t_flags = flags; + tp->t_mountp = mp; + INIT_LIST_HEAD(&tp->t_items); + INIT_LIST_HEAD(&tp->t_busy); + INIT_LIST_HEAD(&tp->t_dfops); + tp->t_highest_agno = NULLAGNUMBER; + return tp; +} + int xfs_trans_alloc( struct xfs_mount *mp, @@ -254,33 +245,16 @@ xfs_trans_alloc( bool want_retry = true; int error; + ASSERT(resp->tr_logres > 0); + /* * Allocate the handle before we do our freeze accounting and setting up * GFP_NOFS allocation context so that we avoid lockdep false positives * by doing GFP_KERNEL allocations inside sb_start_intwrite(). */ retry: - tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL); - if (!(flags & XFS_TRANS_NO_WRITECOUNT)) - sb_start_intwrite(mp->m_super); - xfs_trans_set_context(tp); - - /* - * Zero-reservation ("empty") transactions can't modify anything, so - * they're allowed to run while we're frozen. 
- */ - WARN_ON(resp->tr_logres > 0 && - mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); - ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) || - xfs_has_lazysbcount(mp)); - - tp->t_flags = flags; - tp->t_mountp = mp; - INIT_LIST_HEAD(&tp->t_items); - INIT_LIST_HEAD(&tp->t_busy); - INIT_LIST_HEAD(&tp->t_dfops); - tp->t_highest_agno = NULLAGNUMBER; - + WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); + tp = __xfs_trans_alloc(mp, flags); error = xfs_trans_reserve(tp, resp, blocks, rtextents); if (error == -ENOSPC && want_retry) { xfs_trans_cancel(tp); @@ -324,14 +298,11 @@ retry: * where we can be grabbing buffers at the same time that freeze is trying to * drain the buffer LRU list. */ -int +struct xfs_trans * xfs_trans_alloc_empty( - struct xfs_mount *mp, - struct xfs_trans **tpp) + struct xfs_mount *mp) { - struct xfs_trans_res resv = {0}; - - return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp); + return __xfs_trans_alloc(mp, XFS_TRANS_NO_WRITECOUNT); } /* @@ -1026,51 +997,57 @@ xfs_trans_cancel( } /* - * Roll from one trans in the sequence of PERMANENT transactions to - * the next: permanent transactions are only flushed out when - * committed with xfs_trans_commit(), but we still want as soon - * as possible to let chunks of it go to the log. So we commit the - * chunk we've been working on and get a new transaction to continue. + * Roll from one trans in the sequence of PERMANENT transactions to the next: + * permanent transactions are only flushed out when committed with + * xfs_trans_commit(), but we still want as soon as possible to let chunks of it + * go to the log. So we commit the chunk we've been working on and get a new + * transaction to continue. */ int xfs_trans_roll( struct xfs_trans **tpp) { - struct xfs_trans *trans = *tpp; - struct xfs_trans_res tres; + struct xfs_trans *tp = *tpp; + unsigned int log_res = tp->t_log_res; + unsigned int log_count = tp->t_log_count; int error; - trace_xfs_trans_roll(trans, _RET_IP_); + trace_xfs_trans_roll(tp, _RET_IP_); + + ASSERT(log_res > 0); /* * Copy the critical parameters from one trans to the next. */ - tres.tr_logres = trans->t_log_res; - tres.tr_logcount = trans->t_log_count; - - *tpp = xfs_trans_dup(trans); + *tpp = xfs_trans_dup(tp); /* * Commit the current transaction. - * If this commit failed, then it'd just unlock those items that - * are not marked ihold. That also means that a filesystem shutdown - * is in progress. The caller takes the responsibility to cancel - * the duplicate transaction that gets returned. + * + * If this commit failed, then it'd just unlock those items that are not + * marked ihold. That also means that a filesystem shutdown is in + * progress. The caller takes the responsibility to cancel the + * duplicate transaction that gets returned. */ - error = __xfs_trans_commit(trans, true); + error = __xfs_trans_commit(tp, true); if (error) return error; /* * Reserve space in the log for the next transaction. - * This also pushes items in the "AIL", the list of logged items, - * out to disk if they are taking up space at the tail of the log - * that we want to use. This requires that either nothing be locked - * across this call, or that anything that is locked be logged in - * the prior and the next transactions. + * + * This also pushes items in the AIL out to disk if they are taking up + * space at the tail of the log that we want to use. 
This requires that + * either nothing be locked across this call, or that anything that is + * locked be logged in the prior and the next transactions. */ - tres.tr_logflags = XFS_TRANS_PERM_LOG_RES; - return xfs_trans_reserve(*tpp, &tres, 0, 0); + tp = *tpp; + error = xfs_log_regrant(tp->t_mountp, tp->t_ticket); + if (error) + return error; + tp->t_log_res = log_res; + tp->t_log_count = log_count; + return 0; } /* @@ -1146,9 +1123,18 @@ xfs_trans_reserve_more( unsigned int blocks, unsigned int rtextents) { - struct xfs_trans_res resv = { }; - - return xfs_trans_reserve(tp, &resv, blocks, rtextents); + bool rsvd = tp->t_flags & XFS_TRANS_RESERVE; + + if (blocks && xfs_dec_fdblocks(tp->t_mountp, blocks, rsvd)) + return -ENOSPC; + if (rtextents && xfs_dec_frextents(tp->t_mountp, rtextents)) { + if (blocks) + xfs_add_fdblocks(tp->t_mountp, blocks); + return -ENOSPC; + } + tp->t_blk_res += blocks; + tp->t_rtx_res += rtextents; + return 0; } /* @@ -1163,14 +1149,13 @@ xfs_trans_reserve_more_inode( unsigned int rblocks, bool force_quota) { - struct xfs_trans_res resv = { }; struct xfs_mount *mp = ip->i_mount; unsigned int rtx = xfs_extlen_to_rtxlen(mp, rblocks); int error; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); - error = xfs_trans_reserve(tp, &resv, dblocks, rtx); + error = xfs_trans_reserve_more(tp, dblocks, rtx); if (error) return error; diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 2b366851e9a4..7fb860f645a3 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -15,7 +15,6 @@ struct xfs_efd_log_item; struct xfs_efi_log_item; struct xfs_inode; struct xfs_item_ops; -struct xfs_log_iovec; struct xfs_mount; struct xfs_trans; struct xfs_trans_res; @@ -168,8 +167,7 @@ int xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp, struct xfs_trans **tpp); int xfs_trans_reserve_more(struct xfs_trans *tp, unsigned int blocks, unsigned int rtextents); -int xfs_trans_alloc_empty(struct xfs_mount *mp, - struct xfs_trans **tpp); +struct xfs_trans *xfs_trans_alloc_empty(struct xfs_mount *mp); void xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t); int xfs_trans_get_buf_map(struct xfs_trans *tp, struct xfs_buftarg *target, diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c index 01315ed75502..33f7eee521a8 100644 --- a/fs/xfs/xfs_zone_alloc.c +++ b/fs/xfs/xfs_zone_alloc.c @@ -434,7 +434,7 @@ xfs_init_open_zone( spin_lock_init(&oz->oz_alloc_lock); atomic_set(&oz->oz_ref, 1); oz->oz_rtg = rtg; - oz->oz_write_pointer = write_pointer; + oz->oz_allocated = write_pointer; oz->oz_written = write_pointer; oz->oz_write_hint = write_hint; oz->oz_is_gc = is_gc; @@ -569,7 +569,7 @@ xfs_try_use_zone( struct xfs_open_zone *oz, bool lowspace) { - if (oz->oz_write_pointer == rtg_blocks(oz->oz_rtg)) + if (oz->oz_allocated == rtg_blocks(oz->oz_rtg)) return false; if (!lowspace && !xfs_good_hint_match(oz, file_hint)) return false; @@ -654,13 +654,6 @@ static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip) !(ip->i_diflags & XFS_DIFLAG_APPEND); } -/* - * Pick a new zone for writes. - * - * If we aren't using up our budget of open zones just open a new one from the - * freelist. Else try to find one that matches the expected data lifetime. If - * we don't find one that is good pick any zone that is available. - */ static struct xfs_open_zone * xfs_select_zone_nowait( struct xfs_mount *mp, @@ -688,7 +681,8 @@ xfs_select_zone_nowait( goto out_unlock; /* - * See if we can open a new zone and use that. 
+ * See if we can open a new zone and use that so that data for different + * files is mixed as little as possible. */ oz = xfs_try_open_zone(mp, write_hint); if (oz) @@ -744,25 +738,25 @@ xfs_zone_alloc_blocks( { struct xfs_rtgroup *rtg = oz->oz_rtg; struct xfs_mount *mp = rtg_mount(rtg); - xfs_rgblock_t rgbno; + xfs_rgblock_t allocated; spin_lock(&oz->oz_alloc_lock); count_fsb = min3(count_fsb, XFS_MAX_BMBT_EXTLEN, - (xfs_filblks_t)rtg_blocks(rtg) - oz->oz_write_pointer); + (xfs_filblks_t)rtg_blocks(rtg) - oz->oz_allocated); if (!count_fsb) { spin_unlock(&oz->oz_alloc_lock); return 0; } - rgbno = oz->oz_write_pointer; - oz->oz_write_pointer += count_fsb; + allocated = oz->oz_allocated; + oz->oz_allocated += count_fsb; spin_unlock(&oz->oz_alloc_lock); - trace_xfs_zone_alloc_blocks(oz, rgbno, count_fsb); + trace_xfs_zone_alloc_blocks(oz, allocated, count_fsb); *sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0); *is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector); if (!*is_seq) - *sector += XFS_FSB_TO_BB(mp, rgbno); + *sector += XFS_FSB_TO_BB(mp, allocated); return XFS_FSB_TO_B(mp, count_fsb); } @@ -983,7 +977,7 @@ xfs_zone_rgbno_is_valid( lockdep_assert_held(&rtg_rmap(rtg)->i_lock); if (rtg->rtg_open_zone) - return rgbno < rtg->rtg_open_zone->oz_write_pointer; + return rgbno < rtg->rtg_open_zone->oz_allocated; return !xa_get_mark(&rtg_mount(rtg)->m_groups[XG_TYPE_RTG].xa, rtg_rgno(rtg), XFS_RTG_FREE); } @@ -1017,7 +1011,7 @@ xfs_init_zone( { struct xfs_mount *mp = rtg_mount(rtg); struct xfs_zone_info *zi = mp->m_zone_info; - uint64_t used = rtg_rmap(rtg)->i_used_blocks; + uint32_t used = rtg_rmap(rtg)->i_used_blocks; xfs_rgblock_t write_pointer, highest_rgbno; int error; @@ -1114,24 +1108,27 @@ xfs_get_zone_info_cb( } /* - * Calculate the max open zone limit based on the of number of - * backing zones available + * Calculate the max open zone limit based on the of number of backing zones + * available. */ static inline uint32_t xfs_max_open_zones( struct xfs_mount *mp) { unsigned int max_open, max_open_data_zones; + /* - * We need two zones for every open data zone, - * one in reserve as we don't reclaim open zones. One data zone - * and its spare is included in XFS_MIN_ZONES. + * We need two zones for every open data zone, one in reserve as we + * don't reclaim open zones. One data zone and its spare is included + * in XFS_MIN_ZONES to support at least one user data writer. */ max_open_data_zones = (mp->m_sb.sb_rgcount - XFS_MIN_ZONES) / 2 + 1; max_open = max_open_data_zones + XFS_OPEN_GC_ZONES; /* - * Cap the max open limit to 1/4 of available space + * Cap the max open limit to 1/4 of available space. Without this we'd + * run out of easy reclaim targets too quickly and storage devices don't + * handle huge numbers of concurrent write streams overly well. 
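The reworked comments in xfs_max_open_zones() spell out the sizing rule: every open data zone needs a spare because open zones are not reclaimed, the GC zones are added on top, and the result is capped at a quarter of all zones so that reclaim targets do not run out and the device is not asked to sustain too many concurrent write streams. A worked example with hypothetical constants (the values of XFS_MIN_ZONES and XFS_OPEN_GC_ZONES are not part of this hunk), assuming sb_rgcount == 100, XFS_MIN_ZONES == 4 and XFS_OPEN_GC_ZONES == 1:

	max_open_data_zones = (100 - 4) / 2 + 1;	/* 49 */
	max_open = max_open_data_zones + 1;		/* 50 */
	max_open = min(max_open, 100 / 4);		/* capped at 25 */

With these numbers the quarter-of-capacity cap, not the spare-zone accounting, determines the limit.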
*/ max_open = min(max_open, mp->m_sb.sb_rgcount / 4); diff --git a/fs/xfs/xfs_zone_alloc.h b/fs/xfs/xfs_zone_alloc.h index ecf39106704c..4db02816d0fd 100644 --- a/fs/xfs/xfs_zone_alloc.h +++ b/fs/xfs/xfs_zone_alloc.h @@ -23,9 +23,9 @@ struct xfs_zone_alloc_ctx { */ #define XFS_ZR_RESERVED (1U << 2) -int xfs_zoned_space_reserve(struct xfs_inode *ip, xfs_filblks_t count_fsb, +int xfs_zoned_space_reserve(struct xfs_mount *mp, xfs_filblks_t count_fsb, unsigned int flags, struct xfs_zone_alloc_ctx *ac); -void xfs_zoned_space_unreserve(struct xfs_inode *ip, +void xfs_zoned_space_unreserve(struct xfs_mount *mp, struct xfs_zone_alloc_ctx *ac); void xfs_zoned_add_available(struct xfs_mount *mp, xfs_filblks_t count_fsb); diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c index 9c00fc5baa30..064cd1a857a0 100644 --- a/fs/xfs/xfs_zone_gc.c +++ b/fs/xfs/xfs_zone_gc.c @@ -328,10 +328,7 @@ xfs_zone_gc_query( iter->rec_idx = 0; iter->rec_count = 0; - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; - + tp = xfs_trans_alloc_empty(mp); xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP); cur = xfs_rtrmapbt_init_cursor(tp, rtg); error = xfs_rmap_query_range(cur, &ri_low, &ri_high, @@ -533,8 +530,7 @@ xfs_zone_gc_steal_open( spin_lock(&zi->zi_open_zones_lock); list_for_each_entry(oz, &zi->zi_open_zones, oz_entry) { - if (!found || - oz->oz_write_pointer < found->oz_write_pointer) + if (!found || oz->oz_allocated < found->oz_allocated) found = oz; } @@ -584,7 +580,7 @@ xfs_zone_gc_ensure_target( { struct xfs_open_zone *oz = mp->m_zone_info->zi_open_gc_zone; - if (!oz || oz->oz_write_pointer == rtg_blocks(oz->oz_rtg)) + if (!oz || oz->oz_allocated == rtg_blocks(oz->oz_rtg)) return xfs_zone_gc_select_target(mp); return oz; } @@ -605,7 +601,7 @@ xfs_zone_gc_space_available( oz = xfs_zone_gc_ensure_target(data->mp); if (!oz) return false; - return oz->oz_write_pointer < rtg_blocks(oz->oz_rtg) && + return oz->oz_allocated < rtg_blocks(oz->oz_rtg) && xfs_zone_gc_scratch_available(data); } @@ -647,7 +643,7 @@ xfs_zone_gc_alloc_blocks( */ spin_lock(&mp->m_sb_lock); *count_fsb = min(*count_fsb, - rtg_blocks(oz->oz_rtg) - oz->oz_write_pointer); + rtg_blocks(oz->oz_rtg) - oz->oz_allocated); *count_fsb = min3(*count_fsb, mp->m_free[XC_FREE_RTEXTENTS].res_avail, mp->m_free[XC_FREE_RTAVAILABLE].res_avail); @@ -661,8 +657,8 @@ xfs_zone_gc_alloc_blocks( *daddr = xfs_gbno_to_daddr(&oz->oz_rtg->rtg_group, 0); *is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *daddr); if (!*is_seq) - *daddr += XFS_FSB_TO_BB(mp, oz->oz_write_pointer); - oz->oz_write_pointer += *count_fsb; + *daddr += XFS_FSB_TO_BB(mp, oz->oz_allocated); + oz->oz_allocated += *count_fsb; atomic_inc(&oz->oz_ref); return oz; } diff --git a/fs/xfs/xfs_zone_info.c b/fs/xfs/xfs_zone_info.c index 733bcc2f8645..07e30c596975 100644 --- a/fs/xfs/xfs_zone_info.c +++ b/fs/xfs/xfs_zone_info.c @@ -32,7 +32,7 @@ xfs_show_open_zone( { seq_printf(m, "\t zone %d, wp %u, written %u, used %u, hint %s\n", rtg_rgno(oz->oz_rtg), - oz->oz_write_pointer, oz->oz_written, + oz->oz_allocated, oz->oz_written, rtg_rmap(oz->oz_rtg)->i_used_blocks, xfs_write_hint_to_str(oz->oz_write_hint)); } diff --git a/fs/xfs/xfs_zone_priv.h b/fs/xfs/xfs_zone_priv.h index ab696975a993..35e6de3d25ed 100644 --- a/fs/xfs/xfs_zone_priv.h +++ b/fs/xfs/xfs_zone_priv.h @@ -11,18 +11,18 @@ struct xfs_open_zone { atomic_t oz_ref; /* - * oz_write_pointer is the write pointer at which space is handed out - * for conventional zones, or simple the count of blocks handed out - * so far for sequential 
write required zones and is protected by - * oz_alloc_lock/ + * oz_allocated is the amount of space already allocated out of the zone + * and is protected by oz_alloc_lock. + * + * For conventional zones it also is the offset of the next write. */ spinlock_t oz_alloc_lock; - xfs_rgblock_t oz_write_pointer; + xfs_rgblock_t oz_allocated; /* - * oz_written is the number of blocks for which we've received a - * write completion. oz_written must always be <= oz_write_pointer - * and is protected by the ILOCK of the rmap inode. + * oz_written is the number of blocks for which we've received a write + * completion. oz_written must always be <= oz_allocated and is + * protected by the ILOCK of the rmap inode. */ xfs_rgblock_t oz_written; diff --git a/fs/xfs/xfs_zone_space_resv.c b/fs/xfs/xfs_zone_space_resv.c index 93c9a7721139..1313c55b8cbe 100644 --- a/fs/xfs/xfs_zone_space_resv.c +++ b/fs/xfs/xfs_zone_space_resv.c @@ -117,11 +117,10 @@ xfs_zoned_space_wait_error( static int xfs_zoned_reserve_available( - struct xfs_inode *ip, + struct xfs_mount *mp, xfs_filblks_t count_fsb, unsigned int flags) { - struct xfs_mount *mp = ip->i_mount; struct xfs_zone_info *zi = mp->m_zone_info; struct xfs_zone_reservation reservation = { .task = current, @@ -198,11 +197,10 @@ xfs_zoned_reserve_available( */ static int xfs_zoned_reserve_extents_greedy( - struct xfs_inode *ip, + struct xfs_mount *mp, xfs_filblks_t *count_fsb, unsigned int flags) { - struct xfs_mount *mp = ip->i_mount; struct xfs_zone_info *zi = mp->m_zone_info; s64 len = *count_fsb; int error = -ENOSPC; @@ -220,12 +218,11 @@ xfs_zoned_reserve_extents_greedy( int xfs_zoned_space_reserve( - struct xfs_inode *ip, + struct xfs_mount *mp, xfs_filblks_t count_fsb, unsigned int flags, struct xfs_zone_alloc_ctx *ac) { - struct xfs_mount *mp = ip->i_mount; int error; ASSERT(ac->reserved_blocks == 0); @@ -234,11 +231,11 @@ xfs_zoned_space_reserve( error = xfs_dec_freecounter(mp, XC_FREE_RTEXTENTS, count_fsb, flags & XFS_ZR_RESERVED); if (error == -ENOSPC && (flags & XFS_ZR_GREEDY) && count_fsb > 1) - error = xfs_zoned_reserve_extents_greedy(ip, &count_fsb, flags); + error = xfs_zoned_reserve_extents_greedy(mp, &count_fsb, flags); if (error) return error; - error = xfs_zoned_reserve_available(ip, count_fsb, flags); + error = xfs_zoned_reserve_available(mp, count_fsb, flags); if (error) { xfs_add_freecounter(mp, XC_FREE_RTEXTENTS, count_fsb); return error; @@ -249,12 +246,10 @@ xfs_zoned_space_reserve( void xfs_zoned_space_unreserve( - struct xfs_inode *ip, + struct xfs_mount *mp, struct xfs_zone_alloc_ctx *ac) { if (ac->reserved_blocks > 0) { - struct xfs_mount *mp = ip->i_mount; - xfs_zoned_add_available(mp, ac->reserved_blocks); xfs_add_freecounter(mp, XC_FREE_RTEXTENTS, ac->reserved_blocks); } diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c index 42e2c0065bb3..fd3a5922f6c3 100644 --- a/fs/zonefs/file.c +++ b/fs/zonefs/file.c @@ -124,37 +124,46 @@ static void zonefs_readahead(struct readahead_control *rac) * Map blocks for page writeback. This is used only on conventional zone files, * which implies that the page range can only be within the fixed inode size. 
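The xfs_zone_space_resv.c hunks above drop the inode argument from the zoned space reservation helpers, which only ever needed the mount. A short sketch of the adjusted caller pattern; the surrounding caller is hypothetical, and only the reserve/unreserve signatures and the XFS_ZR_GREEDY flag come from the hunks above:

	struct xfs_zone_alloc_ctx	ac = { };
	int				error;

	/* Previously: xfs_zoned_space_reserve(ip, count_fsb, XFS_ZR_GREEDY, &ac) */
	error = xfs_zoned_space_reserve(ip->i_mount, count_fsb, XFS_ZR_GREEDY, &ac);
	if (error)
		return error;

	/* ... perform the zoned write ... */

	/* Previously: xfs_zoned_space_unreserve(ip, &ac) */
	xfs_zoned_space_unreserve(ip->i_mount, &ac);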
*/ -static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc, - struct inode *inode, loff_t offset, - unsigned int len) +static ssize_t zonefs_writeback_range(struct iomap_writepage_ctx *wpc, + struct folio *folio, u64 offset, unsigned len, u64 end_pos) { - struct zonefs_zone *z = zonefs_inode_zone(inode); + struct zonefs_zone *z = zonefs_inode_zone(wpc->inode); if (WARN_ON_ONCE(zonefs_zone_is_seq(z))) return -EIO; - if (WARN_ON_ONCE(offset >= i_size_read(inode))) + if (WARN_ON_ONCE(offset >= i_size_read(wpc->inode))) return -EIO; /* If the mapping is already OK, nothing needs to be done */ - if (offset >= wpc->iomap.offset && - offset < wpc->iomap.offset + wpc->iomap.length) - return 0; + if (offset < wpc->iomap.offset || + offset >= wpc->iomap.offset + wpc->iomap.length) { + int error; + + error = zonefs_write_iomap_begin(wpc->inode, offset, + z->z_capacity - offset, IOMAP_WRITE, + &wpc->iomap, NULL); + if (error) + return error; + } - return zonefs_write_iomap_begin(inode, offset, - z->z_capacity - offset, - IOMAP_WRITE, &wpc->iomap, NULL); + return iomap_add_to_ioend(wpc, folio, offset, end_pos, len); } static const struct iomap_writeback_ops zonefs_writeback_ops = { - .map_blocks = zonefs_write_map_blocks, + .writeback_range = zonefs_writeback_range, + .writeback_submit = iomap_ioend_writeback_submit, }; static int zonefs_writepages(struct address_space *mapping, struct writeback_control *wbc) { - struct iomap_writepage_ctx wpc = { }; + struct iomap_writepage_ctx wpc = { + .inode = mapping->host, + .wbc = wbc, + .ops = &zonefs_writeback_ops, + }; - return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops); + return iomap_writepages(&wpc); } static int zonefs_swap_activate(struct swap_info_struct *sis, @@ -312,8 +321,10 @@ static const struct vm_operations_struct zonefs_file_vm_ops = { .page_mkwrite = zonefs_filemap_page_mkwrite, }; -static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int zonefs_file_mmap_prepare(struct vm_area_desc *desc) { + struct file *file = desc->file; + /* * Conventional zones accept random writes, so their files can support * shared writable mappings. For sequential zone files, only read @@ -321,11 +332,11 @@ static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma) * ordering between msync() and page cache writeback. */ if (zonefs_inode_is_seq(file_inode(file)) && - (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) + (desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE)) return -EINVAL; file_accessed(file); - vma->vm_ops = &zonefs_file_vm_ops; + desc->vm_ops = &zonefs_file_vm_ops; return 0; } @@ -563,7 +574,8 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb, if (ret <= 0) goto inode_unlock; - ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops, NULL); + ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops, + NULL, NULL); if (ret == -EIO) zonefs_io_error(inode, true); @@ -850,7 +862,7 @@ const struct file_operations zonefs_file_operations = { .open = zonefs_file_open, .release = zonefs_file_release, .fsync = zonefs_file_fsync, - .mmap = zonefs_file_mmap, + .mmap_prepare = zonefs_file_mmap_prepare, .llseek = zonefs_file_llseek, .read_iter = zonefs_file_read_iter, .write_iter = zonefs_file_write_iter, |
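The zonefs conversion above shows the new iomap writeback interface end to end: the filesystem now populates iomap_writepage_ctx with the inode, the writeback_control and the ops, the old ->map_blocks hook becomes ->writeback_range (which maps the range and then adds it to the current ioend itself), and bio submission is routed through ->writeback_submit. A condensed sketch of the pattern under this interface, using hypothetical myfs_* names; the iomap structure fields and helpers are the ones exercised in the zonefs hunks, while myfs_map_blocks() stands in for the filesystem's own mapping routine:

static ssize_t myfs_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 offset, unsigned len, u64 end_pos)
{
	/* Only refresh the cached mapping when @offset falls outside it. */
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length) {
		int error = myfs_map_blocks(wpc->inode, offset, &wpc->iomap);

		if (error)
			return error;
	}

	/* Hand the mapped range over to the ioend being built. */
	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.writeback_range	= myfs_writeback_range,
	.writeback_submit	= iomap_ioend_writeback_submit,
};

static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode	= mapping->host,
		.wbc	= wbc,
		.ops	= &myfs_writeback_ops,
	};

	return iomap_writepages(&wpc);
}

The same file also switches from .mmap to .mmap_prepare: the handler receives a struct vm_area_desc, checks desc->vm_flags, and sets desc->vm_ops before any VMA exists, rather than modifying a live vm_area_struct.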