Diffstat (limited to 'fs/btrfs/ioctl.c')
-rw-r--r--  fs/btrfs/ioctl.c | 4814
1 file changed, 2224 insertions(+), 2590 deletions(-)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 9c8e1734429c..acb484546b1d 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -26,16 +26,18 @@ #include <linux/btrfs.h> #include <linux/uaccess.h> #include <linux/iversion.h> +#include <linux/fileattr.h> +#include <linux/fsverity.h> +#include <linux/sched/xacct.h> +#include <linux/io_uring/cmd.h> #include "ctree.h" #include "disk-io.h" +#include "export.h" #include "transaction.h" #include "btrfs_inode.h" -#include "print-tree.h" #include "volumes.h" #include "locking.h" -#include "inode-map.h" #include "backref.h" -#include "rcu-string.h" #include "send.h" #include "dev-replace.h" #include "props.h" @@ -43,6 +45,19 @@ #include "qgroup.h" #include "tree-log.h" #include "compression.h" +#include "space-info.h" +#include "block-group.h" +#include "fs.h" +#include "accessors.h" +#include "extent-tree.h" +#include "root-tree.h" +#include "defrag.h" +#include "dir-item.h" +#include "uuid-tree.h" +#include "ioctl.h" +#include "file.h" +#include "scrub.h" +#include "super.h" #ifdef CONFIG_64BIT /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI @@ -76,20 +91,35 @@ struct btrfs_ioctl_send_args_32 { compat_uptr_t clone_sources; /* in */ __u64 parent_root; /* in */ __u64 flags; /* in */ - __u64 reserved[4]; /* in */ + __u32 version; /* in */ + __u8 reserved[28]; /* in */ } __attribute__ ((__packed__)); #define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \ struct btrfs_ioctl_send_args_32) -#endif -static int btrfs_clone(struct inode *src, struct inode *inode, - u64 off, u64 olen, u64 olen_aligned, u64 destoff, - int no_time_update); +struct btrfs_ioctl_encoded_io_args_32 { + compat_uptr_t iov; + compat_ulong_t iovcnt; + __s64 offset; + __u64 flags; + __u64 len; + __u64 unencoded_len; + __u64 unencoded_offset; + __u32 compression; + __u32 encryption; + __u8 reserved[64]; +}; + +#define BTRFS_IOC_ENCODED_READ_32 _IOR(BTRFS_IOCTL_MAGIC, 64, \ + struct btrfs_ioctl_encoded_io_args_32) +#define BTRFS_IOC_ENCODED_WRITE_32 _IOW(BTRFS_IOCTL_MAGIC, 64, \ + struct btrfs_ioctl_encoded_io_args_32) +#endif /* Mask out flags that are inappropriate for the given type of inode. */ -static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode, - unsigned int flags) +static unsigned int btrfs_mask_fsflags_for_type(const struct inode *inode, + unsigned int flags) { if (S_ISDIR(inode->i_mode)) return flags; @@ -103,9 +133,11 @@ static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode, * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS * ioctl. */ -static unsigned int btrfs_inode_flags_to_fsflags(unsigned int flags) +static unsigned int btrfs_inode_flags_to_fsflags(const struct btrfs_inode *inode) { unsigned int iflags = 0; + u32 flags = inode->flags; + u32 ro_flags = inode->ro_flags; if (flags & BTRFS_INODE_SYNC) iflags |= FS_SYNC_FL; @@ -121,6 +153,8 @@ static unsigned int btrfs_inode_flags_to_fsflags(unsigned int flags) iflags |= FS_DIRSYNC_FL; if (flags & BTRFS_INODE_NODATACOW) iflags |= FS_NOCOW_FL; + if (ro_flags & BTRFS_INODE_RO_VERITY) + iflags |= FS_VERITY_FL; if (flags & BTRFS_INODE_NOCOMPRESS) iflags |= FS_NOCOMP_FL; @@ -133,39 +167,33 @@ static unsigned int btrfs_inode_flags_to_fsflags(unsigned int flags) /* * Update inode->i_flags based on the btrfs internal flags. 
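
The new FS_VERITY_FL mapping above surfaces through the generic flags interface. A minimal user-space sketch of reading the bits that btrfs_inode_flags_to_fsflags() produces; the path argument and error handling are illustrative only:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	unsigned int flags = 0;
	int fd = open(argv[1], O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		return 1;
	}
	/* A few of the bits filled in by btrfs_inode_flags_to_fsflags() */
	printf("nocow=%d compress=%d verity=%d\n",
	       !!(flags & FS_NOCOW_FL), !!(flags & FS_COMPR_FL),
	       !!(flags & FS_VERITY_FL));
	close(fd);
	return 0;
}
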
*/ -void btrfs_sync_inode_flags_to_i_flags(struct inode *inode) +void btrfs_sync_inode_flags_to_i_flags(struct btrfs_inode *inode) { - struct btrfs_inode *binode = BTRFS_I(inode); unsigned int new_fl = 0; - if (binode->flags & BTRFS_INODE_SYNC) + if (inode->flags & BTRFS_INODE_SYNC) new_fl |= S_SYNC; - if (binode->flags & BTRFS_INODE_IMMUTABLE) + if (inode->flags & BTRFS_INODE_IMMUTABLE) new_fl |= S_IMMUTABLE; - if (binode->flags & BTRFS_INODE_APPEND) + if (inode->flags & BTRFS_INODE_APPEND) new_fl |= S_APPEND; - if (binode->flags & BTRFS_INODE_NOATIME) + if (inode->flags & BTRFS_INODE_NOATIME) new_fl |= S_NOATIME; - if (binode->flags & BTRFS_INODE_DIRSYNC) + if (inode->flags & BTRFS_INODE_DIRSYNC) new_fl |= S_DIRSYNC; + if (inode->ro_flags & BTRFS_INODE_RO_VERITY) + new_fl |= S_VERITY; - set_mask_bits(&inode->i_flags, - S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC, - new_fl); + set_mask_bits(&inode->vfs_inode.i_flags, + S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC | + S_VERITY, new_fl); } -static int btrfs_ioctl_getflags(struct file *file, void __user *arg) -{ - struct btrfs_inode *binode = BTRFS_I(file_inode(file)); - unsigned int flags = btrfs_inode_flags_to_fsflags(binode->flags); - - if (copy_to_user(arg, &flags, sizeof(flags))) - return -EFAULT; - return 0; -} - -/* Check if @flags are a supported and valid set of FS_*_FL flags */ -static int check_fsflags(unsigned int flags) +/* + * Check if @flags are a supported and valid set of FS_*_FL flags and that + * the old and new flags are not conflicting + */ +static int check_fsflags(unsigned int old_flags, unsigned int flags) { if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ @@ -174,104 +202,143 @@ static int check_fsflags(unsigned int flags) FS_NOCOW_FL)) return -EOPNOTSUPP; + /* COMPR and NOCOMP on new/old are valid */ if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) return -EINVAL; + if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL)) + return -EINVAL; + + /* NOCOW and compression options are mutually exclusive */ + if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL))) + return -EINVAL; + if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL))) + return -EINVAL; + return 0; } -static int btrfs_ioctl_setflags(struct file *file, void __user *arg) +static int check_fsflags_compatible(const struct btrfs_fs_info *fs_info, + unsigned int flags) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_inode *binode = BTRFS_I(inode); - struct btrfs_root *root = binode->root; + if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL)) + return -EPERM; + + return 0; +} + +int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args) +{ + if (memchr(vol_args->name, 0, sizeof(vol_args->name)) == NULL) + return -ENAMETOOLONG; + return 0; +} + +static int btrfs_check_ioctl_vol_args2_subvol_name(const struct btrfs_ioctl_vol_args_v2 *vol_args2) +{ + if (memchr(vol_args2->name, 0, sizeof(vol_args2->name)) == NULL) + return -ENAMETOOLONG; + return 0; +} + +/* + * Set flags/xflags from the internal inode flags. The remaining items of + * fsxattr are zeroed. 
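
check_fsflags() now takes the old flags as well, so the NOCOW/compression exclusion is enforced across transitions, not just within one request. A hedged sketch of what the rejection looks like from user space; fd is assumed to be an open descriptor on a btrfs file:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

/* Sketch: compression and NOCOW are mutually exclusive, so asking
 * for both in a single FS_IOC_SETFLAGS call is rejected by
 * check_fsflags() with -EINVAL. */
static int try_conflicting_flags(int fd)
{
	unsigned int flags = FS_COMPR_FL | FS_NOCOW_FL;

	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
		return -errno;		/* expected: -EINVAL */
	return 0;
}
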
+ */ +int btrfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) +{ + const struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); + + fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(inode)); + return 0; +} + +int btrfs_fileattr_set(struct mnt_idmap *idmap, + struct dentry *dentry, struct file_kattr *fa) +{ + struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); + struct btrfs_root *root = inode->root; + struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_trans_handle *trans; unsigned int fsflags, old_fsflags; int ret; - u64 old_flags; - unsigned int old_i_flags; - umode_t mode; - - if (!inode_owner_or_capable(inode)) - return -EPERM; + const char *comp = NULL; + u32 inode_flags; if (btrfs_root_readonly(root)) return -EROFS; - if (copy_from_user(&fsflags, arg, sizeof(fsflags))) - return -EFAULT; + if (fileattr_has_fsx(fa)) + return -EOPNOTSUPP; - ret = check_fsflags(fsflags); + fsflags = btrfs_mask_fsflags_for_type(&inode->vfs_inode, fa->flags); + old_fsflags = btrfs_inode_flags_to_fsflags(inode); + ret = check_fsflags(old_fsflags, fsflags); if (ret) return ret; - ret = mnt_want_write_file(file); + ret = check_fsflags_compatible(fs_info, fsflags); if (ret) return ret; - inode_lock(inode); - - old_flags = binode->flags; - old_i_flags = inode->i_flags; - mode = inode->i_mode; - - fsflags = btrfs_mask_fsflags_for_type(inode, fsflags); - old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags); - if ((fsflags ^ old_fsflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) { - if (!capable(CAP_LINUX_IMMUTABLE)) { - ret = -EPERM; - goto out_unlock; - } - } - + inode_flags = inode->flags; if (fsflags & FS_SYNC_FL) - binode->flags |= BTRFS_INODE_SYNC; + inode_flags |= BTRFS_INODE_SYNC; else - binode->flags &= ~BTRFS_INODE_SYNC; + inode_flags &= ~BTRFS_INODE_SYNC; if (fsflags & FS_IMMUTABLE_FL) - binode->flags |= BTRFS_INODE_IMMUTABLE; + inode_flags |= BTRFS_INODE_IMMUTABLE; else - binode->flags &= ~BTRFS_INODE_IMMUTABLE; + inode_flags &= ~BTRFS_INODE_IMMUTABLE; if (fsflags & FS_APPEND_FL) - binode->flags |= BTRFS_INODE_APPEND; + inode_flags |= BTRFS_INODE_APPEND; else - binode->flags &= ~BTRFS_INODE_APPEND; + inode_flags &= ~BTRFS_INODE_APPEND; if (fsflags & FS_NODUMP_FL) - binode->flags |= BTRFS_INODE_NODUMP; + inode_flags |= BTRFS_INODE_NODUMP; else - binode->flags &= ~BTRFS_INODE_NODUMP; + inode_flags &= ~BTRFS_INODE_NODUMP; if (fsflags & FS_NOATIME_FL) - binode->flags |= BTRFS_INODE_NOATIME; + inode_flags |= BTRFS_INODE_NOATIME; else - binode->flags &= ~BTRFS_INODE_NOATIME; + inode_flags &= ~BTRFS_INODE_NOATIME; + + /* If coming from FS_IOC_FSSETXATTR then skip unconverted flags */ + if (!fa->flags_valid) { + /* 1 item for the inode */ + trans = btrfs_start_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); + goto update_flags; + } + if (fsflags & FS_DIRSYNC_FL) - binode->flags |= BTRFS_INODE_DIRSYNC; + inode_flags |= BTRFS_INODE_DIRSYNC; else - binode->flags &= ~BTRFS_INODE_DIRSYNC; + inode_flags &= ~BTRFS_INODE_DIRSYNC; if (fsflags & FS_NOCOW_FL) { - if (S_ISREG(mode)) { + if (S_ISREG(inode->vfs_inode.i_mode)) { /* * It's safe to turn csums off here, no extents exist. * Otherwise we want the flag to reflect the real COW * status of the file and will not set it. 
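
The comment that follows is the reason chattr +C only takes full effect on empty files: checksums can only be safely dropped when no extents exist yet. A hedged sketch (hypothetical path) of the one ordering that reliably yields a NOCOW file:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

/* Sketch: set FS_NOCOW_FL while the file is still empty, matching
 * the i_size == 0 check in btrfs_fileattr_set(). */
static int create_nocow_file(const char *path)
{
	unsigned int flags = 0;
	int fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0644);

	if (fd < 0)
		return -1;
	ioctl(fd, FS_IOC_GETFLAGS, &flags);
	flags |= FS_NOCOW_FL;
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* writes from here on are NOCOW */
}
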
*/ - if (inode->i_size == 0) - binode->flags |= BTRFS_INODE_NODATACOW - | BTRFS_INODE_NODATASUM; + if (inode->vfs_inode.i_size == 0) + inode_flags |= BTRFS_INODE_NODATACOW | + BTRFS_INODE_NODATASUM; } else { - binode->flags |= BTRFS_INODE_NODATACOW; + inode_flags |= BTRFS_INODE_NODATACOW; } } else { /* * Revert back under same assumptions as above */ - if (S_ISREG(mode)) { - if (inode->i_size == 0) - binode->flags &= ~(BTRFS_INODE_NODATACOW - | BTRFS_INODE_NODATASUM); + if (S_ISREG(inode->vfs_inode.i_mode)) { + if (inode->vfs_inode.i_size == 0) + inode_flags &= ~(BTRFS_INODE_NODATACOW | + BTRFS_INODE_NODATASUM); } else { - binode->flags &= ~BTRFS_INODE_NODATACOW; + inode_flags &= ~BTRFS_INODE_NODATACOW; } } @@ -281,218 +348,68 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) * things smaller. */ if (fsflags & FS_NOCOMP_FL) { - binode->flags &= ~BTRFS_INODE_COMPRESS; - binode->flags |= BTRFS_INODE_NOCOMPRESS; - - ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0); - if (ret && ret != -ENODATA) - goto out_drop; + inode_flags &= ~BTRFS_INODE_COMPRESS; + inode_flags |= BTRFS_INODE_NOCOMPRESS; } else if (fsflags & FS_COMPR_FL) { - const char *comp; - if (IS_SWAPFILE(inode)) { - ret = -ETXTBSY; - goto out_unlock; - } + if (IS_SWAPFILE(&inode->vfs_inode)) + return -ETXTBSY; - binode->flags |= BTRFS_INODE_COMPRESS; - binode->flags &= ~BTRFS_INODE_NOCOMPRESS; + inode_flags |= BTRFS_INODE_COMPRESS; + inode_flags &= ~BTRFS_INODE_NOCOMPRESS; comp = btrfs_compress_type2str(fs_info->compress_type); if (!comp || comp[0] == 0) comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB); - - ret = btrfs_set_prop(inode, "btrfs.compression", - comp, strlen(comp), 0); - if (ret) - goto out_drop; - } else { - ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0); - if (ret && ret != -ENODATA) - goto out_drop; - binode->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); - } - - trans = btrfs_start_transaction(root, 1); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out_drop; + inode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); } - btrfs_sync_inode_flags_to_i_flags(inode); - inode_inc_iversion(inode); - inode->i_ctime = current_time(inode); - ret = btrfs_update_inode(trans, root, inode); - - btrfs_end_transaction(trans); - out_drop: - if (ret) { - binode->flags = old_flags; - inode->i_flags = old_i_flags; - } - - out_unlock: - inode_unlock(inode); - mnt_drop_write_file(file); - return ret; -} - -/* - * Translate btrfs internal inode flags to xflags as expected by the - * FS_IOC_FSGETXATT ioctl. Filter only the supported ones, unknown flags are - * silently dropped. - */ -static unsigned int btrfs_inode_flags_to_xflags(unsigned int flags) -{ - unsigned int xflags = 0; - - if (flags & BTRFS_INODE_APPEND) - xflags |= FS_XFLAG_APPEND; - if (flags & BTRFS_INODE_IMMUTABLE) - xflags |= FS_XFLAG_IMMUTABLE; - if (flags & BTRFS_INODE_NOATIME) - xflags |= FS_XFLAG_NOATIME; - if (flags & BTRFS_INODE_NODUMP) - xflags |= FS_XFLAG_NODUMP; - if (flags & BTRFS_INODE_SYNC) - xflags |= FS_XFLAG_SYNC; - - return xflags; -} - -/* Check if @flags are a supported and valid set of FS_XFLAGS_* flags */ -static int check_xflags(unsigned int flags) -{ - if (flags & ~(FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE | FS_XFLAG_NOATIME | - FS_XFLAG_NODUMP | FS_XFLAG_SYNC)) - return -EOPNOTSUPP; - return 0; -} - -/* - * Set the xflags from the internal inode flags. The remaining items of fsxattr - * are zeroed. 
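
Besides the flag bits, the compression choice is persisted as an inode property. An assumption-labeled sketch: btrfs properties are exposed through the btrfs.* xattr namespace, so the value btrfs_set_prop() stores can also be written directly (path and algorithm are illustrative):

#include <sys/xattr.h>

/* Sketch: sets the same per-inode property that the FS_COMPR_FL
 * path above stores via btrfs_set_prop(). */
static int set_compression(const char *path)
{
	return setxattr(path, "btrfs.compression", "zstd", 4, 0);
}
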
- */ -static int btrfs_ioctl_fsgetxattr(struct file *file, void __user *arg) -{ - struct btrfs_inode *binode = BTRFS_I(file_inode(file)); - struct fsxattr fa; - - memset(&fa, 0, sizeof(fa)); - fa.fsx_xflags = btrfs_inode_flags_to_xflags(binode->flags); - - if (copy_to_user(arg, &fa, sizeof(fa))) - return -EFAULT; - - return 0; -} - -static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg) -{ - struct inode *inode = file_inode(file); - struct btrfs_inode *binode = BTRFS_I(inode); - struct btrfs_root *root = binode->root; - struct btrfs_trans_handle *trans; - struct fsxattr fa; - unsigned old_flags; - unsigned old_i_flags; - int ret = 0; - - if (!inode_owner_or_capable(inode)) - return -EPERM; - - if (btrfs_root_readonly(root)) - return -EROFS; - - memset(&fa, 0, sizeof(fa)); - if (copy_from_user(&fa, arg, sizeof(fa))) - return -EFAULT; - - ret = check_xflags(fa.fsx_xflags); - if (ret) - return ret; - - if (fa.fsx_extsize != 0 || fa.fsx_projid != 0 || fa.fsx_cowextsize != 0) - return -EOPNOTSUPP; - - ret = mnt_want_write_file(file); - if (ret) - return ret; - - inode_lock(inode); - - old_flags = binode->flags; - old_i_flags = inode->i_flags; - - /* We need the capabilities to change append-only or immutable inode */ - if (((old_flags & (BTRFS_INODE_APPEND | BTRFS_INODE_IMMUTABLE)) || - (fa.fsx_xflags & (FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE))) && - !capable(CAP_LINUX_IMMUTABLE)) { - ret = -EPERM; - goto out_unlock; - } - - if (fa.fsx_xflags & FS_XFLAG_SYNC) - binode->flags |= BTRFS_INODE_SYNC; - else - binode->flags &= ~BTRFS_INODE_SYNC; - if (fa.fsx_xflags & FS_XFLAG_IMMUTABLE) - binode->flags |= BTRFS_INODE_IMMUTABLE; - else - binode->flags &= ~BTRFS_INODE_IMMUTABLE; - if (fa.fsx_xflags & FS_XFLAG_APPEND) - binode->flags |= BTRFS_INODE_APPEND; - else - binode->flags &= ~BTRFS_INODE_APPEND; - if (fa.fsx_xflags & FS_XFLAG_NODUMP) - binode->flags |= BTRFS_INODE_NODUMP; - else - binode->flags &= ~BTRFS_INODE_NODUMP; - if (fa.fsx_xflags & FS_XFLAG_NOATIME) - binode->flags |= BTRFS_INODE_NOATIME; - else - binode->flags &= ~BTRFS_INODE_NOATIME; + /* + * 1 for inode item + * 2 for properties + */ + trans = btrfs_start_transaction(root, 3); + if (IS_ERR(trans)) + return PTR_ERR(trans); - /* 1 item for the inode */ - trans = btrfs_start_transaction(root, 1); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out_unlock; + if (comp) { + ret = btrfs_set_prop(trans, inode, "btrfs.compression", + comp, strlen(comp), 0); + if (unlikely(ret)) { + btrfs_abort_transaction(trans, ret); + goto out_end_trans; + } + } else { + ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL, 0, 0); + if (unlikely(ret && ret != -ENODATA)) { + btrfs_abort_transaction(trans, ret); + goto out_end_trans; + } } +update_flags: + inode->flags = inode_flags; + btrfs_update_inode_mapping_flags(inode); btrfs_sync_inode_flags_to_i_flags(inode); - inode_inc_iversion(inode); - inode->i_ctime = current_time(inode); - ret = btrfs_update_inode(trans, root, inode); + inode_inc_iversion(&inode->vfs_inode); + inode_set_ctime_current(&inode->vfs_inode); + ret = btrfs_update_inode(trans, inode); + out_end_trans: btrfs_end_transaction(trans); - -out_unlock: - if (ret) { - binode->flags = old_flags; - inode->i_flags = old_i_flags; - } - - inode_unlock(inode); - mnt_drop_write_file(file); - return ret; } -static int btrfs_ioctl_getversion(struct file *file, int __user *arg) +static int btrfs_ioctl_getversion(const struct inode *inode, int __user *arg) { - struct inode *inode = file_inode(file); - return 
put_user(inode->i_generation, arg); } -static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) +static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_device *device; - struct request_queue *q; struct fstrim_range range; u64 minlen = ULLONG_MAX; u64 num_devices = 0; @@ -501,17 +418,32 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; + /* + * btrfs_trim_block_group() depends on space cache, which is not + * available in zoned filesystem. So, disallow fitrim on a zoned + * filesystem for now. + */ + if (btrfs_is_zoned(fs_info)) + return -EOPNOTSUPP; + + /* + * If the fs is mounted with nologreplay, which requires it to be + * mounted in RO mode as well, we can not allow discard on free space + * inside block groups, because log trees refer to extents that are not + * pinned in a block group's free space cache (pinning the extents is + * precisely the first phase of replaying a log tree). + */ + if (btrfs_test_opt(fs_info, NOLOGREPLAY)) + return -EROFS; + rcu_read_lock(); list_for_each_entry_rcu(device, &fs_info->fs_devices->devices, dev_list) { - if (!device->bdev) + if (!device->bdev || !bdev_max_discard_sectors(device->bdev)) continue; - q = bdev_get_queue(device->bdev); - if (blk_queue_discard(q)) { - num_devices++; - minlen = min_t(u64, q->limits.discard_granularity, - minlen); - } + num_devices++; + minlen = min_t(u64, bdev_discard_granularity(device->bdev), + minlen); } rcu_read_unlock(); @@ -525,101 +457,128 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) * block group is in the logical address space, which can be any * sectorsize aligned bytenr in the range [0, U64_MAX]. */ - if (range.len < fs_info->sb->s_blocksize) + if (range.len < fs_info->sectorsize) return -EINVAL; range.minlen = max(range.minlen, minlen); ret = btrfs_trim_fs(fs_info, &range); - if (ret < 0) - return ret; if (copy_to_user(arg, &range, sizeof(range))) return -EFAULT; - return 0; + return ret; } -int btrfs_is_empty_uuid(u8 *uuid) +/* + * Calculate the number of transaction items to reserve for creating a subvolume + * or snapshot, not including the inode, directory entries, or parent directory. + */ +static unsigned int create_subvol_num_items(const struct btrfs_qgroup_inherit *inherit) { - int i; + /* + * 1 to add root block + * 1 to add root item + * 1 to add root ref + * 1 to add root backref + * 1 to add UUID item + * 1 to add qgroup info + * 1 to add qgroup limit + * + * Ideally the last two would only be accounted if qgroups are enabled, + * but that can change between now and the time we would insert them. 
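
A hedged user-space counterpart of the fitrim path above: the kernel raises range.minlen to the largest discard granularity it finds across devices and reports the trimmed byte count back in range.len. The fd is assumed to be open on the mounted filesystem:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/types.h>

/* Sketch: trim all free space; fails with EOPNOTSUPP on zoned
 * filesystems and EROFS under nologreplay, per the checks above. */
static int trim_fs(int fd)
{
	struct fstrim_range range;

	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;
	if (ioctl(fd, FITRIM, &range) < 0)
		return -1;
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}
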
+ */ + unsigned int num_items = 7; - for (i = 0; i < BTRFS_UUID_SIZE; i++) { - if (uuid[i]) - return 0; + if (inherit) { + /* 2 to add qgroup relations for each inherited qgroup */ + num_items += 2 * inherit->num_qgroups; } - return 1; + return num_items; } -static noinline int create_subvol(struct inode *dir, - struct dentry *dentry, - const char *name, int namelen, - u64 *async_transid, +static noinline int create_subvol(struct mnt_idmap *idmap, + struct inode *dir, struct dentry *dentry, struct btrfs_qgroup_inherit *inherit) { - struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); struct btrfs_trans_handle *trans; struct btrfs_key key; - struct btrfs_root_item *root_item; + struct btrfs_root_item AUTO_KFREE(root_item); struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *new_root; struct btrfs_block_rsv block_rsv; struct timespec64 cur_time = current_time(dir); - struct inode *inode; + struct btrfs_new_inode_args new_inode_args = { + .dir = dir, + .dentry = dentry, + .subvol = true, + }; + unsigned int trans_num_items; int ret; - int err; + dev_t anon_dev; u64 objectid; - u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; - u64 index = 0; - uuid_le new_uuid; + u64 qgroup_reserved = 0; root_item = kzalloc(sizeof(*root_item), GFP_KERNEL); if (!root_item) return -ENOMEM; - ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid); + ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid); if (ret) - goto fail_free; + return ret; /* * Don't create subvolume whose level is not zero. Or qgroup will be * screwed up since it assumes subvolume qgroup's level to be 0. */ - if (btrfs_qgroup_level(objectid)) { - ret = -ENOSPC; - goto fail_free; + if (btrfs_qgroup_level(objectid)) + return -ENOSPC; + + ret = get_anon_bdev(&anon_dev); + if (ret < 0) + return ret; + + new_inode_args.inode = btrfs_new_subvol_inode(idmap, dir); + if (!new_inode_args.inode) { + ret = -ENOMEM; + goto out_anon_dev; } + ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); + if (ret) + goto out_inode; + trans_num_items += create_subvol_num_items(inherit); btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); - /* - * The same as the snapshot creation, please see the comment - * of create_snapshot(). 
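
For reference, create_subvol() below is reached from user space via BTRFS_IOC_SUBVOL_CREATE; a minimal sketch, with the subvolume name purely illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

/* Sketch: create a subvolume named "sub" under the directory open
 * at dirfd. */
static int make_subvol(int dirfd)
{
	struct btrfs_ioctl_vol_args args;

	memset(&args, 0, sizeof(args));
	strncpy(args.name, "sub", BTRFS_PATH_NAME_MAX);
	return ioctl(dirfd, BTRFS_IOC_SUBVOL_CREATE, &args);
}
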
- */ - ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false); + ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, + trans_num_items, false); if (ret) - goto fail_free; + goto out_new_inode_args; + qgroup_reserved = block_rsv.qgroup_rsv_reserved; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); - btrfs_subvolume_release_metadata(fs_info, &block_rsv); - goto fail_free; + goto out_release_rsv; } + btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); + qgroup_reserved = 0; trans->block_rsv = &block_rsv; trans->bytes_reserved = block_rsv.size; - ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit); + ret = btrfs_qgroup_inherit(trans, 0, objectid, btrfs_root_id(root), inherit); if (ret) - goto fail; + goto out; - leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0); + leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0, + 0, BTRFS_NESTING_NORMAL); if (IS_ERR(leaf)) { ret = PTR_ERR(leaf); - goto fail; + goto out; } - btrfs_mark_buffer_dirty(leaf); + btrfs_mark_buffer_dirty(trans, leaf); inode_item = &root_item->inode; btrfs_set_stack_inode_generation(inode_item, 1); @@ -642,8 +601,7 @@ static noinline int create_subvol(struct inode *dir, btrfs_set_root_generation_v2(root_item, btrfs_root_generation(root_item)); - uuid_le_gen(&new_uuid); - memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE); + generate_random_guid(root_item->uuid); btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec); btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec); root_item->ctime = root_item->otime; @@ -651,112 +609,115 @@ static noinline int create_subvol(struct inode *dir, btrfs_set_root_otransid(root_item, trans->transid); btrfs_tree_unlock(leaf); - free_extent_buffer(leaf); - leaf = NULL; - btrfs_set_root_dirid(root_item, new_dirid); + btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID); key.objectid = objectid; - key.offset = 0; key.type = BTRFS_ROOT_ITEM_KEY; + key.offset = 0; ret = btrfs_insert_root(trans, fs_info->tree_root, &key, root_item); - if (ret) - goto fail; + if (ret) { + int ret2; - key.offset = (u64)-1; - new_root = btrfs_read_fs_root_no_name(fs_info, &key); - if (IS_ERR(new_root)) { - ret = PTR_ERR(new_root); - btrfs_abort_transaction(trans, ret); - goto fail; + /* + * Since we don't abort the transaction in this case, free the + * tree block so that we don't leak space and leave the + * filesystem in an inconsistent state (an extent item in the + * extent tree with a backreference for a root that does not + * exists). + */ + btrfs_tree_lock(leaf); + btrfs_clear_buffer_dirty(trans, leaf); + btrfs_tree_unlock(leaf); + ret2 = btrfs_free_tree_block(trans, objectid, leaf, 0, 1); + if (unlikely(ret2 < 0)) + btrfs_abort_transaction(trans, ret2); + free_extent_buffer(leaf); + goto out; } - btrfs_record_root_in_trans(trans, new_root); + free_extent_buffer(leaf); + leaf = NULL; - ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid); - if (ret) { - /* We potentially lose an unused inode item here */ + new_root = btrfs_get_new_fs_root(fs_info, objectid, &anon_dev); + if (IS_ERR(new_root)) { + ret = PTR_ERR(new_root); btrfs_abort_transaction(trans, ret); - goto fail; + goto out; } + /* anon_dev is owned by new_root now. */ + anon_dev = 0; + BTRFS_I(new_inode_args.inode)->root = new_root; + /* ... and new_root is owned by new_inode_args.inode now. 
*/ - mutex_lock(&new_root->objectid_mutex); - new_root->highest_objectid = new_dirid; - mutex_unlock(&new_root->objectid_mutex); - - /* - * insert the directory item - */ - ret = btrfs_set_inode_index(BTRFS_I(dir), &index); - if (ret) { + ret = btrfs_record_root_in_trans(trans, new_root); + if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); - goto fail; + goto out; } - ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key, - BTRFS_FT_DIR, index); - if (ret) { + ret = btrfs_uuid_tree_add(trans, root_item->uuid, + BTRFS_UUID_KEY_SUBVOL, objectid); + if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); - goto fail; + goto out; } - btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2); - ret = btrfs_update_inode(trans, root, dir); - BUG_ON(ret); - - ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid, - btrfs_ino(BTRFS_I(dir)), index, name, namelen); - BUG_ON(ret); + btrfs_record_new_subvolume(trans, BTRFS_I(dir)); - ret = btrfs_uuid_tree_add(trans, root_item->uuid, - BTRFS_UUID_KEY_SUBVOL, objectid); - if (ret) + ret = btrfs_create_new_inode(trans, &new_inode_args); + if (unlikely(ret)) { btrfs_abort_transaction(trans, ret); + goto out; + } -fail: - kfree(root_item); + d_instantiate_new(dentry, new_inode_args.inode); + new_inode_args.inode = NULL; + +out: trans->block_rsv = NULL; trans->bytes_reserved = 0; - btrfs_subvolume_release_metadata(fs_info, &block_rsv); - - if (async_transid) { - *async_transid = trans->transid; - err = btrfs_commit_transaction_async(trans, 1); - if (err) - err = btrfs_commit_transaction(trans); - } else { - err = btrfs_commit_transaction(trans); - } - if (err && !ret) - ret = err; - - if (!ret) { - inode = btrfs_lookup_dentry(dir, dentry); - if (IS_ERR(inode)) - return PTR_ERR(inode); - d_instantiate(dentry, inode); - } - return ret; + btrfs_end_transaction(trans); +out_release_rsv: + btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL); + if (qgroup_reserved) + btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); +out_new_inode_args: + btrfs_new_inode_args_destroy(&new_inode_args); +out_inode: + iput(new_inode_args.inode); +out_anon_dev: + if (anon_dev) + free_anon_bdev(anon_dev); -fail_free: - kfree(root_item); return ret; } static int create_snapshot(struct btrfs_root *root, struct inode *dir, - struct dentry *dentry, - u64 *async_transid, bool readonly, + struct dentry *dentry, bool readonly, struct btrfs_qgroup_inherit *inherit) { - struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); struct inode *inode; struct btrfs_pending_snapshot *pending_snapshot; + unsigned int trans_num_items; struct btrfs_trans_handle *trans; + struct btrfs_block_rsv *block_rsv; + u64 qgroup_reserved = 0; int ret; - bool snapshot_force_cow = false; - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) + /* We do not support snapshotting right now. 
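
The create_snapshot() path shown here is driven by BTRFS_IOC_SNAP_CREATE_V2, where vol_args_v2.fd identifies the source subvolume (this is the fd the ownership and FIRST_FREE_OBJECTID checks later in this diff apply to). A hedged sketch, names illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

/* Sketch: take a read-only snapshot of the subvolume open at src_fd,
 * placed under the directory open at dest_dirfd. */
static int make_ro_snapshot(int dest_dirfd, int src_fd)
{
	struct btrfs_ioctl_vol_args_v2 args;

	memset(&args, 0, sizeof(args));
	args.fd = src_fd;
	args.flags = BTRFS_SUBVOL_RDONLY;
	strncpy(args.name, "snap", BTRFS_SUBVOL_NAME_MAX);
	return ioctl(dest_dirfd, BTRFS_IOC_SNAP_CREATE_V2, &args);
}
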
*/ + if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { + btrfs_warn(fs_info, + "extent tree v2 doesn't support snapshotting yet"); + return -EOPNOTSUPP; + } + + if (btrfs_root_refs(&root->root_item) == 0) + return -ENOENT; + + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) return -EINVAL; if (atomic_read(&root->nr_swapfiles)) { @@ -769,6 +730,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, if (!pending_snapshot) return -ENOMEM; + ret = get_anon_bdev(&pending_snapshot->anon_dev); + if (ret < 0) + goto free_pending; pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item), GFP_KERNEL); pending_snapshot->path = btrfs_alloc_path(); @@ -777,51 +741,24 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, goto free_pending; } + block_rsv = &pending_snapshot->block_rsv; + btrfs_init_block_rsv(block_rsv, BTRFS_BLOCK_RSV_TEMP); /* - * Force new buffered writes to reserve space even when NOCOW is - * possible. This is to avoid later writeback (running dealloc) to - * fallback to COW mode and unexpectedly fail with ENOSPC. - */ - atomic_inc(&root->will_be_snapshotted); - smp_mb__after_atomic(); - /* wait for no snapshot writes */ - wait_event(root->subv_writers->wait, - percpu_counter_sum(&root->subv_writers->counter) == 0); - - ret = btrfs_start_delalloc_snapshot(root); - if (ret) - goto dec_and_free; - - /* - * All previous writes have started writeback in NOCOW mode, so now - * we force future writes to fallback to COW mode during snapshot - * creation. - */ - atomic_inc(&root->snapshot_force_cow); - snapshot_force_cow = true; - - btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); - - btrfs_init_block_rsv(&pending_snapshot->block_rsv, - BTRFS_BLOCK_RSV_TEMP); - /* - * 1 - parent dir inode - * 2 - dir entries - * 1 - root item - * 2 - root ref/backref - * 1 - root of snapshot - * 1 - UUID item + * 1 to add dir item + * 1 to add dir index + * 1 to update parent inode item */ - ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, - &pending_snapshot->block_rsv, 8, - false); + trans_num_items = create_subvol_num_items(inherit) + 3; + ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, block_rsv, + trans_num_items, false); if (ret) - goto dec_and_free; + goto free_pending; + qgroup_reserved = block_rsv->qgroup_rsv_reserved; pending_snapshot->dentry = dentry; pending_snapshot->root = root; pending_snapshot->readonly = readonly; - pending_snapshot->dir = dir; + pending_snapshot->dir = BTRFS_I(dir); pending_snapshot->inherit = inherit; trans = btrfs_start_transaction(root, 0); @@ -829,19 +766,17 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, ret = PTR_ERR(trans); goto fail; } - - spin_lock(&fs_info->trans_lock); - list_add(&pending_snapshot->list, - &trans->transaction->pending_snapshots); - spin_unlock(&fs_info->trans_lock); - if (async_transid) { - *async_transid = trans->transid; - ret = btrfs_commit_transaction_async(trans, 1); - if (ret) - ret = btrfs_commit_transaction(trans); - } else { - ret = btrfs_commit_transaction(trans); + ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root); + if (ret) { + btrfs_end_transaction(trans); + goto fail; } + btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); + qgroup_reserved = 0; + + trans->pending_snapshot = pending_snapshot; + + ret = btrfs_commit_transaction(trans); if (ret) goto fail; @@ -861,14 +796,18 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, d_instantiate(dentry, inode); ret = 0; + 
pending_snapshot->anon_dev = 0; fail: - btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); -dec_and_free: - if (snapshot_force_cow) - atomic_dec(&root->snapshot_force_cow); - if (atomic_dec_and_test(&root->will_be_snapshotted)) - wake_up_var(&root->will_be_snapshotted); + /* Prevent double freeing of anon_dev */ + if (ret && pending_snapshot->snap) + pending_snapshot->snap->anon_dev = 0; + btrfs_put_root(pending_snapshot->snap); + btrfs_block_rsv_release(fs_info, block_rsv, (u64)-1, NULL); + if (qgroup_reserved) + btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); free_pending: + if (pending_snapshot->anon_dev) + free_anon_bdev(pending_snapshot->anon_dev); kfree(pending_snapshot->root_item); btrfs_free_path(pending_snapshot->path); kfree(pending_snapshot); @@ -896,23 +835,27 @@ free_pending: * nfs_async_unlink(). */ -static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir) +static int btrfs_may_delete(struct mnt_idmap *idmap, + struct inode *dir, struct dentry *victim, int isdir) { - int error; + int ret; if (d_really_is_negative(victim)) return -ENOENT; - BUG_ON(d_inode(victim->d_parent) != dir); + /* The @victim is not inside @dir. */ + if (d_inode(victim->d_parent) != dir) + return -EINVAL; audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE); - error = inode_permission(dir, MAY_WRITE | MAY_EXEC); - if (error) - return error; + ret = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC); + if (ret) + return ret; if (IS_APPEND(dir)) return -EPERM; - if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) || - IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim))) + if (check_sticky(idmap, dir, d_inode(victim)) || + IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) || + IS_SWAPFILE(d_inode(victim))) return -EPERM; if (isdir) { if (!d_is_dir(victim)) @@ -929,13 +872,16 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir) } /* copy of may_create in fs/namei.c() */ -static inline int btrfs_may_create(struct inode *dir, struct dentry *child) +static inline int btrfs_may_create(struct mnt_idmap *idmap, + struct inode *dir, const struct dentry *child) { if (d_really_is_positive(child)) return -EEXIST; if (IS_DEADDIR(dir)) return -ENOENT; - return inode_permission(dir, MAY_WRITE | MAY_EXEC); + if (!fsuidgid_has_mapping(dir->i_sb, idmap)) + return -EOVERFLOW; + return inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC); } /* @@ -943,38 +889,32 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child) * sys_mkdirat and vfs_mkdir, but we only do a single component lookup * inside this filesystem so it's quite a bit simpler. 
*/ -static noinline int btrfs_mksubvol(const struct path *parent, - const char *name, int namelen, - struct btrfs_root *snap_src, - u64 *async_transid, bool readonly, +static noinline int btrfs_mksubvol(struct dentry *parent, + struct mnt_idmap *idmap, + struct qstr *qname, struct btrfs_root *snap_src, + bool readonly, struct btrfs_qgroup_inherit *inherit) { - struct inode *dir = d_inode(parent->dentry); - struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); + struct inode *dir = d_inode(parent); + struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); struct dentry *dentry; - int error; - - error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT); - if (error == -EINTR) - return error; + struct fscrypt_str name_str = FSTR_INIT((char *)qname->name, qname->len); + int ret; - dentry = lookup_one_len(name, parent->dentry, namelen); - error = PTR_ERR(dentry); + dentry = start_creating_killable(idmap, parent, qname); if (IS_ERR(dentry)) - goto out_unlock; + return PTR_ERR(dentry); - error = btrfs_may_create(dir, dentry); - if (error) + ret = btrfs_may_create(idmap, dir, dentry); + if (ret) goto out_dput; /* * even if this name doesn't exist, we may get hash collisions. * check for them now when we can safely fail */ - error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, - dir->i_ino, name, - namelen); - if (error) + ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, dir->i_ino, &name_str); + if (ret) goto out_dput; down_read(&fs_info->subvol_sem); @@ -982,629 +922,115 @@ static noinline int btrfs_mksubvol(const struct path *parent, if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0) goto out_up_read; - if (snap_src) { - error = create_snapshot(snap_src, dir, dentry, - async_transid, readonly, inherit); - } else { - error = create_subvol(dir, dentry, name, namelen, - async_transid, inherit); - } - if (!error) + if (snap_src) + ret = create_snapshot(snap_src, dir, dentry, readonly, inherit); + else + ret = create_subvol(idmap, dir, dentry, inherit); + + if (!ret) fsnotify_mkdir(dir, dentry); out_up_read: up_read(&fs_info->subvol_sem); out_dput: - dput(dentry); -out_unlock: - inode_unlock(dir); - return error; -} - -/* - * When we're defragging a range, we don't want to kick it off again - * if it is really just waiting for delalloc to send it down. - * If we find a nice big extent or delalloc range for the bytes in the - * file you want to defrag, we return 0 to let you know to skip this - * part of the file - */ -static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh) -{ - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct extent_map *em = NULL; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; - u64 end; - - read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE); - read_unlock(&em_tree->lock); - - if (em) { - end = extent_map_end(em); - free_extent_map(em); - if (end - offset > thresh) - return 0; - } - /* if we already have a nice delalloc here, just stop */ - thresh /= 2; - end = count_range_bits(io_tree, &offset, offset + thresh, - thresh, EXTENT_DELALLOC, 1); - if (end >= thresh) - return 0; - return 1; + end_creating(dentry); + return ret; } -/* - * helper function to walk through a file and find extents - * newer than a specific transid, and smaller than thresh. 
- * - * This is used by the defragging code to find new and small - * extents - */ -static int find_new_extents(struct btrfs_root *root, - struct inode *inode, u64 newer_than, - u64 *off, u32 thresh) +static noinline int btrfs_mksnapshot(struct dentry *parent, + struct mnt_idmap *idmap, + struct qstr *qname, + struct btrfs_root *root, + bool readonly, + struct btrfs_qgroup_inherit *inherit) { - struct btrfs_path *path; - struct btrfs_key min_key; - struct extent_buffer *leaf; - struct btrfs_file_extent_item *extent; - int type; int ret; - u64 ino = btrfs_ino(BTRFS_I(inode)); - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - min_key.objectid = ino; - min_key.type = BTRFS_EXTENT_DATA_KEY; - min_key.offset = *off; - - while (1) { - ret = btrfs_search_forward(root, &min_key, path, newer_than); - if (ret != 0) - goto none; -process_slot: - if (min_key.objectid != ino) - goto none; - if (min_key.type != BTRFS_EXTENT_DATA_KEY) - goto none; - - leaf = path->nodes[0]; - extent = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - - type = btrfs_file_extent_type(leaf, extent); - if (type == BTRFS_FILE_EXTENT_REG && - btrfs_file_extent_num_bytes(leaf, extent) < thresh && - check_defrag_in_cache(inode, min_key.offset, thresh)) { - *off = min_key.offset; - btrfs_free_path(path); - return 0; - } - - path->slots[0]++; - if (path->slots[0] < btrfs_header_nritems(leaf)) { - btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]); - goto process_slot; - } - - if (min_key.offset == (u64)-1) - goto none; - - min_key.offset++; - btrfs_release_path(path); - } -none: - btrfs_free_path(path); - return -ENOENT; -} - -static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) -{ - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct extent_map *em; - u64 len = PAGE_SIZE; /* - * hopefully we have this extent in the tree already, try without - * the full extent lock + * Force new buffered writes to reserve space even when NOCOW is + * possible. This is to avoid later writeback (running delalloc) to + * fallback to COW mode and unexpectedly fail with ENOSPC. 
*/ - read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, start, len); - read_unlock(&em_tree->lock); - - if (!em) { - struct extent_state *cached = NULL; - u64 end = start + len - 1; - - /* get the big lock and read metadata off disk */ - lock_extent_bits(io_tree, start, end, &cached); - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); - unlock_extent_cached(io_tree, start, end, &cached); - - if (IS_ERR(em)) - return NULL; - } - - return em; -} - -static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em) -{ - struct extent_map *next; - bool ret = true; - - /* this is the last extent */ - if (em->start + em->len >= i_size_read(inode)) - return false; - - next = defrag_lookup_extent(inode, em->start + em->len); - if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) - ret = false; - else if ((em->block_start + em->block_len == next->block_start) && - (em->block_len > SZ_128K && next->block_len > SZ_128K)) - ret = false; + btrfs_drew_read_lock(&root->snapshot_lock); - free_extent_map(next); - return ret; -} - -static int should_defrag_range(struct inode *inode, u64 start, u32 thresh, - u64 *last_len, u64 *skip, u64 *defrag_end, - int compress) -{ - struct extent_map *em; - int ret = 1; - bool next_mergeable = true; - bool prev_mergeable = true; + ret = btrfs_start_delalloc_snapshot(root, false); + if (ret) + goto out; /* - * make sure that once we start defragging an extent, we keep on - * defragging it + * All previous writes have started writeback in NOCOW mode, so now + * we force future writes to fallback to COW mode during snapshot + * creation. */ - if (start < *defrag_end) - return 1; + atomic_inc(&root->snapshot_force_cow); - *skip = 0; + btrfs_wait_ordered_extents(root, U64_MAX, NULL); - em = defrag_lookup_extent(inode, start); - if (!em) - return 0; + ret = btrfs_mksubvol(parent, idmap, qname, root, readonly, inherit); - /* this will cover holes, and inline extents */ - if (em->block_start >= EXTENT_MAP_LAST_BYTE) { - ret = 0; - goto out; - } - - if (!*defrag_end) - prev_mergeable = false; - - next_mergeable = defrag_check_next_extent(inode, em); - /* - * we hit a real extent, if it is big or the next extent is not a - * real extent, don't bother defragging it - */ - if (!compress && (*last_len == 0 || *last_len >= thresh) && - (em->len >= thresh || (!next_mergeable && !prev_mergeable))) - ret = 0; + atomic_dec(&root->snapshot_force_cow); out: - /* - * last_len ends up being a counter of how many bytes we've defragged. - * every time we choose not to defrag an extent, we reset *last_len - * so that the next tiny extent will force a defrag. - * - * The end result of this is that tiny extents before a single big - * extent will force at least part of that big extent to be defragged. - */ - if (ret) { - *defrag_end = extent_map_end(em); - } else { - *last_len = 0; - *skip = extent_map_end(em); - *defrag_end = 0; - } - - free_extent_map(em); + btrfs_drew_read_unlock(&root->snapshot_lock); return ret; } /* - * it doesn't do much good to defrag one or two pages - * at a time. This pulls in a nice chunk of pages - * to COW and defrag. + * Try to start exclusive operation @type or cancel it if it's running. * - * It also makes sure the delalloc code has enough - * dirty data to avoid making new small extents as part - * of the defrag - * - * It's a good idea to start RA on this range - * before calling this. 
+ * Return: + * 0 - normal mode, newly claimed op started + * >0 - normal mode, something else is running, + * return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS to user space + * ECANCELED - cancel mode, successful cancel + * ENOTCONN - cancel mode, operation not running anymore */ -static int cluster_pages_for_defrag(struct inode *inode, - struct page **pages, - unsigned long start_index, - unsigned long num_pages) +static int exclop_start_or_cancel_reloc(struct btrfs_fs_info *fs_info, + enum btrfs_exclusive_operation type, bool cancel) { - unsigned long file_end; - u64 isize = i_size_read(inode); - u64 page_start; - u64 page_end; - u64 page_cnt; - int ret; - int i; - int i_done; - struct btrfs_ordered_extent *ordered; - struct extent_state *cached_state = NULL; - struct extent_io_tree *tree; - struct extent_changeset *data_reserved = NULL; - gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); - - file_end = (isize - 1) >> PAGE_SHIFT; - if (!isize || start_index > file_end) + if (!cancel) { + /* Start normal op */ + if (!btrfs_exclop_start(fs_info, type)) + return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; + /* Exclusive operation is now claimed */ return 0; - - page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); - - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, - start_index << PAGE_SHIFT, - page_cnt << PAGE_SHIFT); - if (ret) - return ret; - i_done = 0; - tree = &BTRFS_I(inode)->io_tree; - - /* step one, lock all the pages */ - for (i = 0; i < page_cnt; i++) { - struct page *page; -again: - page = find_or_create_page(inode->i_mapping, - start_index + i, mask); - if (!page) - break; - - page_start = page_offset(page); - page_end = page_start + PAGE_SIZE - 1; - while (1) { - lock_extent_bits(tree, page_start, page_end, - &cached_state); - ordered = btrfs_lookup_ordered_extent(inode, - page_start); - unlock_extent_cached(tree, page_start, page_end, - &cached_state); - if (!ordered) - break; - - unlock_page(page); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - lock_page(page); - /* - * we unlocked the page above, so we need check if - * it was released or not. 
- */ - if (page->mapping != inode->i_mapping) { - unlock_page(page); - put_page(page); - goto again; - } - } - - if (!PageUptodate(page)) { - btrfs_readpage(NULL, page); - lock_page(page); - if (!PageUptodate(page)) { - unlock_page(page); - put_page(page); - ret = -EIO; - break; - } - } - - if (page->mapping != inode->i_mapping) { - unlock_page(page); - put_page(page); - goto again; - } - - pages[i] = page; - i_done++; } - if (!i_done || ret) - goto out; - if (!(inode->i_sb->s_flags & SB_ACTIVE)) - goto out; - - /* - * so now we have a nice long stream of locked - * and up to date pages, lets wait on them - */ - for (i = 0; i < i_done; i++) - wait_on_page_writeback(pages[i]); - - page_start = page_offset(pages[0]); - page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE; - - lock_extent_bits(&BTRFS_I(inode)->io_tree, - page_start, page_end - 1, &cached_state); - clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, - page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, - &cached_state); - - if (i_done != page_cnt) { - spin_lock(&BTRFS_I(inode)->lock); - btrfs_mod_outstanding_extents(BTRFS_I(inode), 1); - spin_unlock(&BTRFS_I(inode)->lock); - btrfs_delalloc_release_space(inode, data_reserved, - start_index << PAGE_SHIFT, - (page_cnt - i_done) << PAGE_SHIFT, true); - } - - - set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, - &cached_state); - - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - page_start, page_end - 1, &cached_state); - - for (i = 0; i < i_done; i++) { - clear_page_dirty_for_io(pages[i]); - ClearPageChecked(pages[i]); - set_page_extent_mapped(pages[i]); - set_page_dirty(pages[i]); - unlock_page(pages[i]); - put_page(pages[i]); - } - btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT, - false); - extent_changeset_free(data_reserved); - return i_done; -out: - for (i = 0; i < i_done; i++) { - unlock_page(pages[i]); - put_page(pages[i]); - } - btrfs_delalloc_release_space(inode, data_reserved, - start_index << PAGE_SHIFT, - page_cnt << PAGE_SHIFT, true); - btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT, - true); - extent_changeset_free(data_reserved); - return ret; - -} - -int btrfs_defrag_file(struct inode *inode, struct file *file, - struct btrfs_ioctl_defrag_range_args *range, - u64 newer_than, unsigned long max_to_defrag) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; - struct file_ra_state *ra = NULL; - unsigned long last_index; - u64 isize = i_size_read(inode); - u64 last_len = 0; - u64 skip = 0; - u64 defrag_end = 0; - u64 newer_off = range->start; - unsigned long i; - unsigned long ra_index = 0; - int ret; - int defrag_count = 0; - int compress_type = BTRFS_COMPRESS_ZLIB; - u32 extent_thresh = range->extent_thresh; - unsigned long max_cluster = SZ_256K >> PAGE_SHIFT; - unsigned long cluster = max_cluster; - u64 new_align = ~((u64)SZ_128K - 1); - struct page **pages = NULL; - bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS; - - if (isize == 0) - return 0; - - if (range->start >= isize) - return -EINVAL; - - if (do_compress) { - if (range->compress_type > BTRFS_COMPRESS_TYPES) - return -EINVAL; - if (range->compress_type) - compress_type = range->compress_type; - } - - if (extent_thresh == 0) - extent_thresh = SZ_256K; - - /* - * If we were not given a file, allocate a readahead context. As - * readahead is just an optimization, defrag will work without it so - * we don't error out. 
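
The defrag implementation removed here (presumably relocated behind the newly included defrag.h) is driven by BTRFS_IOC_DEFRAG_RANGE. A hedged sketch; the numeric compress_type follows the kernel's internal BTRFS_COMPRESS_* numbering (an assumption, 1 == zlib):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

/* Sketch: defragment a whole file, requesting recompression, with
 * the same 256K extent threshold btrfs_defrag_file() defaults to. */
static int defrag_whole_file(int fd)
{
	struct btrfs_ioctl_defrag_range_args range;

	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;
	range.extent_thresh = 256 * 1024;
	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
	range.compress_type = 1;	/* assumed: zlib */
	return ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
}
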
- */ - if (!file) { - ra = kzalloc(sizeof(*ra), GFP_KERNEL); - if (ra) - file_ra_state_init(ra, inode->i_mapping); - } else { - ra = &file->f_ra; - } - - pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL); - if (!pages) { - ret = -ENOMEM; - goto out_ra; - } - - /* find the last page to defrag */ - if (range->start + range->len > range->start) { - last_index = min_t(u64, isize - 1, - range->start + range->len - 1) >> PAGE_SHIFT; - } else { - last_index = (isize - 1) >> PAGE_SHIFT; - } - - if (newer_than) { - ret = find_new_extents(root, inode, newer_than, - &newer_off, SZ_64K); - if (!ret) { - range->start = newer_off; - /* - * we always align our defrag to help keep - * the extents in the file evenly spaced - */ - i = (newer_off & new_align) >> PAGE_SHIFT; - } else - goto out_ra; - } else { - i = range->start >> PAGE_SHIFT; - } - if (!max_to_defrag) - max_to_defrag = last_index - i + 1; - - /* - * make writeback starts from i, so the defrag range can be - * written sequentially. - */ - if (i < inode->i_mapping->writeback_index) - inode->i_mapping->writeback_index = i; - - while (i <= last_index && defrag_count < max_to_defrag && - (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) { + /* Cancel running op */ + if (btrfs_exclop_start_try_lock(fs_info, type)) { /* - * make sure we stop running if someone unmounts - * the FS + * This blocks any exclop finish from setting it to NONE, so we + * request cancellation. Either it runs and we will wait for it, + * or it has finished and no waiting will happen. */ - if (!(inode->i_sb->s_flags & SB_ACTIVE)) - break; - - if (btrfs_defrag_cancelled(fs_info)) { - btrfs_debug(fs_info, "defrag_file cancelled"); - ret = -EAGAIN; - break; - } - - if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT, - extent_thresh, &last_len, &skip, - &defrag_end, do_compress)){ - unsigned long next; - /* - * the should_defrag function tells us how much to skip - * bump our counter by the suggested amount - */ - next = DIV_ROUND_UP(skip, PAGE_SIZE); - i = max(i + 1, next); - continue; - } - - if (!newer_than) { - cluster = (PAGE_ALIGN(defrag_end) >> - PAGE_SHIFT) - i; - cluster = min(cluster, max_cluster); - } else { - cluster = max_cluster; - } - - if (i + cluster > ra_index) { - ra_index = max(i, ra_index); - if (ra) - page_cache_sync_readahead(inode->i_mapping, ra, - file, ra_index, cluster); - ra_index += cluster; - } - - inode_lock(inode); - if (IS_SWAPFILE(inode)) { - ret = -ETXTBSY; - } else { - if (do_compress) - BTRFS_I(inode)->defrag_compress = compress_type; - ret = cluster_pages_for_defrag(inode, pages, i, cluster); - } - if (ret < 0) { - inode_unlock(inode); - goto out_ra; - } - - defrag_count += ret; - balance_dirty_pages_ratelimited(inode->i_mapping); - inode_unlock(inode); - - if (newer_than) { - if (newer_off == (u64)-1) - break; - - if (ret > 0) - i += ret; - - newer_off = max(newer_off + 1, - (u64)i << PAGE_SHIFT); - - ret = find_new_extents(root, inode, newer_than, - &newer_off, SZ_64K); - if (!ret) { - range->start = newer_off; - i = (newer_off & new_align) >> PAGE_SHIFT; - } else { - break; - } - } else { - if (ret > 0) { - i += ret; - last_len += ret << PAGE_SHIFT; - } else { - i++; - last_len = 0; - } - } - } + atomic_inc(&fs_info->reloc_cancel_req); + btrfs_exclop_start_unlock(fs_info); - if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) { - filemap_flush(inode->i_mapping); - if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, - &BTRFS_I(inode)->runtime_flags)) - filemap_flush(inode->i_mapping); - } + if (test_bit(BTRFS_FS_RELOC_RUNNING, 
&fs_info->flags)) + wait_on_bit(&fs_info->flags, BTRFS_FS_RELOC_RUNNING, + TASK_INTERRUPTIBLE); - if (range->compress_type == BTRFS_COMPRESS_LZO) { - btrfs_set_fs_incompat(fs_info, COMPRESS_LZO); - } else if (range->compress_type == BTRFS_COMPRESS_ZSTD) { - btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD); + return -ECANCELED; } - ret = defrag_count; - -out_ra: - if (do_compress) { - inode_lock(inode); - BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE; - inode_unlock(inode); - } - if (!file) - kfree(ra); - kfree(pages); - return ret; + /* Something else is running or none */ + return -ENOTCONN; } static noinline int btrfs_ioctl_resize(struct file *file, void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + BTRFS_DEV_LOOKUP_ARGS(args); + struct btrfs_root *root = BTRFS_I(file_inode(file))->root; + struct btrfs_fs_info *fs_info = root->fs_info; u64 new_size; u64 old_size; u64 devid = 1; - struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ioctl_vol_args *vol_args; - struct btrfs_trans_handle *trans; struct btrfs_device *device = NULL; char *sizestr; - char *retptr; char *devstr = NULL; int ret = 0; int mod = 0; + bool cancel; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1613,20 +1039,26 @@ static noinline int btrfs_ioctl_resize(struct file *file, if (ret) return ret; - if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { - mnt_drop_write_file(file); - return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; - } - + /* + * Read the arguments before checking exclusivity to be able to + * distinguish regular resize and cancel + */ vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) { ret = PTR_ERR(vol_args); - goto out; + goto out_drop; } - - vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; + ret = btrfs_check_ioctl_vol_args_path(vol_args); + if (ret < 0) + goto out_free; sizestr = vol_args->name; + cancel = (strcmp("cancel", sizestr) == 0); + ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel); + if (ret) + goto out_free; + /* Exclusive operation is now claimed */ + devstr = strchr(sizestr, ':'); if (devstr) { sizestr = devstr + 1; @@ -1634,20 +1066,21 @@ static noinline int btrfs_ioctl_resize(struct file *file, devstr = vol_args->name; ret = kstrtoull(devstr, 10, &devid); if (ret) - goto out_free; + goto out_finish; if (!devid) { ret = -EINVAL; - goto out_free; + goto out_finish; } btrfs_info(fs_info, "resizing devid %llu", devid); } - device = btrfs_find_device(fs_info, devid, NULL, NULL); + args.devid = devid; + device = btrfs_find_device(fs_info->fs_devices, &args); if (!device) { btrfs_info(fs_info, "resizer unable to find device %llu", devid); ret = -ENODEV; - goto out_free; + goto out_finish; } if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { @@ -1655,12 +1088,14 @@ static noinline int btrfs_ioctl_resize(struct file *file, "resizer unable to apply on readonly device %llu", devid); ret = -EPERM; - goto out_free; + goto out_finish; } if (!strcmp(sizestr, "max")) - new_size = device->bdev->bd_inode->i_size; + new_size = bdev_nr_bytes(device->bdev); else { + char *retptr; + if (sizestr[0] == '-') { mod = -1; sizestr++; @@ -1671,13 +1106,13 @@ static noinline int btrfs_ioctl_resize(struct file *file, new_size = memparse(sizestr, &retptr); if (*retptr != '\0' || new_size == 0) { ret = -EINVAL; - goto out_free; + goto out_finish; } } if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { ret = -EPERM; - goto out_free; + goto out_finish; } old_size = 
btrfs_device_get_total_bytes(device); @@ -1685,36 +1120,35 @@ static noinline int btrfs_ioctl_resize(struct file *file, if (mod < 0) { if (new_size > old_size) { ret = -EINVAL; - goto out_free; + goto out_finish; } new_size = old_size - new_size; } else if (mod > 0) { if (new_size > ULLONG_MAX - old_size) { ret = -ERANGE; - goto out_free; + goto out_finish; } new_size = old_size + new_size; } if (new_size < SZ_256M) { ret = -EINVAL; - goto out_free; + goto out_finish; } - if (new_size > device->bdev->bd_inode->i_size) { + if (new_size > bdev_nr_bytes(device->bdev)) { ret = -EFBIG; - goto out_free; + goto out_finish; } new_size = round_down(new_size, fs_info->sectorsize); - btrfs_info_in_rcu(fs_info, "new size for %s is %llu", - rcu_str_deref(device->name), new_size); - if (new_size > old_size) { + struct btrfs_trans_handle *trans; + trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); - goto out_free; + goto out_finish; } ret = btrfs_grow_device(trans, device, new_size); btrfs_commit_transaction(trans); @@ -1722,21 +1156,28 @@ static noinline int btrfs_ioctl_resize(struct file *file, ret = btrfs_shrink_device(device, new_size); } /* equal, nothing need to do */ + if (ret == 0 && new_size != old_size) + btrfs_info(fs_info, + "resize device %s (devid %llu) from %llu to %llu", + btrfs_dev_name(device), device->devid, + old_size, new_size); +out_finish: + btrfs_exclop_finish(fs_info); out_free: kfree(vol_args); -out: - clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); +out_drop: mnt_drop_write_file(file); return ret; } -static noinline int btrfs_ioctl_snap_create_transid(struct file *file, - const char *name, unsigned long fd, int subvol, - u64 *transid, bool readonly, +static noinline int __btrfs_ioctl_snap_create(struct file *file, + struct mnt_idmap *idmap, + const char *name, unsigned long fd, bool subvol, + bool readonly, struct btrfs_qgroup_inherit *inherit) { - int namelen; int ret = 0; + struct qstr qname = QSTR_INIT(name, strlen(name)); if (!S_ISDIR(file_inode(file)->i_mode)) return -ENOTDIR; @@ -1745,46 +1186,53 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, if (ret) goto out; - namelen = strlen(name); if (strchr(name, '/')) { ret = -EINVAL; goto out_drop_write; } - if (name[0] == '.' && - (namelen == 1 || (name[1] == '.' && namelen == 2))) { + if (qname.name[0] == '.' && + (qname.len == 1 || (qname.name[1] == '.' 
&& qname.len == 2))) { ret = -EEXIST; goto out_drop_write; } if (subvol) { - ret = btrfs_mksubvol(&file->f_path, name, namelen, - NULL, transid, readonly, inherit); + ret = btrfs_mksubvol(file_dentry(file), idmap, &qname, NULL, + readonly, inherit); } else { - struct fd src = fdget(fd); + CLASS(fd, src)(fd); struct inode *src_inode; - if (!src.file) { + if (fd_empty(src)) { ret = -EINVAL; goto out_drop_write; } - src_inode = file_inode(src.file); + src_inode = file_inode(fd_file(src)); if (src_inode->i_sb != file_inode(file)->i_sb) { btrfs_info(BTRFS_I(file_inode(file))->root->fs_info, "Snapshot src from another FS"); ret = -EXDEV; - } else if (!inode_owner_or_capable(src_inode)) { + } else if (!inode_owner_or_capable(idmap, src_inode)) { /* * Subvolume creation is not restricted, but snapshots * are limited to own subvolumes only */ ret = -EPERM; + } else if (btrfs_ino(BTRFS_I(src_inode)) != BTRFS_FIRST_FREE_OBJECTID) { + /* + * Snapshots must be made with the src_inode referring + * to the subvolume inode, otherwise the permission + * checking above is useless because we may have + * permission on a lower directory but not the subvol + * itself. + */ + ret = -EINVAL; } else { - ret = btrfs_mksubvol(&file->f_path, name, namelen, - BTRFS_I(src_inode)->root, - transid, readonly, inherit); + ret = btrfs_mksnapshot(file_dentry(file), idmap, &qname, + BTRFS_I(src_inode)->root, + readonly, inherit); } - fdput(src); } out_drop_write: mnt_drop_write_file(file); @@ -1793,7 +1241,7 @@ out: } static noinline int btrfs_ioctl_snap_create(struct file *file, - void __user *arg, int subvol) + void __user *arg, bool subvol) { struct btrfs_ioctl_vol_args *vol_args; int ret; @@ -1804,23 +1252,24 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); - vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; + ret = btrfs_check_ioctl_vol_args_path(vol_args); + if (ret < 0) + goto out; - ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, - vol_args->fd, subvol, - NULL, false, NULL); + ret = __btrfs_ioctl_snap_create(file, file_mnt_idmap(file), + vol_args->name, vol_args->fd, subvol, + false, NULL); +out: kfree(vol_args); return ret; } static noinline int btrfs_ioctl_snap_create_v2(struct file *file, - void __user *arg, int subvol) + void __user *arg, bool subvol) { struct btrfs_ioctl_vol_args_v2 *vol_args; int ret; - u64 transid = 0; - u64 *ptr = NULL; bool readonly = false; struct btrfs_qgroup_inherit *inherit = NULL; @@ -1830,21 +1279,22 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); - vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; + ret = btrfs_check_ioctl_vol_args2_subvol_name(vol_args); + if (ret < 0) + goto free_args; - if (vol_args->flags & - ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY | - BTRFS_SUBVOL_QGROUP_INHERIT)) { + if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ARGS_MASK) { ret = -EOPNOTSUPP; goto free_args; } - if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) - ptr = &transid; if (vol_args->flags & BTRFS_SUBVOL_RDONLY) readonly = true; if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { - if (vol_args->size > PAGE_SIZE) { + struct btrfs_fs_info *fs_info = inode_to_fs_info(file_inode(file)); + + if (vol_args->size < sizeof(*inherit) || + vol_args->size > PAGE_SIZE) { ret = -EINVAL; goto free_args; } @@ -1853,20 +1303,17 @@ static noinline int btrfs_ioctl_snap_create_v2(struct 
file *file, ret = PTR_ERR(inherit); goto free_args; } + + ret = btrfs_qgroup_check_inherit(fs_info, inherit, vol_args->size); + if (ret < 0) + goto free_inherit; } - ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, - vol_args->fd, subvol, ptr, - readonly, inherit); + ret = __btrfs_ioctl_snap_create(file, file_mnt_idmap(file), + vol_args->name, vol_args->fd, subvol, + readonly, inherit); if (ret) goto free_inherit; - - if (ptr && copy_to_user(arg + - offsetof(struct btrfs_ioctl_vol_args_v2, - transid), - ptr, sizeof(*ptr))) - ret = -EFAULT; - free_inherit: kfree(inherit); free_args: @@ -1874,16 +1321,15 @@ free_args: return ret; } -static noinline int btrfs_ioctl_subvol_getflags(struct file *file, +static noinline int btrfs_ioctl_subvol_getflags(struct btrfs_inode *inode, void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; + struct btrfs_fs_info *fs_info = root->fs_info; int ret = 0; u64 flags = 0; - if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) + if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) return -EINVAL; down_read(&fs_info->subvol_sem); @@ -1901,14 +1347,14 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; u64 root_flags; u64 flags; int ret = 0; - if (!inode_owner_or_capable(inode)) + if (!inode_owner_or_capable(file_mnt_idmap(file), inode)) return -EPERM; ret = mnt_want_write_file(file); @@ -1925,11 +1371,6 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, goto out_drop_write; } - if (flags & BTRFS_SUBVOL_CREATE_ASYNC) { - ret = -EINVAL; - goto out_drop_write; - } - if (flags & ~BTRFS_SUBVOL_RDONLY) { ret = -EOPNOTSUPP; goto out_drop_write; @@ -1959,7 +1400,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, spin_unlock(&root->root_item_lock); btrfs_warn(fs_info, "Attempt to set subvolume %llu read-write during send", - root->root_key.objectid); + btrfs_root_id(root)); ret = -EPERM; goto out_drop_sem; } @@ -1991,8 +1432,8 @@ out: return ret; } -static noinline int key_in_sk(struct btrfs_key *key, - struct btrfs_ioctl_search_key *sk) +static noinline bool key_in_sk(const struct btrfs_key *key, + const struct btrfs_ioctl_search_key *sk) { struct btrfs_key test; int ret; @@ -2003,7 +1444,7 @@ static noinline int key_in_sk(struct btrfs_key *key, ret = btrfs_comp_cpu_keys(key, &test); if (ret < 0) - return 0; + return false; test.objectid = sk->max_objectid; test.type = sk->max_type; @@ -2011,14 +1452,14 @@ static noinline int key_in_sk(struct btrfs_key *key, ret = btrfs_comp_cpu_keys(key, &test); if (ret > 0) - return 0; - return 1; + return false; + return true; } static noinline int copy_to_sk(struct btrfs_path *path, struct btrfs_key *key, - struct btrfs_ioctl_search_key *sk, - size_t *buf_size, + const struct btrfs_ioctl_search_key *sk, + u64 *buf_size, char __user *ubuf, unsigned long *sk_offset, int *num_found) @@ -2046,7 +1487,7 @@ static noinline int copy_to_sk(struct btrfs_path *path, for (i = slot; i < nritems; i++) { item_off = btrfs_item_ptr_offset(leaf, i); - item_len = btrfs_item_size_nr(leaf, i); + item_len = btrfs_item_size(leaf, i); btrfs_item_key_to_cpu(leaf, key, i); if 
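On the caller side of the snapshot path above, the v2 ioctl takes the destination directory fd plus a source subvolume fd, which per the new check must be the subvolume root. A minimal sketch; /mnt, /mnt/subvol and "snap-ro" are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_vol_args_v2 args = { 0 };
	int dst = open("/mnt", O_RDONLY);        /* parent directory */
	int src = open("/mnt/subvol", O_RDONLY); /* must be a subvolume root */

	if (dst < 0 || src < 0)
		return 1;
	args.fd = src;
	args.flags = BTRFS_SUBVOL_RDONLY; /* create a read-only snapshot */
	strncpy(args.name, "snap-ro", BTRFS_SUBVOL_NAME_MAX);
	if (ioctl(dst, BTRFS_IOC_SNAP_CREATE_V2, &args) < 0)
		perror("BTRFS_IOC_SNAP_CREATE_V2");
	return 0;
}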
(!key_in_sk(key, sk)) @@ -2074,14 +1515,19 @@ static noinline int copy_to_sk(struct btrfs_path *path, } sh.objectid = key->objectid; - sh.offset = key->offset; sh.type = key->type; + sh.offset = key->offset; sh.len = item_len; sh.transid = found_transid; - /* copy search result header */ - if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) { - ret = -EFAULT; + /* + * Copy search result header. If we fault then loop again so we + * can fault in the pages and -EFAULT there if there's a + * problem. Otherwise we'll fault and then copy the buffer in + * properly this next time through. + */ + if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) { + ret = 0; goto out; } @@ -2089,10 +1535,14 @@ if (item_len) { char __user *up = ubuf + *sk_offset; - /* copy the item */ - if (read_extent_buffer_to_user(leaf, up, - item_off, item_len)) { - ret = -EFAULT; + /* + * Copy the item, same behavior as above, but reset the + * *sk_offset so we copy the full thing again. + */ + if (read_extent_buffer_to_user_nofault(leaf, up, + item_off, item_len)) { + ret = 0; + *sk_offset -= sizeof(sh); goto out; } @@ -2139,15 +1589,14 @@ out: return ret; } -static noinline int search_ioctl(struct inode *inode, +static noinline int search_ioctl(struct btrfs_root *root, struct btrfs_ioctl_search_key *sk, - size_t *buf_size, + u64 *buf_size, char __user *ubuf) { - struct btrfs_fs_info *info = btrfs_sb(inode->i_sb); - struct btrfs_root *root; + struct btrfs_fs_info *info = root->fs_info; struct btrfs_key key; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); int ret; int num_found = 0; unsigned long sk_offset = 0; @@ -2162,17 +1611,13 @@ static noinline int search_ioctl(struct inode *inode, return -ENOMEM; if (sk->tree_id == 0) { - /* search the root of the inode that was passed */ - root = BTRFS_I(inode)->root; + /* Search the root that we got passed. */ + root = btrfs_grab_root(root); } else { - key.objectid = sk->tree_id; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - root = btrfs_read_fs_root_no_name(info, &key); - if (IS_ERR(root)) { - btrfs_free_path(path); + /* Look up the root from the arguments. */ + root = btrfs_get_fs_root(info, sk->tree_id, true); + if (IS_ERR(root)) return PTR_ERR(root); - } } key.objectid = sk->min_objectid; @@ -2180,12 +1625,19 @@ static noinline int search_ioctl(struct inode *inode, key.offset = sk->min_offset; while (1) { - ret = btrfs_search_forward(root, &key, path, sk->min_transid); - if (ret != 0) { - if (ret > 0) - ret = 0; - goto err; + /* + * Ensure that the whole user buffer is faulted in at sub-page + * granularity, otherwise the loop may live-lock. + */ + if (fault_in_subpage_writeable(ubuf + sk_offset, *buf_size - sk_offset)) { + ret = -EFAULT; + break; } + + ret = btrfs_search_forward(root, &key, path, sk->min_transid); + if (ret) + break; + ret = copy_to_sk(path, &key, sk, buf_size, ubuf, &sk_offset, &num_found); btrfs_release_path(path); @@ -2193,35 +1645,32 @@ break; } + /* Normalize return values from btrfs_search_forward() and copy_to_sk(). 
*/ if (ret > 0) ret = 0; -err: + sk->nr_items = num_found; - btrfs_free_path(path); + btrfs_put_root(root); return ret; } -static noinline int btrfs_ioctl_tree_search(struct file *file, - void __user *argp) +static noinline int btrfs_ioctl_tree_search(struct btrfs_root *root, + void __user *argp) { - struct btrfs_ioctl_search_args __user *uargs; + struct btrfs_ioctl_search_args __user *uargs = argp; struct btrfs_ioctl_search_key sk; - struct inode *inode; int ret; - size_t buf_size; + u64 buf_size; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - uargs = (struct btrfs_ioctl_search_args __user *)argp; - if (copy_from_user(&sk, &uargs->key, sizeof(sk))) return -EFAULT; buf_size = sizeof(uargs->buf); - inode = file_inode(file); - ret = search_ioctl(inode, &sk, &buf_size, uargs->buf); + ret = search_ioctl(root, &sk, &buf_size, uargs->buf); /* * In the origin implementation an overflow is handled by returning a @@ -2235,21 +1684,19 @@ static noinline int btrfs_ioctl_tree_search(struct file *file, return ret; } -static noinline int btrfs_ioctl_tree_search_v2(struct file *file, +static noinline int btrfs_ioctl_tree_search_v2(struct btrfs_root *root, void __user *argp) { - struct btrfs_ioctl_search_args_v2 __user *uarg; + struct btrfs_ioctl_search_args_v2 __user *uarg = argp; struct btrfs_ioctl_search_args_v2 args; - struct inode *inode; int ret; - size_t buf_size; - const size_t buf_limit = SZ_16M; + u64 buf_size; + const u64 buf_limit = SZ_16M; if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* copy search header and buffer size */ - uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp; if (copy_from_user(&args, uarg, sizeof(args))) return -EFAULT; @@ -2259,8 +1706,7 @@ static noinline int btrfs_ioctl_tree_search_v2(struct file *file, if (buf_size > buf_limit) buf_size = buf_limit; - inode = file_inode(file); - ret = search_ioctl(inode, &args.key, &buf_size, + ret = search_ioctl(root, &args.key, &buf_size, (char __user *)(&uarg->buf[0])); if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key))) ret = -EFAULT; @@ -2287,7 +1733,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, int total_len = 0; struct btrfs_inode_ref *iref; struct extent_buffer *l; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); if (dirid == BTRFS_FIRST_FREE_OBJECTID) { name[0]='\0'; @@ -2300,12 +1746,10 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1]; - key.objectid = tree_id; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - root = btrfs_read_fs_root_no_name(info, &key); + root = btrfs_get_fs_root(info, tree_id, true); if (IS_ERR(root)) { ret = PTR_ERR(root); + root = NULL; goto out; } @@ -2314,23 +1758,16 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, key.offset = (u64)-1; while (1) { - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + ret = btrfs_search_backwards(root, &key, path); if (ret < 0) goto out; else if (ret > 0) { - ret = btrfs_previous_item(root, path, dirid, - BTRFS_INODE_REF_KEY); - if (ret < 0) - goto out; - else if (ret > 0) { - ret = -ENOENT; - goto out; - } + ret = -ENOENT; + goto out; } l = path->nodes[0]; slot = path->slots[0]; - btrfs_item_key_to_cpu(l, &key, slot); iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref); len = btrfs_inode_ref_name_len(l, iref); @@ -2356,27 +1793,26 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, name[total_len] = '\0'; ret = 0; out: - btrfs_free_path(path); + btrfs_put_root(root); 
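A minimal userspace sketch of the TREE_SEARCH interface reworked above (CAP_SYS_ADMIN required; /mnt is a placeholder); it lists ROOT_ITEMs from the root tree and walks the packed result buffer of (search header, item data) pairs:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>
#include <linux/btrfs_tree.h>

int main(void)
{
	struct btrfs_ioctl_search_args args;
	struct btrfs_ioctl_search_key *sk = &args.key;
	unsigned long off = 0;
	int fd = open("/mnt", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(&args, 0, sizeof(args));
	sk->tree_id = BTRFS_ROOT_TREE_OBJECTID;
	sk->min_type = BTRFS_ROOT_ITEM_KEY;
	sk->max_type = BTRFS_ROOT_ITEM_KEY;
	sk->max_objectid = (__u64)-1;
	sk->max_offset = (__u64)-1;
	sk->max_transid = (__u64)-1;
	sk->nr_items = 4096;
	if (ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args) < 0) {
		perror("BTRFS_IOC_TREE_SEARCH");
		return 1;
	}
	/* Headers may be unaligned in the buffer, so copy them out. */
	for (unsigned int i = 0; i < sk->nr_items; i++) {
		struct btrfs_ioctl_search_header sh;

		memcpy(&sh, args.buf + off, sizeof(sh));
		printf("root %llu, item len %u\n",
		       (unsigned long long)sh.objectid, sh.len);
		off += sizeof(sh) + sh.len;
	}
	close(fd);
	return 0;
}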
return ret; } -static int btrfs_search_path_in_tree_user(struct inode *inode, +static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap, + struct inode *inode, struct btrfs_ioctl_ino_lookup_user_args *args) { struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; - struct super_block *sb = inode->i_sb; - struct btrfs_key upper_limit = BTRFS_I(inode)->location; - u64 treeid = BTRFS_I(inode)->root->root_key.objectid; + u64 upper_limit = btrfs_ino(BTRFS_I(inode)); + u64 treeid = btrfs_root_id(BTRFS_I(inode)->root); u64 dirid = args->dirid; unsigned long item_off; unsigned long item_len; struct btrfs_inode_ref *iref; struct btrfs_root_ref *rref; - struct btrfs_root *root; - struct btrfs_path *path; - struct btrfs_key key, key2; + struct btrfs_root *root = NULL; + BTRFS_PATH_AUTO_FREE(path); + struct btrfs_key key; struct extent_buffer *leaf; - struct inode *temp_inode; char *ptr; int slot; int len; @@ -2391,39 +1827,29 @@ static int btrfs_search_path_in_tree_user(struct inode *inode, * If the bottom subvolume does not exist directly under upper_limit, * construct the path in from the bottom up. */ - if (dirid != upper_limit.objectid) { + if (dirid != upper_limit) { ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1]; - key.objectid = treeid; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - root = btrfs_read_fs_root_no_name(fs_info, &key); - if (IS_ERR(root)) { - ret = PTR_ERR(root); - goto out; - } + root = btrfs_get_fs_root(fs_info, treeid, true); + if (IS_ERR(root)) + return PTR_ERR(root); key.objectid = dirid; key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; while (1) { - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) { - goto out; - } else if (ret > 0) { - ret = btrfs_previous_item(root, path, dirid, - BTRFS_INODE_REF_KEY); - if (ret < 0) { - goto out; - } else if (ret > 0) { - ret = -ENOENT; - goto out; - } + struct btrfs_inode *temp_inode; + + ret = btrfs_search_backwards(root, &key, path); + if (ret < 0) + goto out_put; + else if (ret > 0) { + ret = -ENOENT; + goto out_put; } leaf = path->nodes[0]; slot = path->slots[0]; - btrfs_item_key_to_cpu(leaf, &key, slot); iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref); len = btrfs_inode_ref_name_len(leaf, iref); @@ -2431,51 +1857,39 @@ static int btrfs_search_path_in_tree_user(struct inode *inode, total_len += len + 1; if (ptr < args->path) { ret = -ENAMETOOLONG; - goto out; + goto out_put; } *(ptr + len) = '/'; read_extent_buffer(leaf, ptr, (unsigned long)(iref + 1), len); - /* Check the read+exec permission of this directory */ - ret = btrfs_previous_item(root, path, dirid, - BTRFS_INODE_ITEM_KEY); - if (ret < 0) { - goto out; - } else if (ret > 0) { - ret = -ENOENT; - goto out; - } - - leaf = path->nodes[0]; - slot = path->slots[0]; - btrfs_item_key_to_cpu(leaf, &key2, slot); - if (key2.objectid != dirid) { - ret = -ENOENT; - goto out; - } - - temp_inode = btrfs_iget(sb, &key2, root, NULL); + /* + * We don't need the path anymore, so release it and + * avoid deadlocks and lockdep warnings in case + * btrfs_iget() needs to lookup the inode from its root + * btree and lock the same leaf. + */ + btrfs_release_path(path); + temp_inode = btrfs_iget(key.offset, root); if (IS_ERR(temp_inode)) { ret = PTR_ERR(temp_inode); - goto out; - } - ret = inode_permission(temp_inode, MAY_READ | MAY_EXEC); - iput(temp_inode); - if (ret) { - ret = -EACCES; - goto out; + goto out_put; } + /* Check the read+exec permission of this directory. 
*/ + ret = inode_permission(idmap, &temp_inode->vfs_inode, + MAY_READ | MAY_EXEC); + iput(&temp_inode->vfs_inode); + if (ret) + goto out_put; - if (key.offset == upper_limit.objectid) + if (key.offset == upper_limit) break; if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) { ret = -EACCES; - goto out; + goto out_put; } - btrfs_release_path(path); key.objectid = key.offset; key.offset = (u64)-1; dirid = key.objectid; @@ -2483,34 +1897,31 @@ static int btrfs_search_path_in_tree_user(struct inode *inode, memmove(args->path, ptr, total_len); args->path[total_len] = '\0'; + btrfs_put_root(root); + root = NULL; btrfs_release_path(path); } /* Get the bottom subvolume's name from ROOT_REF */ - root = fs_info->tree_root; key.objectid = treeid; key.type = BTRFS_ROOT_REF_KEY; key.offset = args->treeid; - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) { - goto out; - } else if (ret > 0) { - ret = -ENOENT; - goto out; - } + ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); + if (ret < 0) + return ret; + else if (ret > 0) + return -ENOENT; leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key, slot); item_off = btrfs_item_ptr_offset(leaf, slot); - item_len = btrfs_item_size_nr(leaf, slot); + item_len = btrfs_item_size(leaf, slot); /* Check if dirid in ROOT_REF corresponds to passed dirid */ rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref); - if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) { - ret = -EINVAL; - goto out; - } + if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) + return -EINVAL; /* Copy subvolume's name */ item_off += sizeof(struct btrfs_root_ref); @@ -2518,30 +1929,28 @@ static int btrfs_search_path_in_tree_user(struct inode *inode, read_extent_buffer(leaf, args->name, item_off, item_len); args->name[item_len] = 0; -out: - btrfs_free_path(path); +out_put: + btrfs_put_root(root); + return ret; } -static noinline int btrfs_ioctl_ino_lookup(struct file *file, +static noinline int btrfs_ioctl_ino_lookup(struct btrfs_root *root, void __user *argp) { struct btrfs_ioctl_ino_lookup_args *args; - struct inode *inode; int ret = 0; args = memdup_user(argp, sizeof(*args)); if (IS_ERR(args)) return PTR_ERR(args); - inode = file_inode(file); - /* * Unprivileged query to obtain the containing subvolume root id. The * path is reset so it's consistent with btrfs_search_path_in_tree. 
*/ if (args->treeid == 0) - args->treeid = BTRFS_I(inode)->root->root_key.objectid; + args->treeid = btrfs_root_id(root); if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) { args->name[0] = 0; @@ -2553,7 +1962,7 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file, goto out; } - ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info, + ret = btrfs_search_path_in_tree(root->fs_info, args->treeid, args->objectid, args->name); @@ -2590,7 +1999,7 @@ static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp) inode = file_inode(file); if (args->dirid == BTRFS_FIRST_FREE_OBJECTID && - BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) { + btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { /* * The subvolume does not exist under fd with which this is * called @@ -2599,7 +2008,7 @@ static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp) return -EACCES; } - ret = btrfs_search_path_in_tree_user(inode, args); + ret = btrfs_search_path_in_tree_user(file_mnt_idmap(file), inode, args); if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) ret = -EFAULT; @@ -2609,7 +2018,7 @@ static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp) } /* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */ -static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp) +static int btrfs_ioctl_get_subvol_info(struct inode *inode, void __user *argp) { struct btrfs_ioctl_get_subvol_info_args *subvol_info; struct btrfs_fs_info *fs_info; @@ -2621,7 +2030,6 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp) struct extent_buffer *leaf; unsigned long item_off; unsigned long item_len; - struct inode *inode; int slot; int ret = 0; @@ -2635,17 +2043,14 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp) return -ENOMEM; } - inode = file_inode(file); fs_info = BTRFS_I(inode)->root->fs_info; /* Get root_item of inode's subvolume */ - key.objectid = BTRFS_I(inode)->root->root_key.objectid; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - root = btrfs_read_fs_root_no_name(fs_info, &key); + key.objectid = btrfs_root_id(BTRFS_I(inode)->root); + root = btrfs_get_fs_root(fs_info, key.objectid, true); if (IS_ERR(root)) { ret = PTR_ERR(root); - goto out; + goto out_free; } root_item = &root->root_item; @@ -2678,19 +2083,17 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp) if (key.objectid != BTRFS_FS_TREE_OBJECTID) { /* Search root tree for ROOT_BACKREF of this subvolume */ - root = fs_info->tree_root; - key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = 0; - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); if (ret < 0) { goto out; } else if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { - ret = btrfs_next_leaf(root, path); + ret = btrfs_next_leaf(fs_info->tree_root, path); if (ret < 0) { goto out; - } else if (ret > 0) { + } else if (unlikely(ret > 0)) { ret = -EUCLEAN; goto out; } @@ -2708,7 +2111,7 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp) item_off = btrfs_item_ptr_offset(leaf, slot) + sizeof(struct btrfs_root_ref); - item_len = btrfs_item_size_nr(leaf, slot) + item_len = btrfs_item_size(leaf, slot) - sizeof(struct btrfs_root_ref); read_extent_buffer(leaf, subvol_info->name, item_off, item_len); @@ -2718,12 +2121,16 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user 
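The unprivileged treeid == 0 form handled above is how userspace typically maps an fd to its containing subvolume; a minimal sketch with a placeholder path:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>
#include <linux/btrfs_tree.h>

int main(void)
{
	struct btrfs_ioctl_ino_lookup_args args = { 0 };
	int fd = open("/mnt/some/file", O_RDONLY); /* placeholder */

	if (fd < 0)
		return 1;
	args.treeid = 0; /* 0: report the containing subvolume's id */
	args.objectid = BTRFS_FIRST_FREE_OBJECTID;
	if (ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args) < 0) {
		perror("BTRFS_IOC_INO_LOOKUP");
		return 1;
	}
	printf("subvolume id %llu\n", (unsigned long long)args.treeid);
	return 0;
}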
*argp) } } + btrfs_free_path(path); + path = NULL; if (copy_to_user(argp, subvol_info, sizeof(*subvol_info))) ret = -EFAULT; out: + btrfs_put_root(root); +out_free: btrfs_free_path(path); - kzfree(subvol_info); + kfree(subvol_info); return ret; } @@ -2731,15 +2138,14 @@ out: * Return ROOT_REF information of the subvolume containing this inode * except the subvolume name. */ -static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp) +static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root, + void __user *argp) { struct btrfs_ioctl_get_subvol_rootref_args *rootrefs; struct btrfs_root_ref *rref; - struct btrfs_root *root; struct btrfs_path *path; struct btrfs_key key; struct extent_buffer *leaf; - struct inode *inode; u64 objectid; int slot; int ret; @@ -2755,15 +2161,13 @@ static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp) return PTR_ERR(rootrefs); } - inode = file_inode(file); - root = BTRFS_I(inode)->root->fs_info->tree_root; - objectid = BTRFS_I(inode)->root->root_key.objectid; - + objectid = btrfs_root_id(root); key.objectid = objectid; key.type = BTRFS_ROOT_REF_KEY; key.offset = rootrefs->min_treeid; found = 0; + root = root->fs_info->tree_root; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) { goto out; @@ -2772,7 +2176,7 @@ static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp) ret = btrfs_next_leaf(root, path); if (ret < 0) { goto out; - } else if (ret > 0) { + } else if (unlikely(ret > 0)) { ret = -EUCLEAN; goto out; } @@ -2801,13 +2205,15 @@ static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp) ret = btrfs_next_item(root, path); if (ret < 0) { goto out; - } else if (ret > 0) { + } else if (unlikely(ret > 0)) { ret = -EUCLEAN; goto out; } } out: + btrfs_free_path(path); + if (!ret || ret == -EOVERFLOW) { rootrefs->num_items = found; /* update min_treeid for next search */ @@ -2819,57 +2225,162 @@ out: } kfree(rootrefs); - btrfs_free_path(path); return ret; } static noinline int btrfs_ioctl_snap_destroy(struct file *file, - void __user *arg) + void __user *arg, + bool destroy_v2) { struct dentry *parent = file->f_path.dentry; - struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb); struct dentry *dentry; struct inode *dir = d_inode(parent); + struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); struct inode *inode; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *dest = NULL; - struct btrfs_ioctl_vol_args *vol_args; - int namelen; - int err = 0; + struct btrfs_ioctl_vol_args *vol_args = NULL; + struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL; + struct mnt_idmap *idmap = file_mnt_idmap(file); + char *subvol_name, *subvol_name_ptr = NULL; + int ret = 0; + bool destroy_parent = false; - if (!S_ISDIR(dir->i_mode)) - return -ENOTDIR; + /* We don't support snapshots with extent tree v2 yet. 
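The caller side of GET_SUBVOL_INFO, filled in above from the root item and ROOT_BACKREF, is a plain read-only query; a minimal sketch, with /mnt/subvol as a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_get_subvol_info_args info = { 0 };
	int fd = open("/mnt/subvol", O_RDONLY); /* placeholder */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BTRFS_IOC_GET_SUBVOL_INFO, &info) < 0) {
		perror("BTRFS_IOC_GET_SUBVOL_INFO");
		return 1;
	}
	printf("id=%llu parent=%llu name='%s' generation=%llu\n",
	       (unsigned long long)info.treeid,
	       (unsigned long long)info.parent_id,
	       info.name,
	       (unsigned long long)info.generation);
	return 0;
}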
*/ + if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { + btrfs_err(fs_info, + "extent tree v2 doesn't support snapshot deletion yet"); + return -EOPNOTSUPP; + } - vol_args = memdup_user(arg, sizeof(*vol_args)); - if (IS_ERR(vol_args)) - return PTR_ERR(vol_args); + if (destroy_v2) { + vol_args2 = memdup_user(arg, sizeof(*vol_args2)); + if (IS_ERR(vol_args2)) + return PTR_ERR(vol_args2); - vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; - namelen = strlen(vol_args->name); - if (strchr(vol_args->name, '/') || - strncmp(vol_args->name, "..", namelen) == 0) { - err = -EINVAL; - goto out; - } + if (vol_args2->flags & ~BTRFS_SUBVOL_DELETE_ARGS_MASK) { + ret = -EOPNOTSUPP; + goto out; + } - err = mnt_want_write_file(file); - if (err) - goto out; + /* + * If SPEC_BY_ID is not set, we are looking for the subvolume by + * name, same as v1 currently does. + */ + if (!(vol_args2->flags & BTRFS_SUBVOL_SPEC_BY_ID)) { + ret = btrfs_check_ioctl_vol_args2_subvol_name(vol_args2); + if (ret < 0) + goto out; + subvol_name = vol_args2->name; + ret = mnt_want_write_file(file); + if (ret) + goto out; + } else { + struct inode *old_dir; - err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT); - if (err == -EINTR) - goto out_drop_write; - dentry = lookup_one_len(vol_args->name, parent, namelen); - if (IS_ERR(dentry)) { - err = PTR_ERR(dentry); - goto out_unlock_dir; + if (vol_args2->subvolid < BTRFS_FIRST_FREE_OBJECTID) { + ret = -EINVAL; + goto out; + } + + ret = mnt_want_write_file(file); + if (ret) + goto out; + + dentry = btrfs_get_dentry(fs_info->sb, + BTRFS_FIRST_FREE_OBJECTID, + vol_args2->subvolid, 0); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); + goto out_drop_write; + } + + /* + * Change the default parent since the subvolume being + * deleted can be outside of the current mount point. + */ + parent = btrfs_get_parent(dentry); + + /* + * At this point dentry->d_name can point to '/' if the + * subvolume we want to destroy is outside of the + * current mount point, so we need to release the + * current dentry and execute the lookup to return a new + * one with ->d_name pointing to the + * <mount point>/subvol_name. + */ + dput(dentry); + if (IS_ERR(parent)) { + ret = PTR_ERR(parent); + goto out_drop_write; + } + old_dir = dir; + dir = d_inode(parent); + + /* + * If v2 was used with SPEC_BY_ID, a new parent was + * allocated since the subvolume can be outside of the + * current mount point. Later on we need to release this + * new parent dentry. + */ + destroy_parent = true; + + /* + * On idmapped mounts, deletion via subvolid is + * restricted to subvolumes that are immediate + * descendants of the inode referenced by the file + * descriptor in the ioctl. Otherwise the idmapping + * could potentially be abused to delete subvolumes + * anywhere in the filesystem the user wouldn't be able + * to delete without an idmapped mount. 
+ */ + if (old_dir != dir && idmap != &nop_mnt_idmap) { + ret = -EOPNOTSUPP; + goto free_parent; + } + + subvol_name_ptr = btrfs_get_subvol_name_from_objectid( + fs_info, vol_args2->subvolid); + if (IS_ERR(subvol_name_ptr)) { + ret = PTR_ERR(subvol_name_ptr); + goto free_parent; + } + /* subvol_name_ptr is already nul terminated */ + subvol_name = (char *)kbasename(subvol_name_ptr); + } + } else { + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); + + ret = btrfs_check_ioctl_vol_args_path(vol_args); + if (ret < 0) + goto out; + + subvol_name = vol_args->name; + + ret = mnt_want_write_file(file); + if (ret) + goto out; } - if (d_really_is_negative(dentry)) { - err = -ENOENT; - goto out_dput; + if (strchr(subvol_name, '/') || + strcmp(subvol_name, "..") == 0) { + ret = -EINVAL; + goto free_subvol_name; + } + + if (!S_ISDIR(dir->i_mode)) { + ret = -ENOTDIR; + goto free_subvol_name; + } + + dentry = start_removing_killable(idmap, parent, &QSTR(subvol_name)); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); + goto out_end_removing; } inode = d_inode(dentry); @@ -2888,9 +2399,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, * Users who want to delete empty subvols should try * rmdir(2). */ - err = -EPERM; + ret = -EPERM; if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED)) - goto out_dput; + goto out_end_removing; /* * Do not allow deletion if the parent dir is the same @@ -2899,47 +2410,51 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, * of the subvol, not a random directory contained * within it. */ - err = -EINVAL; + ret = -EINVAL; if (root == dest) - goto out_dput; + goto out_end_removing; - err = inode_permission(inode, MAY_WRITE | MAY_EXEC); - if (err) - goto out_dput; + ret = inode_permission(idmap, inode, MAY_WRITE | MAY_EXEC); + if (ret) + goto out_end_removing; } /* check if subvolume may be deleted by a user */ - err = btrfs_may_delete(dir, dentry, 1); - if (err) - goto out_dput; + ret = btrfs_may_delete(idmap, dir, dentry, 1); + if (ret) + goto out_end_removing; if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { - err = -EINVAL; - goto out_dput; + ret = -EINVAL; + goto out_end_removing; } - inode_lock(inode); - err = btrfs_delete_subvolume(dir, dentry); - inode_unlock(inode); - if (!err) - d_delete(dentry); - -out_dput: - dput(dentry); -out_unlock_dir: - inode_unlock(dir); + btrfs_inode_lock(BTRFS_I(inode), 0); + ret = btrfs_delete_subvolume(BTRFS_I(dir), dentry); + btrfs_inode_unlock(BTRFS_I(inode), 0); + if (!ret) + d_delete_notify(dir, dentry); + +out_end_removing: + end_removing(dentry); +free_subvol_name: + kfree(subvol_name_ptr); +free_parent: + if (destroy_parent) + dput(parent); out_drop_write: mnt_drop_write_file(file); out: + kfree(vol_args2); kfree(vol_args); - return err; + return ret; } static int btrfs_ioctl_defrag(struct file *file, void __user *argp) { struct inode *inode = file_inode(file); struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_ioctl_defrag_range_args *range; + struct btrfs_ioctl_defrag_range_args range = {0}; int ret; ret = mnt_want_write_file(file); @@ -2966,38 +2481,48 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) * running and allows defrag on files open in read-only mode. 
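From userspace, the SPEC_BY_ID deletion path above amounts to filling subvolid instead of name; a minimal sketch (subvolume id 257 is a placeholder, e.g. obtained via BTRFS_IOC_INO_LOOKUP):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_vol_args_v2 args = { 0 };
	int fd = open("/mnt", O_RDONLY); /* any directory on the fs */

	if (fd < 0)
		return 1;
	args.flags = BTRFS_SUBVOL_SPEC_BY_ID;
	args.subvolid = 257; /* placeholder subvolume id */
	if (ioctl(fd, BTRFS_IOC_SNAP_DESTROY_V2, &args) < 0)
		perror("BTRFS_IOC_SNAP_DESTROY_V2");
	return 0;
}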
*/ if (!capable(CAP_SYS_ADMIN) && - inode_permission(inode, MAY_WRITE)) { + inode_permission(&nop_mnt_idmap, inode, MAY_WRITE)) { ret = -EPERM; goto out; } - range = kzalloc(sizeof(*range), GFP_KERNEL); - if (!range) { - ret = -ENOMEM; + /* + * Don't allow defrag on pre-content watched files, as it could + * populate the page cache with 0's via readahead. + */ + if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) { + ret = -EINVAL; goto out; } if (argp) { - if (copy_from_user(range, argp, - sizeof(*range))) { + if (copy_from_user(&range, argp, sizeof(range))) { ret = -EFAULT; - kfree(range); goto out; } - /* compression requires us to start the IO */ - if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { - range->flags |= BTRFS_DEFRAG_RANGE_START_IO; - range->extent_thresh = (u32)-1; + if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) { + ret = -EOPNOTSUPP; + goto out; + } + if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS) && + (range.flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS)) { + ret = -EINVAL; + goto out; + } + /* Compression or no-compression requires us to start the IO. */ + if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS) || + (range.flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS)) { + range.flags |= BTRFS_DEFRAG_RANGE_START_IO; + range.extent_thresh = (u32)-1; } } else { - /* the rest are all set to zero by kzalloc */ - range->len = (u64)-1; + /* The rest are all zeroed by the on-stack initializer. */ + range.len = (u64)-1; } - ret = btrfs_defrag_file(file_inode(file), file, - range, BTRFS_OLDEST_GENERATION, 0); + ret = btrfs_defrag_file(BTRFS_I(file_inode(file)), &file->f_ra, + &range, BTRFS_OLDEST_GENERATION, 0); if (ret > 0) ret = 0; - kfree(range); break; default: ret = -EINVAL; @@ -3010,13 +2535,36 @@ out: static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_vol_args *vol_args; + bool restore_op = false; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) - return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; + if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { + btrfs_err(fs_info, "device add not supported on extent tree v2 yet"); + return -EINVAL; + } + + if (fs_info->fs_devices->temp_fsid) { + btrfs_err(fs_info, + "device add not supported on cloned temp-fsid mount"); + return -EINVAL; + } + + if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD)) { + if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD)) + return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; + + /* + * We can do the device add because we have a paused balance, + * change the exclusive op type and remember we should bring + * back the paused balance + */ + fs_info->exclusive_operation = BTRFS_EXCLOP_DEV_ADD; + btrfs_exclop_start_unlock(fs_info); + restore_op = true; + } vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) { @@ -3024,56 +2572,74 @@ static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg) goto out; } - vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; + ret = btrfs_check_ioctl_vol_args_path(vol_args); + if (ret < 0) + goto out_free; + ret = btrfs_init_new_device(fs_info, vol_args->name); if (!ret) btrfs_info(fs_info, "disk added %s", vol_args->name); +out_free: kfree(vol_args); out: - clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); + if (restore_op) + btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); + else + btrfs_exclop_finish(fs_info); return ret; } static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) { + BTRFS_DEV_LOOKUP_ARGS(args); struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = 
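For reference, a userspace sketch of the defrag range arguments validated above, requesting a compressed rewrite of a whole file. The numeric compress_type follows the kernel's BTRFS_COMPRESS_* enumeration (1 zlib, 2 lzo, 3 zstd), which is not exported in the UAPI header, so the literal value here is an assumption made to keep the sketch self-contained; /mnt/file is a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_defrag_range_args range = { 0 };
	int fd = open("/mnt/file", O_RDWR);

	if (fd < 0)
		return 1;
	range.len = (__u64)-1;                     /* whole file */
	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS; /* kernel ORs in START_IO */
	range.compress_type = 3;                   /* zstd, see note above */
	if (ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range) < 0)
		perror("BTRFS_IOC_DEFRAG_RANGE");
	return 0;
}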
btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_ioctl_vol_args_v2 *vol_args; + struct file *bdev_file = NULL; int ret; + bool cancel = false; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - ret = mnt_want_write_file(file); - if (ret) - return ret; - vol_args = memdup_user(arg, sizeof(*vol_args)); - if (IS_ERR(vol_args)) { - ret = PTR_ERR(vol_args); - goto err_drop; - } + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); - /* Check for compatibility reject unknown flags */ - if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) { + if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) { ret = -EOPNOTSUPP; goto out; } - if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { - ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; + ret = btrfs_check_ioctl_vol_args2_subvol_name(vol_args); + if (ret < 0) goto out; - } if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) { - ret = btrfs_rm_device(fs_info, NULL, vol_args->devid); + args.devid = vol_args->devid; + } else if (!strcmp("cancel", vol_args->name)) { + cancel = true; } else { - vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; - ret = btrfs_rm_device(fs_info, vol_args->name, 0); + ret = btrfs_get_dev_args_from_path(fs_info, &args, vol_args->name); + if (ret) + goto out; } - clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); + + ret = mnt_want_write_file(file); + if (ret) + goto out; + + ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE, + cancel); + if (ret) + goto err_drop; + + /* Exclusive operation is now claimed */ + ret = btrfs_rm_device(fs_info, &args, &bdev_file); + + btrfs_exclop_finish(fs_info); if (!ret) { if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) @@ -3083,63 +2649,83 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) btrfs_info(fs_info, "device deleted: %s", vol_args->name); } -out: - kfree(vol_args); err_drop: mnt_drop_write_file(file); + if (bdev_file) + bdev_fput(bdev_file); +out: + btrfs_put_dev_args_from_path(&args); + kfree(vol_args); return ret; } static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) { + BTRFS_DEV_LOOKUP_ARGS(args); struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_ioctl_vol_args *vol_args; + struct file *bdev_file = NULL; int ret; + bool cancel = false; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - ret = mnt_want_write_file(file); - if (ret) - return ret; + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); - if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { - ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; - goto out_drop_write; + ret = btrfs_check_ioctl_vol_args_path(vol_args); + if (ret < 0) + goto out_free; + + if (!strcmp("cancel", vol_args->name)) { + cancel = true; + } else { + ret = btrfs_get_dev_args_from_path(fs_info, &args, vol_args->name); + if (ret) + goto out; } - vol_args = memdup_user(arg, sizeof(*vol_args)); - if (IS_ERR(vol_args)) { - ret = PTR_ERR(vol_args); + ret = mnt_want_write_file(file); + if (ret) goto out; - } - vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; - ret = btrfs_rm_device(fs_info, vol_args->name, 0); + ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE, + cancel); + if (ret == 0) { + ret = btrfs_rm_device(fs_info, &args, &bdev_file); + if (!ret) + btrfs_info(fs_info, "disk deleted %s", vol_args->name); + btrfs_exclop_finish(fs_info); + } - if (!ret) - btrfs_info(fs_info, "disk deleted %s", 
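Device removal by id, as handled above, looks like this from userspace; a minimal sketch (devid 2 is a placeholder; passing the name "cancel" instead aborts a running removal):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_vol_args_v2 args = { 0 };
	int fd = open("/mnt", O_RDONLY); /* placeholder mount point */

	if (fd < 0)
		return 1;
	args.flags = BTRFS_DEVICE_SPEC_BY_ID;
	args.devid = 2; /* placeholder device id */
	if (ioctl(fd, BTRFS_IOC_RM_DEV_V2, &args) < 0)
		perror("BTRFS_IOC_RM_DEV_V2");
	return 0;
}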
vol_args->name); - kfree(vol_args); -out: - clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); -out_drop_write: mnt_drop_write_file(file); - + if (bdev_file) + bdev_fput(bdev_file); +out: + btrfs_put_dev_args_from_path(&args); +out_free: + kfree(vol_args); return ret; } -static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, +static long btrfs_ioctl_fs_info(const struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_fs_info_args *fi_args; struct btrfs_device *device; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; + u64 flags_in; int ret = 0; - fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL); - if (!fi_args) - return -ENOMEM; + fi_args = memdup_user(arg, sizeof(*fi_args)); + if (IS_ERR(fi_args)) + return PTR_ERR(fi_args); + + flags_in = fi_args->flags; + memset(fi_args, 0, sizeof(*fi_args)); rcu_read_lock(); fi_args->num_devices = fs_devices->num_devices; @@ -3155,6 +2741,23 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, fi_args->sectorsize = fs_info->sectorsize; fi_args->clone_alignment = fs_info->sectorsize; + if (flags_in & BTRFS_FS_INFO_FLAG_CSUM_INFO) { + fi_args->csum_type = btrfs_super_csum_type(fs_info->super_copy); + fi_args->csum_size = btrfs_super_csum_size(fs_info->super_copy); + fi_args->flags |= BTRFS_FS_INFO_FLAG_CSUM_INFO; + } + + if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) { + fi_args->generation = btrfs_get_fs_generation(fs_info); + fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION; + } + + if (flags_in & BTRFS_FS_INFO_FLAG_METADATA_UUID) { + memcpy(&fi_args->metadata_uuid, fs_devices->metadata_uuid, + sizeof(fi_args->metadata_uuid)); + fi_args->flags |= BTRFS_FS_INFO_FLAG_METADATA_UUID; + } + if (copy_to_user(arg, fi_args, sizeof(*fi_args))) ret = -EFAULT; @@ -3162,24 +2765,24 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, return ret; } -static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info, +static long btrfs_ioctl_dev_info(const struct btrfs_fs_info *fs_info, void __user *arg) { + BTRFS_DEV_LOOKUP_ARGS(args); struct btrfs_ioctl_dev_info_args *di_args; struct btrfs_device *dev; int ret = 0; - char *s_uuid = NULL; di_args = memdup_user(arg, sizeof(*di_args)); if (IS_ERR(di_args)) return PTR_ERR(di_args); + args.devid = di_args->devid; if (!btrfs_is_empty_uuid(di_args->uuid)) - s_uuid = di_args->uuid; + args.uuid = di_args->uuid; rcu_read_lock(); - dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL); - + dev = btrfs_find_device(fs_info->fs_devices, &args); if (!dev) { ret = -ENODEV; goto out; @@ -3189,13 +2792,11 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info, di_args->bytes_used = btrfs_device_get_bytes_used(dev); di_args->total_bytes = btrfs_device_get_total_bytes(dev); memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); - if (dev->name) { - strncpy(di_args->path, rcu_str_deref(dev->name), - sizeof(di_args->path) - 1); - di_args->path[sizeof(di_args->path) - 1] = 0; - } else { + memcpy(di_args->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE); + if (dev->name) + strscpy(di_args->path, btrfs_dev_name(dev), sizeof(di_args->path)); + else di_args->path[0] = '\0'; - } out: rcu_read_unlock(); @@ -3206,885 +2807,17 @@ out: return ret; } -static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2) -{ - inode_unlock(inode1); - inode_unlock(inode2); -} - -static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2) -{ - if (inode1 < inode2) - swap(inode1, inode2); - - inode_lock_nested(inode1, I_MUTEX_PARENT); - 
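The FS_INFO flags handshake added above is opt-in: userspace sets the flags it understands and the kernel answers with the subset it actually filled in. A minimal sketch, /mnt again a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_fs_info_args fi = { 0 };
	int fd = open("/mnt", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Opt in to the extra fields; the kernel echoes what it filled. */
	fi.flags = BTRFS_FS_INFO_FLAG_CSUM_INFO | BTRFS_FS_INFO_FLAG_GENERATION;
	if (ioctl(fd, BTRFS_IOC_FS_INFO, &fi) < 0) {
		perror("BTRFS_IOC_FS_INFO");
		return 1;
	}
	printf("devices=%llu csum_type=%u generation=%llu\n",
	       (unsigned long long)fi.num_devices, fi.csum_type,
	       (unsigned long long)fi.generation);
	return 0;
}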
inode_lock_nested(inode2, I_MUTEX_CHILD); -} - -static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1, - struct inode *inode2, u64 loff2, u64 len) -{ - unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); - unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); -} - -static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, - struct inode *inode2, u64 loff2, u64 len) -{ - if (inode1 < inode2) { - swap(inode1, inode2); - swap(loff1, loff2); - } else if (inode1 == inode2 && loff2 < loff1) { - swap(loff1, loff2); - } - lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); - lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); -} - -static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen, - struct inode *dst, u64 dst_loff) -{ - u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize; - int ret; - u64 len = olen; - - if (loff + len == src->i_size) - len = ALIGN(src->i_size, bs) - loff; - /* - * For same inode case we don't want our length pushed out past i_size - * as comparing that data range makes no sense. - * - * This effectively means we require aligned extents for the single - * inode case, whereas the other cases allow an unaligned length so long - * as it ends at i_size. - */ - if (dst == src && len != olen) - return -EINVAL; - - /* - * Lock destination range to serialize with concurrent readpages() and - * source range to serialize with relocation. - */ - btrfs_double_extent_lock(src, loff, dst, dst_loff, len); - ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1); - btrfs_double_extent_unlock(src, loff, dst, dst_loff, len); - - return ret; -} - -#define BTRFS_MAX_DEDUPE_LEN SZ_16M - -static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, - struct inode *dst, u64 dst_loff) -{ - int ret; - int num_pages = PAGE_ALIGN(BTRFS_MAX_DEDUPE_LEN) >> PAGE_SHIFT; - u64 i, tail_len, chunk_count; - - /* don't make the dst file partly checksummed */ - if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != - (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) - return -EINVAL; - - if (IS_SWAPFILE(src) || IS_SWAPFILE(dst)) - return -ETXTBSY; - - tail_len = olen % BTRFS_MAX_DEDUPE_LEN; - chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN); - if (chunk_count == 0) - num_pages = PAGE_ALIGN(tail_len) >> PAGE_SHIFT; - - for (i = 0; i < chunk_count; i++) { - ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN, - dst, dst_loff); - if (ret) - return ret; - - loff += BTRFS_MAX_DEDUPE_LEN; - dst_loff += BTRFS_MAX_DEDUPE_LEN; - } - - if (tail_len > 0) - ret = btrfs_extent_same_range(src, loff, tail_len, dst, - dst_loff); - - return ret; -} - -static int clone_finish_inode_update(struct btrfs_trans_handle *trans, - struct inode *inode, - u64 endoff, - const u64 destoff, - const u64 olen, - int no_time_update) -{ - struct btrfs_root *root = BTRFS_I(inode)->root; - int ret; - - inode_inc_iversion(inode); - if (!no_time_update) - inode->i_mtime = inode->i_ctime = current_time(inode); - /* - * We round up to the block size at eof when determining which - * extents to clone above, but shouldn't round up the file size. 
- */ - if (endoff > destoff + olen) - endoff = destoff + olen; - if (endoff > inode->i_size) - btrfs_i_size_write(BTRFS_I(inode), endoff); - - ret = btrfs_update_inode(trans, root, inode); - if (ret) { - btrfs_abort_transaction(trans, ret); - btrfs_end_transaction(trans); - goto out; - } - ret = btrfs_end_transaction(trans); -out: - return ret; -} - -static void clone_update_extent_map(struct btrfs_inode *inode, - const struct btrfs_trans_handle *trans, - const struct btrfs_path *path, - const u64 hole_offset, - const u64 hole_len) -{ - struct extent_map_tree *em_tree = &inode->extent_tree; - struct extent_map *em; - int ret; - - em = alloc_extent_map(); - if (!em) { - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); - return; - } - - if (path) { - struct btrfs_file_extent_item *fi; - - fi = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_file_extent_item); - btrfs_extent_item_to_extent_map(inode, path, fi, false, em); - em->generation = -1; - if (btrfs_file_extent_type(path->nodes[0], fi) == - BTRFS_FILE_EXTENT_INLINE) - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &inode->runtime_flags); - } else { - em->start = hole_offset; - em->len = hole_len; - em->ram_bytes = em->len; - em->orig_start = hole_offset; - em->block_start = EXTENT_MAP_HOLE; - em->block_len = 0; - em->orig_block_len = 0; - em->compress_type = BTRFS_COMPRESS_NONE; - em->generation = trans->transid; - } - - while (1) { - write_lock(&em_tree->lock); - ret = add_extent_mapping(em_tree, em, 1); - write_unlock(&em_tree->lock); - if (ret != -EEXIST) { - free_extent_map(em); - break; - } - btrfs_drop_extent_cache(inode, em->start, - em->start + em->len - 1, 0); - } - - if (ret) - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); -} - -/* - * Make sure we do not end up inserting an inline extent into a file that has - * already other (non-inline) extents. If a file has an inline extent it can - * not have any other extents and the (single) inline extent must start at the - * file offset 0. Failing to respect these rules will lead to file corruption, - * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc - * - * We can have extents that have been already written to disk or we can have - * dirty ranges still in delalloc, in which case the extent maps and items are - * created only when we run delalloc, and the delalloc ranges might fall outside - * the range we are currently locking in the inode's io tree. So we check the - * inode's i_size because of that (i_size updates are done while holding the - * i_mutex, which we are holding here). - * We also check to see if the inode has a size not greater than "datal" but has - * extents beyond it, due to an fallocate with FALLOC_FL_KEEP_SIZE (and we are - * protected against such concurrent fallocate calls by the i_mutex). - * - * If the file has no extents but a size greater than datal, do not allow the - * copy because we would need turn the inline extent into a non-inline one (even - * with NO_HOLES enabled). If we find our destination inode only has one inline - * extent, just overwrite it with the source inline extent if its size is less - * than the source extent's size, or we could copy the source inline extent's - * data into the destination inode's inline extent if the later is greater then - * the former. 
- */ -static int clone_copy_inline_extent(struct inode *dst, - struct btrfs_trans_handle *trans, - struct btrfs_path *path, - struct btrfs_key *new_key, - const u64 drop_start, - const u64 datal, - const u64 skip, - const u64 size, - char *inline_data) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb); - struct btrfs_root *root = BTRFS_I(dst)->root; - const u64 aligned_end = ALIGN(new_key->offset + datal, - fs_info->sectorsize); - int ret; - struct btrfs_key key; - - if (new_key->offset > 0) - return -EOPNOTSUPP; - - key.objectid = btrfs_ino(BTRFS_I(dst)); - key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = 0; - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) { - return ret; - } else if (ret > 0) { - if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { - ret = btrfs_next_leaf(root, path); - if (ret < 0) - return ret; - else if (ret > 0) - goto copy_inline_extent; - } - btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); - if (key.objectid == btrfs_ino(BTRFS_I(dst)) && - key.type == BTRFS_EXTENT_DATA_KEY) { - ASSERT(key.offset > 0); - return -EOPNOTSUPP; - } - } else if (i_size_read(dst) <= datal) { - struct btrfs_file_extent_item *ei; - u64 ext_len; - - /* - * If the file size is <= datal, make sure there are no other - * extents following (can happen do to an fallocate call with - * the flag FALLOC_FL_KEEP_SIZE). - */ - ei = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_file_extent_item); - /* - * If it's an inline extent, it can not have other extents - * following it. - */ - if (btrfs_file_extent_type(path->nodes[0], ei) == - BTRFS_FILE_EXTENT_INLINE) - goto copy_inline_extent; - - ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei); - if (ext_len > aligned_end) - return -EOPNOTSUPP; - - ret = btrfs_next_item(root, path); - if (ret < 0) { - return ret; - } else if (ret == 0) { - btrfs_item_key_to_cpu(path->nodes[0], &key, - path->slots[0]); - if (key.objectid == btrfs_ino(BTRFS_I(dst)) && - key.type == BTRFS_EXTENT_DATA_KEY) - return -EOPNOTSUPP; - } - } - -copy_inline_extent: - /* - * We have no extent items, or we have an extent at offset 0 which may - * or may not be inlined. All these cases are dealt the same way. - */ - if (i_size_read(dst) > datal) { - /* - * If the destination inode has an inline extent... - * This would require copying the data from the source inline - * extent into the beginning of the destination's inline extent. - * But this is really complex, both extents can be compressed - * or just one of them, which would require decompressing and - * re-compressing data (which could increase the new compressed - * size, not allowing the compressed data to fit anymore in an - * inline extent). - * So just don't support this case for now (it should be rare, - * we are not really saving space when cloning inline extents). 
- */ - return -EOPNOTSUPP; - } - - btrfs_release_path(path); - ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1); - if (ret) - return ret; - ret = btrfs_insert_empty_item(trans, root, path, new_key, size); - if (ret) - return ret; - - if (skip) { - const u32 start = btrfs_file_extent_calc_inline_size(0); - - memmove(inline_data + start, inline_data + start + skip, datal); - } - - write_extent_buffer(path->nodes[0], inline_data, - btrfs_item_ptr_offset(path->nodes[0], - path->slots[0]), - size); - inode_add_bytes(dst, datal); - - return 0; -} - -/** - * btrfs_clone() - clone a range from inode file to another - * - * @src: Inode to clone from - * @inode: Inode to clone to - * @off: Offset within source to start clone from - * @olen: Original length, passed by user, of range to clone - * @olen_aligned: Block-aligned value of olen - * @destoff: Offset within @inode to start clone - * @no_time_update: Whether to update mtime/ctime on the target inode - */ -static int btrfs_clone(struct inode *src, struct inode *inode, - const u64 off, const u64 olen, const u64 olen_aligned, - const u64 destoff, int no_time_update) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_path *path = NULL; - struct extent_buffer *leaf; - struct btrfs_trans_handle *trans; - char *buf = NULL; - struct btrfs_key key; - u32 nritems; - int slot; - int ret; - const u64 len = olen_aligned; - u64 last_dest_end = destoff; - - ret = -ENOMEM; - buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); - if (!buf) - return ret; - - path = btrfs_alloc_path(); - if (!path) { - kvfree(buf); - return ret; - } - - path->reada = READA_FORWARD; - /* clone data */ - key.objectid = btrfs_ino(BTRFS_I(src)); - key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = off; - - while (1) { - u64 next_key_min_offset = key.offset + 1; - - /* - * note the key will change type as we walk through the - * tree. - */ - path->leave_spinning = 1; - ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path, - 0, 0); - if (ret < 0) - goto out; - /* - * First search, if no extent item that starts at offset off was - * found but the previous item is an extent item, it's possible - * it might overlap our target range, therefore process it. 
- */ - if (key.offset == off && ret > 0 && path->slots[0] > 0) { - btrfs_item_key_to_cpu(path->nodes[0], &key, - path->slots[0] - 1); - if (key.type == BTRFS_EXTENT_DATA_KEY) - path->slots[0]--; - } - - nritems = btrfs_header_nritems(path->nodes[0]); -process_slot: - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(BTRFS_I(src)->root, path); - if (ret < 0) - goto out; - if (ret > 0) - break; - nritems = btrfs_header_nritems(path->nodes[0]); - } - leaf = path->nodes[0]; - slot = path->slots[0]; - - btrfs_item_key_to_cpu(leaf, &key, slot); - if (key.type > BTRFS_EXTENT_DATA_KEY || - key.objectid != btrfs_ino(BTRFS_I(src))) - break; - - if (key.type == BTRFS_EXTENT_DATA_KEY) { - struct btrfs_file_extent_item *extent; - int type; - u32 size; - struct btrfs_key new_key; - u64 disko = 0, diskl = 0; - u64 datao = 0, datal = 0; - u8 comp; - u64 drop_start; - - extent = btrfs_item_ptr(leaf, slot, - struct btrfs_file_extent_item); - comp = btrfs_file_extent_compression(leaf, extent); - type = btrfs_file_extent_type(leaf, extent); - if (type == BTRFS_FILE_EXTENT_REG || - type == BTRFS_FILE_EXTENT_PREALLOC) { - disko = btrfs_file_extent_disk_bytenr(leaf, - extent); - diskl = btrfs_file_extent_disk_num_bytes(leaf, - extent); - datao = btrfs_file_extent_offset(leaf, extent); - datal = btrfs_file_extent_num_bytes(leaf, - extent); - } else if (type == BTRFS_FILE_EXTENT_INLINE) { - /* take upper bound, may be compressed */ - datal = btrfs_file_extent_ram_bytes(leaf, - extent); - } - - /* - * The first search might have left us at an extent - * item that ends before our target range's start, can - * happen if we have holes and NO_HOLES feature enabled. - */ - if (key.offset + datal <= off) { - path->slots[0]++; - goto process_slot; - } else if (key.offset >= off + len) { - break; - } - next_key_min_offset = key.offset + datal; - size = btrfs_item_size_nr(leaf, slot); - read_extent_buffer(leaf, buf, - btrfs_item_ptr_offset(leaf, slot), - size); - - btrfs_release_path(path); - path->leave_spinning = 0; - - memcpy(&new_key, &key, sizeof(new_key)); - new_key.objectid = btrfs_ino(BTRFS_I(inode)); - if (off <= key.offset) - new_key.offset = key.offset + destoff - off; - else - new_key.offset = destoff; - - /* - * Deal with a hole that doesn't have an extent item - * that represents it (NO_HOLES feature enabled). - * This hole is either in the middle of the cloning - * range or at the beginning (fully overlaps it or - * partially overlaps it). 
- */ - if (new_key.offset != last_dest_end) - drop_start = last_dest_end; - else - drop_start = new_key.offset; - - /* - * 1 - adjusting old extent (we may have to split it) - * 1 - add new extent - * 1 - inode update - */ - trans = btrfs_start_transaction(root, 3); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out; - } - - if (type == BTRFS_FILE_EXTENT_REG || - type == BTRFS_FILE_EXTENT_PREALLOC) { - /* - * a | --- range to clone ---| b - * | ------------- extent ------------- | - */ - - /* subtract range b */ - if (key.offset + datal > off + len) - datal = off + len - key.offset; - - /* subtract range a */ - if (off > key.offset) { - datao += off - key.offset; - datal -= off - key.offset; - } - - ret = btrfs_drop_extents(trans, root, inode, - drop_start, - new_key.offset + datal, - 1); - if (ret) { - if (ret != -EOPNOTSUPP) - btrfs_abort_transaction(trans, - ret); - btrfs_end_transaction(trans); - goto out; - } - - ret = btrfs_insert_empty_item(trans, root, path, - &new_key, size); - if (ret) { - btrfs_abort_transaction(trans, ret); - btrfs_end_transaction(trans); - goto out; - } - - leaf = path->nodes[0]; - slot = path->slots[0]; - write_extent_buffer(leaf, buf, - btrfs_item_ptr_offset(leaf, slot), - size); - - extent = btrfs_item_ptr(leaf, slot, - struct btrfs_file_extent_item); - - /* disko == 0 means it's a hole */ - if (!disko) - datao = 0; - - btrfs_set_file_extent_offset(leaf, extent, - datao); - btrfs_set_file_extent_num_bytes(leaf, extent, - datal); - - if (disko) { - inode_add_bytes(inode, datal); - ret = btrfs_inc_extent_ref(trans, - root, - disko, diskl, 0, - root->root_key.objectid, - btrfs_ino(BTRFS_I(inode)), - new_key.offset - datao); - if (ret) { - btrfs_abort_transaction(trans, - ret); - btrfs_end_transaction(trans); - goto out; - - } - } - } else if (type == BTRFS_FILE_EXTENT_INLINE) { - u64 skip = 0; - u64 trim = 0; - - if (off > key.offset) { - skip = off - key.offset; - new_key.offset += skip; - } - - if (key.offset + datal > off + len) - trim = key.offset + datal - (off + len); - - if (comp && (skip || trim)) { - ret = -EINVAL; - btrfs_end_transaction(trans); - goto out; - } - size -= skip + trim; - datal -= skip + trim; - - ret = clone_copy_inline_extent(inode, - trans, path, - &new_key, - drop_start, - datal, - skip, size, buf); - if (ret) { - if (ret != -EOPNOTSUPP) - btrfs_abort_transaction(trans, - ret); - btrfs_end_transaction(trans); - goto out; - } - leaf = path->nodes[0]; - slot = path->slots[0]; - } - - /* If we have an implicit hole (NO_HOLES feature). */ - if (drop_start < new_key.offset) - clone_update_extent_map(BTRFS_I(inode), trans, - NULL, drop_start, - new_key.offset - drop_start); - - clone_update_extent_map(BTRFS_I(inode), trans, - path, 0, 0); - - btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(path); - - last_dest_end = ALIGN(new_key.offset + datal, - fs_info->sectorsize); - ret = clone_finish_inode_update(trans, inode, - last_dest_end, - destoff, olen, - no_time_update); - if (ret) - goto out; - if (new_key.offset + datal >= destoff + len) - break; - } - btrfs_release_path(path); - key.offset = next_key_min_offset; - - if (fatal_signal_pending(current)) { - ret = -EINTR; - goto out; - } - } - ret = 0; - - if (last_dest_end < destoff + len) { - /* - * We have an implicit hole (NO_HOLES feature is enabled) that - * fully or partially overlaps our cloning range at its end. 
- */ - btrfs_release_path(path); - - /* - * 1 - remove extent(s) - * 1 - inode update - */ - trans = btrfs_start_transaction(root, 2); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out; - } - ret = btrfs_drop_extents(trans, root, inode, - last_dest_end, destoff + len, 1); - if (ret) { - if (ret != -EOPNOTSUPP) - btrfs_abort_transaction(trans, ret); - btrfs_end_transaction(trans); - goto out; - } - clone_update_extent_map(BTRFS_I(inode), trans, NULL, - last_dest_end, - destoff + len - last_dest_end); - ret = clone_finish_inode_update(trans, inode, destoff + len, - destoff, olen, no_time_update); - } - -out: - btrfs_free_path(path); - kvfree(buf); - return ret; -} - -static noinline int btrfs_clone_files(struct file *file, struct file *file_src, - u64 off, u64 olen, u64 destoff) -{ - struct inode *inode = file_inode(file); - struct inode *src = file_inode(file_src); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - int ret; - u64 len = olen; - u64 bs = fs_info->sb->s_blocksize; - - /* - * TODO: - * - split compressed inline extents. annoying: we need to - * decompress into destination's address_space (the file offset - * may change, so source mapping won't do), then recompress (or - * otherwise reinsert) a subrange. - * - * - split destination inode's inline extents. The inline extents can - * be either compressed or non-compressed. - */ - - /* don't make the dst file partly checksummed */ - if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != - (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) - return -EINVAL; - - if (IS_SWAPFILE(src) || IS_SWAPFILE(inode)) - return -ETXTBSY; - - /* - * VFS's generic_remap_file_range_prep() protects us from cloning the - * eof block into the middle of a file, which would result in corruption - * if the file size is not blocksize aligned. So we don't need to check - * for that case here. - */ - if (off + len == src->i_size) - len = ALIGN(src->i_size, bs) - off; - - if (destoff > inode->i_size) { - const u64 wb_start = ALIGN_DOWN(inode->i_size, bs); - - ret = btrfs_cont_expand(inode, inode->i_size, destoff); - if (ret) - return ret; - /* - * We may have truncated the last block if the inode's size is - * not sector size aligned, so we need to wait for writeback to - * complete before proceeding further, otherwise we can race - * with cloning and attempt to increment a reference to an - * extent that no longer exists (writeback completed right after - * we found the previous extent covering eof and before we - * attempted to increment its reference count). - */ - ret = btrfs_wait_ordered_range(inode, wb_start, - destoff - wb_start); - if (ret) - return ret; - } - - /* - * Lock destination range to serialize with concurrent readpages() and - * source range to serialize with relocation. - */ - btrfs_double_extent_lock(src, off, inode, destoff, len); - ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); - btrfs_double_extent_unlock(src, off, inode, destoff, len); - /* - * Truncate page cache pages so that future reads will see the cloned - * data immediately and not the previous data. 
- */ - truncate_inode_pages_range(&inode->i_data, - round_down(destoff, PAGE_SIZE), - round_up(destoff + len, PAGE_SIZE) - 1); - - return ret; -} - -static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t *len, unsigned int remap_flags) -{ - struct inode *inode_in = file_inode(file_in); - struct inode *inode_out = file_inode(file_out); - u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize; - bool same_inode = inode_out == inode_in; - u64 wb_len; - int ret; - - if (!(remap_flags & REMAP_FILE_DEDUP)) { - struct btrfs_root *root_out = BTRFS_I(inode_out)->root; - - if (btrfs_root_readonly(root_out)) - return -EROFS; - - if (file_in->f_path.mnt != file_out->f_path.mnt || - inode_in->i_sb != inode_out->i_sb) - return -EXDEV; - } - - if (same_inode) - inode_lock(inode_in); - else - btrfs_double_inode_lock(inode_in, inode_out); - - /* - * Now that the inodes are locked, we need to start writeback ourselves - * and can not rely on the writeback from the VFS's generic helper - * generic_remap_file_range_prep() because: - * - * 1) For compression we must call filemap_fdatawrite_range() range - * twice (btrfs_fdatawrite_range() does it for us), and the generic - * helper only calls it once; - * - * 2) filemap_fdatawrite_range(), called by the generic helper only - * waits for the writeback to complete, i.e. for IO to be done, and - * not for the ordered extents to complete. We need to wait for them - * to complete so that new file extent items are in the fs tree. - */ - if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP)) - wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs); - else - wb_len = ALIGN(*len, bs); - - /* - * Since we don't lock ranges, wait for ongoing lockless dio writes (as - * any in progress could create its ordered extents after we wait for - * existing ordered extents below). - */ - inode_dio_wait(inode_in); - if (!same_inode) - inode_dio_wait(inode_out); - - ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs), - wb_len); - if (ret < 0) - goto out_unlock; - ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs), - wb_len); - if (ret < 0) - goto out_unlock; - - ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, - len, remap_flags); - if (ret < 0 || *len == 0) - goto out_unlock; - - return 0; - - out_unlock: - if (same_inode) - inode_unlock(inode_in); - else - btrfs_double_inode_unlock(inode_in, inode_out); - - return ret; -} - -loff_t btrfs_remap_file_range(struct file *src_file, loff_t off, - struct file *dst_file, loff_t destoff, loff_t len, - unsigned int remap_flags) -{ - struct inode *src_inode = file_inode(src_file); - struct inode *dst_inode = file_inode(dst_file); - bool same_inode = dst_inode == src_inode; - int ret; - - if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) - return -EINVAL; - - ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff, - &len, remap_flags); - if (ret < 0 || len == 0) - return ret; - - if (remap_flags & REMAP_FILE_DEDUP) - ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff); - else - ret = btrfs_clone_files(dst_file, src_file, off, len, destoff); - - if (same_inode) - inode_unlock(src_inode); - else - btrfs_double_inode_unlock(src_inode, dst_inode); - - return ret < 0 ? 
ret : len; -} - static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) { struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *new_root; struct btrfs_dir_item *di; struct btrfs_trans_handle *trans; - struct btrfs_path *path; - struct btrfs_key location; + struct btrfs_path *path = NULL; struct btrfs_disk_key disk_key; + struct fscrypt_str name = FSTR_INIT("default", 7); u64 objectid = 0; u64 dir_id; int ret; @@ -4104,53 +2837,49 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) if (!objectid) objectid = BTRFS_FS_TREE_OBJECTID; - location.objectid = objectid; - location.type = BTRFS_ROOT_ITEM_KEY; - location.offset = (u64)-1; - - new_root = btrfs_read_fs_root_no_name(fs_info, &location); + new_root = btrfs_get_fs_root(fs_info, objectid, true); if (IS_ERR(new_root)) { ret = PTR_ERR(new_root); goto out; } - if (!is_fstree(new_root->root_key.objectid)) { + if (!btrfs_is_fstree(btrfs_root_id(new_root))) { ret = -ENOENT; - goto out; + goto out_free; } path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; - goto out; + goto out_free; } - path->leave_spinning = 1; trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { - btrfs_free_path(path); ret = PTR_ERR(trans); - goto out; + goto out_free; } dir_id = btrfs_super_root_dir(fs_info->super_copy); di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path, - dir_id, "default", 7, 1); + dir_id, &name, 1); if (IS_ERR_OR_NULL(di)) { - btrfs_free_path(path); + btrfs_release_path(path); btrfs_end_transaction(trans); btrfs_err(fs_info, "Umm, you don't have the default diritem, this isn't going to work"); ret = -ENOENT; - goto out; + goto out_free; } btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key); btrfs_set_dir_item_key(path->nodes[0], di, &disk_key); - btrfs_mark_buffer_dirty(path->nodes[0]); - btrfs_free_path(path); + btrfs_release_path(path); btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL); btrfs_end_transaction(trans); +out_free: + btrfs_put_root(new_root); + btrfs_free_path(path); out: mnt_drop_write_file(file); return ret; @@ -4159,26 +2888,25 @@ out: static void get_block_group_info(struct list_head *groups_list, struct btrfs_ioctl_space_info *space) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; space->total_bytes = 0; space->used_bytes = 0; space->flags = 0; list_for_each_entry(block_group, groups_list, list) { space->flags = block_group->flags; - space->total_bytes += block_group->key.offset; - space->used_bytes += - btrfs_block_group_used(&block_group->item); + space->total_bytes += block_group->length; + space->used_bytes += block_group->used; } } static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info, void __user *arg) { - struct btrfs_ioctl_space_args space_args; + struct btrfs_ioctl_space_args space_args = { 0 }; struct btrfs_ioctl_space_info space; struct btrfs_ioctl_space_info *dest; - struct btrfs_ioctl_space_info *dest_orig; + struct btrfs_ioctl_space_info AUTO_KFREE(dest_orig); struct btrfs_ioctl_space_info __user *user_dest; struct btrfs_space_info *info; static const u64 types[] = { @@ -4202,15 +2930,12 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info, struct btrfs_space_info *tmp; info = NULL; - rcu_read_lock(); - list_for_each_entry_rcu(tmp, &fs_info->space_info, - list) { + list_for_each_entry(tmp, &fs_info->space_info, list) { 
if (tmp->flags == types[i]) { info = tmp; break; } } - rcu_read_unlock(); if (!info) continue; @@ -4258,15 +2983,12 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info, break; info = NULL; - rcu_read_lock(); - list_for_each_entry_rcu(tmp, &fs_info->space_info, - list) { + list_for_each_entry(tmp, &fs_info->space_info, list) { if (tmp->flags == types[i]) { info = tmp; break; } } - rcu_read_unlock(); if (!info) continue; @@ -4305,9 +3027,8 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info, (arg + sizeof(struct btrfs_ioctl_space_args)); if (copy_to_user(user_dest, dest_orig, alloc_size)) - ret = -EFAULT; + return -EFAULT; - kfree(dest_orig); out: if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args))) ret = -EFAULT; @@ -4320,7 +3041,13 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root, { struct btrfs_trans_handle *trans; u64 transid; - int ret; + + /* + * Start orphan cleanup here for the given root in case it hasn't been + * started already by other means. Errors are handled in the other + * functions during transaction commit. + */ + btrfs_orphan_cleanup(root); trans = btrfs_attach_transaction_barrier(root); if (IS_ERR(trans)) { @@ -4328,15 +3055,11 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root, return PTR_ERR(trans); /* No running transaction, don't bother */ - transid = root->fs_info->last_trans_committed; + transid = btrfs_get_last_trans_committed(root->fs_info); goto out; } transid = trans->transid; - ret = btrfs_commit_transaction_async(trans, 0); - if (ret) { - btrfs_end_transaction(trans); - return ret; - } + btrfs_commit_transaction_async(trans); out: if (argp) if (copy_to_user(argp, &transid, sizeof(transid))) @@ -4347,30 +3070,39 @@ out: static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info, void __user *argp) { - u64 transid; + /* By default wait for the current transaction. */ + u64 transid = 0; - if (argp) { + if (argp) if (copy_from_user(&transid, argp, sizeof(transid))) return -EFAULT; - } else { - transid = 0; /* current trans */ - } + return btrfs_wait_for_commit(fs_info, transid); } static long btrfs_ioctl_scrub(struct file *file, void __user *arg) { - struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(file_inode(file)); struct btrfs_ioctl_scrub_args *sa; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { + btrfs_err(fs_info, "scrub: extent tree v2 not yet supported"); + return -EINVAL; + } + sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) return PTR_ERR(sa); + if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) { + ret = -EOPNOTSUPP; + goto out; + } + if (!(sa->flags & BTRFS_SCRUB_READONLY)) { ret = mnt_want_write_file(file); if (ret) @@ -4381,6 +3113,18 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg) &sa->progress, sa->flags & BTRFS_SCRUB_READONLY, 0); + /* + * Copy scrub args to user space even if btrfs_scrub_dev() returned an + * error. This is important as it allows user space to know how much + * progress scrub has done. For example, if scrub is canceled we get + * -ECANCELED from btrfs_scrub_dev() and return that error back to user + * space. Later user space can inspect the progress from the structure + * btrfs_ioctl_scrub_args and resume scrub from where it left off + * previously (btrfs-progs does this). 
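+	 * As an illustrative sketch (not the exact btrfs-progs code), a
+	 * resume in user space amounts to feeding the reported progress
+	 * back into the next invocation:
+	 *
+	 *	memset(&sa, 0, sizeof(sa));
+	 *	sa.devid = prev.devid;
+	 *	sa.start = prev.progress.last_physical;
+	 *	sa.end = (u64)-1;
+	 *	ioctl(fs_fd, BTRFS_IOC_SCRUB, &sa);
+	 *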
+ * If we fail to copy the btrfs_ioctl_scrub_args structure to user space + * then return -EFAULT to signal the structure was not copied or it may + * be corrupt and unreliable due to a partial copy. + */ if (copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; @@ -4414,7 +3158,7 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info, ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress); - if (copy_to_user(arg, sa, sizeof(*sa))) + if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; kfree(sa); @@ -4438,7 +3182,7 @@ static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info, ret = btrfs_get_dev_stats(fs_info, sa); - if (copy_to_user(arg, sa, sizeof(*sa))) + if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; kfree(sa); @@ -4454,6 +3198,11 @@ static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info, if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { + btrfs_err(fs_info, "device replace not supported on extent tree v2 yet"); + return -EINVAL; + } + p = memdup_user(arg, sizeof(*p)); if (IS_ERR(p)) return PTR_ERR(p); @@ -4464,11 +3213,11 @@ static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info, ret = -EROFS; goto out; } - if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { + if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) { ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; } else { ret = btrfs_dev_replace_by_ioctl(fs_info, p); - clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); + btrfs_exclop_finish(fs_info); } break; case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS: @@ -4484,7 +3233,7 @@ static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info, break; } - if (copy_to_user(arg, p, sizeof(*p))) + if ((ret == 0 || ret == -ECANCELED) && copy_to_user(arg, p, sizeof(*p))) ret = -EFAULT; out: kfree(p); @@ -4498,7 +3247,7 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) u64 rel_ptr; int size; struct btrfs_ioctl_ino_path_args *ipa = NULL; - struct inode_fs_paths *ipath = NULL; + struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL; struct btrfs_path *path; if (!capable(CAP_DAC_READ_SEARCH)) @@ -4535,6 +3284,8 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) ipath->fspath->val[i] = rel_ptr; } + btrfs_free_path(path); + path = NULL; ret = copy_to_user((void __user *)(unsigned long)ipa->fspath, ipath->fspath, size); if (ret) { @@ -4544,32 +3295,11 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) out: btrfs_free_path(path); - free_ipath(ipath); kfree(ipa); return ret; } -static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx) -{ - struct btrfs_data_container *inodes = ctx; - const size_t c = 3 * sizeof(u64); - - if (inodes->bytes_left >= c) { - inodes->bytes_left -= c; - inodes->val[inodes->elem_cnt] = inum; - inodes->val[inodes->elem_cnt + 1] = offset; - inodes->val[inodes->elem_cnt + 2] = root; - inodes->elem_cnt += 3; - } else { - inodes->bytes_missing += c - inodes->bytes_left; - inodes->bytes_left = 0; - inodes->elem_missed += 3; - } - - return 0; -} - static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, void __user *arg, int version) { @@ -4577,7 +3307,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, int size; struct btrfs_ioctl_logical_ino_args *loi; struct btrfs_data_container *inodes = NULL; - struct btrfs_path *path = NULL; bool ignore_offset; if (!capable(CAP_SYS_ADMIN)) @@ -4605,21 +3334,13 @@ static 
long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
 		size = min_t(u32, loi->size, SZ_16M);
 	}
 
-	path = btrfs_alloc_path();
-	if (!path) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	inodes = init_data_container(size);
 	if (IS_ERR(inodes)) {
 		ret = PTR_ERR(inodes);
-		inodes = NULL;
-		goto out;
+		goto out_loi;
 	}
 
-	ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
-					  build_ino_list, inodes, ignore_offset);
+	ret = iterate_inodes_from_logical(loi->logical, fs_info, inodes, ignore_offset);
 	if (ret == -EINVAL)
 		ret = -ENOENT;
 	if (ret < 0)
@@ -4631,7 +3352,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
 		ret = -EFAULT;
 
 out:
-	btrfs_free_path(path);
 	kvfree(inodes);
 out_loi:
 	kfree(loi);
@@ -4662,13 +3382,79 @@ void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
 	spin_unlock(&fs_info->balance_lock);
 }
 
+/*
+ * Try to acquire fs_info::balance_mutex as well as set BTRFS_EXCLOP_BALANCE as
+ * required.
+ *
+ * @fs_info:       the filesystem
+ * @excl_acquired: pointer to a boolean which is set to false in case balance
+ *                 is being resumed
+ *
+ * Return 0 on success, in which case fs_info::balance_mutex is acquired and
+ * exclusive operations are blocked. In case of failure return an error code.
+ */
+static int btrfs_try_lock_balance(struct btrfs_fs_info *fs_info, bool *excl_acquired)
+{
+	int ret;
+
+	/*
+	 * Exclusive operation is locked. Three possibilities:
+	 *   (1) some other op is running
+	 *   (2) balance is running
+	 *   (3) balance is paused -- special case (think resume)
+	 */
+	while (1) {
+		if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
+			*excl_acquired = true;
+			mutex_lock(&fs_info->balance_mutex);
+			return 0;
+		}
+
+		mutex_lock(&fs_info->balance_mutex);
+		if (fs_info->balance_ctl) {
+			/* This is either (2) or (3) */
+			if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
+				/* This is (2) */
+				ret = -EINPROGRESS;
+				goto out_failure;
+
+			} else {
+				mutex_unlock(&fs_info->balance_mutex);
+				/*
+				 * Lock released to allow other waiters to
+				 * continue; we'll reexamine the status again.
+				 */
+				mutex_lock(&fs_info->balance_mutex);
+
+				if (fs_info->balance_ctl &&
+				    !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
+					/* This is (3) */
+					*excl_acquired = false;
+					return 0;
+				}
+			}
+		} else {
+			/* This is (1) */
+			ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+			goto out_failure;
+		}
+
+		mutex_unlock(&fs_info->balance_mutex);
+	}
+
+out_failure:
+	mutex_unlock(&fs_info->balance_mutex);
+	*excl_acquired = false;
+	return ret;
+}
+
 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 {
 	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_ioctl_balance_args *bargs;
 	struct btrfs_balance_control *bctl;
-	bool need_unlock; /* for mut. excl. ops lock */
+	bool need_unlock = true;
 	int ret;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -4678,133 +3464,80 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 	if (ret)
 		return ret;
 
-again:
-	if (!test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
-		mutex_lock(&fs_info->balance_mutex);
-		need_unlock = true;
-		goto locked;
+	bargs = memdup_user(arg, sizeof(*bargs));
+	if (IS_ERR(bargs)) {
+		ret = PTR_ERR(bargs);
+		bargs = NULL;
+		goto out;
 	}
 
-	/*
-	 * mut. excl. ops lock is locked. 
Three possibilities: - * (1) some other op is running - * (2) balance is running - * (3) balance is paused -- special case (think resume) - */ - mutex_lock(&fs_info->balance_mutex); - if (fs_info->balance_ctl) { - /* this is either (2) or (3) */ - if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { - mutex_unlock(&fs_info->balance_mutex); - /* - * Lock released to allow other waiters to continue, - * we'll reexamine the status again. - */ - mutex_lock(&fs_info->balance_mutex); - - if (fs_info->balance_ctl && - !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { - /* this is (3) */ - need_unlock = false; - goto locked; - } - - mutex_unlock(&fs_info->balance_mutex); - goto again; - } else { - /* this is (2) */ - mutex_unlock(&fs_info->balance_mutex); - ret = -EINPROGRESS; - goto out; - } - } else { - /* this is (1) */ - mutex_unlock(&fs_info->balance_mutex); - ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; + ret = btrfs_try_lock_balance(fs_info, &need_unlock); + if (ret) goto out; - } -locked: - BUG_ON(!test_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)); + lockdep_assert_held(&fs_info->balance_mutex); - if (arg) { - bargs = memdup_user(arg, sizeof(*bargs)); - if (IS_ERR(bargs)) { - ret = PTR_ERR(bargs); + if (bargs->flags & BTRFS_BALANCE_RESUME) { + if (!fs_info->balance_ctl) { + ret = -ENOTCONN; goto out_unlock; } - if (bargs->flags & BTRFS_BALANCE_RESUME) { - if (!fs_info->balance_ctl) { - ret = -ENOTCONN; - goto out_bargs; - } + bctl = fs_info->balance_ctl; + spin_lock(&fs_info->balance_lock); + bctl->flags |= BTRFS_BALANCE_RESUME; + spin_unlock(&fs_info->balance_lock); + btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE); - bctl = fs_info->balance_ctl; - spin_lock(&fs_info->balance_lock); - bctl->flags |= BTRFS_BALANCE_RESUME; - spin_unlock(&fs_info->balance_lock); + goto do_balance; + } - goto do_balance; - } - } else { - bargs = NULL; + if (bargs->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) { + ret = -EINVAL; + goto out_unlock; } if (fs_info->balance_ctl) { ret = -EINPROGRESS; - goto out_bargs; + goto out_unlock; } bctl = kzalloc(sizeof(*bctl), GFP_KERNEL); if (!bctl) { ret = -ENOMEM; - goto out_bargs; - } - - if (arg) { - memcpy(&bctl->data, &bargs->data, sizeof(bctl->data)); - memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta)); - memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys)); - - bctl->flags = bargs->flags; - } else { - /* balance everything - no filters */ - bctl->flags |= BTRFS_BALANCE_TYPE_MASK; + goto out_unlock; } - if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) { - ret = -EINVAL; - goto out_bctl; - } + memcpy(&bctl->data, &bargs->data, sizeof(bctl->data)); + memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta)); + memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys)); + bctl->flags = bargs->flags; do_balance: /* - * Ownership of bctl and filesystem flag BTRFS_FS_EXCL_OP goes to - * btrfs_balance. bctl is freed in reset_balance_state, or, if - * restriper was paused all the way until unmount, in free_fs_info. - * The flag should be cleared after reset_balance_state. + * Ownership of bctl and exclusive operation goes to btrfs_balance. + * bctl is freed in reset_balance_state, or, if restriper was paused + * all the way until unmount, in free_fs_info. The flag should be + * cleared after reset_balance_state. 
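+	 *
+	 * This is also why need_unlock is cleared just below: on this path
+	 * the exclusive operation is finished by the balance code, not by
+	 * this ioctl.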
*/ need_unlock = false; ret = btrfs_balance(fs_info, bctl, bargs); bctl = NULL; - if (arg) { + if (ret == 0 || ret == -ECANCELED) { if (copy_to_user(arg, bargs, sizeof(*bargs))) ret = -EFAULT; } -out_bctl: kfree(bctl); -out_bargs: - kfree(bargs); out_unlock: mutex_unlock(&fs_info->balance_mutex); if (need_unlock) - clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); + btrfs_exclop_finish(fs_info); out: mnt_drop_write_file(file); + kfree(bargs); return ret; } @@ -4826,7 +3559,7 @@ static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd) static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info, void __user *arg) { - struct btrfs_ioctl_balance_args *bargs; + struct btrfs_ioctl_balance_args AUTO_KFREE(bargs); int ret = 0; if (!capable(CAP_SYS_ADMIN)) @@ -4848,8 +3581,6 @@ static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info, if (copy_to_user(arg, bargs, sizeof(*bargs))) ret = -EFAULT; - - kfree(bargs); out: mutex_unlock(&fs_info->balance_mutex); return ret; @@ -4858,7 +3589,7 @@ out: static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_ioctl_quota_ctl_args *sa; int ret; @@ -4875,14 +3606,43 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg) goto drop_write; } - down_write(&fs_info->subvol_sem); - switch (sa->cmd) { case BTRFS_QUOTA_CTL_ENABLE: - ret = btrfs_quota_enable(fs_info); + case BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA: + down_write(&fs_info->subvol_sem); + ret = btrfs_quota_enable(fs_info, sa); + up_write(&fs_info->subvol_sem); break; case BTRFS_QUOTA_CTL_DISABLE: + /* + * Lock the cleaner mutex to prevent races with concurrent + * relocation, because relocation may be building backrefs for + * blocks of the quota root while we are deleting the root. This + * is like dropping fs roots of deleted snapshots/subvolumes, we + * need the same protection. + * + * This also prevents races between concurrent tasks trying to + * disable quotas, because we will unlock and relock + * qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes. + * + * We take this here because we have the dependency of + * + * inode_lock -> subvol_sem + * + * because of rename. With relocation we can prealloc extents, + * so that makes the dependency chain + * + * cleaner_mutex -> inode_lock -> subvol_sem + * + * so we must take the cleaner_mutex here before we take the + * subvol_sem. The deadlock can't actually happen, but this + * quiets lockdep. 
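+		 *
+		 * In short: cleaner_mutex is taken first and subvol_sem
+		 * second, matching the cleaner_mutex -> inode_lock ->
+		 * subvol_sem chain described above.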
+ */ + mutex_lock(&fs_info->cleaner_mutex); + down_write(&fs_info->subvol_sem); ret = btrfs_quota_disable(fs_info); + up_write(&fs_info->subvol_sem); + mutex_unlock(&fs_info->cleaner_mutex); break; default: ret = -EINVAL; @@ -4890,7 +3650,6 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg) } kfree(sa); - up_write(&fs_info->subvol_sem); drop_write: mnt_drop_write_file(file); return ret; @@ -4899,9 +3658,10 @@ drop_write: static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ioctl_qgroup_assign_args *sa; + struct btrfs_qgroup_list *prealloc = NULL; struct btrfs_trans_handle *trans; int ret; int err; @@ -4909,6 +3669,9 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (!btrfs_qgroup_enabled(fs_info)) + return -ENOTCONN; + ret = mnt_want_write_file(file); if (ret) return ret; @@ -4919,28 +3682,45 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) goto drop_write; } + if (sa->assign) { + prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); + if (!prealloc) { + ret = -ENOMEM; + goto out; + } + } + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } + /* + * Prealloc ownership is moved to the relation handler, there it's used + * or freed on error. + */ if (sa->assign) { - ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst); + ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst, prealloc); + prealloc = NULL; } else { ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst); } /* update qgroup status and info */ + mutex_lock(&fs_info->qgroup_ioctl_lock); err = btrfs_run_qgroups(trans); + mutex_unlock(&fs_info->qgroup_ioctl_lock); if (err < 0) - btrfs_handle_fs_error(fs_info, err, - "failed to update qgroup status and info"); + btrfs_warn(fs_info, + "qgroup status update failed after %s relation, marked as inconsistent", + sa->assign ? 
"adding" : "deleting"); err = btrfs_end_transaction(trans); if (err && !ret) ret = err; out: + kfree(prealloc); kfree(sa); drop_write: mnt_drop_write_file(file); @@ -4959,6 +3739,9 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (!btrfs_qgroup_enabled(root->fs_info)) + return -ENOTCONN; + ret = mnt_want_write_file(file); if (ret) return ret; @@ -4974,6 +3757,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg) goto out; } + if (sa->create && btrfs_is_fstree(sa->qgroupid)) { + ret = -EINVAL; + goto out; + } + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); @@ -5010,6 +3798,9 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (!btrfs_qgroup_enabled(root->fs_info)) + return -ENOTCONN; + ret = mnt_want_write_file(file); if (ret) return ret; @@ -5029,7 +3820,7 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg) qgroupid = sa->qgroupid; if (!qgroupid) { /* take the current subvol as qgroup */ - qgroupid = root->root_key.objectid; + qgroupid = btrfs_root_id(root); } ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim); @@ -5048,13 +3839,16 @@ drop_write: static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_ioctl_quota_rescan_args *qsa; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (!btrfs_qgroup_enabled(fs_info)) + return -ENOTCONN; + ret = mnt_want_write_file(file); if (ret) return ret; @@ -5079,37 +3873,27 @@ drop_write: return ret; } -static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg) +static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_ioctl_quota_rescan_args *qsa; - int ret = 0; + struct btrfs_ioctl_quota_rescan_args qsa = {0}; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - qsa = kzalloc(sizeof(*qsa), GFP_KERNEL); - if (!qsa) - return -ENOMEM; - if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { - qsa->flags = 1; - qsa->progress = fs_info->qgroup_rescan_progress.objectid; + qsa.flags = 1; + qsa.progress = fs_info->qgroup_rescan_progress.objectid; } - if (copy_to_user(arg, qsa, sizeof(*qsa))) - ret = -EFAULT; + if (copy_to_user(arg, &qsa, sizeof(qsa))) + return -EFAULT; - kfree(qsa); - return ret; + return 0; } -static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg) +static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -5117,10 +3901,11 @@ static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg) } static long _btrfs_ioctl_set_received_subvol(struct file *file, + struct mnt_idmap *idmap, struct btrfs_ioctl_received_subvol_args *sa) { struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root_item *root_item = &root->root_item; struct btrfs_trans_handle *trans; @@ -5128,7 +3913,7 @@ static 
long _btrfs_ioctl_set_received_subvol(struct file *file, int ret = 0; int received_uuid_changed; - if (!inode_owner_or_capable(inode)) + if (!inode_owner_or_capable(idmap, inode)) return -EPERM; ret = mnt_want_write_file(file); @@ -5168,8 +3953,8 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file, !btrfs_is_empty_uuid(root_item->received_uuid)) { ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid, BTRFS_UUID_KEY_RECEIVED_SUBVOL, - root->root_key.objectid); - if (ret && ret != -ENOENT) { + btrfs_root_id(root)); + if (unlikely(ret && ret != -ENOENT)) { btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); goto out; @@ -5192,8 +3977,8 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file, if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) { ret = btrfs_uuid_tree_add(trans, sa->uuid, BTRFS_UUID_KEY_RECEIVED_SUBVOL, - root->root_key.objectid); - if (ret < 0 && ret != -EEXIST) { + btrfs_root_id(root)); + if (unlikely(ret < 0 && ret != -EEXIST)) { btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); goto out; @@ -5233,7 +4018,7 @@ static long btrfs_ioctl_set_received_subvol_32(struct file *file, args64->rtime.nsec = args32->rtime.nsec; args64->flags = args32->flags; - ret = _btrfs_ioctl_set_received_subvol(file, args64); + ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_idmap(file), args64); if (ret) goto out; @@ -5267,7 +4052,7 @@ static long btrfs_ioctl_set_received_subvol(struct file *file, if (IS_ERR(sa)) return PTR_ERR(sa); - ret = _btrfs_ioctl_set_received_subvol(file, sa); + ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_idmap(file), sa); if (ret) goto out; @@ -5281,10 +4066,9 @@ out: return ret; } -static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg) +static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); size_t len; int ret; char label[BTRFS_LABEL_SIZE]; @@ -5309,7 +4093,7 @@ static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg) static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_trans_handle *trans; @@ -5340,7 +4124,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg) } spin_lock(&fs_info->super_lock); - strcpy(super_block->label, label); + strscpy(super_block->label, label); spin_unlock(&fs_info->super_lock); ret = btrfs_commit_transaction(trans); @@ -5368,10 +4152,9 @@ int btrfs_ioctl_get_supported_features(void __user *arg) return 0; } -static int btrfs_ioctl_get_features(struct file *file, void __user *arg) +static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_ioctl_feature_flags features; @@ -5385,13 +4168,13 @@ static int btrfs_ioctl_get_features(struct file *file, void __user *arg) return 0; } -static int check_feature_bits(struct btrfs_fs_info *fs_info, +static int check_feature_bits(const struct btrfs_fs_info *fs_info, enum btrfs_feature_set set, u64 change_mask, u64 flags, u64 supported_flags, u64 
safe_set, u64 safe_clear) { - const char *type = btrfs_feature_set_names[set]; - char *names; + const char *type = btrfs_feature_set_name(set); + const char AUTO_KFREE(names); u64 disallowed, unsupported; u64 set_mask = flags & change_mask; u64 clear_mask = ~flags & change_mask; @@ -5399,12 +4182,11 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info, unsupported = set_mask & ~supported_flags; if (unsupported) { names = btrfs_printable_features(set, unsupported); - if (names) { + if (names) btrfs_warn(fs_info, "this kernel does not support the %s feature bit%s", names, strchr(names, ',') ? "s" : ""); - kfree(names); - } else + else btrfs_warn(fs_info, "this kernel does not support %s bits 0x%llx", type, unsupported); @@ -5414,12 +4196,11 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info, disallowed = set_mask & ~safe_set; if (disallowed) { names = btrfs_printable_features(set, disallowed); - if (names) { + if (names) btrfs_warn(fs_info, "can't set the %s feature bit%s while mounted", names, strchr(names, ',') ? "s" : ""); - kfree(names); - } else + else btrfs_warn(fs_info, "can't set %s bits 0x%llx while mounted", type, disallowed); @@ -5429,12 +4210,11 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info, disallowed = clear_mask & ~safe_clear; if (disallowed) { names = btrfs_printable_features(set, disallowed); - if (names) { + if (names) btrfs_warn(fs_info, "can't clear the %s feature bit%s while mounted", names, strchr(names, ',') ? "s" : ""); - kfree(names); - } else + else btrfs_warn(fs_info, "can't clear %s bits 0x%llx while mounted", type, disallowed); @@ -5453,7 +4233,7 @@ check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \ static int btrfs_ioctl_set_features(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_ioctl_feature_flags flags[2]; @@ -5521,14 +4301,14 @@ out_drop_write: return ret; } -static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat) +static int _btrfs_ioctl_send(struct btrfs_root *root, void __user *argp, bool compat) { struct btrfs_ioctl_send_args *arg; int ret; if (compat) { #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) - struct btrfs_ioctl_send_args_32 args32; + struct btrfs_ioctl_send_args_32 args32 = { 0 }; ret = copy_from_user(&args32, argp, sizeof(args32)); if (ret) @@ -5541,6 +4321,7 @@ static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat) arg->clone_sources = compat_ptr(args32.clone_sources); arg->parent_root = args32.parent_root; arg->flags = args32.flags; + arg->version = args32.version; memcpy(arg->reserved, args32.reserved, sizeof(args32.reserved)); #else @@ -5551,40 +4332,888 @@ static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat) if (IS_ERR(arg)) return PTR_ERR(arg); } - ret = btrfs_ioctl_send(file, arg); + ret = btrfs_ioctl_send(root, arg); kfree(arg); return ret; } +static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp, + bool compat) +{ + struct btrfs_ioctl_encoded_io_args args = { 0 }; + size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, + flags); + size_t copy_end; + struct btrfs_inode *inode = BTRFS_I(file_inode(file)); + struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct extent_io_tree *io_tree = 
&inode->io_tree; + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov = iovstack; + struct iov_iter iter; + loff_t pos; + struct kiocb kiocb; + ssize_t ret; + u64 disk_bytenr, disk_io_size; + struct extent_state *cached_state = NULL; + + if (!capable(CAP_SYS_ADMIN)) { + ret = -EPERM; + goto out_acct; + } + + if (compat) { +#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) + struct btrfs_ioctl_encoded_io_args_32 args32; + + copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, + flags); + if (copy_from_user(&args32, argp, copy_end)) { + ret = -EFAULT; + goto out_acct; + } + args.iov = compat_ptr(args32.iov); + args.iovcnt = args32.iovcnt; + args.offset = args32.offset; + args.flags = args32.flags; +#else + return -ENOTTY; +#endif + } else { + copy_end = copy_end_kernel; + if (copy_from_user(&args, argp, copy_end)) { + ret = -EFAULT; + goto out_acct; + } + } + if (args.flags != 0) { + ret = -EINVAL; + goto out_acct; + } + + ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack), + &iov, &iter); + if (ret < 0) + goto out_acct; + + if (iov_iter_count(&iter) == 0) { + ret = 0; + goto out_iov; + } + pos = args.offset; + ret = rw_verify_area(READ, file, &pos, args.len); + if (ret < 0) + goto out_iov; + + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = pos; + + ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state, + &disk_bytenr, &disk_io_size); + + if (ret == -EIOCBQUEUED) { + bool unlocked = false; + u64 start, lockend, count; + + start = ALIGN_DOWN(kiocb.ki_pos, fs_info->sectorsize); + lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; + + if (args.compression) + count = disk_io_size; + else + count = args.len; + + ret = btrfs_encoded_read_regular(&kiocb, &iter, start, lockend, + &cached_state, disk_bytenr, + disk_io_size, count, + args.compression, &unlocked); + + if (!unlocked) { + btrfs_unlock_extent(io_tree, start, lockend, &cached_state); + btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); + } + } + + if (ret >= 0) { + fsnotify_access(file); + if (copy_to_user(argp + copy_end, + (char *)&args + copy_end_kernel, + sizeof(args) - copy_end_kernel)) + ret = -EFAULT; + } + +out_iov: + kfree(iov); +out_acct: + if (ret > 0) + add_rchar(current, ret); + inc_syscr(current); + return ret; +} + +static int btrfs_ioctl_encoded_write(struct file *file, void __user *argp, bool compat) +{ + struct btrfs_ioctl_encoded_io_args args; + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov = iovstack; + struct iov_iter iter; + loff_t pos; + struct kiocb kiocb; + ssize_t ret; + + if (!capable(CAP_SYS_ADMIN)) { + ret = -EPERM; + goto out_acct; + } + + if (!(file->f_mode & FMODE_WRITE)) { + ret = -EBADF; + goto out_acct; + } + + if (compat) { +#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) + struct btrfs_ioctl_encoded_io_args_32 args32; + + if (copy_from_user(&args32, argp, sizeof(args32))) { + ret = -EFAULT; + goto out_acct; + } + args.iov = compat_ptr(args32.iov); + args.iovcnt = args32.iovcnt; + args.offset = args32.offset; + args.flags = args32.flags; + args.len = args32.len; + args.unencoded_len = args32.unencoded_len; + args.unencoded_offset = args32.unencoded_offset; + args.compression = args32.compression; + args.encryption = args32.encryption; + memcpy(args.reserved, args32.reserved, sizeof(args.reserved)); +#else + return -ENOTTY; +#endif + } else { + if (copy_from_user(&args, argp, sizeof(args))) { + ret = -EFAULT; + goto out_acct; + } + } + + ret = -EINVAL; + if (args.flags != 0) + goto out_acct; + if (memchr_inv(args.reserved, 0, sizeof(args.reserved))) + 
goto out_acct; + if (args.compression == BTRFS_ENCODED_IO_COMPRESSION_NONE && + args.encryption == BTRFS_ENCODED_IO_ENCRYPTION_NONE) + goto out_acct; + if (args.compression >= BTRFS_ENCODED_IO_COMPRESSION_TYPES || + args.encryption >= BTRFS_ENCODED_IO_ENCRYPTION_TYPES) + goto out_acct; + if (args.unencoded_offset > args.unencoded_len) + goto out_acct; + if (args.len > args.unencoded_len - args.unencoded_offset) + goto out_acct; + + ret = import_iovec(ITER_SOURCE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack), + &iov, &iter); + if (ret < 0) + goto out_acct; + + if (iov_iter_count(&iter) == 0) { + ret = 0; + goto out_iov; + } + pos = args.offset; + ret = rw_verify_area(WRITE, file, &pos, args.len); + if (ret < 0) + goto out_iov; + + init_sync_kiocb(&kiocb, file); + ret = kiocb_set_rw_flags(&kiocb, 0, WRITE); + if (ret) + goto out_iov; + kiocb.ki_pos = pos; + + file_start_write(file); + + ret = btrfs_do_write_iter(&kiocb, &iter, &args); + if (ret > 0) + fsnotify_modify(file); + + file_end_write(file); +out_iov: + kfree(iov); +out_acct: + if (ret > 0) + add_wchar(current, ret); + inc_syscw(current); + return ret; +} + +struct btrfs_uring_encoded_data { + struct btrfs_ioctl_encoded_io_args args; + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov; + struct iov_iter iter; +}; + +/* + * Context that's attached to an encoded read io_uring command, in cmd->pdu. It + * contains the fields in btrfs_uring_read_extent that are necessary to finish + * off and cleanup the I/O in btrfs_uring_read_finished. + */ +struct btrfs_uring_priv { + struct io_uring_cmd *cmd; + struct page **pages; + unsigned long nr_pages; + struct kiocb iocb; + struct iovec *iov; + struct iov_iter iter; + struct extent_state *cached_state; + u64 count; + u64 start; + u64 lockend; + int err; + bool compressed; +}; + +struct io_btrfs_cmd { + struct btrfs_uring_encoded_data *data; + struct btrfs_uring_priv *priv; +}; + +static void btrfs_uring_read_finished(struct io_tw_req tw_req, io_tw_token_t tw) +{ + struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req); + struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd); + struct btrfs_uring_priv *priv = bc->priv; + struct btrfs_inode *inode = BTRFS_I(file_inode(priv->iocb.ki_filp)); + struct extent_io_tree *io_tree = &inode->io_tree; + pgoff_t index; + u64 cur; + size_t page_offset; + ssize_t ret; + + /* The inode lock has already been acquired in btrfs_uring_read_extent. 
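+	 * Tell lockdep we hold it again in this task before the real unlock
+	 * below; the issuing task dropped it for lockdep purposes in
+	 * btrfs_uring_read_extent().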
*/ + btrfs_lockdep_inode_acquire(inode, i_rwsem); + + if (priv->err) { + ret = priv->err; + goto out; + } + + if (priv->compressed) { + index = 0; + page_offset = 0; + } else { + index = (priv->iocb.ki_pos - priv->start) >> PAGE_SHIFT; + page_offset = offset_in_page(priv->iocb.ki_pos - priv->start); + } + cur = 0; + while (cur < priv->count) { + size_t bytes = min_t(size_t, priv->count - cur, PAGE_SIZE - page_offset); + + if (copy_page_to_iter(priv->pages[index], page_offset, bytes, + &priv->iter) != bytes) { + ret = -EFAULT; + goto out; + } + + index++; + cur += bytes; + page_offset = 0; + } + ret = priv->count; + +out: + btrfs_unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state); + btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); + + io_uring_cmd_done(cmd, ret, IO_URING_CMD_TASK_WORK_ISSUE_FLAGS); + add_rchar(current, ret); + + for (index = 0; index < priv->nr_pages; index++) + __free_page(priv->pages[index]); + + kfree(priv->pages); + kfree(priv->iov); + kfree(priv); + kfree(bc->data); +} + +void btrfs_uring_read_extent_endio(void *ctx, int err) +{ + struct btrfs_uring_priv *priv = ctx; + struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(priv->cmd, struct io_btrfs_cmd); + + priv->err = err; + bc->priv = priv; + + io_uring_cmd_complete_in_task(priv->cmd, btrfs_uring_read_finished); +} + +static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter, + u64 start, u64 lockend, + struct extent_state *cached_state, + u64 disk_bytenr, u64 disk_io_size, + size_t count, bool compressed, + struct iovec *iov, struct io_uring_cmd *cmd) +{ + struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); + struct extent_io_tree *io_tree = &inode->io_tree; + struct page **pages; + struct btrfs_uring_priv *priv = NULL; + unsigned long nr_pages; + int ret; + + nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); + pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); + if (!pages) + return -ENOMEM; + ret = btrfs_alloc_page_array(nr_pages, pages, 0); + if (ret) { + ret = -ENOMEM; + goto out_fail; + } + + priv = kmalloc(sizeof(*priv), GFP_NOFS); + if (!priv) { + ret = -ENOMEM; + goto out_fail; + } + + priv->iocb = *iocb; + priv->iov = iov; + priv->iter = *iter; + priv->count = count; + priv->cmd = cmd; + priv->cached_state = cached_state; + priv->compressed = compressed; + priv->nr_pages = nr_pages; + priv->pages = pages; + priv->start = start; + priv->lockend = lockend; + priv->err = 0; + + ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr, + disk_io_size, pages, priv); + if (ret && ret != -EIOCBQUEUED) + goto out_fail; + + /* + * If we return -EIOCBQUEUED, we're deferring the cleanup to + * btrfs_uring_read_finished(), which will handle unlocking the extent + * and inode and freeing the allocations. + */ + + /* + * We're returning to userspace with the inode lock held, and that's + * okay - it'll get unlocked in a worker thread. Call + * btrfs_lockdep_inode_release() to avoid confusing lockdep. 
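+	 *
+	 * The worker side (btrfs_uring_read_finished()) pairs this with
+	 * btrfs_lockdep_inode_acquire() before performing the real unlock.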
+ */ + btrfs_lockdep_inode_release(inode, i_rwsem); + + return -EIOCBQUEUED; + +out_fail: + btrfs_unlock_extent(io_tree, start, lockend, &cached_state); + btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); + kfree(priv); + return ret; +} + +static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + struct file *file = cmd->file; + struct btrfs_inode *inode = BTRFS_I(file->f_inode); + struct extent_io_tree *io_tree = &inode->io_tree; + struct btrfs_fs_info *fs_info = inode->root->fs_info; + size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags); + size_t copy_end; + int ret; + u64 disk_bytenr, disk_io_size; + loff_t pos; + struct kiocb kiocb; + struct extent_state *cached_state = NULL; + u64 start, lockend; + void __user *sqe_addr; + struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd); + struct btrfs_uring_encoded_data *data = NULL; + + if (cmd->flags & IORING_URING_CMD_REISSUE) + data = bc->data; + + if (!capable(CAP_SYS_ADMIN)) { + ret = -EPERM; + goto out_acct; + } + sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr)); + + if (issue_flags & IO_URING_F_COMPAT) { +#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) + copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags); +#else + ret = -ENOTTY; + goto out_acct; +#endif + } else { + copy_end = copy_end_kernel; + } + + if (!data) { + data = kzalloc(sizeof(*data), GFP_NOFS); + if (!data) { + ret = -ENOMEM; + goto out_acct; + } + + bc->data = data; + + if (issue_flags & IO_URING_F_COMPAT) { +#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) + struct btrfs_ioctl_encoded_io_args_32 args32; + + if (copy_from_user(&args32, sqe_addr, copy_end)) { + ret = -EFAULT; + goto out_acct; + } + + data->args.iov = compat_ptr(args32.iov); + data->args.iovcnt = args32.iovcnt; + data->args.offset = args32.offset; + data->args.flags = args32.flags; +#endif + } else { + if (copy_from_user(&data->args, sqe_addr, copy_end)) { + ret = -EFAULT; + goto out_acct; + } + } + + if (data->args.flags != 0) { + ret = -EINVAL; + goto out_acct; + } + + data->iov = data->iovstack; + ret = import_iovec(ITER_DEST, data->args.iov, data->args.iovcnt, + ARRAY_SIZE(data->iovstack), &data->iov, + &data->iter); + if (ret < 0) + goto out_acct; + + if (iov_iter_count(&data->iter) == 0) { + ret = 0; + goto out_free; + } + } + + pos = data->args.offset; + ret = rw_verify_area(READ, file, &pos, data->args.len); + if (ret < 0) + goto out_free; + + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = pos; + + if (issue_flags & IO_URING_F_NONBLOCK) + kiocb.ki_flags |= IOCB_NOWAIT; + + start = ALIGN_DOWN(pos, fs_info->sectorsize); + lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; + + ret = btrfs_encoded_read(&kiocb, &data->iter, &data->args, &cached_state, + &disk_bytenr, &disk_io_size); + if (ret == -EAGAIN) + goto out_acct; + if (ret < 0 && ret != -EIOCBQUEUED) + goto out_free; + + file_accessed(file); + + if (copy_to_user(sqe_addr + copy_end, + (const char *)&data->args + copy_end_kernel, + sizeof(data->args) - copy_end_kernel)) { + if (ret == -EIOCBQUEUED) { + btrfs_unlock_extent(io_tree, start, lockend, &cached_state); + btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); + } + ret = -EFAULT; + goto out_free; + } + + if (ret == -EIOCBQUEUED) { + u64 count = min_t(u64, iov_iter_count(&data->iter), disk_io_size); + + /* Match ioctl by not returning past EOF if uncompressed. 
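+		 * For compressed extents the caller gets the full on-disk
+		 * data (disk_io_size bytes), so only the uncompressed case
+		 * is clamped to args.len.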
*/ + if (!data->args.compression) + count = min_t(u64, count, data->args.len); + + ret = btrfs_uring_read_extent(&kiocb, &data->iter, start, lockend, + cached_state, disk_bytenr, disk_io_size, + count, data->args.compression, + data->iov, cmd); + + goto out_acct; + } + +out_free: + kfree(data->iov); + +out_acct: + if (ret > 0) + add_rchar(current, ret); + inc_syscr(current); + + if (ret != -EIOCBQUEUED && ret != -EAGAIN) + kfree(data); + + return ret; +} + +static int btrfs_uring_encoded_write(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + struct file *file = cmd->file; + loff_t pos; + struct kiocb kiocb; + ssize_t ret; + void __user *sqe_addr; + struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd); + struct btrfs_uring_encoded_data *data = NULL; + + if (cmd->flags & IORING_URING_CMD_REISSUE) + data = bc->data; + + if (!capable(CAP_SYS_ADMIN)) { + ret = -EPERM; + goto out_acct; + } + sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr)); + + if (!(file->f_mode & FMODE_WRITE)) { + ret = -EBADF; + goto out_acct; + } + + if (!data) { + data = kzalloc(sizeof(*data), GFP_NOFS); + if (!data) { + ret = -ENOMEM; + goto out_acct; + } + + bc->data = data; + + if (issue_flags & IO_URING_F_COMPAT) { +#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) + struct btrfs_ioctl_encoded_io_args_32 args32; + + if (copy_from_user(&args32, sqe_addr, sizeof(args32))) { + ret = -EFAULT; + goto out_acct; + } + data->args.iov = compat_ptr(args32.iov); + data->args.iovcnt = args32.iovcnt; + data->args.offset = args32.offset; + data->args.flags = args32.flags; + data->args.len = args32.len; + data->args.unencoded_len = args32.unencoded_len; + data->args.unencoded_offset = args32.unencoded_offset; + data->args.compression = args32.compression; + data->args.encryption = args32.encryption; + memcpy(data->args.reserved, args32.reserved, + sizeof(data->args.reserved)); +#else + ret = -ENOTTY; + goto out_acct; +#endif + } else { + if (copy_from_user(&data->args, sqe_addr, sizeof(data->args))) { + ret = -EFAULT; + goto out_acct; + } + } + + ret = -EINVAL; + if (data->args.flags != 0) + goto out_acct; + if (memchr_inv(data->args.reserved, 0, sizeof(data->args.reserved))) + goto out_acct; + if (data->args.compression == BTRFS_ENCODED_IO_COMPRESSION_NONE && + data->args.encryption == BTRFS_ENCODED_IO_ENCRYPTION_NONE) + goto out_acct; + if (data->args.compression >= BTRFS_ENCODED_IO_COMPRESSION_TYPES || + data->args.encryption >= BTRFS_ENCODED_IO_ENCRYPTION_TYPES) + goto out_acct; + if (data->args.unencoded_offset > data->args.unencoded_len) + goto out_acct; + if (data->args.len > data->args.unencoded_len - data->args.unencoded_offset) + goto out_acct; + + data->iov = data->iovstack; + ret = import_iovec(ITER_SOURCE, data->args.iov, data->args.iovcnt, + ARRAY_SIZE(data->iovstack), &data->iov, + &data->iter); + if (ret < 0) + goto out_acct; + + if (iov_iter_count(&data->iter) == 0) { + ret = 0; + goto out_iov; + } + } + + if (issue_flags & IO_URING_F_NONBLOCK) { + ret = -EAGAIN; + goto out_acct; + } + + pos = data->args.offset; + ret = rw_verify_area(WRITE, file, &pos, data->args.len); + if (ret < 0) + goto out_iov; + + init_sync_kiocb(&kiocb, file); + ret = kiocb_set_rw_flags(&kiocb, 0, WRITE); + if (ret) + goto out_iov; + kiocb.ki_pos = pos; + + file_start_write(file); + + ret = btrfs_do_write_iter(&kiocb, &data->iter, &data->args); + if (ret > 0) + fsnotify_modify(file); + + file_end_write(file); +out_iov: + kfree(data->iov); +out_acct: + if (ret > 0) + add_wchar(current, ret); + 
inc_syscw(current);
+
+ if (ret != -EAGAIN)
+ kfree(data);
+ return ret;
+}
+
+int btrfs_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(cmd->file)))))
+ return -EIO;
+
+ switch (cmd->cmd_op) {
+ case BTRFS_IOC_ENCODED_READ:
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ case BTRFS_IOC_ENCODED_READ_32:
+#endif
+ return btrfs_uring_encoded_read(cmd, issue_flags);
+
+ case BTRFS_IOC_ENCODED_WRITE:
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ case BTRFS_IOC_ENCODED_WRITE_32:
+#endif
+ return btrfs_uring_encoded_write(cmd, issue_flags);
+ }
+
+ return -EINVAL;
+}
+
+static int btrfs_ioctl_subvol_sync(struct btrfs_fs_info *fs_info, void __user *argp)
+{
+ struct btrfs_root *root;
+ struct btrfs_ioctl_subvol_wait args = { 0 };
+ signed long sched_ret;
+ int refs;
+ u64 root_flags;
+ bool wait_for_deletion = false;
+ bool found = false;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ switch (args.mode) {
+ case BTRFS_SUBVOL_SYNC_WAIT_FOR_QUEUED:
+ /*
+ * Wait for the first one that was queued for deletion; waiting
+ * for it implies waiting until all previously queued deletions
+ * have been cleaned.
+ */
+ spin_lock(&fs_info->trans_lock);
+ if (!list_empty(&fs_info->dead_roots)) {
+ root = list_last_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
+ args.subvolid = btrfs_root_id(root);
+ found = true;
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (!found)
+ return -ENOENT;
+
+ fallthrough;
+ case BTRFS_SUBVOL_SYNC_WAIT_FOR_ONE:
+ if ((0 < args.subvolid && args.subvolid < BTRFS_FIRST_FREE_OBJECTID) ||
+ BTRFS_LAST_FREE_OBJECTID < args.subvolid)
+ return -EINVAL;
+ break;
+ case BTRFS_SUBVOL_SYNC_COUNT:
+ spin_lock(&fs_info->trans_lock);
+ args.count = list_count_nodes(&fs_info->dead_roots);
+ spin_unlock(&fs_info->trans_lock);
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ case BTRFS_SUBVOL_SYNC_PEEK_FIRST:
+ spin_lock(&fs_info->trans_lock);
+ /* Last in the list was deleted first. */
+ if (!list_empty(&fs_info->dead_roots)) {
+ root = list_last_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
+ args.subvolid = btrfs_root_id(root);
+ } else {
+ args.subvolid = 0;
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ case BTRFS_SUBVOL_SYNC_PEEK_LAST:
+ spin_lock(&fs_info->trans_lock);
+ /* First in the list was deleted last. */
+ if (!list_empty(&fs_info->dead_roots)) {
+ root = list_first_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
+ args.subvolid = btrfs_root_id(root);
+ } else {
+ args.subvolid = 0;
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /* 32-bit limitation: the fs_roots_radix key is not wide enough. */
+ if (sizeof(unsigned long) != sizeof(u64) && args.subvolid > U32_MAX)
+ return -EOVERFLOW;
+
+ while (1) {
+ /* Wait for the specific one. */
+ if (down_read_interruptible(&fs_info->subvol_sem) == -EINTR)
+ return -EINTR;
+ refs = -1;
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ root = radix_tree_lookup(&fs_info->fs_roots_radix,
+ (unsigned long)args.subvolid);
+ if (root) {
+ spin_lock(&root->root_item_lock);
+ refs = btrfs_root_refs(&root->root_item);
+ root_flags = btrfs_root_flags(&root->root_item);
+ spin_unlock(&root->root_item_lock);
+ }
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+ up_read(&fs_info->subvol_sem);
+
+ /* Subvolume does not exist. 
*/ + if (!root) + return -ENOENT; + + /* Subvolume not deleted at all. */ + if (refs > 0) + return -EEXIST; + /* We've waited and now the subvolume is gone. */ + if (wait_for_deletion && refs == -1) { + /* Return the one we waited for as the last one. */ + if (copy_to_user(argp, &args, sizeof(args))) + return -EFAULT; + return 0; + } + + /* Subvolume not found on the first try (deleted or never existed). */ + if (refs == -1) + return -ENOENT; + + wait_for_deletion = true; + ASSERT(root_flags & BTRFS_ROOT_SUBVOL_DEAD); + sched_ret = schedule_timeout_interruptible(HZ); + /* Early wake up or error. */ + if (sched_ret != 0) + return -EINTR; + } + + return 0; +} + +#ifdef CONFIG_BTRFS_EXPERIMENTAL +static int btrfs_ioctl_shutdown(struct btrfs_fs_info *fs_info, unsigned long arg) +{ + int ret = 0; + u32 flags; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (get_user(flags, (u32 __user *)arg)) + return -EFAULT; + + if (flags >= BTRFS_SHUTDOWN_FLAGS_LAST) + return -EINVAL; + + if (btrfs_is_shutdown(fs_info)) + return 0; + + switch (flags) { + case BTRFS_SHUTDOWN_FLAGS_LOGFLUSH: + case BTRFS_SHUTDOWN_FLAGS_DEFAULT: + ret = freeze_super(fs_info->sb, FREEZE_HOLDER_KERNEL, NULL); + if (ret) + return ret; + btrfs_force_shutdown(fs_info); + ret = thaw_super(fs_info->sb, FREEZE_HOLDER_KERNEL, NULL); + if (ret) + return ret; + break; + case BTRFS_SHUTDOWN_FLAGS_NOLOGFLUSH: + btrfs_force_shutdown(fs_info); + break; + } + return ret; +} +#endif + long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; void __user *argp = (void __user *)arg; switch (cmd) { - case FS_IOC_GETFLAGS: - return btrfs_ioctl_getflags(file, argp); - case FS_IOC_SETFLAGS: - return btrfs_ioctl_setflags(file, argp); case FS_IOC_GETVERSION: - return btrfs_ioctl_getversion(file, argp); + return btrfs_ioctl_getversion(inode, argp); + case FS_IOC_GETFSLABEL: + return btrfs_ioctl_get_fslabel(fs_info, argp); + case FS_IOC_SETFSLABEL: + return btrfs_ioctl_set_fslabel(file, argp); case FITRIM: - return btrfs_ioctl_fitrim(file, argp); + return btrfs_ioctl_fitrim(fs_info, argp); case BTRFS_IOC_SNAP_CREATE: - return btrfs_ioctl_snap_create(file, argp, 0); + return btrfs_ioctl_snap_create(file, argp, false); case BTRFS_IOC_SNAP_CREATE_V2: - return btrfs_ioctl_snap_create_v2(file, argp, 0); + return btrfs_ioctl_snap_create_v2(file, argp, false); case BTRFS_IOC_SUBVOL_CREATE: - return btrfs_ioctl_snap_create(file, argp, 1); + return btrfs_ioctl_snap_create(file, argp, true); case BTRFS_IOC_SUBVOL_CREATE_V2: - return btrfs_ioctl_snap_create_v2(file, argp, 1); + return btrfs_ioctl_snap_create_v2(file, argp, true); case BTRFS_IOC_SNAP_DESTROY: - return btrfs_ioctl_snap_destroy(file, argp); + return btrfs_ioctl_snap_destroy(file, argp, false); + case BTRFS_IOC_SNAP_DESTROY_V2: + return btrfs_ioctl_snap_destroy(file, argp, true); case BTRFS_IOC_SUBVOL_GETFLAGS: - return btrfs_ioctl_subvol_getflags(file, argp); + return btrfs_ioctl_subvol_getflags(BTRFS_I(inode), argp); case BTRFS_IOC_SUBVOL_SETFLAGS: return btrfs_ioctl_subvol_setflags(file, argp); case BTRFS_IOC_DEFAULT_SUBVOL: @@ -5605,14 +5234,12 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_fs_info(fs_info, argp); case BTRFS_IOC_DEV_INFO: return btrfs_ioctl_dev_info(fs_info, argp); - case BTRFS_IOC_BALANCE: - return btrfs_ioctl_balance(file, NULL); 
case BTRFS_IOC_TREE_SEARCH: - return btrfs_ioctl_tree_search(file, argp); + return btrfs_ioctl_tree_search(root, argp); case BTRFS_IOC_TREE_SEARCH_V2: - return btrfs_ioctl_tree_search_v2(file, argp); + return btrfs_ioctl_tree_search_v2(root, argp); case BTRFS_IOC_INO_LOOKUP: - return btrfs_ioctl_ino_lookup(file, argp); + return btrfs_ioctl_ino_lookup(root, argp); case BTRFS_IOC_INO_PATHS: return btrfs_ioctl_ino_to_path(root, argp); case BTRFS_IOC_LOGICAL_INO: @@ -5624,16 +5251,15 @@ long btrfs_ioctl(struct file *file, unsigned int case BTRFS_IOC_SYNC: { int ret; - ret = btrfs_start_delalloc_roots(fs_info, -1); + ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false); if (ret) return ret; ret = btrfs_sync_fs(inode->i_sb, 1); /* - * The transaction thread may want to do more work, - * namely it pokes the cleaner kthread that will start - * processing uncleaned subvols. + * There may be work for the cleaner kthread to do (subvolume + * deletion, delayed iputs, defrag inodes, etc), so wake it up. */ - wake_up_process(fs_info->transaction_kthread); + wake_up_process(fs_info->cleaner_kthread); return ret; } case BTRFS_IOC_START_SYNC: @@ -5659,10 +5285,10 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_set_received_subvol_32(file, argp); #endif case BTRFS_IOC_SEND: - return _btrfs_ioctl_send(file, argp, false); + return _btrfs_ioctl_send(root, argp, false); #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) case BTRFS_IOC_SEND_32: - return _btrfs_ioctl_send(file, argp, true); + return _btrfs_ioctl_send(root, argp, true); #endif case BTRFS_IOC_GET_DEV_STATS: return btrfs_ioctl_get_dev_stats(fs_info, argp); @@ -5677,31 +5303,45 @@ long btrfs_ioctl(struct file *file, unsigned int case BTRFS_IOC_QUOTA_RESCAN: return btrfs_ioctl_quota_rescan(file, argp); case BTRFS_IOC_QUOTA_RESCAN_STATUS: - return btrfs_ioctl_quota_rescan_status(file, argp); + return btrfs_ioctl_quota_rescan_status(fs_info, argp); case BTRFS_IOC_QUOTA_RESCAN_WAIT: - return btrfs_ioctl_quota_rescan_wait(file, argp); + return btrfs_ioctl_quota_rescan_wait(fs_info); case BTRFS_IOC_DEV_REPLACE: return btrfs_ioctl_dev_replace(fs_info, argp); - case BTRFS_IOC_GET_FSLABEL: - return btrfs_ioctl_get_fslabel(file, argp); - case BTRFS_IOC_SET_FSLABEL: - return btrfs_ioctl_set_fslabel(file, argp); case BTRFS_IOC_GET_SUPPORTED_FEATURES: return btrfs_ioctl_get_supported_features(argp); case BTRFS_IOC_GET_FEATURES: - return btrfs_ioctl_get_features(file, argp); + return btrfs_ioctl_get_features(fs_info, argp); case BTRFS_IOC_SET_FEATURES: return btrfs_ioctl_set_features(file, argp); - case FS_IOC_FSGETXATTR: - return btrfs_ioctl_fsgetxattr(file, argp); - case FS_IOC_FSSETXATTR: - return btrfs_ioctl_fssetxattr(file, argp); case BTRFS_IOC_GET_SUBVOL_INFO: - return btrfs_ioctl_get_subvol_info(file, argp); + return btrfs_ioctl_get_subvol_info(inode, argp); case BTRFS_IOC_GET_SUBVOL_ROOTREF: - return btrfs_ioctl_get_subvol_rootref(file, argp); + return btrfs_ioctl_get_subvol_rootref(root, argp); case BTRFS_IOC_INO_LOOKUP_USER: return btrfs_ioctl_ino_lookup_user(file, argp); + case FS_IOC_ENABLE_VERITY: + return fsverity_ioctl_enable(file, (const void __user *)argp); + case FS_IOC_MEASURE_VERITY: + return fsverity_ioctl_measure(file, argp); + case FS_IOC_READ_VERITY_METADATA: + return fsverity_ioctl_read_metadata(file, argp); + case BTRFS_IOC_ENCODED_READ: + return btrfs_ioctl_encoded_read(file, argp, false); + case BTRFS_IOC_ENCODED_WRITE: + return btrfs_ioctl_encoded_write(file, argp, false); +#if defined(CONFIG_64BIT) && 
defined(CONFIG_COMPAT)
+ case BTRFS_IOC_ENCODED_READ_32:
+ return btrfs_ioctl_encoded_read(file, argp, true);
+ case BTRFS_IOC_ENCODED_WRITE_32:
+ return btrfs_ioctl_encoded_write(file, argp, true);
+#endif
+ case BTRFS_IOC_SUBVOL_SYNC_WAIT:
+ return btrfs_ioctl_subvol_sync(fs_info, argp);
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ case BTRFS_IOC_SHUTDOWN:
+ return btrfs_ioctl_shutdown(fs_info, arg);
+#endif
 }
 
 return -ENOTTY;
@@ -5715,12 +5355,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 * handling is necessary.
 */
 switch (cmd) {
- case FS_IOC32_GETFLAGS:
- cmd = FS_IOC_GETFLAGS;
- break;
- case FS_IOC32_SETFLAGS:
- cmd = FS_IOC_SETFLAGS;
- break;
 case FS_IOC32_GETVERSION:
 cmd = FS_IOC_GETVERSION;
 break;

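For reference, a minimal userspace sketch of driving the BTRFS_IOC_SUBVOL_SYNC_WAIT interface handled by btrfs_ioctl_subvol_sync() above. It assumes the struct btrfs_ioctl_subvol_wait UAPI (subvolid, mode, count) and the BTRFS_SUBVOL_SYNC_* modes used in the hunk are exported via <linux/btrfs.h>; the error handling mirrors the kernel side shown here (-ENOENT when the root is absent, -EEXIST while the subvolume is still referenced).

/* Illustrative sketch only; assumes the UAPI shown in the hunk above. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int wait_subvol_cleaned(int fd, __u64 subvolid)
{
	struct btrfs_ioctl_subvol_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.mode = BTRFS_SUBVOL_SYNC_WAIT_FOR_ONE;
	wait.subvolid = subvolid;

	/* The handler polls roughly once per second until the root is gone. */
	if (ioctl(fd, BTRFS_IOC_SUBVOL_SYNC_WAIT, &wait) < 0) {
		if (errno == ENOENT)
			return 0;	/* never existed or already cleaned */
		return -errno;		/* e.g. EEXIST: not deleted yet */
	}
	return 0;
}

int main(int argc, char **argv)
{
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <path-on-fs> <subvolid>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);	/* any path on the mounted btrfs */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	return wait_subvol_cleaned(fd, strtoull(argv[2], NULL, 0)) ? 1 : 0;
}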