Diffstat (limited to 'fs/ntfs3/file.c')
| -rw-r--r-- | fs/ntfs3/file.c | 627 |
1 file changed, 467 insertions, 160 deletions
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index d294cd975688..2e7b2e566ebe 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -13,29 +13,37 @@
 #include <linux/compat.h>
 #include <linux/falloc.h>
 #include <linux/fiemap.h>
+#include <linux/fileattr.h>

 #include "debug.h"
 #include "ntfs.h"
 #include "ntfs_fs.h"

+/*
+ * cifx, btrfs, exfat, ext4, f2fs use this constant.
+ * Hope this value will become common to all fs.
+ */
+#define NTFS3_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
 static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
 {
 	struct fstrim_range __user *user_range;
 	struct fstrim_range range;
+	struct block_device *dev;
 	int err;

 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;

-	if (!bdev_max_discard_sectors(sbi->sb->s_bdev))
+	dev = sbi->sb->s_bdev;
+	if (!bdev_max_discard_sectors(dev))
 		return -EOPNOTSUPP;

 	user_range = (struct fstrim_range __user *)arg;
 	if (copy_from_user(&range, user_range, sizeof(range)))
 		return -EFAULT;

-	range.minlen = max_t(u32, range.minlen,
-			     bdev_discard_granularity(sbi->sb->s_bdev));
+	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

 	err = ntfs_trim_fs(sbi, &range);
 	if (err < 0)
@@ -47,20 +55,91 @@ static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
 	return 0;
 }

-static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
+static int ntfs_ioctl_get_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
+{
+	if (copy_to_user(buf, sbi->volume.label, FSLABEL_MAX))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ntfs_ioctl_set_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
+{
+	u8 user[FSLABEL_MAX] = { 0 };
+	int len;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (copy_from_user(user, buf, FSLABEL_MAX))
+		return -EFAULT;
+
+	len = strnlen(user, FSLABEL_MAX);
+
+	return ntfs_set_label(sbi, user, len);
+}
+
+/*
+ * ntfs_force_shutdown - helper function. Called from ioctl
+ */
+static int ntfs_force_shutdown(struct super_block *sb, u32 flags)
+{
+	int err;
+	struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+	if (unlikely(ntfs3_forced_shutdown(sb)))
+		return 0;
+
+	/* No additional options yet (flags). */
+	err = bdev_freeze(sb->s_bdev);
+	if (err)
+		return err;
+	set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &sbi->flags);
+	bdev_thaw(sb->s_bdev);
+	return 0;
+}
+
+static int ntfs_ioctl_shutdown(struct super_block *sb, unsigned long arg)
+{
+	u32 flags;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (get_user(flags, (__u32 __user *)arg))
+		return -EFAULT;
+
+	return ntfs_force_shutdown(sb, flags);
+}
+
+/*
+ * ntfs_ioctl - file_operations::unlocked_ioctl
+ */
+long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
-	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
+	struct super_block *sb = inode->i_sb;
+	struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ntfs_i(inode))))
+		return -EINVAL;

 	switch (cmd) {
 	case FITRIM:
 		return ntfs_ioctl_fitrim(sbi, arg);
+	case FS_IOC_GETFSLABEL:
+		return ntfs_ioctl_get_volume_label(sbi, (u8 __user *)arg);
+	case FS_IOC_SETFSLABEL:
+		return ntfs_ioctl_set_volume_label(sbi, (u8 __user *)arg);
+	case NTFS3_IOC_SHUTDOWN:
+		return ntfs_ioctl_shutdown(sb, arg);
 	}
 	return -ENOTTY; /* Inappropriate ioctl for device. */
 }

 #ifdef CONFIG_COMPAT
-static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
+long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)

 {
 	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
@@ -70,25 +149,36 @@ static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
 /*
  * ntfs_getattr - inode_operations::getattr
  */
-int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
 		 struct kstat *stat, u32 request_mask, u32 flags)
 {
 	struct inode *inode = d_inode(path->dentry);
 	struct ntfs_inode *ni = ntfs_i(inode);

+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
+	stat->result_mask |= STATX_BTIME;
+	stat->btime = ni->i_crtime;
+	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */
+
+	if (inode->i_flags & S_IMMUTABLE)
+		stat->attributes |= STATX_ATTR_IMMUTABLE;
+
+	if (inode->i_flags & S_APPEND)
+		stat->attributes |= STATX_ATTR_APPEND;
+
 	if (is_compressed(ni))
 		stat->attributes |= STATX_ATTR_COMPRESSED;

 	if (is_encrypted(ni))
 		stat->attributes |= STATX_ATTR_ENCRYPTED;

-	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;
+	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
+				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

-	generic_fillattr(mnt_userns, inode, stat);
-
-	stat->result_mask |= STATX_BTIME;
-	stat->btime = ni->i_crtime;
-	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */
+	generic_fillattr(idmap, request_mask, inode, stat);

 	return 0;
 }
@@ -104,17 +194,19 @@ static int ntfs_extend_initialized_size(struct file *file,
 	loff_t pos = valid;
 	int err;

+	if (valid >= new_valid)
+		return 0;
+
 	if (is_resident(ni)) {
 		ni->i_valid = new_valid;
 		return 0;
 	}

 	WARN_ON(is_compressed(ni));
-	WARN_ON(valid >= new_valid);

 	for (;;) {
 		u32 zerofrom, len;
-		struct page *page;
+		struct folio *folio;
 		u8 bits;
 		CLST vcn, lcn, clen;

@@ -140,14 +232,13 @@ static int ntfs_extend_initialized_size(struct file *file,
 		if (pos + len > new_valid)
 			len = new_valid - pos;

-		err = ntfs_write_begin(file, mapping, pos, len, &page, NULL);
+		err = ntfs_write_begin(NULL, mapping, pos, len, &folio, NULL);
 		if (err)
 			goto out;

-		zero_user_segment(page, zerofrom, PAGE_SIZE);
+		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

-		/* This function in any case puts page. */
-		err = ntfs_write_end(file, mapping, pos, len, len, page, NULL);
+		err = ntfs_write_end(NULL, mapping, pos, len, len, folio, NULL);
 		if (err < 0)
 			goto out;
 		pos += len;
@@ -178,7 +269,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
 {
 	int err = 0;
 	struct address_space *mapping = inode->i_mapping;
-	u32 blocksize = 1 << inode->i_blkbits;
+	u32 blocksize = i_blocksize(inode);
 	pgoff_t idx = vbo >> PAGE_SHIFT;
 	u32 from = vbo & (PAGE_SIZE - 1);
 	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -186,24 +277,26 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
 	struct buffer_head *head, *bh;
 	u32 bh_next, bh_off, to;
 	sector_t iblock;
-	struct page *page;
+	struct folio *folio;
+	bool dirty = false;

 	for (; idx < idx_end; idx += 1, from = 0) {
 		page_off = (loff_t)idx << PAGE_SHIFT;
-		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
-						     : PAGE_SIZE;
+		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
+						       PAGE_SIZE;
 		iblock = page_off >> inode->i_blkbits;

-		page = find_or_create_page(mapping, idx,
-					   mapping_gfp_constraint(mapping,
-								  ~__GFP_FS));
-		if (!page)
-			return -ENOMEM;
+		folio = __filemap_get_folio(
+			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+			mapping_gfp_constraint(mapping, ~__GFP_FS));
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);

-		if (!page_has_buffers(page))
-			create_empty_buffers(page, blocksize, 0);
+		head = folio_buffers(folio);
+		if (!head)
+			head = create_empty_buffers(folio, blocksize, 0);

-		bh = head = page_buffers(page);
+		bh = head;
 		bh_off = 0;
 		do {
 			bh_next = bh_off + blocksize;
@@ -219,52 +312,51 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
 			}

 			/* Ok, it's mapped. Make sure it's up-to-date. */
-			if (PageUptodate(page))
+			if (folio_test_uptodate(folio))
 				set_buffer_uptodate(bh);
-
-			if (!buffer_uptodate(bh)) {
-				lock_buffer(bh);
-				bh->b_end_io = end_buffer_read_sync;
-				get_bh(bh);
-				submit_bh(REQ_OP_READ, bh);
-
-				wait_on_buffer(bh);
-				if (!buffer_uptodate(bh)) {
-					unlock_page(page);
-					put_page(page);
-					err = -EIO;
-					goto out;
-				}
+			else if (bh_read(bh, 0) < 0) {
+				err = -EIO;
+				folio_unlock(folio);
+				folio_put(folio);
+				goto out;
 			}

 			mark_buffer_dirty(bh);
-
 		} while (bh_off = bh_next, iblock += 1,
 			 head != (bh = bh->b_this_page));

-		zero_user_segment(page, from, to);
+		folio_zero_segment(folio, from, to);
+		dirty = true;

-		unlock_page(page);
-		put_page(page);
+		folio_unlock(folio);
+		folio_put(folio);
 		cond_resched();
 	}
 out:
-	mark_inode_dirty(inode);
+	if (dirty)
+		mark_inode_dirty(inode);
 	return err;
 }

 /*
- * ntfs_file_mmap - file_operations::mmap
+ * ntfs_file_mmap_prepare - file_operations::mmap_prepare
  */
-static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
 {
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
+	struct file *file = desc->file;
+	struct inode *inode = file_inode(file);
 	struct ntfs_inode *ni = ntfs_i(inode);
-	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
-	bool rw = vma->vm_flags & VM_WRITE;
+	u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
+	bool rw = desc->vm_flags & VM_WRITE;
 	int err;

+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
+	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+		return -EIO;
+
 	if (is_encrypted(ni)) {
 		ntfs_inode_warn(inode, "mmap encrypted not supported");
 		return -EOPNOTSUPP;
@@ -275,14 +367,19 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 		return -EOPNOTSUPP;
 	}

-	if (is_compressed(ni) && rw) {
-		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
-		return -EOPNOTSUPP;
+	if (is_compressed(ni)) {
+		if (rw) {
+			ntfs_inode_warn(inode,
+					"mmap(write) compressed not supported");
+			return -EOPNOTSUPP;
+		}
+		/* Turn off readahead for compressed files. */
+		file->f_ra.ra_pages = 0;
 	}

 	if (rw) {
 		u64 to = min_t(loff_t, i_size_read(inode),
-			       from + vma->vm_end - vma->vm_start);
+			       from + vma_desc_size(desc));

 		if (is_sparsed(ni)) {
 			/* Allocate clusters for rw map. */
@@ -313,7 +410,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 		}
 	}

-	err = generic_file_mmap(file, vma);
+	err = generic_file_mmap_prepare(desc);
 out:
 	return err;
 }
@@ -347,7 +444,43 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
 		err = 0;
 	}

-	inode->i_ctime = inode->i_mtime = current_time(inode);
+	if (file && is_sparsed(ni)) {
+		/*
+		 * This code optimizes large writes to sparse file.
+		 * TODO: merge this fragment with fallocate fragment.
+		 */
+		struct ntfs_sb_info *sbi = ni->mi.sbi;
+		CLST vcn = pos >> sbi->cluster_bits;
+		CLST cend = bytes_to_cluster(sbi, end);
+		CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
+		CLST lcn, clen;
+		bool new;
+
+		if (cend_v > cend)
+			cend_v = cend;
+
+		/*
+		 * Allocate and zero new clusters.
+		 * Zeroing these clusters may be too long.
+		 */
+		for (; vcn < cend_v; vcn += clen) {
+			err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
+						  &clen, &new, true);
+			if (err)
+				goto out;
+		}
+		/*
+		 * Allocate but not zero new clusters.
+		 */
+		for (; vcn < cend; vcn += clen) {
+			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new, false);
+			if (err)
+				goto out;
+		}
+	}
+
+	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 	mark_inode_dirty(inode);

 	if (IS_SYNC(inode)) {
@@ -405,7 +538,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
 	ni_unlock(ni);

 	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
-	inode->i_ctime = inode->i_mtime = current_time(inode);
+	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 	if (!IS_DIRSYNC(inode)) {
 		dirty = 1;
 	} else {
@@ -417,13 +550,11 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
 	if (dirty)
 		mark_inode_dirty(inode);

-	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/
-
 	return 0;
 }

 /*
- * ntfs_fallocate
+ * ntfs_fallocate - file_operations::ntfs_fallocate
  *
  * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from sys_fallocate system call. User
@@ -433,7 +564,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
 */
 static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 {
-	struct inode *inode = file->f_mapping->host;
+	struct inode *inode = file_inode(file);
 	struct address_space *mapping = inode->i_mapping;
 	struct super_block *sb = inode->i_sb;
 	struct ntfs_sb_info *sbi = sb->s_fs_info;
@@ -503,10 +634,14 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 		ni_lock(ni);
 		err = attr_punch_hole(ni, vbo, len, &frame_size);
 		ni_unlock(ni);
+		if (!err)
+			goto ok;
+
 		if (err != E_NTFS_NOTALIGNED)
 			goto out;

 		/* Process not aligned punch. */
+		err = 0;
 		mask = frame_size - 1;
 		vbo_a = (vbo + mask) & ~mask;
 		end_a = end & ~mask;
@@ -529,6 +664,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 			ni_lock(ni);
 			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
 			ni_unlock(ni);
+			if (err)
+				goto out;
 		}
 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
 		/*
@@ -552,6 +689,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 		ni_lock(ni);
 		err = attr_collapse_range(ni, vbo, len);
 		ni_unlock(ni);
+		if (err)
+			goto out;
 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
 		/* Check new size. */
 		err = inode_newsize_ok(inode, new_size);
@@ -568,15 +707,27 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 		ni_lock(ni);
 		err = attr_insert_range(ni, vbo, len);
 		ni_unlock(ni);
+		if (err)
+			goto out;
 	} else {
 		/* Check new size. */
+		u8 cluster_bits = sbi->cluster_bits;
+
+		/* Be sure file is non resident. */
+		if (is_resident(ni)) {
+			ni_lock(ni);
+			err = attr_force_nonresident(ni);
+			ni_unlock(ni);
+			if (err)
+				goto out;
+		}

 		/* generic/213: expected -ENOSPC instead of -EFBIG. */
 		if (!is_supported_holes) {
 			loff_t to_alloc = new_size - inode_get_bytes(inode);

 			if (to_alloc > 0 &&
-			    (to_alloc >> sbi->cluster_bits) >
+			    (to_alloc >> cluster_bits) >
 				    wnd_zeroes(&sbi->used.bitmap)) {
 				err = -ENOSPC;
 				goto out;
@@ -597,7 +748,7 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 		}

 		if (is_supported_holes) {
-			CLST vcn = vbo >> sbi->cluster_bits;
+			CLST vcn = vbo >> cluster_bits;
 			CLST cend = bytes_to_cluster(sbi, end);
 			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
 			CLST lcn, clen;
@@ -636,17 +787,24 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
 					    &ni->file.run, i_size, &ni->i_valid,
 					    true, NULL);
 			ni_unlock(ni);
+			if (err)
+				goto out;
 		} else if (new_size > i_size) {
-			inode->i_size = new_size;
+			i_size_write(inode, new_size);
 		}
 	}

+ok:
+	err = file_modified(file);
+	if (err)
+		goto out;
+
 out:
 	if (map_locked)
 		filemap_invalidate_unlock(mapping);

 	if (!err) {
-		inode->i_ctime = inode->i_mtime = current_time(inode);
+		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 		mark_inode_dirty(inode);
 	}

@@ -655,28 +813,25 @@ out:
 }

 /*
- * ntfs3_setattr - inode_operations::setattr
+ * ntfs_setattr - inode_operations::setattr
  */
-int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
-		  struct iattr *attr)
+int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+		 struct iattr *attr)
 {
-	struct super_block *sb = dentry->d_sb;
-	struct ntfs_sb_info *sbi = sb->s_fs_info;
 	struct inode *inode = d_inode(dentry);
 	struct ntfs_inode *ni = ntfs_i(inode);
 	u32 ia_valid = attr->ia_valid;
 	umode_t mode = inode->i_mode;
 	int err;

-	if (sbi->options->noacsrules) {
-		/* "No access rules" - Force any changes of time etc. */
-		attr->ia_valid |= ATTR_FORCE;
-		/* and disable for editing some attributes. */
-		attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
-		ia_valid = attr->ia_valid;
-	}
+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
+	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+		return -EIO;

-	err = setattr_prepare(mnt_userns, dentry, attr);
+	err = setattr_prepare(idmap, dentry, attr);
 	if (err)
 		goto out;

@@ -689,7 +844,7 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 			goto out;
 		}
 		inode_dio_wait(inode);
-		oldsize = inode->i_size;
+		oldsize = i_size_read(inode);
 		newsize = attr->ia_size;

 		if (newsize <= oldsize)
@@ -701,13 +856,13 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 			goto out;

 		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
-		inode->i_size = newsize;
+		i_size_write(inode, newsize);
 	}

-	setattr_copy(mnt_userns, inode, attr);
+	setattr_copy(idmap, inode, attr);

 	if (mode != inode->i_mode) {
-		err = ntfs_acl_chmod(mnt_userns, dentry);
+		err = ntfs_acl_chmod(idmap, dentry);
 		if (err)
 			goto out;

@@ -719,28 +874,32 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 	}

 	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
-		ntfs_save_wsl_perm(inode);
+		ntfs_save_wsl_perm(inode, NULL);
 	mark_inode_dirty(inode);

 out:
 	return err;
 }

-static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+/*
+ * check_read_restriction:
+ * common code for ntfs_file_read_iter and ntfs_file_splice_read
+ */
+static int check_read_restriction(struct inode *inode)
 {
-	struct file *file = iocb->ki_filp;
-	struct inode *inode = file->f_mapping->host;
 	struct ntfs_inode *ni = ntfs_i(inode);

+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
+	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+		return -EIO;
+
 	if (is_encrypted(ni)) {
 		ntfs_inode_warn(inode, "encrypted i/o not supported");
 		return -EOPNOTSUPP;
 	}

-	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
-		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
-		return -EOPNOTSUPP;
-	}
-
 #ifndef CONFIG_NTFS3_LZX_XPRESS
 	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
 		ntfs_inode_warn(
@@ -755,10 +914,69 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 		return -EOPNOTSUPP;
 	}

+	return 0;
+}
+
+/*
+ * ntfs_file_read_iter - file_operations::read_iter
+ */
+static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
+	struct ntfs_inode *ni = ntfs_i(inode);
+	ssize_t err;
+
+	err = check_read_restriction(inode);
+	if (err)
+		return err;
+
+	if (is_compressed(ni)) {
+		if (iocb->ki_flags & IOCB_DIRECT) {
+			ntfs_inode_warn(
+				inode, "direct i/o + compressed not supported");
+			return -EOPNOTSUPP;
+		}
+		/* Turn off readahead for compressed files. */
+		file->f_ra.ra_pages = 0;
+	}
+
+	/* Check minimum alignment for dio. */
+	if (iocb->ki_flags & IOCB_DIRECT) {
+		struct super_block *sb = inode->i_sb;
+		struct ntfs_sb_info *sbi = sb->s_fs_info;
+		if ((iocb->ki_pos | iov_iter_alignment(iter)) &
+		    sbi->bdev_blocksize_mask) {
+			iocb->ki_flags &= ~IOCB_DIRECT;
+		}
+	}
+
 	return generic_file_read_iter(iocb, iter);
 }

 /*
+ * ntfs_file_splice_read - file_operations::splice_read
+ */
+static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
+				     struct pipe_inode_info *pipe, size_t len,
+				     unsigned int flags)
+{
+	struct inode *inode = file_inode(in);
+	ssize_t err;
+
+	err = check_read_restriction(inode);
+	if (err)
+		return err;
+
+	if (is_compressed(ntfs_i(inode))) {
+		/* Turn off readahead for compressed files. */
+		in->f_ra.ra_pages = 0;
+	}
+
+	return filemap_splice_read(in, ppos, pipe, len, flags);
+}
+
+/*
  * ntfs_get_frame_pages
  *
  * Return: Array of locked pages.
@@ -773,23 +991,25 @@ static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
 	*frame_uptodate = true;

 	for (npages = 0; npages < pages_per_frame; npages++, index++) {
-		struct page *page;
+		struct folio *folio;

-		page = find_or_create_page(mapping, index, gfp_mask);
-		if (!page) {
+		folio = __filemap_get_folio(mapping, index,
+					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+					    gfp_mask);
+		if (IS_ERR(folio)) {
 			while (npages--) {
-				page = pages[npages];
-				unlock_page(page);
-				put_page(page);
+				folio = page_folio(pages[npages]);
+				folio_unlock(folio);
+				folio_put(folio);
 			}

 			return -ENOMEM;
 		}

-		if (!PageUptodate(page))
+		if (!folio_test_uptodate(folio))
 			*frame_uptodate = false;

-		pages[npages] = page;
+		pages[npages] = &folio->page;
 	}

 	return 0;
@@ -805,12 +1025,13 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 	size_t count = iov_iter_count(from);
 	loff_t pos = iocb->ki_pos;
 	struct inode *inode = file_inode(file);
-	loff_t i_size = inode->i_size;
+	loff_t i_size = i_size_read(inode);
 	struct address_space *mapping = inode->i_mapping;
 	struct ntfs_inode *ni = ntfs_i(inode);
 	u64 valid = ni->i_valid;
 	struct ntfs_sb_info *sbi = ni->mi.sbi;
-	struct page *page, **pages = NULL;
+	struct page **pages = NULL;
+	struct folio *folio;
 	size_t written = 0;
 	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
 	u32 frame_size = 1u << frame_bits;
@@ -834,7 +1055,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 	if (!pages)
 		return -ENOMEM;

-	current->backing_dev_info = inode_to_bdi(inode);
 	err = file_remove_privs(file);
 	if (err)
 		goto out;
@@ -871,12 +1091,12 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)

 		if (!frame_uptodate && off) {
 			err = ni_read_frame(ni, frame_vbo, pages,
-					    pages_per_frame);
+					    pages_per_frame, 0);
 			if (err) {
 				for (ip = 0; ip < pages_per_frame; ip++) {
-					page = pages[ip];
-					unlock_page(page);
-					put_page(page);
+					folio = page_folio(pages[ip]);
+					folio_unlock(folio);
+					folio_put(folio);
 				}

 				goto out;
 			}
@@ -885,10 +1105,10 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 		ip = off >> PAGE_SHIFT;
 		off = offset_in_page(valid);
 		for (; ip < pages_per_frame; ip++, off = 0) {
-			page = pages[ip];
-			zero_user_segment(page, off, PAGE_SIZE);
-			flush_dcache_page(page);
-			SetPageUptodate(page);
+			folio = page_folio(pages[ip]);
+			folio_zero_segment(folio, off, PAGE_SIZE);
+			flush_dcache_folio(folio);
+			folio_mark_uptodate(folio);
 		}

 		ni_lock(ni);
@@ -896,10 +1116,10 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 		ni_unlock(ni);

 		for (ip = 0; ip < pages_per_frame; ip++) {
-			page = pages[ip];
-			SetPageUptodate(page);
-			unlock_page(page);
-			put_page(page);
+			folio = page_folio(pages[ip]);
+			folio_mark_uptodate(folio);
+			folio_unlock(folio);
+			folio_put(folio);
 		}

 		if (err)
@@ -936,13 +1156,13 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)

 		if (off || (to < i_size && (to & (frame_size - 1)))) {
 			err = ni_read_frame(ni, frame_vbo, pages,
-					    pages_per_frame);
+					    pages_per_frame, 0);
 			if (err) {
 				for (ip = 0; ip < pages_per_frame; ip++) {
-					page = pages[ip];
-					unlock_page(page);
-					put_page(page);
+					folio = page_folio(pages[ip]);
+					folio_unlock(folio);
+					folio_put(folio);
 				}

 				goto out;
 			}
@@ -958,10 +1178,10 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 		for (;;) {
 			size_t cp, tail = PAGE_SIZE - off;

-			page = pages[ip];
-			cp = copy_page_from_iter_atomic(page, off,
-							min(tail, bytes), from);
-			flush_dcache_page(page);
+			folio = page_folio(pages[ip]);
+			cp = copy_folio_from_iter_atomic(
+				folio, off, min(tail, bytes), from);
+			flush_dcache_folio(folio);

 			copied += cp;
 			bytes -= cp;
@@ -981,11 +1201,11 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 		ni_unlock(ni);

 		for (ip = 0; ip < pages_per_frame; ip++) {
-			page = pages[ip];
-			ClearPageDirty(page);
-			SetPageUptodate(page);
-			unlock_page(page);
-			put_page(page);
+			folio = page_folio(pages[ip]);
+			folio_clear_dirty(folio);
+			folio_mark_uptodate(folio);
+			folio_unlock(folio);
+			folio_put(folio);
 		}

 		if (err)
@@ -1007,54 +1227,83 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 out:
 	kfree(pages);

-	current->backing_dev_info = NULL;
-
 	if (err < 0)
 		return err;

 	iocb->ki_pos += written;
 	if (iocb->ki_pos > ni->i_valid)
 		ni->i_valid = iocb->ki_pos;
+	if (iocb->ki_pos > i_size)
+		i_size_write(inode, iocb->ki_pos);

 	return written;
 }

 /*
- * ntfs_file_write_iter - file_operations::write_iter
+ * check_write_restriction:
+ * common code for ntfs_file_write_iter and ntfs_file_splice_write
 */
-static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static int check_write_restriction(struct inode *inode)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	ssize_t ret;
 	struct ntfs_inode *ni = ntfs_i(inode);

+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
+	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+		return -EIO;
+
 	if (is_encrypted(ni)) {
 		ntfs_inode_warn(inode, "encrypted i/o not supported");
 		return -EOPNOTSUPP;
 	}

-	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
-		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
-		return -EOPNOTSUPP;
-	}
-
 	if (is_dedup(ni)) {
 		ntfs_inode_warn(inode, "write into deduplicated not supported");
 		return -EOPNOTSUPP;
 	}

+	return 0;
+}
+
+/*
+ * ntfs_file_write_iter - file_operations::write_iter
+ */
+static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
+	struct ntfs_inode *ni = ntfs_i(inode);
+	ssize_t ret;
+	int err;
+
 	if (!inode_trylock(inode)) {
 		if (iocb->ki_flags & IOCB_NOWAIT)
 			return -EAGAIN;
 		inode_lock(inode);
 	}

+	ret = check_write_restriction(inode);
+	if (ret)
+		goto out;
+
+	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
+		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
 	ret = generic_write_checks(iocb, from);
 	if (ret <= 0)
 		goto out;

+	err = file_modified(iocb->ki_filp);
+	if (err) {
+		ret = err;
+		goto out;
+	}
+
 	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
 		/* Should never be here, see ntfs_file_open(). */
 		ret = -EOPNOTSUPP;
@@ -1065,8 +1314,8 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (ret)
 		goto out;

-	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
-				: __generic_file_write_iter(iocb, from);
+	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
+				  __generic_file_write_iter(iocb, from);

 out:
 	inode_unlock(inode);
@@ -1084,6 +1333,13 @@ int ntfs_file_open(struct inode *inode, struct file *file)
 {
 	struct ntfs_inode *ni = ntfs_i(inode);

+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
+	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+		return -EIO;
+
 	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
 		     (file->f_flags & O_DIRECT))) {
 		return -EOPNOTSUPP;
 	}
@@ -1118,13 +1374,22 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
 	int err = 0;

 	/* If we are last writer on the inode, drop the block reservation. */
-	if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) &&
-				       atomic_read(&inode->i_writecount) == 1)) {
+	if (sbi->options->prealloc &&
+	    ((file->f_mode & FMODE_WRITE) &&
+	     atomic_read(&inode->i_writecount) == 1)
+	    /*
+	     * The only file when inode->i_fop = &ntfs_file_operations and
+	     * init_rwsem(&ni->file.run_lock) is not called explicitly is MFT.
+	     *
+	     * Add additional check here.
+	     */
+	    && inode->i_ino != MFT_REC_MFT) {
 		ni_lock(ni);
 		down_write(&ni->file.run_lock);
 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
-				    inode->i_size, &ni->i_valid, false, NULL);
+				    i_size_read(inode), &ni->i_valid, false,
+				    NULL);
 		up_write(&ni->file.run_lock);
 		ni_unlock(ni);
@@ -1133,7 +1398,7 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
 }

 /*
- * ntfs_fiemap - file_operations::fiemap
+ * ntfs_fiemap - inode_operations::fiemap
  */
 int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len)
@@ -1141,6 +1406,10 @@ int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	int err;
 	struct ntfs_inode *ni = ntfs_i(inode);

+	/* Avoid any operation if inode is bad. */
+	if (unlikely(is_bad_ni(ni)))
+		return -EINVAL;
+
 	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
 	if (err)
 		return err;
@@ -1154,13 +1423,41 @@ int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	return err;
 }

+/*
+ * ntfs_file_splice_write - file_operations::splice_write
+ */
+static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
+				      struct file *file, loff_t *ppos,
+				      size_t len, unsigned int flags)
+{
+	ssize_t err;
+	struct inode *inode = file_inode(file);
+
+	err = check_write_restriction(inode);
+	if (err)
+		return err;
+
+	return iter_file_splice_write(pipe, file, ppos, len, flags);
+}
+
+/*
+ * ntfs_file_fsync - file_operations::fsync
+ */
+static int ntfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+	struct inode *inode = file_inode(file);
+
+	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+		return -EIO;
+
+	return generic_file_fsync(file, start, end, datasync);
+}
+
 // clang-format off
 const struct inode_operations ntfs_file_inode_operations = {
 	.getattr = ntfs_getattr,
-	.setattr = ntfs3_setattr,
+	.setattr = ntfs_setattr,
 	.listxattr = ntfs_listxattr,
-	.permission = ntfs_permission,
-	.get_inode_acl = ntfs_get_acl,
+	.get_acl = ntfs_get_acl,
 	.set_acl = ntfs_set_acl,
 	.fiemap = ntfs_fiemap,
 };
@@ -1173,12 +1470,22 @@ const struct file_operations ntfs_file_operations = {
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = ntfs_compat_ioctl,
 #endif
-	.splice_read = generic_file_splice_read,
-	.mmap = ntfs_file_mmap,
+	.splice_read = ntfs_file_splice_read,
+	.splice_write = ntfs_file_splice_write,
+	.mmap_prepare = ntfs_file_mmap_prepare,
 	.open = ntfs_file_open,
-	.fsync = generic_file_fsync,
-	.splice_write = iter_file_splice_write,
+	.fsync = ntfs_file_fsync,
 	.fallocate = ntfs_fallocate,
 	.release = ntfs_file_release,
 };
+
+#if IS_ENABLED(CONFIG_NTFS_FS)
+const struct file_operations ntfs_legacy_file_operations = {
+	.llseek = generic_file_llseek,
+	.read_iter = ntfs_file_read_iter,
+	.splice_read = ntfs_file_splice_read,
+	.open = ntfs_file_open,
+	.release = ntfs_file_release,
+};
+#endif
 // clang-format on
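The user-visible part of this diff is the new ioctl surface in ntfs_ioctl(): the standard FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL label ioctls and the ntfs3-specific NTFS3_IOC_SHUTDOWN (_IOR('X', 125, __u32)). A minimal userspace sketch of driving them is below; it only relies on interfaces visible in the patch and in the uapi headers, while the mount point path, the example label string, and the flags value 0 are assumptions for illustration, not part of the patch.

```c
/* Hypothetical sketch: exercise the ioctls this patch wires up in fs/ntfs3/file.c.
 * Assumes /mnt/ntfs is an ntfs3 mount; error handling is kept minimal.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>      /* FS_IOC_GETFSLABEL, FS_IOC_SETFSLABEL, FSLABEL_MAX */
#include <linux/types.h>

/* Same value the patch defines in fs/ntfs3/file.c. */
#define NTFS3_IOC_SHUTDOWN _IOR('X', 125, __u32)

int main(void)
{
	char label[FSLABEL_MAX] = { 0 };
	__u32 flags = 0; /* the patch notes "No additional options yet (flags)" */
	int fd = open("/mnt/ntfs", O_RDONLY); /* any file or dir on the ntfs3 mount */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Read the current volume label (kernel copies sbi->volume.label back). */
	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
		printf("label: %s\n", label);

	/* Set a new label; needs CAP_SYS_ADMIN, length capped at FSLABEL_MAX. */
	strncpy(label, "backup", sizeof(label) - 1);
	if (ioctl(fd, FS_IOC_SETFSLABEL, label))
		perror("FS_IOC_SETFSLABEL");

	/* Force-shutdown the filesystem; later writes/fsync return -EIO. */
	if (ioctl(fd, NTFS3_IOC_SHUTDOWN, &flags))
		perror("NTFS3_IOC_SHUTDOWN");

	close(fd);
	return 0;
}
```

Ordering matters in this sketch: once NTFS3_IOC_SHUTDOWN sets NTFS_FLAGS_SHUTDOWN_BIT, the ntfs3_forced_shutdown() checks added throughout the diff (open, setattr, read/write, splice, fsync, mmap_prepare) start failing with -EIO, so the shutdown call is issued last.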
