author    Linus Torvalds <torvalds@linux-foundation.org>    2022-08-03 13:50:22 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2022-08-03 13:50:22 -0700
commit    5264406cdb66c7003eb3edf53c9773b1b20611b9 (patch)
tree      e94f76f64a0b3b45dcb9f9bec85cce2ba78e1221 /fs
parent    200e340f2196d7fd427a5810d06e893b932f145a (diff)
parent    dd45ab9dd28c82fc495d98cd9788666fd8d76b99 (diff)
Merge tag 'pull-work.iov_iter-base' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs iov_iter updates from Al Viro:
 "Part 1 - isolated cleanups and optimizations.

  One of the goals is to reduce the overhead of using ->read_iter() and
  ->write_iter() instead of ->read()/->write(). new_sync_{read,write}()
  has a surprising amount of overhead, in particular inside
  iocb_flags(). That is why the beginning of the series is in this
  pile; it's not directly iov_iter-related, but it's a part of the same
  work..."

* tag 'pull-work.iov_iter-base' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  first_iovec_segment(): just return address
  iov_iter: massage calling conventions for first_{iovec,bvec}_segment()
  iov_iter: first_{iovec,bvec}_segment() - simplify a bit
  iov_iter: lift dealing with maxpages out of first_{iovec,bvec}_segment()
  iov_iter_get_pages{,_alloc}(): cap the maxsize with MAX_RW_COUNT
  iov_iter_bvec_advance(): don't bother with bvec_iter copy
  copy_page_{to,from}_iter(): switch iovec variants to generic
  keep iocb_flags() result cached in struct file
  iocb: delay evaluation of IS_SYNC(...) until we want to check IOCB_DSYNC
  struct file: use anonymous union member for rcuhead and llist
  btrfs: use IOMAP_DIO_NOSYNC
  teach iomap_dio_rw() to suppress dsync
  No need of likely/unlikely on calls of check_copy_size()
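[Editor's note: the overhead fix the message describes is carried by the
"keep iocb_flags() result cached in struct file" and "iocb: delay
evaluation of IS_SYNC(...)" commits. A minimal sketch of the resulting
pattern, paraphrased from include/linux/fs.h after this series; exact
bodies may differ slightly:]

	/* Flags are computed once at open time (and refreshed on F_SETFL)
	 * instead of on every ->read_iter()/->write_iter() call: */
	static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
	{
		*kiocb = (struct kiocb) {
			.ki_filp = filp,
			.ki_flags = filp->f_iocb_flags,	/* cached iocb_flags() */
			.ki_ioprio = get_current_ioprio(),
		};
	}

	/* The comparatively expensive IS_SYNC() inode test is deferred to
	 * the point where a caller actually asks about dsync semantics: */
	static inline bool iocb_is_dsync(const struct kiocb *iocb)
	{
		return (iocb->ki_flags & IOCB_DSYNC) ||
			IS_SYNC(iocb->ki_filp->f_mapping->host);
	}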
Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c              |  2
-rw-r--r--  fs/btrfs/file.c       | 19
-rw-r--r--  fs/btrfs/inode.c      |  3
-rw-r--r--  fs/direct-io.c        |  2
-rw-r--r--  fs/fcntl.c            |  1
-rw-r--r--  fs/file_table.c       | 17
-rw-r--r--  fs/fuse/file.c        |  2
-rw-r--r--  fs/iomap/direct-io.c  | 19
-rw-r--r--  fs/open.c             |  1
-rw-r--r--  fs/zonefs/super.c     |  2
10 files changed, 28 insertions, 40 deletions
diff --git a/fs/aio.c b/fs/aio.c
index a1911e86859c..606613e9d1f4 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1475,7 +1475,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
req->ki_complete = aio_complete_rw;
req->private = NULL;
req->ki_pos = iocb->aio_offset;
- req->ki_flags = iocb_flags(req->ki_filp);
+ req->ki_flags = req->ki_filp->f_iocb_flags;
if (iocb->aio_flags & IOCB_FLAG_RESFD)
req->ki_flags |= IOCB_EVENTFD;
if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9dfde1af8a64..30e6aef9739f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1848,7 +1848,6 @@ static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
- const bool is_sync_write = (iocb->ki_flags & IOCB_DSYNC);
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -1902,15 +1901,6 @@ relock:
}
/*
- * We remove IOCB_DSYNC so that we don't deadlock when iomap_dio_rw()
- * calls generic_write_sync() (through iomap_dio_complete()), because
- * that results in calling fsync (btrfs_sync_file()) which will try to
- * lock the inode in exclusive/write mode.
- */
- if (is_sync_write)
- iocb->ki_flags &= ~IOCB_DSYNC;
-
- /*
* The iov_iter can be mapped to the same file range we are writing to.
* If that's the case, then we will deadlock in the iomap code, because
* it first calls our callback btrfs_dio_iomap_begin(), which will create
@@ -1964,13 +1954,6 @@ again:
btrfs_inode_unlock(inode, ilock_flags);
- /*
- * Add back IOCB_DSYNC. Our caller, btrfs_file_write_iter(), will do
- * the fsync (call generic_write_sync()).
- */
- if (is_sync_write)
- iocb->ki_flags |= IOCB_DSYNC;
-
/* If 'err' is -ENOTBLK then it means we must fallback to buffered IO. */
if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
goto out;
@@ -2038,7 +2021,7 @@ ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
struct file *file = iocb->ki_filp;
struct btrfs_inode *inode = BTRFS_I(file_inode(file));
ssize_t num_written, num_sync;
- const bool sync = iocb->ki_flags & IOCB_DSYNC;
+ const bool sync = iocb_is_dsync(iocb);
/*
* If the fs flips readonly due to some impossible error, although we
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 11a52db506b3..3867cb0646e9 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8165,7 +8165,8 @@ ssize_t btrfs_dio_rw(struct kiocb *iocb, struct iov_iter *iter, size_t done_befo
struct btrfs_dio_data data;
return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
- IOMAP_DIO_PARTIAL, &data, done_before);
+ IOMAP_DIO_PARTIAL | IOMAP_DIO_NOSYNC,
+ &data, done_before);
}
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 94b71440c332..df5e2d048799 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1216,7 +1216,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
*/
if (dio->is_async && iov_iter_rw(iter) == WRITE) {
retval = 0;
- if (iocb->ki_flags & IOCB_DSYNC)
+ if (iocb_is_dsync(iocb))
retval = dio_set_defer_completion(dio);
else if (!dio->inode->i_sb->s_dio_done_wq) {
/*
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 34a3faa4886d..146c9ab0cd4b 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -78,6 +78,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
}
spin_lock(&filp->f_lock);
filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
+ filp->f_iocb_flags = iocb_flags(filp);
spin_unlock(&filp->f_lock);
out:
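[Editor's note: the setfl() hunk above exists because f_iocb_flags is a
cache; any path that rewrites f_flags must recompute it. O_DIRECT is one
of the bits iocb_flags() folds in, and it can be changed after open. A
hypothetical userspace illustration using only the standard fcntl(2) API:]

	#define _GNU_SOURCE	/* for O_DIRECT */
	#include <fcntl.h>

	/* Toggling O_DIRECT via F_SETFL rewrites filp->f_flags in the
	 * kernel, which is why setfl() refreshes the cached value. */
	static int set_direct(int fd)
	{
		int flags = fcntl(fd, F_GETFL);

		if (flags < 0)
			return -1;
		return fcntl(fd, F_SETFL, flags | O_DIRECT);
	}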
diff --git a/fs/file_table.c b/fs/file_table.c
index 5727a63a7b67..99c6796c9f28 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -45,7 +45,7 @@ static struct percpu_counter nr_files __cacheline_aligned_in_smp;
static void file_free_rcu(struct rcu_head *head)
{
- struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
+ struct file *f = container_of(head, struct file, f_rcuhead);
put_cred(f->f_cred);
kmem_cache_free(filp_cachep, f);
@@ -56,7 +56,7 @@ static inline void file_free(struct file *f)
security_file_free(f);
if (!(f->f_mode & FMODE_NOACCOUNT))
percpu_counter_dec(&nr_files);
- call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
+ call_rcu(&f->f_rcuhead, file_free_rcu);
}
/*
@@ -142,7 +142,7 @@ static struct file *__alloc_file(int flags, const struct cred *cred)
f->f_cred = get_cred(cred);
error = security_file_alloc(f);
if (unlikely(error)) {
- file_free_rcu(&f->f_u.fu_rcuhead);
+ file_free_rcu(&f->f_rcuhead);
return ERR_PTR(error);
}
@@ -243,6 +243,7 @@ static struct file *alloc_file(const struct path *path, int flags,
if ((file->f_mode & FMODE_WRITE) &&
likely(fop->write || fop->write_iter))
file->f_mode |= FMODE_CAN_WRITE;
+ file->f_iocb_flags = iocb_flags(file);
file->f_mode |= FMODE_OPENED;
file->f_op = fop;
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
@@ -343,13 +344,13 @@ static void delayed_fput(struct work_struct *unused)
struct llist_node *node = llist_del_all(&delayed_fput_list);
struct file *f, *t;
- llist_for_each_entry_safe(f, t, node, f_u.fu_llist)
+ llist_for_each_entry_safe(f, t, node, f_llist)
__fput(f);
}
static void ____fput(struct callback_head *work)
{
- __fput(container_of(work, struct file, f_u.fu_rcuhead));
+ __fput(container_of(work, struct file, f_rcuhead));
}
/*
@@ -376,8 +377,8 @@ void fput(struct file *file)
struct task_struct *task = current;
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
- init_task_work(&file->f_u.fu_rcuhead, ____fput);
- if (!task_work_add(task, &file->f_u.fu_rcuhead, TWA_RESUME))
+ init_task_work(&file->f_rcuhead, ____fput);
+ if (!task_work_add(task, &file->f_rcuhead, TWA_RESUME))
return;
/*
* After this task has run exit_task_work(),
@@ -386,7 +387,7 @@ void fput(struct file *file)
*/
}
- if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
+ if (llist_add(&file->f_llist, &delayed_fput_list))
schedule_delayed_work(&delayed_fput_work, 1);
}
}
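[Editor's note: the f_u.fu_* -> f_* renames above come from "struct file:
use anonymous union member for rcuhead and llist". A hedged sketch of the
resulting struct file layout, reconstructed from the renames plus the new
f_iocb_flags field; placing f_iocb_flags inside the union is an assumption
about this series:]

	struct file {
		union {
			/* only used once the file is being torn down */
			struct llist_node	f_llist;
			struct rcu_head		f_rcuhead;
			/* cached iocb_flags(), only valid while open */
			unsigned int		f_iocb_flags;
		};
		/* ... remaining members unchanged ... */
	};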
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 05caa2b9272e..00fa861aeead 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1042,7 +1042,7 @@ static unsigned int fuse_write_flags(struct kiocb *iocb)
{
unsigned int flags = iocb->ki_filp->f_flags;
- if (iocb->ki_flags & IOCB_DSYNC)
+ if (iocb_is_dsync(iocb))
flags |= O_DSYNC;
if (iocb->ki_flags & IOCB_SYNC)
flags |= O_SYNC;
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 18a3d9357dce..c75d33d5c3ce 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -548,17 +548,18 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
}
/* for data sync or sync, we need sync completion processing */
- if (iocb->ki_flags & IOCB_DSYNC)
+ if (iocb_is_dsync(iocb) && !(dio_flags & IOMAP_DIO_NOSYNC)) {
dio->flags |= IOMAP_DIO_NEED_SYNC;
- /*
- * For datasync only writes, we optimistically try using FUA for
- * this IO. Any non-FUA write that occurs will clear this flag,
- * hence we know before completion whether a cache flush is
- * necessary.
- */
- if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
- dio->flags |= IOMAP_DIO_WRITE_FUA;
+ /*
+ * For datasync only writes, we optimistically try
+ * using FUA for this IO. Any non-FUA write that
+ * occurs will clear this flag, hence we know before
+ * completion whether a cache flush is necessary.
+ */
+ if (!(iocb->ki_flags & IOCB_SYNC))
+ dio->flags |= IOMAP_DIO_WRITE_FUA;
+ }
}
if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
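[Editor's note: together with the btrfs hunks earlier, this gives the
caller-side pattern that replaces the old clear-and-restore of IOCB_DSYNC:
pass IOMAP_DIO_NOSYNC so iomap_dio_complete() skips generic_write_sync(),
then sync after all inode locks are dropped. A hedged sketch modeled on
btrfs_do_write_iter(); the ops/data names are placeholders, not verbatim:]

	ret = iomap_dio_rw(iocb, from, dio_iomap_ops, dio_ops,
			   IOMAP_DIO_PARTIAL | IOMAP_DIO_NOSYNC,
			   &data, done_before);
	/* ... drop inode locks first, then do the dsync ourselves,
	 * avoiding the deadlock the removed btrfs comment described: */
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);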
diff --git a/fs/open.c b/fs/open.c
index 1177e8d1cf83..8a813fa5ca56 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -894,6 +894,7 @@ static int do_dentry_open(struct file *f,
f->f_mode |= FMODE_CAN_ODIRECT;
f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
+ f->f_iocb_flags = iocb_flags(f);
file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index c5fa8adfb7a6..fcdc4bad95c5 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -779,7 +779,7 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
bio->bi_iter.bi_sector = zi->i_zsector;
bio->bi_ioprio = iocb->ki_ioprio;
- if (iocb->ki_flags & IOCB_DSYNC)
+ if (iocb_is_dsync(iocb))
bio->bi_opf |= REQ_FUA;
ret = bio_iov_iter_get_pages(bio, from);