From c3f207ab29f793b8c942ce8067ed123f18d5b81b Mon Sep 17 00:00:00 2001
From: Rohith Surabattula
Date: Tue, 13 Apr 2021 00:26:42 -0500
Subject: cifs: Deferred close for files

When a file is closed, the SMB2 close request is not sent to the server
immediately; it is deferred for the interval defined by acregmax. When
the file is reopened by the same process for read or write, the cached
file handle is reused if an oplock is held.

When the client receives an oplock/lease break, the file is closed
immediately if the reference count is zero; otherwise the oplock is
downgraded.

Signed-off-by: Rohith Surabattula
Reviewed-by: Shyam Prasad N
Signed-off-by: Steve French
---
 fs/cifs/file.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 82 insertions(+), 2 deletions(-)

(limited to 'fs/cifs/file.c')

diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 3d4e6e7dac1d..7e97aeabd616 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -322,9 +322,12 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
 	cfile->dentry = dget(dentry);
 	cfile->f_flags = file->f_flags;
 	cfile->invalidHandle = false;
+	cfile->oplock_break_received = false;
+	cfile->deferred_scheduled = false;
 	cfile->tlink = cifs_get_tlink(tlink);
 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
+	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
 	mutex_init(&cfile->fh_mutex);
 	spin_lock_init(&cfile->file_info_lock);
 
@@ -565,6 +568,23 @@ int cifs_open(struct inode *inode, struct file *file)
 			file->f_op = &cifs_file_direct_ops;
 	}
 
+	spin_lock(&CIFS_I(inode)->deferred_lock);
+	/* Get the cached handle as SMB2 close is deferred */
+	rc = cifs_get_readable_path(tcon, full_path, &cfile);
+	if (rc == 0) {
+		if (file->f_flags == cfile->f_flags) {
+			file->private_data = cfile;
+			cifs_del_deferred_close(cfile);
+			spin_unlock(&CIFS_I(inode)->deferred_lock);
+			goto out;
+		} else {
+			spin_unlock(&CIFS_I(inode)->deferred_lock);
+			_cifsFileInfo_put(cfile, true, false);
+		}
+	} else {
+		spin_unlock(&CIFS_I(inode)->deferred_lock);
+	}
+
 	if (server->oplocks)
 		oplock = REQ_OPLOCK;
 	else
@@ -846,11 +866,52 @@ reopen_error_exit:
 	return rc;
 }
 
+void smb2_deferred_work_close(struct work_struct *work)
+{
+	struct cifsFileInfo *cfile = container_of(work,
+			struct cifsFileInfo, deferred.work);
+
+	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+	cifs_del_deferred_close(cfile);
+	cfile->deferred_scheduled = false;
+	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+	_cifsFileInfo_put(cfile, true, false);
+}
+
 int cifs_close(struct inode *inode, struct file *file)
 {
+	struct cifsFileInfo *cfile;
+	struct cifsInodeInfo *cinode = CIFS_I(inode);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+	struct cifs_deferred_close *dclose;
+
 	if (file->private_data != NULL) {
-		_cifsFileInfo_put(file->private_data, true, false);
+		cfile = file->private_data;
 		file->private_data = NULL;
+		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
+		if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
+		    dclose) {
+			if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
+				inode->i_ctime = inode->i_mtime = current_time(inode);
+			spin_lock(&cinode->deferred_lock);
+			cifs_add_deferred_close(cfile, dclose);
+			if (cfile->deferred_scheduled) {
+				mod_delayed_work(deferredclose_wq,
+						 &cfile->deferred, cifs_sb->ctx->acregmax);
+			} else {
+				/* Deferred close for files */
+				queue_delayed_work(deferredclose_wq,
+						   &cfile->deferred, cifs_sb->ctx->acregmax);
+				cfile->deferred_scheduled = true;
+				spin_unlock(&cinode->deferred_lock);
+				return 0;
+			}
+			spin_unlock(&cinode->deferred_lock);
+			_cifsFileInfo_put(cfile, true, false);
+		} else {
+			_cifsFileInfo_put(cfile, true, false);
+			kfree(dclose);
+		}
 	}
 
 	/* return code from the ->release op is always ignored */
@@ -1947,7 +2008,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
 			continue;
 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
-			if (!open_file->invalidHandle) {
+			if ((!open_file->invalidHandle) &&
+			    (!open_file->oplock_break_received)) {
 				/* found a good file */
 				/* lock it so it will not be closed on us */
 				cifsFileInfo_get(open_file);
@@ -2476,6 +2538,8 @@ retry:
 	if (cfile)
 		cifsFileInfo_put(cfile);
 	free_xid(xid);
+	/* Indication to update ctime and mtime as close is deferred */
+	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
 	return rc;
 }
 
@@ -2584,6 +2648,8 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
 	unlock_page(page);
 	put_page(page);
 
+	/* Indication to update ctime and mtime as close is deferred */
+	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
 	return rc;
 }
 
@@ -4744,6 +4810,8 @@ void cifs_oplock_break(struct work_struct *work)
 	struct TCP_Server_Info *server = tcon->ses->server;
 	int rc = 0;
 	bool purge_cache = false;
+	bool is_deferred = false;
+	struct cifs_deferred_close *dclose;
 
 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
 			TASK_UNINTERRUPTIBLE);
@@ -4791,6 +4859,18 @@ oplock_break_ack:
 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
 	}
 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
+	/*
+	 * When oplock break is received and there are no active
+	 * file handles but cached, then set the flag oplock_break_received.
+	 * So, new open will not use cached handle.
+	 */
+	spin_lock(&CIFS_I(inode)->deferred_lock);
+	is_deferred = cifs_is_deferred_close(cfile, &dclose);
+	if (is_deferred) {
+		cfile->oplock_break_received = true;
+		mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
+	}
+	spin_unlock(&CIFS_I(inode)->deferred_lock);
 	cifs_done_oplock_break(cinode);
 }
-- cgit
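To see the shape of the deferred-close logic outside of cifs.ko, the user-space
sketch below models the same life cycle: a close records a deadline instead of
releasing the handle, a reopen with identical flags reclaims the cached handle
(the cifs_get_readable_path() path in the hunk above), and a periodic sweep
plays the role of the delayed work item that fires after acregmax. Every name
in the sketch (cached_fid, DEFER_SECS, reap_expired, ...) is invented for
illustration and does not exist in the kernel sources; this is a model of the
pattern, not the kernel implementation.

/*
 * Illustrative user-space model of deferred close (not kernel code).
 * A close keeps the descriptor cached until a deadline; a matching
 * reopen reuses it; a sweep releases expired entries.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#define DEFER_SECS 1			/* stand-in for acregmax */
#define MAX_CACHED 16

struct cached_fid {
	char   path[256];
	int    fd;			/* -1 means slot unused */
	int    flags;
	time_t deadline;		/* when the real close may happen */
};

static struct cached_fid cache[MAX_CACHED];

static void cache_init(void)
{
	for (int i = 0; i < MAX_CACHED; i++)
		cache[i].fd = -1;
}

/* "close": defer the real close instead of issuing it now */
static void deferred_close(const char *path, int fd, int flags)
{
	if (fd < 0)
		return;
	for (int i = 0; i < MAX_CACHED; i++) {
		if (cache[i].fd == -1) {
			snprintf(cache[i].path, sizeof(cache[i].path), "%s", path);
			cache[i].fd = fd;
			cache[i].flags = flags;
			cache[i].deadline = time(NULL) + DEFER_SECS;
			return;
		}
	}
	close(fd);			/* cache full: close immediately */
}

/* "open": reuse a cached handle when path and flags match */
static int deferred_open(const char *path, int flags)
{
	for (int i = 0; i < MAX_CACHED; i++) {
		if (cache[i].fd != -1 && cache[i].flags == flags &&
		    strcmp(cache[i].path, path) == 0) {
			int fd = cache[i].fd;

			cache[i].fd = -1;	/* cancel the deferred close */
			return fd;
		}
	}
	return open(path, flags);
}

/* periodic sweep: what the delayed work item does in the kernel */
static void reap_expired(void)
{
	time_t now = time(NULL);

	for (int i = 0; i < MAX_CACHED; i++) {
		if (cache[i].fd != -1 && cache[i].deadline <= now) {
			close(cache[i].fd);
			cache[i].fd = -1;
		}
	}
}

int main(void)
{
	cache_init();

	int fd = open("/etc/hosts", O_RDONLY);
	deferred_close("/etc/hosts", fd, O_RDONLY);

	/* an immediate reopen with the same flags reuses the cached fd */
	int fd2 = deferred_open("/etc/hosts", O_RDONLY);
	printf("reused fd: %d\n", fd2);
	deferred_close("/etc/hosts", fd2, O_RDONLY);

	sleep(DEFER_SECS + 1);
	reap_expired();			/* the real close happens here */
	return 0;
}

The kernel gets the "sweep" for free from a delayed workqueue (queue_delayed_work
and mod_delayed_work in the diff above), so no polling loop is needed there.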
From 087f757b0129850c99cc9116df4909dac1bce871 Mon Sep 17 00:00:00 2001
From: Steve French
Date: Thu, 29 Apr 2021 00:18:43 -0500
Subject: cifs: add shutdown support

Various filesystems support the shutdown ioctl, which is used by
several xfstests. The shutdown ioctl sets a flag on the superblock that
prevents open, unlink, symlink, hardlink, rmdir, create etc. on the
file system until it is unmounted and remounted.

The two flags supported in this patch are FSOP_GOING_FLAGS_LOGFLUSH and
FSOP_GOING_FLAGS_NOLOGFLUSH, which require very little beyond blocking
new operations (since we do not cache writes to metadata on the client
with cifs.ko).

FSOP_GOING_FLAGS_DEFAULT is not supported yet; it could be added in the
future, but it would need to call syncfs or an equivalent to write out
pending data on the mount.

With this patch various xfstests now work, including tests 043 through
046 for example.

Signed-off-by: Steve French
Reviewed-by: Aurelien Aptel
---
 fs/cifs/file.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'fs/cifs/file.c')

diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 7e97aeabd616..c95893351b6c 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -45,6 +45,7 @@
 #include "fscache.h"
 #include "smbdirect.h"
 #include "fs_context.h"
+#include "cifs_ioctl.h"
 
 static inline int cifs_convert_flags(unsigned int flags)
 {
@@ -542,6 +543,11 @@ int cifs_open(struct inode *inode, struct file *file)
 	xid = get_xid();
 
 	cifs_sb = CIFS_SB(inode->i_sb);
+	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
+		free_xid(xid);
+		return -EIO;
+	}
+
 	tlink = cifs_sb_tlink(cifs_sb);
 	if (IS_ERR(tlink)) {
 		free_xid(xid);
-- cgit
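The shutdown flag is set from user space; the xfstests mentioned in the commit
message drive it through xfs_io's "shutdown" command. The sketch below shows
roughly what such a call looks like. The ioctl request number and the flag
value are assumptions based on the conventional "going down" ioctl shared by
other filesystems; they are defined locally here with made-up names rather than
taken from a cifs header, since the cifs-side definitions live in cifs_ioctl.h,
which is outside this diff.

/*
 * Hypothetical user-space sketch: mark a mounted filesystem as shut down,
 * the way the xfstests shutdown helper does. Constants are local stand-ins,
 * not copied from a kernel header.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

#define GOINGDOWN_IOC		_IOR('X', 125, uint32_t)  /* assumed ioctl number */
#define GOING_FLAGS_NOLOGFLUSH	0x2u			  /* assumed flag value   */

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDONLY | O_DIRECTORY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	uint32_t flags = GOING_FLAGS_NOLOGFLUSH;
	if (ioctl(fd, GOINGDOWN_IOC, &flags) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}

	/* from here on, new opens on the mount fail until it is remounted */
	close(fd);
	puts("filesystem marked as shut down until remount");
	return 0;
}

On the kernel side, the only change needed in file.c is the cifs_forced_shutdown()
check in cifs_open() shown in the hunk above: once the superblock flag is set,
new opens are refused with -EIO.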
From 78c09634f7dc061a3bd09704cdbebb3762a45cdf Mon Sep 17 00:00:00 2001
From: Rohith Surabattula
Date: Mon, 19 Apr 2021 19:02:03 +0000
Subject: Cifs: Fix kernel oops caused by deferred close for files.

Fix regression issue caused by deferred close for files.

Signed-off-by: Rohith Surabattula
Reviewed-by: Shyam Prasad N
Signed-off-by: Steve French
---
 fs/cifs/file.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

(limited to 'fs/cifs/file.c')

diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c95893351b6c..919c82d4713d 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -878,6 +878,10 @@ void smb2_deferred_work_close(struct work_struct *work)
 			struct cifsFileInfo, deferred.work);
 
 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+	if (!cfile->deferred_scheduled) {
+		spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+		return;
+	}
 	cifs_del_deferred_close(cfile);
 	cfile->deferred_scheduled = false;
 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
@@ -1987,8 +1991,10 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
 
 	if (total_written > 0) {
 		spin_lock(&d_inode(dentry)->i_lock);
-		if (*offset > d_inode(dentry)->i_size)
+		if (*offset > d_inode(dentry)->i_size) {
 			i_size_write(d_inode(dentry), *offset);
+			d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
+		}
 		spin_unlock(&d_inode(dentry)->i_lock);
 	}
 	mark_inode_dirty_sync(d_inode(dentry));
@@ -2647,8 +2653,10 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
 
 	if (rc > 0) {
 		spin_lock(&inode->i_lock);
-		if (pos > inode->i_size)
+		if (pos > inode->i_size) {
 			i_size_write(inode, pos);
+			inode->i_blocks = (512 - 1 + pos) >> 9;
+		}
 		spin_unlock(&inode->i_lock);
 	}
 
@@ -4864,7 +4872,6 @@ oplock_break_ack:
 						cinode);
 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
 	}
-	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
 	/*
 	 * When oplock break is received and there are no active
 	 * file handles but cached, then set the flag oplock_break_received.
@@ -4872,11 +4879,12 @@ oplock_break_ack:
 	 */
 	spin_lock(&CIFS_I(inode)->deferred_lock);
 	is_deferred = cifs_is_deferred_close(cfile, &dclose);
-	if (is_deferred) {
+	if (is_deferred && cfile->deferred_scheduled) {
 		cfile->oplock_break_received = true;
 		mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
 	}
 	spin_unlock(&CIFS_I(inode)->deferred_lock);
+	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
 	cifs_done_oplock_break(cinode);
 }
-- cgit
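Besides moving the final _cifsFileInfo_put() after the deferred-close check,
this fix updates i_blocks whenever the cached size grows, so stat(2) stays
consistent with the new size while the close (and therefore the server-side
attribute refresh) is deferred. The expression (512 - 1 + size) >> 9 simply
rounds the byte size up to the number of 512-byte units that st_blocks is
expressed in. A minimal stand-alone demonstration of that arithmetic, with an
illustrative helper name:

/* Demonstrates the 512-byte-unit rounding used for i_blocks above. */
#include <stdio.h>

static unsigned long long blocks_for(unsigned long long size)
{
	return (512 - 1 + size) >> 9;	/* round up to 512-byte units */
}

int main(void)
{
	unsigned long long sizes[] = { 0, 1, 511, 512, 513, 4096, 100000 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %7llu -> %llu blocks\n",
		       sizes[i], blocks_for(sizes[i]));
	return 0;
}

For example, a 513-byte file reports 2 blocks and a 4096-byte file reports 8,
matching what a local filesystem would show for the same sizes.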