Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--  fs/nfs/write.c  86
1 file changed, 74 insertions(+), 12 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c483cc50b82e..ac1dc331ba31 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -31,6 +31,8 @@
#include "fscache.h"
#include "pnfs.h"
+#include "nfstrace.h"
+
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
#define MIN_POOL_WRITE (32)
@@ -861,7 +863,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
return 0;
l_ctx = req->wb_lock_context;
do_flush = req->wb_page != page || req->wb_context != ctx;
- if (l_ctx) {
+ if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
do_flush |= l_ctx->lockowner.l_owner != current->files
|| l_ctx->lockowner.l_pid != current->tgid;
}
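Note on the hunk above: with the added i_flock test, the lock-owner comparison only runs while the inode actually has POSIX locks outstanding; otherwise requests from different lock contexts may coalesce freely. A minimal userspace model of the resulting predicate (the struct and parameter names are illustrative stubs, not the kernel types):

    #include <stdbool.h>
    #include <stddef.h>

    struct lock_ctx { void *owner; int pid; };
    struct req { void *page; void *ctx; struct lock_ctx *l_ctx; };

    /* Model: flush when the cached request targets a different page or
     * open context, or (only while locks are held) a different lock owner. */
    static bool must_flush(const struct req *req, void *page, void *ctx,
                           void *cur_files, int cur_tgid, bool inode_locked)
    {
            bool do_flush = req->page != page || req->ctx != ctx;

            if (req->l_ctx && inode_locked)
                    do_flush |= req->l_ctx->owner != cur_files ||
                                req->l_ctx->pid != cur_tgid;
            return do_flush;
    }

    int main(void)
    {
            struct lock_ctx lc = { NULL, 42 };
            struct req r = { (void *)1, (void *)2, &lc };

            /* Same page/ctx/owner, but inode locked and pid differs: flush. */
            return must_flush(&r, (void *)1, (void *)2, NULL, 7, true);
    }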
@@ -874,6 +876,33 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
}
/*
+ * Avoid buffered writes when an open context credential's key would
+ * expire soon.
+ *
+ * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
+ *
+ * Returns 0 and sets a credential flag which triggers the inode to flush
+ * and perform NFS_FILE_SYNC writes if the key will expire within
+ * RPC_KEY_EXPIRE_TIMEO.
+ */
+int
+nfs_key_timeout_notify(struct file *filp, struct inode *inode)
+{
+ struct nfs_open_context *ctx = nfs_file_open_context(filp);
+ struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
+
+ return rpcauth_key_timeout_notify(auth, ctx->cred);
+}
+
+/*
+ * Test if the open context credential key is marked to expire soon.
+ */
+bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx)
+{
+ return rpcauth_cred_key_to_expire(ctx->cred);
+}
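The two helpers above delegate the actual expiry policy to the sunrpc auth layer via rpcauth_key_timeout_notify() and rpcauth_cred_key_to_expire(). A rough userspace sketch of the intended behaviour, with made-up expiry windows standing in for RPC_KEY_EXPIRE_FAIL and RPC_KEY_EXPIRE_TIMEO (the real values live in the sunrpc code):

    #include <errno.h>
    #include <stdio.h>
    #include <time.h>

    /* Illustrative windows, in seconds; not the kernel's values. */
    #define KEY_EXPIRE_TIMEO (4 * 60)   /* start forcing sync writes */
    #define KEY_EXPIRE_FAIL  (1 * 60)   /* refuse new buffered writes */

    static int key_to_expire;           /* models the credential flag */

    static int key_timeout_notify(time_t expiry)
    {
            time_t left = expiry - time(NULL);

            if (left <= KEY_EXPIRE_FAIL)
                    return -EACCES;     /* too late: fail buffered writes */
            if (left <= KEY_EXPIRE_TIMEO)
                    key_to_expire = 1;  /* flush + NFS_FILE_SYNC from now on */
            return 0;
    }

    int main(void)
    {
            /* Key expires in two minutes: write allowed, flag gets set. */
            printf("ret=%d\n", key_timeout_notify(time(NULL) + 120));
            printf("flag=%d\n", key_to_expire);
            return 0;
    }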
+
+/*
* If the page cache is marked as unsafe or invalid, then we can't rely on
* the PageUptodate() flag. In this case, we will need to turn off
* write optimisations that depend on the page contents being correct.
@@ -888,6 +917,28 @@ out:
return PageUptodate(page) != 0;
}
+/* If we know the page is up to date, and we're not using byte range locks (or
+ * if we have the whole file locked for writing), it may be more efficient to
+ * extend the write to cover the entire page in order to avoid fragmentation
+ * inefficiencies.
+ *
+ * If the file is opened for synchronous writes or if we have a write delegation
+ * from the server then we can just skip the rest of the checks.
+ */
+static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
+{
+ if (file->f_flags & O_DSYNC)
+ return 0;
+ if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
+ return 1;
+ if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
+ (inode->i_flock->fl_start == 0 &&
+ inode->i_flock->fl_end == OFFSET_MAX &&
+ inode->i_flock->fl_type != F_RDLCK)))
+ return 1;
+ return 0;
+}
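For reference, the only lock shape that still allows extension in nfs_can_extend_write() is a whole-file write lock. From userspace that corresponds to an fcntl() byte-range lock covering every byte; this standalone example (the file name is arbitrary) takes exactly that lock:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("testfile", O_RDWR | O_CREAT, 0644);
            struct flock fl = {
                    .l_type   = F_WRLCK,  /* write lock: not F_RDLCK */
                    .l_whence = SEEK_SET,
                    .l_start  = 0,        /* fl_start == 0 */
                    .l_len    = 0,        /* to EOF, i.e. fl_end == OFFSET_MAX */
            };

            if (fd < 0 || fcntl(fd, F_SETLK, &fl) < 0) {
                    perror("lock");
                    return 1;
            }
            /* Writes through this descriptor may still be extended to
             * whole pages, since the lock covers every byte of the file. */
            close(fd);
            return 0;
    }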
+
/*
* Update and possibly write a cached page of an NFS file.
*
@@ -908,14 +959,7 @@ int nfs_updatepage(struct file *file, struct page *page,
file->f_path.dentry->d_name.name, count,
(long long)(page_file_offset(page) + offset));
- /* If we're not using byte range locks, and we know the page
- * is up to date, it may be more efficient to extend the write
- * to cover the entire page in order to avoid fragmentation
- * inefficiencies.
- */
- if (nfs_write_pageuptodate(page, inode) &&
- inode->i_flock == NULL &&
- !(file->f_flags & O_DSYNC)) {
+ if (nfs_can_extend_write(file, page, inode)) {
count = max(count + offset, nfs_page_length(page));
offset = 0;
}
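The effect of the extension in nfs_updatepage() is easiest to see with numbers: a 200-byte write at offset 100 into a 4096-byte up-to-date page is widened to cover the whole page. A standalone check of the arithmetic:

    #include <stdio.h>

    static unsigned int max_u(unsigned int a, unsigned int b)
    {
            return a > b ? a : b;
    }

    int main(void)
    {
            unsigned int offset = 100, count = 200, page_len = 4096;

            count = max_u(count + offset, page_len); /* max(300, 4096) = 4096 */
            offset = 0;
            /* Prints: offset=0 count=4096 */
            printf("offset=%u count=%u\n", offset, count);
            return 0;
    }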
@@ -978,6 +1022,9 @@ int nfs_initiate_write(struct rpc_clnt *clnt,
data->args.count,
(unsigned long long)data->args.offset);
+ nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client,
+ &task_setup_data.rpc_client, &msg, data);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task)) {
ret = PTR_ERR(task);
@@ -1250,7 +1297,10 @@ EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
- NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
+ int err;
+ err = NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
+ if (err)
+ rpc_exit(task, err);
}
void nfs_commit_prepare(struct rpc_task *task, void *calldata)
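The nfs_write_prepare() change above stops dropping errors from the per-version write_rpc_prepare hook: a failure now terminates the RPC task via rpc_exit() rather than letting the task proceed. The pattern, reduced to plain C (the task and hook types are stand-ins, and -11 merely plays the role of a kernel -EAGAIN):

    #include <stdio.h>

    struct task { int status; int exited; };

    static void task_exit(struct task *t, int status)
    {
            t->status = status;
            t->exited = 1;
    }

    /* A prepare hook that can fail, e.g. because state recovery is needed. */
    static int prepare_hook(struct task *t)
    {
            (void)t;
            return -11;
    }

    static void write_prepare(struct task *t)
    {
            int err = prepare_hook(t);

            if (err)        /* propagate instead of silently continuing */
                    task_exit(t, err);
    }

    int main(void)
    {
            struct task t = { 0, 0 };

            write_prepare(&t);
            printf("exited=%d status=%d\n", t.exited, t.status);
            return 0;
    }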
@@ -1441,6 +1491,9 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
+ nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
+ NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -1715,8 +1768,14 @@ int nfs_wb_all(struct inode *inode)
.range_start = 0,
.range_end = LLONG_MAX,
};
+ int ret;
+
+ trace_nfs_writeback_inode_enter(inode);
+
- return sync_inode(inode, &wbc);
+ ret = sync_inode(inode, &wbc);
+ trace_nfs_writeback_inode_exit(inode, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);
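Once this patch is applied, the new writeback tracepoints can be watched from tracefs. Assuming tracefs is mounted at /sys/kernel/tracing and the events are registered under the nfs subsystem (both are assumptions about the running system), a small helper can switch them on:

    #include <stdio.h>

    /* Write "1" to a tracefs enable file; returns -1 on failure. */
    static int enable_event(const char *path)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs("1\n", f);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            enable_event("/sys/kernel/tracing/events/nfs/"
                         "nfs_writeback_inode_enter/enable");
            enable_event("/sys/kernel/tracing/events/nfs/"
                         "nfs_writeback_inode_exit/enable");
            return 0;
    }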
@@ -1764,6 +1823,8 @@ int nfs_wb_page(struct inode *inode, struct page *page)
};
int ret;
+ trace_nfs_writeback_page_enter(inode);
+
for (;;) {
wait_on_page_writeback(page);
if (clear_page_dirty_for_io(page)) {
@@ -1772,14 +1833,15 @@ int nfs_wb_page(struct inode *inode, struct page *page)
goto out_error;
continue;
}
+ ret = 0;
if (!PagePrivate(page))
break;
ret = nfs_commit_inode(inode, FLUSH_SYNC);
if (ret < 0)
goto out_error;
}
- return 0;
out_error:
+ trace_nfs_writeback_page_exit(inode, ret);
return ret;
}