Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/ctree.c                     52
-rw-r--r--  fs/btrfs/delayed-inode.c              2
-rw-r--r--  fs/btrfs/delayed-inode.h              2
-rw-r--r--  fs/btrfs/ioctl.c                      4
-rw-r--r--  fs/btrfs/transaction.h                4
-rw-r--r--  fs/ceph/crypto.c                      2
-rw-r--r--  fs/ceph/file.c                        2
-rw-r--r--  fs/ceph/inode.c                       4
-rw-r--r--  fs/erofs/decompressor_lzma.c          5
-rw-r--r--  fs/erofs/super.c                      2
-rw-r--r--  fs/fs_context.c                      34
-rw-r--r--  fs/nfs/nfs4proc.c                     4
-rw-r--r--  fs/nfs/nfs4state.c                   45
-rw-r--r--  fs/nfs/write.c                        2
-rw-r--r--  fs/overlayfs/export.c                 2
-rw-r--r--  fs/overlayfs/file.c                   2
-rw-r--r--  fs/overlayfs/ovl_entry.h             10
-rw-r--r--  fs/overlayfs/params.c               126
-rw-r--r--  fs/overlayfs/super.c                 27
-rw-r--r--  fs/quota/dquot.c                     66
-rw-r--r--  fs/smb/client/cached_dir.c          141
-rw-r--r--  fs/smb/client/cached_dir.h            2
-rw-r--r--  fs/smb/client/connect.c              26
-rw-r--r--  fs/smb/server/connection.c            2
-rw-r--r--  fs/smb/server/connection.h            1
-rw-r--r--  fs/smb/server/mgmt/tree_connect.c    42
-rw-r--r--  fs/smb/server/mgmt/tree_connect.h    11
-rw-r--r--  fs/smb/server/mgmt/user_session.c    11
-rw-r--r--  fs/smb/server/mgmt/user_session.h     1
-rw-r--r--  fs/smb/server/server.c                2
-rw-r--r--  fs/smb/server/smb2pdu.c             105
-rw-r--r--  fs/smb/server/vfs_cache.c            30
-rw-r--r--  fs/smb/server/vfs_cache.h             9
-rw-r--r--  fs/xfs/libxfs/xfs_ag.c                6
-rw-r--r--  fs/xfs/scrub/xfile.c                  1
-rw-r--r--  fs/xfs/xfs_discard.c                266
-rw-r--r--  fs/xfs/xfs_discard.h                  6
-rw-r--r--  fs/xfs/xfs_extent_busy.c             35
-rw-r--r--  fs/xfs/xfs_extent_busy.h             24
-rw-r--r--  fs/xfs/xfs_iops.c                     5
-rw-r--r--  fs/xfs/xfs_log_cil.c                 93
-rw-r--r--  fs/xfs/xfs_log_priv.h                 5
-rw-r--r--  fs/xfs/xfs_notify_failure.c           6
43 files changed, 806 insertions, 421 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index a4cb4b642987..da519c1b6ad0 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -682,18 +682,30 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
u64 search_start;
int ret;
- if (test_bit(BTRFS_ROOT_DELETING, &root->state))
- btrfs_err(fs_info,
- "COW'ing blocks on a fs root that's being dropped");
-
- if (trans->transaction != fs_info->running_transaction)
- WARN(1, KERN_CRIT "trans %llu running %llu\n",
- trans->transid,
- fs_info->running_transaction->transid);
+ if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ btrfs_crit(fs_info,
+ "attempt to COW block %llu on root %llu that is being deleted",
+ buf->start, btrfs_root_id(root));
+ return -EUCLEAN;
+ }
- if (trans->transid != fs_info->generation)
- WARN(1, KERN_CRIT "trans %llu running %llu\n",
- trans->transid, fs_info->generation);
+ /*
+ * COWing must happen through a running transaction, which always
+ * matches the current fs generation (it's a transaction with a state
+ * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
+ * into error state to prevent the commit of any transaction.
+ */
+ if (unlikely(trans->transaction != fs_info->running_transaction ||
+ trans->transid != fs_info->generation)) {
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ btrfs_crit(fs_info,
+"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
+ buf->start, btrfs_root_id(root), trans->transid,
+ fs_info->running_transaction->transid,
+ fs_info->generation);
+ return -EUCLEAN;
+ }
if (!should_cow_block(trans, root, buf)) {
*cow_ret = buf;
@@ -805,8 +817,22 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
int progress_passed = 0;
struct btrfs_disk_key disk_key;
- WARN_ON(trans->transaction != fs_info->running_transaction);
- WARN_ON(trans->transid != fs_info->generation);
+ /*
+ * COWing must happen through a running transaction, which always
+ * matches the current fs generation (it's a transaction with a state
+ * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
+ * into error state to prevent the commit of any transaction.
+ */
+ if (unlikely(trans->transaction != fs_info->running_transaction ||
+ trans->transid != fs_info->generation)) {
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ btrfs_crit(fs_info,
+"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
+ parent->start, btrfs_root_id(root), trans->transid,
+ fs_info->running_transaction->transid,
+ fs_info->generation);
+ return -EUCLEAN;
+ }
parent_nritems = btrfs_header_nritems(parent);
blocksize = fs_info->nodesize;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index caf0bbd028d1..90aaedce1548 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -313,7 +313,7 @@ static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
{
struct btrfs_delayed_item *item;
- item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
+ item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
if (item) {
item->data_len = data_len;
item->type = type;
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index dc1085b2a397..1da213197f55 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -95,7 +95,7 @@ struct btrfs_delayed_item {
bool logged;
/* The maximum leaf size is 64K, so u16 is more than enough. */
u16 data_len;
- char data[];
+ char data[] __counted_by(data_len);
};
static inline void btrfs_init_delayed_root(
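
A side note on the pattern: the two delayed-inode hunks above pair struct_size() with a __counted_by() annotation, so the allocation size is computed overflow-safely and the flexible array is bounds-checked against its length field. A minimal sketch of the idiom (illustrative struct, not the btrfs one):

#include <linux/overflow.h>
#include <linux/slab.h>

struct blob {
	u16 data_len;				/* number of bytes in data[] */
	char data[] __counted_by(data_len);	/* checked against data_len */
};

static struct blob *blob_alloc(u16 data_len)
{
	/* struct_size() computes sizeof(*b) + data_len with overflow checks */
	struct blob *b = kmalloc(struct_size(b, data, data_len), GFP_KERNEL);

	if (b)
		b->data_len = data_len;	/* set the counter before using data[] */
	return b;
}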
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 75ab766fe156..8e7d03bc1b56 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2978,7 +2978,7 @@ static void get_block_group_info(struct list_head *groups_list,
static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
void __user *arg)
{
- struct btrfs_ioctl_space_args space_args;
+ struct btrfs_ioctl_space_args space_args = { 0 };
struct btrfs_ioctl_space_info space;
struct btrfs_ioctl_space_info *dest;
struct btrfs_ioctl_space_info *dest_orig;
@@ -4338,7 +4338,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat
if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
- struct btrfs_ioctl_send_args_32 args32;
+ struct btrfs_ioctl_send_args_32 args32 = { 0 };
ret = copy_from_user(&args32, argp, sizeof(args32));
if (ret)
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 6b309f8a99a8..93869cda6af9 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -219,8 +219,8 @@ do { \
(errno))) { \
/* Stack trace printed. */ \
} else { \
- btrfs_debug((trans)->fs_info, \
- "Transaction aborted (error %d)", \
+ btrfs_err((trans)->fs_info, \
+ "Transaction aborted (error %d)", \
(errno)); \
} \
} \
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
index e1f31b86fd48..5b5112c78462 100644
--- a/fs/ceph/crypto.c
+++ b/fs/ceph/crypto.c
@@ -460,7 +460,7 @@ int ceph_fname_to_usr(const struct ceph_fname *fname, struct fscrypt_str *tname,
out:
fscrypt_fname_free_buffer(&_tname);
out_inode:
- if ((dir != fname->dir) && !IS_ERR(dir)) {
+ if (dir != fname->dir) {
if ((dir->i_state & I_NEW))
discard_new_inode(dir);
else
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index b1da02f5dbe3..b5f8038065d7 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -2969,7 +2969,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
ret = do_splice_direct(src_file, &src_off, dst_file,
&dst_off, src_objlen, flags);
/* Abort on short copies or on error */
- if (ret < src_objlen) {
+ if (ret < (long)src_objlen) {
dout("Failed partial copy (%zd)\n", ret);
goto out;
}
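
The (long) cast above fixes an integer promotion pitfall: when a negative ssize_t is compared against an unsigned value of equal rank (u32 on 32-bit builds here), the signed side converts to unsigned and the error check never fires. A small standalone demo of the pitfall, with illustrative values:

#include <stdio.h>

int main(void)
{
	long ret = -5;			/* e.g. an error from a copy routine */
	unsigned long want = 4096;

	/* ret converts to unsigned long: -5 becomes a huge value,
	 * so this "short copy" check never triggers. */
	if (ret < want)
		printf("short copy detected (unsigned compare)\n");

	/* casting the unsigned side keeps the comparison signed,
	 * which is what the ceph fix does with (long) */
	if (ret < (long)want)
		printf("short copy detected (signed compare)\n");
	return 0;
}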
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 800ab7920513..b79100f720b3 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -769,9 +769,7 @@ int ceph_fill_file_size(struct inode *inode, int issued,
ci->i_truncate_seq = truncate_seq;
/* the MDS should have revoked these caps */
- WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
- CEPH_CAP_FILE_RD |
- CEPH_CAP_FILE_WR |
+ WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
CEPH_CAP_FILE_LAZYIO));
/*
* If we hold relevant caps, or in the case where we're
diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
index 73091fbe3ea4..dee10d22ada9 100644
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -217,9 +217,12 @@ again:
strm->buf.out_size = min_t(u32, outlen,
PAGE_SIZE - pageofs);
outlen -= strm->buf.out_size;
- if (!rq->out[no] && rq->fillgaps) /* deduped */
+ if (!rq->out[no] && rq->fillgaps) { /* deduped */
rq->out[no] = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
+ set_page_private(rq->out[no],
+ Z_EROFS_SHORTLIVED_PAGE);
+ }
if (rq->out[no])
strm->buf.out = kmap(rq->out[no]) + pageofs;
pageofs = 0;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 44a24d573f1f..3700af9ee173 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -235,7 +235,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
return PTR_ERR(ptr);
dis = ptr + erofs_blkoff(sb, *pos);
- if (!dif->path) {
+ if (!sbi->devs->flatdev && !dif->path) {
if (!dis->tag[0]) {
erofs_err(sb, "empty device tag @ pos %llu", *pos);
return -EINVAL;
diff --git a/fs/fs_context.c b/fs/fs_context.c
index a0ad7a0c4680..98589aae5208 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -192,17 +192,19 @@ int vfs_parse_fs_string(struct fs_context *fc, const char *key,
EXPORT_SYMBOL(vfs_parse_fs_string);
/**
- * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
+ * vfs_parse_monolithic_sep - Parse key[=val][,key[=val]]* mount data
* @fc: The superblock configuration to fill in.
* @data: The data to parse
+ * @sep: callback for separating next option
*
- * Parse a blob of data that's in key[=val][,key[=val]]* form. This can be
- * called from the ->monolithic_mount_data() fs_context operation.
+ * Parse a blob of data that's in key[=val][,key[=val]]* form with a custom
+ * option separator callback.
*
* Returns 0 on success or the error returned by the ->parse_option() fs_context
* operation on failure.
*/
-int generic_parse_monolithic(struct fs_context *fc, void *data)
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+ char *(*sep)(char **))
{
char *options = data, *key;
int ret = 0;
@@ -214,7 +216,7 @@ int generic_parse_monolithic(struct fs_context *fc, void *data)
if (ret)
return ret;
- while ((key = strsep(&options, ",")) != NULL) {
+ while ((key = sep(&options)) != NULL) {
if (*key) {
size_t v_len = 0;
char *value = strchr(key, '=');
@@ -233,6 +235,28 @@ int generic_parse_monolithic(struct fs_context *fc, void *data)
return ret;
}
+EXPORT_SYMBOL(vfs_parse_monolithic_sep);
+
+static char *vfs_parse_comma_sep(char **s)
+{
+ return strsep(s, ",");
+}
+
+/**
+ * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
+ * @fc: The superblock configuration to fill in.
+ * @data: The data to parse
+ *
+ * Parse a blob of data that's in key[=val][,key[=val]]* form. This can be
+ * called from the ->monolithic_mount_data() fs_context operation.
+ *
+ * Returns 0 on success or the error returned by the ->parse_option() fs_context
+ * operation on failure.
+ */
+int generic_parse_monolithic(struct fs_context *fc, void *data)
+{
+ return vfs_parse_monolithic_sep(fc, data, vfs_parse_comma_sep);
+}
EXPORT_SYMBOL(generic_parse_monolithic);
/**
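
The new vfs_parse_monolithic_sep() export keeps the generic key[=val] parsing loop while letting a filesystem swap in its own option splitter. A hedged sketch of a caller — the myfs_* names are hypothetical, only the VFS hooks are real:

/* split options on ';' instead of ',' -- purely illustrative */
static char *myfs_next_opt(char **s)
{
	return strsep(s, ";");
}

static int myfs_parse_monolithic(struct fs_context *fc, void *data)
{
	return vfs_parse_monolithic_sep(fc, data, myfs_next_opt);
}

static const struct fs_context_operations myfs_context_ops = {
	.parse_monolithic	= myfs_parse_monolithic,
	/* .parse_param, .get_tree, .reconfigure, ... */
};

Overlayfs does exactly this later in the series, with an escape-aware separator.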
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 3508d8238826..7016eaadf555 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -10622,7 +10622,9 @@ static void nfs4_disable_swap(struct inode *inode)
*/
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
- nfs4_schedule_state_manager(clp);
+ set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ wake_up_var(&clp->cl_state);
}
static const struct inode_operations nfs4_dir_inode_operations = {
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e079987af4a3..9a5d911a7edc 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1209,16 +1209,26 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
{
struct task_struct *task;
char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
+ struct rpc_clnt *clnt = clp->cl_rpcclient;
+ bool swapon = false;
- if (clp->cl_rpcclient->cl_shutdown)
+ if (clnt->cl_shutdown)
return;
set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
- if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) {
- wake_up_var(&clp->cl_state);
- return;
+
+ if (atomic_read(&clnt->cl_swapper)) {
+ swapon = !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE,
+ &clp->cl_state);
+ if (!swapon) {
+ wake_up_var(&clp->cl_state);
+ return;
+ }
}
- set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
+
+ if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
+ return;
+
__module_get(THIS_MODULE);
refcount_inc(&clp->cl_count);
@@ -1235,8 +1245,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
__func__, PTR_ERR(task));
if (!nfs_client_init_is_complete(clp))
nfs_mark_client_ready(clp, PTR_ERR(task));
+ if (swapon)
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
nfs4_clear_state_manager_bit(clp);
- clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
nfs_put_client(clp);
module_put(THIS_MODULE);
}
@@ -2703,6 +2714,13 @@ static void nfs4_state_manager(struct nfs_client *clp)
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
+ if (test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
+ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING,
+ &clp->cl_state)) {
+ memflags = memalloc_nofs_save();
+ continue;
+ }
+
if (!test_and_set_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state)) {
if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
nfs_client_return_marked_delegations(clp);
@@ -2741,22 +2759,25 @@ static int nfs4_run_state_manager(void *ptr)
allow_signal(SIGKILL);
again:
- set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
nfs4_state_manager(clp);
- if (atomic_read(&cl->cl_swapper)) {
+
+ if (test_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) &&
+ !test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state)) {
wait_var_event_interruptible(&clp->cl_state,
test_bit(NFS4CLNT_RUN_MANAGER,
&clp->cl_state));
- if (atomic_read(&cl->cl_swapper) &&
- test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
+ if (!atomic_read(&cl->cl_swapper))
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
+ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
goto again;
/* Either no longer a swapper, or were signalled */
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
}
- clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
- !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state))
+ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
goto again;
nfs_put_client(clp);
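
The reworked scheduling above hinges on the wait_var_event()/wake_up_var() pairing: one side sets NFS4CLNT_RUN_MANAGER and wakes any sleeper, the other sleeps until the bit appears. A distilled sketch (the helper names are illustrative):

/* scheduler side: request a run and poke a sleeping manager */
static void poke_manager(struct nfs_client *clp)
{
	set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
	wake_up_var(&clp->cl_state);
}

/* manager side: sleep until more work is requested */
static int wait_for_work(struct nfs_client *clp)
{
	return wait_var_event_interruptible(&clp->cl_state,
			test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state));
}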
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 8c1ee1a1a28f..7720b5e43014 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -802,8 +802,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
}
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
- nfs_release_request(req);
atomic_long_dec(&NFS_I(nfs_page_to_inode(req))->nrequests);
+ nfs_release_request(req);
}
}
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index c8c8588bd98c..26b782c53910 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -188,7 +188,7 @@ static int ovl_check_encode_origin(struct dentry *dentry)
/* Lower file handle for non-upper non-decodable */
if (!ovl_dentry_upper(dentry) && !decodable)
- return 0;
+ return 1;
/* Upper file handle for pure upper */
if (!ovl_dentry_lower(dentry))
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 693971d20280..8be4dc050d1e 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -341,7 +341,6 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (!aio_req)
goto out;
- real.flags = 0;
aio_req->orig_iocb = iocb;
kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
@@ -413,7 +412,6 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
if (!aio_req)
goto out;
- real.flags = 0;
aio_req->orig_iocb = iocb;
kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_flags = ifl;
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index e9539f98e86a..d82d2a043da2 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -8,6 +8,7 @@
struct ovl_config {
char *upperdir;
char *workdir;
+ char **lowerdirs;
bool default_permissions;
int redirect_mode;
int verity_mode;
@@ -39,17 +40,8 @@ struct ovl_layer {
int idx;
/* One fsid per unique underlying sb (upper fsid == 0) */
int fsid;
- char *name;
};
-/*
- * ovl_free_fs() relies on @mnt being the first member when unmounting
- * the private mounts created for each layer. Let's check both the
- * offset and type.
- */
-static_assert(offsetof(struct ovl_layer, mnt) == 0);
-static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *));
-
struct ovl_path {
const struct ovl_layer *layer;
struct dentry *dentry;
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index b9355bb6d75a..f6ff23fd101c 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -157,6 +157,34 @@ const struct fs_parameter_spec ovl_parameter_spec[] = {
{}
};
+static char *ovl_next_opt(char **s)
+{
+ char *sbegin = *s;
+ char *p;
+
+ if (sbegin == NULL)
+ return NULL;
+
+ for (p = sbegin; *p; p++) {
+ if (*p == '\\') {
+ p++;
+ if (!*p)
+ break;
+ } else if (*p == ',') {
+ *p = '\0';
+ *s = p + 1;
+ return sbegin;
+ }
+ }
+ *s = NULL;
+ return sbegin;
+}
+
+static int ovl_parse_monolithic(struct fs_context *fc, void *data)
+{
+ return vfs_parse_monolithic_sep(fc, data, ovl_next_opt);
+}
+
static ssize_t ovl_parse_param_split_lowerdirs(char *str)
{
ssize_t nr_layers = 1, nr_colons = 0;
@@ -164,7 +192,8 @@ static ssize_t ovl_parse_param_split_lowerdirs(char *str)
for (s = d = str;; s++, d++) {
if (*s == '\\') {
- s++;
+ /* keep esc chars in split lowerdir */
+ *d++ = *s++;
} else if (*s == ':') {
bool next_colon = (*(s + 1) == ':');
@@ -239,7 +268,7 @@ static void ovl_unescape(char *s)
}
}
-static int ovl_mount_dir(const char *name, struct path *path)
+static int ovl_mount_dir(const char *name, struct path *path, bool upper)
{
int err = -ENOMEM;
char *tmp = kstrdup(name, GFP_KERNEL);
@@ -248,7 +277,7 @@ static int ovl_mount_dir(const char *name, struct path *path)
ovl_unescape(tmp);
err = ovl_mount_dir_noesc(tmp, path);
- if (!err && path->dentry->d_flags & DCACHE_OP_REAL) {
+ if (!err && upper && path->dentry->d_flags & DCACHE_OP_REAL) {
pr_err("filesystem on '%s' not supported as upperdir\n",
tmp);
path_put_init(path);
@@ -269,7 +298,7 @@ static int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
struct path path;
char *dup;
- err = ovl_mount_dir(name, &path);
+ err = ovl_mount_dir(name, &path, true);
if (err)
return err;
@@ -321,12 +350,6 @@ static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
* Set "/lower1", "/lower2", and "/lower3" as lower layers and
* "/data1" and "/data2" as data lower layers. Any existing lower
* layers are replaced.
- * (2) lowerdir=:/lower4
- * Append "/lower4" to current stack of lower layers. This requires
- * that there already is at least one lower layer configured.
- * (3) lowerdir=::/lower5
- * Append data "/lower5" as data lower layer. This requires that
- * there's at least one regular lower layer present.
*/
static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
{
@@ -348,49 +371,9 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
return 0;
}
- if (strncmp(name, "::", 2) == 0) {
- /*
- * This is a data layer.
- * There must be at least one regular lower layer
- * specified.
- */
- if (ctx->nr == 0) {
- pr_err("data lower layers without regular lower layers not allowed");
- return -EINVAL;
- }
-
- /* Skip the leading "::". */
- name += 2;
- data_layer = true;
- /*
- * A data layer is automatically an append as there
- * must've been at least one regular lower layer.
- */
- append = true;
- } else if (*name == ':') {
- /*
- * This is a regular lower layer.
- * If users want to append a layer enforce that they
- * have already specified a first layer before. It's
- * better to be strict.
- */
- if (ctx->nr == 0) {
- pr_err("cannot append layer if no previous layer has been specified");
- return -EINVAL;
- }
-
- /*
- * Once a sequence of data layers has started regular
- * lower layers are forbidden.
- */
- if (ctx->nr_data > 0) {
- pr_err("regular lower layers cannot follow data lower layers");
- return -EINVAL;
- }
-
- /* Skip the leading ":". */
- name++;
- append = true;
+ if (*name == ':') {
+ pr_err("cannot append lower layer");
+ return -EINVAL;
}
dup = kstrdup(name, GFP_KERNEL);
@@ -472,7 +455,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
l = &ctx->lower[nr];
memset(l, 0, sizeof(*l));
- err = ovl_mount_dir_noesc(dup_iter, &l->path);
+ err = ovl_mount_dir(dup_iter, &l->path, false);
if (err)
goto out_put;
@@ -682,6 +665,7 @@ static int ovl_reconfigure(struct fs_context *fc)
}
static const struct fs_context_operations ovl_context_ops = {
+ .parse_monolithic = ovl_parse_monolithic,
.parse_param = ovl_parse_param,
.get_tree = ovl_get_tree,
.reconfigure = ovl_reconfigure,
@@ -752,12 +736,12 @@ void ovl_free_fs(struct ovl_fs *ofs)
if (ofs->upperdir_locked)
ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root);
- /* Hack! Reuse ofs->layers as a vfsmount array before freeing it */
- mounts = (struct vfsmount **) ofs->layers;
+ /* Reuse ofs->config.lowerdirs as a vfsmount array before freeing it */
+ mounts = (struct vfsmount **) ofs->config.lowerdirs;
for (i = 0; i < ofs->numlayer; i++) {
iput(ofs->layers[i].trap);
+ kfree(ofs->config.lowerdirs[i]);
mounts[i] = ofs->layers[i].mnt;
- kfree(ofs->layers[i].name);
}
kern_unmount_array(mounts, ofs->numlayer);
kfree(ofs->layers);
@@ -765,6 +749,7 @@ void ovl_free_fs(struct ovl_fs *ofs)
free_anon_bdev(ofs->fs[i].pseudo_dev);
kfree(ofs->fs);
+ kfree(ofs->config.lowerdirs);
kfree(ofs->config.upperdir);
kfree(ofs->config.workdir);
if (ofs->creator_cred)
@@ -949,16 +934,23 @@ int ovl_show_options(struct seq_file *m, struct dentry *dentry)
struct super_block *sb = dentry->d_sb;
struct ovl_fs *ofs = OVL_FS(sb);
size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer;
- const struct ovl_layer *data_layers = &ofs->layers[nr_merged_lower];
-
- /* ofs->layers[0] is the upper layer */
- seq_printf(m, ",lowerdir=%s", ofs->layers[1].name);
- /* dump regular lower layers */
- for (nr = 2; nr < nr_merged_lower; nr++)
- seq_printf(m, ":%s", ofs->layers[nr].name);
- /* dump data lower layers */
- for (nr = 0; nr < ofs->numdatalayer; nr++)
- seq_printf(m, "::%s", data_layers[nr].name);
+
+ /*
+ * lowerdirs[] starts from offset 1, then
+ * >= 0 regular lower layers prefixed with : and
+ * >= 0 data-only lower layers prefixed with ::
+ *
+	 * we need to escape commas and spaces like seq_show_option() does, and
+	 * we also need to escape the colon separator in lowerdir paths.
+ */
+ seq_puts(m, ",lowerdir=");
+ for (nr = 1; nr < ofs->numlayer; nr++) {
+ if (nr > 1)
+ seq_putc(m, ':');
+ if (nr >= nr_merged_lower)
+ seq_putc(m, ':');
+ seq_escape(m, ofs->config.lowerdirs[nr], ":, \t\n\\");
+ }
if (ofs->config.upperdir) {
seq_show_option(m, "upperdir", ofs->config.upperdir);
seq_show_option(m, "workdir", ofs->config.workdir);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index def266b5e2a3..3fa2416264a4 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -104,8 +104,8 @@ static int ovl_revalidate_real(struct dentry *d, unsigned int flags, bool weak)
static int ovl_dentry_revalidate_common(struct dentry *dentry,
unsigned int flags, bool weak)
{
- struct ovl_entry *oe = OVL_E(dentry);
- struct ovl_path *lowerstack = ovl_lowerstack(oe);
+ struct ovl_entry *oe;
+ struct ovl_path *lowerstack;
struct inode *inode = d_inode_rcu(dentry);
struct dentry *upper;
unsigned int i;
@@ -115,6 +115,8 @@ static int ovl_dentry_revalidate_common(struct dentry *dentry,
if (!inode)
return -ECHILD;
+ oe = OVL_I_E(inode);
+ lowerstack = ovl_lowerstack(oe);
upper = ovl_i_dentry_upper(inode);
if (upper)
ret = ovl_revalidate_real(upper, flags, weak);
@@ -167,6 +169,7 @@ static void ovl_free_inode(struct inode *inode)
struct ovl_inode *oi = OVL_I(inode);
kfree(oi->redirect);
+ kfree(oi->oe);
mutex_destroy(&oi->lock);
kmem_cache_free(ovl_inode_cachep, oi);
}
@@ -176,7 +179,7 @@ static void ovl_destroy_inode(struct inode *inode)
struct ovl_inode *oi = OVL_I(inode);
dput(oi->__upperdentry);
- ovl_free_entry(oi->oe);
+ ovl_stack_put(ovl_lowerstack(oi->oe), ovl_numlower(oi->oe));
if (S_ISDIR(inode->i_mode))
ovl_dir_cache_free(inode);
else
@@ -569,11 +572,6 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
upper_layer->idx = 0;
upper_layer->fsid = 0;
- err = -ENOMEM;
- upper_layer->name = kstrdup(ofs->config.upperdir, GFP_KERNEL);
- if (!upper_layer->name)
- goto out;
-
/*
* Inherit SB_NOSEC flag from upperdir.
*
@@ -1122,7 +1120,8 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
layers[ofs->numlayer].idx = ofs->numlayer;
layers[ofs->numlayer].fsid = fsid;
layers[ofs->numlayer].fs = &ofs->fs[fsid];
- layers[ofs->numlayer].name = l->name;
+ /* Store for printing lowerdir=... in ovl_show_options() */
+ ofs->config.lowerdirs[ofs->numlayer] = l->name;
l->name = NULL;
ofs->numlayer++;
ofs->fs[fsid].is_lower = true;
@@ -1367,8 +1366,16 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
if (!layers)
goto out_err;
+ ofs->config.lowerdirs = kcalloc(ctx->nr + 1, sizeof(char *), GFP_KERNEL);
+ if (!ofs->config.lowerdirs) {
+ kfree(layers);
+ goto out_err;
+ }
ofs->layers = layers;
- /* Layer 0 is reserved for upper even if there's no upper */
+ /*
+ * Layer 0 is reserved for upper even if there's no upper.
+ * For consistency, config.lowerdirs[0] is NULL.
+ */
ofs->numlayer = 1;
sb->s_stack_depth = 0;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9e72bfe8bbad..31e897ad5e6a 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -233,19 +233,18 @@ static void put_quota_format(struct quota_format_type *fmt)
* All dquots are placed to the end of inuse_list when first created, and this
* list is used for invalidate operation, which must look at every dquot.
*
- * When the last reference of a dquot will be dropped, the dquot will be
- * added to releasing_dquots. We'd then queue work item which would call
+ * When the last reference of a dquot is dropped, the dquot is added to
+ * releasing_dquots. We'll then queue a work item which will call
* synchronize_srcu() and after that perform the final cleanup of all the
- * dquots on the list. Both releasing_dquots and free_dquots use the
- * dq_free list_head in the dquot struct. When a dquot is removed from
- * releasing_dquots, a reference count is always subtracted, and if
- * dq_count == 0 at that point, the dquot will be added to the free_dquots.
+ * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
+ * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
+ * struct.
*
- * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
- * and this list is searched whenever we need an available dquot. Dquots are
- * removed from the list as soon as they are used again, and
- * dqstats.free_dquots gives the number of dquots on the list. When
- * dquot is invalidated it's completely released from memory.
+ * Unused and cleaned up dquots are in the free_dquots list and this list is
+ * searched whenever we need an available dquot. Dquots are removed from the
+ * list as soon as they are used again and dqstats.free_dquots gives the number
+ * of dquots on the list. When a dquot is invalidated it's completely released
+ * from memory.
*
* Dirty dquots are added to the dqi_dirty_list of quota_info when mark
* dirtied, and this list is searched when writing dirty dquots back to
@@ -321,6 +320,7 @@ static inline void put_dquot_last(struct dquot *dquot)
static inline void put_releasing_dquots(struct dquot *dquot)
{
list_add_tail(&dquot->dq_free, &releasing_dquots);
+ set_bit(DQ_RELEASING_B, &dquot->dq_flags);
}
static inline void remove_free_dquot(struct dquot *dquot)
@@ -328,8 +328,10 @@ static inline void remove_free_dquot(struct dquot *dquot)
if (list_empty(&dquot->dq_free))
return;
list_del_init(&dquot->dq_free);
- if (!atomic_read(&dquot->dq_count))
+ if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
dqstats_dec(DQST_FREE_DQUOTS);
+ else
+ clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}
static inline void put_inuse(struct dquot *dquot)
@@ -581,12 +583,6 @@ restart:
continue;
/* Wait for dquot users */
if (atomic_read(&dquot->dq_count)) {
- /* dquot in releasing_dquots, flush and retry */
- if (!list_empty(&dquot->dq_free)) {
- spin_unlock(&dq_list_lock);
- goto restart;
- }
-
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
/*
@@ -606,6 +602,15 @@ restart:
goto restart;
}
/*
+ * The last user already dropped its reference but dquot didn't
+ * get fully cleaned up yet. Restart the scan which flushes the
+ * work cleaning up released dquots.
+ */
+ if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
+ spin_unlock(&dq_list_lock);
+ goto restart;
+ }
+ /*
* Quota now has no users and it has been written on last
* dqput()
*/
@@ -696,6 +701,13 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
dq_dirty);
WARN_ON(!dquot_active(dquot));
+ /* If the dquot is releasing we should not touch it */
+ if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
+ spin_unlock(&dq_list_lock);
+ flush_delayed_work(&quota_release_work);
+ spin_lock(&dq_list_lock);
+ continue;
+ }
/* Now we have active dquot from which someone is
* holding reference so we can safely just increase
@@ -809,18 +821,18 @@ static void quota_release_workfn(struct work_struct *work)
/* Exchange the list head to avoid livelock. */
list_replace_init(&releasing_dquots, &rls_head);
spin_unlock(&dq_list_lock);
+ synchronize_srcu(&dquot_srcu);
restart:
- synchronize_srcu(&dquot_srcu);
spin_lock(&dq_list_lock);
while (!list_empty(&rls_head)) {
dquot = list_first_entry(&rls_head, struct dquot, dq_free);
- /* Dquot got used again? */
- if (atomic_read(&dquot->dq_count) > 1) {
- remove_free_dquot(dquot);
- atomic_dec(&dquot->dq_count);
- continue;
- }
+ WARN_ON_ONCE(atomic_read(&dquot->dq_count));
+ /*
+ * Note that DQ_RELEASING_B protects us from racing with
+ * invalidate_dquots() calls so we are safe to work with the
+ * dquot even after we drop dq_list_lock.
+ */
if (dquot_dirty(dquot)) {
spin_unlock(&dq_list_lock);
/* Commit dquot before releasing */
@@ -834,7 +846,6 @@ restart:
}
/* Dquot is inactive and clean, now move it to free list */
remove_free_dquot(dquot);
- atomic_dec(&dquot->dq_count);
put_dquot_last(dquot);
}
spin_unlock(&dq_list_lock);
@@ -875,6 +886,7 @@ void dqput(struct dquot *dquot)
BUG_ON(!list_empty(&dquot->dq_free));
#endif
put_releasing_dquots(dquot);
+ atomic_dec(&dquot->dq_count);
spin_unlock(&dq_list_lock);
queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
}
@@ -963,7 +975,7 @@ we_slept:
dqstats_inc(DQST_LOOKUPS);
}
/* Wait for dq_lock - after this we know that either dquot_release() is
- * already finished or it will be canceled due to dq_count > 1 test */
+ * already finished or it will be canceled due to dq_count > 0 test */
wait_on_dquot(dquot);
/* Read the dquot / allocate space in quota file */
if (!dquot_active(dquot)) {
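
Distilled shape of the new release scheme (a sketch assembled from the hunks above, not the complete dquot.c flow): the final dqput() parks the dquot with DQ_RELEASING_B set, and the workqueue item later waits out SRCU readers before finishing cleanup.

/* last reference dropped: park for deferred cleanup */
spin_lock(&dq_list_lock);
list_add_tail(&dquot->dq_free, &releasing_dquots);
set_bit(DQ_RELEASING_B, &dquot->dq_flags);	/* scanners must not touch it */
atomic_dec(&dquot->dq_count);			/* now 0, but not yet reusable */
spin_unlock(&dq_list_lock);
queue_delayed_work(system_unbound_wq, &quota_release_work, 1);

/* worker: one SRCU grace period covers the whole batch */
synchronize_srcu(&dquot_srcu);
/* ... then write back dirty dquots and move clean ones to free_dquots ... */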
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index e2be8aedb26e..fe1bf5b6e0cb 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -15,6 +15,7 @@
static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
+static void cfids_laundromat_worker(struct work_struct *work);
static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
const char *path,
@@ -169,15 +170,18 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
return -ENOENT;
}
/*
- * At this point we either have a lease already and we can just
- * return it. If not we are guaranteed to be the only thread accessing
- * this cfid.
+	 * Return the cached fid if it has a lease. Otherwise, it is either a
+	 * new entry or the laundromat worker removed it from @cfids->entries.
+	 * The caller will put the last reference in the latter case.
*/
+ spin_lock(&cfids->cfid_list_lock);
if (cfid->has_lease) {
+ spin_unlock(&cfids->cfid_list_lock);
*ret_cfid = cfid;
kfree(utf16_path);
return 0;
}
+ spin_unlock(&cfids->cfid_list_lock);
/*
* Skip any prefix paths in @path as lookup_positive_unlocked() ends up
@@ -294,9 +298,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
goto oshr_free;
}
}
+ spin_lock(&cfids->cfid_list_lock);
cfid->dentry = dentry;
cfid->time = jiffies;
cfid->has_lease = true;
+ spin_unlock(&cfids->cfid_list_lock);
oshr_free:
kfree(utf16_path);
@@ -305,24 +311,28 @@ oshr_free:
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
spin_lock(&cfids->cfid_list_lock);
- if (rc && !cfid->has_lease) {
- if (cfid->on_list) {
- list_del(&cfid->entry);
- cfid->on_list = false;
- cfids->num_entries--;
+ if (!cfid->has_lease) {
+ if (rc) {
+ if (cfid->on_list) {
+ list_del(&cfid->entry);
+ cfid->on_list = false;
+ cfids->num_entries--;
+ }
+ rc = -ENOENT;
+ } else {
+ /*
+ * We are guaranteed to have two references at this
+ * point. One for the caller and one for a potential
+ * lease. Release the Lease-ref so that the directory
+ * will be closed when the caller closes the cached
+ * handle.
+ */
+ spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ goto out;
}
- rc = -ENOENT;
}
spin_unlock(&cfids->cfid_list_lock);
- if (!rc && !cfid->has_lease) {
- /*
- * We are guaranteed to have two references at this point.
- * One for the caller and one for a potential lease.
- * Release the Lease-ref so that the directory will be closed
- * when the caller closes the cached handle.
- */
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- }
if (rc) {
if (cfid->is_open)
SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
@@ -330,7 +340,7 @@ oshr_free:
free_cached_dir(cfid);
cfid = NULL;
}
-
+out:
if (rc == 0) {
*ret_cfid = cfid;
atomic_inc(&tcon->num_remote_opens);
@@ -572,53 +582,51 @@ static void free_cached_dir(struct cached_fid *cfid)
kfree(cfid);
}
-static int
-cifs_cfids_laundromat_thread(void *p)
+static void cfids_laundromat_worker(struct work_struct *work)
{
- struct cached_fids *cfids = p;
+ struct cached_fids *cfids;
struct cached_fid *cfid, *q;
- struct list_head entry;
+ LIST_HEAD(entry);
- while (!kthread_should_stop()) {
- ssleep(1);
- INIT_LIST_HEAD(&entry);
- if (kthread_should_stop())
- return 0;
- spin_lock(&cfids->cfid_list_lock);
- list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
- if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
- list_del(&cfid->entry);
- list_add(&cfid->entry, &entry);
- cfids->num_entries--;
- }
- }
- spin_unlock(&cfids->cfid_list_lock);
+ cfids = container_of(work, struct cached_fids, laundromat_work.work);
- list_for_each_entry_safe(cfid, q, &entry, entry) {
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+ if (cfid->time &&
+ time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
cfid->on_list = false;
- list_del(&cfid->entry);
+ list_move(&cfid->entry, &entry);
+ cfids->num_entries--;
+ /* To prevent race with smb2_cached_lease_break() */
+ kref_get(&cfid->refcount);
+ }
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+ list_for_each_entry_safe(cfid, q, &entry, entry) {
+ list_del(&cfid->entry);
+ /*
+ * Cancel and wait for the work to finish in case we are racing
+ * with it.
+ */
+ cancel_work_sync(&cfid->lease_break);
+ if (cfid->has_lease) {
/*
- * Cancel, and wait for the work to finish in
- * case we are racing with it.
+ * Our lease has not yet been cancelled from the server
+ * so we need to drop the reference.
*/
- cancel_work_sync(&cfid->lease_break);
- if (cfid->has_lease) {
- /*
- * We lease has not yet been cancelled from
- * the server so we need to drop the reference.
- */
- spin_lock(&cfids->cfid_list_lock);
- cfid->has_lease = false;
- spin_unlock(&cfids->cfid_list_lock);
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- }
+ spin_lock(&cfids->cfid_list_lock);
+ cfid->has_lease = false;
+ spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
}
+ /* Drop the extra reference opened above */
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
}
-
- return 0;
+ queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+ dir_cache_timeout * HZ);
}
-
struct cached_fids *init_cached_dirs(void)
{
struct cached_fids *cfids;
@@ -629,19 +637,10 @@ struct cached_fids *init_cached_dirs(void)
spin_lock_init(&cfids->cfid_list_lock);
INIT_LIST_HEAD(&cfids->entries);
- /*
- * since we're in a cifs function already, we know that
- * this will succeed. No need for try_module_get().
- */
- __module_get(THIS_MODULE);
- cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread,
- cfids, "cifsd-cfid-laundromat");
- if (IS_ERR(cfids->laundromat)) {
- cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n");
- kfree(cfids);
- module_put(THIS_MODULE);
- return NULL;
- }
+ INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
+ queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+ dir_cache_timeout * HZ);
+
return cfids;
}
@@ -657,11 +656,7 @@ void free_cached_dirs(struct cached_fids *cfids)
if (cfids == NULL)
return;
- if (cfids->laundromat) {
- kthread_stop(cfids->laundromat);
- cfids->laundromat = NULL;
- module_put(THIS_MODULE);
- }
+ cancel_delayed_work_sync(&cfids->laundromat_work);
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
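
The kthread-to-workqueue conversion above is the self-rearming delayed-work pattern: one scan per period, re-queued from inside the worker, so no dedicated thread or module refcount games are needed. A minimal sketch (system_wq and the 30-second period are illustrative stand-ins):

static void laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids =
		container_of(work, struct cached_fids, laundromat_work.work);

	/* ... expire stale entries under cfids->cfid_list_lock ... */

	/* re-arm for the next period */
	queue_delayed_work(system_wq, &cfids->laundromat_work, 30 * HZ);
}

/* setup */
INIT_DELAYED_WORK(&cfids->laundromat_work, laundromat_worker);
queue_delayed_work(system_wq, &cfids->laundromat_work, 30 * HZ);

/* teardown: cancel_delayed_work_sync(&cfids->laundromat_work); */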
diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
index a82ff2cea789..81ba0fd5cc16 100644
--- a/fs/smb/client/cached_dir.h
+++ b/fs/smb/client/cached_dir.h
@@ -57,7 +57,7 @@ struct cached_fids {
spinlock_t cfid_list_lock;
int num_entries;
struct list_head entries;
- struct task_struct *laundromat;
+ struct delayed_work laundromat_work;
};
extern struct cached_fids *init_cached_dirs(void);
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 3902e90dca6b..7b923e36501b 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -2474,8 +2474,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
static struct cifs_tcon *
cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
- int rc, xid;
struct cifs_tcon *tcon;
+ bool nohandlecache;
+ int rc, xid;
tcon = cifs_find_tcon(ses, ctx);
if (tcon) {
@@ -2493,14 +2494,17 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
goto out_fail;
}
- if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
- tcon = tcon_info_alloc(true);
+ if (ses->server->dialect >= SMB20_PROT_ID &&
+ (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING))
+ nohandlecache = ctx->nohandlecache;
else
- tcon = tcon_info_alloc(false);
+ nohandlecache = true;
+ tcon = tcon_info_alloc(!nohandlecache);
if (tcon == NULL) {
rc = -ENOMEM;
goto out_fail;
}
+ tcon->nohandlecache = nohandlecache;
if (ctx->snapshot_time) {
if (ses->server->vals->protocol_id == 0) {
@@ -2662,10 +2666,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
tcon->nocase = ctx->nocase;
tcon->broken_sparse_sup = ctx->no_sparse;
tcon->max_cached_dirs = ctx->max_cached_dirs;
- if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
- tcon->nohandlecache = ctx->nohandlecache;
- else
- tcon->nohandlecache = true;
tcon->nodelete = ctx->nodelete;
tcon->local_lease = ctx->local_lease;
INIT_LIST_HEAD(&tcon->pending_opens);
@@ -2895,9 +2895,9 @@ bind_socket(struct TCP_Server_Info *server)
if (server->srcaddr.ss_family != AF_UNSPEC) {
/* Bind to the specified local IP address */
struct socket *socket = server->ssocket;
- rc = socket->ops->bind(socket,
- (struct sockaddr *) &server->srcaddr,
- sizeof(server->srcaddr));
+ rc = kernel_bind(socket,
+ (struct sockaddr *) &server->srcaddr,
+ sizeof(server->srcaddr));
if (rc < 0) {
struct sockaddr_in *saddr4;
struct sockaddr_in6 *saddr6;
@@ -3046,8 +3046,8 @@ generic_ip_connect(struct TCP_Server_Info *server)
socket->sk->sk_sndbuf,
socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
- rc = socket->ops->connect(socket, saddr, slen,
- server->noblockcnt ? O_NONBLOCK : 0);
+ rc = kernel_connect(socket, saddr, slen,
+ server->noblockcnt ? O_NONBLOCK : 0);
/*
* When mounting SMB root file systems, we do not want to block in
* connect. Otherwise bail out and then let cifs_reconnect() perform
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index db7fa704a3f6..4b38c3a285f6 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -84,6 +84,8 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
spin_lock_init(&conn->llist_lock);
INIT_LIST_HEAD(&conn->lock_list);
+ init_rwsem(&conn->session_lock);
+
down_write(&conn_list_lock);
list_add(&conn->conns_list, &conn_list);
up_write(&conn_list_lock);
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index ab2583f030ce..3c005246a32e 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -50,6 +50,7 @@ struct ksmbd_conn {
struct nls_table *local_nls;
struct unicode_map *um;
struct list_head conns_list;
+ struct rw_semaphore session_lock;
/* smb session 1 per user */
struct xarray sessions;
unsigned long last_active;
diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
index 408cddf2f094..d2c81a8a11dd 100644
--- a/fs/smb/server/mgmt/tree_connect.c
+++ b/fs/smb/server/mgmt/tree_connect.c
@@ -73,7 +73,10 @@ ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
tree_conn->user = sess->user;
tree_conn->share_conf = sc;
+ tree_conn->t_state = TREE_NEW;
status.tree_conn = tree_conn;
+ atomic_set(&tree_conn->refcount, 1);
+ init_waitqueue_head(&tree_conn->refcount_q);
ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
GFP_KERNEL));
@@ -93,14 +96,33 @@ out_error:
return status;
}
+void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon)
+{
+ /*
+	 * Wake up the disconnect path waiting to release this tree
+	 * connect. waitqueue_active() is safe here because the wait
+	 * condition uses an atomic operation.
+ */
+ if (!atomic_dec_return(&tcon->refcount) &&
+ waitqueue_active(&tcon->refcount_q))
+ wake_up(&tcon->refcount_q);
+}
+
int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
struct ksmbd_tree_connect *tree_conn)
{
int ret;
+ write_lock(&sess->tree_conns_lock);
+ xa_erase(&sess->tree_conns, tree_conn->id);
+ write_unlock(&sess->tree_conns_lock);
+
+ if (!atomic_dec_and_test(&tree_conn->refcount))
+ wait_event(tree_conn->refcount_q,
+ atomic_read(&tree_conn->refcount) == 0);
+
ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id);
ksmbd_release_tree_conn_id(sess, tree_conn->id);
- xa_erase(&sess->tree_conns, tree_conn->id);
ksmbd_share_config_put(tree_conn->share_conf);
kfree(tree_conn);
return ret;
@@ -111,11 +133,15 @@ struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
{
struct ksmbd_tree_connect *tcon;
+ read_lock(&sess->tree_conns_lock);
tcon = xa_load(&sess->tree_conns, id);
if (tcon) {
- if (test_bit(TREE_CONN_EXPIRE, &tcon->status))
+ if (tcon->t_state != TREE_CONNECTED)
+ tcon = NULL;
+ else if (!atomic_inc_not_zero(&tcon->refcount))
tcon = NULL;
}
+ read_unlock(&sess->tree_conns_lock);
return tcon;
}
@@ -129,8 +155,18 @@ int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
if (!sess)
return -EINVAL;
- xa_for_each(&sess->tree_conns, id, tc)
+ xa_for_each(&sess->tree_conns, id, tc) {
+ write_lock(&sess->tree_conns_lock);
+ if (tc->t_state == TREE_DISCONNECTED) {
+ write_unlock(&sess->tree_conns_lock);
+ ret = -ENOENT;
+ continue;
+ }
+ tc->t_state = TREE_DISCONNECTED;
+ write_unlock(&sess->tree_conns_lock);
+
ret |= ksmbd_tree_conn_disconnect(sess, tc);
+ }
xa_destroy(&sess->tree_conns);
return ret;
}
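
The refcount/waitqueue pair added above is a drain-on-teardown handshake: lookups take extra references, and disconnect drops the initial one then waits for in-flight users to finish. A condensed sketch, assuming the refcount starts at 1 and the waitqueue was set up with init_waitqueue_head():

struct conn {
	atomic_t refcount;		/* starts at 1 (creator's ref) */
	wait_queue_head_t refcount_q;
};

static void conn_put(struct conn *c)
{
	/* waitqueue_active() is safe: the waiter's condition
	 * re-reads the atomic inside wait_event() */
	if (!atomic_dec_return(&c->refcount) &&
	    waitqueue_active(&c->refcount_q))
		wake_up(&c->refcount_q);
}

static void conn_teardown(struct conn *c)
{
	/* drop the creator's reference, then drain users */
	if (!atomic_dec_and_test(&c->refcount))
		wait_event(c->refcount_q,
			   atomic_read(&c->refcount) == 0);
	kfree(c);
}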
diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
index 562d647ad9fa..6377a70b811c 100644
--- a/fs/smb/server/mgmt/tree_connect.h
+++ b/fs/smb/server/mgmt/tree_connect.h
@@ -14,7 +14,11 @@ struct ksmbd_share_config;
struct ksmbd_user;
struct ksmbd_conn;
-#define TREE_CONN_EXPIRE 1
+enum {
+ TREE_NEW = 0,
+ TREE_CONNECTED,
+ TREE_DISCONNECTED
+};
struct ksmbd_tree_connect {
int id;
@@ -27,7 +31,9 @@ struct ksmbd_tree_connect {
int maximal_access;
bool posix_extensions;
- unsigned long status;
+ atomic_t refcount;
+ wait_queue_head_t refcount_q;
+ unsigned int t_state;
};
struct ksmbd_tree_conn_status {
@@ -46,6 +52,7 @@ struct ksmbd_session;
struct ksmbd_tree_conn_status
ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
const char *share_name);
+void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon);
int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
struct ksmbd_tree_connect *tree_conn);
diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
index 8a5dcab05614..15f68ee05089 100644
--- a/fs/smb/server/mgmt/user_session.c
+++ b/fs/smb/server/mgmt/user_session.c
@@ -174,7 +174,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
unsigned long id;
struct ksmbd_session *sess;
- down_write(&sessions_table_lock);
+ down_write(&conn->session_lock);
xa_for_each(&conn->sessions, id, sess) {
if (sess->state != SMB2_SESSION_VALID ||
time_after(jiffies,
@@ -185,7 +185,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
continue;
}
}
- up_write(&sessions_table_lock);
+ up_write(&conn->session_lock);
}
int ksmbd_session_register(struct ksmbd_conn *conn,
@@ -227,7 +227,9 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
}
}
}
+ up_write(&sessions_table_lock);
+ down_write(&conn->session_lock);
xa_for_each(&conn->sessions, id, sess) {
unsigned long chann_id;
struct channel *chann;
@@ -244,7 +246,7 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
ksmbd_session_destroy(sess);
}
}
- up_write(&sessions_table_lock);
+ up_write(&conn->session_lock);
}
struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
@@ -252,9 +254,11 @@ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
{
struct ksmbd_session *sess;
+ down_read(&conn->session_lock);
sess = xa_load(&conn->sessions, id);
if (sess)
sess->last_active = jiffies;
+ up_read(&conn->session_lock);
return sess;
}
@@ -351,6 +355,7 @@ static struct ksmbd_session *__session_create(int protocol)
xa_init(&sess->ksmbd_chann_list);
xa_init(&sess->rpc_handle_list);
sess->sequence_number = 1;
+ rwlock_init(&sess->tree_conns_lock);
ret = __init_smb2_session(sess);
if (ret)
diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h
index f99d475b28db..63cb08fffde8 100644
--- a/fs/smb/server/mgmt/user_session.h
+++ b/fs/smb/server/mgmt/user_session.h
@@ -60,6 +60,7 @@ struct ksmbd_session {
struct ksmbd_file_table file_table;
unsigned long last_active;
+ rwlock_t tree_conns_lock;
};
static inline int test_session_flag(struct ksmbd_session *sess, int bit)
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index 32347fec33c4..3079e607c5fe 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -241,6 +241,8 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
} while (is_chained == true);
send:
+ if (work->tcon)
+ ksmbd_tree_connect_put(work->tcon);
smb3_preauth_hash_rsp(work);
if (work->sess && work->sess->enc && work->encrypted &&
conn->ops->encrypt_resp) {
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 544022dd6d20..93262ca3f58a 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -231,11 +231,12 @@ void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
{
struct smb2_hdr *rsp_hdr;
- if (work->next_smb2_rcv_hdr_off)
- rsp_hdr = ksmbd_resp_buf_next(work);
- else
- rsp_hdr = smb2_get_msg(work->response_buf);
+ rsp_hdr = smb2_get_msg(work->response_buf);
rsp_hdr->Status = err;
+
+ work->iov_idx = 0;
+ work->iov_cnt = 0;
+ work->next_smb2_rcv_hdr_off = 0;
smb2_set_err_rsp(work);
}
@@ -1993,6 +1994,9 @@ int smb2_tree_connect(struct ksmbd_work *work)
if (conn->posix_ext_supported)
status.tree_conn->posix_extensions = true;
+ write_lock(&sess->tree_conns_lock);
+ status.tree_conn->t_state = TREE_CONNECTED;
+ write_unlock(&sess->tree_conns_lock);
rsp->StructureSize = cpu_to_le16(16);
out_err1:
rsp->Capabilities = 0;
@@ -2122,27 +2126,50 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
ksmbd_debug(SMB, "request\n");
+ if (!tcon) {
+ ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+
+ rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ ksmbd_close_tree_conn_fds(work);
+
+ write_lock(&sess->tree_conns_lock);
+ if (tcon->t_state == TREE_DISCONNECTED) {
+ write_unlock(&sess->tree_conns_lock);
+ rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ WARN_ON_ONCE(atomic_dec_and_test(&tcon->refcount));
+ tcon->t_state = TREE_DISCONNECTED;
+ write_unlock(&sess->tree_conns_lock);
+
+ err = ksmbd_tree_conn_disconnect(sess, tcon);
+ if (err) {
+ rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+ goto err_out;
+ }
+
+ work->tcon = NULL;
+
rsp->StructureSize = cpu_to_le16(4);
err = ksmbd_iov_pin_rsp(work, rsp,
sizeof(struct smb2_tree_disconnect_rsp));
if (err) {
rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
- smb2_set_err_rsp(work);
- return err;
+ goto err_out;
}
- if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
- ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+ return 0;
- rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
- smb2_set_err_rsp(work);
- return -ENOENT;
- }
+err_out:
+ smb2_set_err_rsp(work);
+ return err;
- ksmbd_close_tree_conn_fds(work);
- ksmbd_tree_conn_disconnect(sess, tcon);
- work->tcon = NULL;
- return 0;
}
/**
@@ -2164,17 +2191,17 @@ int smb2_session_logoff(struct ksmbd_work *work)
ksmbd_debug(SMB, "request\n");
- sess_id = le64_to_cpu(req->hdr.SessionId);
-
- rsp->StructureSize = cpu_to_le16(4);
- err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
- if (err) {
- rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+ ksmbd_conn_lock(conn);
+ if (!ksmbd_conn_good(conn)) {
+ ksmbd_conn_unlock(conn);
+ rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
smb2_set_err_rsp(work);
- return err;
+ return -ENOENT;
}
-
+ sess_id = le64_to_cpu(req->hdr.SessionId);
ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
+ ksmbd_conn_unlock(conn);
+
ksmbd_close_session_fds(work);
ksmbd_conn_wait_idle(conn, sess_id);
@@ -2196,6 +2223,14 @@ int smb2_session_logoff(struct ksmbd_work *work)
ksmbd_free_user(sess->user);
sess->user = NULL;
ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+
+ rsp->StructureSize = cpu_to_le16(4);
+ err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
+ if (err) {
+ rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+ smb2_set_err_rsp(work);
+ return err;
+ }
return 0;
}
@@ -3370,8 +3405,10 @@ err_out:
}
ksmbd_revert_fsids(work);
err_out1:
- if (!rc)
+ if (!rc) {
+ ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
rc = ksmbd_iov_pin_rsp(work, (void *)rsp, iov_len);
+ }
if (rc) {
if (rc == -EINVAL)
rsp->hdr.Status = STATUS_INVALID_PARAMETER;
@@ -6115,12 +6152,12 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
memcpy(aux_payload_buf, rpc_resp->payload, rpc_resp->payload_sz);
nbytes = rpc_resp->payload_sz;
- kvfree(rpc_resp);
err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
offsetof(struct smb2_read_rsp, Buffer),
aux_payload_buf, nbytes);
if (err)
goto out;
+ kvfree(rpc_resp);
} else {
err = ksmbd_iov_pin_rsp(work, (void *)rsp,
offsetof(struct smb2_read_rsp, Buffer));
@@ -7028,10 +7065,6 @@ skip:
ksmbd_debug(SMB,
"would have to wait for getting lock\n");
- spin_lock(&work->conn->llist_lock);
- list_add_tail(&smb_lock->clist,
- &work->conn->lock_list);
- spin_unlock(&work->conn->llist_lock);
list_add(&smb_lock->llist, &rollback_list);
argv = kmalloc(sizeof(void *), GFP_KERNEL);
@@ -7062,9 +7095,6 @@ skip:
if (work->state != KSMBD_WORK_ACTIVE) {
list_del(&smb_lock->llist);
- spin_lock(&work->conn->llist_lock);
- list_del(&smb_lock->clist);
- spin_unlock(&work->conn->llist_lock);
locks_free_lock(flock);
if (work->state == KSMBD_WORK_CANCELLED) {
@@ -7084,19 +7114,16 @@ skip:
}
list_del(&smb_lock->llist);
- spin_lock(&work->conn->llist_lock);
- list_del(&smb_lock->clist);
- spin_unlock(&work->conn->llist_lock);
release_async_work(work);
goto retry;
} else if (!rc) {
+ list_add(&smb_lock->llist, &rollback_list);
spin_lock(&work->conn->llist_lock);
list_add_tail(&smb_lock->clist,
&work->conn->lock_list);
list_add_tail(&smb_lock->flist,
&fp->lock_list);
spin_unlock(&work->conn->llist_lock);
- list_add(&smb_lock->llist, &rollback_list);
ksmbd_debug(SMB, "successful in taking lock\n");
} else {
goto out;
@@ -8036,10 +8063,10 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
goto err_out;
}
- opinfo_put(opinfo);
- ksmbd_fd_put(work, fp);
opinfo->op_state = OPLOCK_STATE_NONE;
wake_up_interruptible_all(&opinfo->oplock_q);
+ opinfo_put(opinfo);
+ ksmbd_fd_put(work, fp);
rsp->StructureSize = cpu_to_le16(24);
rsp->OplockLevel = rsp_oplevel;
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index f41f8d6108ce..c91eac6514dd 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -106,7 +106,7 @@ int ksmbd_query_inode_status(struct inode *inode)
ci = __ksmbd_inode_lookup(inode);
if (ci) {
ret = KSMBD_INODE_STATUS_OK;
- if (ci->m_flags & S_DEL_PENDING)
+ if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
ret = KSMBD_INODE_STATUS_PENDING_DELETE;
atomic_dec(&ci->m_count);
}
@@ -116,7 +116,7 @@ int ksmbd_query_inode_status(struct inode *inode)
bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
- return (fp->f_ci->m_flags & S_DEL_PENDING);
+ return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
}
void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
@@ -333,6 +333,9 @@ static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
+ if (fp->f_state != FP_INITED)
+ return NULL;
+
if (!atomic_inc_not_zero(&fp->refcount))
return NULL;
return fp;
@@ -382,15 +385,20 @@ int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
return 0;
ft = &work->sess->file_table;
- read_lock(&ft->lock);
+ write_lock(&ft->lock);
fp = idr_find(ft->idr, id);
if (fp) {
set_close_state_blocked_works(fp);
- if (!atomic_dec_and_test(&fp->refcount))
+ if (fp->f_state != FP_INITED)
fp = NULL;
+ else {
+ fp->f_state = FP_CLOSED;
+ if (!atomic_dec_and_test(&fp->refcount))
+ fp = NULL;
+ }
}
- read_unlock(&ft->lock);
+ write_unlock(&ft->lock);
if (!fp)
return -EINVAL;
@@ -570,6 +578,7 @@ struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
fp->tcon = work->tcon;
fp->volatile_id = KSMBD_NO_FID;
fp->persistent_id = KSMBD_NO_FID;
+ fp->f_state = FP_NEW;
fp->f_ci = ksmbd_inode_get(fp);
if (!fp->f_ci) {
@@ -591,6 +600,17 @@ err_out:
return ERR_PTR(ret);
}
+void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
+ unsigned int state)
+{
+ if (!fp)
+ return;
+
+ write_lock(&ft->lock);
+ fp->f_state = state;
+ write_unlock(&ft->lock);
+}
+
static int
__close_file_table_ids(struct ksmbd_file_table *ft,
struct ksmbd_tree_connect *tcon,
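
Net effect of the new FP_NEW/FP_INITED/FP_CLOSED states on the lookup path, distilled from the hunks above (a sketch, not the full ksmbd code): a file is only handed out once fully initialized, and only while a reference can still be taken.

static struct ksmbd_file *fp_get_checked(struct ksmbd_file *fp)
{
	if (fp->f_state != FP_INITED)
		return NULL;	/* still FP_NEW, or already FP_CLOSED */
	if (!atomic_inc_not_zero(&fp->refcount))
		return NULL;	/* raced with the final put */
	return fp;
}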
diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
index fcb13413fa8d..03d0bf941216 100644
--- a/fs/smb/server/vfs_cache.h
+++ b/fs/smb/server/vfs_cache.h
@@ -60,6 +60,12 @@ struct ksmbd_inode {
__le32 m_fattr;
};
+enum {
+ FP_NEW = 0,
+ FP_INITED,
+ FP_CLOSED
+};
+
struct ksmbd_file {
struct file *filp;
u64 persistent_id;
@@ -98,6 +104,7 @@ struct ksmbd_file {
/* if ls is happening on directory, below is valid*/
struct ksmbd_readdir_data readdir_data;
int dot_dotdot[2];
+ unsigned int f_state;
};
static inline void set_ctx_actor(struct dir_context *ctx,
@@ -142,6 +149,8 @@ int ksmbd_close_inode_fds(struct ksmbd_work *work, struct inode *inode);
int ksmbd_init_global_file_table(void);
void ksmbd_free_global_file_table(void);
void ksmbd_set_fd_limit(unsigned long limit);
+void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
+ unsigned int state);
/*
* INODE hash
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index e9cc481b4ddf..f9f4d694640d 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -1001,6 +1001,12 @@ xfs_ag_shrink_space(
error = -ENOSPC;
goto resv_init_out;
}
+
+ /* Update perag geometry */
+ pag->block_count -= delta;
+ __xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
+ &pag->agino_max);
+
xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
return 0;
diff --git a/fs/xfs/scrub/xfile.c b/fs/xfs/scrub/xfile.c
index d98e8e77c684..090c3ead43fd 100644
--- a/fs/xfs/scrub/xfile.c
+++ b/fs/xfs/scrub/xfile.c
@@ -10,7 +10,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
-#include "xfs_format.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/scrub.h"
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index afc4c78b9eed..d5787991bb5b 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (C) 2010, 2023 Red Hat, Inc.
* All Rights Reserved.
*/
#include "xfs.h"
@@ -19,21 +19,147 @@
#include "xfs_log.h"
#include "xfs_ag.h"
-STATIC int
-xfs_trim_extents(
+/*
+ * Notes on an efficient, low latency fstrim algorithm
+ *
+ * We need to walk the filesystem free space and issue discards on the free
+ * space that meets the search criteria (size and location). We cannot issue
+ * discards on extents that might be in use, or are so recently in use they are
+ * still marked as busy. To serialise against extent state changes whilst we are
+ * gathering extents to trim, we must hold the AGF lock to lock out other
+ * allocations and extent free operations that might change extent state.
+ *
+ * However, we cannot just hold the AGF for the entire AG free space walk whilst
+ * we issue discards on each free space that is found. Storage devices can have
+ * extremely slow discard implementations (e.g. ceph RBD) and so walking a
+ * couple of million free extents and issuing synchronous discards on each
+ * extent can take a *long* time. Whilst we are doing this walk, nothing else
+ * can access the AGF, and we can stall transactions and hence the log whilst
+ * modifications wait for the AGF lock to be released. This can lead to hung tasks
+ * kicking the hung task timer and rebooting the system. This is bad.
+ *
+ * Hence we need to take a leaf from the bulkstat playbook. It takes the AGI
+ * lock, gathers a range of inode cluster buffers that are allocated, drops the
+ * AGI lock and then reads all the inode cluster buffers and processes them. It
+ * loops doing this, using a cursor to keep track of how far it has progressed in
+ * the AG, so that each iteration can restart the INOBT lookup from that point.
+ *
+ * We can't do this exactly with free space - once we drop the AGF lock, the
+ * state of the free extent is out of our control and we cannot run a discard
+ * safely on it in this situation. Unless, of course, we've marked the free
+ * extent as busy and undergoing a discard operation whilst we held the AGF
+ * locked.
+ *
+ * This is exactly how online discard works - free extents are marked busy when
+ * they are freed, and once the extent free has been committed to the journal,
+ * the busy extent record is marked as "undergoing discard" and the discard is
+ * then issued on the free extent. Once the discard completes, the busy extent
+ * record is removed and the extent is able to be allocated again.
+ *
+ * In the context of fstrim, if we find a free extent we need to discard, we
+ * don't have to discard it immediately. All we need to do is record that free
+ * extent as being busy and under discard, and all the allocation routines will
+ * now avoid trying to allocate it. Hence if we mark the extent as busy under
+ * the AGF lock, we can safely discard it without holding the AGF lock because
+ * nothing will attempt to allocate that free space until the discard completes.
+ *
+ * This also allows us to issue discards asynchronously like we do with online
+ * discard, and so for fast devices fstrim will run much faster as we can have
+ * multiple discard operations in flight at once, as well as pipeline the free
+ * extent search so that it overlaps with in-flight discard IO.
+ */
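Condensed to its control flow (allocation, error handling and the stop checks elided), the gather/discard pipeline this comment describes, and which xfs_trim_extents() below implements, is roughly:

	do {
		/* with the AGF held: gather free extents and mark them busy */
		error = xfs_trim_gather_extents(pag, start, end, minlen,
						&tcur, extents, &blocks_trimmed);

		/* AGF now released: discards run async on the busy extents */
		error = xfs_discard_extents(pag->pag_mount, extents);
	} while (tcur.ar_blockcount != 0);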
+
+struct workqueue_struct *xfs_discard_wq;
+
+static void
+xfs_discard_endio_work(
+ struct work_struct *work)
+{
+ struct xfs_busy_extents *extents =
+ container_of(work, struct xfs_busy_extents, endio_work);
+
+ xfs_extent_busy_clear(extents->mount, &extents->extent_list, false);
+ kmem_free(extents->owner);
+}
+
+/*
+ * Queue up the actual completion to a thread to avoid IRQ-safe locking for
+ * pagb_lock.
+ */
+static void
+xfs_discard_endio(
+ struct bio *bio)
+{
+ struct xfs_busy_extents *extents = bio->bi_private;
+
+ INIT_WORK(&extents->endio_work, xfs_discard_endio_work);
+ queue_work(xfs_discard_wq, &extents->endio_work);
+ bio_put(bio);
+}
+
+/*
+ * Walk the discard list and issue discards on all the busy extents in the
+ * list. We plug and chain the bios so that we only need a single completion
+ * call to clear all the busy extents once the discards are complete.
+ */
+int
+xfs_discard_extents(
+ struct xfs_mount *mp,
+ struct xfs_busy_extents *extents)
+{
+ struct xfs_extent_busy *busyp;
+ struct bio *bio = NULL;
+ struct blk_plug plug;
+ int error = 0;
+
+ blk_start_plug(&plug);
+ list_for_each_entry(busyp, &extents->extent_list, list) {
+ trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
+ busyp->length);
+
+ error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
+ XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
+ XFS_FSB_TO_BB(mp, busyp->length),
+ GFP_NOFS, &bio);
+ if (error && error != -EOPNOTSUPP) {
+ xfs_info(mp,
+ "discard failed for extent [0x%llx,%u], error %d",
+ (unsigned long long)busyp->bno,
+ busyp->length,
+ error);
+ break;
+ }
+ }
+
+ if (bio) {
+ bio->bi_private = extents;
+ bio->bi_end_io = xfs_discard_endio;
+ submit_bio(bio);
+ } else {
+ xfs_discard_endio_work(&extents->endio_work);
+ }
+ blk_finish_plug(&plug);
+
+ return error;
+}
+
+
+static int
+xfs_trim_gather_extents(
struct xfs_perag *pag,
xfs_daddr_t start,
xfs_daddr_t end,
xfs_daddr_t minlen,
+ struct xfs_alloc_rec_incore *tcur,
+ struct xfs_busy_extents *extents,
uint64_t *blocks_trimmed)
{
struct xfs_mount *mp = pag->pag_mount;
- struct block_device *bdev = mp->m_ddev_targp->bt_bdev;
struct xfs_btree_cur *cur;
struct xfs_buf *agbp;
- struct xfs_agf *agf;
int error;
int i;
+ int batch = 100;
/*
* Force out the log. This means any transactions that might have freed
@@ -45,20 +171,28 @@ xfs_trim_extents(
error = xfs_alloc_read_agf(pag, NULL, 0, &agbp);
if (error)
return error;
- agf = agbp->b_addr;
cur = xfs_allocbt_init_cursor(mp, NULL, agbp, pag, XFS_BTNUM_CNT);
/*
- * Look up the longest btree in the AGF and start with it.
+ * Look up the extent length requested in the AGF and start with it.
*/
- error = xfs_alloc_lookup_ge(cur, 0, be32_to_cpu(agf->agf_longest), &i);
+ if (tcur->ar_startblock == NULLAGBLOCK)
+ error = xfs_alloc_lookup_ge(cur, 0, tcur->ar_blockcount, &i);
+ else
+ error = xfs_alloc_lookup_le(cur, tcur->ar_startblock,
+ tcur->ar_blockcount, &i);
if (error)
goto out_del_cursor;
+ if (i == 0) {
+ /* nothing of that length left in the AG, we are done */
+ tcur->ar_blockcount = 0;
+ goto out_del_cursor;
+ }
/*
* Loop until we are done with all extents that are large
- * enough to be worth discarding.
+ * enough to be worth discarding, or until we hit batch limits.
*/
while (i) {
xfs_agblock_t fbno;
@@ -73,7 +207,16 @@ xfs_trim_extents(
error = -EFSCORRUPTED;
break;
}
- ASSERT(flen <= be32_to_cpu(agf->agf_longest));
+
+ if (--batch <= 0) {
+ /*
+ * Update the cursor to point at this extent so we
+ * restart the next batch from this extent.
+ */
+ tcur->ar_startblock = fbno;
+ tcur->ar_blockcount = flen;
+ break;
+ }
/*
* use daddr format for all range/len calculations as that is
@@ -88,6 +231,7 @@ xfs_trim_extents(
*/
if (dlen < minlen) {
trace_xfs_discard_toosmall(mp, pag->pag_agno, fbno, flen);
+ tcur->ar_blockcount = 0;
break;
}
@@ -110,29 +254,103 @@ xfs_trim_extents(
goto next_extent;
}
- trace_xfs_discard_extent(mp, pag->pag_agno, fbno, flen);
- error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS);
- if (error)
- break;
+ xfs_extent_busy_insert_discard(pag, fbno, flen,
+ &extents->extent_list);
*blocks_trimmed += flen;
-
next_extent:
error = xfs_btree_decrement(cur, 0, &i);
if (error)
break;
- if (fatal_signal_pending(current)) {
- error = -ERESTARTSYS;
- break;
- }
+ /*
+ * If there are no more records in the tree, we are done. Set the
+ * cursor block count to 0 to indicate to the caller that there
+ * are no more extents to search.
+ */
+ if (i == 0)
+ tcur->ar_blockcount = 0;
}
+ /*
+ * If there was an error, release all the gathered busy extents because
+ * we aren't going to issue a discard on them any more.
+ */
+ if (error)
+ xfs_extent_busy_clear(mp, &extents->extent_list, false);
out_del_cursor:
xfs_btree_del_cursor(cur, error);
xfs_buf_relse(agbp);
return error;
}
+static bool
+xfs_trim_should_stop(void)
+{
+ return fatal_signal_pending(current) || freezing(current);
+}
+
+/*
+ * Iterate the free list gathering extents and discarding them. We need a cursor
+ * for the repeated iteration of the gather/discard loop, so use the longest extent
+ * we found in the last batch as the key to start the next batch.
+ */
+static int
+xfs_trim_extents(
+ struct xfs_perag *pag,
+ xfs_daddr_t start,
+ xfs_daddr_t end,
+ xfs_daddr_t minlen,
+ uint64_t *blocks_trimmed)
+{
+ struct xfs_alloc_rec_incore tcur = {
+ .ar_blockcount = pag->pagf_longest,
+ .ar_startblock = NULLAGBLOCK,
+ };
+ int error = 0;
+
+ do {
+ struct xfs_busy_extents *extents;
+
+ extents = kzalloc(sizeof(*extents), GFP_KERNEL);
+ if (!extents) {
+ error = -ENOMEM;
+ break;
+ }
+
+ extents->mount = pag->pag_mount;
+ extents->owner = extents;
+ INIT_LIST_HEAD(&extents->extent_list);
+
+ error = xfs_trim_gather_extents(pag, start, end, minlen,
+ &tcur, extents, blocks_trimmed);
+ if (error) {
+ kfree(extents);
+ break;
+ }
+
+ /*
+ * We hand the extent list to the discard function here so the
+ * discarded extents can be removed from the busy extent list.
+ * This allows the discards to run asynchronously with gathering
+ * the next round of extents to discard.
+ *
+ * However, we must ensure that we do not reference the extent
+ * list after this function call, as it may have been freed by
+ * the time control returns to us.
+ */
+ error = xfs_discard_extents(pag->pag_mount, extents);
+ if (error)
+ break;
+
+ if (xfs_trim_should_stop())
+ break;
+
+ } while (tcur.ar_blockcount != 0);
+
+ return error;
+}
+
/*
* trim a range of the filesystem.
*
@@ -195,12 +413,12 @@ xfs_ioc_trim(
for_each_perag_range(mp, agno, xfs_daddr_to_agno(mp, end), pag) {
error = xfs_trim_extents(pag, start, end, minlen,
&blocks_trimmed);
- if (error) {
+ if (error)
last_error = error;
- if (error == -ERESTARTSYS) {
- xfs_perag_rele(pag);
- break;
- }
+
+ if (xfs_trim_should_stop()) {
+ xfs_perag_rele(pag);
+ break;
}
}
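For reference, the xfs_ioc_trim() path above is driven from userspace through the generic FITRIM ioctl. A minimal, self-contained caller looks roughly like this (all values are in bytes; the mount point is illustrative):

	#include <fcntl.h>
	#include <limits.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

	int main(void)
	{
		struct fstrim_range range;
		int fd = open("/mnt/scratch", O_RDONLY);

		if (fd < 0)
			return 1;
		memset(&range, 0, sizeof(range));
		range.len = ULLONG_MAX;		/* trim the whole filesystem */
		range.minlen = 0;		/* clamped up to the device granularity */
		if (ioctl(fd, FITRIM, &range) < 0) {
			perror("FITRIM");
			return 1;
		}
		/* on return the kernel has rewritten len with the bytes trimmed */
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
		return 0;
	}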
diff --git a/fs/xfs/xfs_discard.h b/fs/xfs/xfs_discard.h
index de92d9cc958f..2b1a85223a56 100644
--- a/fs/xfs/xfs_discard.h
+++ b/fs/xfs/xfs_discard.h
@@ -3,8 +3,10 @@
#define XFS_DISCARD_H 1
struct fstrim_range;
-struct list_head;
+struct xfs_mount;
+struct xfs_busy_extents;
-extern int xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *);
+int xfs_discard_extents(struct xfs_mount *mp, struct xfs_busy_extents *busy);
+int xfs_ioc_trim(struct xfs_mount *mp, struct fstrim_range __user *fstrim);
#endif /* XFS_DISCARD_H */
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 7c2fdc71e42d..9ecfdcdc752f 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -19,13 +19,13 @@
#include "xfs_log.h"
#include "xfs_ag.h"
-void
-xfs_extent_busy_insert(
- struct xfs_trans *tp,
+static void
+xfs_extent_busy_insert_list(
struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len,
- unsigned int flags)
+ unsigned int flags,
+ struct list_head *busy_list)
{
struct xfs_extent_busy *new;
struct xfs_extent_busy *busyp;
@@ -40,7 +40,7 @@ xfs_extent_busy_insert(
new->flags = flags;
/* trace before insert to be able to see failed inserts */
- trace_xfs_extent_busy(tp->t_mountp, pag->pag_agno, bno, len);
+ trace_xfs_extent_busy(pag->pag_mount, pag->pag_agno, bno, len);
spin_lock(&pag->pagb_lock);
rbp = &pag->pagb_tree.rb_node;
@@ -62,10 +62,33 @@ xfs_extent_busy_insert(
rb_link_node(&new->rb_node, parent, rbp);
rb_insert_color(&new->rb_node, &pag->pagb_tree);
- list_add(&new->list, &tp->t_busy);
+ /* always process discard lists in fifo order */
+ list_add_tail(&new->list, busy_list);
spin_unlock(&pag->pagb_lock);
}
+void
+xfs_extent_busy_insert(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ unsigned int flags)
+{
+ xfs_extent_busy_insert_list(pag, bno, len, flags, &tp->t_busy);
+}
+
+void
+xfs_extent_busy_insert_discard(
+ struct xfs_perag *pag,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct list_head *busy_list)
+{
+ xfs_extent_busy_insert_list(pag, bno, len, XFS_EXTENT_BUSY_DISCARDED,
+ busy_list);
+}
+
/*
* Search for a busy extent within the range of the extent we are about to
* allocate. You need to be holding the busy extent tree lock when calling
diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h
index c37bf87e6781..0639aab336f3 100644
--- a/fs/xfs/xfs_extent_busy.h
+++ b/fs/xfs/xfs_extent_busy.h
@@ -16,9 +16,6 @@ struct xfs_alloc_arg;
/*
* Busy block/extent entry. Indexed by a rbtree in perag to mark blocks that
* have been freed but whose transactions aren't committed to disk yet.
- *
- * Note that we use the transaction ID to record the transaction, not the
- * transaction structure itself. See xfs_extent_busy_insert() for details.
*/
struct xfs_extent_busy {
struct rb_node rb_node; /* ag by-bno indexed search tree */
@@ -31,11 +28,32 @@ struct xfs_extent_busy {
#define XFS_EXTENT_BUSY_SKIP_DISCARD 0x02 /* do not discard */
};
+/*
+ * List used to track groups of related busy extents all the way through
+ * to discard completion.
+ */
+struct xfs_busy_extents {
+ struct xfs_mount *mount;
+ struct list_head extent_list;
+ struct work_struct endio_work;
+
+ /*
+ * Owner is the object containing the struct xfs_busy_extents to free
+ * once the busy extents have been processed. If only the
+ * xfs_busy_extents object needs freeing, then point this at itself.
+ */
+ void *owner;
+};
+
void
xfs_extent_busy_insert(struct xfs_trans *tp, struct xfs_perag *pag,
xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
void
+xfs_extent_busy_insert_discard(struct xfs_perag *pag, xfs_agblock_t bno,
+ xfs_extlen_t len, struct list_head *busy_list);
+
+void
xfs_extent_busy_clear(struct xfs_mount *mp, struct list_head *list,
bool do_discard);
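The owner field supports the two lifetimes used in this series: fstrim allocates a standalone xfs_busy_extents and points owner at itself, while the CIL embeds the structure in its checkpoint context and points owner at the containing ctx. Condensed from the hunks in this diff:

	/* standalone (xfs_trim_extents): free just the extents object */
	extents = kzalloc(sizeof(*extents), GFP_KERNEL);
	extents->owner = extents;

	/* embedded (xlog_cil_committed): free the whole CIL context */
	ctx->busy_extents.owner = ctx;

	/* either way, discard completion ends with: kmem_free(extents->owner); */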
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 1c1e6171209d..2b3b05c28e9e 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -584,6 +584,11 @@ xfs_vn_getattr(
}
}
+ if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
+ stat->change_cookie = inode_query_iversion(inode);
+ stat->result_mask |= STATX_CHANGE_COOKIE;
+ }
+
/*
* Note: If you add another clause to set an attribute flag, please
* update attributes_mask below.
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index ebc70aaa299c..67a99d94701e 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -16,8 +16,7 @@
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
-
-struct workqueue_struct *xfs_discard_wq;
+#include "xfs_discard.h"
/*
* Allocate a new ticket. Failing to get a new ticket makes it really hard to
@@ -103,7 +102,7 @@ xlog_cil_ctx_alloc(void)
ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
INIT_LIST_HEAD(&ctx->committing);
- INIT_LIST_HEAD(&ctx->busy_extents);
+ INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
INIT_LIST_HEAD(&ctx->log_items);
INIT_LIST_HEAD(&ctx->lv_chain);
INIT_WORK(&ctx->push_work, xlog_cil_push_work);
@@ -132,7 +131,7 @@ xlog_cil_push_pcp_aggregate(
if (!list_empty(&cilpcp->busy_extents)) {
list_splice_init(&cilpcp->busy_extents,
- &ctx->busy_extents);
+ &ctx->busy_extents.extent_list);
}
if (!list_empty(&cilpcp->log_items))
list_splice_init(&cilpcp->log_items, &ctx->log_items);
@@ -708,76 +707,6 @@ xlog_cil_free_logvec(
}
}
-static void
-xlog_discard_endio_work(
- struct work_struct *work)
-{
- struct xfs_cil_ctx *ctx =
- container_of(work, struct xfs_cil_ctx, discard_endio_work);
- struct xfs_mount *mp = ctx->cil->xc_log->l_mp;
-
- xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
- kmem_free(ctx);
-}
-
-/*
- * Queue up the actual completion to a thread to avoid IRQ-safe locking for
- * pagb_lock. Note that we need a unbounded workqueue, otherwise we might
- * get the execution delayed up to 30 seconds for weird reasons.
- */
-static void
-xlog_discard_endio(
- struct bio *bio)
-{
- struct xfs_cil_ctx *ctx = bio->bi_private;
-
- INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
- queue_work(xfs_discard_wq, &ctx->discard_endio_work);
- bio_put(bio);
-}
-
-static void
-xlog_discard_busy_extents(
- struct xfs_mount *mp,
- struct xfs_cil_ctx *ctx)
-{
- struct list_head *list = &ctx->busy_extents;
- struct xfs_extent_busy *busyp;
- struct bio *bio = NULL;
- struct blk_plug plug;
- int error = 0;
-
- ASSERT(xfs_has_discard(mp));
-
- blk_start_plug(&plug);
- list_for_each_entry(busyp, list, list) {
- trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
- busyp->length);
-
- error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
- XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
- XFS_FSB_TO_BB(mp, busyp->length),
- GFP_NOFS, &bio);
- if (error && error != -EOPNOTSUPP) {
- xfs_info(mp,
- "discard failed for extent [0x%llx,%u], error %d",
- (unsigned long long)busyp->bno,
- busyp->length,
- error);
- break;
- }
- }
-
- if (bio) {
- bio->bi_private = ctx;
- bio->bi_end_io = xlog_discard_endio;
- submit_bio(bio);
- } else {
- xlog_discard_endio_work(&ctx->discard_endio_work);
- }
- blk_finish_plug(&plug);
-}
-
/*
* Mark all items committed and clear busy extents. We free the log vector
* chains in a separate pass so that we unpin the log items as quickly as
@@ -807,8 +736,8 @@ xlog_cil_committed(
xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
ctx->start_lsn, abort);
- xfs_extent_busy_sort(&ctx->busy_extents);
- xfs_extent_busy_clear(mp, &ctx->busy_extents,
+ xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
+ xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
xfs_has_discard(mp) && !abort);
spin_lock(&ctx->cil->xc_push_lock);
@@ -817,10 +746,14 @@ xlog_cil_committed(
xlog_cil_free_logvec(&ctx->lv_chain);
- if (!list_empty(&ctx->busy_extents))
- xlog_discard_busy_extents(mp, ctx);
- else
- kmem_free(ctx);
+ if (!list_empty(&ctx->busy_extents.extent_list)) {
+ ctx->busy_extents.mount = mp;
+ ctx->busy_extents.owner = ctx;
+ xfs_discard_extents(mp, &ctx->busy_extents);
+ return;
+ }
+
+ kmem_free(ctx);
}
void
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index af87648331d5..fa3ad1d7b31c 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -6,6 +6,8 @@
#ifndef __XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__
+#include "xfs_extent_busy.h" /* for struct xfs_busy_extents */
+
struct xfs_buf;
struct xlog;
struct xlog_ticket;
@@ -223,12 +225,11 @@ struct xfs_cil_ctx {
struct xlog_in_core *commit_iclog;
struct xlog_ticket *ticket; /* chkpt ticket */
atomic_t space_used; /* aggregate size of regions */
- struct list_head busy_extents; /* busy extents in chkpt */
+ struct xfs_busy_extents busy_extents;
struct list_head log_items; /* log items in chkpt */
struct list_head lv_chain; /* logvecs being pushed */
struct list_head iclog_entry;
struct list_head committing; /* ctx committing list */
- struct work_struct discard_endio_work;
struct work_struct push_work;
atomic_t order_id;
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
index 4a9bbd3fe120..a7daa522e00f 100644
--- a/fs/xfs/xfs_notify_failure.c
+++ b/fs/xfs/xfs_notify_failure.c
@@ -126,8 +126,8 @@ xfs_dax_notify_ddev_failure(
struct xfs_rmap_irec ri_low = { };
struct xfs_rmap_irec ri_high;
struct xfs_agf *agf;
- xfs_agblock_t agend;
struct xfs_perag *pag;
+ xfs_agblock_t range_agend;
pag = xfs_perag_get(mp, agno);
error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp);
@@ -148,10 +148,10 @@ xfs_dax_notify_ddev_failure(
ri_high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsbno);
agf = agf_bp->b_addr;
- agend = min(be32_to_cpu(agf->agf_length),
+ range_agend = min(be32_to_cpu(agf->agf_length) - 1,
ri_high.rm_startblock);
notify.startblock = ri_low.rm_startblock;
- notify.blockcount = agend - ri_low.rm_startblock;
+ notify.blockcount = range_agend + 1 - ri_low.rm_startblock;
error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
xfs_dax_failure_fn, &notify);
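To see why the notify-failure hunk above converts the exclusive end into an inclusive one, take a hypothetical AG with agf_length = 100 (valid agbnos 0..99), ri_low.rm_startblock = 40 and a failure range ending at agbno 99:

	/*
	 * old: agend = min(100, 99) = 99
	 *      blockcount = 99 - 40 = 59		(last block dropped)
	 * new: range_agend = min(100 - 1, 99) = 99
	 *      blockcount = 99 + 1 - 40 = 60	(covers agbnos 40..99)
	 */

Clamping to agf_length - 1 (the last valid agbno) and computing blockcount as range_agend + 1 - startblock keeps a range that ends exactly at the AG boundary from being truncated by one block.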