Diffstat (limited to 'fs/smb/server/vfs_cache.c')
-rw-r--r-- | fs/smb/server/vfs_cache.c | 217 |
1 file changed, 191 insertions, 26 deletions
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index 030f70700036..8d1f30dcba7e 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -8,6 +8,8 @@
 #include <linux/filelock.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include "glob.h"
 #include "vfs_cache.h"
@@ -17,6 +19,7 @@
 #include "mgmt/tree_connect.h"
 #include "mgmt/user_session.h"
 #include "smb_common.h"
+#include "server.h"
 
 #define S_DEL_PENDING		1
 #define S_DEL_ON_CLS		2
@@ -31,6 +34,10 @@ static struct ksmbd_file_table global_ft;
 static atomic_long_t fd_limit;
 static struct kmem_cache *filp_cache;
 
+static bool durable_scavenger_running;
+static DEFINE_MUTEX(durable_scavenger_lock);
+static wait_queue_head_t dh_wq;
+
 void ksmbd_set_fd_limit(unsigned long limit)
 {
 	limit = min(limit, get_max_files());
@@ -165,7 +172,7 @@ static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
 	ci->m_fattr = 0;
 	INIT_LIST_HEAD(&ci->m_fp_list);
 	INIT_LIST_HEAD(&ci->m_op_list);
-	rwlock_init(&ci->m_lock);
+	init_rwsem(&ci->m_lock);
 	ci->m_de = fp->filp->f_path.dentry;
 	return 0;
 }
@@ -181,7 +188,7 @@ static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
 	if (ci)
 		return ci;
 
-	ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
+	ci = kmalloc(sizeof(struct ksmbd_inode), KSMBD_DEFAULT_GFP);
 	if (!ci)
 		return NULL;
 
@@ -254,21 +261,22 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp)
 		ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
 		err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
 					     &filp->f_path,
-					     fp->stream.name);
+					     fp->stream.name,
+					     true);
 		if (err)
 			pr_err("remove xattr failed : %s\n",
 			       fp->stream.name);
 	}
 
 	if (atomic_dec_and_test(&ci->m_count)) {
-		write_lock(&ci->m_lock);
+		down_write(&ci->m_lock);
 		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
 			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
-			write_unlock(&ci->m_lock);
+			up_write(&ci->m_lock);
 			ksmbd_vfs_unlink(filp);
-			write_lock(&ci->m_lock);
+			down_write(&ci->m_lock);
 		}
-		write_unlock(&ci->m_lock);
+		up_write(&ci->m_lock);
 
 		ksmbd_inode_free(ci);
 	}
@@ -279,9 +287,16 @@ static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
 	if (!has_file_id(fp->persistent_id))
 		return;
 
-	write_lock(&global_ft.lock);
 	idr_remove(global_ft.idr, fp->persistent_id);
+}
+
+static void ksmbd_remove_durable_fd(struct ksmbd_file *fp)
+{
+	write_lock(&global_ft.lock);
+	__ksmbd_remove_durable_fd(fp);
 	write_unlock(&global_ft.lock);
+	if (waitqueue_active(&dh_wq))
+		wake_up(&dh_wq);
 }
 
 static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
@@ -289,9 +304,9 @@ static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp
 	if (!has_file_id(fp->volatile_id))
 		return;
 
-	write_lock(&fp->f_ci->m_lock);
+	down_write(&fp->f_ci->m_lock);
 	list_del_init(&fp->node);
-	write_unlock(&fp->f_ci->m_lock);
+	up_write(&fp->f_ci->m_lock);
 
 	write_lock(&ft->lock);
 	idr_remove(ft->idr, fp->volatile_id);
@@ -304,7 +319,7 @@ static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
 	struct ksmbd_lock *smb_lock, *tmp_lock;
 
 	fd_limit_close();
-	__ksmbd_remove_durable_fd(fp);
+	ksmbd_remove_durable_fd(fp);
 	if (ft)
 		__ksmbd_remove_fd(ft, fp);
 
@@ -476,7 +491,10 @@ struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
 	struct ksmbd_file *fp;
 
 	fp = __ksmbd_lookup_fd(&global_ft, id);
-	if (fp && fp->conn) {
+	if (fp && (fp->conn ||
+		   (fp->durable_scavenger_timeout &&
+		    (fp->durable_scavenger_timeout <
+		     jiffies_to_msecs(jiffies))))) {
 		ksmbd_put_durable_fd(fp);
 		fp = NULL;
 	}
@@ -523,17 +541,17 @@ struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
 	if (!ci)
 		return NULL;
 
-	read_lock(&ci->m_lock);
+	down_read(&ci->m_lock);
 	list_for_each_entry(lfp, &ci->m_fp_list, node) {
 		if (inode == file_inode(lfp->filp)) {
 			atomic_dec(&ci->m_count);
 			lfp = ksmbd_fp_get(lfp);
-			read_unlock(&ci->m_lock);
+			up_read(&ci->m_lock);
 			return lfp;
 		}
 	}
 	atomic_dec(&ci->m_count);
-	read_unlock(&ci->m_lock);
+	up_read(&ci->m_lock);
 	return NULL;
 }
 
@@ -559,7 +577,7 @@ static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
 		return -EMFILE;
 	}
 
-	idr_preload(GFP_KERNEL);
+	idr_preload(KSMBD_DEFAULT_GFP);
 	write_lock(&ft->lock);
 	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
 	if (ret >= 0) {
@@ -587,7 +605,7 @@ struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
 	struct ksmbd_file *fp;
 	int ret;
 
-	fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL);
+	fp = kmem_cache_zalloc(filp_cache, KSMBD_DEFAULT_GFP);
 	if (!fp) {
 		pr_err("Failed to allocate memory\n");
 		return ERR_PTR(-ENOMEM);
@@ -693,6 +711,142 @@ static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
 	return fp->tcon != tcon;
 }
 
+static bool ksmbd_durable_scavenger_alive(void)
+{
+	mutex_lock(&durable_scavenger_lock);
+	if (!durable_scavenger_running) {
+		mutex_unlock(&durable_scavenger_lock);
+		return false;
+	}
+	mutex_unlock(&durable_scavenger_lock);
+
+	if (kthread_should_stop())
+		return false;
+
+	if (idr_is_empty(global_ft.idr))
+		return false;
+
+	return true;
+}
+
+static void ksmbd_scavenger_dispose_dh(struct list_head *head)
+{
+	while (!list_empty(head)) {
+		struct ksmbd_file *fp;
+
+		fp = list_first_entry(head, struct ksmbd_file, node);
+		list_del_init(&fp->node);
+		__ksmbd_close_fd(NULL, fp);
+	}
+}
+
+static int ksmbd_durable_scavenger(void *dummy)
+{
+	struct ksmbd_file *fp = NULL;
+	unsigned int id;
+	unsigned int min_timeout = 1;
+	bool found_fp_timeout;
+	LIST_HEAD(scavenger_list);
+	unsigned long remaining_jiffies;
+
+	__module_get(THIS_MODULE);
+
+	set_freezable();
+	while (ksmbd_durable_scavenger_alive()) {
+		if (try_to_freeze())
+			continue;
+
+		found_fp_timeout = false;
+
+		remaining_jiffies = wait_event_timeout(dh_wq,
+				ksmbd_durable_scavenger_alive() == false,
+				__msecs_to_jiffies(min_timeout));
+		if (remaining_jiffies)
+			min_timeout = jiffies_to_msecs(remaining_jiffies);
+		else
+			min_timeout = DURABLE_HANDLE_MAX_TIMEOUT;
+
+		write_lock(&global_ft.lock);
+		idr_for_each_entry(global_ft.idr, fp, id) {
+			if (!fp->durable_timeout)
+				continue;
+
+			if (atomic_read(&fp->refcount) > 1 ||
+			    fp->conn)
+				continue;
+
+			found_fp_timeout = true;
+			if (fp->durable_scavenger_timeout <=
+			    jiffies_to_msecs(jiffies)) {
+				__ksmbd_remove_durable_fd(fp);
+				list_add(&fp->node, &scavenger_list);
+			} else {
+				unsigned long durable_timeout;
+
+				durable_timeout =
+					fp->durable_scavenger_timeout -
+					jiffies_to_msecs(jiffies);
+
+				if (min_timeout > durable_timeout)
+					min_timeout = durable_timeout;
+			}
+		}
+		write_unlock(&global_ft.lock);
+
+		ksmbd_scavenger_dispose_dh(&scavenger_list);
+
+		if (found_fp_timeout == false)
+			break;
+	}
+
+	mutex_lock(&durable_scavenger_lock);
+	durable_scavenger_running = false;
+	mutex_unlock(&durable_scavenger_lock);
+
+	module_put(THIS_MODULE);
+
+	return 0;
+}
+
+void ksmbd_launch_ksmbd_durable_scavenger(void)
+{
+	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
+		return;
+
+	mutex_lock(&durable_scavenger_lock);
+	if (durable_scavenger_running == true) {
+		mutex_unlock(&durable_scavenger_lock);
+		return;
+	}
+
+	durable_scavenger_running = true;
+
+	server_conf.dh_task = kthread_run(ksmbd_durable_scavenger,
+					  (void *)NULL, "ksmbd-durable-scavenger");
+	if (IS_ERR(server_conf.dh_task))
+		pr_err("cannot start conn thread, err : %ld\n",
+		       PTR_ERR(server_conf.dh_task));
+	mutex_unlock(&durable_scavenger_lock);
+}
+
+void ksmbd_stop_durable_scavenger(void)
+{
+	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
+		return;
+
+	mutex_lock(&durable_scavenger_lock);
+	if (!durable_scavenger_running) {
+		mutex_unlock(&durable_scavenger_lock);
+		return;
+	}
+
+	durable_scavenger_running = false;
+	if (waitqueue_active(&dh_wq))
+		wake_up(&dh_wq);
+	mutex_unlock(&durable_scavenger_lock);
+	kthread_stop(server_conf.dh_task);
+}
+
 static bool session_fd_check(struct ksmbd_tree_connect *tcon,
 			     struct ksmbd_file *fp)
 {
@@ -705,18 +859,24 @@ static bool session_fd_check(struct ksmbd_tree_connect *tcon,
 
 	conn = fp->conn;
 	ci = fp->f_ci;
-	write_lock(&ci->m_lock);
+	down_write(&ci->m_lock);
 	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
 		if (op->conn != conn)
 			continue;
+		if (op->conn && atomic_dec_and_test(&op->conn->refcnt))
+			kfree(op->conn);
 		op->conn = NULL;
 	}
-	write_unlock(&ci->m_lock);
+	up_write(&ci->m_lock);
 
 	fp->conn = NULL;
 	fp->tcon = NULL;
 	fp->volatile_id = KSMBD_NO_FID;
 
+	if (fp->durable_timeout)
+		fp->durable_scavenger_timeout =
+			jiffies_to_msecs(jiffies) + fp->durable_timeout;
+
 	return true;
 }
 
@@ -749,11 +909,12 @@ void ksmbd_free_global_file_table(void)
 	unsigned int id;
 
 	idr_for_each_entry(global_ft.idr, fp, id) {
-		__ksmbd_remove_durable_fd(fp);
-		kmem_cache_free(filp_cache, fp);
+		ksmbd_remove_durable_fd(fp);
+		__ksmbd_close_fd(NULL, fp);
 	}
 
-	ksmbd_destroy_file_table(&global_ft);
+	idr_destroy(global_ft.idr);
+	kfree(global_ft.idr);
 }
 
 int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
@@ -762,7 +923,7 @@ int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
 	char *pathname, *ab_pathname;
 	int ret = 0;
 
-	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+	pathname = kmalloc(PATH_MAX, KSMBD_DEFAULT_GFP);
 	if (!pathname)
 		return -EACCES;
 
@@ -801,14 +962,16 @@ int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
 	fp->tcon = work->tcon;
 
 	ci = fp->f_ci;
-	write_lock(&ci->m_lock);
+	down_write(&ci->m_lock);
 	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
 		if (op->conn)
 			continue;
 		op->conn = fp->conn;
+		atomic_inc(&op->conn->refcnt);
 	}
-	write_unlock(&ci->m_lock);
+	up_write(&ci->m_lock);
 
+	fp->f_state = FP_NEW;
 	__open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
 	if (!has_file_id(fp->volatile_id)) {
 		fp->conn = NULL;
@@ -820,7 +983,7 @@ int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
 
 int ksmbd_init_file_table(struct ksmbd_file_table *ft)
 {
-	ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
+	ft->idr = kzalloc(sizeof(struct idr), KSMBD_DEFAULT_GFP);
 	if (!ft->idr)
 		return -ENOMEM;
 
@@ -848,6 +1011,8 @@ int ksmbd_init_file_cache(void)
 	if (!filp_cache)
 		goto out;
 
+	init_waitqueue_head(&dh_wq);
+
 	return 0;
 
 out:
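The scavenger added above keys everything off a per-handle deadline kept in milliseconds: session_fd_check() stamps durable_scavenger_timeout = jiffies_to_msecs(jiffies) + durable_timeout when the owning connection goes away, and the scavenger (or ksmbd_lookup_durable_fd()) treats the handle as dead once that deadline is at or before the current time. The following is a minimal userspace sketch of just that bookkeeping, not part of the patch; dh_state, now_ms, dh_disconnect() and dh_expired() are invented names, and now_ms stands in for jiffies_to_msecs(jiffies).

/*
 * Illustrative userspace model of the durable-handle deadline arithmetic.
 * Not kernel code; names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct dh_state {
	unsigned long durable_timeout;           /* grace period in ms; 0 = not durable */
	unsigned long durable_scavenger_timeout; /* absolute deadline in ms */
};

/* Mirrors what session_fd_check() does when the owning connection drops. */
static void dh_disconnect(struct dh_state *dh, unsigned long now_ms)
{
	if (dh->durable_timeout)
		dh->durable_scavenger_timeout = now_ms + dh->durable_timeout;
}

/* Mirrors the scavenger's expiry test: true means the handle gets disposed. */
static bool dh_expired(const struct dh_state *dh, unsigned long now_ms)
{
	return dh->durable_timeout && dh->durable_scavenger_timeout <= now_ms;
}

int main(void)
{
	struct dh_state dh = { .durable_timeout = 60 * 1000 }; /* 60s grace */

	dh_disconnect(&dh, 1000);                                     /* client drops at t=1s */
	printf("expired at t=30s? %d\n", dh_expired(&dh, 30 * 1000)); /* 0: still reclaimable */
	printf("expired at t=62s? %d\n", dh_expired(&dh, 62 * 1000)); /* 1: scavenger closes it */
	return 0;
}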