Diffstat (limited to 'fs/file_table.c')
-rw-r--r--	fs/file_table.c	653
1 files changed, 393 insertions, 260 deletions
diff --git a/fs/file_table.c b/fs/file_table.c
index b44e4c559786..cd4a3db4659a 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/file_table.c
*
@@ -8,11 +9,12 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
-#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/security.h>
+#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
@@ -20,42 +22,62 @@
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
-#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
-#include <linux/hardirq.h>
#include <linux/task_work.h>
-#include <linux/ima.h>
+#include <linux/swap.h>
+#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include "internal.h"
/* sysctl tunables... */
-struct files_stat_struct files_stat = {
+static struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
-DEFINE_STATIC_LGLOCK(files_lglock);
-
/* SLAB cache for file structures */
-static struct kmem_cache *filp_cachep __read_mostly;
+static struct kmem_cache *filp_cachep __ro_after_init;
+static struct kmem_cache *bfilp_cachep __ro_after_init;
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
-static void file_free_rcu(struct rcu_head *head)
+/* Container for backing file with optional user path */
+struct backing_file {
+ struct file file;
+ union {
+ struct path user_path;
+ freeptr_t bf_freeptr;
+ };
+};
+
+#define backing_file(f) container_of(f, struct backing_file, file)
+
+const struct path *backing_file_user_path(const struct file *f)
{
- struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
+ return &backing_file(f)->user_path;
+}
+EXPORT_SYMBOL_GPL(backing_file_user_path);
- put_cred(f->f_cred);
- kmem_cache_free(filp_cachep, f);
+void backing_file_set_user_path(struct file *f, const struct path *path)
+{
+ backing_file(f)->user_path = *path;
}
+EXPORT_SYMBOL_GPL(backing_file_set_user_path);
static inline void file_free(struct file *f)
{
- percpu_counter_dec(&nr_files);
- file_check_state(f);
- call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
+ security_file_free(f);
+ if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
+ percpu_counter_dec(&nr_files);
+ put_cred(f->f_cred);
+ if (unlikely(f->f_mode & FMODE_BACKING)) {
+ path_put(backing_file_user_path(f));
+ kmem_cache_free(bfilp_cachep, backing_file(f));
+ } else {
+ kmem_cache_free(filp_cachep, f);
+ }
}
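
As an aside on the new container: FMODE_BACKING files embed struct file inside struct backing_file so that stacking users can carry a second, user-visible path alongside the path the file actually operates on, and file_free() above is what drops that extra path reference. A minimal sketch of a caller, assuming the fs-internal declarations are visible; everything except the helpers introduced by this patch is hypothetical:

static struct file *example_open_backing(const struct path *user_path, int flags)
{
	struct file *f;

	f = alloc_empty_backing_file(flags, current_cred());
	if (IS_ERR(f))
		return f;

	/* pin the user-visible path; file_free() will path_put() it */
	path_get(user_path);
	backing_file_set_user_path(f, user_path);

	/* a real caller would now open the backing path into @f */
	return f;
}
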
/*
@@ -75,24 +97,114 @@ unsigned long get_max_files(void)
}
EXPORT_SYMBOL_GPL(get_max_files);
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+
/*
* Handle nr_files sysctl
*/
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
-int proc_nr_files(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
- files_stat.nr_files = get_nr_files();
+ files_stat.nr_files = percpu_counter_sum_positive(&nr_files);
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
-#else
-int proc_nr_files(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+
+static const struct ctl_table fs_stat_sysctls[] = {
+ {
+ .procname = "file-nr",
+ .data = &files_stat,
+ .maxlen = sizeof(files_stat),
+ .mode = 0444,
+ .proc_handler = proc_nr_files,
+ },
+ {
+ .procname = "file-max",
+ .data = &files_stat.max_files,
+ .maxlen = sizeof(files_stat.max_files),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = SYSCTL_LONG_ZERO,
+ .extra2 = SYSCTL_LONG_MAX,
+ },
+ {
+ .procname = "nr_open",
+ .data = &sysctl_nr_open,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = &sysctl_nr_open_min,
+ .extra2 = &sysctl_nr_open_max,
+ },
+};
+
+static int __init init_fs_stat_sysctls(void)
{
- return -ENOSYS;
+ register_sysctl_init("fs", fs_stat_sysctls);
+ if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
+ struct ctl_table_header *hdr;
+
+ hdr = register_sysctl_mount_point("fs/binfmt_misc");
+ kmemleak_not_leak(hdr);
+ }
+ return 0;
}
+fs_initcall(init_fs_stat_sysctls);
#endif
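
The table above is what backs /proc/sys/fs/file-nr, file-max and nr_open once register_sysctl_init() has run. For illustration only (not part of the patch), a small userspace reader for the read-only file-nr entry; historically the middle "unused" field is reported as 0:

#include <stdio.h>

int main(void)
{
	unsigned long nr_alloc, nr_unused, nr_max;
	FILE *f = fopen("/proc/sys/fs/file-nr", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu %lu %lu", &nr_alloc, &nr_unused, &nr_max) == 3)
		printf("allocated=%lu unused=%lu max=%lu\n",
		       nr_alloc, nr_unused, nr_max);
	fclose(f);
	return 0;
}
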
+static int init_file(struct file *f, int flags, const struct cred *cred)
+{
+ int error;
+
+ f->f_cred = get_cred(cred);
+ error = security_file_alloc(f);
+ if (unlikely(error)) {
+ put_cred(f->f_cred);
+ return error;
+ }
+
+ spin_lock_init(&f->f_lock);
+ /*
+ * Note that f_pos_lock is only used for files raising
+ * FMODE_ATOMIC_POS and directories. Other files such as pipes
+ * don't need it and since f_pos_lock is in a union may reuse
+ * the space for other purposes. They are expected to initialize
+ * the respective member when opening the file.
+ */
+ mutex_init(&f->f_pos_lock);
+ memset(&f->__f_path, 0, sizeof(f->f_path));
+ memset(&f->f_ra, 0, sizeof(f->f_ra));
+
+ f->f_flags = flags;
+ f->f_mode = OPEN_FMODE(flags);
+
+ f->f_op = NULL;
+ f->f_mapping = NULL;
+ f->private_data = NULL;
+ f->f_inode = NULL;
+ f->f_owner = NULL;
+#ifdef CONFIG_EPOLL
+ f->f_ep = NULL;
+#endif
+
+ f->f_iocb_flags = 0;
+ f->f_pos = 0;
+ f->f_wb_err = 0;
+ f->f_sb_err = 0;
+
+ /*
+ * We're SLAB_TYPESAFE_BY_RCU so initialize f_ref last. While
+ * fget-rcu pattern users need to be able to handle spurious
+ * refcount bumps we should reinitialize the reused file first.
+ */
+ file_ref_init(&f->f_ref, 1);
+ /*
+ * Disable permission and pre-content events for all files by default.
+ * They may be enabled later by fsnotify_open_perm_and_set_mode().
+ */
+ file_set_fsnotify_mode(f, FMODE_NONOTIFY_PERM);
+ return 0;
+}
+
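
The ordering constraint in the comment above exists because filp_cachep is created with SLAB_TYPESAFE_BY_RCU (see files_init() at the end of this patch): a lockless reader can still hold a pointer to a struct file that has been freed and reused, so it must take a speculative reference and then revalidate. A simplified, illustrative sketch of that fget-rcu pattern; the real lookup lives in fs/file.c and does more checking:

static struct file *example_fget_rcu(struct fdtable *fdt, unsigned int fd)
{
	struct file *file = NULL;

	rcu_read_lock();
	if (fd < fdt->max_fds) {
		file = rcu_dereference(fdt->fd[fd]);
		/* the refcount may already be zero on a dying file */
		if (file && !file_ref_get(&file->f_ref))
			file = NULL;
		/* the slot may now hold a different, reused file: recheck */
		if (file && rcu_dereference(fdt->fd[fd]) != file) {
			fput(file);
			file = NULL;
		}
	}
	rcu_read_unlock();
	return file;
}
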
/* Find an unused file structure and return a pointer to it.
* Returns an error pointer if some error happened, e.g. we are over the file
* structures limit, run out of memory or the operation is not permitted.
@@ -103,9 +215,8 @@ int proc_nr_files(ctl_table *table, int write,
* done, you will imbalance the mount's writer count
* and get a warning at __fput() time.
*/
-struct file *get_empty_filp(void)
+struct file *alloc_empty_file(int flags, const struct cred *cred)
{
- const struct cred *cred = current_cred();
static long old_max;
struct file *f;
int error;
@@ -113,7 +224,8 @@ struct file *get_empty_filp(void)
/*
* Privileged users can go above max_files
*/
- if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
+ if (unlikely(get_nr_files() >= files_stat.max_files) &&
+ !capable(CAP_SYS_ADMIN)) {
/*
* percpu_counters are inaccurate. Do an expensive check before
* we go and fail.
@@ -122,24 +234,18 @@ struct file *get_empty_filp(void)
goto over;
}
- f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
+ f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
if (unlikely(!f))
return ERR_PTR(-ENOMEM);
- percpu_counter_inc(&nr_files);
- f->f_cred = get_cred(cred);
- error = security_file_alloc(f);
+ error = init_file(f, flags, cred);
if (unlikely(error)) {
- file_free(f);
+ kmem_cache_free(filp_cachep, f);
return ERR_PTR(error);
}
- INIT_LIST_HEAD(&f->f_u.fu_list);
- atomic_long_set(&f->f_count, 1);
- rwlock_init(&f->f_owner.lock);
- spin_lock_init(&f->f_lock);
- eventpoll_init_file(f);
- /* f->f_version: 0 */
+ percpu_counter_inc(&nr_files);
+
return f;
over:
@@ -151,74 +257,184 @@ over:
return ERR_PTR(-ENFILE);
}
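
The "expensive check" mentioned in the comment is the difference between the two percpu_counter read flavours: the fast read can be off by roughly batch * nr_cpus, so the allocator only pays for the exact sum once the cheap read already looks over the limit. The pattern in isolation, with illustrative names:

static bool example_over_limit(struct percpu_counter *counter, unsigned long limit)
{
	/* approximate: per-CPU deltas are not folded in */
	if (likely((unsigned long)percpu_counter_read_positive(counter) < limit))
		return false;
	/* exact but slow: walks every CPU's delta */
	return (unsigned long)percpu_counter_sum_positive(counter) >= limit;
}
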
-/**
- * alloc_file - allocate and initialize a 'struct file'
- * @mnt: the vfsmount on which the file will reside
- * @dentry: the dentry representing the new file
- * @mode: the mode with which the new file will be opened
- * @fop: the 'struct file_operations' for the new file
+/*
+ * Variant of alloc_empty_file() that doesn't check and modify nr_files.
*
- * Use this instead of get_empty_filp() to get a new
- * 'struct file'. Do so because of the same initialization
- * pitfalls reasons listed for init_file(). This is a
- * preferred interface to using init_file().
+ * This is only for kernel internal use, and the allocated file must not be
+ * installed into file tables or such.
+ */
+struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
+{
+ struct file *f;
+ int error;
+
+ f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
+ if (unlikely(!f))
+ return ERR_PTR(-ENOMEM);
+
+ error = init_file(f, flags, cred);
+ if (unlikely(error)) {
+ kmem_cache_free(filp_cachep, f);
+ return ERR_PTR(error);
+ }
+
+ f->f_mode |= FMODE_NOACCOUNT;
+
+ return f;
+}
+
+/*
+ * Variant of alloc_empty_file() that allocates a backing_file container
+ * and doesn't check and modify nr_files.
*
- * If all the callers of init_file() are eliminated, its
- * code should be moved into this function.
+ * This is only for kernel internal use, and the allocated file must not be
+ * installed into file tables or such.
*/
-struct file *alloc_file(struct path *path, fmode_t mode,
- const struct file_operations *fop)
+struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
{
- struct file *file;
+ struct backing_file *ff;
+ int error;
- file = get_empty_filp();
- if (IS_ERR(file))
- return file;
+ ff = kmem_cache_alloc(bfilp_cachep, GFP_KERNEL);
+ if (unlikely(!ff))
+ return ERR_PTR(-ENOMEM);
- file->f_path = *path;
+ error = init_file(&ff->file, flags, cred);
+ if (unlikely(error)) {
+ kmem_cache_free(bfilp_cachep, ff);
+ return ERR_PTR(error);
+ }
+
+ ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
+ return &ff->file;
+}
+
+/**
+ * file_init_path - initialize a 'struct file' based on path
+ *
+ * @file: the file to set up
+ * @path: the (dentry, vfsmount) pair for the new file
+ * @fop: the 'struct file_operations' for the new file
+ */
+static void file_init_path(struct file *file, const struct path *path,
+ const struct file_operations *fop)
+{
+ file->__f_path = *path;
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
- file->f_mode = mode;
+ file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
+ file->f_sb_err = file_sample_sb_err(file);
+ if (fop->llseek)
+ file->f_mode |= FMODE_LSEEK;
+ if ((file->f_mode & FMODE_READ) &&
+ likely(fop->read || fop->read_iter))
+ file->f_mode |= FMODE_CAN_READ;
+ if ((file->f_mode & FMODE_WRITE) &&
+ likely(fop->write || fop->write_iter))
+ file->f_mode |= FMODE_CAN_WRITE;
+ file->f_iocb_flags = iocb_flags(file);
+ file->f_mode |= FMODE_OPENED;
file->f_op = fop;
-
- /*
- * These mounts don't really matter in practice
- * for r/o bind mounts. They aren't userspace-
- * visible. We do this for consistency, and so
- * that we can do debugging checks at __fput()
- */
- if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
- file_take_write(file);
- WARN_ON(mnt_clone_write(path->mnt));
- }
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(path->dentry->d_inode);
- return file;
}
-EXPORT_SYMBOL(alloc_file);
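
file_init_path() above derives the capability bits from the supplied file_operations rather than trusting the opener: FMODE_LSEEK only when ->llseek exists, FMODE_CAN_READ/FMODE_CAN_WRITE only when a matching read or write method is present. To illustrate with a hypothetical write-only set of operations (a sketch, not from this patch):

static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	size_t len = iov_iter_count(from);

	iov_iter_advance(from, len);	/* discard the data, /dev/null style */
	return len;
}

static const struct file_operations example_wronly_fops = {
	.owner		= THIS_MODULE,
	.write_iter	= example_write_iter,
};

A file opened O_WRONLY over these operations gets FMODE_WRITE and FMODE_CAN_WRITE but neither FMODE_CAN_READ nor FMODE_LSEEK, so vfs_read() and vfs_llseek() fail up front instead of reaching the driver.
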
/**
- * drop_file_write_access - give up ability to write to a file
- * @file: the file to which we will stop writing
+ * alloc_file - allocate and initialize a 'struct file'
*
- * This is a central place which will give up the ability
- * to write to @file, along with access to write through
- * its vfsmount.
+ * @path: the (dentry, vfsmount) pair for the new file
+ * @flags: O_... flags with which the new file will be opened
+ * @fop: the 'struct file_operations' for the new file
*/
-static void drop_file_write_access(struct file *file)
+static struct file *alloc_file(const struct path *path, int flags,
+ const struct file_operations *fop)
{
- struct vfsmount *mnt = file->f_path.mnt;
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct file *file;
- put_write_access(inode);
+ file = alloc_empty_file(flags, current_cred());
+ if (!IS_ERR(file))
+ file_init_path(file, path, fop);
+ return file;
+}
- if (special_file(inode->i_mode))
- return;
- if (file_check_writeable(file) != 0)
- return;
- __mnt_drop_write(mnt);
- file_release_write(file);
+static inline int alloc_path_pseudo(const char *name, struct inode *inode,
+ struct vfsmount *mnt, struct path *path)
+{
+ path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name));
+ if (!path->dentry)
+ return -ENOMEM;
+ path->mnt = mntget(mnt);
+ d_instantiate(path->dentry, inode);
+ return 0;
+}
+
+struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
+ const char *name, int flags,
+ const struct file_operations *fops)
+{
+ int ret;
+ struct path path;
+ struct file *file;
+
+ ret = alloc_path_pseudo(name, inode, mnt, &path);
+ if (ret)
+ return ERR_PTR(ret);
+
+ file = alloc_file(&path, flags, fops);
+ if (IS_ERR(file)) {
+ ihold(inode);
+ path_put(&path);
+ return file;
+ }
+ /*
+ * Disable all fsnotify events for pseudo files by default.
+ * They may be enabled by caller with file_set_fsnotify_mode().
+ */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY);
+ return file;
+}
+EXPORT_SYMBOL(alloc_file_pseudo);
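
alloc_file_pseudo() is the entry point for files that are never reachable by path lookup (anonymous inodes, shmem-style objects and similar). A hedged sketch of the usual calling convention, assuming the driver already owns a vfsmount and a reference on the inode; on success that inode reference is handed over to the new file, on failure the caller keeps it:

static struct file *example_new_pseudo_file(struct vfsmount *mnt, struct inode *inode,
					    const struct file_operations *fops)
{
	struct file *file;

	file = alloc_file_pseudo(inode, mnt, "[example]", O_RDWR, fops);
	if (IS_ERR(file))
		iput(inode);	/* we still own our inode reference on failure */
	return file;
}
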
+
+struct file *alloc_file_pseudo_noaccount(struct inode *inode,
+ struct vfsmount *mnt, const char *name,
+ int flags,
+ const struct file_operations *fops)
+{
+ int ret;
+ struct path path;
+ struct file *file;
+
+ ret = alloc_path_pseudo(name, inode, mnt, &path);
+ if (ret)
+ return ERR_PTR(ret);
+
+ file = alloc_empty_file_noaccount(flags, current_cred());
+ if (IS_ERR(file)) {
+ ihold(inode);
+ path_put(&path);
+ return file;
+ }
+ file_init_path(file, &path, fops);
+ /*
+ * Disable all fsnotify events for pseudo files by default.
+ * They may be enabled by caller with file_set_fsnotify_mode().
+ */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY);
+ return file;
+}
+EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);
+
+struct file *alloc_file_clone(struct file *base, int flags,
+ const struct file_operations *fops)
+{
+ struct file *f;
+
+ f = alloc_file(&base->f_path, flags, fops);
+ if (!IS_ERR(f)) {
+ path_get(&f->f_path);
+ f->f_mapping = base->f_mapping;
+ }
+ return f;
}
/* the real guts of fput() - releasing the last reference to file
@@ -228,6 +444,10 @@ static void __fput(struct file *file)
struct dentry *dentry = file->f_path.dentry;
struct vfsmount *mnt = file->f_path.mnt;
struct inode *inode = file->f_inode;
+ fmode_t mode = file->f_mode;
+
+ if (unlikely(!(file->f_mode & FMODE_OPENED)))
+ goto out;
might_sleep();
@@ -237,51 +457,47 @@ static void __fput(struct file *file)
* in the file cleanup chain.
*/
eventpoll_release(file);
- locks_remove_flock(file);
+ locks_remove_file(file);
+ security_file_release(file);
if (unlikely(file->f_flags & FASYNC)) {
- if (file->f_op && file->f_op->fasync)
+ if (file->f_op->fasync)
file->f_op->fasync(-1, file, 0);
}
- ima_file_free(file);
- if (file->f_op && file->f_op->release)
+ if (file->f_op->release)
file->f_op->release(inode, file);
- security_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
- !(file->f_mode & FMODE_PATH))) {
+ !(mode & FMODE_PATH))) {
cdev_put(inode->i_cdev);
}
fops_put(file->f_op);
- put_pid(file->f_owner.pid);
- if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
- i_readcount_dec(inode);
- if (file->f_mode & FMODE_WRITE)
- drop_file_write_access(file);
- file->f_path.dentry = NULL;
- file->f_path.mnt = NULL;
- file->f_inode = NULL;
- file_free(file);
+ file_f_owner_release(file);
+ put_file_access(file);
dput(dentry);
+ if (unlikely(mode & FMODE_NEED_UNMOUNT))
+ dissolve_on_fput(mnt);
mntput(mnt);
+out:
+ file_free(file);
}
static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
struct llist_node *node = llist_del_all(&delayed_fput_list);
- struct llist_node *next;
+ struct file *f, *t;
- for (; node; node = next) {
- next = llist_next(node);
- __fput(llist_entry(node, struct file, f_u.fu_llist));
- }
+ llist_for_each_entry_safe(f, t, node, f_llist)
+ __fput(f);
}
static void ____fput(struct callback_head *work)
{
- __fput(container_of(work, struct file, f_u.fu_rcuhead));
+ __fput(container_of(work, struct file, f_task_work));
}
+static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
+
/*
* If kernel thread really needs to have the final fput() it has done
* to complete, call this. The only user right now is the boot - we
@@ -295,32 +511,40 @@ static void ____fput(struct callback_head *work)
void flush_delayed_fput(void)
{
delayed_fput(NULL);
+ flush_delayed_work(&delayed_fput_work);
}
+EXPORT_SYMBOL_GPL(flush_delayed_fput);
+
+static void __fput_deferred(struct file *file)
+{
+ struct task_struct *task = current;
-static DECLARE_WORK(delayed_fput_work, delayed_fput);
+ if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
+ file_free(file);
+ return;
+ }
+
+ if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
+ init_task_work(&file->f_task_work, ____fput);
+ if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
+ return;
+ /*
+ * After this task has run exit_task_work(),
+ * task_work_add() will fail. Fall through to delayed
+ * fput to avoid leaking *file.
+ */
+ }
+
+ if (llist_add(&file->f_llist, &delayed_fput_list))
+ schedule_delayed_work(&delayed_fput_work, 1);
+}
void fput(struct file *file)
{
- if (atomic_long_dec_and_test(&file->f_count)) {
- struct task_struct *task = current;
-
- file_sb_list_del(file);
- if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
- init_task_work(&file->f_u.fu_rcuhead, ____fput);
- if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
- return;
- /*
- * After this task has run exit_task_work(),
- * task_work_add() will fail. free_ipc_ns()->
- * shm_destroy() can do this. Fall through to delayed
- * fput to avoid leaking *file.
- */
- }
-
- if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
- schedule_work(&delayed_fput_work);
- }
+ if (unlikely(file_ref_put(&file->f_ref)))
+ __fput_deferred(file);
}
+EXPORT_SYMBOL(fput);
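
Note that even with the new file_ref_t fast path, the final fput() remains asynchronous: __fput_deferred() queues the teardown on task_work for normal tasks and on the delayed workqueue for kernel threads and interrupt context. Code that must observe the release before proceeding therefore still has to flush, as the comment above flush_delayed_fput() describes. An illustrative sketch for a kernel thread holding the last reference:

static void example_kthread_close(struct file *file)
{
	fput(file);		/* PF_KTHREAD: lands on delayed_fput_list */
	flush_delayed_fput();	/* ensure __fput() has actually run */
}
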
/*
* synchronous analog of fput(); for kernel threads that might be needed
@@ -332,156 +556,65 @@ void fput(struct file *file)
*/
void __fput_sync(struct file *file)
{
- if (atomic_long_dec_and_test(&file->f_count)) {
- struct task_struct *task = current;
- file_sb_list_del(file);
- BUG_ON(!(task->flags & PF_KTHREAD));
+ if (file_ref_put(&file->f_ref))
__fput(file);
- }
-}
-
-EXPORT_SYMBOL(fput);
-
-void put_filp(struct file *file)
-{
- if (atomic_long_dec_and_test(&file->f_count)) {
- security_file_free(file);
- file_sb_list_del(file);
- file_free(file);
- }
}
+EXPORT_SYMBOL(__fput_sync);
-static inline int file_list_cpu(struct file *file)
-{
-#ifdef CONFIG_SMP
- return file->f_sb_list_cpu;
-#else
- return smp_processor_id();
-#endif
-}
-
-/* helper for file_sb_list_add to reduce ifdefs */
-static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
-{
- struct list_head *list;
-#ifdef CONFIG_SMP
- int cpu;
- cpu = smp_processor_id();
- file->f_sb_list_cpu = cpu;
- list = per_cpu_ptr(sb->s_files, cpu);
-#else
- list = &sb->s_files;
-#endif
- list_add(&file->f_u.fu_list, list);
-}
-
-/**
- * file_sb_list_add - add a file to the sb's file list
- * @file: file to add
- * @sb: sb to add it to
+/*
+ * Equivalent to __fput_sync(), but optimized for being called with the last
+ * reference.
*
- * Use this function to associate a file with the superblock of the inode it
- * refers to.
+ * See file_ref_put_close() for details.
*/
-void file_sb_list_add(struct file *file, struct super_block *sb)
+void fput_close_sync(struct file *file)
{
- lg_local_lock(&files_lglock);
- __file_sb_list_add(file, sb);
- lg_local_unlock(&files_lglock);
+ if (likely(file_ref_put_close(&file->f_ref)))
+ __fput(file);
}
-/**
- * file_sb_list_del - remove a file from the sb's file list
- * @file: file to remove
- * @sb: sb to remove it from
+/*
+ * Equivalent to fput(), but optimized for being called with the last
+ * reference.
*
- * Use this function to remove a file from its superblock.
+ * See file_ref_put_close() for details.
*/
-void file_sb_list_del(struct file *file)
+void fput_close(struct file *file)
{
- if (!list_empty(&file->f_u.fu_list)) {
- lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
- list_del_init(&file->f_u.fu_list);
- lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
- }
-}
-
-#ifdef CONFIG_SMP
-
-/*
- * These macros iterate all files on all CPUs for a given superblock.
- * files_lglock must be held globally.
- */
-#define do_file_list_for_each_entry(__sb, __file) \
-{ \
- int i; \
- for_each_possible_cpu(i) { \
- struct list_head *list; \
- list = per_cpu_ptr((__sb)->s_files, i); \
- list_for_each_entry((__file), list, f_u.fu_list)
-
-#define while_file_list_for_each_entry \
- } \
+ if (file_ref_put_close(&file->f_ref))
+ __fput_deferred(file);
}
-#else
-
-#define do_file_list_for_each_entry(__sb, __file) \
-{ \
- struct list_head *list; \
- list = &(sb)->s_files; \
- list_for_each_entry((__file), list, f_u.fu_list)
-
-#define while_file_list_for_each_entry \
+void __init files_init(void)
+{
+ struct kmem_cache_args args = {
+ .use_freeptr_offset = true,
+ .freeptr_offset = offsetof(struct file, f_freeptr),
+ };
+
+ filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+ SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
+
+ args.freeptr_offset = offsetof(struct backing_file, bf_freeptr);
+ bfilp_cachep = kmem_cache_create("bfilp", sizeof(struct backing_file),
+ &args, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+ SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
+ percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
-#endif
-
-/**
- * mark_files_ro - mark all files read-only
- * @sb: superblock in question
- *
- * All files are marked read-only. We don't care about pending
- * delete files so this should be used in 'force' mode only.
+/*
+ * One file with associated inode and dcache is very roughly 1K. Per default
+ * do not use more than 10% of our memory for files.
*/
-void mark_files_ro(struct super_block *sb)
+void __init files_maxfiles_init(void)
{
- struct file *f;
-
- lg_global_lock(&files_lglock);
- do_file_list_for_each_entry(sb, f) {
- if (!S_ISREG(file_inode(f)->i_mode))
- continue;
- if (!file_count(f))
- continue;
- if (!(f->f_mode & FMODE_WRITE))
- continue;
- spin_lock(&f->f_lock);
- f->f_mode &= ~FMODE_WRITE;
- spin_unlock(&f->f_lock);
- if (file_check_writeable(f) != 0)
- continue;
- __mnt_drop_write(f->f_path.mnt);
- file_release_write(f);
- } while_file_list_for_each_entry;
- lg_global_unlock(&files_lglock);
-}
-
-void __init files_init(unsigned long mempages)
-{
unsigned long n;
+ unsigned long nr_pages = totalram_pages();
+ unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;
- filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+ memreserve = min(memreserve, nr_pages - 1);
+ n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
- /*
- * One file with associated inode and dcache is very roughly 1K.
- * Per default don't use more than 10% of our memory for files.
- */
-
- n = (mempages * (PAGE_SIZE / 1024)) / 10;
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
- files_defer_init();
- lg_lock_init(&files_lglock, "files_lglock");
- percpu_counter_init(&nr_files, 0);
-}
+}
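
For a sense of scale (a worked example, not part of the patch): with 4 KiB pages and 16 GiB of RAM, nr_pages is 4,194,304; ignoring the early-boot memreserve, n = 4,194,304 * (4096 / 1024) / 10, roughly 1.68 million, so file-max defaults to about 1.7 million files and is clamped to never fall below NR_FILE (8192).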