author     Glauber Costa <glommer@openvz.org>   2013-08-28 10:18:18 +1000
committer  Al Viro <viro@zeniv.linux.org.uk>    2013-09-10 18:56:32 -0400
commit     5ca302c8e502ca53b7d75f12127ec0289904003a (patch)
tree       80a5b248c01fc3f33392a0b6ef14a2baab86cdb0
parent     a0b02131c5fcd8545b867db72224b3659e813f10 (diff)
list_lru: dynamically adjust node arrays
We currently use a compile-time constant to size the node array for the
list_lru structure. Due to this, we don't need to allocate any memory at
initialization time. But as a consequence, the structures that contain
embedded list_lru lists can become way too big (the superblock, for
instance, contains two of them). This patch aims at ameliorating this
situation by dynamically allocating the node arrays with the
firmware-provided nr_node_ids.

Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
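For a sense of the sizes involved, a userspace back-of-the-envelope sketch
(the build parameters below are hypothetical, assuming CONFIG_NODES_SHIFT=10
and 64-byte cachelines; since each list_lru_node is
____cacheline_aligned_in_smp, every array element occupies at least one
cacheline):

#include <stdio.h>

/* Hypothetical build parameters, for illustration only. */
#define MAX_NUMNODES (1 << 10)  /* CONFIG_NODES_SHIFT = 10 */
#define NODE_SIZE    64         /* one cacheline per list_lru_node */

int main(void)
{
        int nr_node_ids = 1;    /* firmware-reported count on a single-node box */

        /* Before: fixed-size array embedded in every list_lru. */
        printf("embedded node[MAX_NUMNODES]: %d bytes\n",
               MAX_NUMNODES * NODE_SIZE);       /* 65536 */

        /* After: one pointer plus a kzalloc'd array sized at runtime. */
        printf("dynamic node array:          %d bytes\n",
               nr_node_ids * NODE_SIZE);        /* 64 */
        return 0;
}

On such a configuration the embedded array alone costs 64 KiB per list_lru
(and the superblock embeds two of them), while the dynamic scheme needs one
pointer plus 64 bytes; the trade-off is that initialization can now fail,
which is what the error handling in the hunks below deals with.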
-rw-r--r--  fs/super.c                11
-rw-r--r--  fs/xfs/xfs_buf.c           6
-rw-r--r--  fs/xfs/xfs_qm.c           10
-rw-r--r--  include/linux/list_lru.h  13
-rw-r--r--  mm/list_lru.c             14
5 files changed, 37 insertions, 17 deletions
diff --git a/fs/super.c b/fs/super.c
index 181d42e2abff..269d96857caa 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -195,8 +195,12 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
                 INIT_HLIST_NODE(&s->s_instances);
                 INIT_HLIST_BL_HEAD(&s->s_anon);
                 INIT_LIST_HEAD(&s->s_inodes);
-                list_lru_init(&s->s_dentry_lru);
-                list_lru_init(&s->s_inode_lru);
+
+                if (list_lru_init(&s->s_dentry_lru))
+                        goto err_out;
+                if (list_lru_init(&s->s_inode_lru))
+                        goto err_out_dentry_lru;
+
                 INIT_LIST_HEAD(&s->s_mounts);
                 init_rwsem(&s->s_umount);
                 lockdep_set_class(&s->s_umount, &type->s_umount_key);
@@ -236,6 +240,9 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
         }
 out:
         return s;
+
+err_out_dentry_lru:
+        list_lru_destroy(&s->s_dentry_lru);
 err_out:
         security_sb_free(s);
 #ifdef CONFIG_SMP
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index d46f6a3dc1de..49fdb7bed481 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1592,6 +1592,7 @@ xfs_free_buftarg(
         struct xfs_mount        *mp,
         struct xfs_buftarg      *btp)
 {
+        list_lru_destroy(&btp->bt_lru);
         unregister_shrinker(&btp->bt_shrinker);
 
         if (mp->m_flags & XFS_MOUNT_BARRIER)
@@ -1666,9 +1667,12 @@ xfs_alloc_buftarg(
         if (!btp->bt_bdi)
                 goto error;
 
-        list_lru_init(&btp->bt_lru);
         if (xfs_setsize_buftarg_early(btp, bdev))
                 goto error;
+
+        if (list_lru_init(&btp->bt_lru))
+                goto error;
+
         btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
         btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
         btp->bt_shrinker.seeks = DEFAULT_SEEKS;
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index a29169b062e3..7f4138629a80 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -831,11 +831,18 @@ xfs_qm_init_quotainfo(
 
         qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
 
+        if ((error = list_lru_init(&qinf->qi_lru))) {
+                kmem_free(qinf);
+                mp->m_quotainfo = NULL;
+                return error;
+        }
+
         /*
          * See if quotainodes are setup, and if not, allocate them,
          * and change the superblock accordingly.
          */
         if ((error = xfs_qm_init_quotainos(mp))) {
+                list_lru_destroy(&qinf->qi_lru);
                 kmem_free(qinf);
                 mp->m_quotainfo = NULL;
                 return error;
@@ -846,8 +853,6 @@ xfs_qm_init_quotainfo(
         INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
         mutex_init(&qinf->qi_tree_lock);
 
-        list_lru_init(&qinf->qi_lru);
-
         /* mutex used to serialize quotaoffs */
         mutex_init(&qinf->qi_quotaofflock);
 
@@ -935,6 +940,7 @@ xfs_qm_destroy_quotainfo(
         qi = mp->m_quotainfo;
         ASSERT(qi != NULL);
 
+        list_lru_destroy(&qi->qi_lru);
         unregister_shrinker(&qi->qi_shrinker);
 
         if (qi->qi_uquotaip) {
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 4d02ad3badab..3ce541753c88 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -27,20 +27,11 @@ struct list_lru_node {
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
-        /*
-         * Because we use a fixed-size array, this struct can be very big if
-         * MAX_NUMNODES is big. If this becomes a problem this is fixable by
-         * turning this into a pointer and dynamically allocating this to
-         * nr_node_ids. This quantity is firwmare-provided, and still would
-         * provide room for all nodes at the cost of a pointer lookup and an
-         * extra allocation. Because that allocation will most likely come from
-         * a different slab cache than the main structure holding this
-         * structure, we may very well fail.
-         */
-        struct list_lru_node node[MAX_NUMNODES];
+        struct list_lru_node *node;
         nodemask_t active_nodes;
 };
 
+void list_lru_destroy(struct list_lru *lru);
 int list_lru_init(struct list_lru *lru);
 
 /**
diff --git a/mm/list_lru.c b/mm/list_lru.c
index f91c24188573..72467914b856 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/list_lru.h>
+#include <linux/slab.h>
 
 bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
@@ -115,9 +116,14 @@ EXPORT_SYMBOL_GPL(list_lru_walk_node);
 int list_lru_init(struct list_lru *lru)
 {
         int i;
+        size_t size = sizeof(*lru->node) * nr_node_ids;
+
+        lru->node = kzalloc(size, GFP_KERNEL);
+        if (!lru->node)
+                return -ENOMEM;
 
         nodes_clear(lru->active_nodes);
-        for (i = 0; i < MAX_NUMNODES; i++) {
+        for (i = 0; i < nr_node_ids; i++) {
                 spin_lock_init(&lru->node[i].lock);
                 INIT_LIST_HEAD(&lru->node[i].list);
                 lru->node[i].nr_items = 0;
@@ -125,3 +131,9 @@ int list_lru_init(struct list_lru *lru)
         return 0;
 }
 EXPORT_SYMBOL_GPL(list_lru_init);
+
+void list_lru_destroy(struct list_lru *lru)
+{
+        kfree(lru->node);
+}
+EXPORT_SYMBOL_GPL(list_lru_destroy);
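
The resulting API contract, visible across the hunks above: list_lru_init()
now allocates the node array and can fail with -ENOMEM, and every
successfully initialized lru must be paired with list_lru_destroy(). A
minimal caller-side sketch in the style of the alloc_super() change (struct
my_cache and its fields are invented for illustration):

#include <linux/list_lru.h>
#include <linux/slab.h>

/* Hypothetical structure embedding two LRUs, as the superblock does. */
struct my_cache {
        struct list_lru objs;
        struct list_lru dentries;
};

static struct my_cache *my_cache_alloc(void)
{
        struct my_cache *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return NULL;
        if (list_lru_init(&c->objs))            /* may now return -ENOMEM */
                goto err_free;
        if (list_lru_init(&c->dentries))
                goto err_objs;
        return c;

err_objs:
        list_lru_destroy(&c->objs);     /* unwind only what was initialized */
err_free:
        kfree(c);
        return NULL;
}

static void my_cache_free(struct my_cache *c)
{
        list_lru_destroy(&c->dentries);
        list_lru_destroy(&c->objs);
        kfree(c);
}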