Diffstat (limited to 'mm/list_lru.c')
-rw-r--r--  mm/list_lru.c  203
1 file changed, 66 insertions(+), 137 deletions(-)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 488dacd1f8ff..3fdae1688f82 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -52,21 +52,12 @@ static int lru_shrinker_id(struct list_lru *lru)
 static inline struct list_lru_one *
 list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 {
-        struct list_lru_memcg *mlrus;
-        struct list_lru_node *nlru = &lru->node[nid];
-
-        /*
-         * Either lock or RCU protects the array of per cgroup lists
-         * from relocation (see memcg_update_list_lru).
-         */
-        mlrus = rcu_dereference_check(lru->mlrus, lockdep_is_held(&nlru->lock));
-        if (mlrus && idx >= 0) {
-                struct list_lru_per_memcg *mlru;
+        if (list_lru_memcg_aware(lru) && idx >= 0) {
+                struct list_lru_per_memcg *mlru = xa_load(&lru->xa, idx);
 
-                mlru = rcu_dereference_check(mlrus->mlru[idx], true);
                 return mlru ? &mlru->node[nid] : NULL;
         }
-        return &nlru->lru;
+        return &lru->node[nid].lru;
 }
 
 static inline struct list_lru_one *
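The rewritten list_lru_from_memcg_idx() leans on a basic xarray guarantee: xa_load() returns NULL for an index that was never stored, and it may be called under nothing more than the RCU read lock, which is why the open-coded rcu_dereference_check() and resize protection disappear. A minimal sketch of that idiom, using made-up names (my_table, struct my_item, my_lookup) rather than code from this patch:

#include <linux/xarray.h>

struct my_item {
        int payload;
};

static DEFINE_XARRAY(my_table);         /* maps id -> struct my_item * */

static struct my_item *my_lookup(long id)
{
        if (id < 0)             /* negative ids mean "no per-memcg entry" */
                return NULL;
        /* Safe under rcu_read_lock(); NULL if nothing is stored at id. */
        return xa_load(&my_table, id);
}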
@@ -77,7 +68,7 @@ list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
         struct list_lru_one *l = &nlru->lru;
         struct mem_cgroup *memcg = NULL;
 
-        if (!lru->mlrus)
+        if (!list_lru_memcg_aware(lru))
                 goto out;
 
         memcg = mem_cgroup_from_obj(ptr);
@@ -309,16 +300,20 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                       unsigned long *nr_to_walk)
 {
         long isolated = 0;
-        int memcg_idx;
 
         isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
                                       nr_to_walk);
+
+#ifdef CONFIG_MEMCG_KMEM
         if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
-                for_each_memcg_cache_index(memcg_idx) {
+                struct list_lru_per_memcg *mlru;
+                unsigned long index;
+
+                xa_for_each(&lru->xa, index, mlru) {
                         struct list_lru_node *nlru = &lru->node[nid];
 
                         spin_lock(&nlru->lock);
-                        isolated += __list_lru_walk_one(lru, nid, memcg_idx,
+                        isolated += __list_lru_walk_one(lru, nid, index,
                                                         isolate, cb_arg,
                                                         nr_to_walk);
                         spin_unlock(&nlru->lock);
@@ -327,6 +322,8 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                         break;
                 }
         }
+#endif
+
         return isolated;
 }
 EXPORT_SYMBOL_GPL(list_lru_walk_node);
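xa_for_each() above replaces for_each_memcg_cache_index(): instead of probing every id from 0 to memcg_nr_cache_ids and skipping NULL slots, the iterator visits only populated indices. A small sketch of the iteration idiom (hypothetical my_walk, not from the patch); xa_for_each() may be called without holding any lock, since it relies on RCU internally:

#include <linux/printk.h>
#include <linux/xarray.h>

static void my_walk(struct xarray *xa)
{
        unsigned long index;
        void *entry;

        /* Visits only populated slots, in ascending index order. */
        xa_for_each(xa, index, entry)
                pr_info("slot %lu holds %p\n", index, entry);
}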
@@ -338,15 +335,6 @@ static void init_one_lru(struct list_lru_one *l)
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-static void memcg_destroy_list_lru_range(struct list_lru_memcg *mlrus,
-                                         int begin, int end)
-{
-        int i;
-
-        for (i = begin; i < end; i++)
-                kfree(mlrus->mlru[i]);
-}
-
 static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp)
 {
         int nid;
@@ -364,14 +352,7 @@ static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp)
 
 static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
 {
-        struct list_lru_memcg *mlrus;
-        struct list_lru_per_memcg *mlru;
-
-        spin_lock_irq(&lru->lock);
-        mlrus = rcu_dereference_protected(lru->mlrus, true);
-        mlru = rcu_dereference_protected(mlrus->mlru[src_idx], true);
-        rcu_assign_pointer(mlrus->mlru[src_idx], NULL);
-        spin_unlock_irq(&lru->lock);
+        struct list_lru_per_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);
 
         /*
          * The __list_lru_walk_one() can walk the list of this node.
@@ -383,78 +364,27 @@ static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
         kvfree_rcu(mlru, rcu);
 }
 
-static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
+static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
-        struct list_lru_memcg *mlrus;
-        int size = memcg_nr_cache_ids;
-
+        if (memcg_aware)
+                xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
         lru->memcg_aware = memcg_aware;
-        if (!memcg_aware)
-                return 0;
-
-        spin_lock_init(&lru->lock);
-
-        mlrus = kvzalloc(struct_size(mlrus, mlru, size), GFP_KERNEL);
-        if (!mlrus)
-                return -ENOMEM;
-
-        RCU_INIT_POINTER(lru->mlrus, mlrus);
-
-        return 0;
 }
 
 static void memcg_destroy_list_lru(struct list_lru *lru)
 {
-        struct list_lru_memcg *mlrus;
+        XA_STATE(xas, &lru->xa, 0);
+        struct list_lru_per_memcg *mlru;
 
         if (!list_lru_memcg_aware(lru))
                 return;
 
-        /*
-         * This is called when shrinker has already been unregistered,
-         * and nobody can use it. So, there is no need to use kvfree_rcu().
-         */
-        mlrus = rcu_dereference_protected(lru->mlrus, true);
-        memcg_destroy_list_lru_range(mlrus, 0, memcg_nr_cache_ids);
-        kvfree(mlrus);
-}
-
-static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_size)
-{
-        struct list_lru_memcg *old, *new;
-
-        BUG_ON(old_size > new_size);
-
-        old = rcu_dereference_protected(lru->mlrus,
-                                        lockdep_is_held(&list_lrus_mutex));
-        new = kvmalloc(struct_size(new, mlru, new_size), GFP_KERNEL);
-        if (!new)
-                return -ENOMEM;
-
-        spin_lock_irq(&lru->lock);
-        memcpy(&new->mlru, &old->mlru, flex_array_size(new, mlru, old_size));
-        memset(&new->mlru[old_size], 0, flex_array_size(new, mlru, new_size - old_size));
-        rcu_assign_pointer(lru->mlrus, new);
-        spin_unlock_irq(&lru->lock);
-
-        kvfree_rcu(old, rcu);
-        return 0;
-}
-
-int memcg_update_all_list_lrus(int new_size)
-{
-        int ret = 0;
-        struct list_lru *lru;
-        int old_size = memcg_nr_cache_ids;
-
-        mutex_lock(&list_lrus_mutex);
-        list_for_each_entry(lru, &memcg_list_lrus, list) {
-                ret = memcg_update_list_lru(lru, old_size, new_size);
-                if (ret)
-                        break;
+        xas_lock_irq(&xas);
+        xas_for_each(&xas, mlru, ULONG_MAX) {
+                kfree(mlru);
+                xas_store(&xas, NULL);
         }
-        mutex_unlock(&list_lrus_mutex);
-        return ret;
+        xas_unlock_irq(&xas);
 }
 
 static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
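Taken together, memcg_init_list_lru(), memcg_list_lru_free() and memcg_destroy_list_lru() now follow the standard xarray lifecycle. The sketch below condenses it with hypothetical names (my_init, my_free_one, my_destroy), assuming kmalloc'ed entries; XA_FLAGS_LOCK_IRQ makes the xarray's internal spinlock IRQ-safe, which is what permits xa_erase_irq() and xas_lock_irq() here:

#include <linux/slab.h>
#include <linux/xarray.h>

static void my_init(struct xarray *xa)
{
        xa_init_flags(xa, XA_FLAGS_LOCK_IRQ);   /* xa_lock is IRQ-safe */
}

static void my_free_one(struct xarray *xa, unsigned long id)
{
        void *p = xa_erase_irq(xa, id); /* clear the slot, return old entry */

        kfree(p);                       /* kfree(NULL) is a no-op */
}

static void my_destroy(struct xarray *xa)
{
        XA_STATE(xas, xa, 0);
        void *p;

        xas_lock_irq(&xas);
        xas_for_each(&xas, p, ULONG_MAX) {      /* every populated slot */
                kfree(p);
                xas_store(&xas, NULL);          /* clear the slot in place */
        }
        xas_unlock_irq(&xas);
}

(The real destroy path may kfree() directly, without kvfree_rcu(), because the shrinker is already unregistered by then and no lookup can race with it.)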
@@ -521,7 +451,7 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
                 struct mem_cgroup *child;
 
                 child = mem_cgroup_from_css(css);
-                child->kmemcg_id = parent->kmemcg_id;
+                WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
         }
         rcu_read_unlock();
 
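The WRITE_ONCE() pairs with the READ_ONCE() of kmemcg_id in memcg_list_lru_alloc() below: the id is now reloaded there without any lock that excludes reparenting, so both sides use marked accesses to keep the concurrent access well-defined (no compiler tearing or refetching). A generic sketch of the pairing, with a hypothetical my_id:

#include <linux/compiler.h>

static int my_id = -1;

/* Writer: publish a new id while readers may be running concurrently. */
static void my_publish(int id)
{
        WRITE_ONCE(my_id, id);
}

/* Reader: snapshot the id once and use only that snapshot. */
static int my_snapshot(void)
{
        return READ_ONCE(my_id);
}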
@@ -531,21 +461,12 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
         mutex_unlock(&list_lrus_mutex);
 }
 
-static bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
-                                     struct list_lru *lru)
+static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
+                                            struct list_lru *lru)
 {
-        bool allocated;
-        int idx;
-
-        idx = memcg->kmemcg_id;
-        if (unlikely(idx < 0))
-                return true;
+        int idx = memcg->kmemcg_id;
 
-        rcu_read_lock();
-        allocated = !!rcu_access_pointer(rcu_dereference(lru->mlrus)->mlru[idx]);
-        rcu_read_unlock();
-
-        return allocated;
+        return idx < 0 || xa_load(&lru->xa, idx);
 }
 
 int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
@@ -553,11 +474,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
 {
         int i;
         unsigned long flags;
-        struct list_lru_memcg *mlrus;
         struct list_lru_memcg_table {
                 struct list_lru_per_memcg *mlru;
                 struct mem_cgroup *memcg;
         } *table;
+        XA_STATE(xas, &lru->xa, 0);
 
         if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
                 return 0;
@@ -586,27 +507,48 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
                 }
         }
 
-        spin_lock_irqsave(&lru->lock, flags);
-        mlrus = rcu_dereference_protected(lru->mlrus, true);
+        xas_lock_irqsave(&xas, flags);
         while (i--) {
-                int index = table[i].memcg->kmemcg_id;
+                int index = READ_ONCE(table[i].memcg->kmemcg_id);
                 struct list_lru_per_memcg *mlru = table[i].mlru;
 
-                if (index < 0 || rcu_dereference_protected(mlrus->mlru[index], true))
+                xas_set(&xas, index);
+retry:
+                if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
                         kfree(mlru);
-                else
-                        rcu_assign_pointer(mlrus->mlru[index], mlru);
+                } else {
+                        xas_store(&xas, mlru);
+                        if (xas_error(&xas) == -ENOMEM) {
+                                xas_unlock_irqrestore(&xas, flags);
+                                if (xas_nomem(&xas, gfp))
+                                        xas_set_err(&xas, 0);
+                                xas_lock_irqsave(&xas, flags);
+                                /*
+                                 * The xas lock was dropped above, so this
+                                 * memcg may have been reparented in the
+                                 * meantime. Reload the memcg id; see the
+                                 * comments in memcg_reparent_list_lrus()
+                                 * for details.
+                                 */
+                                index = READ_ONCE(table[i].memcg->kmemcg_id);
+                                if (index < 0)
+                                        xas_set_err(&xas, 0);
+                                else if (!xas_error(&xas) && index != xas.xa_index)
+                                        xas_set(&xas, index);
+                                goto retry;
+                        }
+                }
         }
-        spin_unlock_irqrestore(&lru->lock, flags);
-
+        /* Here xas_nomem() frees an unused preallocation instead of allocating. */
+        if (xas.xa_alloc)
+                xas_nomem(&xas, gfp);
+        xas_unlock_irqrestore(&xas, flags);
         kfree(table);
-        return 0;
+        return xas_error(&xas);
 }
 
 #else
-static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
+static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
-        return 0;
 }
 
 static void memcg_destroy_list_lru(struct list_lru *lru)
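The trickiest new pattern above is the retry loop: xas_store() can fail with -ENOMEM because storing at a previously empty index may require allocating internal xarray nodes, and the idiomatic recovery is to drop the lock, let xas_nomem() preallocate, and retry. A condensed sketch of that idiom (hypothetical my_store; essentially the canonical loop from Documentation/core-api/xarray.rst, minus the patch's extra kmemcg_id reloading):

#include <linux/xarray.h>

/* Store @item at @index in @xa, allocating node memory with @gfp. */
static int my_store(struct xarray *xa, unsigned long index,
                    void *item, gfp_t gfp)
{
        XA_STATE(xas, xa, index);

        do {
                xas_lock_irq(&xas);
                if (!xas_load(&xas))            /* keep any existing entry */
                        xas_store(&xas, item);
                xas_unlock_irq(&xas);
                /*
                 * On -ENOMEM, xas_nomem() allocates the missing node with
                 * @gfp and returns true to request a retry; otherwise it
                 * frees any unused preallocation and returns false.
                 */
        } while (xas_nomem(&xas, gfp));

        return xas_error(&xas);
}

memcg_list_lru_alloc() open-codes this loop so it can also recheck kmemcg_id after dropping the lock (the memcg may have been reparented in the meantime), and it makes one final xas_nomem() call to release a preallocated node that ended up unused.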
@@ -618,7 +560,6 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                     struct lock_class_key *key, struct shrinker *shrinker)
 {
         int i;
-        int err = -ENOMEM;
 
 #ifdef CONFIG_MEMCG_KMEM
         if (shrinker)
@@ -626,11 +567,10 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
         else
                 lru->shrinker_id = -1;
 #endif
-        memcg_get_cache_ids();
 
         lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
         if (!lru->node)
-                goto out;
+                return -ENOMEM;
 
         for_each_node(i) {
                 spin_lock_init(&lru->node[i].lock);
@@ -639,18 +579,10 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                 init_one_lru(&lru->node[i].lru);
         }
 
-        err = memcg_init_list_lru(lru, memcg_aware);
-        if (err) {
-                kfree(lru->node);
-                /* Do this so a list_lru_destroy() doesn't crash: */
-                lru->node = NULL;
-                goto out;
-        }
-
+        memcg_init_list_lru(lru, memcg_aware);
         list_lru_register(lru);
-out:
-        memcg_put_cache_ids();
-        return err;
+
+        return 0;
 }
 EXPORT_SYMBOL_GPL(__list_lru_init);
@@ -660,8 +592,6 @@ void list_lru_destroy(struct list_lru *lru)
         if (!lru->node)
                 return;
 
-        memcg_get_cache_ids();
-
         list_lru_unregister(lru);
         memcg_destroy_list_lru(lru);
@@ -671,6 +601,5 @@ void list_lru_destroy(struct list_lru *lru)
 #ifdef CONFIG_MEMCG_KMEM
         lru->shrinker_id = -1;
 #endif
-        memcg_put_cache_ids();
 }
 EXPORT_SYMBOL_GPL(list_lru_destroy);