Diffstat (limited to 'mm/list_lru.c')
-rw-r--r--	mm/list_lru.c	673
1 file changed, 321 insertions(+), 352 deletions(-)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 9222910ab1cb..ec48b5dadf51 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -13,20 +13,32 @@
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
+#include "internal.h"
-#ifdef CONFIG_MEMCG_KMEM
-static LIST_HEAD(list_lrus);
+#ifdef CONFIG_MEMCG
+static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);
+static inline bool list_lru_memcg_aware(struct list_lru *lru)
+{
+ return lru->memcg_aware;
+}
+
static void list_lru_register(struct list_lru *lru)
{
+ if (!list_lru_memcg_aware(lru))
+ return;
+
mutex_lock(&list_lrus_mutex);
- list_add(&lru->list, &list_lrus);
+ list_add(&lru->list, &memcg_list_lrus);
mutex_unlock(&list_lrus_mutex);
}
static void list_lru_unregister(struct list_lru *lru)
{
+ if (!list_lru_memcg_aware(lru))
+ return;
+
mutex_lock(&list_lrus_mutex);
list_del(&lru->list);
mutex_unlock(&list_lrus_mutex);
@@ -37,45 +49,65 @@ static int lru_shrinker_id(struct list_lru *lru)
return lru->shrinker_id;
}
-static inline bool list_lru_memcg_aware(struct list_lru *lru)
+static inline struct list_lru_one *
+list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
- return lru->memcg_aware;
+ if (list_lru_memcg_aware(lru) && idx >= 0) {
+ struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);
+
+ return mlru ? &mlru->node[nid] : NULL;
+ }
+ return &lru->node[nid].lru;
}
-static inline struct list_lru_one *
-list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
+static inline bool lock_list_lru(struct list_lru_one *l, bool irq)
{
- struct list_lru_memcg *memcg_lrus;
- /*
- * Either lock or RCU protects the array of per cgroup lists
- * from relocation (see memcg_update_list_lru_node).
- */
- memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
- lockdep_is_held(&nlru->lock));
- if (memcg_lrus && idx >= 0)
- return memcg_lrus->lru[idx];
- return &nlru->lru;
+ if (irq)
+ spin_lock_irq(&l->lock);
+ else
+ spin_lock(&l->lock);
+ if (unlikely(READ_ONCE(l->nr_items) == LONG_MIN)) {
+ if (irq)
+ spin_unlock_irq(&l->lock);
+ else
+ spin_unlock(&l->lock);
+ return false;
+ }
+ return true;
}
static inline struct list_lru_one *
-list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
- struct mem_cgroup **memcg_ptr)
+lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
+ bool irq, bool skip_empty)
{
- struct list_lru_one *l = &nlru->lru;
- struct mem_cgroup *memcg = NULL;
-
- if (!nlru->memcg_lrus)
- goto out;
+ struct list_lru_one *l;
- memcg = mem_cgroup_from_obj(ptr);
- if (!memcg)
- goto out;
+ rcu_read_lock();
+again:
+ l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
+ if (likely(l) && lock_list_lru(l, irq)) {
+ rcu_read_unlock();
+ return l;
+ }
+ /*
+ * The caller may either bail out if it raced with reparenting, or it
+ * may be iterating over the whole list_lru and expect empty slots.
+ */
+ if (skip_empty) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ VM_WARN_ON(!css_is_dying(&memcg->css));
+ memcg = parent_mem_cgroup(memcg);
+ goto again;
+}
- l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
-out:
- if (memcg_ptr)
- *memcg_ptr = memcg;
- return l;
+static inline void unlock_list_lru(struct list_lru_one *l, bool irq_off)
+{
+ if (irq_off)
+ spin_unlock_irq(&l->lock);
+ else
+ spin_unlock(&l->lock);
}
#else
static void list_lru_register(struct list_lru *lru)
@@ -97,64 +129,110 @@ static inline bool list_lru_memcg_aware(struct list_lru *lru)
}
static inline struct list_lru_one *
-list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
+list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
- return &nlru->lru;
+ return &lru->node[nid].lru;
}
static inline struct list_lru_one *
-list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
- struct mem_cgroup **memcg_ptr)
+lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
+ bool irq, bool skip_empty)
{
- if (memcg_ptr)
- *memcg_ptr = NULL;
- return &nlru->lru;
+ struct list_lru_one *l = &lru->node[nid].lru;
+
+ if (irq)
+ spin_lock_irq(&l->lock);
+ else
+ spin_lock(&l->lock);
+
+ return l;
}
-#endif /* CONFIG_MEMCG_KMEM */
-bool list_lru_add(struct list_lru *lru, struct list_head *item)
+static inline void unlock_list_lru(struct list_lru_one *l, bool irq_off)
+{
+ if (irq_off)
+ spin_unlock_irq(&l->lock);
+ else
+ spin_unlock(&l->lock);
+}
+#endif /* CONFIG_MEMCG */
+
+/* The caller must ensure the memcg lifetime. */
+bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg)
{
- int nid = page_to_nid(virt_to_page(item));
struct list_lru_node *nlru = &lru->node[nid];
- struct mem_cgroup *memcg;
struct list_lru_one *l;
- spin_lock(&nlru->lock);
+ l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
+ if (!l)
+ return false;
if (list_empty(item)) {
- l = list_lru_from_kmem(nlru, item, &memcg);
list_add_tail(item, &l->list);
/* Set shrinker bit if the first element was added */
if (!l->nr_items++)
- memcg_set_shrinker_bit(memcg, nid,
- lru_shrinker_id(lru));
- nlru->nr_items++;
- spin_unlock(&nlru->lock);
+ set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
+ unlock_list_lru(l, false);
+ atomic_long_inc(&nlru->nr_items);
return true;
}
- spin_unlock(&nlru->lock);
+ unlock_list_lru(l, false);
return false;
}
-EXPORT_SYMBOL_GPL(list_lru_add);
-bool list_lru_del(struct list_lru *lru, struct list_head *item)
+bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
{
+ bool ret;
int nid = page_to_nid(virt_to_page(item));
+
+ if (list_lru_memcg_aware(lru)) {
+ rcu_read_lock();
+ ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
+ rcu_read_unlock();
+ } else {
+ ret = list_lru_add(lru, item, nid, NULL);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(list_lru_add_obj);
+
+/* The caller must ensure the memcg lifetime. */
+bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg)
+{
struct list_lru_node *nlru = &lru->node[nid];
struct list_lru_one *l;
-
- spin_lock(&nlru->lock);
+ l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
+ if (!l)
+ return false;
if (!list_empty(item)) {
- l = list_lru_from_kmem(nlru, item, NULL);
list_del_init(item);
l->nr_items--;
- nlru->nr_items--;
- spin_unlock(&nlru->lock);
+ unlock_list_lru(l, false);
+ atomic_long_dec(&nlru->nr_items);
return true;
}
- spin_unlock(&nlru->lock);
+ unlock_list_lru(l, false);
return false;
}
-EXPORT_SYMBOL_GPL(list_lru_del);
+
+bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
+{
+ bool ret;
+ int nid = page_to_nid(virt_to_page(item));
+
+ if (list_lru_memcg_aware(lru)) {
+ rcu_read_lock();
+ ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
+ rcu_read_unlock();
+ } else {
+ ret = list_lru_del(lru, item, nid, NULL);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(list_lru_del_obj);
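For illustration, here is a minimal sketch of a user of the split interface; struct demo_obj and demo_lru are hypothetical, not part of this patch. list_lru_add_obj()/list_lru_del_obj() derive the node id and memcg from the (slab- or page-backed) object itself under RCU, while the four-argument list_lru_add()/list_lru_del() are for callers that already know both and can guarantee the memcg stays alive across the call.

#include <linux/list_lru.h>

/* Hypothetical object kept on an LRU while idle (sketch only). */
struct demo_obj {
	struct list_head lru;
	/* ... payload ... */
};

static struct list_lru demo_lru;

static void demo_make_idle(struct demo_obj *obj)
{
	/* nid and memcg are derived from the slab object backing @obj */
	list_lru_add_obj(&demo_lru, &obj->lru);
}

static void demo_make_busy(struct demo_obj *obj)
{
	list_lru_del_obj(&demo_lru, &obj->lru);
}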
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
@@ -174,15 +252,17 @@ EXPORT_SYMBOL_GPL(list_lru_isolate_move);
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg)
{
- struct list_lru_node *nlru = &lru->node[nid];
struct list_lru_one *l;
- unsigned long count;
+ long count;
rcu_read_lock();
- l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
- count = l->nr_items;
+ l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
+ count = l ? READ_ONCE(l->nr_items) : 0;
rcu_read_unlock();
+ if (unlikely(count < 0))
+ count = 0;
+
return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);
@@ -192,22 +272,24 @@ unsigned long list_lru_count_node(struct list_lru *lru, int nid)
struct list_lru_node *nlru;
nlru = &lru->node[nid];
- return nlru->nr_items;
+ return atomic_long_read(&nlru->nr_items);
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
static unsigned long
-__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
+__list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
- unsigned long *nr_to_walk)
+ unsigned long *nr_to_walk, bool irq_off)
{
-
- struct list_lru_one *l;
+ struct list_lru_node *nlru = &lru->node[nid];
+ struct list_lru_one *l = NULL;
struct list_head *item, *n;
unsigned long isolated = 0;
- l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
+ l = lock_list_lru_of_memcg(lru, nid, memcg, irq_off, true);
+ if (!l)
+ return isolated;
list_for_each_safe(item, n, &l->list) {
enum lru_status ret;
@@ -219,19 +301,19 @@ restart:
break;
--*nr_to_walk;
- ret = isolate(item, l, &nlru->lock, cb_arg);
+ ret = isolate(item, l, cb_arg);
switch (ret) {
+ /*
+ * LRU_RETRY, LRU_REMOVED_RETRY and LRU_STOP will drop the lru
+ * lock. List traversal will have to restart from scratch.
+ */
+ case LRU_RETRY:
+ goto restart;
case LRU_REMOVED_RETRY:
- assert_spin_locked(&nlru->lock);
fallthrough;
case LRU_REMOVED:
isolated++;
- nlru->nr_items--;
- /*
- * If the lru lock has been dropped, our list
- * traversal is now invalid and so we have to
- * restart from scratch.
- */
+ atomic_long_dec(&nlru->nr_items);
if (ret == LRU_REMOVED_RETRY)
goto restart;
break;
@@ -240,17 +322,14 @@ restart:
break;
case LRU_SKIP:
break;
- case LRU_RETRY:
- /*
- * The lru lock has been dropped, our list traversal is
- * now invalid and so we have to restart from scratch.
- */
- assert_spin_locked(&nlru->lock);
- goto restart;
+ case LRU_STOP:
+ goto out;
default:
BUG();
}
}
+ unlock_list_lru(l, irq_off);
+out:
return isolated;
}
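For illustration, a sketch of an isolate callback written against the new calling convention, in which the callback no longer receives the node-level spinlock; demo_obj and its lock are hypothetical. The callback runs with the per-memcg list's l->lock held, and LRU_RETRY/LRU_REMOVED_RETRY/LRU_STOP signal that this lock has been dropped, as the comment above describes.

#include <linux/list_lru.h>
#include <linux/spinlock.h>

/* Hypothetical object type (sketch only). */
struct demo_obj {
	spinlock_t lock;
	struct list_head lru;
};

static enum lru_status demo_isolate(struct list_head *item,
				    struct list_lru_one *list,
				    void *cb_arg)
{
	struct demo_obj *obj = container_of(item, struct demo_obj, lru);

	/* Called under list->lock; there is no nlru->lock argument anymore. */
	if (!spin_trylock(&obj->lock))
		return LRU_SKIP;

	/* Unlink from the LRU while still holding list->lock. */
	list_lru_isolate(list, item);
	spin_unlock(&obj->lock);
	return LRU_REMOVED;
}

A walk would then be started with something like list_lru_walk_one(lru, nid, memcg, demo_isolate, NULL, &nr_to_walk).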
@@ -259,14 +338,8 @@ list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk)
{
- struct list_lru_node *nlru = &lru->node[nid];
- unsigned long ret;
-
- spin_lock(&nlru->lock);
- ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
- nr_to_walk);
- spin_unlock(&nlru->lock);
- return ret;
+ return __list_lru_walk_one(lru, nid, memcg, isolate,
+ cb_arg, nr_to_walk, false);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
@@ -275,14 +348,8 @@ list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk)
{
- struct list_lru_node *nlru = &lru->node[nid];
- unsigned long ret;
-
- spin_lock_irq(&nlru->lock);
- ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
- nr_to_walk);
- spin_unlock_irq(&nlru->lock);
- return ret;
+ return __list_lru_walk_one(lru, nid, memcg, isolate,
+ cb_arg, nr_to_walk, true);
}
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
@@ -290,340 +357,245 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
unsigned long *nr_to_walk)
{
long isolated = 0;
- int memcg_idx;
isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
nr_to_walk);
- if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
- for_each_memcg_cache_index(memcg_idx) {
- struct list_lru_node *nlru = &lru->node[nid];
- spin_lock(&nlru->lock);
- isolated += __list_lru_walk_one(nlru, memcg_idx,
+#ifdef CONFIG_MEMCG
+ if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
+ struct list_lru_memcg *mlru;
+ struct mem_cgroup *memcg;
+ unsigned long index;
+
+ xa_for_each(&lru->xa, index, mlru) {
+ rcu_read_lock();
+ memcg = mem_cgroup_from_id(index);
+ if (!mem_cgroup_tryget(memcg)) {
+ rcu_read_unlock();
+ continue;
+ }
+ rcu_read_unlock();
+ isolated += __list_lru_walk_one(lru, nid, memcg,
isolate, cb_arg,
- nr_to_walk);
- spin_unlock(&nlru->lock);
+ nr_to_walk, false);
+ mem_cgroup_put(memcg);
if (*nr_to_walk <= 0)
break;
}
}
+#endif
+
return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
-static void init_one_lru(struct list_lru_one *l)
+static void init_one_lru(struct list_lru *lru, struct list_lru_one *l)
{
INIT_LIST_HEAD(&l->list);
+ spin_lock_init(&l->lock);
l->nr_items = 0;
+#ifdef CONFIG_LOCKDEP
+ if (lru->key)
+ lockdep_set_class(&l->lock, lru->key);
+#endif
}
-#ifdef CONFIG_MEMCG_KMEM
-static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
- int begin, int end)
-{
- int i;
-
- for (i = begin; i < end; i++)
- kfree(memcg_lrus->lru[i]);
-}
-
-static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
- int begin, int end)
-{
- int i;
-
- for (i = begin; i < end; i++) {
- struct list_lru_one *l;
-
- l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
- if (!l)
- goto fail;
-
- init_one_lru(l);
- memcg_lrus->lru[i] = l;
- }
- return 0;
-fail:
- __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
- return -ENOMEM;
-}
-
-static int memcg_init_list_lru_node(struct list_lru_node *nlru)
-{
- struct list_lru_memcg *memcg_lrus;
- int size = memcg_nr_cache_ids;
-
- memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
- size * sizeof(void *), GFP_KERNEL);
- if (!memcg_lrus)
- return -ENOMEM;
-
- if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
- kvfree(memcg_lrus);
- return -ENOMEM;
- }
- RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);
-
- return 0;
-}
-
-static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
-{
- struct list_lru_memcg *memcg_lrus;
- /*
- * This is called when shrinker has already been unregistered,
- * and nobody can use it. So, there is no need to use kvfree_rcu().
- */
- memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
- __memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
- kvfree(memcg_lrus);
-}
-
-static void kvfree_rcu(struct rcu_head *head)
+#ifdef CONFIG_MEMCG
+static struct list_lru_memcg *memcg_init_list_lru_one(struct list_lru *lru, gfp_t gfp)
{
+ int nid;
struct list_lru_memcg *mlru;
- mlru = container_of(head, struct list_lru_memcg, rcu);
- kvfree(mlru);
-}
-
-static int memcg_update_list_lru_node(struct list_lru_node *nlru,
- int old_size, int new_size)
-{
- struct list_lru_memcg *old, *new;
-
- BUG_ON(old_size > new_size);
+ mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
+ if (!mlru)
+ return NULL;
- old = rcu_dereference_protected(nlru->memcg_lrus,
- lockdep_is_held(&list_lrus_mutex));
- new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
- if (!new)
- return -ENOMEM;
-
- if (__memcg_init_list_lru_node(new, old_size, new_size)) {
- kvfree(new);
- return -ENOMEM;
- }
-
- memcpy(&new->lru, &old->lru, old_size * sizeof(void *));
-
- /*
- * The locking below allows readers that hold nlru->lock avoid taking
- * rcu_read_lock (see list_lru_from_memcg_idx).
- *
- * Since list_lru_{add,del} may be called under an IRQ-safe lock,
- * we have to use IRQ-safe primitives here to avoid deadlock.
- */
- spin_lock_irq(&nlru->lock);
- rcu_assign_pointer(nlru->memcg_lrus, new);
- spin_unlock_irq(&nlru->lock);
-
- call_rcu(&old->rcu, kvfree_rcu);
- return 0;
-}
-
-static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
- int old_size, int new_size)
-{
- struct list_lru_memcg *memcg_lrus;
+ for_each_node(nid)
+ init_one_lru(lru, &mlru->node[nid]);
- memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
- lockdep_is_held(&list_lrus_mutex));
- /* do not bother shrinking the array back to the old size, because we
- * cannot handle allocation failures here */
- __memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
+ return mlru;
}
-static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
+static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
- int i;
-
+ if (memcg_aware)
+ xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
lru->memcg_aware = memcg_aware;
-
- if (!memcg_aware)
- return 0;
-
- for_each_node(i) {
- if (memcg_init_list_lru_node(&lru->node[i]))
- goto fail;
- }
- return 0;
-fail:
- for (i = i - 1; i >= 0; i--) {
- if (!lru->node[i].memcg_lrus)
- continue;
- memcg_destroy_list_lru_node(&lru->node[i]);
- }
- return -ENOMEM;
}
static void memcg_destroy_list_lru(struct list_lru *lru)
{
- int i;
+ XA_STATE(xas, &lru->xa, 0);
+ struct list_lru_memcg *mlru;
if (!list_lru_memcg_aware(lru))
return;
- for_each_node(i)
- memcg_destroy_list_lru_node(&lru->node[i]);
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, mlru, ULONG_MAX) {
+ kfree(mlru);
+ xas_store(&xas, NULL);
+ }
+ xas_unlock_irq(&xas);
}
-static int memcg_update_list_lru(struct list_lru *lru,
- int old_size, int new_size)
+static void memcg_reparent_list_lru_one(struct list_lru *lru, int nid,
+ struct list_lru_one *src,
+ struct mem_cgroup *dst_memcg)
{
- int i;
+ int dst_idx = dst_memcg->kmemcg_id;
+ struct list_lru_one *dst;
- if (!list_lru_memcg_aware(lru))
- return 0;
+ spin_lock_irq(&src->lock);
+ dst = list_lru_from_memcg_idx(lru, nid, dst_idx);
+ spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
- for_each_node(i) {
- if (memcg_update_list_lru_node(&lru->node[i],
- old_size, new_size))
- goto fail;
+ list_splice_init(&src->list, &dst->list);
+ if (src->nr_items) {
+ WARN_ON(src->nr_items < 0);
+ dst->nr_items += src->nr_items;
+ set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
}
- return 0;
-fail:
- for (i = i - 1; i >= 0; i--) {
- if (!lru->node[i].memcg_lrus)
- continue;
+ /* Mark the list_lru_one dead */
+ src->nr_items = LONG_MIN;
- memcg_cancel_update_list_lru_node(&lru->node[i],
- old_size, new_size);
- }
- return -ENOMEM;
+ spin_unlock(&dst->lock);
+ spin_unlock_irq(&src->lock);
}
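To make the new sentinel easier to follow, here is an illustrative interleaving (not part of the patch) of a reparenting racing with an addition to the dying child's list:

/*
 * CPU0: memcg_reparent_list_lru_one()      CPU1: list_lru_add() on the child
 *
 *   spin_lock_irq(&src->lock)
 *   splice src->list into dst->list
 *   src->nr_items = LONG_MIN
 *   spin_unlock_irq(&src->lock)
 *                                           lock_list_lru(src, false)
 *                                             sees nr_items == LONG_MIN,
 *                                             unlocks, returns false
 *                                           memcg = parent_mem_cgroup(memcg)
 *                                           retries and lands on dst
 */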
-static void memcg_cancel_update_list_lru(struct list_lru *lru,
- int old_size, int new_size)
+void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
+ struct list_lru *lru;
int i;
- if (!list_lru_memcg_aware(lru))
- return;
+ mutex_lock(&list_lrus_mutex);
+ list_for_each_entry(lru, &memcg_list_lrus, list) {
+ struct list_lru_memcg *mlru;
+ XA_STATE(xas, &lru->xa, memcg->kmemcg_id);
- for_each_node(i)
- memcg_cancel_update_list_lru_node(&lru->node[i],
- old_size, new_size);
-}
+ /*
+ * Lock the XArray to ensure no list_lru_memcg allocation is still in
+ * flight, and that any further allocation will see css_is_dying().
+ */
+ xas_lock_irq(&xas);
+ mlru = xas_store(&xas, NULL);
+ xas_unlock_irq(&xas);
+ if (!mlru)
+ continue;
-int memcg_update_all_list_lrus(int new_size)
-{
- int ret = 0;
- struct list_lru *lru;
- int old_size = memcg_nr_cache_ids;
+ /*
+ * With the XArray entry set to NULL, holding the lru lock below
+ * prevents list_lru_{add,del,isolate} from touching the lru, so
+ * it is safe to reparent it.
+ */
+ for_each_node(i)
+ memcg_reparent_list_lru_one(lru, i, &mlru->node[i], parent);
- mutex_lock(&list_lrus_mutex);
- list_for_each_entry(lru, &list_lrus, list) {
- ret = memcg_update_list_lru(lru, old_size, new_size);
- if (ret)
- goto fail;
+ /*
+ * At this point all list_lrus corresponding to the cgroup are
+ * guaranteed to remain empty, so this lru can be freed safely; any
+ * further memcg_list_lru_alloc() call will simply bail out.
+ */
+ kvfree_rcu(mlru, rcu);
}
-out:
mutex_unlock(&list_lrus_mutex);
- return ret;
-fail:
- list_for_each_entry_continue_reverse(lru, &list_lrus, list)
- memcg_cancel_update_list_lru(lru, old_size, new_size);
- goto out;
}
-static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
- int src_idx, struct mem_cgroup *dst_memcg)
+static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
+ struct list_lru *lru)
{
- struct list_lru_node *nlru = &lru->node[nid];
- int dst_idx = dst_memcg->kmemcg_id;
- struct list_lru_one *src, *dst;
- bool set;
-
- /*
- * Since list_lru_{add,del} may be called under an IRQ-safe lock,
- * we have to use IRQ-safe primitives here to avoid deadlock.
- */
- spin_lock_irq(&nlru->lock);
+ int idx = memcg->kmemcg_id;
- src = list_lru_from_memcg_idx(nlru, src_idx);
- dst = list_lru_from_memcg_idx(nlru, dst_idx);
-
- list_splice_init(&src->list, &dst->list);
- set = (!dst->nr_items && src->nr_items);
- dst->nr_items += src->nr_items;
- if (set)
- memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
- src->nr_items = 0;
-
- spin_unlock_irq(&nlru->lock);
+ return idx < 0 || xa_load(&lru->xa, idx);
}
-static void memcg_drain_list_lru(struct list_lru *lru,
- int src_idx, struct mem_cgroup *dst_memcg)
+int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
+ gfp_t gfp)
{
- int i;
-
- if (!list_lru_memcg_aware(lru))
- return;
+ unsigned long flags;
+ struct list_lru_memcg *mlru = NULL;
+ struct mem_cgroup *pos, *parent;
+ XA_STATE(xas, &lru->xa, 0);
- for_each_node(i)
- memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
-}
+ if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
+ return 0;
-void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
-{
- struct list_lru *lru;
+ gfp &= GFP_RECLAIM_MASK;
+ /*
+ * Because the list_lru can be reparented to the parent cgroup's
+ * list_lru, we should make sure that this cgroup and all its
+ * ancestors have allocated list_lru_memcg.
+ */
+ do {
+ /*
+ * On each pass, find the farthest ancestor that has not been
+ * populated yet, stopping once memcg itself has been handled.
+ */
+ pos = memcg;
+ parent = parent_mem_cgroup(pos);
+ while (!memcg_list_lru_allocated(parent, lru)) {
+ pos = parent;
+ parent = parent_mem_cgroup(pos);
+ }
- mutex_lock(&list_lrus_mutex);
- list_for_each_entry(lru, &list_lrus, list)
- memcg_drain_list_lru(lru, src_idx, dst_memcg);
- mutex_unlock(&list_lrus_mutex);
+ if (!mlru) {
+ mlru = memcg_init_list_lru_one(lru, gfp);
+ if (!mlru)
+ return -ENOMEM;
+ }
+ xas_set(&xas, pos->kmemcg_id);
+ do {
+ xas_lock_irqsave(&xas, flags);
+ if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
+ xas_store(&xas, mlru);
+ if (!xas_error(&xas))
+ mlru = NULL;
+ }
+ xas_unlock_irqrestore(&xas, flags);
+ } while (xas_nomem(&xas, gfp));
+ } while (pos != memcg && !css_is_dying(&pos->css));
+
+ if (unlikely(mlru))
+ kfree(mlru);
+
+ return xas_error(&xas);
}
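For illustration, a hedged sketch of how a user might ensure the per-memcg structures exist before objects can be queued; demo_lru and demo_prepare() are hypothetical. In-tree this is normally reached indirectly, e.g. via kmem_cache_alloc_lru(), rather than called by hand.

#include <linux/list_lru.h>
#include <linux/memcontrol.h>

static struct list_lru demo_lru;

/*
 * Make sure @memcg and all of its ancestors have a list_lru_memcg in
 * demo_lru.xa, so a later list_lru_add(&demo_lru, ..., memcg) finds a
 * slot for that memcg instead of falling back to an ancestor.
 */
static int demo_prepare(struct mem_cgroup *memcg, gfp_t gfp)
{
	return memcg_list_lru_alloc(memcg, &demo_lru, gfp);
}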
#else
-static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
+static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
- return 0;
}
static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG */
-int __list_lru_init(struct list_lru *lru, bool memcg_aware,
- struct lock_class_key *key, struct shrinker *shrinker)
+int __list_lru_init(struct list_lru *lru, bool memcg_aware, struct shrinker *shrinker)
{
int i;
- int err = -ENOMEM;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
if (shrinker)
lru->shrinker_id = shrinker->id;
else
lru->shrinker_id = -1;
+
+ if (mem_cgroup_kmem_disabled())
+ memcg_aware = false;
#endif
- memcg_get_cache_ids();
lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
if (!lru->node)
- goto out;
-
- for_each_node(i) {
- spin_lock_init(&lru->node[i].lock);
- if (key)
- lockdep_set_class(&lru->node[i].lock, key);
- init_one_lru(&lru->node[i].lru);
- }
+ return -ENOMEM;
- err = memcg_init_list_lru(lru, memcg_aware);
- if (err) {
- kfree(lru->node);
- /* Do this so a list_lru_destroy() doesn't crash: */
- lru->node = NULL;
- goto out;
- }
+ for_each_node(i)
+ init_one_lru(lru, &lru->node[i].lru);
+ memcg_init_list_lru(lru, memcg_aware);
list_lru_register(lru);
-out:
- memcg_put_cache_ids();
- return err;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
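For illustration, a sketch of how an initialisation site might adapt to the dropped lock_class_key parameter; demo_lru, demo_lru_key and demo_lru_setup() are hypothetical. A caller that wants a dedicated lockdep class now sets lru->key before calling __list_lru_init(), and init_one_lru() applies that class to every per-node and per-memcg lock.

#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru demo_lru;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key demo_lru_key;
#endif

static int demo_lru_setup(struct shrinker *shrinker)
{
#ifdef CONFIG_LOCKDEP
	/* Optional: give all of demo_lru's internal locks one class. */
	demo_lru.key = &demo_lru_key;
#endif
	return __list_lru_init(&demo_lru, true, shrinker);
}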
@@ -633,17 +605,14 @@ void list_lru_destroy(struct list_lru *lru)
if (!lru->node)
return;
- memcg_get_cache_ids();
-
list_lru_unregister(lru);
memcg_destroy_list_lru(lru);
kfree(lru->node);
lru->node = NULL;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
lru->shrinker_id = -1;
#endif
- memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);