Diffstat (limited to 'include/linux/list_lru.h')
-rw-r--r--  include/linux/list_lru.h | 92
1 file changed, 55 insertions(+), 37 deletions(-)
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 7675a48a0701..fe739d35a864 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -24,12 +24,16 @@ enum lru_status {
LRU_SKIP, /* item cannot be locked, skip */
LRU_RETRY, /* item not freeable. May drop the lock
internally, but has to return locked. */
+ LRU_STOP, /* stop lru list walking. May drop the lock
+ internally, but has to return locked. */
};
struct list_lru_one {
struct list_head list;
/* may become negative during memcg reparenting */
long nr_items;
+ /* protects all fields above */
+ spinlock_t lock;
};
struct list_lru_memcg {
@@ -39,33 +43,41 @@ struct list_lru_memcg {
};
struct list_lru_node {
- /* protects all lists on the node, including per cgroup */
- spinlock_t lock;
/* global list, used for the root cgroup in cgroup aware lrus */
struct list_lru_one lru;
- long nr_items;
+ atomic_long_t nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
struct list_lru_node *node;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
struct list_head list;
int shrinker_id;
bool memcg_aware;
struct xarray xa;
#endif
+#ifdef CONFIG_LOCKDEP
+ struct lock_class_key *key;
+#endif
};
void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
- struct lock_class_key *key, struct shrinker *shrinker);
+ struct shrinker *shrinker);
#define list_lru_init(lru) \
- __list_lru_init((lru), false, NULL, NULL)
-#define list_lru_init_key(lru, key) \
- __list_lru_init((lru), false, (key), NULL)
+ __list_lru_init((lru), false, NULL)
#define list_lru_init_memcg(lru, shrinker) \
- __list_lru_init((lru), true, NULL, shrinker)
+ __list_lru_init((lru), true, shrinker)
+
+static inline int list_lru_init_memcg_key(struct list_lru *lru, struct shrinker *shrinker,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_LOCKDEP
+ lru->key = key;
+#endif
+ return list_lru_init_memcg(lru, shrinker);
+}
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
gfp_t gfp);
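
For illustration, a minimal caller-side sketch of the new init API: with the lock_class_key argument dropped from __list_lru_init(), a user that wants its per-list locks in a dedicated lockdep class now passes the key through list_lru_init_memcg_key(). The names my_cache_lru, my_cache_lru_key and my_cache_lru_setup below are placeholders, not part of this patch.

#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru my_cache_lru;		/* hypothetical memcg-aware LRU */
static struct lock_class_key my_cache_lru_key;	/* hypothetical lockdep class */

static int my_cache_lru_setup(struct shrinker *shrinker)
{
	/* memcg-aware init; the key is only stored when CONFIG_LOCKDEP is set */
	return list_lru_init_memcg_key(&my_cache_lru, shrinker, &my_cache_lru_key);
}
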
@@ -79,13 +91,24 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
* @memcg: the cgroup of the sublist to add the item to.
*
* If the element is already part of a list, this function returns doing
- * nothing. Therefore the caller does not need to keep state about whether or
- * not the element already belongs in the list and is allowed to lazy update
- * it. Note however that this is valid for *a* list, not *this* list. If
- * the caller organize itself in a way that elements can be in more than
- * one type of list, it is up to the caller to fully remove the item from
- * the previous list (with list_lru_del() for instance) before moving it
- * to @lru.
+ * nothing. This means that it is not necessary to keep state about whether or
+ * not the element already belongs in the list. That said, this logic only
+ * works if the item is in *this* list. If the item might be in some other
+ * list, then you cannot rely on this check and you must remove it from the
+ * other list before trying to insert it.
+ *
+ * The lru list consists of many sublists internally; the @nid and @memcg
+ * parameters are used to determine which sublist to insert the item into.
+ * It's important to use the right value of @nid and @memcg when deleting the
+ * item, since it might otherwise get deleted from the wrong sublist.
+ *
+ * This also applies when attempting to insert the item multiple times - if
+ * the item is currently in one sublist and you call list_lru_add() again, you
+ * must pass the right @nid and @memcg parameters so that the same sublist is
+ * used.
+ *
+ * You must ensure that the memcg is not freed during this call (e.g., with
+ * rcu or by taking a css refcnt).
*
* Return: true if the list was updated, false otherwise
*/
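
A hedged usage sketch of the rules above, with placeholder names (struct my_obj, my_obj_add_to_lru) that are not part of this patch: the caller remembers which node the object lives on and pins the memcg across the call, so the same sublist is used for the add and for any later delete.

struct my_obj {
	struct list_head lru;	/* linked into one list_lru sublist */
	int nid;		/* remembered so a later delete hits the same sublist */
};

static void my_obj_add_to_lru(struct list_lru *my_lru, struct my_obj *obj,
			      struct mem_cgroup *memcg)
{
	rcu_read_lock();	/* keep @memcg from being freed during the call */
	if (!list_lru_add(my_lru, &obj->lru, obj->nid, memcg))
		pr_debug("already on the LRU, nothing to do\n");	/* repeat adds return false */
	rcu_read_unlock();
}
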
@@ -101,7 +124,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
* memcg of the sublist is determined by @item list_head. This assumption is
* valid for slab objects LRU such as dentries, inodes, etc.
*
- * Return value: true if the list was updated, false otherwise
+ * Return: true if the list was updated, false otherwise
*/
bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
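
For comparison, the _obj variant derives @nid and @memcg from the slab allocation backing @item. The sketch below mirrors how a dcache-style user would queue a dentry on its superblock's LRU; the function name is made up for illustration.

static void example_dentry_to_lru(struct dentry *dentry)
{
	/* nid and memcg are looked up from the slab object backing the dentry */
	list_lru_add_obj(&dentry->d_sb->s_dentry_lru, &dentry->d_lru);
}
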
@@ -113,8 +136,19 @@ bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
* @memcg: the cgroup of the sublist to delete the item from.
*
* This function works analogously as list_lru_add() in terms of list
- * manipulation. The comments about an element already pertaining to
- * a list are also valid for list_lru_del().
+ * manipulation.
+ *
+ * The comments in list_lru_add() about an element already being in a list are
+ * also valid for list_lru_del(), that is, you can delete an item that has
+ * already been removed or never been added. However, if the item is in a
+ * list, it must be in *this* list, and you must pass the right value of @nid
+ * and @memcg so that the right sublist is used.
+ *
+ * You must ensure that the memcg is not freed during this call (e.g., with
+ * rcu or by taking a css refcnt). When a memcg is deleted, list_lru entries
+ * are automatically moved to the parent memcg. This is done in a race-free
+ * way, so during deletion of a memcg both the old and new memcg will resolve
+ * to the same sublist internally.
*
* Return: true if the list was updated, false otherwise
*/
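
Continuing the sketch from list_lru_add() above (same placeholder struct my_obj): deletion passes the same @nid/@memcg, and the return value tells the caller whether the item was actually still on a list.

static void my_obj_remove_from_lru(struct list_lru *my_lru, struct my_obj *obj,
				   struct mem_cgroup *memcg)
{
	rcu_read_lock();	/* pin @memcg, as required above */
	if (!list_lru_del(my_lru, &obj->lru, obj->nid, memcg))
		pr_debug("not on the LRU (never added, already deleted or isolated)\n");
	rcu_read_unlock();
}
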
@@ -130,7 +164,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
* memcg of the sublist is determined by @item list_head. This assumption is
* valid for slab objects LRU such as dentries, inodes, etc.
*
- * Return value: true if the list was updated, false otherwise.
+ * Return: true if the list was updated, false otherwise.
*/
bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
@@ -170,25 +204,9 @@ static inline unsigned long list_lru_count(struct list_lru *lru)
void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head);
-/**
- * list_lru_putback: undo list_lru_isolate
- * @lru: the lru pointer.
- * @item: the item to put back.
- * @nid: the node id of the sublist to put the item back to.
- * @memcg: the cgroup of the sublist to put the item back to.
- *
- * Put back an isolated item into its original LRU. Note that unlike
- * list_lru_add, this does not increment the node LRU count (as
- * list_lru_isolate does not originally decrement this count).
- *
- * Since we might have dropped the LRU lock in between, recompute list_lru_one
- * from the node's id and memcg.
- */
-void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
- struct mem_cgroup *memcg);
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
- struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+ struct list_lru_one *list, void *cb_arg);
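
A hedged sketch of a walk callback written against the new prototype: the spinlock_t * argument is gone, the per-sublist lock in struct list_lru_one is held by the walker while the callback runs, and the new LRU_STOP status ends the walk early once a budget is spent. All my_* names are placeholders, not part of this patch.

struct my_walk_arg {
	struct list_head dispose;	/* isolated items are collected here */
	unsigned long nr_to_free;	/* remaining budget for this walk */
};

static enum lru_status my_lru_walk_cb(struct list_head *item,
				      struct list_lru_one *list, void *cb_arg)
{
	struct my_walk_arg *arg = cb_arg;

	if (!arg->nr_to_free)
		return LRU_STOP;	/* budget spent: stop walking this lru */

	/* move the item to a private dispose list while the sublist is locked */
	list_lru_isolate_move(list, item, &arg->dispose);
	arg->nr_to_free--;
	return LRU_REMOVED;
}
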
/**
* list_lru_walk_one: walk a @lru, isolating and disposing freeable items.