author    | Matthew Wilcox (Oracle) <willy@infradead.org> | 2025-06-11 16:59:06 +0100
committer | Vlastimil Babka <vbabka@suse.cz>              | 2025-06-18 13:06:26 +0200
commit    | c5c44900f4739b14af71875bbd407c81bf576d04 (patch)
tree      | c92da3cc3a13b733453638b627b76c777a5a6c5a
parent    | 30908096dd8d79b66d987782df04d14e1c907c25 (diff)
slab: Add SL_partial flag
Give slab its own name for this flag. Keep the PG_workingset alias
information in one place.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20250611155916.2579160-4-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-rw-r--r-- | mm/slub.c | 22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 9353da50b573..15d92c736af5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -91,14 +91,14 @@
  * The partially empty slabs cached on the CPU partial list are used
  * for performance reasons, which speeds up the allocation process.
  * These slabs are not frozen, but are also exempt from list management,
- * by clearing the PG_workingset flag when moving out of the node
+ * by clearing the SL_partial flag when moving out of the node
  * partial list. Please see __slab_free() for more details.
  *
  * To sum up, the current scheme is:
- * - node partial slab: PG_Workingset && !frozen
- * - cpu partial slab:  !PG_Workingset && !frozen
- * - cpu slab:          !PG_Workingset && frozen
- * - full slab:         !PG_Workingset && !frozen
+ * - node partial slab: SL_partial && !frozen
+ * - cpu partial slab:  !SL_partial && !frozen
+ * - cpu slab:          !SL_partial && frozen
+ * - full slab:         !SL_partial && !frozen
  *
  * list_lock
  *
@@ -186,6 +186,7 @@
 /**
  * enum slab_flags - How the slab flags bits are used.
  * @SL_locked: Is locked with slab_lock()
+ * @SL_partial: On the per-node partial list
  *
  * The slab flags share space with the page flags but some bits have
  * different interpretations. The high bits are used for information
@@ -193,6 +194,7 @@
  */
 enum slab_flags {
 	SL_locked = PG_locked,
+	SL_partial = PG_workingset,	/* Historical reasons for this bit */
 };
 
 /*
@@ -2729,23 +2731,19 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
 	free_slab(s, slab);
 }
 
-/*
- * SLUB reuses PG_workingset bit to keep track of whether it's on
- * the per-node partial list.
- */
 static inline bool slab_test_node_partial(const struct slab *slab)
 {
-	return folio_test_workingset(slab_folio(slab));
+	return test_bit(SL_partial, &slab->flags);
 }
 
 static inline void slab_set_node_partial(struct slab *slab)
 {
-	set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+	set_bit(SL_partial, &slab->flags);
 }
 
 static inline void slab_clear_node_partial(struct slab *slab)
 {
-	clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+	clear_bit(SL_partial, &slab->flags);
 }
 
 /*
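For context, the helpers changed above are called from SLUB's node partial list management. The sketch below is a simplified paraphrase of SLUB's `__add_partial()` and `remove_partial()` (it is not part of this patch, and details may differ across kernel versions): both callers run under `n->list_lock`, so setting and clearing `SL_partial` stays in lockstep with the slab's membership on `n->partial`, which is what lets `__slab_free()` test the flag to decide whether list manipulation is needed.

```c
/*
 * Simplified sketch, not verbatim mm/slub.c: how the SL_partial
 * helpers pair with node partial list add/remove. Both callers
 * hold n->list_lock, so the flag always matches list membership.
 */
static inline void __add_partial(struct kmem_cache_node *n,
				 struct slab *slab, int tail)
{
	n->nr_partial++;
	if (tail == DEACTIVATE_TO_TAIL)
		list_add_tail(&slab->slab_list, &n->partial);
	else
		list_add(&slab->slab_list, &n->partial);
	slab_set_node_partial(slab);	/* now: SL_partial && !frozen */
}

static inline void remove_partial(struct kmem_cache_node *n,
				  struct slab *slab)
{
	lockdep_assert_held(&n->list_lock);
	list_del(&slab->slab_list);
	slab_clear_node_partial(slab);	/* leaving the node partial list */
	n->nr_partial--;
}
```

Because the flag and the list are updated together under the lock, a reader that observes `slab_test_node_partial(slab)` under `n->list_lock` can rely on the slab actually being on the per-node partial list; the patch changes only how the bit is named and accessed (via `slab->flags` instead of the folio's page flags), not this invariant.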