author	Matthew Wilcox (Oracle) <willy@infradead.org>	2025-06-11 16:59:07 +0100
committer	Vlastimil Babka <vbabka@suse.cz>	2025-06-18 13:06:26 +0200
commit	3df29914d9fd1a28ff0630ad5aa8a92abb97543d (patch)
tree	a58494eec310dfd9a98cd37011d4aceae3b368bc
parent	c5c44900f4739b14af71875bbd407c81bf576d04 (diff)
slab: Add SL_pfmemalloc flag
Give slab its own name for this flag. Move the implementation from
slab.h to slub.c since it's only used inside slub.c.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20250611155916.2579160-5-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
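
For context beyond this patch: the helpers only store state; the sketch below shows where the bit lives over a slab's lifetime, based on the shape of mainline allocate_slab() and __free_slab(). It is a simplified sketch, not mainline code: the sketch_* names are made up, error handling is trimmed, and only alloc_pages()/__free_pages() and the folio/slab helpers are real kernel APIs.

	/*
	 * Simplified sketch of the SL_pfmemalloc lifetime (hypothetical
	 * sketch_* functions, error handling trimmed).
	 */
	static struct slab *sketch_allocate_slab(gfp_t flags, unsigned int order)
	{
		struct folio *folio = (struct folio *)alloc_pages(flags, order);
		struct slab *slab;

		if (!folio)
			return NULL;

		slab = folio_slab(folio);
		__folio_set_slab(folio);
		/* The page allocator had to dip into the emergency reserves. */
		if (folio_is_pfmemalloc(folio))
			slab_set_pfmemalloc(slab);
		return slab;
	}

	static void sketch_free_slab(struct slab *slab)
	{
		struct folio *folio = slab_folio(slab);

		/* The slab is unreachable here, so the non-atomic clear is safe. */
		__slab_clear_pfmemalloc(slab);
		__folio_clear_slab(folio);
		__free_pages(&folio->page, folio_order(folio));
	}

That teardown-only clearing is presumably also why the patch carries over just the non-atomic __slab_clear_pfmemalloc() and drops the atomic slab_clear_pfmemalloc() from slab.h: slub.c appears to have no remaining user for the atomic variant.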
-rw-r--r--	mm/slab.h	24 ------------------------
-rw-r--r--	mm/slub.c	21 +++++++++++++++++++++
2 files changed, 21 insertions(+), 24 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 32785ff3470a..248b34c839b7 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -167,30 +167,6 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
  */
 #define slab_page(s) folio_page(slab_folio(s), 0)
 
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline bool slab_test_pfmemalloc(const struct slab *slab)
-{
-	return folio_test_active(slab_folio(slab));
-}
-
-static inline void slab_set_pfmemalloc(struct slab *slab)
-{
-	folio_set_active(slab_folio(slab));
-}
-
-static inline void slab_clear_pfmemalloc(struct slab *slab)
-{
-	folio_clear_active(slab_folio(slab));
-}
-
-static inline void __slab_clear_pfmemalloc(struct slab *slab)
-{
-	__folio_clear_active(slab_folio(slab));
-}
-
 static inline void *slab_address(const struct slab *slab)
 {
 	return folio_address(slab_folio(slab));
diff --git a/mm/slub.c b/mm/slub.c
index 15d92c736af5..d44be423dd50 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -187,6 +187,7 @@
  * enum slab_flags - How the slab flags bits are used.
  * @SL_locked: Is locked with slab_lock()
  * @SL_partial: On the per-node partial list
+ * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
  *
  * The slab flags share space with the page flags but some bits have
  * different interpretations. The high bits are used for information
@@ -195,6 +196,7 @@ enum slab_flags {
 	SL_locked = PG_locked,
 	SL_partial = PG_workingset,	/* Historical reasons for this bit */
+	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
 };
 
 /*
@@ -649,6 +651,25 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
 #endif /* CONFIG_SLUB_CPU_PARTIAL */
 
 /*
+ * If network-based swap is enabled, slub must keep track of whether memory
+ * was allocated from pfmemalloc reserves.
+ */
+static inline bool slab_test_pfmemalloc(const struct slab *slab)
+{
+	return test_bit(SL_pfmemalloc, &slab->flags);
+}
+
+static inline void slab_set_pfmemalloc(struct slab *slab)
+{
+	set_bit(SL_pfmemalloc, &slab->flags);
+}
+
+static inline void __slab_clear_pfmemalloc(struct slab *slab)
+{
+	__clear_bit(SL_pfmemalloc, &slab->flags);
+}
+
+/*
  * Per slab locking using the pagelock
  */
 static __always_inline void slab_lock(struct slab *slab)
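
One usage note, again outside this diff: the point of tracking pfmemalloc at all is that the allocation path can refuse to hand objects from a reserve-backed slab to callers that are not entitled to the reserves. Mainline slub.c does this with a small pfmemalloc_match() helper shaped like the following; treat the exact body as an approximation rather than a quotation.

	/*
	 * Approximation of slub.c's pfmemalloc_match(): objects carved out
	 * of a pfmemalloc slab may only satisfy allocations that are
	 * themselves entitled to the reserves (e.g. PF_MEMALLOC context);
	 * everyone else must fall back to a freshly allocated slab.
	 */
	static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
	{
		if (unlikely(slab_test_pfmemalloc(slab)))
			return gfp_pfmemalloc_allowed(gfpflags);

		return true;
	}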