diff options
| author | Zhenhua Huang <quic_zhenhuah@quicinc.com> | 2025-04-21 15:52:32 +0800 | 
|---|---|---|
| committer | Vlastimil Babka <vbabka@suse.cz> | 2025-04-24 19:19:40 +0200 | 
| commit | be8250786ca94952a19ce87f98ad9906448bc9ef (patch) | |
| tree | 0e6a4606e70eec3d5b908cffdf4925108789611d | |
| parent | d2f5819b6ed357c0c350c0616b6b9f38be59adf6 (diff) | |
mm, slab: clean up slab->obj_exts always
When memory allocation profiling is disabled at runtime or due to an
error, shutdown_mem_profiling() is called, but the slab->obj_exts that
was previously allocated remains.
It won't be cleared by unaccount_slab() because
mem_alloc_profiling_enabled() is no longer true. This is incorrect:
slab->obj_exts should always be cleaned up in unaccount_slab() to avoid
the following error:
[...]BUG: Bad page state in process...
..
[...]page dumped because: page still charged to cgroup
[andriy.shevchenko@linux.intel.com: fold need_slab_obj_ext() into its only user]
Fixes: 21c690a349ba ("mm: introduce slabobj_ext to support slab object extensions")
Cc: stable@vger.kernel.org
Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Suren Baghdasaryan <surenb@google.com>
Link: https://patch.msgid.link/20250421075232.2165527-1-quic_zhenhuah@quicinc.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
| -rw-r--r-- | mm/slub.c | 30 | 
1 files changed, 8 insertions, 22 deletions
| diff --git a/mm/slub.c b/mm/slub.c index dc9e729e1d26..be8b09e09d30 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2028,8 +2028,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,  	return 0;  } -/* Should be called only if mem_alloc_profiling_enabled() */ -static noinline void free_slab_obj_exts(struct slab *slab) +static inline void free_slab_obj_exts(struct slab *slab)  {  	struct slabobj_ext *obj_exts; @@ -2049,18 +2048,6 @@ static noinline void free_slab_obj_exts(struct slab *slab)  	slab->obj_exts = 0;  } -static inline bool need_slab_obj_ext(void) -{ -	if (mem_alloc_profiling_enabled()) -		return true; - -	/* -	 * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally -	 * inside memcg_slab_post_alloc_hook. No other users for now. -	 */ -	return false; -} -  #else /* CONFIG_SLAB_OBJ_EXT */  static inline void init_slab_obj_exts(struct slab *slab) @@ -2077,11 +2064,6 @@ static inline void free_slab_obj_exts(struct slab *slab)  {  } -static inline bool need_slab_obj_ext(void) -{ -	return false; -} -  #endif /* CONFIG_SLAB_OBJ_EXT */  #ifdef CONFIG_MEM_ALLOC_PROFILING @@ -2129,7 +2111,7 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)  static inline void  alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)  { -	if (need_slab_obj_ext()) +	if (mem_alloc_profiling_enabled())  		__alloc_tagging_slab_alloc_hook(s, object, flags);  } @@ -2601,8 +2583,12 @@ static __always_inline void account_slab(struct slab *slab, int order,  static __always_inline void unaccount_slab(struct slab *slab, int order,  					   struct kmem_cache *s)  { -	if (memcg_kmem_online() || need_slab_obj_ext()) -		free_slab_obj_exts(slab); +	/* +	 * The slab object extensions should now be freed regardless of +	 * whether mem_alloc_profiling_enabled() or not because profiling +	 * might have been disabled after slab->obj_exts got allocated. 
+	 */ +	free_slab_obj_exts(slab);  	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),  			    -(PAGE_SIZE << order)); | 
