path: root/mm/slab.h
author    Vlastimil Babka <vbabka@suse.cz>  2023-10-03 15:27:11 +0200
committer Vlastimil Babka <vbabka@suse.cz>  2023-12-06 11:57:21 +0100
commit    b774d3e326d30fc8ef841101c399e44bdac2aa48 (patch)
tree      4b8af66490308105e4609eba7f242029017a63b5 /mm/slab.h
parent    b52ef56e9b324b172053b03d8c775ef4708fbc23 (diff)
mm/slab: move kfree() from slab_common.c to slub.c
This should result in better code. Currently kfree() makes a function call
between compilation units to __kmem_cache_free() which does its own
virt_to_slab(), throwing away the struct slab pointer we already had in
kfree(). Now it can be reused. Additionally kfree() can now inline the
whole SLUB freeing fastpath.

Also move over free_large_kmalloc() as the only callsites are now in
slub.c, and make it static.

Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
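For illustration only, a rough sketch of the shape kfree() can take once it
lives in slub.c, per the flow described above (tracing, KASAN and other
debugging hooks omitted; slab_free() stands in for SLUB's internal freeing
fastpath helper with a simplified signature; this is not the verbatim kernel
code). The point is that the folio/slab lookup happens once, and the struct
slab pointer feeds the fastpath directly instead of being thrown away and
recomputed behind a cross-unit call.

/* Sketch only: simplified, not the exact slub.c implementation. */
void kfree(const void *object)
{
	struct folio *folio;
	struct slab *slab;
	struct kmem_cache *s;

	if (unlikely(ZERO_OR_NULL_PTR(object)))
		return;

	folio = virt_to_folio(object);
	if (unlikely(!folio_test_slab(folio))) {
		/* Large kmalloc()s are backed directly by page allocator folios. */
		free_large_kmalloc(folio, (void *)object);
		return;
	}

	/* Reuse the slab pointer instead of a second virt_to_slab() lookup. */
	slab = folio_slab(folio);
	s = slab->slab_cache;
	slab_free(s, slab, (void *)object, _RET_IP_);
}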
Diffstat (limited to 'mm/slab.h')
-rw-r--r--  mm/slab.h  4
1 file changed, 0 insertions, 4 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 5ae6a978e9c2..35a55c4a407d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -395,8 +395,6 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);
 void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
 			      int node, size_t orig_size,
 			      unsigned long caller);
-void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
-
 gfp_t kmalloc_fix_flags(gfp_t flags);
 
 /* Functions provided by the slab allocators */
@@ -559,8 +557,6 @@ static inline int memcg_alloc_slab_cgroups(struct slab *slab,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
-void free_large_kmalloc(struct folio *folio, void *object);
-
 size_t __ksize(const void *objp);
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
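The free_large_kmalloc() declaration removed above reflects the function
moving into slub.c and becoming static, since its only callers now live
there. A hedged sketch of what the moved definition can look like, with the
kmemleak/KASAN/KMSAN hooks of the real function omitted for brevity:

/*
 * Sketch: now static in slub.c because all callers live there.
 * Debugging hooks omitted; not the verbatim kernel code.
 */
static void free_large_kmalloc(struct folio *folio, void *object)
{
	unsigned int order = folio_order(folio);

	if (WARN_ON_ONCE(order == 0))
		pr_warn_once("object pointer: 0x%p\n", object);

	/* Undo the NR_SLAB_UNRECLAIMABLE_B accounting done at allocation. */
	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));
	__free_pages(folio_page(folio, 0), order);
}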