From 0445ee000498ec1a5b1ed31bf35816cbeaef5e1e Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Mon, 20 Nov 2023 17:11:10 +0100
Subject: mm/slab, docs: switch mm-api docs generation from slab.c to slub.c

The SLAB implementation is going to be removed, and mm-api.rst currently
uses mm/slab.c to obtain kerneldocs for some API functions. Switch it to
mm/slub.c and move the relevant kerneldocs of exported functions from one
to the other. The rest of kerneldocs in slab.c is for static SLAB
implementation-specific functions that don't have counterparts in slub.c
and thus can be simply removed with the implementation.

Acked-by: David Rientjes
Tested-by: David Rientjes
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
---
 Documentation/core-api/mm-api.rst |  2 +-
 mm/slab.c                         | 21 ---------------------
 mm/slub.c                         | 21 +++++++++++++++++++++
 3 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index 2d091c873d1e..af8151db88b2 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -37,7 +37,7 @@ The Slab Cache
 .. kernel-doc:: include/linux/slab.h
    :internal:
 
-.. kernel-doc:: mm/slab.c
+.. kernel-doc:: mm/slub.c
    :export:
 
 .. kernel-doc:: mm/slab_common.c
diff --git a/mm/slab.c b/mm/slab.c
index 9ad3d0f2d1a5..37efe3241f9c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3491,19 +3491,6 @@ error:
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- *
- * Return: pointer to the new object or %NULL in case of error
- */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_);
@@ -3564,14 +3551,6 @@ void __kmem_cache_free(struct kmem_cache *cachep, void *objp,
 	__do_kmem_cache_free(cachep, objp, caller);
 }
 
-/**
- * kmem_cache_free - Deallocate an object
- * @cachep: The cache the allocation was from.
- * @objp: The previously allocated object.
- *
- * Free an object which was previously allocated from this
- * cache.
- */
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	cachep = cache_from_obj(cachep, objp);
diff --git a/mm/slub.c b/mm/slub.c
index 63d281dfacdb..3e01731783df 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3518,6 +3518,19 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
 			       caller, orig_size);
 }
 
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @s: The cache to allocate from.
+ * @gfpflags: See kmalloc().
+ * @node: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ *
+ * Return: pointer to the new object or %NULL in case of error
+ */
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
@@ -3822,6 +3835,14 @@ void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
 	slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
 }
 
+/**
+ * kmem_cache_free - Deallocate an object
+ * @s: The cache the allocation was from.
+ * @x: The previously allocated object.
+ *
+ * Free an object which was previously allocated from this
+ * cache.
+ */
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	s = cache_from_obj(s, x);
-- 
cgit
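
For context (not part of the patch above): a minimal sketch of how the two exported functions whose kerneldoc comments move to slub.c are typically used from kernel code. The struct foo, foo_cache, and foo_example() names are made up for illustration; only the slab API calls themselves come from the documented interface.

#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical object type and cache, for illustration only. */
struct foo {
	int bar;
};

static struct kmem_cache *foo_cache;

static int foo_example(int node)
{
	struct foo *f;

	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cache)
		return -ENOMEM;

	/*
	 * Prefer memory on the given NUMA node; the allocator may fall
	 * back to another node unless __GFP_THISNODE is passed.
	 */
	f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, node);
	if (!f) {
		kmem_cache_destroy(foo_cache);
		return -ENOMEM;
	}

	f->bar = 1;

	/* Return the object to the cache it was allocated from. */
	kmem_cache_free(foo_cache, f);
	kmem_cache_destroy(foo_cache);
	return 0;
}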