author    Vlastimil Babka <vbabka@suse.cz>    2023-10-03 14:57:49 +0200
committer Vlastimil Babka <vbabka@suse.cz>    2023-12-06 11:57:21 +0100
commit    b52ef56e9b324b172053b03d8c775ef4708fbc23 (patch)
tree      fa6c1c55b4feadfdab97bb4a75c49d17ab371cec
parent    0bedcc66d2a43a50ab660273842f4737a293dd8a (diff)
mm/slab: move struct kmem_cache_node from slab.h to slub.c
The declaration and associated helpers are not used anywhere else anymore.

Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-rw-r--r--    mm/slab.h    29
-rw-r--r--    mm/slub.c    27
2 files changed, 27 insertions(+), 29 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index a81ef7c9282d..5ae6a978e9c2 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -588,35 +588,6 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
return s->size;
}
-
-/*
- * The slab lists for all objects.
- */
-struct kmem_cache_node {
- spinlock_t list_lock;
- unsigned long nr_partial;
- struct list_head partial;
-#ifdef CONFIG_SLUB_DEBUG
- atomic_long_t nr_slabs;
- atomic_long_t total_objects;
- struct list_head full;
-#endif
-};
-
-static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
-{
- return s->node[node];
-}
-
-/*
- * Iterator over all nodes. The body will be executed for each node that has
- * a kmem_cache_node structure allocated (which is true for all online nodes)
- */
-#define for_each_kmem_cache_node(__s, __node, __n) \
- for (__node = 0; __node < nr_node_ids; __node++) \
- if ((__n = get_node(__s, __node)))
-
-
#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
diff --git a/mm/slub.c b/mm/slub.c
index 844e0beb84ee..cc801f8258fe 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -397,6 +397,33 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
}
/*
+ * The slab lists for all objects.
+ */
+struct kmem_cache_node {
+ spinlock_t list_lock;
+ unsigned long nr_partial;
+ struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
+ atomic_long_t nr_slabs;
+ atomic_long_t total_objects;
+ struct list_head full;
+#endif
+};
+
+static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
+{
+ return s->node[node];
+}
+
+/*
+ * Iterator over all nodes. The body will be executed for each node that has
+ * a kmem_cache_node structure allocated (which is true for all online nodes)
+ */
+#define for_each_kmem_cache_node(__s, __node, __n) \
+ for (__node = 0; __node < nr_node_ids; __node++) \
+ if ((__n = get_node(__s, __node)))
+
+/*
* Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
* Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
* differ during memory hotplug/hotremove operations.
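For context, a minimal illustrative sketch of how the now-private helpers are typically used inside mm/slub.c: get_node() looks up a cache's per-node structure, and for_each_kmem_cache_node() visits every node that has one allocated. The function name count_partial_slabs() and its body below are hypothetical, written only to show the idiom; they are not part of this patch.

/*
 * Hypothetical example (not from this patch): sum the partial-slab counts
 * of a cache across all NUMA nodes using the helpers moved above.
 */
static unsigned long count_partial_slabs(struct kmem_cache *s)
{
	struct kmem_cache_node *n;
	unsigned long total = 0;
	int node;

	for_each_kmem_cache_node(s, node, n) {
		unsigned long flags;

		/* n->list_lock protects nr_partial and the partial list. */
		spin_lock_irqsave(&n->list_lock, flags);
		total += n->nr_partial;
		spin_unlock_irqrestore(&n->list_lock, flags);
	}
	return total;
}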