author    Vlastimil Babka <vbabka@suse.cz>    2022-11-10 09:48:37 +0100
committer Vlastimil Babka <vbabka@suse.cz>    2022-11-21 10:35:38 +0100
commit    14d3eb66e16a55d279598c8ed7ae1ca85066ff5b (patch)
tree      2ef1278ecb927a287285e2a97fce5bf320aa6196 /mm/slab.h
parent    4b28ba9eeab4345af43e45e6eb4056eb2f1cb764 (diff)
parent    b539ce9f1a31c442098c3f351cb4d03ba27c2720 (diff)
Merge branch 'slab/for-6.2/locking' into slab/for-next
A patch from Jiri Kosina that makes SLAB's list_lock a raw_spinlock_t. While there are no plans to make SLAB actually compatible with PREEMPT_RT (or to give SLAB any other future), the change keeps lockdep happy on !PREEMPT_RT builds.
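For background: on PREEMPT_RT, spinlock_t becomes a sleeping lock, and lockdep's CONFIG_PROVE_RAW_LOCK_NESTING checks flag, even on !PREEMPT_RT builds, places where such a lock would be taken in a context that cannot sleep on RT, such as SLAB's drain path running in hardirq context via an IPI. A raw_spinlock_t always spins, so the warning goes away. The following is a minimal sketch with hypothetical names (example_node, example_drain), not code from the patch itself, showing what the type change implies for call sites:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_node {
	raw_spinlock_t list_lock;	/* was: spinlock_t list_lock; */
	struct list_head partial;
};

/* Runs in hardirq context on every CPU, e.g. via on_each_cpu(). */
static void example_drain(void *arg)
{
	struct example_node *n = arg;
	unsigned long flags;

	/* Call sites change from spin_lock_irqsave() accordingly. */
	raw_spin_lock_irqsave(&n->list_lock, flags);
	/* ... walk and drain the per-node lists ... */
	raw_spin_unlock_irqrestore(&n->list_lock, flags);
}

Note how the diff below moves the list_lock declaration into the per-allocator #ifdef blocks, so only SLAB's lock becomes raw; SLUB keeps an ordinary spinlock_t, as SLUB is the allocator intended to work under PREEMPT_RT.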
Diffstat (limited to 'mm/slab.h')
-rw-r--r--    mm/slab.h    4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 0202a8c2f0d2..19e1baac807c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -750,9 +750,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
  * The slab lists for all objects.
  */
 struct kmem_cache_node {
-	spinlock_t list_lock;
-
 #ifdef CONFIG_SLAB
+	raw_spinlock_t list_lock;
 	struct list_head slabs_partial;	/* partial list first, better asm code */
 	struct list_head slabs_full;
 	struct list_head slabs_free;
@@ -768,6 +767,7 @@ struct kmem_cache_node {
 #endif
 
 #ifdef CONFIG_SLUB
+	spinlock_t list_lock;
 	unsigned long nr_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG