 mm/slab.h |  1 +
 mm/slub.c | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+), 0 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index e82e51c44bd0..43245d9207b6 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -234,6 +234,7 @@ struct kmem_cache_order_objects {
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
struct kmem_cache_cpu __percpu *cpu_slab;
+ struct lock_class_key lock_key;
#endif
struct slub_percpu_sheaves __percpu *cpu_sheaves;
/* Used for retrieving partial slabs, etc. */
diff --git a/mm/slub.c b/mm/slub.c
index 114ec18d223b..ee575ed9250f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3586,12 +3586,29 @@ static inline void note_cmpxchg_failure(const char *n,

static void init_kmem_cache_cpus(struct kmem_cache *s)
{
+#ifdef CONFIG_PREEMPT_RT
+ /*
+ * Register lockdep key for non-boot kmem caches to avoid
+ * WARN_ON_ONCE(static_obj(key)) in lockdep_register_key()
+ */
+ bool finegrain_lockdep = !init_section_contains(s, 1);
+#else
+ /*
+ * Don't bother with different lockdep classes for each
+ * kmem_cache, since we only use local_trylock_irqsave().
+ */
+ bool finegrain_lockdep = false;
+#endif
int cpu;
struct kmem_cache_cpu *c;

+ if (finegrain_lockdep)
+ lockdep_register_key(&s->lock_key);
for_each_possible_cpu(cpu) {
c = per_cpu_ptr(s->cpu_slab, cpu);
local_lock_init(&c->lock);
+ if (finegrain_lockdep)
+ lockdep_set_class(&c->lock, &s->lock_key);
c->tid = init_tid(cpu);
}
}
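
[Annotation, not part of the patch.] The hunk above follows the usual kernel pattern for giving each dynamically allocated object its own lockdep class: embed a struct lock_class_key in the object, register it with lockdep_register_key(), and attach it to the lock with lockdep_set_class(). A minimal sketch of that pattern, with hypothetical names (my_obj, my_obj_create and my_obj_destroy are illustrations, not from this patch):

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	spinlock_t lock;
	/*
	 * Allocated together with the object, so it passes the
	 * static_obj() check in lockdep_register_key().
	 */
	struct lock_class_key key;
};

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	spin_lock_init(&obj->lock);
	/* Register the key, then make it this lock's class. */
	lockdep_register_key(&obj->key);
	lockdep_set_class(&obj->lock, &obj->key);
	return obj;
}

static void my_obj_destroy(struct my_obj *obj)
{
	/*
	 * The key must be unregistered before the memory holding it
	 * is freed, mirroring the __kmem_cache_release() hunk below.
	 */
	lockdep_unregister_key(&obj->key);
	kfree(obj);
}

With a per-object key, lockdep tracks each instance as its own lock class instead of collapsing every cache's per-CPU lock into one shared class.
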
@@ -7210,6 +7227,9 @@ void __kmem_cache_release(struct kmem_cache *s)
if (s->cpu_sheaves)
pcs_destroy(s);
#ifndef CONFIG_SLUB_TINY
+#ifdef CONFIG_PREEMPT_RT
+ lockdep_unregister_key(&s->lock_key);
+#endif
free_percpu(s->cpu_slab);
#endif
free_kmem_cache_nodes(s);
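
[Annotation, not part of the patch.] The PREEMPT_RT-only registration is skipped for boot caches because lockdep_register_key() refuses keys in static storage, via the WARN_ON_ONCE(static_obj(key)) cited in the comment, and the early boot kmem_cache structures live in the kernel's init section. A hedged sketch of that test, with a made-up helper name (wants_dynamic_lock_key is an illustration):

#include <asm/sections.h>	/* init_section_contains() */

/* Hypothetical helper restating the patch's boot-cache check. */
static bool wants_dynamic_lock_key(struct kmem_cache *s)
{
	/*
	 * init_section_contains(s, 1) is true when the kmem_cache
	 * itself, and therefore its embedded lock_key, is a static
	 * init-section object that lockdep_register_key() would
	 * reject with a warning. Only runtime-allocated caches get
	 * a per-cache key.
	 */
	return !init_section_contains(s, 1);
}

On !PREEMPT_RT the patch deliberately keeps one shared class: per its comment, the lock is only taken via local_trylock_irqsave(), which either succeeds or fails immediately and so cannot block waiting on another lock. On PREEMPT_RT the local_lock becomes a real per-CPU lock that lockdep tracks, so a distinct class per cache keeps the dependency chains meaningful. The unconditional lockdep_unregister_key() in __kmem_cache_release() appears safe on that config because any cache that can reach this release path was allocated at runtime and thus had its key registered at creation time.
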