From 71492580571467fb7177aade19c18ce7486267f5 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Wed, 9 Jan 2019 23:03:25 -0500 Subject: locking/lockdep: Add debug_locks check in __lock_downgrade() Tetsuo Handa had reported that he saw an incorrect "downgrading a read lock" warning right after a previous lockdep warning. It is likely that the previous warning turned off lock debugging, leaving lockdep in an inconsistent state that then triggered the lock downgrade warning. Fix that by adding a check for debug_locks at the beginning of __lock_downgrade(). Debugged-by: Tetsuo Handa Reported-by: Tetsuo Handa Reported-by: syzbot+53383ae265fb161ef488@syzkaller.appspotmail.com Signed-off-by: Waiman Long Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: https://lkml.kernel.org/r/1547093005-26085-1-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 95932333a48b..e805fe3bf87f 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3535,6 +3535,9 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip) unsigned int depth; int i; + if (unlikely(!debug_locks)) + return 0; + depth = curr->lockdep_depth; /* * This function is about (re)setting the class of a held lock, -- cgit
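For illustration, a minimal standalone sketch of the convention this fix follows; all names below are hypothetical stand-ins for the lockdep internals. Once a first splat has cleared debug_locks (via debug_locks_off()), later state-mutating entry points should return early instead of warning from now-stale state:

    #include <stdbool.h>

    static bool debug_locks = true;    /* cleared by the first lockdep splat */

    /* Hypothetical stand-in for an entry point such as __lock_downgrade(). */
    static int example_lockdep_entry(void)
    {
            if (!debug_locks)          /* a prior warning already fired ... */
                    return 0;          /* ... so don't warn from stale state */
            /* ... validate and update the held-lock state ... */
            return 1;
    }

    int main(void)
    {
            return example_lockdep_entry() ? 0 : 1;
    }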
From 436a49ae7b693161c4fdf98b575ef16243dc2dfa Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 28 Dec 2018 06:02:00 +0100 Subject: locking/lockdep: Simplify mark_held_locks() The enum mark_type appears a bit artificial here. We can directly pass the base enum lock_usage_bit value to mark_held_locks(). All we need then is to add the read index for each lock if necessary. It makes the code clearer. Signed-off-by: Frederic Weisbecker Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: https://lkml.kernel.org/r/1545973321-24422-2-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index e805fe3bf87f..1dcd8341e35b 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2709,35 +2709,28 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this, return 1; } -enum mark_type { -#define LOCKDEP_STATE(__STATE) __STATE, -#include "lockdep_states.h" -#undef LOCKDEP_STATE -}; - /* * Mark all held locks with a usage bit: */ static int -mark_held_locks(struct task_struct *curr, enum mark_type mark) +mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit) { - enum lock_usage_bit usage_bit; struct held_lock *hlock; int i; for (i = 0; i < curr->lockdep_depth; i++) { + enum lock_usage_bit hlock_bit = base_bit; hlock = curr->held_locks + i; - usage_bit = 2 + (mark << 2); /* ENABLED */ if (hlock->read) - usage_bit += 1; /* READ */ + hlock_bit += 1; /* READ */ - BUG_ON(usage_bit >= LOCK_USAGE_STATES); + BUG_ON(hlock_bit >= LOCK_USAGE_STATES); if (!hlock->check) continue; - if (!mark_lock(curr, hlock, usage_bit)) + if (!mark_lock(curr, hlock, hlock_bit)) return 0; } @@ -2758,7 +2751,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip) * We are going to turn hardirqs on, so set the * usage bit for all held locks: */ - if (!mark_held_locks(curr, HARDIRQ)) + if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ)) return; /* * If we have softirqs enabled, then set the usage * this bit from being set before) */ if (curr->softirqs_enabled) - if (!mark_held_locks(curr, SOFTIRQ)) + if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ)) return; curr->hardirq_enable_ip = ip; @@ -2880,7 +2873,7 @@ void trace_softirqs_on(unsigned long ip) * enabled too: */ if (curr->hardirqs_enabled) - mark_held_locks(curr, SOFTIRQ); + mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); current->lockdep_recursion = 0; } -- cgit From bba2a8f1f974a45ca6ceaf688b2be7bc1c418a2f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 28 Dec 2018 06:02:01 +0100 Subject: locking/lockdep: Provide enum lock_usage_bit mask names It makes the code more self-explanatory and documents throughout the code what each magic number refers to: - state (Hardirq/Softirq) - direction (used in or enabled above state) - read or write We can even remove some comments that were compensating for the lack of those constant names. Signed-off-by: Frederic Weisbecker Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: https://lkml.kernel.org/r/1545973321-24422-3-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 33 +++++++++++---------------------- kernel/locking/lockdep_internals.h | 4 ++++ 2 files changed, 15 insertions(+), 22 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 1dcd8341e35b..608f74ed8bb9 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -1624,29 +1624,18 @@ static const char *state_rnames[] = { static inline const char *state_name(enum lock_usage_bit bit) { - return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2]; + return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2]; } static int exclusive_bit(int new_bit) { - /* - * USED_IN - * USED_IN_READ - * ENABLED - * ENABLED_READ - * - * bit 0 - write/read - * bit 1 - used_in/enabled - * bit 2+ state - */ - - int state = new_bit & ~3; - int dir = new_bit & 2; + int state = new_bit & LOCK_USAGE_STATE_MASK; + int dir = new_bit & LOCK_USAGE_DIR_MASK; /* * keep state, bit flip the direction and strip read. */ - return state | (dir ^ 2); + return state | (dir ^ LOCK_USAGE_DIR_MASK); } static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, @@ -2662,8 +2651,8 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit new_bit) { int excl_bit = exclusive_bit(new_bit); - int read = new_bit & 1; - int dir = new_bit & 2; + int read = new_bit & LOCK_USAGE_READ_MASK; + int dir = new_bit & LOCK_USAGE_DIR_MASK; /* * mark USED_IN has to look forwards -- to ensure no dependency @@ -2687,19 +2676,19 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this, * states. */ if ((!read || !dir || STRICT_READ_CHECKS) && - !usage(curr, this, excl_bit, state_name(new_bit & ~1))) + !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK))) return 0; /* * Check for read in write conflicts */ if (!read) { - if (!valid_state(curr, this, new_bit, excl_bit + 1)) + if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK)) return 0; if (STRICT_READ_CHECKS && - !usage(curr, this, excl_bit + 1, - state_name(new_bit + 1))) + !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK, + state_name(new_bit + LOCK_USAGE_READ_MASK))) return 0; } @@ -2723,7 +2712,7 @@ mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit) hlock = curr->held_locks + i; if (hlock->read) - hlock_bit += 1; /* READ */ + hlock_bit += LOCK_USAGE_READ_MASK; BUG_ON(hlock_bit >= LOCK_USAGE_STATES); diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index 88c847a41c8a..2ebb9d0ea91c 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -22,6 +22,10 @@ enum lock_usage_bit { LOCK_USAGE_STATES }; +#define LOCK_USAGE_READ_MASK 1 +#define LOCK_USAGE_DIR_MASK 2 +#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK)) /* * Usage-state bitmasks: */ -- cgit
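As a worked example of the encoding these mask names describe -- a sketch derived from the comment removed above (bit 0 = read, bit 1 = direction, bits 2+ = state), shown for the HARDIRQ state:

    /*
     * Bit layout: bit 0 = read, bit 1 = used_in/enabled, bits 2+ = state.
     * For HARDIRQ (state 0):
     *
     *   LOCK_USED_IN_HARDIRQ      = 0   USED_IN, write
     *   LOCK_USED_IN_HARDIRQ_READ = 1   USED_IN, read
     *   LOCK_ENABLED_HARDIRQ      = 2   ENABLED, write
     *   LOCK_ENABLED_HARDIRQ_READ = 3   ENABLED, read
     *
     * exclusive_bit(LOCK_ENABLED_HARDIRQ_READ = 3):
     *   state  = 3 & LOCK_USAGE_STATE_MASK     = 0
     *   dir    = 3 & LOCK_USAGE_DIR_MASK       = 2
     *   result = 0 | (2 ^ LOCK_USAGE_DIR_MASK) = 0 = LOCK_USED_IN_HARDIRQ
     *
     * i.e. the exclusive counterpart of "enabled in hardirq, read" is
     * "used in hardirq, write": the direction is flipped and the read
     * bit is stripped, while the state is preserved.
     */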
From 513e1073d52e55b8024b4f238a48de7587c64ccf Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Wed, 9 Jan 2019 23:03:25 -0500 Subject: locking/lockdep: Add debug_locks check in __lock_downgrade() Tetsuo Handa had reported that he saw an incorrect "downgrading a read lock" warning right after a previous lockdep warning. It is likely that the previous warning turned off lock debugging, leaving lockdep in an inconsistent state that then triggered the lock downgrade warning. Fix that by adding a check for debug_locks at the beginning of __lock_downgrade(). Reported-by: Tetsuo Handa Reported-by: syzbot+53383ae265fb161ef488@syzkaller.appspotmail.com Signed-off-by: Waiman Long Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: https://lkml.kernel.org/r/1547093005-26085-1-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 608f74ed8bb9..7f7db23fc002 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3479,6 +3479,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name, unsigned int depth; int i; + if (unlikely(!debug_locks)) + return 0; + depth = curr->lockdep_depth; /* * This function is about (re)setting the class of a held lock, -- cgit
From 07879c6a3740fbbf3c8891a0ab484c20a12794d8 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Tue, 18 Dec 2018 11:53:52 -0800 Subject: sched/wake_q: Reduce reference counting for special users Some users, specifically futexes and rwsems, required fixes that allowed the callers to be safe when wakeups occur before they are expected by wake_up_q(). Such scenarios also play games and rely on reference counting, and until now were pivoting on wake_q doing it. With the wake_q_add() call being moved down, this can no longer be the case. As such we end up with a double task-refcounting overhead, and these callers (being rather core-ish) care enough about it. This patch introduces a wake_q_add_safe() call for callers that have already done the refcounting and for which the task is therefore 'safe' from the wake_q point of view (in that a reference is held throughout the entire queue/wakeup cycle). In the one case wake_q does its own internal reference counting, in the other case it consumes the caller's reference. Signed-off-by: Davidlohr Bueso Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: Xie Yongji Cc: Yongji Xie Cc: andrea.parri@amarulasolutions.com Cc: lilin24@baidu.com Cc: liuqi16@baidu.com Cc: nixun@baidu.com Cc: yuanlinsi01@baidu.com Cc: zhangyu31@baidu.com Link: https://lkml.kernel.org/r/20181218195352.7orq3upiwfdbrdne@linux-r8p5 Signed-off-by: Ingo Molnar --- kernel/locking/rwsem-xadd.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 50d9af615dc4..fbe96341beee 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -211,9 +211,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, * Ensure issuing the wakeup (either by us or someone else) * after setting the reader waiter to nil. */ - wake_q_add(wake_q, tsk); - /* wake_q_add() already take the task ref */ - put_task_struct(tsk); + wake_q_add_safe(wake_q, tsk); } adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment; -- cgit
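A sketch contrasting the two call patterns, matching the rwsem hunk above (surrounding code elided; the caller is assumed to already hold a task reference):

    /* Old pattern: wake_q_add() takes its own task reference, so a
     * caller that already holds one immediately drops its own again: */
    wake_q_add(wake_q, tsk);
    put_task_struct(tsk);         /* wake_q_add() already took a ref */

    /* New pattern: wake_q_add_safe() consumes the reference the caller
     * already holds, eliminating the extra get/put pair; the reference
     * is then released by wake_up_q() at wakeup time: */
    wake_q_add_safe(wake_q, tsk);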
From d682b596d99345ef0000e7017db714ba7f29e017 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 29 Jan 2019 22:53:45 +0100 Subject: locking/qspinlock: Handle > 4 slowpath nesting levels Four queue nodes per CPU are allocated to enable up to 4 nesting levels using the per-CPU nodes. Nested NMIs are possible in some architectures. Still, it is very unlikely that we will ever hit more than 4 nested levels with contention in the slowpath. When that rare condition happens, however, it is likely that the system will hang or crash shortly afterwards. That is not good, so we need to handle this exceptional case. This is done by spinning directly on the lock using repeated trylock. This alternative code path should only be used when there are nested NMIs. Assuming that the locks used by those NMI handlers will not be heavily contended, simple test-and-set (TAS) locking should work out. Suggested-by: Peter Zijlstra Signed-off-by: Waiman Long Signed-off-by: Peter Zijlstra (Intel) Acked-by: Will Deacon Cc: Andrew Morton Cc: Borislav Petkov Cc: H. Peter Anvin Cc: James Morse Cc: Linus Torvalds Cc: Paul E. McKenney Cc: SRINIVAS Cc: Thomas Gleixner Cc: Zhenzhong Duan Link: https://lkml.kernel.org/r/1548798828-16156-2-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'kernel/locking') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 8a8c3c208c5e..0875053c4050 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -412,6 +412,21 @@ pv_queue: idx = node->count++; tail = encode_tail(smp_processor_id(), idx); + /* + * 4 nodes are allocated based on the assumption that there will + * not be nested NMIs taking spinlocks. That may not be true in + * some architectures even though the chance of needing more than + * 4 nodes will still be extremely unlikely. When that happens, + * we fall back to spinning on the lock directly without using + * any MCS node. This is not the most elegant solution, but is + * simple enough. + */ + if (unlikely(idx >= MAX_NODES)) { + while (!queued_spin_trylock(lock)) + cpu_relax(); + goto release; + } + node = grab_mcs_node(node, idx); /* -- cgit From 412f34a82ccf7dd52f6b197f6450a33f03342523 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 29 Jan 2019 22:53:46 +0100 Subject: locking/qspinlock_stat: Track the no MCS node available case Track the number of slowpath locking operations that are done without any MCS node available, and rename lock_index[123] to make those counters more descriptive. Using these stat counters is one way to find out if a code path is being exercised. Signed-off-by: Waiman Long Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Borislav Petkov Cc: H. Peter Anvin Cc: James Morse Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: SRINIVAS Cc: Thomas Gleixner Cc: Will Deacon Cc: Zhenzhong Duan Link: https://lkml.kernel.org/r/1548798828-16156-3-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 3 ++- kernel/locking/qspinlock_stat.h | 21 +++++++++++++++------ 2 files changed, 17 insertions(+), 7 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 0875053c4050..21ee51b47961 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -422,6 +422,7 @@ pv_queue: * simple enough.
*/ if (unlikely(idx >= MAX_NODES)) { + qstat_inc(qstat_lock_no_node, true); while (!queued_spin_trylock(lock)) cpu_relax(); goto release; @@ -432,7 +433,7 @@ pv_queue: /* * Keep counts of non-zero index values: */ - qstat_inc(qstat_lock_idx1 + idx - 1, idx); + qstat_inc(qstat_lock_use_node2 + idx - 1, idx); /* * Ensure that we increment the head node->count before initialising diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h index 42d3d8dc8f49..d73f85388d5c 100644 --- a/kernel/locking/qspinlock_stat.h +++ b/kernel/locking/qspinlock_stat.h @@ -30,6 +30,13 @@ * pv_wait_node - # of vCPU wait's at a non-head queue node * lock_pending - # of locking operations via pending code * lock_slowpath - # of locking operations via MCS lock queue + * lock_use_node2 - # of locking operations that use 2nd per-CPU node + * lock_use_node3 - # of locking operations that use 3rd per-CPU node + * lock_use_node4 - # of locking operations that use 4th per-CPU node + * lock_no_node - # of locking operations without using per-CPU node + * + * Subtracting lock_use_node[234] from lock_slowpath will give you + * lock_use_node1. * * Writing to the "reset_counters" file will reset all the above counter * values. @@ -55,9 +62,10 @@ enum qlock_stats { qstat_pv_wait_node, qstat_lock_pending, qstat_lock_slowpath, - qstat_lock_idx1, - qstat_lock_idx2, - qstat_lock_idx3, + qstat_lock_use_node2, + qstat_lock_use_node3, + qstat_lock_use_node4, + qstat_lock_no_node, qstat_num, /* Total number of statistical counters */ qstat_reset_cnts = qstat_num, }; @@ -85,9 +93,10 @@ static const char * const qstat_names[qstat_num + 1] = { [qstat_pv_wait_node] = "pv_wait_node", [qstat_lock_pending] = "lock_pending", [qstat_lock_slowpath] = "lock_slowpath", - [qstat_lock_idx1] = "lock_index1", - [qstat_lock_idx2] = "lock_index2", - [qstat_lock_idx3] = "lock_index3", + [qstat_lock_use_node2] = "lock_use_node2", + [qstat_lock_use_node3] = "lock_use_node3", + [qstat_lock_use_node4] = "lock_use_node4", + [qstat_lock_no_node] = "lock_no_node", [qstat_reset_cnts] = "reset_counters", }; -- cgit From 733000c7ffd9d9c8c4fdfd82f0d41956c8cf0537 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Sun, 24 Feb 2019 20:14:13 -0500 Subject: locking/qspinlock: Remove unnecessary BUG_ON() call With the > 4 nesting levels case handled by the commit: d682b596d993 ("locking/qspinlock: Handle > 4 slowpath nesting levels") the BUG_ON() call in encode_tail() will never actually be triggered. Remove it. Signed-off-by: Waiman Long Signed-off-by: Peter Zijlstra (Intel) Acked-by: Will Deacon Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/1551057253-3231-1-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 21ee51b47961..5e9247dc2515 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -124,9 +124,6 @@ static inline __pure u32 encode_tail(int cpu, int idx) { u32 tail; -#ifdef CONFIG_DEBUG_SPINLOCK - BUG_ON(idx > 3); -#endif tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET; tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */ -- cgit From 09d75ecb122d8b600d76e3b8d53a10ffbe3bcec2 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:36 -0800 Subject: locking/lockdep: Fix two 32-bit compiler warnings Use %zu to format size_t instead of %lu, so that the compiler does not complain about a mismatch between format specifier and argument on 32-bit systems. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-2-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 7f7db23fc002..5c5283bf499c 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -4266,7 +4266,7 @@ void __init lockdep_init(void) printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); - printk(" memory used by lock dependency info: %lu kB\n", + printk(" memory used by lock dependency info: %zu kB\n", (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + sizeof(struct list_head) * CLASSHASH_SIZE + sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + @@ -4278,7 +4278,7 @@ void __init lockdep_init(void) ) / 1024 ); - printk(" per task-struct memory footprint: %lu bytes\n", + printk(" per task-struct memory footprint: %zu bytes\n", sizeof(struct held_lock) * MAX_LOCK_DEPTH); } -- cgit From 7ff8517e1034f26dde03d6df4026f085480408f0 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:37 -0800 Subject: locking/lockdep: Fix reported required memory size (1/2) Change the sizeof(array element type) * (array size) expressions into sizeof(array). This fixes the size computations of the classhash_table[] and chainhash_table[] arrays. The reason is that commit: a63f38cc4ccf ("locking/lockdep: Convert hash tables to hlists") changed the type of the elements of those arrays from 'struct list_head' into 'struct hlist_head'. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E.
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-3-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 5c5283bf499c..57a523f0273c 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -4267,19 +4267,19 @@ void __init lockdep_init(void) printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); printk(" memory used by lock dependency info: %zu kB\n", - (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + - sizeof(struct list_head) * CLASSHASH_SIZE + - sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + - sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + - sizeof(struct list_head) * CHAINHASH_SIZE + (sizeof(lock_classes) + + sizeof(classhash_table) + + sizeof(list_entries) + + sizeof(lock_chains) + + sizeof(chainhash_table) #ifdef CONFIG_PROVE_LOCKING - + sizeof(struct circular_queue) + + sizeof(lock_cq) #endif ) / 1024 ); printk(" per task-struct memory footprint: %zu bytes\n", - sizeof(struct held_lock) * MAX_LOCK_DEPTH); + sizeof(((struct task_struct *)NULL)->held_locks)); } static void -- cgit From 15ea86b58c71d05e0921bebcf707aa30e43e9e25 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:38 -0800 Subject: locking/lockdep: Fix reported required memory size (2/2) Lock chains are only tracked with CONFIG_PROVE_LOCKING=y. Do not report the memory required for the lock chain array if CONFIG_PROVE_LOCKING=n. See also commit: ca58abcb4a6d ("lockdep: sanitise CONFIG_PROVE_LOCKING") Include the size of the chain_hlocks[] array. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-4-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 57a523f0273c..ec6f6aff4d8d 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -4270,10 +4270,11 @@ void __init lockdep_init(void) (sizeof(lock_classes) + sizeof(classhash_table) + sizeof(list_entries) + - sizeof(lock_chains) + sizeof(chainhash_table) #ifdef CONFIG_PROVE_LOCKING + sizeof(lock_cq) + + sizeof(lock_chains) + + sizeof(chain_hlocks) #endif ) / 1024 ); -- cgit From 523b113bace5e64e860d8c61d7aa25057d274753 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:39 -0800 Subject: locking/lockdep: Avoid that add_chain_cache() adds an invalid chain to the cache Make sure that add_chain_cache() returns 0 and does not modify the chain hash if nr_chain_hlocks == MAX_LOCKDEP_CHAIN_HLOCKS before this function is called. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-5-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index ec6f6aff4d8d..21d84510e28f 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2195,16 +2195,8 @@ static inline int add_chain_cache(struct task_struct *curr, chain_hlocks[chain->base + j] = lock_id; } chain_hlocks[chain->base + j] = class - lock_classes; - } - - if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS) nr_chain_hlocks += chain->depth; - -#ifdef CONFIG_DEBUG_LOCKDEP - /* - * Important for check_no_collision(). - */ - if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) { + } else { if (!debug_locks_off_graph_unlock()) return 0; @@ -2212,7 +2204,6 @@ static inline int add_chain_cache(struct task_struct *curr, dump_stack(); return 0; } -#endif hlist_add_head_rcu(&chain->entry, hash_head); debug_atomic_inc(chain_lookup_misses); -- cgit From 86cffb80a525f7b8f969c8c79669d383e02f17d1 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:41 -0800 Subject: locking/lockdep: Make zap_class() remove all matching lock order entries Make sure that all lock order entries that refer to a class are removed from the list_entries[] array when a kernel module is unloaded. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-7-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 21d84510e28f..28fbeb2a10cc 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -859,7 +859,8 @@ static struct lock_list *alloc_list_entry(void) /* * Add a new dependency to the head of the list: */ -static int add_lock_to_list(struct lock_class *this, struct list_head *head, +static int add_lock_to_list(struct lock_class *this, + struct lock_class *links_to, struct list_head *head, unsigned long ip, int distance, struct stack_trace *trace) { @@ -873,6 +874,7 @@ static int add_lock_to_list(struct lock_class *this, struct list_head *head, return 0; entry->class = this; + entry->links_to = links_to; entry->distance = distance; entry->trace = *trace; /* @@ -1907,14 +1909,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, * Ok, all validations passed, add the new lock * to the previous lock's dependency list: */ - ret = add_lock_to_list(hlock_class(next), + ret = add_lock_to_list(hlock_class(next), hlock_class(prev), &hlock_class(prev)->locks_after, next->acquire_ip, distance, trace); if (!ret) return 0; - ret = add_lock_to_list(hlock_class(prev), + ret = add_lock_to_list(hlock_class(prev), hlock_class(next), &hlock_class(next)->locks_before, next->acquire_ip, distance, trace); if (!ret) @@ -4107,15 +4109,20 @@ void lockdep_reset(void) */ static void zap_class(struct lock_class *class) { + struct lock_list *entry; int i; /* * Remove all dependencies this lock is * involved in: */ - 
for (i = 0; i < nr_list_entries; i++) { - if (list_entries[i].class == class) - list_del_rcu(&list_entries[i].entry); + for (i = 0, entry = list_entries; i < nr_list_entries; i++, entry++) { + if (entry->class != class && entry->links_to != class) + continue; + list_del_rcu(&entry->entry); + /* Clear .class and .links_to to avoid double removal. */ + WRITE_ONCE(entry->class, NULL); + WRITE_ONCE(entry->links_to, NULL); } /* * Unhash the class and remove it from the all_lock_classes list: -- cgit From feb0a3865ed2f7d66a1f2686f7ad784422c249ad Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:42 -0800 Subject: locking/lockdep: Initialize the locks_before and locks_after lists earlier This patch does not change any functionality. A later patch will reuse lock classes that have been freed. In combination with that patch, this patch will have the effect of initializing lock class order lists once instead of every time a lock class structure is reinitialized. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-8-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 28fbeb2a10cc..d1a6daf1f51f 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -735,6 +735,25 @@ static bool assign_lock_key(struct lockdep_map *lock) return true; } +/* + * Initialize the lock_classes[] array elements. + */ +static void init_data_structures_once(void) +{ + static bool initialization_happened; + int i; + + if (likely(initialization_happened)) + return; + + initialization_happened = true; + + for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { + INIT_LIST_HEAD(&lock_classes[i].locks_after); + INIT_LIST_HEAD(&lock_classes[i].locks_before); + } +} + /* * Register a lock's class in the hash-table, if the class is not present * yet. Otherwise we look it up.
We cache the result in the lock object @@ -775,6 +794,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) goto out_unlock_set; } + init_data_structures_once(); + /* * Allocate a new key from the static array, and add it to * the hash: @@ -793,8 +814,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) class->key = key; class->name = lock->name; class->subclass = subclass; - INIT_LIST_HEAD(&class->locks_before); - INIT_LIST_HEAD(&class->locks_after); + WARN_ON_ONCE(!list_empty(&class->locks_before)); + WARN_ON_ONCE(!list_empty(&class->locks_after)); class->name_version = count_matching_names(class); /* * We use RCU's safe list-add method to make @@ -4155,6 +4176,8 @@ void lockdep_free_key_range(void *start, unsigned long size) int i; int locked; + init_data_structures_once(); + raw_local_irq_save(flags); locked = graph_lock(); @@ -4218,6 +4241,8 @@ void lockdep_reset_lock(struct lockdep_map *lock) unsigned long flags; int j, locked; + init_data_structures_once(); + raw_local_irq_save(flags); locked = graph_lock(); -- cgit From 956f3563a8387beb7758f2e8ee483639ef91afc6 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:43 -0800 Subject: locking/lockdep: Split lockdep_free_key_range() and lockdep_reset_lock() This patch does not change the behavior of these functions but makes the patch that frees unused lock classes easier to read. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-9-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 72 ++++++++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 36 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index d1a6daf1f51f..2d4c21a02546 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -4160,6 +4160,24 @@ static inline int within(const void *addr, void *start, unsigned long size) return addr >= start && addr < start + size; } +static void __lockdep_free_key_range(void *start, unsigned long size) +{ + struct lock_class *class; + struct hlist_head *head; + int i; + + /* Unhash all classes that were created by a module. */ + for (i = 0; i < CLASSHASH_SIZE; i++) { + head = classhash_table + i; + hlist_for_each_entry_rcu(class, head, hash_entry) { + if (!within(class->key, start, size) && + !within(class->name, start, size)) + continue; + zap_class(class); + } + } +} + /* * Used in module.c to remove lock classes from memory that is going to be * freed; and possibly re-used by other modules. 
@@ -4170,30 +4188,14 @@ static inline int within(const void *addr, void *start, unsigned long size) */ void lockdep_free_key_range(void *start, unsigned long size) { - struct lock_class *class; - struct hlist_head *head; unsigned long flags; - int i; int locked; init_data_structures_once(); raw_local_irq_save(flags); locked = graph_lock(); - - /* - * Unhash all classes that were created by this module: - */ - for (i = 0; i < CLASSHASH_SIZE; i++) { - head = classhash_table + i; - hlist_for_each_entry_rcu(class, head, hash_entry) { - if (within(class->key, start, size)) - zap_class(class); - else if (within(class->name, start, size)) - zap_class(class); - } - } - + __lockdep_free_key_range(start, size); if (locked) graph_unlock(); raw_local_irq_restore(flags); @@ -4235,16 +4237,11 @@ static bool lock_class_cache_is_registered(struct lockdep_map *lock) return false; } -void lockdep_reset_lock(struct lockdep_map *lock) +/* The caller must hold the graph lock. Does not sleep. */ +static void __lockdep_reset_lock(struct lockdep_map *lock) { struct lock_class *class; - unsigned long flags; - int j, locked; - - init_data_structures_once(); - - raw_local_irq_save(flags); - locked = graph_lock(); + int j; /* * Remove all classes this lock might have: @@ -4261,19 +4258,22 @@ void lockdep_reset_lock(struct lockdep_map *lock) * Debug check: in the end all mapped classes should * be gone. */ - if (unlikely(lock_class_cache_is_registered(lock))) { - if (debug_locks_off_graph_unlock()) { - /* - * We all just reset everything, how did it match? - */ - WARN_ON(1); - } - goto out_restore; - } + if (WARN_ON_ONCE(lock_class_cache_is_registered(lock))) + debug_locks_off(); +} + +void lockdep_reset_lock(struct lockdep_map *lock) +{ + unsigned long flags; + int locked; + + init_data_structures_once(); + + raw_local_irq_save(flags); + locked = graph_lock(); + __lockdep_reset_lock(lock); if (locked) graph_unlock(); - -out_restore: raw_local_irq_restore(flags); } -- cgit From cdc84d794947b5431c0a6916c303aee7114819d2 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:44 -0800 Subject: locking/lockdep: Make it easy to detect whether or not inside a selftest The patch that frees unused lock classes will modify the behavior of lockdep_free_key_range() and lockdep_reset_lock() depending on whether or not these functions are called from the context of the lockdep selftests. Hence make it easy to detect whether or not lockdep code is called from the context of a lockdep selftest. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-10-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 2d4c21a02546..34cd87c65f5d 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -81,6 +81,7 @@ module_param(lock_stat, int, 0644); * code to recurse back into the lockdep code... 
*/ static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; +static struct task_struct *lockdep_selftest_task_struct; static int graph_lock(void) { @@ -331,6 +332,11 @@ void lockdep_on(void) } EXPORT_SYMBOL(lockdep_on); +void lockdep_set_selftest_task(struct task_struct *task) +{ + lockdep_selftest_task_struct = task; +} + /* * Debugging switches: */ -- cgit From 29fc33fb7283970701355dc89badba4ed21c7092 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:45 -0800 Subject: locking/lockdep: Update two outdated comments synchronize_sched() has been removed recently. Update the comments that refer to synchronize_sched(). Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Fixes: 51959d85f32d ("lockdep: Replace synchronize_sched() with synchronize_rcu()") # v5.0-rc1 Link: https://lkml.kernel.org/r/20190214230058.196511-11-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 34cd87c65f5d..c7ca3a4def7e 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -4188,9 +4188,9 @@ static void __lockdep_free_key_range(void *start, unsigned long size) * Used in module.c to remove lock classes from memory that is going to be * freed; and possibly re-used by other modules. * - * We will have had one sync_sched() before getting here, so we're guaranteed - * nobody will look up these exact classes -- they're properly dead but still - * allocated. + * We will have had one synchronize_rcu() before getting here, so we're + * guaranteed nobody will look up these exact classes -- they're properly dead + * but still allocated. */ void lockdep_free_key_range(void *start, unsigned long size) { @@ -4209,8 +4209,6 @@ void lockdep_free_key_range(void *start, unsigned long size) /* * Wait for any possible iterators from look_up_lock_class() to pass * before continuing to free the memory they refer to. - * - * sync_sched() is sufficient because the read-side is IRQ disable. */ synchronize_rcu(); -- cgit From a0b0fd53e1e67639b303b15939b9c653dbe7a8c4 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:46 -0800 Subject: locking/lockdep: Free lock classes that are no longer in use Instead of leaving lock classes that are no longer in use in the lock_classes array, reuse entries from that array that are no longer in use. Maintain a linked list of free lock classes with list head 'free_lock_classes'. Only add freed lock classes to the free_lock_classes list after a grace period, to avoid a lock_classes[] element being reused while an RCU reader is still accessing it. Since the lockdep selftests run in a context where sleeping is not allowed, and since the selftests require that lock resetting/zapping works with debug_locks off, make the behavior of lockdep_free_key_range() and lockdep_reset_lock() depend on whether or not these are called from the context of the lockdep selftests. Thanks to Peter for having shown how to modify get_pending_free() so that it does not have to sleep. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E.
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-12-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 396 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 348 insertions(+), 48 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index c7ca3a4def7e..8ecf355dd163 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -50,6 +50,7 @@ #include #include #include +#include #include @@ -135,8 +136,8 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; /* * All data structures here are protected by the global debug_lock. * - * Mutex key structs only get allocated, once during bootup, and never - * get freed - this significantly simplifies the debugging code. + * nr_lock_classes is the number of elements of lock_classes[] that is + * in use. */ unsigned long nr_lock_classes; #ifndef CONFIG_DEBUG_LOCKDEP @@ -278,11 +279,39 @@ static inline void lock_release_holdtime(struct held_lock *hlock) #endif /* - * We keep a global list of all lock classes. The list only grows, - * never shrinks. The list is only accessed with the lockdep - * spinlock lock held. + * We keep a global list of all lock classes. The list is only accessed with + * the lockdep spinlock lock held. free_lock_classes is a list with free + * elements. These elements are linked together by the lock_entry member in + * struct lock_class. */ LIST_HEAD(all_lock_classes); +static LIST_HEAD(free_lock_classes); + +/** + * struct pending_free - information about data structures about to be freed + * @zapped: Head of a list with struct lock_class elements. + */ +struct pending_free { + struct list_head zapped; +}; + +/** + * struct delayed_free - data structures used for delayed freeing + * + * A data structure for delayed freeing of data structures that may be + * accessed by RCU readers at the time these were freed. + * + * @rcu_head: Used to schedule an RCU callback for freeing data structures. + * @index: Index of @pf to which freed data structures are added. + * @scheduled: Whether or not an RCU callback has been scheduled. + * @pf: Array with information about data structures about to be freed. + */ +static struct delayed_free { + struct rcu_head rcu_head; + int index; + int scheduled; + struct pending_free pf[2]; +} delayed_free; /* * The lockdep classes are in a hash-table as well, for fast lookup: @@ -742,7 +771,8 @@ static bool assign_lock_key(struct lockdep_map *lock) } /* - * Initialize the lock_classes[] array elements. + * Initialize the lock_classes[] array elements, the free_lock_classes list + * and also the delayed_free structure. 
*/ static void init_data_structures_once(void) { @@ -754,7 +784,12 @@ static void init_data_structures_once(void) initialization_happened = true; + init_rcu_head(&delayed_free.rcu_head); + INIT_LIST_HEAD(&delayed_free.pf[0].zapped); + INIT_LIST_HEAD(&delayed_free.pf[1].zapped); + for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { + list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes); INIT_LIST_HEAD(&lock_classes[i].locks_after); INIT_LIST_HEAD(&lock_classes[i].locks_before); } @@ -802,11 +837,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) init_data_structures_once(); - /* - * Allocate a new key from the static array, and add it to - * the hash: - */ - if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { + /* Allocate a new lock class and add it to the hash. */ + class = list_first_entry_or_null(&free_lock_classes, typeof(*class), + lock_entry); + if (!class) { if (!debug_locks_off_graph_unlock()) { return NULL; } @@ -815,7 +849,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) dump_stack(); return NULL; } - class = lock_classes + nr_lock_classes++; + nr_lock_classes++; debug_atomic_inc(nr_unused_locks); class->key = key; class->name = lock->name; @@ -829,9 +863,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) */ hlist_add_head_rcu(&class->hash_entry, hash_head); /* - * Add it to the global list of classes: + * Remove the class from the free list and add it to the global list + * of classes. */ - list_add_tail(&class->lock_entry, &all_lock_classes); + list_move_tail(&class->lock_entry, &all_lock_classes); if (verbose(class)) { graph_unlock(); @@ -1860,6 +1895,24 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, struct lock_list this; int ret; + if (!hlock_class(prev)->key || !hlock_class(next)->key) { + /* + * The warning statements below may trigger a use-after-free + * of the class name. It is better to trigger a use-after free + * and to have the class name most of the time instead of not + * having the class name available. + */ + WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key, + "Detected use-after-free of lock class %px/%s\n", + hlock_class(prev), + hlock_class(prev)->name); + WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key, + "Detected use-after-free of lock class %px/%s\n", + hlock_class(next), + hlock_class(next)->name); + return 2; + } + /* * Prove that the new -> dependency would not * create a circular dependency in the graph. (We do this by @@ -2242,19 +2295,16 @@ static inline int add_chain_cache(struct task_struct *curr, } /* - * Look up a dependency chain. + * Look up a dependency chain. Must be called with either the graph lock or + * the RCU read lock held. 
*/ static inline struct lock_chain *lookup_chain_cache(u64 chain_key) { struct hlist_head *hash_head = chainhashentry(chain_key); struct lock_chain *chain; - /* - * We can walk it lock-free, because entries only get added - * to the hash: - */ hlist_for_each_entry_rcu(chain, hash_head, entry) { - if (chain->chain_key == chain_key) { + if (READ_ONCE(chain->chain_key) == chain_key) { debug_atomic_inc(chain_lookup_hits); return chain; } @@ -3337,6 +3387,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, if (nest_lock && !__lock_is_held(nest_lock, -1)) return print_lock_nested_lock_not_held(curr, hlock, ip); + if (!debug_locks_silent) { + WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key); + WARN_ON_ONCE(!hlock_class(hlock)->key); + } + if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) return 0; @@ -4131,14 +4186,92 @@ void lockdep_reset(void) raw_local_irq_restore(flags); } +/* Remove a class from a lock chain. Must be called with the graph lock held. */ +static void remove_class_from_lock_chain(struct lock_chain *chain, + struct lock_class *class) +{ +#ifdef CONFIG_PROVE_LOCKING + struct lock_chain *new_chain; + u64 chain_key; + int i; + + for (i = chain->base; i < chain->base + chain->depth; i++) { + if (chain_hlocks[i] != class - lock_classes) + continue; + /* The code below leaks one chain_hlock[] entry. */ + if (--chain->depth > 0) + memmove(&chain_hlocks[i], &chain_hlocks[i + 1], + (chain->base + chain->depth - i) * + sizeof(chain_hlocks[0])); + /* + * Each lock class occurs at most once in a lock chain so once + * we found a match we can break out of this loop. + */ + goto recalc; + } + /* Since the chain has not been modified, return. */ + return; + +recalc: + chain_key = 0; + for (i = chain->base; i < chain->base + chain->depth; i++) + chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1); + if (chain->depth && chain->chain_key == chain_key) + return; + /* Overwrite the chain key for concurrent RCU readers. */ + WRITE_ONCE(chain->chain_key, chain_key); + /* + * Note: calling hlist_del_rcu() from inside a + * hlist_for_each_entry_rcu() loop is safe. + */ + hlist_del_rcu(&chain->entry); + if (chain->depth == 0) + return; + /* + * If the modified lock chain matches an existing lock chain, drop + * the modified lock chain. + */ + if (lookup_chain_cache(chain_key)) + return; + if (WARN_ON_ONCE(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { + debug_locks_off(); + return; + } + /* + * Leak *chain because it is not safe to reinsert it before an RCU + * grace period has expired. + */ + new_chain = lock_chains + nr_lock_chains++; + *new_chain = *chain; + hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key)); +#endif +} + +/* Must be called with the graph lock held. */ +static void remove_class_from_lock_chains(struct lock_class *class) +{ + struct lock_chain *chain; + struct hlist_head *head; + int i; + + for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) { + head = chainhash_table + i; + hlist_for_each_entry_rcu(chain, head, entry) { + remove_class_from_lock_chain(chain, class); + } + } +} + /* * Remove all references to a lock class. The caller must hold the graph lock. 
*/ -static void zap_class(struct lock_class *class) +static void zap_class(struct pending_free *pf, struct lock_class *class) { struct lock_list *entry; int i; + WARN_ON_ONCE(!class->key); + /* * Remove all dependencies this lock is * involved in: @@ -4151,14 +4284,33 @@ static void zap_class(struct lock_class *class) WRITE_ONCE(entry->class, NULL); WRITE_ONCE(entry->links_to, NULL); } - /* - * Unhash the class and remove it from the all_lock_classes list: - */ - hlist_del_rcu(&class->hash_entry); - list_del(&class->lock_entry); + if (list_empty(&class->locks_after) && + list_empty(&class->locks_before)) { + list_move_tail(&class->lock_entry, &pf->zapped); + hlist_del_rcu(&class->hash_entry); + WRITE_ONCE(class->key, NULL); + WRITE_ONCE(class->name, NULL); + nr_lock_classes--; + } else { + WARN_ONCE(true, "%s() failed for class %s\n", __func__, + class->name); + } - RCU_INIT_POINTER(class->key, NULL); - RCU_INIT_POINTER(class->name, NULL); + remove_class_from_lock_chains(class); +} + +static void reinit_class(struct lock_class *class) +{ + void *const p = class; + const unsigned int offset = offsetof(struct lock_class, key); + + WARN_ON_ONCE(!class->lock_entry.next); + WARN_ON_ONCE(!list_empty(&class->locks_after)); + WARN_ON_ONCE(!list_empty(&class->locks_before)); + memset(p + offset, 0, sizeof(*class) - offset); + WARN_ON_ONCE(!class->lock_entry.next); + WARN_ON_ONCE(!list_empty(&class->locks_after)); + WARN_ON_ONCE(!list_empty(&class->locks_before)); } static inline int within(const void *addr, void *start, unsigned long size) @@ -4166,7 +4318,87 @@ static inline int within(const void *addr, void *start, unsigned long size) return addr >= start && addr < start + size; } -static void __lockdep_free_key_range(void *start, unsigned long size) +static bool inside_selftest(void) +{ + return current == lockdep_selftest_task_struct; +} + +/* The caller must hold the graph lock. */ +static struct pending_free *get_pending_free(void) +{ + return delayed_free.pf + delayed_free.index; +} + +static void free_zapped_rcu(struct rcu_head *cb); + +/* + * Schedule an RCU callback if no RCU callback is pending. Must be called with + * the graph lock held. + */ +static void call_rcu_zapped(struct pending_free *pf) +{ + WARN_ON_ONCE(inside_selftest()); + + if (list_empty(&pf->zapped)) + return; + + if (delayed_free.scheduled) + return; + + delayed_free.scheduled = true; + + WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf); + delayed_free.index ^= 1; + + call_rcu(&delayed_free.rcu_head, free_zapped_rcu); +} + +/* The caller must hold the graph lock. May be called from RCU context. */ +static void __free_zapped_classes(struct pending_free *pf) +{ + struct lock_class *class; + + list_for_each_entry(class, &pf->zapped, lock_entry) + reinit_class(class); + + list_splice_init(&pf->zapped, &free_lock_classes); +} + +static void free_zapped_rcu(struct rcu_head *ch) +{ + struct pending_free *pf; + unsigned long flags; + + if (WARN_ON_ONCE(ch != &delayed_free.rcu_head)) + return; + + raw_local_irq_save(flags); + if (!graph_lock()) + goto out_irq; + + /* closed head */ + pf = delayed_free.pf + (delayed_free.index ^ 1); + __free_zapped_classes(pf); + delayed_free.scheduled = false; + + /* + * If there's anything on the open list, close and start a new callback. 
+ */ + call_rcu_zapped(delayed_free.pf + delayed_free.index); + + graph_unlock(); +out_irq: + raw_local_irq_restore(flags); +} + +/* + * Remove all lock classes from the class hash table and from the + * all_lock_classes list whose key or name is in the address range [start, + * start + size). Move these lock classes to the zapped_classes list. Must + * be called with the graph lock held. + */ +static void __lockdep_free_key_range(struct pending_free *pf, void *start, + unsigned long size) { struct lock_class *class; struct hlist_head *head; @@ -4179,7 +4411,7 @@ static void __lockdep_free_key_range(void *start, unsigned long size) if (!within(class->key, start, size) && !within(class->name, start, size)) continue; - zap_class(class); + zap_class(pf, class); } } } @@ -4192,8 +4424,9 @@ static void __lockdep_free_key_range(void *start, unsigned long size) * guaranteed nobody will look up these exact classes -- they're properly dead * but still allocated. */ -void lockdep_free_key_range(void *start, unsigned long size) +static void lockdep_free_key_range_reg(void *start, unsigned long size) { + struct pending_free *pf; unsigned long flags; int locked; @@ -4201,9 +4434,15 @@ void lockdep_free_key_range(void *start, unsigned long size) raw_local_irq_save(flags); locked = graph_lock(); - __lockdep_free_key_range(start, size); - if (locked) - graph_unlock(); + if (!locked) + goto out_irq; + + pf = get_pending_free(); + __lockdep_free_key_range(pf, start, size); + call_rcu_zapped(pf); + + graph_unlock(); +out_irq: raw_local_irq_restore(flags); /* @@ -4211,12 +4450,35 @@ void lockdep_free_key_range(void *start, unsigned long size) * before continuing to free the memory they refer to. */ synchronize_rcu(); +} - /* - * XXX at this point we could return the resources to the pool; - * instead we leak them. We would need to change to bitmap allocators - * instead of the linear allocators we have now. - */ +/* + * Free all lockdep keys in the range [start, start+size). Does not sleep. + * Ignores debug_locks. Must only be used by the lockdep selftests. + */ +static void lockdep_free_key_range_imm(void *start, unsigned long size) +{ + struct pending_free *pf = delayed_free.pf; + unsigned long flags; + + init_data_structures_once(); + + raw_local_irq_save(flags); + arch_spin_lock(&lockdep_lock); + __lockdep_free_key_range(pf, start, size); + __free_zapped_classes(pf); + arch_spin_unlock(&lockdep_lock); + raw_local_irq_restore(flags); +} + +void lockdep_free_key_range(void *start, unsigned long size) +{ + init_data_structures_once(); + + if (inside_selftest()) + lockdep_free_key_range_imm(start, size); + else + lockdep_free_key_range_reg(start, size); } /* @@ -4242,7 +4504,8 @@ static bool lock_class_cache_is_registered(struct lockdep_map *lock) } /* The caller must hold the graph lock. Does not sleep. */ -static void __lockdep_reset_lock(struct lockdep_map *lock) +static void __lockdep_reset_lock(struct pending_free *pf, + struct lockdep_map *lock) { struct lock_class *class; int j; @@ -4256,7 +4519,7 @@ static void __lockdep_reset_lock(struct lockdep_map *lock) */ class = look_up_lock_class(lock, j); if (class) - zap_class(class); + zap_class(pf, class); } /* * Debug check: in the end all mapped classes should @@ -4266,21 +4529,57 @@ static void __lockdep_reset_lock(struct lockdep_map *lock) debug_locks_off(); } -void lockdep_reset_lock(struct lockdep_map *lock) +/* + * Remove all information lockdep has about a lock if debug_locks == 1. Free + * released data structures from RCU context. 
+ */ +static void lockdep_reset_lock_reg(struct lockdep_map *lock) { + struct pending_free *pf; unsigned long flags; int locked; - init_data_structures_once(); - raw_local_irq_save(flags); locked = graph_lock(); - __lockdep_reset_lock(lock); - if (locked) - graph_unlock(); + if (!locked) + goto out_irq; + + pf = get_pending_free(); + __lockdep_reset_lock(pf, lock); + call_rcu_zapped(pf); + + graph_unlock(); +out_irq: + raw_local_irq_restore(flags); +} + +/* + * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the + * lockdep selftests. + */ +static void lockdep_reset_lock_imm(struct lockdep_map *lock) +{ + struct pending_free *pf = delayed_free.pf; + unsigned long flags; + + raw_local_irq_save(flags); + arch_spin_lock(&lockdep_lock); + __lockdep_reset_lock(pf, lock); + __free_zapped_classes(pf); + arch_spin_unlock(&lockdep_lock); raw_local_irq_restore(flags); } +void lockdep_reset_lock(struct lockdep_map *lock) +{ + init_data_structures_once(); + + if (inside_selftest()) + lockdep_reset_lock_imm(lock); + else + lockdep_reset_lock_reg(lock); +} + void __init lockdep_init(void) { printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); @@ -4297,7 +4596,8 @@ void __init lockdep_init(void) (sizeof(lock_classes) + sizeof(classhash_table) + sizeof(list_entries) + - sizeof(chainhash_table) + sizeof(chainhash_table) + + sizeof(delayed_free) #ifdef CONFIG_PROVE_LOCKING + sizeof(lock_cq) + sizeof(lock_chains) -- cgit From ace35a7ac493d4284a57ad807579011bebba891c Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:47 -0800 Subject: locking/lockdep: Reuse list entries that are no longer in use Instead of abandoning elements of list_entries[] that are no longer in use, make alloc_list_entry() reuse array elements that have been freed. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-13-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 8ecf355dd163..2c6d0b67e7b6 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -132,6 +133,7 @@ static inline int debug_locks_off_graph_unlock(void) unsigned long nr_list_entries; static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; +static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES); /* * All data structures here are protected by the global debug_lock. 
@@ -907,7 +909,10 @@ out_set_class_cache: */ static struct lock_list *alloc_list_entry(void) { - if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) { + int idx = find_first_zero_bit(list_entries_in_use, + ARRAY_SIZE(list_entries)); + + if (idx >= ARRAY_SIZE(list_entries)) { if (!debug_locks_off_graph_unlock()) return NULL; @@ -915,7 +920,9 @@ static struct lock_list *alloc_list_entry(void) dump_stack(); return NULL; } - return list_entries + nr_list_entries++; + nr_list_entries++; + __set_bit(idx, list_entries_in_use); + return list_entries + idx; } /* @@ -1019,7 +1026,7 @@ static inline void mark_lock_accessed(struct lock_list *lock, unsigned long nr; nr = lock - list_entries; - WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ + WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */ lock->parent = parent; lock->class->dep_gen_id = lockdep_dependency_gen_id; } @@ -1029,7 +1036,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock) unsigned long nr; nr = lock - list_entries; - WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ + WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */ return lock->class->dep_gen_id == lockdep_dependency_gen_id; } @@ -4276,13 +4283,13 @@ static void zap_class(struct pending_free *pf, struct lock_class *class) * Remove all dependencies this lock is * involved in: */ - for (i = 0, entry = list_entries; i < nr_list_entries; i++, entry++) { + for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { + entry = list_entries + i; if (entry->class != class && entry->links_to != class) continue; + __clear_bit(i, list_entries_in_use); + nr_list_entries--; list_del_rcu(&entry->entry); - /* Clear .class and .links_to to avoid double removal. */ - WRITE_ONCE(entry->class, NULL); - WRITE_ONCE(entry->links_to, NULL); } if (list_empty(&class->locks_after) && list_empty(&class->locks_before)) { @@ -4596,6 +4603,7 @@ void __init lockdep_init(void) (sizeof(lock_classes) + sizeof(classhash_table) + sizeof(list_entries) + + sizeof(list_entries_in_use) + sizeof(chainhash_table) + sizeof(delayed_free) #ifdef CONFIG_PROVE_LOCKING -- cgit From 2212684adff79e2704a2792ff46682afb9246fc8 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:48 -0800 Subject: locking/lockdep: Introduce lockdep_next_lockchain() and lock_chain_count() This patch does not change any functionality but makes the next patch in this series easier to read. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-14-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 16 +++++++++++++++- kernel/locking/lockdep_internals.h | 3 ++- kernel/locking/lockdep_proc.c | 12 ++++++------ 3 files changed, 23 insertions(+), 8 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 2c6d0b67e7b6..753a9b758266 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2096,7 +2096,7 @@ out_bug: return 0; } -unsigned long nr_lock_chains; +static unsigned long nr_lock_chains; struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; int nr_chain_hlocks; static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; @@ -2230,6 +2230,20 @@ static int check_no_collision(struct task_struct *curr, return 1; } +/* + * Given an index that is >= -1, return the index of the next lock chain. + * Return -2 if there is no next lock chain. + */ +long lockdep_next_lockchain(long i) +{ + return i + 1 < nr_lock_chains ? i + 1 : -2; +} + +unsigned long lock_chain_count(void) +{ + return nr_lock_chains; +} + /* * Adds a dependency chain into chain hashtable. And must be called with * graph_lock held. diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index 2ebb9d0ea91c..d4c197425f68 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -100,7 +100,8 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i); extern unsigned long nr_lock_classes; extern unsigned long nr_list_entries; -extern unsigned long nr_lock_chains; +long lockdep_next_lockchain(long i); +unsigned long lock_chain_count(void); extern int nr_chain_hlocks; extern unsigned long nr_stack_trace_entries; diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 3d31f9b0059e..9c49ec645d8b 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -104,18 +104,18 @@ static const struct seq_operations lockdep_ops = { #ifdef CONFIG_PROVE_LOCKING static void *lc_start(struct seq_file *m, loff_t *pos) { + if (*pos < 0) + return NULL; + if (*pos == 0) return SEQ_START_TOKEN; - if (*pos - 1 < nr_lock_chains) - return lock_chains + (*pos - 1); - - return NULL; + return lock_chains + (*pos - 1); } static void *lc_next(struct seq_file *m, void *v, loff_t *pos) { - (*pos)++; + *pos = lockdep_next_lockchain(*pos - 1) + 1; return lc_start(m, pos); } @@ -268,7 +268,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) #ifdef CONFIG_PROVE_LOCKING seq_printf(m, " dependency chains: %11lu [max: %lu]\n", - nr_lock_chains, MAX_LOCKDEP_CHAINS); + lock_chain_count(), MAX_LOCKDEP_CHAINS); seq_printf(m, " dependency chain hlocks: %11d [max: %lu]\n", nr_chain_hlocks, MAX_LOCKDEP_CHAIN_HLOCKS); #endif -- cgit From 527af3ea273b2cf0c017a2c90090b3c94af8aba4 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:49 -0800 Subject: locking/lockdep: Fix a comment in add_chain_cache() Reflect that add_chain_cache() is always called with the graph lock held. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-15-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 753a9b758266..ec0cb794f70d 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2266,7 +2266,7 @@ static inline int add_chain_cache(struct task_struct *curr, */ /* - * We might need to take the graph lock, ensure we've got IRQs + * The caller must hold the graph lock, ensure we've got IRQs * disabled to make this an IRQ-safe lock.. for recursion reasons * lockdep won't complain about its own locking errors. */ -- cgit From de4643a77356a77bce73f64275b125b4b71a69cf Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:50 -0800 Subject: locking/lockdep: Reuse lock chains that have been freed A previous patch introduced a lock chain leak. Fix that leak by reusing lock chains that have been freed. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-16-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 57 +++++++++++++++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 20 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index ec0cb794f70d..0bb204464afe 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -292,9 +292,12 @@ static LIST_HEAD(free_lock_classes); /** * struct pending_free - information about data structures about to be freed * @zapped: Head of a list with struct lock_class elements. + * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements + * are about to be freed. */ struct pending_free { struct list_head zapped; + DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS); }; /** @@ -2096,8 +2099,8 @@ out_bug: return 0; } -static unsigned long nr_lock_chains; struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; +static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS); int nr_chain_hlocks; static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; @@ -2236,12 +2239,25 @@ static int check_no_collision(struct task_struct *curr, */ long lockdep_next_lockchain(long i) { - return i + 1 < nr_lock_chains ? i + 1 : -2; + i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1); + return i < ARRAY_SIZE(lock_chains) ? i : -2; } unsigned long lock_chain_count(void) { - return nr_lock_chains; + return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains)); +} + +/* Must be called with the graph lock held. 
*/ +static struct lock_chain *alloc_lock_chain(void) +{ + int idx = find_first_zero_bit(lock_chains_in_use, + ARRAY_SIZE(lock_chains)); + + if (unlikely(idx >= ARRAY_SIZE(lock_chains))) + return NULL; + __set_bit(idx, lock_chains_in_use); + return lock_chains + idx; } /* @@ -2260,11 +2276,6 @@ static inline int add_chain_cache(struct task_struct *curr, struct lock_chain *chain; int i, j; - /* - * Allocate a new chain entry from the static array, and add - * it to the hash: - */ - /* * The caller must hold the graph lock, ensure we've got IRQs * disabled to make this an IRQ-safe lock.. for recursion reasons @@ -2273,7 +2284,8 @@ static inline int add_chain_cache(struct task_struct *curr, if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return 0; - if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { + chain = alloc_lock_chain(); + if (!chain) { if (!debug_locks_off_graph_unlock()) return 0; @@ -2281,7 +2293,6 @@ static inline int add_chain_cache(struct task_struct *curr, dump_stack(); return 0; } - chain = lock_chains + nr_lock_chains++; chain->chain_key = chain_key; chain->irq_context = hlock->irq_context; i = get_first_held_lock(curr, hlock); @@ -4208,7 +4219,8 @@ void lockdep_reset(void) } /* Remove a class from a lock chain. Must be called with the graph lock held. */ -static void remove_class_from_lock_chain(struct lock_chain *chain, +static void remove_class_from_lock_chain(struct pending_free *pf, + struct lock_chain *chain, struct lock_class *class) { #ifdef CONFIG_PROVE_LOCKING @@ -4246,6 +4258,7 @@ recalc: * hlist_for_each_entry_rcu() loop is safe. */ hlist_del_rcu(&chain->entry); + __set_bit(chain - lock_chains, pf->lock_chains_being_freed); if (chain->depth == 0) return; /* @@ -4254,22 +4267,19 @@ recalc: */ if (lookup_chain_cache(chain_key)) return; - if (WARN_ON_ONCE(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { + new_chain = alloc_lock_chain(); + if (WARN_ON_ONCE(!new_chain)) { debug_locks_off(); return; } - /* - * Leak *chain because it is not safe to reinsert it before an RCU - * grace period has expired. - */ - new_chain = lock_chains + nr_lock_chains++; *new_chain = *chain; hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key)); #endif } /* Must be called with the graph lock held. 
*/ -static void remove_class_from_lock_chains(struct lock_class *class) +static void remove_class_from_lock_chains(struct pending_free *pf, + struct lock_class *class) { struct lock_chain *chain; struct hlist_head *head; @@ -4278,7 +4288,7 @@ static void remove_class_from_lock_chains(struct lock_class *class) for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) { head = chainhash_table + i; hlist_for_each_entry_rcu(chain, head, entry) { - remove_class_from_lock_chain(chain, class); + remove_class_from_lock_chain(pf, chain, class); } } } @@ -4317,7 +4327,7 @@ static void zap_class(struct pending_free *pf, struct lock_class *class) class->name); } - remove_class_from_lock_chains(class); + remove_class_from_lock_chains(pf, class); } static void reinit_class(struct lock_class *class) @@ -4383,6 +4393,12 @@ static void __free_zapped_classes(struct pending_free *pf) reinit_class(class); list_splice_init(&pf->zapped, &free_lock_classes); + +#ifdef CONFIG_PROVE_LOCKING + bitmap_andnot(lock_chains_in_use, lock_chains_in_use, + pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains)); + bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains)); +#endif } static void free_zapped_rcu(struct rcu_head *ch) @@ -4623,6 +4639,7 @@ void __init lockdep_init(void) #ifdef CONFIG_PROVE_LOCKING + sizeof(lock_cq) + sizeof(lock_chains) + + sizeof(lock_chains_in_use) + sizeof(chain_hlocks) #endif ) / 1024 -- cgit From b526b2e39a53b312f5a6867ce57824247aa0ce8b Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:51 -0800 Subject: locking/lockdep: Check data structure consistency Debugging lockdep data structure inconsistencies is challenging. Add code that verifies data structure consistency at runtime. That code is disabled by default because it is very CPU intensive. Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-17-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 167 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 167 insertions(+) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 0bb204464afe..630be9ac6253 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -74,6 +74,8 @@ module_param(lock_stat, int, 0644); #define lock_stat 0 #endif +static bool check_data_structure_consistency; + /* * lockdep_lock: protects the lockdep graph, the hashes and the * class/list/hash allocators. @@ -775,6 +777,168 @@ static bool assign_lock_key(struct lockdep_map *lock) return true; } +/* Check whether element @e occurs in list @h */ +static bool in_list(struct list_head *e, struct list_head *h) +{ + struct list_head *f; + + list_for_each(f, h) { + if (e == f) + return true; + } + + return false; +} + +/* + * Check whether entry @e occurs in any of the locks_after or locks_before + * lists. 
+ */ +static bool in_any_class_list(struct list_head *e) +{ + struct lock_class *class; + int i; + + for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { + class = &lock_classes[i]; + if (in_list(e, &class->locks_after) || + in_list(e, &class->locks_before)) + return true; + } + return false; +} + +static bool class_lock_list_valid(struct lock_class *c, struct list_head *h) +{ + struct lock_list *e; + + list_for_each_entry(e, h, entry) { + if (e->links_to != c) { + printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s", + c->name ? : "(?)", + (unsigned long)(e - list_entries), + e->links_to && e->links_to->name ? + e->links_to->name : "(?)", + e->class && e->class->name ? e->class->name : + "(?)"); + return false; + } + } + return true; +} + +static u16 chain_hlocks[]; + +static bool check_lock_chain_key(struct lock_chain *chain) +{ +#ifdef CONFIG_PROVE_LOCKING + u64 chain_key = 0; + int i; + + for (i = chain->base; i < chain->base + chain->depth; i++) + chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1); + /* + * The 'unsigned long long' casts avoid that a compiler warning + * is reported when building tools/lib/lockdep. + */ + if (chain->chain_key != chain_key) + printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n", + (unsigned long long)(chain - lock_chains), + (unsigned long long)chain->chain_key, + (unsigned long long)chain_key); + return chain->chain_key == chain_key; +#else + return true; +#endif +} + +static bool in_any_zapped_class_list(struct lock_class *class) +{ + struct pending_free *pf; + int i; + + for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); + i++, pf++) + if (in_list(&class->lock_entry, &pf->zapped)) + return true; + + return false; +} + +static bool check_data_structures(void) +{ + struct lock_class *class; + struct lock_chain *chain; + struct hlist_head *head; + struct lock_list *e; + int i; + + /* Check whether all classes occur in a lock list. */ + for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { + class = &lock_classes[i]; + if (!in_list(&class->lock_entry, &all_lock_classes) && + !in_list(&class->lock_entry, &free_lock_classes) && + !in_any_zapped_class_list(class)) { + printk(KERN_INFO "class %px/%s is not in any class list\n", + class, class->name ? : "(?)"); + return false; + return false; + } + } + + /* Check whether all classes have valid lock lists. */ + for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { + class = &lock_classes[i]; + if (!class_lock_list_valid(class, &class->locks_before)) + return false; + if (!class_lock_list_valid(class, &class->locks_after)) + return false; + } + + /* Check the chain_key of all lock chains. */ + for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) { + head = chainhash_table + i; + hlist_for_each_entry_rcu(chain, head, entry) { + if (!check_lock_chain_key(chain)) + return false; + } + } + + /* + * Check whether all list entries that are in use occur in a class + * lock list. + */ + for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { + e = list_entries + i; + if (!in_any_class_list(&e->entry)) { + printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n", + (unsigned int)(e - list_entries), + e->class->name ? : "(?)", + e->links_to->name ? : "(?)"); + return false; + } + } + + /* + * Check whether all list entries that are not in use do not occur in + * a class lock list. 
+ */ + for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { + e = list_entries + i; + if (in_any_class_list(&e->entry)) { + printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n", + (unsigned int)(e - list_entries), + e->class && e->class->name ? e->class->name : + "(?)", + e->links_to && e->links_to->name ? + e->links_to->name : "(?)"); + return false; + } + } + + return true; +} + /* * Initialize the lock_classes[] array elements, the free_lock_classes list * and also the delayed_free structure. @@ -4389,6 +4553,9 @@ static void __free_zapped_classes(struct pending_free *pf) { struct lock_class *class; + if (check_data_structure_consistency) + WARN_ON_ONCE(!check_data_structures()); + list_for_each_entry(class, &pf->zapped, lock_entry) reinit_class(class); -- cgit From 4bf508621855613ca2ac782f70c3171e0e8bb011 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:52 -0800 Subject: locking/lockdep: Verify whether lock objects are small enough to be used as class keys Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-18-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 630be9ac6253..84427441824e 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -758,6 +758,17 @@ static bool assign_lock_key(struct lockdep_map *lock) { unsigned long can_addr, addr = (unsigned long)lock; +#ifdef __KERNEL__ + /* + * lockdep_free_key_range() assumes that struct lock_class_key + * objects do not overlap. Since we use the address of lock + * objects as class key for static objects, check whether the + * size of lock_class_key objects does not exceed the size of + * the smallest lock object. + */ + BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t)); +#endif + if (__is_kernel_percpu_address(addr, &can_addr)) lock->key = (void *)can_addr; else if (__is_module_percpu_address(addr, &can_addr)) -- cgit From 108c14858b9ea224686e476c8f5ec345a0df9e27 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 14 Feb 2019 15:00:53 -0800 Subject: locking/lockdep: Add support for dynamic keys A shortcoming of the current lockdep implementation is that it requires lock keys to be allocated statically. That forces all instances of lock objects that occur in a given data structure to share a lock key. Since lock dependency analysis groups lock objects per key sharing lock keys can cause false positive lockdep reports. Make it possible to avoid such false positive reports by allowing lock keys to be allocated dynamically. Require that dynamically allocated lock keys are registered before use by calling lockdep_register_key(). Complain about attempts to register the same lock key pointer twice without calling lockdep_unregister_key() between successive registration calls. The purpose of the new lock_keys_hash[] data structure that keeps track of all dynamic keys is twofold: - Verify whether the lockdep_register_key() and lockdep_unregister_key() functions are used correctly. - Avoid that lockdep_init_map() complains when encountering a dynamically allocated key. 
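As a usage illustration (not part of the patch: the foo_device structure and the foo_* functions below are hypothetical; only lockdep_register_key(), lockdep_set_class() and lockdep_unregister_key() are interfaces this patch and its header counterpart provide), a driver that wants one lock class per object instance could do:

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical per-instance object. */
struct foo_device {
	spinlock_t		lock;
	struct lock_class_key	key;	/* dynamically allocated key */
};

static struct foo_device *foo_create(void)
{
	struct foo_device *fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (!fd)
		return NULL;
	/* Register the key before any lock that uses it is acquired. */
	lockdep_register_key(&fd->key);
	spin_lock_init(&fd->lock);
	/* Give this instance its own class instead of one shared static key. */
	lockdep_set_class(&fd->lock, &fd->key);
	return fd;
}

static void foo_destroy(struct foo_device *fd)
{
	/* May sleep: unregistering waits for an RCU grace period. */
	lockdep_unregister_key(&fd->key);
	kfree(fd);
}

Each foo_device then gets its own lock class, so a dependency observed on one instance is not merged into, and falsely reported against, every other instance.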
Signed-off-by: Bart Van Assche Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Johannes Berg Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: johannes.berg@intel.com Cc: tj@kernel.org Link: https://lkml.kernel.org/r/20190214230058.196511-19-bvanassche@acm.org Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 121 +++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 113 insertions(+), 8 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 84427441824e..c73bc4334bee 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -143,6 +143,9 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES); * nr_lock_classes is the number of elements of lock_classes[] that is * in use. */ +#define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) +#define KEYHASH_SIZE (1UL << KEYHASH_BITS) +static struct hlist_head lock_keys_hash[KEYHASH_SIZE]; unsigned long nr_lock_classes; #ifndef CONFIG_DEBUG_LOCKDEP static @@ -641,7 +644,7 @@ static int very_verbose(struct lock_class *class) * Is this the address of a static object: */ #ifdef __KERNEL__ -static int static_obj(void *obj) +static int static_obj(const void *obj) { unsigned long start = (unsigned long) &_stext, end = (unsigned long) &_end, @@ -975,6 +978,71 @@ static void init_data_structures_once(void) } } +static inline struct hlist_head *keyhashentry(const struct lock_class_key *key) +{ + unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS); + + return lock_keys_hash + hash; +} + +/* Register a dynamically allocated key. */ +void lockdep_register_key(struct lock_class_key *key) +{ + struct hlist_head *hash_head; + struct lock_class_key *k; + unsigned long flags; + + if (WARN_ON_ONCE(static_obj(key))) + return; + hash_head = keyhashentry(key); + + raw_local_irq_save(flags); + if (!graph_lock()) + goto restore_irqs; + hlist_for_each_entry_rcu(k, hash_head, hash_entry) { + if (WARN_ON_ONCE(k == key)) + goto out_unlock; + } + hlist_add_head_rcu(&key->hash_entry, hash_head); +out_unlock: + graph_unlock(); +restore_irqs: + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(lockdep_register_key); + +/* Check whether a key has been registered as a dynamic key. */ +static bool is_dynamic_key(const struct lock_class_key *key) +{ + struct hlist_head *hash_head; + struct lock_class_key *k; + bool found = false; + + if (WARN_ON_ONCE(static_obj(key))) + return false; + + /* + * If lock debugging is disabled lock_keys_hash[] may contain + * pointers to memory that has already been freed. Avoid triggering + * a use-after-free in that case by returning early. + */ + if (!debug_locks) + return true; + + hash_head = keyhashentry(key); + + rcu_read_lock(); + hlist_for_each_entry_rcu(k, hash_head, hash_entry) { + if (k == key) { + found = true; + break; + } + } + rcu_read_unlock(); + + return found; +} + /* * Register a lock's class in the hash-table, if the class is not present * yet. Otherwise we look it up. 
We cache the result in the lock object @@ -996,7 +1064,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) if (!lock->key) { if (!assign_lock_key(lock)) return NULL; - } else if (!static_obj(lock->key)) { + } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) { return NULL; } @@ -3378,13 +3446,12 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, if (DEBUG_LOCKS_WARN_ON(!key)) return; /* - * Sanity check, the lock-class key must be persistent: + * Sanity check, the lock-class key must either have been allocated + * statically or must have been registered as a dynamic key. */ - if (!static_obj(key)) { - printk("BUG: key %px not in .data!\n", key); - /* - * What it says above ^^^^^, I suggest you read it. - */ + if (!static_obj(key) && !is_dynamic_key(key)) { + if (debug_locks) + printk(KERN_ERR "BUG: key %px has not been registered!\n", key); DEBUG_LOCKS_WARN_ON(1); return; } @@ -4795,6 +4862,44 @@ void lockdep_reset_lock(struct lockdep_map *lock) lockdep_reset_lock_reg(lock); } +/* Unregister a dynamically allocated key. */ +void lockdep_unregister_key(struct lock_class_key *key) +{ + struct hlist_head *hash_head = keyhashentry(key); + struct lock_class_key *k; + struct pending_free *pf; + unsigned long flags; + bool found = false; + + might_sleep(); + + if (WARN_ON_ONCE(static_obj(key))) + return; + + raw_local_irq_save(flags); + if (!graph_lock()) + goto out_irq; + + pf = get_pending_free(); + hlist_for_each_entry_rcu(k, hash_head, hash_entry) { + if (k == key) { + hlist_del_rcu(&k->hash_entry); + found = true; + break; + } + } + WARN_ON_ONCE(!found); + __lockdep_free_key_range(pf, key, 1); + call_rcu_zapped(pf); + graph_unlock(); +out_irq: + raw_local_irq_restore(flags); + + /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */ + synchronize_rcu(); +} +EXPORT_SYMBOL_GPL(lockdep_unregister_key); + void __init lockdep_init(void) { printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); -- cgit From 72dcd505e8585857397207f28782c8ba55e553b5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 Feb 2019 15:56:32 +0100 Subject: locking/lockdep: Add module_param to enable consistency checks And move the whole lot under CONFIG_DEBUG_LOCKDEP. Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 46 +++++++++++++++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 13 deletions(-) (limited to 'kernel/locking') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index c73bc4334bee..fda370ca529c 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -74,8 +74,6 @@ module_param(lock_stat, int, 0644); #define lock_stat 0 #endif -static bool check_data_structure_consistency; - /* * lockdep_lock: protects the lockdep graph, the hashes and the * class/list/hash allocators. @@ -791,6 +789,8 @@ static bool assign_lock_key(struct lockdep_map *lock) return true; } +#ifdef CONFIG_DEBUG_LOCKDEP + /* Check whether element @e occurs in list @h */ static bool in_list(struct list_head *e, struct list_head *h) { @@ -855,15 +855,15 @@ static bool check_lock_chain_key(struct lock_chain *chain) * The 'unsigned long long' casts avoid that a compiler warning * is reported when building tools/lib/lockdep. 
*/ - if (chain->chain_key != chain_key) + if (chain->chain_key != chain_key) { printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n", (unsigned long long)(chain - lock_chains), (unsigned long long)chain->chain_key, (unsigned long long)chain_key); - return chain->chain_key == chain_key; -#else - return true; + return false; + } #endif + return true; } static bool in_any_zapped_class_list(struct lock_class *class) @@ -871,15 +871,15 @@ static bool in_any_zapped_class_list(struct lock_class *class) struct pending_free *pf; int i; - for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); - i++, pf++) + for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) { if (in_list(&class->lock_entry, &pf->zapped)) return true; + } return false; } -static bool check_data_structures(void) +static bool __check_data_structures(void) { struct lock_class *class; struct lock_chain *chain; @@ -896,7 +896,6 @@ static bool check_data_structures(void) printk(KERN_INFO "class %px/%s is not in any class list\n", class, class->name ? : "(?)"); return false; - return false; } } @@ -953,6 +952,27 @@ static bool check_data_structures(void) return true; } +int check_consistency = 0; +module_param(check_consistency, int, 0644); + +static void check_data_structures(void) +{ + static bool once = false; + + if (check_consistency && !once) { + if (!__check_data_structures()) { + once = true; + WARN_ON(once); + } + } +} + +#else /* CONFIG_DEBUG_LOCKDEP */ + +static inline void check_data_structures(void) { } + +#endif /* CONFIG_DEBUG_LOCKDEP */ + /* * Initialize the lock_classes[] array elements, the free_lock_classes list * and also the delayed_free structure. @@ -4474,10 +4494,11 @@ static void remove_class_from_lock_chain(struct pending_free *pf, if (chain_hlocks[i] != class - lock_classes) continue; /* The code below leaks one chain_hlock[] entry. */ - if (--chain->depth > 0) + if (--chain->depth > 0) { memmove(&chain_hlocks[i], &chain_hlocks[i + 1], (chain->base + chain->depth - i) * sizeof(chain_hlocks[0])); + } /* * Each lock class occurs at most once in a lock chain so once * we found a match we can break out of this loop. @@ -4631,8 +4652,7 @@ static void __free_zapped_classes(struct pending_free *pf) { struct lock_class *class; - if (check_data_structure_consistency) - WARN_ON_ONCE(!check_data_structures()); + check_data_structures(); list_for_each_entry(class, &pf->zapped, lock_entry) reinit_class(class); -- cgit
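Taken together, the reuse patches above replace "bump" allocators (nr_list_entries++, nr_lock_chains++) with in-use bitmaps so that zapped slots can be handed out again. Below is a minimal stand-alone user-space model of that scheme, with a plain byte array standing in for DECLARE_BITMAP() plus find_first_zero_bit(), and all names invented for the example:

#include <stdio.h>

#define MAX_ENTRIES 8

struct entry { int payload; };

static struct entry entries[MAX_ENTRIES];	/* plays the role of list_entries[] */
static unsigned char in_use[MAX_ENTRIES];	/* plays the role of list_entries_in_use */

/* find_first_zero_bit() stand-in: index of first free slot, or MAX_ENTRIES. */
static int first_free(void)
{
	int i;

	for (i = 0; i < MAX_ENTRIES; i++)
		if (!in_use[i])
			return i;
	return MAX_ENTRIES;
}

/* Like alloc_list_entry(): claim the first free slot, if any. */
static struct entry *alloc_entry(void)
{
	int idx = first_free();

	if (idx >= MAX_ENTRIES)
		return NULL;		/* pool exhausted */
	in_use[idx] = 1;
	return entries + idx;
}

/* Like zap_class() clearing an in-use bit: the slot becomes reusable. */
static void free_entry(struct entry *e)
{
	in_use[e - entries] = 0;
}

int main(void)
{
	struct entry *a = alloc_entry();
	struct entry *b = alloc_entry();
	struct entry *c;

	free_entry(a);
	c = alloc_entry();		/* hands a's old slot out again */
	printf("slot reused: %s\n", c == a ? "yes" : "no");
	(void)b;
	return 0;
}

The kernel side differs in one important way: slots are only cleared after an RCU grace period, via the pending_free machinery, so readers traversing the lists under rcu_read_lock() never see a recycled element. The consistency checks added in this series can catch violations of exactly these invariants; with the final patch they are compiled in under CONFIG_DEBUG_LOCKDEP and enabled at runtime through the check_consistency module parameter (for a built-in lockdep, e.g. lockdep.check_consistency=1 on the kernel command line).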