author     Waiman Long <longman@redhat.com>    2020-02-06 10:24:06 -0500
committer  Ingo Molnar <mingo@kernel.org>      2020-02-11 13:10:50 +0100
commit     836bd74b5957779171c9648e9e29145fc089fffe (patch)
tree       dba3face57c6512c2408c472d9dcb4281a3e1427 /kernel/locking/lockdep.c
parent     1d44bcb4fdb650b2a57c9ff593a4d246a10ad801 (diff)
locking/lockdep: Throw away all lock chains with zapped class
If a lock chain contains a class that is zapped, the whole lock chain is likely to be invalid. If the zapped class is at the end of the chain, the partial chain without the zapped class should already have been stored, since the current code stores all of a chain's predecessor chains. If the zapped class is somewhere in the middle, there is no guarantee that the partial chain will ever actually occur; it may just clutter up the hash and make searching slower. I would prefer to store a chain only when it actually occurs. So just dump the corresponding chain_hlocks entries for now. A later patch will try to reuse the freed chain_hlocks entries.

This patch also changes the type of nr_chain_hlocks to unsigned int to be consistent with the other counters.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20200206152408.24165-5-longman@redhat.com
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--  kernel/locking/lockdep.c  37
1 file changed, 4 insertions(+), 33 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 28222d03345f..ef2a6432dd10 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2625,8 +2625,8 @@ out_bug:
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
-int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+unsigned int nr_chain_hlocks;
struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
@@ -4772,36 +4772,23 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
struct lock_class *class)
{
#ifdef CONFIG_PROVE_LOCKING
- struct lock_chain *new_chain;
- u64 chain_key;
int i;
for (i = chain->base; i < chain->base + chain->depth; i++) {
if (chain_hlocks[i] != class - lock_classes)
continue;
- /* The code below leaks one chain_hlock[] entry. */
- if (--chain->depth > 0) {
- memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
- (chain->base + chain->depth - i) *
- sizeof(chain_hlocks[0]));
- }
/*
* Each lock class occurs at most once in a lock chain so once
* we found a match we can break out of this loop.
*/
- goto recalc;
+ goto free_lock_chain;
}
/* Since the chain has not been modified, return. */
return;
-recalc:
- chain_key = INITIAL_CHAIN_KEY;
- for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
- if (chain->depth && chain->chain_key == chain_key)
- return;
+free_lock_chain:
/* Overwrite the chain key for concurrent RCU readers. */
- WRITE_ONCE(chain->chain_key, chain_key);
+ WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
dec_chains(chain->irq_context);
/*
@@ -4810,22 +4797,6 @@ recalc:
*/
hlist_del_rcu(&chain->entry);
__set_bit(chain - lock_chains, pf->lock_chains_being_freed);
- if (chain->depth == 0)
- return;
- /*
- * If the modified lock chain matches an existing lock chain, drop
- * the modified lock chain.
- */
- if (lookup_chain_cache(chain_key))
- return;
- new_chain = alloc_lock_chain();
- if (WARN_ON_ONCE(!new_chain)) {
- debug_locks_off();
- return;
- }
- *new_chain = *chain;
- hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
- inc_chains(new_chain->irq_context);
#endif
}
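
For readability, here is remove_class_from_lock_chain() as it stands after this patch, assembled from the hunks above. The comment that straddles the two hunk boundaries is filled in from the surrounding lockdep.c source, so treat this as a sketch of the result rather than an authoritative listing:

static void remove_class_from_lock_chain(struct pending_free *pf,
					 struct lock_chain *chain,
					 struct lock_class *class)
{
#ifdef CONFIG_PROVE_LOCKING
	int i;

	for (i = chain->base; i < chain->base + chain->depth; i++) {
		if (chain_hlocks[i] != class - lock_classes)
			continue;
		/*
		 * Each lock class occurs at most once in a lock chain so once
		 * we found a match we can break out of this loop.
		 */
		goto free_lock_chain;
	}
	/* Since the chain has not been modified, return. */
	return;

free_lock_chain:
	/* Overwrite the chain key for concurrent RCU readers. */
	WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
	dec_chains(chain->irq_context);

	/*
	 * Note: calling hlist_del_rcu() from inside a
	 * hlist_for_each_entry_rcu() loop is safe.
	 */
	hlist_del_rcu(&chain->entry);
	__set_bit(chain - lock_chains, pf->lock_chains_being_freed);
#endif
}

The chain is no longer patched in place: writing INITIAL_CHAIN_KEY to chain->chain_key makes the entry unmatchable for concurrent RCU readers, and the whole chain is unhashed and queued for freeing rather than being re-keyed and re-inserted as the removed code did.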