Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/lockdep.c	52
1 file changed, 47 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 56f69cc53ddc..eda8114ef793 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4681,6 +4681,17 @@ EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
static atomic_t cross_gen_id; /* Can be wrapped */
/*
+ * Make an entry of the ring buffer invalid.
+ */
+static inline void invalidate_xhlock(struct hist_lock *xhlock)
+{
+ /*
+ * Normally, xhlock->hlock.instance must be !NULL.
+ */
+ xhlock->hlock.instance = NULL;
+}
+
+/*
* Lock history stacks; we have 3 nested lock history stacks:
*
* Hard IRQ
@@ -4712,14 +4723,28 @@ static atomic_t cross_gen_id; /* Can be wrapped */
*/
void crossrelease_hist_start(enum xhlock_context_t c)
{
- if (current->xhlocks)
- current->xhlock_idx_hist[c] = current->xhlock_idx;
+ struct task_struct *cur = current;
+
+ if (cur->xhlocks) {
+ cur->xhlock_idx_hist[c] = cur->xhlock_idx;
+ cur->hist_id_save[c] = cur->hist_id;
+ }
}
void crossrelease_hist_end(enum xhlock_context_t c)
{
- if (current->xhlocks)
- current->xhlock_idx = current->xhlock_idx_hist[c];
+ struct task_struct *cur = current;
+
+ if (cur->xhlocks) {
+ unsigned int idx = cur->xhlock_idx_hist[c];
+ struct hist_lock *h = &xhlock(idx);
+
+ cur->xhlock_idx = idx;
+
+ /* Check if the ring was overwritten. */
+ if (h->hist_id != cur->hist_id_save[c])
+ invalidate_xhlock(h);
+ }
}
static int cross_lock(struct lockdep_map *lock)
@@ -4765,6 +4790,7 @@ static inline int depend_after(struct held_lock *hlock)
* Check if the xhlock is valid, which would be false if,
*
* 1. Has not been used after initialization yet.
+ * 2. Got invalidated.
*
* Remember that hist_lock is implemented as a ring buffer.
*/
@@ -4796,6 +4822,7 @@ static void add_xhlock(struct held_lock *hlock)
/* Initialize hist_lock's members */
xhlock->hlock = *hlock;
+ xhlock->hist_id = current->hist_id++;
xhlock->trace.nr_entries = 0;
xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
@@ -4934,6 +4961,7 @@ static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
static void commit_xhlocks(struct cross_lock *xlock)
{
unsigned int cur = current->xhlock_idx;
+ unsigned int prev_hist_id = xhlock(cur).hist_id;
unsigned int i;
if (!graph_lock())
@@ -4952,6 +4980,17 @@ static void commit_xhlocks(struct cross_lock *xlock)
break;
/*
+ * Filter out the case where the ring buffer was
+ * overwritten and the previous entry has a bigger
+ * hist_id than the following one, which is impossible
+ * otherwise.
+ */
+ if (unlikely(before(xhlock->hist_id, prev_hist_id)))
+ break;
+
+ prev_hist_id = xhlock->hist_id;
+
+ /*
* commit_xhlock() returns 0 with graph_lock already
* released if fail.
*/
@@ -5024,9 +5063,12 @@ void lockdep_init_task(struct task_struct *task)
int i;
task->xhlock_idx = UINT_MAX;
+ task->hist_id = 0;
- for (i = 0; i < XHLOCK_CTX_NR; i++)
+ for (i = 0; i < XHLOCK_CTX_NR; i++) {
task->xhlock_idx_hist[i] = UINT_MAX;
+ task->hist_id_save[i] = 0;
+ }
task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
GFP_KERNEL);
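
For reference, below is a stand-alone user-space sketch (not the kernel code) of the technique the patch relies on: every entry written into a fixed-size ring buffer is stamped with a monotonically increasing generation id; a context saves the current index and stamp on entry, and on exit rolls the index back and invalidates the slot if its stamp changed; a backwards walk over the history stops as soon as it sees a stamp newer than the one it just visited, since that can only happen after a wrap. All names here (ring_entry, ctx_save, push_entry, hist_start, hist_end, count_usable_history, RING_NR) are made up for illustration, and the save/compare details are simplified relative to what add_xhlock(), crossrelease_hist_start()/crossrelease_hist_end() and commit_xhlocks() actually do.

#include <stdbool.h>
#include <stdio.h>

#define RING_NR 8	/* deliberately tiny so a wrap is easy to trigger */

struct ring_entry {
	void *instance;		/* NULL means "invalid", as in the patch */
	unsigned int hist_id;	/* generation stamp */
};

struct ctx_save {
	unsigned int idx;
	unsigned int hist_id;
};

static struct ring_entry ring[RING_NR];
static unsigned int ring_idx = (unsigned int)-1;	/* cf. xhlock_idx = UINT_MAX */
static unsigned int hist_id;				/* per-"task" generation counter */

/* Wrap-safe "a was generated before b", in the spirit of the patch's before(). */
static bool before(unsigned int a, unsigned int b)
{
	return (int)(a - b) < 0;
}

/* Push one entry, stamping it with the next generation id (cf. add_xhlock()). */
static void push_entry(void *instance)
{
	struct ring_entry *e = &ring[++ring_idx % RING_NR];

	e->instance = instance;
	e->hist_id = hist_id++;
}

/* Rough analogue of crossrelease_hist_start(): remember where the context began. */
static void hist_start(struct ctx_save *save)
{
	save->idx = ring_idx;
	save->hist_id = ring[ring_idx % RING_NR].hist_id;
}

/* Rough analogue of crossrelease_hist_end(): roll back and detect an overwrite. */
static void hist_end(const struct ctx_save *save)
{
	struct ring_entry *e = &ring[save->idx % RING_NR];

	ring_idx = save->idx;

	/* The slot was overwritten while we were inside the context. */
	if (e->hist_id != save->hist_id)
		e->instance = NULL;
}

/*
 * Rough analogue of the filter added to commit_xhlocks(): walking backwards
 * from the newest entry, stamps must keep decreasing; an entry with a *newer*
 * stamp than the one just visited can only mean the ring wrapped, so the
 * walk stops there.
 */
static unsigned int count_usable_history(void)
{
	unsigned int prev = ring[ring_idx % RING_NR].hist_id;
	unsigned int n = 0;

	for (unsigned int i = 0; i < RING_NR; i++) {
		struct ring_entry *e = &ring[(ring_idx - i) % RING_NR];

		if (!e->instance || before(prev, e->hist_id))
			break;
		prev = e->hist_id;
		n++;
	}
	return n;
}

int main(void)
{
	struct ctx_save save;
	int dummy;

	for (int i = 0; i < 3; i++)		/* some pre-existing history */
		push_entry(&dummy);

	hist_start(&save);			/* e.g. entering an IRQ context */
	for (int i = 0; i < RING_NR - 2; i++)	/* overwrite part of the old history */
		push_entry(&dummy);
	hist_end(&save);			/* leaving the context again */

	printf("top slot still valid: %s, usable history entries: %u\n",
	       ring[save.idx % RING_NR].instance ? "yes" : "no",
	       count_usable_history());
	return 0;
}

Running the sketch prints that the restored slot is still valid while only two history entries remain usable, which is the situation the extra hist_id check in commit_xhlocks() is meant to catch: the top of the ring survived, but older entries underneath it were silently overwritten.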