author		Peter Zijlstra <peterz@infradead.org>	2018-09-26 13:01:18 +0200
committer	Ingo Molnar <mingo@kernel.org>	2018-10-16 17:33:53 +0200
commit		53bf57fab7321fb42b703056a4c80fc9d986d170 (patch)
tree		9235fe433960327c1da49e63e796e992b196173f /kernel/locking
parent		ec57e2f0acb01710cd465bc04495ed03a9e0fea1 (diff)
locking/qspinlock: Re-order code
Flip the branch condition after atomic_fetch_or_acquire(_Q_PENDING_VAL) such that we lose the indent. This also results in a more natural code flow, IMO.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andrea.parri@amarulasolutions.com
Cc: longman@redhat.com
Link: https://lkml.kernel.org/r/20181003130257.156322446@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
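The change is the usual early-bail refactor: test the negated (contention) condition first, undo and jump to the queueing slow path, and let the pending fast path continue as straight-line code at the outer indent level. A minimal standalone sketch of the shape of the change; contended(), undo_and_queue() and take_lock() are hypothetical stand-ins for the real qspinlock operations, and the conditional undo of the pending bit is simplified away here:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the qspinlock operations. */
static bool contended(unsigned int val) { return val & ~0xffu; }
static void undo_and_queue(void) { puts("undo pending, queue as MCS node"); }
static void take_lock(void)      { puts("take ownership, clear pending"); }

/* Before: the fast path nests inside the branch, undo code trails after it. */
static void pending_before(unsigned int val)
{
	if (!contended(val)) {
		take_lock();		/* whole fast path one indent deeper */
		return;
	}
	undo_and_queue();
}

/*
 * After: flip the condition, bail out early ('goto queue' in the kernel),
 * and the fast path continues at the outer level.
 */
static void pending_after(unsigned int val)
{
	if (contended(val)) {		/* unlikely(): observed contention */
		undo_and_queue();
		return;
	}
	take_lock();
}

int main(void)
{
	pending_before(0x0);		/* uncontended: fast path */
	pending_after(0x10000);		/* tail bits set: slow path */
	return 0;
}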
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/qspinlock.c | 56
1 file changed, 27 insertions(+), 29 deletions(-)
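For reading the hunk below: the comments describe the lock word as a (queue tail, pending bit, lock value) triple, so "0,0,1 -> 0,1,1" means an uncontended locked word gains the pending bit. A rough sketch of the word layout and of the contention test the patch flips into an early bail-out; the mask values follow kernel/locking/qspinlock_types.h for the NR_CPUS < 16K case, while the demo main() is illustrative only:

#include <stdio.h>

/*
 * Simplified qspinlock word layout (assuming NR_CPUS < 16K, as in
 * kernel/locking/qspinlock_types.h):
 *
 *   bits  0- 7: locked byte
 *   bits  8-15: pending
 *   bits 16-31: queue tail (tail cpu + tail idx)
 */
#define _Q_LOCKED_MASK   0x000000ffU
#define _Q_PENDING_MASK  0x0000ff00U
#define _Q_PENDING_VAL   (1U << 8)

int main(void)
{
	/* 0,1,1: pending and locked observed, no tail queued. */
	unsigned int val = _Q_PENDING_VAL | 1U;

	/*
	 * 'val & ~_Q_LOCKED_MASK' is non-zero iff the pending bit or a
	 * queue tail is set, i.e. somebody else already contends; that
	 * is the branch the patch turns into an early 'goto queue'.
	 */
	printf("contended: %s\n", (val & ~_Q_LOCKED_MASK) ? "yes" : "no");
	return 0;
}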
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index bfaeb05123ff..ec343276f975 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -330,39 +330,37 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * 0,0,1 -> 0,1,1 ; pending
 	 */
 	val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
-	if (!(val & ~_Q_LOCKED_MASK)) {
-		/*
-		 * We're pending, wait for the owner to go away.
-		 *
-		 * *,1,1 -> *,1,0
-		 *
-		 * this wait loop must be a load-acquire such that we match the
-		 * store-release that clears the locked bit and create lock
-		 * sequentiality; this is because not all
-		 * clear_pending_set_locked() implementations imply full
-		 * barriers.
-		 */
-		if (val & _Q_LOCKED_MASK) {
-			atomic_cond_read_acquire(&lock->val,
-						 !(VAL & _Q_LOCKED_MASK));
-		}
-
-		/*
-		 * take ownership and clear the pending bit.
-		 *
-		 * *,1,0 -> *,0,1
-		 */
-		clear_pending_set_locked(lock);
-		qstat_inc(qstat_lock_pending, true);
-		return;
+	/*
+	 * If we observe any contention; undo and queue.
+	 */
+	if (unlikely(val & ~_Q_LOCKED_MASK)) {
+		if (!(val & _Q_PENDING_MASK))
+			clear_pending(lock);
+		goto queue;
 	}
 
 	/*
-	 * If pending was clear but there are waiters in the queue, then
-	 * we need to undo our setting of pending before we queue ourselves.
+	 * We're pending, wait for the owner to go away.
+	 *
+	 * 0,1,1 -> 0,1,0
+	 *
+	 * this wait loop must be a load-acquire such that we match the
+	 * store-release that clears the locked bit and create lock
+	 * sequentiality; this is because not all
+	 * clear_pending_set_locked() implementations imply full
+	 * barriers.
+	 */
+	if (val & _Q_LOCKED_MASK)
+		atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
+
+	/*
+	 * take ownership and clear the pending bit.
+	 *
+	 * 0,1,0 -> 0,0,1
 	 */
-	if (!(val & _Q_PENDING_MASK))
-		clear_pending(lock);
+	clear_pending_set_locked(lock);
+	qstat_inc(qstat_lock_pending, true);
+	return;
 
 	/*
 	 * End of pending bit optimistic spinning and beginning of MCS