author		Peter Zijlstra <peterz@infradead.org>	2019-07-18 15:08:53 +0200
committer	Ingo Molnar <mingo@kernel.org>	2019-07-25 15:39:25 +0200
commit		6ffddfb9e1de21c3d0c0cfa4fe4a20dd3291a812 (patch)
tree		8515b223b81fbcdf12bceba14362cc0d31d1184d /kernel/locking
parent		952041a8639a7a3a73a2b6573cb8aa8518bc39f8 (diff)
locking/rwsem: Add ACQUIRE comments
Since we just reviewed read_slowpath for ACQUIRE correctness, add a few comments to retain our findings.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/rwsem.c	18
1 file changed, 13 insertions(+), 5 deletions(-)
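The ordering the new comments document is a plain RELEASE/ACQUIRE handoff: rwsem_mark_wake() publishes the grant with smp_store_release(&waiter->task, NULL), and the sleeper's smp_load_acquire(&waiter.task) pairs with it, so a waiter that observes NULL also observes every write the waker made before handing over the lock. A minimal userspace C11 sketch of the same pattern (illustrative only: grant_lock() and lock_granted() are invented names for this sketch; the kernel uses the smp_*() primitives directly on waiter->task):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct waiter {
	_Atomic(void *) task;	/* non-NULL while still waiting */
};

/* Waker side (cf. rwsem_mark_wake()): everything done to hand over
 * the lock is ordered before this RELEASE store. */
static void grant_lock(struct waiter *w)
{
	atomic_store_explicit(&w->task, NULL, memory_order_release);
}

/* Sleeper side (cf. rwsem_down_read_slowpath()): the ACQUIRE load
 * pairs with the RELEASE store above; once NULL is observed, the
 * waker's earlier writes are visible too. */
static bool lock_granted(struct waiter *w)
{
	return atomic_load_explicit(&w->task, memory_order_acquire) == NULL;
}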
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 571938887cc8..bd0f0d05724c 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1004,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
 	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
 	adjustment = 0;
 	if (rwsem_optimistic_spin(sem, false)) {
+		/* rwsem_optimistic_spin() implies ACQUIRE on success */
 		/*
 		 * Wake up other readers in the wait list if the front
 		 * waiter is a reader.
@@ -1018,6 +1019,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
 		}
 		return sem;
 	} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
+		/* rwsem_reader_phase_trylock() implies ACQUIRE on success */
 		return sem;
 	}
 
@@ -1071,10 +1073,10 @@ queue:
 	wake_up_q(&wake_q);
 
 	/* wait to be given the lock */
-	while (true) {
+	for (;;) {
 		set_current_state(state);
 		if (!smp_load_acquire(&waiter.task)) {
-			/* Orders against rwsem_mark_wake()'s smp_store_release() */
+			/* Matches rwsem_mark_wake()'s smp_store_release(). */
 			break;
 		}
 		if (signal_pending_state(state, current)) {
@@ -1082,6 +1084,7 @@ queue:
 			if (waiter.task)
 				goto out_nolock;
 			raw_spin_unlock_irq(&sem->wait_lock);
+			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
 			break;
 		}
 		schedule();
@@ -1091,6 +1094,7 @@ queue:
 	__set_current_state(TASK_RUNNING);
 	lockevent_inc(rwsem_rlock);
 	return sem;
+
 out_nolock:
 	list_del(&waiter.list);
 	if (list_empty(&sem->wait_list)) {
@@ -1131,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 
 	/* do optimistic spinning and steal lock if possible */
 	if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
-	    rwsem_optimistic_spin(sem, true))
+	    rwsem_optimistic_spin(sem, true)) {
+		/* rwsem_optimistic_spin() implies ACQUIRE on success */
 		return sem;
+	}
 
 	/*
 	 * Disable reader optimistic spinning for this rwsem after
@@ -1192,9 +1198,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 wait:
 	/* wait until we successfully acquire the lock */
 	set_current_state(state);
-	while (true) {
-		if (rwsem_try_write_lock(sem, wstate))
+	for (;;) {
+		if (rwsem_try_write_lock(sem, wstate)) {
+			/* rwsem_try_write_lock() implies ACQUIRE on success */
 			break;
+		}
 
 		raw_spin_unlock_irq(&sem->wait_lock);
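Every "... implies ACQUIRE on success" comment above describes the same convention: a trylock-style operation claims the lock word with an atomic read-modify-write that is ordered only on its success path, while a failed attempt takes no lock and therefore needs no barrier. A hedged C11 sketch of that shape (try_write_lock() is a made-up name for illustration, not the kernel's rwsem_try_write_lock(), which manipulates sem->count with wait_lock held):

#include <stdatomic.h>
#include <stdbool.h>

/* Trylock that implies ACQUIRE on success: the compare-exchange uses
 * memory_order_acquire when it wins, so the new owner is ordered after
 * the previous owner's RELEASE. The failure ordering is relaxed,
 * because a failed attempt acquired nothing. */
static bool try_write_lock(atomic_int *lock_word)
{
	int unlocked = 0;

	return atomic_compare_exchange_strong_explicit(
			lock_word, &unlocked, 1,
			memory_order_acquire,	/* success: ACQUIRE */
			memory_order_relaxed);	/* failure: unordered */
}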