Diffstat (limited to 'kernel/locking/mcs_spinlock.h')
-rw-r--r--  kernel/locking/mcs_spinlock.h | 21
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 6a385aabcce7..5c92ba199b90 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * MCS lock defines
  *
@@ -6,7 +7,7 @@
  * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
  * with the desirable properties of being fair, and with each cpu trying
  * to acquire the lock spinning on a local variable.
- * It avoids expensive cache bouncings that common test-and-set spin-lock
+ * It avoids expensive cache bounces that common test-and-set spin-lock
  * implementations incur.
  */
 #ifndef __LINUX_MCS_SPINLOCK_H
@@ -14,22 +15,16 @@
 
 #include <asm/mcs_spinlock.h>
 
-struct mcs_spinlock {
-	struct mcs_spinlock *next;
-	int locked; /* 1 if lock acquired */
-	int count;  /* nesting count, see qspinlock.c */
-};
-
 #ifndef arch_mcs_spin_lock_contended
 /*
- * Using smp_load_acquire() provides a memory barrier that ensures
- * subsequent operations happen after the lock is acquired.
+ * Using smp_cond_load_acquire() provides the acquire semantics
+ * required so that subsequent operations happen after the
+ * lock is acquired. Additionally, some architectures such as
+ * ARM64 would like to do spin-waiting instead of purely
+ * spinning, and smp_cond_load_acquire() provides that behavior.
  */
 #define arch_mcs_spin_lock_contended(l)					\
-do {									\
-	while (!(smp_load_acquire(l)))					\
-		cpu_relax();						\
-} while (0)
+	smp_cond_load_acquire(l, VAL)
 #endif
 
 #ifndef arch_mcs_spin_unlock_contended
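
For context on the replacement: the second argument to smp_cond_load_acquire() is a condition over VAL, a variable the macro binds to the most recently loaded value of *l, so arch_mcs_spin_lock_contended(l) now spins until the lock word becomes non-zero and then provides acquire ordering. The C sketch below is a simplified, hypothetical rendering of what the generic fallback boils down to (the example_ name is illustrative, not kernel API; the real definition in include/asm-generic/barrier.h is layered over smp_cond_load_relaxed()). Architectures such as arm64 override it so a waiter can wait on the cache line (e.g. with WFE) instead of busy-spinning.

/*
 * Hypothetical sketch (not the kernel's actual macro) of what the
 * generic smp_cond_load_acquire() fallback does at this call site:
 * spin with cpu_relax() until the freshly loaded value satisfies the
 * condition, then upgrade to acquire ordering.
 */
#define example_cond_load_acquire(ptr, cond_expr)			\
({									\
	typeof(*(ptr)) VAL;						\
	for (;;) {							\
		VAL = READ_ONCE(*(ptr));				\
		if (cond_expr)						\
			break;						\
		cpu_relax();						\
	}								\
	smp_acquire__after_ctrl_dep();	/* order later accesses */	\
	VAL;								\
})

In the MCS slowpath this helper is applied to the waiter's own queue node (roughly arch_mcs_spin_lock_contended(&node->locked)), which is what keeps the spinning local to one CPU's cache line and avoids the cache bounces described in the header comment.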
