From a61ba2c8a48f150f5e6b6d14328fc7f1aa32969d Mon Sep 17 00:00:00 2001
From: Kirill Tkhai
Date: Fri, 29 Sep 2017 19:06:18 +0300
Subject: locking/arch, s390: Add __down_read_killable()

Similar to __down_write_killable(), add a read killable primitive.

Signed-off-by: Kirill Tkhai
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: arnd@arndb.de
Cc: avagin@virtuozzo.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: gorcunov@virtuozzo.com
Cc: heiko.carstens@de.ibm.com
Cc: hpa@zytor.com
Cc: ink@jurassic.park.msu.ru
Cc: mattst88@gmail.com
Cc: rientjes@google.com
Cc: rth@twiddle.net
Cc: schwidefsky@de.ibm.com
Cc: tony.luck@intel.com
Cc: viro@zeniv.linux.org.uk
Link: http://lkml.kernel.org/r/150670117817.23930.13068785028558453848.stgit@localhost.localdomain
Signed-off-by: Ingo Molnar
---
 arch/s390/include/asm/rwsem.h | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 597e7e96b59e..fda9481dec5c 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -49,7 +49,7 @@
 /*
  * lock for reading
  */
-static inline void __down_read(struct rw_semaphore *sem)
+static inline int ___down_read(struct rw_semaphore *sem)
 {
 	signed long old, new;
 
@@ -62,10 +62,25 @@ static inline void __down_read(struct rw_semaphore *sem)
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
 		: "cc", "memory");
-	if (old < 0)
+	return (old < 0);
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	if (___down_read(sem))
 		rwsem_down_read_failed(sem);
 }
 
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+	if (___down_read(sem)) {
+		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+			return -EINTR;
+	}
+
+	return 0;
+}
+
 /*
  * trylock for reading -- returns 1 if successful, 0 if contention
  */
--
cgit
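The new primitive supplies the s390 half of down_read_killable(): a reader
sleeping on a contended semaphore can now be terminated by a fatal signal,
with the acquisition failing as -EINTR instead of blocking uninterruptibly.
A minimal caller sketch follows; the function and its use of mmap_sem are
hypothetical illustrations, not part of this patch:

/*
 * Hypothetical caller (illustration only): take the mmap semaphore
 * for reading, but fail with -EINTR if a fatal signal arrives while
 * sleeping on a contended lock.
 */
static int walk_mappings(struct mm_struct *mm)
{
	if (down_read_killable(&mm->mmap_sem))
		return -EINTR;

	/* ... inspect mm->mmap under the read lock ... */

	up_read(&mm->mmap_sem);
	return 0;
}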
From a8a217c22116eff6c120d753c9934089fb229af0 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 3 Oct 2017 19:25:27 +0100
Subject: locking/core: Remove {read,spin,write}_can_lock()

Outside of the locking code itself, {read,spin,write}_can_lock() have no
users in tree. Apparmor (the last remaining user of write_can_lock()) got
moved over to lockdep by the previous patch.

This patch removes the use of {read,spin,write}_can_lock() from the
BUILD_LOCK_OPS macro, deferring to the trylock operation for testing the
lock status, and subsequently removes the unused macros altogether. They
aren't guaranteed to work in a concurrent environment and can give
incorrect results in the case of qrwlock.

Signed-off-by: Will Deacon
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-2-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar
---
 arch/s390/include/asm/spinlock.h | 12 ------------
 1 file changed, 12 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 8182b521c42f..dc9c58ed9e4c 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -110,18 +110,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
  * read-locks.
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
 extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
--
cgit

From 0160fb177d484367e041ac251fca591a3e49660c Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 3 Oct 2017 19:25:28 +0100
Subject: locking/arch: Remove dummy arch_{read,spin,write}_relax() implementations

arch_{read,spin,write}_relax() are defined as cpu_relax() by the core
code, so architectures that can't do better (i.e. most of them) don't
need to bother with the dummy definitions.

Signed-off-by: Will Deacon
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-3-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar
---
 arch/s390/include/asm/spinlock.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index dc9c58ed9e4c..4eca60cc81e4 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -45,6 +45,7 @@ static inline void arch_spin_relax(arch_spinlock_t *lock)
 {
 	arch_lock_relax(lock->lock);
 }
+#define arch_spin_relax	arch_spin_relax
 
 static inline u32 arch_spin_lockval(int cpu)
 {
@@ -256,10 +257,12 @@ static inline void arch_read_relax(arch_rwlock_t *rw)
 {
 	arch_lock_relax(rw->owner);
 }
+#define arch_read_relax	arch_read_relax
 
 static inline void arch_write_relax(arch_rwlock_t *rw)
 {
 	arch_lock_relax(rw->owner);
 }
+#define arch_write_relax	arch_write_relax
 
 #endif /* __ASM_SPINLOCK_H */
--
cgit
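The two patches above both lean on the core locking code in
kernel/locking/spinlock.c. The sketch below paraphrases the relevant core
pieces of this era (a sketch, not a verbatim copy): the lock-building
macros now poll the trylock operation, an atomic read-modify-write that
cannot misreport a qrwlock's state the way a racy can_lock peek could, and
the relax hooks default to cpu_relax() unless an architecture implements
its own and announces it with the matching #define, which is exactly what
the s390 markers added above do.

/* Default relax hooks: compiled out on architectures (like s390)
 * that define their own and set the matching macro. */
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif

/*
 * The spin loop generated by BUILD_LOCK_OPS, reduced to its spinlock
 * instance: retry the trylock and relax between attempts, rather than
 * peeking at the lock word with a can_lock test.
 */
void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(do_raw_spin_trylock(lock)))
			break;
		preempt_enable();

		arch_spin_relax(&lock->raw_lock);
	}
}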
Signed-off-by: Will Deacon
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-4-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar
---
 arch/s390/include/asm/spinlock.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 4eca60cc81e4..9fa855f91e55 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -81,6 +81,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
 	if (!arch_spin_trylock_once(lp))
 		arch_spin_lock_wait_flags(lp, flags);
 }
+#define arch_spin_lock_flags	arch_spin_lock_flags
 
 static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
@@ -114,9 +115,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
 extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 static inline int arch_read_trylock_once(arch_rwlock_t *rw)
 {
 	int old = ACCESS_ONCE(rw->lock);
--
cgit
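On the core side, this change adds generic fallbacks so that only
architectures which actually consume the flags keep a definition of their
own; s390 re-enables interrupts while spinning, hence its
arch_spin_lock_flags() stays and is marked with the #define above. A
sketch of those fallbacks, paraphrased from this era's
include/linux/spinlock.h and include/linux/rwlock.h rather than quoted
verbatim:

/* Generic fallbacks: map the _flags variants to the plain lock
 * operations unless the architecture defines its own. */
#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

#ifndef arch_read_lock_flags
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#endif

#ifndef arch_write_lock_flags
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
#endif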
From 6aa7de059173a986114ac43b8f50b297a86f09a8 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 23 Oct 2017 14:07:29 -0700
Subject: locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()

Please do not apply this to mainline directly, instead please re-run the
coccinelle script shown below and apply its output.

For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't harmful, and changing them results in
churn.

However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:

----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()

// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch

virtual patch

@ depends on patch @
expression E1, E2;
@@

- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)

@ depends on patch @
expression E;
@@

- ACCESS_ONCE(E)
+ READ_ONCE(E)
----

Signed-off-by: Mark Rutland
Signed-off-by: Paul E. McKenney
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar
---
 arch/s390/include/asm/spinlock.h |  6 +++---
 arch/s390/lib/spinlock.c         | 16 ++++++++--------
 2 files changed, 11 insertions(+), 11 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 9fa855f91e55..66f4160010ef 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -117,14 +117,14 @@ extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
 static inline int arch_read_trylock_once(arch_rwlock_t *rw)
 {
-	int old = ACCESS_ONCE(rw->lock);
+	int old = READ_ONCE(rw->lock);
 	return likely(old >= 0 &&
 		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
 }
 
 static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 {
-	int old = ACCESS_ONCE(rw->lock);
+	int old = READ_ONCE(rw->lock);
 	return likely(old == 0 &&
 		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
 }
@@ -211,7 +211,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 	int old;
 
 	do {
-		old = ACCESS_ONCE(rw->lock);
+		old = READ_ONCE(rw->lock);
 	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
 }
 
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index b12663d653d8..34e30b9ea234 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -162,8 +162,8 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 			smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
+		old = READ_ONCE(rw->lock);
+		owner = READ_ONCE(rw->owner);
 		if (old < 0)
 			continue;
 		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@ -178,7 +178,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
 	int old;
 
 	while (count-- > 0) {
-		old = ACCESS_ONCE(rw->lock);
+		old = READ_ONCE(rw->lock);
 		if (old < 0)
 			continue;
 		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@ -202,8 +202,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
 			smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
+		old = READ_ONCE(rw->lock);
+		owner = READ_ONCE(rw->owner);
 		smp_mb();
 		if (old >= 0) {
 			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
@@ -230,8 +230,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 			smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
+		old = READ_ONCE(rw->lock);
+		owner = READ_ONCE(rw->owner);
 		if (old >= 0 &&
 		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
 			prev = old;
@@ -251,7 +251,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
 	int old;
 
 	while (count-- > 0) {
-		old = ACCESS_ONCE(rw->lock);
+		old = READ_ONCE(rw->lock);
 		if (old)
 			continue;
 		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
--
cgit
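To illustrate what the conversion buys (an illustrative sketch, not code
from the patch): ACCESS_ONCE() spells a marked load and a marked store
identically, leaving the reader to work out the direction from context,
whereas the replacement accessors name it at every use.

static int payload;
static int ready;

/* Before: the same macro marks both the store and the load. */
static void producer_old(void)
{
	payload = 42;
	smp_wmb();
	ACCESS_ONCE(ready) = 1;		/* a store, spelled like a read */
}

/* After: the accessor names state the direction of each access. */
static void producer_new(void)
{
	payload = 42;
	smp_wmb();
	WRITE_ONCE(ready, 1);
}

static int consumer_new(void)
{
	while (!READ_ONCE(ready))	/* compiler forced to re-read each pass */
		cpu_relax();
	smp_rmb();
	return payload;
}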