author    Will Deacon <will.deacon@arm.com>  2017-10-03 19:25:29 +0100
committer Ingo Molnar <mingo@kernel.org>     2017-10-10 11:50:19 +0200
commit    a4c1887d4c1462b0ec5a8989f8ba3cdd9057a299 (patch)
tree      4b0f4364502ec0e8f790ebbe61fa3f4b019a667c /arch/sh/include
parent    0160fb177d484367e041ac251fca591a3e49660c (diff)
locking/arch: Remove dummy arch_{read,spin,write}_lock_flags() implementations
The arch_{read,spin,write}_lock_flags() macros are simply mapped to the
non-flags versions by the majority of architectures, so do this in core
code and remove the dummy implementations. Also remove the implementation
in spinlock_up.h, since all callers of do_raw_spin_lock_flags() call
local_irq_save(flags) anyway.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-4-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
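For context, the core-code fallback the message describes takes the usual
#ifndef-guard shape sketched below. The exact placement (include/linux/spinlock.h
and include/linux/rwlock.h) is an assumption here, since the diffstat on this
page is limited to arch/sh/include and does not show the core side of the change.

/*
 * Sketch of the generic fallback (assumed to land in
 * include/linux/spinlock.h and include/linux/rwlock.h; the core-side
 * diff is not shown on this page). An architecture with a real
 * *_lock_flags() implementation defines the macro itself, and the
 * #ifndef guard leaves it alone; every other architecture falls back
 * to the plain lock operation, which is exactly what the dummy macros
 * removed below did by hand.
 */
#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

#ifndef arch_read_lock_flags
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#endif

#ifndef arch_write_lock_flags
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
#endif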
Diffstat (limited to 'arch/sh/include')
-rw-r--r--  arch/sh/include/asm/spinlock-cas.h   4 ----
-rw-r--r--  arch/sh/include/asm/spinlock-llsc.h  4 ----
2 files changed, 0 insertions(+), 8 deletions(-)
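The spinlock_up.h removal rests on the message's second claim: every caller
of do_raw_spin_lock_flags() has already done local_irq_save(flags). A hedged
sketch of that caller shape, loosely modelled on __raw_spin_lock_irqsave() in
include/linux/spinlock_api_smp.h (reproduced from memory, not part of this diff):

/*
 * Loose sketch of the caller pattern: flags are saved and interrupts
 * disabled before do_raw_spin_lock_flags() ever runs, so a UP dummy
 * that ignored the flags argument added nothing.
 */
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);			/* IRQs already off here... */
	preempt_disable();
	do_raw_spin_lock_flags(lock, &flags);	/* ...before the lock op runs */
	return flags;
}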
diff --git a/arch/sh/include/asm/spinlock-cas.h b/arch/sh/include/asm/spinlock-cas.h
index 295993c2598e..270ee4d3e25b 100644
--- a/arch/sh/include/asm/spinlock-cas.h
+++ b/arch/sh/include/asm/spinlock-cas.h
@@ -27,7 +27,6 @@ static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new
  */
 
 #define arch_spin_is_locked(x) ((x)->lock <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
@@ -90,7 +89,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* __ASM_SH_SPINLOCK_CAS_H */
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h
index a6f9edd15317..715595de286a 100644
--- a/arch/sh/include/asm/spinlock-llsc.h
+++ b/arch/sh/include/asm/spinlock-llsc.h
@@ -19,7 +19,6 @@
  */
 
 #define arch_spin_is_locked(x) ((x)->lock <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
@@ -197,7 +196,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	return (oldval > (RW_LOCK_BIAS - 1));
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* __ASM_SH_SPINLOCK_LLSC_H */