author    Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-11-28 15:50:48 +0100
committer Martin Schwidefsky <schwidefsky@de.ibm.com>	2017-04-12 08:43:33 +0200
commit    02c503ff237cdcd8e012a122a638295550db10a5 (patch)
tree      cb5af588f282809cab4d37b1d4a6847814cda5ee /arch/s390/include
parent    df26c2e87e6cf3ced1fbd589e40d633a6a7f20cb (diff)
s390/spinlock: use atomic primitives for spinlocks
Add a couple more __atomic_xxx functions to atomic_ops.h and use them to replace the compare-and-swap inlines in the spinlock code. This changes the type of the lock value from unsigned int to int.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
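For context, the patch replaces open-coded cs/csg inline assembly with wrappers around the GCC __sync builtins. Below is a minimal, self-contained userspace sketch (not part of the patch) of the semantic difference between the two builtin flavours used here: the _val_ form returns the previous contents of *ptr, while the _bool_ form returns whether the swap actually happened.

/*
 * Standalone demo of the GCC builtins wrapped by the new helpers.
 * Build: gcc -O2 -o cas_demo cas_demo.c
 */
#include <stdio.h>

int main(void)
{
	int lock = 0;

	/* val form: returns the old value; the swap happened iff old == 0 */
	int old = __sync_val_compare_and_swap(&lock, 0, 1);
	printf("val form:  old=%d lock=%d\n", old, lock);	/* old=0 lock=1 */

	/* bool form: returns nonzero on success; fails here since lock == 1 */
	int ok = __sync_bool_compare_and_swap(&lock, 0, 2);
	printf("bool form: ok=%d lock=%d\n", ok, lock);		/* ok=0 lock=1 */

	return 0;
}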
Diffstat (limited to 'arch/s390/include')
 arch/s390/include/asm/atomic_ops.h     | 22
 arch/s390/include/asm/spinlock.h       | 45
 arch/s390/include/asm/spinlock_types.h |  6
 3 files changed, 35 insertions(+), 38 deletions(-)
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index ac9e2b939d04..ba6d29412344 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -111,20 +111,22 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
- asm volatile(
- " cs %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+Q" (*ptr)
- : [new] "d" (new) : "cc", "memory");
- return old;
+ return __sync_val_compare_and_swap(ptr, old, new);
+}
+
+static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
+{
+ return __sync_bool_compare_and_swap(ptr, old, new);
}
static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
- asm volatile(
- " csg %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+Q" (*ptr)
- : [new] "d" (new) : "cc", "memory");
- return old;
+ return __sync_val_compare_and_swap(ptr, old, new);
+}
+
+static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+{
+ return __sync_bool_compare_and_swap(ptr, old, new);
}
#endif /* __ARCH_S390_ATOMIC_OPS__ */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index ffc45048ea7d..f7838ecd83c6 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -10,6 +10,7 @@
#define __ASM_SPINLOCK_H
#include <linux/smp.h>
+#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
@@ -17,12 +18,6 @@
extern int spin_retry;
-static inline int
-_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
-{
- return __sync_bool_compare_and_swap(lock, old, new);
-}
-
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
@@ -40,7 +35,7 @@ bool arch_vcpu_is_preempted(int cpu);
* (the type definitions are in asm/spinlock_types.h)
*/
-void arch_lock_relax(unsigned int cpu);
+void arch_lock_relax(int cpu);
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
@@ -70,7 +65,7 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
barrier();
return likely(arch_spin_value_unlocked(*lp) &&
- _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
+ __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -95,7 +90,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
- typecheck(unsigned int, lp->lock);
+ typecheck(int, lp->lock);
asm volatile(
"st %1,%0\n"
: "+Q" (lp->lock)
@@ -141,16 +136,16 @@ extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
- unsigned int old = ACCESS_ONCE(rw->lock);
- return likely((int) old >= 0 &&
- _raw_compare_and_swap(&rw->lock, old, old + 1));
+ int old = ACCESS_ONCE(rw->lock);
+ return likely(old >= 0 &&
+ __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}
static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
- unsigned int old = ACCESS_ONCE(rw->lock);
+ int old = ACCESS_ONCE(rw->lock);
return likely(old == 0 &&
- _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
+ __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -161,9 +156,9 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
#define __RAW_LOCK(ptr, op_val, op_string) \
({ \
- unsigned int old_val; \
+ int old_val; \
\
- typecheck(unsigned int *, ptr); \
+ typecheck(int *, ptr); \
asm volatile( \
op_string " %0,%2,%1\n" \
"bcr 14,0\n" \
@@ -175,9 +170,9 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
#define __RAW_UNLOCK(ptr, op_val, op_string) \
({ \
- unsigned int old_val; \
+ int old_val; \
\
- typecheck(unsigned int *, ptr); \
+ typecheck(int *, ptr); \
asm volatile( \
op_string " %0,%2,%1\n" \
: "=d" (old_val), "+Q" (*ptr) \
@@ -187,14 +182,14 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
})
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
- unsigned int old;
+ int old;
old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
- if ((int) old < 0)
+ if (old < 0)
_raw_read_lock_wait(rw);
}
@@ -205,7 +200,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- unsigned int old;
+ int old;
old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
if (old != 0)
@@ -232,11 +227,11 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- unsigned int old;
+ int old;
do {
old = ACCESS_ONCE(rw->lock);
- } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
+ } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}
static inline void arch_write_lock(arch_rwlock_t *rw)
@@ -248,7 +243,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- typecheck(unsigned int, rw->lock);
+ typecheck(int, rw->lock);
rw->owner = 0;
asm volatile(
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index d84b6939237c..fe755eec275f 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -6,14 +6,14 @@
#endif
typedef struct {
- unsigned int lock;
+ int lock;
} __attribute__ ((aligned (4))) arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
typedef struct {
- unsigned int lock;
- unsigned int owner;
+ int lock;
+ int owner;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
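The rwlock hunks above keep the existing encoding: readers count the lock word up from zero, and a writer claims the lock by swapping 0 for 0x80000000. With the lock word now signed, "old < 0" directly means "writer active", which is why the (int) casts disappear. A hedged userspace sketch of that encoding follows; the demo_* helpers are illustrative stand-ins, not kernel API, and the retry/wait paths are omitted.

/*
 * Sign-bit rwlock encoding demo; loosely mirrors arch_read_trylock_once()
 * and arch_write_trylock_once() from the diff, minus ACCESS_ONCE() and
 * the slow paths. Build: gcc -O2 -o rwlock_demo rwlock_demo.c
 */
#include <stdio.h>

static int demo_read_trylock(int *lock)
{
	int old = *lock;	/* stands in for ACCESS_ONCE(rw->lock) */

	/* old < 0 means the sign bit is set, i.e. a writer holds the lock */
	return old >= 0 && __sync_bool_compare_and_swap(lock, old, old + 1);
}

static int demo_write_trylock(int *lock)
{
	/* 0x80000000 is INT_MIN as a signed int: negative by design */
	return __sync_bool_compare_and_swap(lock, 0, (int)0x80000000);
}

int main(void)
{
	int lock = 0;
	int ok;

	ok = demo_read_trylock(&lock);
	printf("reader 1: ok=%d lock=%d\n", ok, lock);	/* ok=1 lock=1 */
	ok = demo_read_trylock(&lock);
	printf("reader 2: ok=%d lock=%d\n", ok, lock);	/* ok=1 lock=2 */
	ok = demo_write_trylock(&lock);
	printf("writer:   ok=%d lock=%d\n", ok, lock);	/* ok=0, readers in */

	lock = 0;					/* both readers unlock */
	ok = demo_write_trylock(&lock);
	printf("writer:   ok=%d lock<0=%d\n", ok, lock < 0);	/* ok=1, 1 */
	return 0;
}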