Diffstat (limited to 'arch/alpha/include/asm/atomic.h')
| -rw-r--r-- | arch/alpha/include/asm/atomic.h | 164 |
1 file changed, 62 insertions(+), 102 deletions(-)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 498933a7df97..cbd9244571af 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ALPHA_ATOMIC_H
 #define _ALPHA_ATOMIC_H
 
@@ -13,15 +14,23 @@
  * than regular operations.
  */
 
+/*
+ * To ensure dependency ordering is preserved for the _relaxed and
+ * _release atomics, an smp_mb() is unconditionally inserted into the
+ * _relaxed variants, which are used to build the barriered versions.
+ * Avoid redundant back-to-back fences in the _acquire and _fence
+ * versions.
+ */
+#define __atomic_acquire_fence()
+#define __atomic_post_full_fence()
 
-#define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		READ_ONCE((v)->counter)
-#define atomic64_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic64_read(v)	READ_ONCE((v)->counter)
 
-#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
 
 /*
  * To get proper branch prediction for the main line, we must branch
@@ -30,7 +39,7 @@
  */
 
 #define ATOMIC_OP(op, asm_op)					\
-static __inline__ void atomic_##op(int i, atomic_t * v)		\
+static __inline__ void arch_atomic_##op(int i, atomic_t * v)	\
 {								\
 	unsigned long temp;					\
 	__asm__ __volatile__(					\
@@ -46,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 }								\
 
 #define ATOMIC_OP_RETURN(op, asm_op)				\
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
 {								\
 	long temp, result;					\
 	__asm__ __volatile__(					\
@@ -60,11 +69,12 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
 	".previous"						\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)	\
 	:"Ir" (i), "m" (v->counter) : "memory");		\
+	smp_mb();						\
 	return result;						\
 }
 
 #define ATOMIC_FETCH_OP(op, asm_op)				\
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 {								\
 	long temp, result;					\
 	__asm__ __volatile__(					\
@@ -77,13 +87,14 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 	".previous"						\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)	\
 	:"Ir" (i), "m" (v->counter) : "memory");		\
+	smp_mb();						\
 	return result;						\
 }
 
 #define ATOMIC64_OP(op, asm_op)					\
-static __inline__ void atomic64_##op(long i, atomic64_t * v)	\
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v)	\
 {								\
-	unsigned long temp;					\
+	s64 temp;						\
 	__asm__ __volatile__(					\
 	"1:	ldq_l %0,%1\n"					\
 	"	" #asm_op " %0,%2,%0\n"				\
@@ -97,9 +108,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 }								\
 
 #define ATOMIC64_OP_RETURN(op, asm_op)				\
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+static __inline__ s64						\
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
 {								\
-	long temp, result;					\
+	s64 temp, result;					\
 	__asm__ __volatile__(					\
 	"1:	ldq_l %0,%1\n"					\
 	"	" #asm_op " %0,%3,%2\n"				\
@@ -111,13 +123,15 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 	".previous"						\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)	\
 	:"Ir" (i), "m" (v->counter) : "memory");		\
+	smp_mb();						\
 	return result;						\
 }
 
 #define ATOMIC64_FETCH_OP(op, asm_op)				\
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+static __inline__ s64						\
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
 {								\
-	long temp, result;					\
+	s64 temp, result;					\
 	__asm__ __volatile__(					\
 	"1:	ldq_l %2,%1\n"					\
 	"	" #asm_op " %2,%3,%0\n"				\
@@ -128,6 +142,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
 	".previous"						\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)	\
 	:"Ir" (i), "m" (v->counter) : "memory");		\
+	smp_mb();						\
 	return result;						\
 }
 
@@ -142,18 +157,18 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed
 
-#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed
 
-#define atomic_andnot atomic_andnot
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic_andnot			arch_atomic_andnot
+#define arch_atomic64_andnot			arch_atomic64_andnot
 
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, asm)					\
@@ -167,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
 ATOMIC_OPS(or, bis)
 ATOMIC_OPS(xor, xor)
 
-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed
 
-#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
 #undef ATOMIC64_FETCH_OP
@@ -185,22 +200,7 @@ ATOMIC_OPS(xor, xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, new, old;
 	smp_mb();
@@ -221,49 +221,34 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	smp_mb();
 	return old;
 }
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
-
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns true iff @v was not @u.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long c, tmp;
+	s64 c, new, old;
 	smp_mb();
 	__asm__ __volatile__(
-	"1:	ldq_l %[tmp],%[mem]\n"
-	"	cmpeq %[tmp],%[u],%[c]\n"
-	"	addq %[tmp],%[a],%[tmp]\n"
+	"1:	ldq_l %[old],%[mem]\n"
+	"	cmpeq %[old],%[u],%[c]\n"
+	"	addq %[old],%[a],%[new]\n"
 	"	bne %[c],2f\n"
-	"	stq_c %[tmp],%[mem]\n"
-	"	beq %[tmp],3f\n"
+	"	stq_c %[new],%[mem]\n"
+	"	beq %[new],3f\n"
 	"2:\n"
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
-	: [tmp] "=&r"(tmp), [c] "=&r"(c)
+	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
 	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
 	: "memory");
 	smp_mb();
-	return !c;
+	return old;
 }
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
-	long old, tmp;
+	s64 old, tmp;
 	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %[old],%[mem]\n"
@@ -281,31 +266,6 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	smp_mb();
 	return old - 1;
 }
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
-
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-#define atomic64_inc_return(v) atomic64_add_return(1,(v))
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic64_inc(v) atomic64_add(1,(v))
-
-#define atomic_dec(v) atomic_sub(1,(v))
-#define atomic64_dec(v) atomic64_sub(1,(v))
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
 #endif /* _ALPHA_ATOMIC_H */
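Two ideas in the patch above are worth illustrating outside kernel context. First, the full barrier moves into the _relaxed ll/sc loops themselves, so the generic layer's trailing fence can be elided (__atomic_post_full_fence() is defined empty). Second, the old __atomic_add_unless() (returning the old value) and atomic64_add_unless() (returning !c) are replaced by arch_atomic{,64}_fetch_add_unless(), which uniformly return the old value. The following is a rough user-space C11 sketch only, with hypothetical names: compare_exchange stands in for Alpha's ldq_l/stq_c retry loop, and an explicit fence stands in for smp_mb().

#include <stdatomic.h>
#include <stdio.h>

/*
 * Sketch of an ordered add_return built from a relaxed RMW plus a
 * trailing fence, the role played by the smp_mb() the patch adds at
 * the end of each _relaxed ll/sc loop.
 */
static int sketch_add_return(atomic_int *v, int i)
{
	int new = atomic_fetch_add_explicit(v, i, memory_order_relaxed) + i;
	atomic_thread_fence(memory_order_seq_cst);	/* models smp_mb() */
	return new;
}

/*
 * Sketch of fetch_add_unless(): add @a to @v unless @v == @u, and
 * return the OLD value either way (the new, uniform semantics).
 */
static int sketch_fetch_add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	do {
		if (old == u)
			break;	/* like the "bne %[c],2f" early exit */
	} while (!atomic_compare_exchange_weak_explicit(v, &old, old + a,
							memory_order_seq_cst,
							memory_order_relaxed));
	return old;
}

int main(void)
{
	atomic_int v = 3;

	printf("%d\n", sketch_add_return(&v, 2));		/* 5 */
	printf("%d\n", sketch_fetch_add_unless(&v, 1, 5));	/* 5: equal, no add */
	printf("%d\n", atomic_load(&v));			/* still 5 */
	return 0;
}

As with the kernel version, a caller learns whether the add happened by testing the return value against u, which is how inc_not_zero-style helpers (dropped from this file) are now built in the generic atomic layer.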
