Diffstat (limited to 'arch/powerpc/include/asm/atomic.h')
| -rw-r--r-- | arch/powerpc/include/asm/atomic.h | 228 |
1 file changed, 49 insertions, 179 deletions
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index a1732a79e92a..d1ea554c33ed 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -11,6 +11,7 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 #include <asm/asm-const.h>
+#include <asm/asm-compat.h>
 
 /*
  * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
@@ -27,72 +28,80 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
 {
 	int t;
 
-	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));
+	/* -mprefixed can generate offsets beyond range, fall back hack */
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+		__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+	else
+		__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
 
 	return t;
 }
 
 static __inline__ void arch_atomic_set(atomic_t *v, int i)
 {
-	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
+	/* -mprefixed can generate offsets beyond range, fall back hack */
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+		__asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+	else
+		__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
 }
 
-#define ATOMIC_OP(op, asm_op)						\
+#define ATOMIC_OP(op, asm_op, suffix, sign, ...)			\
 static __inline__ void arch_atomic_##op(int a, atomic_t *v)		\
 {									\
 	int t;								\
 									\
 	__asm__ __volatile__(						\
 "1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
-	#asm_op " %0,%2,%0\n"						\
+	#asm_op "%I2" suffix " %0,%0,%2\n"				\
 "	stwcx.	%0,0,%3 \n"						\
 "	bne-	1b\n"							\
 	: "=&r" (t), "+m" (v->counter)					\
-	: "r" (a), "r" (&v->counter)					\
-	: "cc");							\
+	: "r"#sign (a), "r" (&v->counter)				\
+	: "cc", ##__VA_ARGS__);						\
 }									\
 
-#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
+#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...)	\
 static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v)	\
 {									\
 	int t;								\
 									\
 	__asm__ __volatile__(						\
 "1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
-	#asm_op " %0,%2,%0\n"						\
+	#asm_op "%I2" suffix " %0,%0,%2\n"				\
 "	stwcx.	%0,0,%3\n"						\
 "	bne-	1b\n"							\
 	: "=&r" (t), "+m" (v->counter)					\
-	: "r" (a), "r" (&v->counter)					\
-	: "cc");							\
+	: "r"#sign (a), "r" (&v->counter)				\
+	: "cc", ##__VA_ARGS__);						\
 									\
 	return t;							\
 }
 
-#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
+#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...)		\
 static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
 {									\
 	int res, t;							\
 									\
 	__asm__ __volatile__(						\
 "1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
-	#asm_op " %1,%3,%0\n"						\
+	#asm_op "%I3" suffix " %1,%0,%3\n"				\
 "	stwcx.	%1,0,%4\n"						\
 "	bne-	1b\n"							\
 	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
-	: "r" (a), "r" (&v->counter)					\
-	: "cc");							\
+	: "r"#sign (a), "r" (&v->counter)				\
+	: "cc", ##__VA_ARGS__);						\
 									\
 	return res;							\
 }
 
-#define ATOMIC_OPS(op, asm_op)						\
-	ATOMIC_OP(op, asm_op)						\
-	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
-	ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+#define ATOMIC_OPS(op, asm_op, suffix, sign, ...)			\
+	ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__)		\
+	ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
+	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)
 
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, subf)
+ATOMIC_OPS(add, add, "c", I, "xer")
+ATOMIC_OPS(sub, sub, "c", I, "xer")
 
 #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
 #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
@@ -101,13 +110,13 @@ ATOMIC_OPS(sub, subf)
 #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
 
 #undef ATOMIC_OPS
-#define ATOMIC_OPS(op, asm_op)						\
-	ATOMIC_OP(op, asm_op)						\
-	ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+#define ATOMIC_OPS(op, asm_op, suffix, sign)				\
+	ATOMIC_OP(op, asm_op, suffix, sign)				\
+	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)
 
-ATOMIC_OPS(and, and)
-ATOMIC_OPS(or, or)
-ATOMIC_OPS(xor, xor)
+ATOMIC_OPS(and, and, ".", K)
+ATOMIC_OPS(or, or, "", K)
+ATOMIC_OPS(xor, xor, "", K)
 
 #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
 #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
@@ -118,111 +127,6 @@ ATOMIC_OPS(xor, xor)
 #undef ATOMIC_OP_RETURN_RELAXED
 #undef ATOMIC_OP
 
-static __inline__ void arch_atomic_inc(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_inc\n\
-	addic	%0,%0,1\n"
-"	stwcx.	%0,0,%2 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (&v->counter)
-	: "cc", "xer");
-}
-#define arch_atomic_inc arch_atomic_inc
-
-static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
-"	addic	%0,%0,1\n"
-"	stwcx.	%0,0,%2\n"
-"	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (&v->counter)
-	: "cc", "xer");
-
-	return t;
-}
-
-static __inline__ void arch_atomic_dec(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_dec\n\
-	addic	%0,%0,-1\n"
-"	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (&v->counter)
-	: "cc", "xer");
-}
-#define arch_atomic_dec arch_atomic_dec
-
-static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
-"	addic	%0,%0,-1\n"
-"	stwcx.	%0,0,%2\n"
-"	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (&v->counter)
-	: "cc", "xer");
-
-	return t;
-}
-
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-
-#define arch_atomic_cmpxchg(v, o, n) \
-	(arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic_cmpxchg_relaxed(v, o, n) \
-	arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define arch_atomic_cmpxchg_acquire(v, o, n) \
-	arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-
-#define arch_atomic_xchg(v, new) \
-	(arch_xchg(&((v)->counter), new))
-#define arch_atomic_xchg_relaxed(v, new) \
-	arch_xchg_relaxed(&((v)->counter), (new))
-
-/*
- * Don't want to override the generic atomic_try_cmpxchg_acquire, because
- * we add a lock hint to the lwarx, which may not be wanted for the
- * _acquire case (and is not used by the other _acquire variants so it
- * would be a surprise).
- */
-static __always_inline bool
-arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
-{
-	int r, o = *old;
-
-	__asm__ __volatile__ (
-"1:\t" PPC_LWARX(%0,0,%2,1) "	# atomic_try_cmpxchg_acquire	\n"
-"	cmpw	0,%0,%3	\n"
-"	bne-	2f	\n"
-"	stwcx.	%4,0,%2	\n"
-"	bne-	1b	\n"
-"\t" PPC_ACQUIRE_BARRIER "	\n"
-"2:	\n"
-	: "=&r" (r), "+m" (v->counter)
-	: "r" (&v->counter), "r" (o), "r" (new)
-	: "cr0", "memory");
-
-	if (unlikely(r != o))
-		*old = r;
-	return likely(r == o);
-}
-
 /**
  * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
@@ -241,50 +145,20 @@ static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 "1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
 	cmpw	0,%0,%3 \n\
 	beq	2f \n\
-	add	%0,%2,%0 \n"
+	add%I2c	%0,%0,%2 \n"
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b \n"
 	PPC_ATOMIC_EXIT_BARRIER
-"	subf	%0,%2,%0 \n\
+"	sub%I2c	%0,%0,%2 \n\
 2:"
 	: "=&r" (t)
-	: "r" (&v->counter), "r" (a), "r" (u)
-	: "cc", "memory");
+	: "r" (&v->counter), "rI" (a), "r" (u)
+	: "cc", "memory", "xer");
 
 	return t;
 }
 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
-{
-	int t1, t2;
-
-	__asm__ __volatile__ (
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
-	cmpwi	0,%0,0\n\
-	beq-	2f\n\
-	addic	%1,%0,1\n"
-"	stwcx.	%1,0,%2\n\
-	bne-	1b\n"
-	PPC_ATOMIC_EXIT_BARRIER
-	"\n\
-2:"
-	: "=&r" (t1), "=&r" (t2)
-	: "r" (&v->counter)
-	: "cc", "xer", "memory");
-
-	return t1;
-}
-#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
-
 /*
  * Atomically test *v and decrement if it is greater than 0.
  * The function returns the old value of *v minus 1, even if
@@ -320,14 +194,22 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
 {
 	s64 t;
 
-	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));
+	/* -mprefixed can generate offsets beyond range, fall back hack */
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+		__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+	else
+		__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
 
 	return t;
 }
 
 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
+	/* -mprefixed can generate offsets beyond range, fall back hack */
+	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+		__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+	else
+		__asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
 }
 
 #define ATOMIC64_OP(op, asm_op)						\
@@ -503,18 +385,6 @@ static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 }
 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
-#define arch_atomic64_cmpxchg(v, o, n) \
-	(arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
-	arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define arch_atomic64_cmpxchg_acquire(v, o, n) \
-	arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-
-#define arch_atomic64_xchg(v, new) \
-	(arch_xchg(&((v)->counter), new))
-#define arch_atomic64_xchg_relaxed(v, new) \
-	arch_xchg_relaxed(&((v)->counter), (new))
-
 /**
  * atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
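
Note (not part of the diff): with the reworked ATOMIC_OP() macro above, ATOMIC_OPS(add, add, "c", I, "xer") should expand arch_atomic_add() roughly as shown below. The "I" constraint lets the compiler pass a signed 16-bit immediate instead of forcing it into a register, the "%I2" operand modifier turns the mnemonic into addic when operand 2 is a constant and addc when it is a register, and "xer" is clobbered because the carry-recording forms write XER[CA]. This is a hand-expanded, illustrative sketch only:

static __inline__ void arch_atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n"	/* load word and reserve v->counter */
	"add%I2c %0,%0,%2\n"			/* addc (register a) or addic (immediate a) */
"	stwcx.	%0,0,%3 \n"			/* store conditionally; fails if reservation lost */
"	bne-	1b\n"				/* retry on failure */
	: "=&r" (t), "+m" (v->counter)
	: "rI" (a), "r" (&v->counter)		/* "I": signed 16-bit immediate allowed */
	: "cc", "xer");				/* addic/addc set the carry bit in XER */
}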
