Diffstat (limited to 'arch/arm/include/asm/atomic.h')
-rw-r--r--	arch/arm/include/asm/atomic.h	542
1 file changed, 283 insertions, 259 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..f0e3b01afa74 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -1,24 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * arch/arm/include/asm/atomic.h
  *
  * Copyright (C) 1996 Russell King.
  * Copyright (C) 2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #ifndef __ASM_ARM_ATOMIC_H
 #define __ASM_ARM_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <linux/prefetch.h>
 #include <linux/types.h>
 #include <linux/irqflags.h>
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-#define ATOMIC_INIT(i)	{ (i) }
-
 #ifdef __KERNEL__
 
 /*
@@ -26,8 +22,8 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
-#define atomic_set(v,i)	(((v)->counter) = (i))
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
@@ -36,87 +32,83 @@
  * store exclusive to ensure that these are atomic.  We may loop
  * to ensure that the update happens.
  */
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__("@ atomic_add\n"
-"1:	ldrex	%0, [%3]\n"
-"	add	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	smp_mb();
-
-	__asm__ __volatile__("@ atomic_add_return\n"
-"1:	ldrex	%0, [%3]\n"
-"	add	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
+#define ATOMIC_OP(op, c_op, asm_op)	\
+static inline void arch_atomic_##op(int i, atomic_t *v)	\
+{	\
+	unsigned long tmp;	\
+	int result;	\
+	\
+	prefetchw(&v->counter);	\
+	__asm__ __volatile__("@ atomic_" #op "\n"	\
+"1:	ldrex	%0, [%3]\n"	\
+"	" #asm_op "	%0, %0, %4\n"	\
+"	strex	%1, %0, [%3]\n"	\
+"	teq	%1, #0\n"	\
+"	bne	1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "Ir" (i)	\
+	: "cc");	\
+}	\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
+{	\
+	unsigned long tmp;	\
+	int result;	\
+	\
+	prefetchw(&v->counter);	\
+	\
+	__asm__ __volatile__("@ atomic_" #op "_return\n"	\
+"1:	ldrex	%0, [%3]\n"	\
+"	" #asm_op "	%0, %0, %4\n"	\
+"	strex	%1, %0, [%3]\n"	\
+"	teq	%1, #0\n"	\
+"	bne	1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "Ir" (i)	\
+	: "cc");	\
+	\
+	return result;	\
 }
 
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__("@ atomic_sub\n"
-"1:	ldrex	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)	\
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
+{	\
+	unsigned long tmp;	\
+	int result, val;	\
+	\
+	prefetchw(&v->counter);	\
+	\
+	__asm__ __volatile__("@ atomic_fetch_" #op "\n"	\
+"1:	ldrex	%0, [%4]\n"	\
+"	" #asm_op "	%1, %0, %5\n"	\
+"	strex	%2, %1, [%4]\n"	\
+"	teq	%2, #0\n"	\
+"	bne	1b"	\
+	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "Ir" (i)	\
+	: "cc");	\
+	\
+	return result;	\
 }
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	smp_mb();
-
-	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-
-	smp_mb();
+#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed
 
-	return result;
-}
+#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed
 
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
-	unsigned long oldval, res;
+	int oldval;
+	unsigned long res;
 
-	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -129,25 +121,37 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 		: "cc");
 	} while (res);
 
-	smp_mb();
-
 	return oldval;
 }
+#define arch_atomic_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
-	unsigned long tmp, tmp2;
+	int oldval, newval;
+	unsigned long tmp;
 
-	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%3]\n"
-"	bic	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-	: "r" (addr), "Ir" (mask)
+	smp_mb();
+	prefetchw(&v->counter);
+
+	__asm__ __volatile__ ("@ atomic_add_unless\n"
+"1:	ldrex	%0, [%4]\n"
+"	teq	%0, %5\n"
+"	beq	2f\n"
+"	add	%1, %0, %6\n"
+"	strex	%2, %1, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (u), "r" (a)
 	: "cc");
+
+	if (oldval != u)
+		smp_mb();
+
+	return oldval;
 }
+#define arch_atomic_fetch_add_unless		arch_atomic_fetch_add_unless
 
 #else /* ARM_ARCH_6 */
 
@@ -155,35 +159,55 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int val;
-
-	raw_local_irq_save(flags);
-	val = v->counter;
-	v->counter = val += i;
-	raw_local_irq_restore(flags);
+#define ATOMIC_OP(op, c_op, asm_op)	\
+static inline void arch_atomic_##op(int i, atomic_t *v)	\
+{	\
+	unsigned long flags;	\
+	\
+	raw_local_irq_save(flags);	\
+	v->counter c_op i;	\
+	raw_local_irq_restore(flags);	\
+}	\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
+{	\
+	unsigned long flags;	\
+	int val;	\
+	\
+	raw_local_irq_save(flags);	\
+	v->counter c_op i;	\
+	val = v->counter;	\
+	raw_local_irq_restore(flags);	\
+	\
+	return val;	\
+}
 
-	return val;
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)	\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)	\
+{	\
+	unsigned long flags;	\
+	int val;	\
+	\
+	raw_local_irq_save(flags);	\
+	val = v->counter;	\
+	v->counter c_op i;	\
+	raw_local_irq_restore(flags);	\
+	\
+	return val;	\
 }
-#define atomic_add(i, v)	(void) atomic_add_return(i, v)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int val;
+#define arch_atomic_add_return			arch_atomic_add_return
+#define arch_atomic_sub_return			arch_atomic_sub_return
+#define arch_atomic_fetch_add			arch_atomic_fetch_add
+#define arch_atomic_fetch_sub			arch_atomic_fetch_sub
 
-	raw_local_irq_save(flags);
-	val = v->counter;
-	v->counter = val -= i;
-	raw_local_irq_restore(flags);
+#define arch_atomic_fetch_and			arch_atomic_fetch_and
+#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or			arch_atomic_fetch_or
+#define arch_atomic_fetch_xor			arch_atomic_fetch_xor
 
-	return val;
-}
-#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
 	unsigned long flags;
@@ -196,57 +220,46 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	*addr &= ~mask;
-	raw_local_irq_restore(flags);
-}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
 #endif /* __LINUX_ARM_ARCH__ */
 
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define ATOMIC_OPS(op, c_op, asm_op)	\
+	ATOMIC_OP(op, c_op, asm_op)	\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)	\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-
-	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
-		c = old;
-	return c;
-}
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
 
-#define atomic_inc(v)		atomic_add(1, v)
-#define atomic_dec(v)		atomic_sub(1, v)
+#define arch_atomic_andnot arch_atomic_andnot
 
-#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)	(atomic_add_return(1, v))
-#define atomic_dec_return(v)	(atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)	\
+	ATOMIC_OP(op, c_op, asm_op)	\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, orr)
+ATOMIC_OPS(xor, ^=, eor)
 
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-	u64 __aligned(8) counter;
+	s64 counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i) { (i) }
 
 #ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	s64 result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrd	%0, %H0, [%1]"
@@ -257,7 +270,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
 "	strd	%2, %H2, [%1]"
@@ -266,9 +279,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	);
 }
 #else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	s64 result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
@@ -279,10 +292,11 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-	u64 tmp;
+	s64 tmp;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_set\n"
 "1:	ldrexd	%0, %H0, [%2]\n"
 "	strexd	%0, %3, %H3, [%2]\n"
@@ -294,92 +308,112 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 }
 #endif
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
-{
-	u64 result;
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic64_add\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%0, %0, %4\n"
-"	adc	%H0, %H0, %H4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
+#define ATOMIC64_OP(op, op1, op2)	\
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v)	\
+{	\
+	s64 result;	\
+	unsigned long tmp;	\
+	\
+	prefetchw(&v->counter);	\
+	__asm__ __volatile__("@ atomic64_" #op "\n"	\
+"1:	ldrexd	%0, %H0, [%3]\n"	\
+"	" #op1 " %Q0, %Q0, %Q4\n"	\
+"	" #op2 " %R0, %R0, %R4\n"	\
+"	strexd	%1, %0, %H0, [%3]\n"	\
+"	teq	%1, #0\n"	\
+"	bne	1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "r" (i)	\
+	: "cc");	\
+}	\
+
+#define ATOMIC64_OP_RETURN(op, op1, op2)	\
+static inline s64	\
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)	\
+{	\
+	s64 result;	\
+	unsigned long tmp;	\
+	\
+	prefetchw(&v->counter);	\
+	\
+	__asm__ __volatile__("@ atomic64_" #op "_return\n"	\
+"1:	ldrexd	%0, %H0, [%3]\n"	\
+"	" #op1 " %Q0, %Q0, %Q4\n"	\
+"	" #op2 " %R0, %R0, %R4\n"	\
+"	strexd	%1, %0, %H0, [%3]\n"	\
+"	teq	%1, #0\n"	\
+"	bne	1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "r" (i)	\
+	: "cc");	\
+	\
+	return result;	\
 }
 
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
-{
-	u64 result;
-	unsigned long tmp;
-
-	smp_mb();
-
-	__asm__ __volatile__("@ atomic64_add_return\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%0, %0, %4\n"
-"	adc	%H0, %H0, %H4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
+#define ATOMIC64_FETCH_OP(op, op1, op2)	\
+static inline s64	\
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)	\
+{	\
+	s64 result, val;	\
+	unsigned long tmp;	\
+	\
+	prefetchw(&v->counter);	\
+	\
+	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"	\
+"1:	ldrexd	%0, %H0, [%4]\n"	\
+"	" #op1 " %Q1, %Q0, %Q5\n"	\
+"	" #op2 " %R1, %R0, %R5\n"	\
+"	strexd	%2, %1, %H1, [%4]\n"	\
+"	teq	%2, #0\n"	\
+"	bne	1b"	\
+	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "r" (i)	\
+	: "cc");	\
+	\
+	return result;	\
 }
 
-static inline void atomic64_sub(u64 i, atomic64_t *v)
-{
-	u64 result;
-	unsigned long tmp;
+#define ATOMIC64_OPS(op, op1, op2)	\
+	ATOMIC64_OP(op, op1, op2)	\
+	ATOMIC64_OP_RETURN(op, op1, op2)	\
+	ATOMIC64_FETCH_OP(op, op1, op2)
 
-	__asm__ __volatile__("@ atomic64_sub\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, %4\n"
-"	sbc	%H0, %H0, %H4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
+ATOMIC64_OPS(add, adds, adc)
+ATOMIC64_OPS(sub, subs, sbc)
 
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
-{
-	u64 result;
-	unsigned long tmp;
+#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed
 
-	smp_mb();
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, op1, op2)	\
+	ATOMIC64_OP(op, op1, op2)	\
+	ATOMIC64_FETCH_OP(op, op1, op2)
 
-	__asm__ __volatile__("@ atomic64_sub_return\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, %4\n"
-"	sbc	%H0, %H0, %H4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
+#define arch_atomic64_andnot arch_atomic64_andnot
 
-	smp_mb();
+ATOMIC64_OPS(and, and, and)
+ATOMIC64_OPS(andnot, bic, bic)
+ATOMIC64_OPS(or, orr, orr)
+ATOMIC64_OPS(xor, eor, eor)
 
-	return result;
-}
+#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed
 
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
 {
-	u64 oldval;
+	s64 oldval;
 	unsigned long res;
 
-	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -393,17 +427,16 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 		: "cc");
 	} while (res);
 
-	smp_mb();
-
 	return oldval;
 }
+#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed
 
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
 {
-	u64 result;
+	s64 result;
 	unsigned long tmp;
 
-	smp_mb();
+	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -414,23 +447,23 @@
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");
 
-	smp_mb();
-
 	return result;
 }
+#define arch_atomic64_xchg_relaxed	arch_atomic64_xchg_relaxed
 
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
-	u64 result;
+	s64 result;
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, #1\n"
-"	sbc	%H0, %H0, #0\n"
-"	teq	%H0, #0\n"
+"	subs	%Q0, %Q0, #1\n"
+"	sbc	%R0, %R0, #0\n"
+"	teq	%R0, #0\n"
 "	bmi	2f\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
@@ -444,46 +477,37 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	return result;
 }
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	u64 val;
+	s64 oldval, newval;
 	unsigned long tmp;
-	int ret = 1;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
 "1:	ldrexd	%0, %H0, [%4]\n"
 "	teq	%0, %5\n"
 "	teqeq	%H0, %H5\n"
-"	moveq	%1, #0\n"
 "	beq	2f\n"
-"	adds	%0, %0, %6\n"
-"	adc	%H0, %H0, %H6\n"
-"	strexd	%2, %0, %H0, [%4]\n"
+"	adds	%Q1, %Q0, %Q6\n"
+"	adc	%R1, %R0, %R6\n"
+"	strexd	%2, %1, %H1, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (u), "r" (a)
 	: "cc");
 
-	if (ret)
+	if (oldval != u)
 		smp_mb();
 
-	return ret;
+	return oldval;
 }
-
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)			atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)			atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 
 #endif /* !CONFIG_GENERIC_ATOMIC64 */
 #endif
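
Note (editorial, not part of the commit): after this change the hand-written atomic_add()/atomic_sub()-style functions are gone, and ATOMIC_OPS(add, +=, add) stamps out arch_atomic_add(), arch_atomic_add_return_relaxed() and arch_atomic_fetch_add_relaxed() from one template. As a rough, hand-expanded sketch of what ATOMIC_OP(add, +=, add) produces on ARMv6+ (illustrative only; the real code is generated by the preprocessor from the macro above):

/* Illustrative hand-expansion of ATOMIC_OP(add, +=, add) on ARMv6+.
 * #op ("add") only names the asm block; #asm_op ("add") is pasted into
 * the instruction template; c_op ("+=") is unused in the ll/sc variant.
 */
static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);			/* pull the cache line in for writing */
	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"			/* load-exclusive the counter */
"	add	%0, %0, %4\n"			/* apply the operation */
"	strex	%1, %0, [%3]\n"			/* try to store the result back */
"	teq	%1, #0\n"
"	bne	1b"				/* retry if the exclusive store failed */
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}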
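
A second observation on ordering: the ldrex/strex routines above no longer issue smp_mb() themselves; they are exported as _relaxed variants, and the generic atomic headers add the required barriers around them, while arch_atomic64_dec_if_positive() and the fetch_add_unless() helpers keep their explicit smp_mb() because they are provided as fully ordered operations. Conceptually, a fully ordered op can be derived from the relaxed one as in the sketch below (a simplification written for illustration; the kernel generates this wrapper itself and uses dedicated fence helpers rather than bare smp_mb()):

/* Sketch only: deriving a fully ordered add_return from the relaxed op
 * defined in this header.  The name is hypothetical, not a kernel API. */
static inline int example_atomic_add_return(int i, atomic_t *v)
{
	int ret;

	smp_mb();					/* order accesses before the RMW */
	ret = arch_atomic_add_return_relaxed(i, v);	/* unordered ll/sc loop */
	smp_mb();					/* order accesses after the RMW */

	return ret;
}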
