author		Mark Rutland <mark.rutland@arm.com>	2018-06-21 13:13:15 +0100
committer	Ingo Molnar <mingo@kernel.org>	2018-06-21 14:25:24 +0200
commit		4f44b4b2b337e100ae655d03c42d140816c8caf9 (patch)
tree		685c58793beddb8367cd055e27189703ca71f564 /arch/powerpc/include/asm/atomic.h
parent		fee8ca9fa5659d8d79432d38c52fa18da67f1e71 (diff)
atomics/powerpc: Define atomic64_fetch_add_unless()
As a step towards unifying the atomic/atomic64/atomic_long APIs, this
patch converts the arch/powerpc implementation of atomic64_add_unless()
into an implementation of atomic64_fetch_add_unless().

A wrapper in <linux/atomic.h> will build atomic64_add_unless() atop
this, provided it is given a preprocessor definition.

No functional change is intended as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-13-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
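For context, the <linux/atomic.h> wrapper referred to above reduces to
something like the following. This is a minimal sketch of the generic
layer, not the exact upstream code; the bool return type and long
arguments are illustrative assumptions:

/*
 * Sketch: atomic64_add_unless() only has to report whether the
 * addition happened, and that falls out of the old value returned
 * by atomic64_fetch_add_unless().
 */
static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
{
	/* The add was performed iff the old value was not @u. */
	return atomic64_fetch_add_unless(v, a, u) != u;
}

This is why the fetch variant is the more primitive operation: the
boolean form can always be derived from it, but not the other way
around.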
Diffstat (limited to 'arch/powerpc/include/asm/atomic.h')
-rw-r--r--	arch/powerpc/include/asm/atomic.h	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b5646c079c16..233dbf31911c 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -525,7 +525,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
 
 /**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -533,13 +533,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
 {
 	long t;
 
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%1		# atomic_fetch_add_unless\n\
+"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
 	cmpd	0,%0,%3 \n\
 	beq	2f \n\
 	add	%0,%2,%0 \n"
@@ -552,8 +552,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 	: "r" (&v->counter), "r" (a), "r" (u)
 	: "cc", "memory");
 
-	return t != u;
+	return t;
 }
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 /**
  * atomic_inc64_not_zero - increment unless the number is zero
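The added line "#define atomic64_fetch_add_unless atomic64_fetch_add_unless"
is the "preprocessor definition" the commit message mentions: a
self-referential define that lets the generic header detect, with
#ifndef, that the architecture supplies its own implementation. (The
stdcx. store-conditional and retry branch of the LL/SC loop sit in
unchanged context between the hunks, which is why only the ldarx line
needed its comment updated.) A sketch of the detection idiom follows;
the cmpxchg-based fallback body is illustrative, not the exact generic
code:

#ifndef atomic64_fetch_add_unless
static inline long long atomic64_fetch_add_unless(atomic64_t *v,
						  long long a, long long u)
{
	long long c = atomic64_read(v);

	while (c != u) {
		long long old = atomic64_cmpxchg(v, c, c + a);

		if (old == c)
			break;	/* cmpxchg succeeded: the add happened */
		c = old;	/* raced with another update: retry */
	}
	return c;	/* old value; equals u when nothing was added */
}
#endif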