Diffstat (limited to 'arch/parisc/include/asm/atomic.h')
-rw-r--r--   arch/parisc/include/asm/atomic.h   288
1 file changed, 134 insertions(+), 154 deletions(-)
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 472886ceab1d..d4f023887ff8 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
  * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
  */
@@ -7,6 +8,7 @@
 
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -32,13 +34,13 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	arch_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
 	arch_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	arch_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	arch_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
@@ -54,24 +56,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
  * are atomic, so a reader never sees inconsistent values.
  */
 
-/* It's possible to reduce all atomic operations to either
- * __atomic_add_return, atomic_set and atomic_read (the latter
- * is there only for consistency).
- */
-
-static __inline__ int __atomic_add_return(int i, atomic_t *v)
-{
-	int ret;
-	unsigned long flags;
-	_atomic_spin_lock_irqsave(v, flags);
-
-	ret = (v->counter += i);
-
-	_atomic_spin_unlock_irqrestore(v, flags);
-	return ret;
-}
-
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
 {
 	unsigned long flags;
 	_atomic_spin_lock_irqsave(v, flags);
@@ -81,175 +66,170 @@ static __inline__ void atomic_set(atomic_t *v, int i)
 	_atomic_spin_unlock_irqrestore(v, flags);
 }
 
-static __inline__ int atomic_read(const atomic_t *v)
+#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
+
+static __inline__ int arch_atomic_read(const atomic_t *v)
 {
-	return (*(volatile int *)&(v)->counter);
+	return READ_ONCE((v)->counter);
 }
 
-/* exported interface */
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define ATOMIC_OP(op, c_op) \
+static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+	unsigned long flags; \
+	\
+	_atomic_spin_lock_irqsave(v, flags); \
+	v->counter c_op i; \
+	_atomic_spin_unlock_irqrestore(v, flags); \
+}
 
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c;
+#define ATOMIC_OP_RETURN(op, c_op) \
+static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+	unsigned long flags; \
+	int ret; \
+	\
+	_atomic_spin_lock_irqsave(v, flags); \
+	ret = (v->counter c_op i); \
+	_atomic_spin_unlock_irqrestore(v, flags); \
+	\
+	return ret; \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op) \
+static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+	unsigned long flags; \
+	int ret; \
+	\
+	_atomic_spin_lock_irqsave(v, flags); \
+	ret = v->counter; \
+	v->counter c_op i; \
+	_atomic_spin_unlock_irqrestore(v, flags); \
+	\
+	return ret; \
+}
 
-#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
-#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int) (i)),(v))))
-#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
-#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))
+#define ATOMIC_OPS(op, c_op) \
+	ATOMIC_OP(op, c_op) \
+	ATOMIC_OP_RETURN(op, c_op) \
+	ATOMIC_FETCH_OP(op, c_op)
 
-#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
-#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
-#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
-#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
 
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+#define arch_atomic_add_return	arch_atomic_add_return
+#define arch_atomic_sub_return	arch_atomic_sub_return
+#define arch_atomic_fetch_add	arch_atomic_fetch_add
+#define arch_atomic_fetch_sub	arch_atomic_fetch_sub
 
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op) \
+	ATOMIC_OP(op, c_op) \
+	ATOMIC_FETCH_OP(op, c_op)
 
-#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
 
-#define ATOMIC_INIT(i)	{ (i) }
+#define arch_atomic_fetch_and	arch_atomic_fetch_and
+#define arch_atomic_fetch_or	arch_atomic_fetch_or
+#define arch_atomic_fetch_xor	arch_atomic_fetch_xor
 
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #ifdef CONFIG_64BIT
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static __inline__ s64
-__atomic64_add_return(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	unsigned long flags;
-	_atomic_spin_lock_irqsave(v, flags);
+#define ATOMIC64_OP(op, c_op) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+	unsigned long flags; \
+	\
+	_atomic_spin_lock_irqsave(v, flags); \
+	v->counter c_op i; \
+	_atomic_spin_unlock_irqrestore(v, flags); \
+}
 
-	ret = (v->counter += i);
+#define ATOMIC64_OP_RETURN(op, c_op) \
+static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
+{ \
+	unsigned long flags; \
+	s64 ret; \
+	\
+	_atomic_spin_lock_irqsave(v, flags); \
+	ret = (v->counter c_op i); \
+	_atomic_spin_unlock_irqrestore(v, flags); \
+	\
+	return ret; \
+}
 
-	_atomic_spin_unlock_irqrestore(v, flags);
-	return ret;
+#define ATOMIC64_FETCH_OP(op, c_op) \
+static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
+{ \
+	unsigned long flags; \
+	s64 ret; \
+	\
+	_atomic_spin_lock_irqsave(v, flags); \
+	ret = v->counter; \
+	v->counter c_op i; \
+	_atomic_spin_unlock_irqrestore(v, flags); \
+	\
+	return ret; \
 }
 
-static __inline__ void
-atomic64_set(atomic64_t *v, s64 i)
-{
-	unsigned long flags;
-	_atomic_spin_lock_irqsave(v, flags);
+#define ATOMIC64_OPS(op, c_op) \
+	ATOMIC64_OP(op, c_op) \
+	ATOMIC64_OP_RETURN(op, c_op) \
+	ATOMIC64_FETCH_OP(op, c_op)
 
-	v->counter = i;
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
 
-	_atomic_spin_unlock_irqrestore(v, flags);
-}
+#define arch_atomic64_add_return	arch_atomic64_add_return
+#define arch_atomic64_sub_return	arch_atomic64_sub_return
+#define arch_atomic64_fetch_add	arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub	arch_atomic64_fetch_sub
 
-static __inline__ s64
-atomic64_read(const atomic64_t *v)
-{
-	return (*(volatile long *)&(v)->counter);
-}
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op) \
+	ATOMIC64_OP(op, c_op) \
+	ATOMIC64_FETCH_OP(op, c_op)
 
-#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
-#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
-#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
-#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
 
-#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
-#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
-#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
-#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))
+#define arch_atomic64_fetch_and	arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or	arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor	arch_atomic64_fetch_xor
 
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
-#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)
+static __inline__ void
+arch_atomic64_set(atomic64_t *v, s64 i)
+{
+	unsigned long flags;
+	_atomic_spin_lock_irqsave(v, flags);
 
-/* exported interface */
-#define atomic64_cmpxchg(v, o, n) \
-	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+	v->counter = i;
 
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-	long c, old;
-	c = atomic64_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
+	_atomic_spin_unlock_irqrestore(v, flags);
 }
 
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define arch_atomic64_set_release(v, i)	arch_atomic64_set((v), (i))
 
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static __inline__ s64
+arch_atomic64_read(const atomic64_t *v)
 {
-	long c, old, dec;
-	c = atomic64_read(v);
-	for (;;) {
-		dec = c - 1;
-		if (unlikely(dec < 0))
-			break;
-		old = atomic64_cmpxchg((v), c, dec);
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return dec;
+	return READ_ONCE((v)->counter);
 }
 
 #endif /* !CONFIG_64BIT */
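
For readers unfamiliar with the hashed-spinlock scheme this header relies on, the sketch below is a stand-alone user-space analogy, not the kernel code: it mimics the role of ATOMIC_HASH() and _atomic_spin_lock_irqsave() with pthread mutexes to show how a read-modify-write such as arch_atomic_fetch_add() stays atomic on hardware without suitable atomic instructions. All names here (atomic_demo_t, demo_hash, demo_lock_for, demo_atomic_fetch_add, DEMO_HASH_SIZE) are invented for illustration; the real header uses arch spinlocks and also disables interrupts around the critical section.

/*
 * User-space sketch of the hashed-lock atomic technique, assuming
 * pthreads. Each atomic variable is protected by one lock picked from a
 * small fixed-size table, keyed on the variable's address.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_HASH_SIZE 4	/* stand-in for ATOMIC_HASH_SIZE */

typedef struct { int counter; } atomic_demo_t;

static pthread_mutex_t demo_hash[DEMO_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Crude stand-in for ATOMIC_HASH(): pick a lock from the address,
 * roughly one per chunk of memory rather than one global lock. */
static pthread_mutex_t *demo_lock_for(void *addr)
{
	return &demo_hash[((uintptr_t)addr >> 4) % DEMO_HASH_SIZE];
}

/* Same shape as the generated arch_atomic_fetch_add(): take the lock
 * covering this variable, return the old value, store the new one. */
static int demo_atomic_fetch_add(int i, atomic_demo_t *v)
{
	pthread_mutex_t *lock = demo_lock_for(v);
	int ret;

	pthread_mutex_lock(lock);
	ret = v->counter;
	v->counter += i;
	pthread_mutex_unlock(lock);

	return ret;
}

int main(void)
{
	atomic_demo_t v = { .counter = 40 };
	int old = demo_atomic_fetch_add(2, &v);

	printf("old=%d new=%d\n", old, v.counter);
	return 0;
}

Hashing the variable's address into a table of locks, rather than using a single global lock, keeps unrelated atomics on different locks and so reduces contention; any two operations on the same atomic_t still serialize on the same lock, which is what makes the plain C read-modify-write in the generated ops safe.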
