Diffstat (limited to 'include/linux/atomic.h')
-rw-r--r--	include/linux/atomic.h	167
1 file changed, 60 insertions(+), 107 deletions(-)
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a8540ecf..8dd57c3a99e9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -1,131 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Atomic operations usable in machine independent code */
 #ifndef _LINUX_ATOMIC_H
 #define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
 #include <asm/atomic.h>
+#include <asm/barrier.h>
 
-/**
- * atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
+/*
+ * Relaxed variants of xchg, cmpxchg and some atomic operations.
  *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	return __atomic_add_unless(v, a, u) != u;
-}
-
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
+ * We support four variants:
  *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-#ifndef atomic_inc_not_zero
-#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
-#endif
-
-/**
- * atomic_inc_not_zero_hint - increment if not null
- * @v: pointer of type atomic_t
- * @hint: probable value of the atomic before the increment
+ * - Fully ordered: The default implementation, no suffix required.
+ * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
+ * - Release: Provides RELEASE semantics, _release suffix.
+ * - Relaxed: No ordering guarantees, _relaxed suffix.
  *
- * This version of atomic_inc_not_zero() gives a hint of probable
- * value of the atomic. This helps processor to not read the memory
- * before doing the atomic read/modify/write cycle, lowering
- * number of bus transactions on some arches.
+ * For compound atomics performing both a load and a store, ACQUIRE
+ * semantics apply only to the load and RELEASE semantics only to the
+ * store portion of the operation. Note that a failed cmpxchg_acquire
+ * does -not- imply any memory ordering constraints.
  *
- * Returns: 0 if increment was not done, 1 otherwise.
+ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
  */
-#ifndef atomic_inc_not_zero_hint
-static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
-{
-	int val, c = hint;
 
-	/* sanity test, should be removed by compiler if hint is a constant */
-	if (!hint)
-		return atomic_inc_not_zero(v);
+#define atomic_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c)	smp_cond_load_relaxed(&(v)->counter, (c))
 
-	do {
-		val = atomic_cmpxchg(v, c, c + 1);
-		if (val == c)
-			return 1;
-		c = val;
-	} while (c);
+#define atomic64_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c)	smp_cond_load_relaxed(&(v)->counter, (c))
 
-	return 0;
-}
+/*
+ * The idea here is to build acquire/release variants by adding explicit
+ * barriers on top of the relaxed variant. In the case where the relaxed
+ * variant is already fully ordered, no additional barriers are needed.
+ *
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
+ */
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence		smp_mb__after_atomic
 #endif
 
-#ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
-{
-	int v, v1;
-	for (v = 0; v >= 0; v = v1) {
-		v1 = atomic_cmpxchg(p, v, v + 1);
-		if (likely(v1 == v))
-			return 1;
-	}
-	return 0;
-}
+#ifndef __atomic_release_fence
+#define __atomic_release_fence		smp_mb__before_atomic
 #endif
 
-#ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
-{
-	int v, v1;
-	for (v = 0; v <= 0; v = v1) {
-		v1 = atomic_cmpxchg(p, v, v - 1);
-		if (likely(v1 == v))
-			return 1;
-	}
-	return 0;
-}
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence	smp_mb__before_atomic
 #endif
 
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-#ifndef atomic_dec_if_positive
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
-	int c, old, dec;
-	c = atomic_read(v);
-	for (;;) {
-		dec = c - 1;
-		if (unlikely(dec < 0))
-			break;
-		old = atomic_cmpxchg((v), c, dec);
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return dec;
-}
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence	smp_mb__after_atomic
 #endif
 
-#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
-static inline void atomic_or(int i, atomic_t *v)
-{
-	int old;
-	int new;
+#define __atomic_op_acquire(op, args...)				\
+({									\
+	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
+	__atomic_acquire_fence();					\
+	__ret;								\
+})
 
-	do {
-		old = atomic_read(v);
-		new = old | i;
-	} while (atomic_cmpxchg(v, old, new) != old);
-}
-#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
+#define __atomic_op_release(op, args...)				\
+({									\
+	__atomic_release_fence();					\
+	op##_relaxed(args);						\
+})
+
+#define __atomic_op_fence(op, args...)					\
+({									\
+	typeof(op##_relaxed(args)) __ret;				\
+	__atomic_pre_full_fence();					\
+	__ret = op##_relaxed(args);					\
+	__atomic_post_full_fence();					\
+	__ret;								\
+})
+
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
 
-#include <asm-generic/atomic-long.h>
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
 #endif /* _LINUX_ATOMIC_H */
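To make the fence-composition scheme concrete: expanding
__atomic_op_acquire(atomic_fetch_add, i, v) by hand amounts to the
following. This is a minimal sketch, assuming the architecture supplies
only atomic_fetch_add_relaxed(); the function shown here is illustrative
and the real definitions are generated into atomic-arch-fallback.h.

	/* Acquire variant built from the relaxed op plus an explicit fence. */
	static inline int atomic_fetch_add_acquire(int i, atomic_t *v)
	{
		/* Relaxed read-modify-write first... */
		int __ret = atomic_fetch_add_relaxed(i, v);
		/* ...then upgrade the load half to ACQUIRE ordering. */
		__atomic_acquire_fence();
		return __ret;
	}

__atomic_op_release() is the mirror image (fence first, then the relaxed
op, ordering prior accesses before the store half), and
__atomic_op_fence() brackets the relaxed op with fences on both sides to
produce the fully ordered variant.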

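A usage note on the conditional-read helpers added above:
atomic_cond_read_acquire(v, c) spins via smp_cond_load_acquire() until
the condition @c, which refers to the current value as VAL, becomes
true, then returns with ACQUIRE semantics. A hypothetical consumer-side
wait might look like this (wait_for_data_ready() and the flag protocol
are made up for illustration):

	/*
	 * Spin until @ready becomes non-zero. The ACQUIRE ordering
	 * guarantees that data written by the producer before its
	 * releasing store of the flag is visible once this returns.
	 */
	static inline void wait_for_data_ready(atomic_t *ready)
	{
		atomic_cond_read_acquire(ready, VAL != 0);
	}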