Diffstat (limited to 'arch/x86/include/asm/atomic.h')
-rw-r--r--	arch/x86/include/asm/atomic.h	300
1 file changed, 100 insertions(+), 200 deletions(-)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 722aa3b04624..75743f1dfd4e 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -1,272 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_X86_ATOMIC_H
 #define _ASM_X86_ATOMIC_H
 
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/processor.h>
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
+#include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
  */
 
-#define ATOMIC_INIT(i)	{ (i) }
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-static inline int atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
 {
-	return (*(volatile int *)&(v)->counter);
+	/*
+	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
+	 * it's non-inlined function that increases binary size and stack usage.
	 */
+	return __READ_ONCE((v)->counter);
 }
 
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-static inline void atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
-	v->counter = i;
+	__WRITE_ONCE(v->counter, i);
 }
 
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "addl %1,%0"
+	asm_inline volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (v->counter)
-		     : "ir" (i));
+		     : "ir" (i) : "memory");
 }
 
-/**
- * atomic_sub - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static inline void atomic_sub(int i, atomic_t *v)
+static __always_inline void arch_atomic_sub(int i, atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "subl %1,%0"
+	asm_inline volatile(LOCK_PREFIX "subl %1, %0"
		     : "+m" (v->counter)
-		     : "ir" (i));
+		     : "ir" (i) : "memory");
 }
 
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline int atomic_sub_and_test(int i, atomic_t *v)
+static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
-	unsigned char c;
-
-	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
-		     : "+m" (v->counter), "=qm" (c)
-		     : "ir" (i) : "memory");
-	return c;
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
 }
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static inline void atomic_inc(atomic_t *v)
+static __always_inline void arch_atomic_inc(atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "incl %0"
-		     : "+m" (v->counter));
+	asm_inline volatile(LOCK_PREFIX "incl %0"
			    : "+m" (v->counter) :: "memory");
 }
+#define arch_atomic_inc arch_atomic_inc
 
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-static inline void atomic_dec(atomic_t *v)
+static __always_inline void arch_atomic_dec(atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "decl %0"
-		     : "+m" (v->counter));
+	asm_inline volatile(LOCK_PREFIX "decl %0"
			    : "+m" (v->counter) :: "memory");
 }
+#define arch_atomic_dec arch_atomic_dec
 
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline int atomic_dec_and_test(atomic_t *v)
+static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
-	unsigned char c;
-
-	asm volatile(LOCK_PREFIX "decl %0; sete %1"
-		     : "+m" (v->counter), "=qm" (c)
-		     : : "memory");
-	return c != 0;
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
 }
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline int atomic_inc_and_test(atomic_t *v)
+static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
-	unsigned char c;
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 
-	asm volatile(LOCK_PREFIX "incl %0; sete %1"
-		     : "+m" (v->counter), "=qm" (c)
-		     : : "memory");
-	return c != 0;
+static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
+{
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
 }
+#define arch_atomic_add_negative arch_atomic_add_negative
 
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline int atomic_add_negative(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 {
-	unsigned char c;
+	return i + xadd(&v->counter, i);
+}
+#define arch_atomic_add_return arch_atomic_add_return
 
-	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
-		     : "+m" (v->counter), "=qm" (c)
-		     : "ir" (i) : "memory");
-	return c;
+#define arch_atomic_sub_return(i, v)	arch_atomic_add_return(-(i), v)
+
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
+{
+	return xadd(&v->counter, i);
 }
+#define arch_atomic_fetch_add arch_atomic_fetch_add
 
-/**
- * atomic_add_return - add integer and return
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static inline int atomic_add_return(int i, atomic_t *v)
+#define arch_atomic_fetch_sub(i, v)	arch_atomic_fetch_add(-(i), v)
+
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	return i + xadd(&v->counter, i);
+	return arch_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
-/**
- * atomic_sub_return - subtract integer and return
- * @v: pointer of type atomic_t
- * @i: integer value to subtract
- *
- * Atomically subtracts @i from @v and returns @v - @i
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
+static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 {
-	return atomic_add_return(-i, v);
+	return arch_try_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
 
-#define atomic_inc_return(v)  (atomic_add_return(1, v))
-#define atomic_dec_return(v)  (atomic_sub_return(1, v))
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
+{
+	return arch_xchg(&v->counter, new);
+}
+#define arch_atomic_xchg arch_atomic_xchg
 
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline void arch_atomic_and(int i, atomic_t *v)
 {
-	return cmpxchg(&v->counter, old, new);
+	asm_inline volatile(LOCK_PREFIX "andl %1, %0"
			    : "+m" (v->counter)
			    : "ir" (i)
			    : "memory");
 }
 
-static inline int atomic_xchg(atomic_t *v, int new)
+static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
 {
-	return xchg(&v->counter, new);
+	int val = arch_atomic_read(v);
+
+	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));
+
+	return val;
 }
+#define arch_atomic_fetch_and arch_atomic_fetch_and
 
-/**
- * __atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
- */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline void arch_atomic_or(int i, atomic_t *v)
 {
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c;
+	asm_inline volatile(LOCK_PREFIX "orl %1, %0"
			    : "+m" (v->counter)
			    : "ir" (i)
			    : "memory");
 }
 
-/**
- * atomic_inc_short - increment of a short integer
- * @v: pointer to type int
- *
- * Atomically adds 1 to @v
- * Returns the new value of @u
- */
-static inline short int atomic_inc_short(short int *v)
+static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
 {
-	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
-	return *v;
+	int val = arch_atomic_read(v);
+
+	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));
+
+	return val;
 }
+#define arch_atomic_fetch_or arch_atomic_fetch_or
 
-#ifdef CONFIG_X86_64
-/**
- * atomic_or_long - OR of two long integers
- * @v1: pointer to type unsigned long
- * @v2: pointer to type unsigned long
- *
- * Atomically ORs @v1 and @v2
- * Returns the result of the OR
- */
-static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+static __always_inline void arch_atomic_xor(int i, atomic_t *v)
 {
-	asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
+	asm_inline volatile(LOCK_PREFIX "xorl %1, %0"
			    : "+m" (v->counter)
			    : "ir" (i)
			    : "memory");
 }
-#endif
 
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "andl %0,%1"			\
-		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
-
-#define atomic_set_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "orl %0,%1"			\
-		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
-		     : "memory")
-
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
+static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
+{
+	int val = arch_atomic_read(v);
+
+	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));
+
+	return val;
+}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
 
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
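
The new arch_atomic_fetch_and/or/xor helpers in the diff cannot be a single LOCK-prefixed instruction, because x86 has no AND/OR/XOR form that also returns the old value; the header therefore builds them from an arch_atomic_try_cmpxchg() retry loop. A rough user-space analogue of that loop, written with the standard GCC/Clang __atomic builtins rather than the kernel's helpers (the fetch_and() wrapper and the demo main() are illustrative only, not part of the header):

/* fetch_and() sketch - mirrors the shape of arch_atomic_fetch_and() */
#include <stdio.h>

static int fetch_and(int *counter, int mask)
{
	int old = __atomic_load_n(counter, __ATOMIC_RELAXED);

	/*
	 * __atomic_compare_exchange_n() refreshes 'old' with the current
	 * value whenever the CAS fails, so the loop retries until it wins -
	 * the same idea as:
	 *	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));
	 */
	while (!__atomic_compare_exchange_n(counter, &old, old & mask,
					    0 /* strong CAS */,
					    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
		;

	return old;	/* value seen before the AND, i.e. the "fetch" part */
}

int main(void)
{
	int v = 0xff;
	int old = fetch_and(&v, 0x0f);

	printf("old=%#x new=%#x\n", old, v);	/* old=0xff new=0xf */
	return 0;
}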

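Similarly, the sub_and_test/dec_and_test/inc_and_test/add_negative conversions drop the old sete/sets-into-a-byte sequences in favour of GEN_UNARY_RMWcc()/GEN_BINARY_RMWcc(), which let the caller branch directly on the CPU flags produced by the LOCKed instruction. The observable contract is simply "atomically subtract, then report whether the result is zero"; a portable sketch of that contract using the __atomic builtins (the sub_and_test() helper name is made up for the example):

/* sub_and_test() sketch - same contract as arch_atomic_sub_and_test() */
#include <stdbool.h>
#include <stdio.h>

static bool sub_and_test(int *counter, int i)
{
	/* __atomic_sub_fetch() returns the value *after* the subtraction */
	return __atomic_sub_fetch(counter, i, __ATOMIC_SEQ_CST) == 0;
}

int main(void)
{
	int refcount = 2;

	printf("%d\n", sub_and_test(&refcount, 1));	/* 0 - one reference left */
	printf("%d\n", sub_and_test(&refcount, 1));	/* 1 - hit zero, last reference */
	return 0;
}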