Diffstat (limited to 'arch/x86/include/asm/atomic.h')
-rw-r--r--  arch/x86/include/asm/atomic.h | 260
1 file changed, 91 insertions(+), 169 deletions(-)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 33380b871463..75743f1dfd4e 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H
@@ -13,238 +14,159 @@
* resource counting etc..
*/
-#define ATOMIC_INIT(i) { (i) }
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-static __always_inline int atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
{
- return READ_ONCE((v)->counter);
+ /*
+ * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
+ * as it's a non-inlined function that increases binary size and stack usage.
+ */
+ return __READ_ONCE((v)->counter);
}
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-static __always_inline void atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
- WRITE_ONCE(v->counter, i);
+ __WRITE_ONCE(v->counter, i);
}
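
Note: __READ_ONCE()/__WRITE_ONCE() give a single, tear-free load/store with no ordering beyond a compiler barrier; per the comment above, the non-instrumented forms are used because READ_ONCE_NOCHECK() is an out-of-line function. A rough user-space analogue of the same guarantee, using GCC/Clang __atomic builtins (the my_atomic_* names are illustrative, not kernel API):

typedef struct { int counter; } my_atomic_t;

static inline int my_atomic_read(const my_atomic_t *v)
{
	/* Relaxed atomic load: tear-free, no ordering implied. */
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void my_atomic_set(my_atomic_t *v, int i)
{
	__atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
}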
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __always_inline void atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
- asm volatile(LOCK_PREFIX "addl %1,%0"
+ asm_inline volatile(LOCK_PREFIX "addl %1, %0"
: "+m" (v->counter)
- : "ir" (i));
+ : "ir" (i) : "memory");
}
-/**
- * atomic_sub - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __always_inline void atomic_sub(int i, atomic_t *v)
+static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
- asm volatile(LOCK_PREFIX "subl %1,%0"
+ asm_inline volatile(LOCK_PREFIX "subl %1, %0"
: "+m" (v->counter)
- : "ir" (i));
+ : "ir" (i) : "memory");
}
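
Note: LOCK_PREFIX makes the whole read-modify-write a single indivisible operation on the cache line, and the newly added "memory" clobber additionally turns each op into a compiler barrier. A minimal stand-alone sketch of the same pattern, assuming x86 and GCC/Clang inline asm (names illustrative):

typedef struct { int counter; } my_atomic_t;

static inline void my_atomic_add(int i, my_atomic_t *v)
{
	/* "lock" serializes the read-modify-write; "+m" ties the
	 * operand to memory; "memory" stops compiler reordering. */
	asm volatile("lock addl %1, %0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}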
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
+ return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
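
Note: GEN_BINARY_RMWcc() emits the locked instruction and hands back the named condition flag ("e", i.e. ZF) directly, so no separate compare is needed. One plausible expansion, assuming GCC's x86 flag-output constraints (my_sub_and_test is illustrative; the real macro also has an asm-goto form):

#include <stdbool.h>

static inline bool my_sub_and_test(int i, int *counter)
{
	bool zero;

	/* "=@cce" captures ZF after the locked subtract. */
	asm volatile("lock subl %2, %0"
		     : "+m" (*counter), "=@cce" (zero)
		     : "er" (i)
		     : "memory");
	return zero;	/* true iff the result hit zero */
}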
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static __always_inline void atomic_inc(atomic_t *v)
+static __always_inline void arch_atomic_inc(atomic_t *v)
{
- asm volatile(LOCK_PREFIX "incl %0"
- : "+m" (v->counter));
+ asm_inline volatile(LOCK_PREFIX "incl %0"
+ : "+m" (v->counter) :: "memory");
}
+#define arch_atomic_inc arch_atomic_inc
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-static __always_inline void atomic_dec(atomic_t *v)
+static __always_inline void arch_atomic_dec(atomic_t *v)
{
- asm volatile(LOCK_PREFIX "decl %0"
- : "+m" (v->counter));
+ asm_inline volatile(LOCK_PREFIX "decl %0"
+ : "+m" (v->counter) :: "memory");
}
+#define arch_atomic_dec arch_atomic_dec
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool atomic_dec_and_test(atomic_t *v)
+static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
+ return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
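
Note: the canonical caller of dec_and_test is reference counting: exactly one task observes the 1 -> 0 transition and may free the object. A hedged kernel-side sketch (struct obj and obj_put() are illustrative; real code calls the instrumented atomic_dec_and_test() wrapper built on this arch_ primitive):

struct obj {
	atomic_t refs;
	/* ... payload ... */
};

static void obj_put(struct obj *o)
{
	/* Only the task that drops the last reference frees. */
	if (atomic_dec_and_test(&o->refs))
		kfree(o);
}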
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool atomic_inc_and_test(atomic_t *v)
+static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
+ return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
+ return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
+#define arch_atomic_add_negative arch_atomic_add_negative
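
Note: add_negative keys off the sign flag ("s") of the locked ADD, so "did the sum go below zero?" costs no extra compare. An illustrative down()-style fast path (sem and slow_path_wait() are hypothetical):

/* One locked ADD both decrements and tests for contention. */
if (atomic_add_negative(-1, &sem->count))
	slow_path_wait(sem);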
-/**
- * atomic_add_return - add integer and return
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __always_inline int atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
+#define arch_atomic_add_return arch_atomic_add_return
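
Note: XADD stores the sum to memory but returns the old value in the register, which is why the wrapper adds i back to produce the post-add result. The same shape with a GCC builtin, for reference (my_add_return is illustrative):

static inline int my_add_return(int i, int *p)
{
	/* __atomic_fetch_add() returns the value before the add,
	 * exactly like XADD leaves the old value in its register. */
	return i + __atomic_fetch_add(p, i, __ATOMIC_SEQ_CST);
}

arch_atomic_fetch_add() below is the same xadd with the old value returned unmodified.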
-/**
- * atomic_sub_return - subtract integer and return
- * @v: pointer of type atomic_t
- * @i: integer value to subtract
- *
- * Atomically subtracts @i from @v and returns @v - @i
- */
-static __always_inline int atomic_sub_return(int i, atomic_t *v)
+#define arch_atomic_sub_return(i, v) arch_atomic_add_return(-(i), v)
+
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
- return atomic_add_return(-i, v);
+ return xadd(&v->counter, i);
}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
-#define atomic_inc_return(v) (atomic_add_return(1, v))
-#define atomic_dec_return(v) (atomic_sub_return(1, v))
+#define arch_atomic_fetch_sub(i, v) arch_atomic_fetch_add(-(i), v)
-static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
- return xadd(&v->counter, i);
+ return arch_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
- return xadd(&v->counter, -i);
+ return arch_try_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
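
Note: the two primitives differ in how they report failure: cmpxchg() returns whatever was found in memory and the caller compares it against old, while try_cmpxchg() returns a bool and, on failure, writes the observed value back through *old. That back-fill is what keeps the do/while loops below so compact. A portable C11 analogue (my_try_cmpxchg is illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static inline bool my_try_cmpxchg(atomic_int *v, int *old, int new)
{
	/* On failure, *old is refreshed with the current value,
	 * matching arch_try_cmpxchg() semantics. */
	return atomic_compare_exchange_strong(v, old, new);
}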
-static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
- return cmpxchg(&v->counter, old, new);
+ return arch_xchg(&v->counter, new);
}
+#define arch_atomic_xchg arch_atomic_xchg
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
- return try_cmpxchg(&v->counter, old, new);
+ asm_inline volatile(LOCK_PREFIX "andl %1, %0"
+ : "+m" (v->counter)
+ : "ir" (i)
+ : "memory");
}
-static inline int atomic_xchg(atomic_t *v, int new)
+static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
- return xchg(&v->counter, new);
-}
+ int val = arch_atomic_read(v);
+
+ do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));
-#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- asm volatile(LOCK_PREFIX #op"l %1,%0" \
- : "+m" (v->counter) \
- : "ir" (i) \
- : "memory"); \
+ return val;
}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
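
Note: x86 has no instruction that both ANDs memory and returns the old value (XADD exists only for add), so fetch_and/fetch_or/fetch_xor are built as a try_cmpxchg loop: read once, then keep proposing "val OP i" until no other CPU raced; each failed attempt refreshes val for free. The same loop in portable C11 (my_fetch_and is illustrative):

#include <stdatomic.h>

static inline int my_fetch_and(atomic_int *v, int i)
{
	int val = atomic_load_explicit(v, memory_order_relaxed);

	/* compare_exchange updates 'val' on failure, so the loop
	 * body stays empty, mirroring the kernel's do { } while. */
	while (!atomic_compare_exchange_weak(v, &val, val & i))
		;

	return val;	/* old value, as all fetch_* variants return */
}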
-#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- int val = atomic_read(v); \
- do { \
- } while (!atomic_try_cmpxchg(v, &val, val c_op i)); \
- return val; \
+static __always_inline void arch_atomic_or(int i, atomic_t *v)
+{
+ asm_inline volatile(LOCK_PREFIX "orl %1, %0"
+ : "+m" (v->counter)
+ : "ir" (i)
+ : "memory");
}
-#define ATOMIC_OPS(op, c_op) \
- ATOMIC_OP(op) \
- ATOMIC_FETCH_OP(op, c_op)
+static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
+{
+ int val = arch_atomic_read(v);
-ATOMIC_OPS(and, &)
-ATOMIC_OPS(or , |)
-ATOMIC_OPS(xor, ^)
+ do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP
+ return val;
+}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
-/**
- * __atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
- */
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline void arch_atomic_xor(int i, atomic_t *v)
+{
+ asm_inline volatile(LOCK_PREFIX "xorl %1, %0"
+ : "+m" (v->counter)
+ : "ir" (i)
+ : "memory");
+}
+
+static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
- int c = atomic_read(v);
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic_try_cmpxchg(v, &c, c + a));
- return c;
+ int val = arch_atomic_read(v);
+
+ do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));
+
+ return val;
}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>