Diffstat (limited to 'arch/x86/include/asm/atomic64_32.h')
-rw-r--r--  arch/x86/include/asm/atomic64_32.h | 394
1 file changed, 179 insertions, 215 deletions
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 71d7705fb303..ab838205c1c6 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_X86_ATOMIC64_32_H
 #define _ASM_X86_ATOMIC64_32_H
 
@@ -8,11 +9,37 @@
 /* An 64bit atomic type */
 
 typedef struct {
-	u64 __aligned(8) counter;
+	s64 __aligned(8) counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
 
+/*
+ * Read an atomic64_t non-atomically.
+ *
+ * This is intended to be used in cases where a subsequent atomic operation
+ * will handle the torn value, and can be used to prime the first iteration
+ * of unconditional try_cmpxchg() loops, e.g.:
+ *
+ *	s64 val = arch_atomic64_read_nonatomic(v);
+ *	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i);
+ *
+ * This is NOT safe to use where the value is not always checked by a
+ * subsequent atomic operation, such as in conditional try_cmpxchg() loops
+ * that can break before the atomic operation, e.g.:
+ *
+ *	s64 val = arch_atomic64_read_nonatomic(v);
+ *	do {
+ *		if (condition(val))
+ *			break;
+ *	} while (!arch_atomic64_try_cmpxchg(v, &val, val OP i);
+ */
+static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
+{
+	/* See comment in arch_atomic_read(). */
+	return __READ_ONCE(v->counter);
+}
+
 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
 #ifndef ATOMIC64_EXPORT
 #define ATOMIC64_DECL_ONE __ATOMIC64_DECL
@@ -21,16 +48,20 @@ typedef struct {
 	ATOMIC64_EXPORT(atomic64_##sym)
 #endif
 
-#ifdef CONFIG_X86_CMPXCHG64
-#define __alternative_atomic64(f, g, out, in...) \
-	asm volatile("call %P[func]" \
-		     : out : [func] "i" (atomic64_##g##_cx8), ## in)
+#ifdef CONFIG_X86_CX8
+#define __alternative_atomic64(f, g, out, in, clobbers...) \
+	asm volatile("call %c[func]" \
+		     : ALT_OUTPUT_SP(out) \
+		     : [func] "i" (atomic64_##g##_cx8) \
+		       COMMA(in) \
+		     : clobbers)
 
 #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
 #else
-#define __alternative_atomic64(f, g, out, in...) \
-	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
-			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)
+#define __alternative_atomic64(f, g, out, in, clobbers...) \
+	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
+			 X86_FEATURE_CX8, ASM_OUTPUT(out), \
+			 ASM_INPUT(in), clobbers)
 
 #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
 			   ATOMIC64_DECL_ONE(sym##_386)
@@ -41,8 +72,8 @@ ATOMIC64_DECL_ONE(inc_386);
 ATOMIC64_DECL_ONE(dec_386);
 #endif
 
-#define alternative_atomic64(f, out, in...) \
-	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+#define alternative_atomic64(f, out, in, clobbers...) \
+	__alternative_atomic64(f, f, ASM_OUTPUT(out), ASM_INPUT(in), clobbers)
 
 ATOMIC64_DECL(read);
 ATOMIC64_DECL(set);
@@ -60,289 +91,222 @@ ATOMIC64_DECL(add_unless);
 #undef __ATOMIC64_DECL
 #undef ATOMIC64_EXPORT
 
-/**
- * atomic64_cmpxchg - cmpxchg atomic64 variable
- * @v: pointer to type atomic64_t
- * @o: expected value
- * @n: new value
- *
- * Atomically sets @v to @n if it was equal to @o and returns
- * the old value.
- */
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+	return arch_cmpxchg64(&v->counter, old, new);
+}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
-static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
 {
-	return cmpxchg64(&v->counter, o, n);
+	return arch_try_cmpxchg64(&v->counter, old, new);
 }
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
 
-/**
- * atomic64_xchg - xchg atomic64 variable
- * @v: pointer to type atomic64_t
- * @n: value to assign
- *
- * Atomically xchgs the value of @v to @n and returns
- * the old value.
- */
-static inline long long atomic64_xchg(atomic64_t *v, long long n)
+static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
 {
-	long long o;
+	s64 o;
 	unsigned high = (unsigned)(n >> 32);
 	unsigned low = (unsigned)n;
-	alternative_atomic64(xchg, "=&A" (o),
-			     "S" (v), "b" (low), "c" (high)
-			     : "memory");
+	alternative_atomic64(xchg,
+			     "=&A" (o),
+			     ASM_INPUT("S" (v), "b" (low), "c" (high)),
+			     "memory");
 	return o;
 }
+#define arch_atomic64_xchg arch_atomic64_xchg
 
-/**
- * atomic64_set - set atomic64 variable
- * @v: pointer to type atomic64_t
- * @i: value to assign
- *
- * Atomically sets the value of @v to @n.
- */
-static inline void atomic64_set(atomic64_t *v, long long i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned high = (unsigned)(i >> 32);
 	unsigned low = (unsigned)i;
-	alternative_atomic64(set, /* no output */,
-			     "S" (v), "b" (low), "c" (high)
-			     : "eax", "edx", "memory");
+	alternative_atomic64(set,
+			     /* no output */,
+			     ASM_INPUT("S" (v), "b" (low), "c" (high)),
+			     "eax", "edx", "memory");
 }
 
-/**
- * atomic64_read - read atomic64 variable
- * @v: pointer to type atomic64_t
- *
- * Atomically reads the value of @v and returns it.
- */
-static inline long long atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-	long long r;
-	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
+	s64 r;
+	alternative_atomic64(read, "=&A" (r), "c" (v), "memory");
 	return r;
- }
+}
 
-/**
- * atomic64_add_return - add and return
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v and returns @i + *@v
- */
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	alternative_atomic64(add_return,
-			     ASM_OUTPUT2("+A" (i), "+c" (v)),
-			     ASM_NO_INPUT_CLOBBER("memory"));
+			     ASM_OUTPUT("+A" (i), "+c" (v)),
+			     /* no input */,
+			     "memory");
 	return i;
 }
+#define arch_atomic64_add_return arch_atomic64_add_return
 
-/*
- * Other variants with different arithmetic operators:
- */
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 {
 	alternative_atomic64(sub_return,
-			     ASM_OUTPUT2("+A" (i), "+c" (v)),
-			     ASM_NO_INPUT_CLOBBER("memory"));
+			     ASM_OUTPUT("+A" (i), "+c" (v)),
+			     /* no input */,
+			     "memory");
 	return i;
 }
+#define arch_atomic64_sub_return arch_atomic64_sub_return
 
-static inline long long atomic64_inc_return(atomic64_t *v)
+static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
 {
-	long long a;
-	alternative_atomic64(inc_return, "=&A" (a),
-			     "S" (v) : "memory", "ecx");
+	s64 a;
+	alternative_atomic64(inc_return,
+			     "=&A" (a),
+			     "S" (v),
+			     "memory", "ecx");
 	return a;
 }
+#define arch_atomic64_inc_return arch_atomic64_inc_return
 
-static inline long long atomic64_dec_return(atomic64_t *v)
+static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
 {
-	long long a;
-	alternative_atomic64(dec_return, "=&A" (a),
-			     "S" (v) : "memory", "ecx");
+	s64 a;
+	alternative_atomic64(dec_return,
+			     "=&A" (a),
+			     "S" (v),
+			     "memory", "ecx");
 	return a;
 }
+#define arch_atomic64_dec_return arch_atomic64_dec_return
 
-/**
- * atomic64_add - add integer to atomic64 variable
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static inline long long atomic64_add(long long i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	__alternative_atomic64(add, add_return,
-			       ASM_OUTPUT2("+A" (i), "+c" (v)),
-			       ASM_NO_INPUT_CLOBBER("memory"));
-	return i;
+			       ASM_OUTPUT("+A" (i), "+c" (v)),
+			       /* no input */,
+			       "memory");
 }
 
-/**
- * atomic64_sub - subtract the atomic64 variable
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
- *
- * Atomically subtracts @i from @v.
- */
-static inline long long atomic64_sub(long long i, atomic64_t *v)
+static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
 {
 	__alternative_atomic64(sub, sub_return,
-			       ASM_OUTPUT2("+A" (i), "+c" (v)),
-			       ASM_NO_INPUT_CLOBBER("memory"));
-	return i;
+			       ASM_OUTPUT("+A" (i), "+c" (v)),
+			       /* no input */,
+			       "memory");
 }
 
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
+static __always_inline void arch_atomic64_inc(atomic64_t *v)
 {
-	return atomic64_sub_return(i, v) == 0;
+	__alternative_atomic64(inc, inc_return,
+			       /* no output */,
+			       "S" (v),
+			       "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
-/**
- * atomic64_inc - increment atomic64 variable
- * @v: pointer to type atomic64_t
- *
- * Atomically increments @v by 1.
- */
-static inline void atomic64_inc(atomic64_t *v)
+static __always_inline void arch_atomic64_dec(atomic64_t *v)
 {
-	__alternative_atomic64(inc, inc_return, /* no output */,
-			       "S" (v) : "memory", "eax", "ecx", "edx");
+	__alternative_atomic64(dec, dec_return,
+			       /* no output */,
+			       "S" (v),
+			       "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
-/**
- * atomic64_dec - decrement atomic64 variable
- * @v: pointer to type atomic64_t
- *
- * Atomically decrements @v by 1.
- */
-static inline void atomic64_dec(atomic64_t *v)
+static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	__alternative_atomic64(dec, dec_return, /* no output */,
-			       "S" (v) : "memory", "eax", "ecx", "edx");
+	unsigned low = (unsigned)u;
+	unsigned high = (unsigned)(u >> 32);
+	alternative_atomic64(add_unless,
+			     ASM_OUTPUT("+A" (a), "+c" (low), "+D" (high)),
+			     "S" (v),
+			     "memory");
+	return (int)a;
 }
+#define arch_atomic64_add_unless arch_atomic64_add_unless
 
-/**
- * atomic64_dec_and_test - decrement and test
- * @v: pointer to type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline int atomic64_dec_and_test(atomic64_t *v)
+static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
-	return atomic64_dec_return(v) == 0;
+	int r;
+	alternative_atomic64(inc_not_zero,
+			     "=&a" (r),
+			     "S" (v),
+			     "ecx", "edx", "memory");
 	return r;
 }
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer to type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline int atomic64_inc_and_test(atomic64_t *v)
+static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
-	return atomic64_inc_return(v) == 0;
+	s64 r;
+	alternative_atomic64(dec_if_positive,
+			     "=&A" (r),
+			     "S" (v),
+			     "ecx", "memory");
 	return r;
 }
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
-/**
- * atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline int atomic64_add_negative(long long i, atomic64_t *v)
+#undef alternative_atomic64
+#undef __alternative_atomic64
+
+static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
 {
-	return atomic64_add_return(i, v) < 0;
+	s64 val = arch_atomic64_read_nonatomic(v);
+
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
 }
 
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if the add was done, zero otherwise.
- */
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
 {
-	unsigned low = (unsigned)u;
-	unsigned high = (unsigned)(u >> 32);
-	alternative_atomic64(add_unless,
-			     ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
-			     "S" (v) : "memory");
-	return (int)a;
-}
+	s64 val = arch_atomic64_read_nonatomic(v);
 
 	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
 
-static inline int atomic64_inc_not_zero(atomic64_t *v)
-{
-	int r;
-	alternative_atomic64(inc_not_zero, "=&a" (r),
-			     "S" (v) : "ecx", "edx", "memory");
-	return r;
+	return val;
 }
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
 
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
-	long long r;
-	alternative_atomic64(dec_if_positive, "=&A" (r),
-			     "S" (v) : "ecx", "memory");
-	return r;
+	s64 val = arch_atomic64_read_nonatomic(v);
+
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
 }
 
-#undef alternative_atomic64
-#undef __alternative_atomic64
+static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+	s64 val = arch_atomic64_read_nonatomic(v);
 
-#define ATOMIC64_OP(op, c_op) \
-static inline void atomic64_##op(long long i, atomic64_t *v) \
-{ \
-	long long old, c = 0; \
-	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \
-		c = old; \
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
+
+	return val;
 }
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+
+static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
+{
+	s64 val = arch_atomic64_read_nonatomic(v);
 
-#define ATOMIC64_FETCH_OP(op, c_op) \
-static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
-{ \
-	long long old, c = 0; \
-	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \
-		c = old; \
-	return old; \
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
 }
 
-ATOMIC64_FETCH_OP(add, +)
+static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+	s64 val = arch_atomic64_read_nonatomic(v);
 
-#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
 
-#define ATOMIC64_OPS(op, c_op) \
-	ATOMIC64_OP(op, c_op) \
-	ATOMIC64_FETCH_OP(op, c_op)
+	return val;
 }
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
 
-ATOMIC64_OPS(and, &)
-ATOMIC64_OPS(or, |)
-ATOMIC64_OPS(xor, ^)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+	s64 val = arch_atomic64_read_nonatomic(v);
+
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val + i));
+
+	return val;
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP
+#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v))
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
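
Note: the new bitwise and fetch helpers above all use the unconditional try_cmpxchg() loop that the arch_atomic64_read_nonatomic() comment describes. As a rough illustration of that loop shape only, here is a minimal, self-contained userspace sketch built on the GCC/Clang __atomic builtins rather than the kernel's arch_atomic64_* API; the program and the name fetch_or_demo are hypothetical and are not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical demo, not kernel code: mimics the loop shape of the new
 * arch_atomic64_fetch_or() using compiler builtins. */
static int64_t fetch_or_demo(int64_t *p, int64_t i)
{
	/* Prime the loop with a plain (possibly stale or torn) load; the
	 * compare-and-exchange below corrects it, which is the point made
	 * by the arch_atomic64_read_nonatomic() comment in the patch. */
	int64_t val = *p;

	/* On failure, __atomic_compare_exchange_n() refreshes 'val' with
	 * the current contents of *p, so the loop simply retries. */
	while (!__atomic_compare_exchange_n(p, &val, val | i, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		;

	return val;	/* old value, as a fetch-op returns */
}

int main(void)
{
	int64_t v = 0x0f;
	int64_t old = fetch_or_demo(&v, 0xf0);

	printf("old=%#llx new=%#llx\n", (long long)old, (long long)v);
	return 0;
}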
