Diffstat (limited to 'arch/arm64/include/asm/atomic_lse.h')
-rw-r--r-- | arch/arm64/include/asm/atomic_lse.h | 353
1 file changed, 134 insertions, 219 deletions
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 574808b9df4c..87f568a94e55 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -11,12 +11,14 @@
 #define __ASM_ATOMIC_LSE_H
 
 #define ATOMIC_OP(op, asm_op)	\
-static inline void __lse_atomic_##op(int i, atomic_t *v)	\
+static __always_inline void	\
+__lse_atomic_##op(int i, atomic_t *v)	\
 {	\
 	asm volatile(	\
-"	" #asm_op "	%w[i], %[v]\n"	\
-	: [i] "+r" (i), [v] "+Q" (v->counter)	\
-	: "r" (v));	\
+	__LSE_PREAMBLE	\
+	"	" #asm_op "	%w[i], %[v]\n"	\
+	: [v] "+Q" (v->counter)	\
+	: [i] "r" (i));	\
 }
 
 ATOMIC_OP(andnot, stclr)
@@ -24,18 +26,28 @@ ATOMIC_OP(or, stset)
 ATOMIC_OP(xor, steor)
 ATOMIC_OP(add, stadd)
 
+static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
+{
+	__lse_atomic_add(-i, v);
+}
+
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)	\
-static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
+static __always_inline int	\
+__lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
 {	\
+	int old;	\
+	\
 	asm volatile(	\
-"	" #asm_op #mb "	%w[i], %w[i], %[v]"	\
-	: [i] "+r" (i), [v] "+Q" (v->counter)	\
-	: "r" (v)	\
+	__LSE_PREAMBLE	\
+	"	" #asm_op #mb "	%w[i], %w[old], %[v]"	\
+	: [v] "+Q" (v->counter),	\
+	  [old] "=r" (old)	\
+	: [i] "r" (i)	\
 	: cl);	\
 	\
-	return i;	\
+	return old;	\
 }
 
 #define ATOMIC_FETCH_OPS(op, asm_op)	\
@@ -52,48 +64,50 @@ ATOMIC_FETCH_OPS(add, ldadd)
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_FETCH_OPS
 
-#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)	\
-static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
+#define ATOMIC_FETCH_OP_SUB(name)	\
+static __always_inline int	\
+__lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
 {	\
-	u32 tmp;	\
-	\
-	asm volatile(	\
-	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"	\
-	"	add	%w[i], %w[i], %w[tmp]"	\
-	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
-	: "r" (v)	\
-	: cl);	\
+	return __lse_atomic_fetch_add##name(-i, v);	\
+}
+
+ATOMIC_FETCH_OP_SUB(_relaxed)
+ATOMIC_FETCH_OP_SUB(_acquire)
+ATOMIC_FETCH_OP_SUB(_release)
+ATOMIC_FETCH_OP_SUB(        )
+
+#undef ATOMIC_FETCH_OP_SUB
+
+#define ATOMIC_OP_ADD_SUB_RETURN(name)	\
+static __always_inline int	\
+__lse_atomic_add_return##name(int i, atomic_t *v)	\
+{	\
+	return __lse_atomic_fetch_add##name(i, v) + i;	\
+}	\
 	\
-	return i;	\
+static __always_inline int	\
+__lse_atomic_sub_return##name(int i, atomic_t *v)	\
+{	\
+	return __lse_atomic_fetch_sub(i, v) - i;	\
 }
 
-ATOMIC_OP_ADD_RETURN(_relaxed,   )
-ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
-ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
-ATOMIC_OP_ADD_RETURN(        , al, "memory")
+ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
+ATOMIC_OP_ADD_SUB_RETURN(_acquire)
+ATOMIC_OP_ADD_SUB_RETURN(_release)
+ATOMIC_OP_ADD_SUB_RETURN(        )
 
-#undef ATOMIC_OP_ADD_RETURN
+#undef ATOMIC_OP_ADD_SUB_RETURN
 
-static inline void __lse_atomic_and(int i, atomic_t *v)
+static __always_inline void __lse_atomic_and(int i, atomic_t *v)
 {
-	asm volatile(
-	"	mvn	%w[i], %w[i]\n"
-	"	stclr	%w[i], %[v]"
-	: [i] "+&r" (i), [v] "+Q" (v->counter)
-	: "r" (v));
+	return __lse_atomic_andnot(~i, v);
 }
 
 #define ATOMIC_FETCH_OP_AND(name, mb, cl...)	\
-static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
+static __always_inline int	\
+__lse_atomic_fetch_and##name(int i, atomic_t *v)	\
 {	\
-	asm volatile(	\
-	"	mvn	%w[i], %w[i]\n"	\
-	"	ldclr" #mb "	%w[i], %w[i], %[v]"	\
-	: [i] "+&r" (i), [v] "+Q" (v->counter)	\
-	: "r" (v)	\
-	: cl);	\
-	\
-	return i;	\
+	return __lse_atomic_fetch_andnot##name(~i, v);	\
 }
 
 ATOMIC_FETCH_OP_AND(_relaxed,   )
@@ -103,65 +117,15 @@ ATOMIC_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC_FETCH_OP_AND
 
-static inline void __lse_atomic_sub(int i, atomic_t *v)
-{
-	asm volatile(
-	"	neg	%w[i], %w[i]\n"
-	"	stadd	%w[i], %[v]"
-	: [i] "+&r" (i), [v] "+Q" (v->counter)
-	: "r" (v));
-}
-
-#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)	\
-static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
-{	\
-	u32 tmp;	\
-	\
-	asm volatile(	\
-	"	neg	%w[i], %w[i]\n"	\
-	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"	\
-	"	add	%w[i], %w[i], %w[tmp]"	\
-	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
-	: "r" (v)	\
-	: cl);	\
-	\
-	return i;	\
-}
-
-ATOMIC_OP_SUB_RETURN(_relaxed,   )
-ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
-ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
-ATOMIC_OP_SUB_RETURN(        , al, "memory")
-
-#undef ATOMIC_OP_SUB_RETURN
-
-#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)	\
-static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
-{	\
-	asm volatile(	\
-	"	neg	%w[i], %w[i]\n"	\
-	"	ldadd" #mb "	%w[i], %w[i], %[v]"	\
-	: [i] "+&r" (i), [v] "+Q" (v->counter)	\
-	: "r" (v)	\
-	: cl);	\
-	\
-	return i;	\
-}
-
-ATOMIC_FETCH_OP_SUB(_relaxed,   )
-ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
-ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
-ATOMIC_FETCH_OP_SUB(        , al, "memory")
-
-#undef ATOMIC_FETCH_OP_SUB
-
 #define ATOMIC64_OP(op, asm_op)	\
-static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)	\
+static __always_inline void	\
+__lse_atomic64_##op(s64 i, atomic64_t *v)	\
 {	\
 	asm volatile(	\
-"	" #asm_op "	%[i], %[v]\n"	\
-	: [i] "+r" (i), [v] "+Q" (v->counter)	\
-	: "r" (v));	\
+	__LSE_PREAMBLE	\
+	"	" #asm_op "	%[i], %[v]\n"	\
+	: [v] "+Q" (v->counter)	\
+	: [i] "r" (i));	\
 }
 
 ATOMIC64_OP(andnot, stclr)
@@ -169,18 +133,28 @@ ATOMIC64_OP(or, stset)
 ATOMIC64_OP(xor, steor)
 ATOMIC64_OP(add, stadd)
 
+static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
+{
+	__lse_atomic64_add(-i, v);
+}
+
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)	\
-static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
+static __always_inline long	\
+__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)	\
 {	\
+	s64 old;	\
+	\
 	asm volatile(	\
-"	" #asm_op #mb "	%[i], %[i], %[v]"	\
-	: [i] "+r" (i), [v] "+Q" (v->counter)	\
-	: "r" (v)	\
+	__LSE_PREAMBLE	\
+	"	" #asm_op #mb "	%[i], %[old], %[v]"	\
+	: [v] "+Q" (v->counter),	\
+	  [old] "=r" (old)	\
+	: [i] "r" (i)	\
 	: cl);	\
 	\
-	return i;	\
+	return old;	\
 }
 
 #define ATOMIC64_FETCH_OPS(op, asm_op)	\
@@ -197,48 +171,50 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_FETCH_OPS
 
-#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)	\
-static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
+#define ATOMIC64_FETCH_OP_SUB(name)	\
+static __always_inline long	\
+__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
 {	\
-	unsigned long tmp;	\
-	\
-	asm volatile(	\
-	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"	\
-	"	add	%[i], %[i], %x[tmp]"	\
-	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
-	: "r" (v)	\
-	: cl);	\
+	return __lse_atomic64_fetch_add##name(-i, v);	\
+}
+
+ATOMIC64_FETCH_OP_SUB(_relaxed)
+ATOMIC64_FETCH_OP_SUB(_acquire)
+ATOMIC64_FETCH_OP_SUB(_release)
+ATOMIC64_FETCH_OP_SUB(        )
+
+#undef ATOMIC64_FETCH_OP_SUB
+
+#define ATOMIC64_OP_ADD_SUB_RETURN(name)	\
+static __always_inline long	\
+__lse_atomic64_add_return##name(s64 i, atomic64_t *v)	\
+{	\
+	return __lse_atomic64_fetch_add##name(i, v) + i;	\
+}	\
 	\
-	return i;	\
+static __always_inline long	\
+__lse_atomic64_sub_return##name(s64 i, atomic64_t *v)	\
+{	\
+	return __lse_atomic64_fetch_sub##name(i, v) - i;	\
 }
 
-ATOMIC64_OP_ADD_RETURN(_relaxed,   )
-ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
-ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
-ATOMIC64_OP_ADD_RETURN(        , al, "memory")
+ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
+ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
+ATOMIC64_OP_ADD_SUB_RETURN(_release)
+ATOMIC64_OP_ADD_SUB_RETURN(        )
 
-#undef ATOMIC64_OP_ADD_RETURN
+#undef ATOMIC64_OP_ADD_SUB_RETURN
 
-static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
+static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
-	asm volatile(
-	"	mvn	%[i], %[i]\n"
-	"	stclr	%[i], %[v]"
-	: [i] "+&r" (i), [v] "+Q" (v->counter)
-	: "r" (v));
+	return __lse_atomic64_andnot(~i, v);
 }
 
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)	\
-static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
+static __always_inline long	\
+__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
 {	\
-	asm volatile(	\
-	"	mvn	%[i], %[i]\n"	\
-	"	ldclr" #mb "	%[i], %[i], %[v]"	\
-	: [i] "+&r" (i), [v] "+Q" (v->counter)	\
-	: "r" (v)	\
-	: cl);	\
-	\
-	return i;	\
+	return __lse_atomic64_fetch_andnot##name(~i, v);	\
 }
 
 ATOMIC64_FETCH_OP_AND(_relaxed,   )
@@ -248,63 +224,12 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_AND
 
-static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
-{
-	asm volatile(
-	"	neg	%[i], %[i]\n"
-	"	stadd	%[i], %[v]"
-	: [i] "+&r" (i), [v] "+Q" (v->counter)
-	: "r" (v));
-}
-
-#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)	\
-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)	\
-{	\
-	unsigned long tmp;	\
-	\
-	asm volatile(	\
-	"	neg	%[i], %[i]\n"	\
-	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"	\
-	"	add	%[i], %[i], %x[tmp]"	\
-	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
-	: "r" (v)	\
-	: cl);	\
-	\
-	return i;	\
-}
-
-ATOMIC64_OP_SUB_RETURN(_relaxed,   )
-ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
-ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
-ATOMIC64_OP_SUB_RETURN(        , al, "memory")
-
-#undef ATOMIC64_OP_SUB_RETURN
-
-#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)	\
-static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
-{	\
-	asm volatile(	\
-	"	neg	%[i], %[i]\n"	\
-	"	ldadd" #mb "	%[i], %[i], %[v]"	\
-	: [i] "+&r" (i), [v] "+Q" (v->counter)	\
-	: "r" (v)	\
-	: cl);	\
-	\
-	return i;	\
-}
-
-ATOMIC64_FETCH_OP_SUB(_relaxed,   )
-ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
-ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
-ATOMIC64_FETCH_OP_SUB(        , al, "memory")
-
-#undef ATOMIC64_FETCH_OP_SUB
-
-static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long tmp;
 
 	asm volatile(
+	__LSE_PREAMBLE
 	"1:	ldr	%x[tmp], %[v]\n"
 	"	subs	%[ret], %x[tmp], #1\n"
 	"	b.lt	2f\n"
@@ -326,21 +251,15 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr,	\
 					      u##sz old,	\
 					      u##sz new)	\
 {	\
-	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
-	register u##sz x1 asm ("x1") = old;	\
-	register u##sz x2 asm ("x2") = new;	\
-	unsigned long tmp;	\
-	\
 	asm volatile(	\
-	"	mov	%" #w "[tmp], %" #w "[old]\n"	\
-	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
-	"	mov	%" #w "[ret], %" #w "[tmp]"	\
-	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr),	\
-	  [tmp] "=&r" (tmp)	\
-	: [old] "r" (x1), [new] "r" (x2)	\
+	__LSE_PREAMBLE	\
+	"	cas" #mb #sfx "	%" #w "[old], %" #w "[new], %[v]\n"	\
+	: [v] "+Q" (*(u##sz *)ptr),	\
+	  [old] "+r" (old)	\
+	: [new] "rZ" (new)	\
 	: cl);	\
 	\
-	return x0;	\
+	return old;	\
 }
 
 __CMPXCHG_CASE(w, b,     ,  8,   )
@@ -362,39 +281,35 @@ __CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")
 
 #undef __CMPXCHG_CASE
 
-#define __CMPXCHG_DBL(name, mb, cl...)	\
-static __always_inline long	\
-__lse__cmpxchg_double##name(unsigned long old1,	\
-					 unsigned long old2,	\
-					 unsigned long new1,	\
-					 unsigned long new2,	\
-					 volatile void *ptr)	\
+#define __CMPXCHG128(name, mb, cl...)	\
+static __always_inline u128	\
+__lse__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new)	\
 {	\
-	unsigned long oldval1 = old1;	\
-	unsigned long oldval2 = old2;	\
-	register unsigned long x0 asm ("x0") = old1;	\
-	register unsigned long x1 asm ("x1") = old2;	\
-	register unsigned long x2 asm ("x2") = new1;	\
-	register unsigned long x3 asm ("x3") = new2;	\
+	union __u128_halves r, o = { .full = (old) },	\
+			    n = { .full = (new) };	\
+	register unsigned long x0 asm ("x0") = o.low;	\
+	register unsigned long x1 asm ("x1") = o.high;	\
+	register unsigned long x2 asm ("x2") = n.low;	\
+	register unsigned long x3 asm ("x3") = n.high;	\
 	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
 	\
 	asm volatile(	\
+	__LSE_PREAMBLE	\
 	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
-	"	eor	%[old1], %[old1], %[oldval1]\n"	\
-	"	eor	%[old2], %[old2], %[oldval2]\n"	\
-	"	orr	%[old1], %[old1], %[old2]"	\
 	: [old1] "+&r" (x0), [old2] "+&r" (x1),	\
-	  [v] "+Q" (*(unsigned long *)ptr)	\
+	  [v] "+Q" (*(u128 *)ptr)	\
 	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),	\
-	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)	\
+	  [oldval1] "r" (o.low), [oldval2] "r" (o.high)	\
 	: cl);	\
 	\
-	return x0;	\
+	r.low = x0; r.high = x1;	\
+	\
+	return r.full;	\
 }
 
-__CMPXCHG_DBL(   ,   )
-__CMPXCHG_DBL(_mb, al, "memory")
+__CMPXCHG128(   ,   )
+__CMPXCHG128(_mb, al, "memory")
 
-#undef __CMPXCHG_DBL
+#undef __CMPXCHG128
 
 #endif	/* __ASM_ATOMIC_LSE_H */
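Note on the refactor above: the dedicated SUB/AND assembly is dropped because each operation follows from an identity on two's-complement integers: sub(i) is add(-i), and(i) is andnot(~i), and the *_return forms differ from the fetch_* forms only by arithmetic on the returned value. A minimal sketch on plain (non-atomic) integers, using hypothetical helper names that model the semantics of the LSE ldadd/ldclr instructions:

#include <assert.h>

/* Models ldadd: add i to *v, return the prior value. */
static int fetch_add(int i, int *v) { int old = *v; *v += i; return old; }

/* Models ldclr: clear in *v every bit set in i, return the prior value. */
static int fetch_andnot(int i, int *v) { int old = *v; *v &= ~i; return old; }

int main(void)
{
	int v = 10;

	fetch_add(-3, &v);	/* sub(3) == add(-3), as in __lse_atomic_sub() */
	assert(v == 7);

	fetch_andnot(~5, &v);	/* and(5) == andnot(~5), as in __lse_atomic_and() */
	assert(v == 5);

	/* add_return(i) == fetch_add(i) + i, as in ATOMIC_OP_ADD_SUB_RETURN */
	assert(fetch_add(2, &v) + 2 == v);
	return 0;
}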
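The rewritten __lse__cmpxchg_case_* similarly sheds its mov instructions by leaning on the CAS convention: the register holding the expected value is overwritten with the value actually observed in memory, so the wrapper can simply return old. A plain-C model of that convention (illustrative only, not the LSE instruction):

#include <assert.h>
#include <stdint.h>

/* Models cas: if *ptr equals old, store new; either way, return the
 * value that was observed, mirroring what cas leaves in the "old"
 * register. */
static uint64_t cas_model(uint64_t *ptr, uint64_t old, uint64_t new)
{
	uint64_t observed = *ptr;

	if (observed == old)
		*ptr = new;
	return observed;
}

int main(void)
{
	uint64_t v = 42;

	assert(cas_model(&v, 42, 7) == 42 && v == 7);	/* success: expected value returned */
	assert(cas_model(&v, 42, 9) == 7 && v == 7);	/* failure: current value returned */
	return 0;
}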
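Finally, __lse__cmpxchg128 must feed CASP its 128-bit compare and swap values as two 64-bit halves in even/odd register pairs. The sketch below shows that split in the style of the kernel's union __u128_halves, assuming a little-endian target and a compiler with the unsigned __int128 extension (GCC/Clang):

#include <assert.h>

typedef unsigned __int128 u128;		/* compiler extension, as in the kernel's u128 */
typedef unsigned long long u64;

union u128_halves {			/* mirrors union __u128_halves */
	u128 full;
	struct {
		u64 low;		/* old half goes in x0, new half in x2 */
		u64 high;		/* old half goes in x1, new half in x3 */
	};
};

int main(void)
{
	union u128_halves o = {
		.full = ((u128)0x1122334455667788ULL << 64) | 0x99aabbccddeeff00ULL
	};

	assert(o.low == 0x99aabbccddeeff00ULL);		/* little-endian: low half first */
	assert(o.high == 0x1122334455667788ULL);
	return 0;
}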