Diffstat (limited to 'arch/xtensa/include/asm/atomic.h')
| -rw-r--r-- | arch/xtensa/include/asm/atomic.h | 531 |
1 file changed, 201 insertions(+), 330 deletions(-)
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e7fb447bce8e..7308b7f777d7 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -15,12 +15,9 @@
 
 #include <linux/stringify.h>
 #include <linux/types.h>
-
-#ifdef __KERNEL__
 #include <asm/processor.h>
 #include <asm/cmpxchg.h>
-
-#define ATOMIC_INIT(i)	{ (i) }
+#include <asm/barrier.h>
 
 /*
  * This Xtensa implementation assumes that the right mechanism
@@ -28,15 +25,15 @@
  *
  * Locking interrupts looks like this:
  *
- *    rsil a15, LOCKLEVEL
+ *    rsil a14, TOPLEVEL
  *    <code>
- *    wsr  a15, PS
+ *    wsr  a14, PS
  *    rsync
  *
- * Note that a15 is used here because the register allocation
+ * Note that a14 is used here because the register allocation
  * done by the compiler is not guaranteed and a window overflow
  * may not occur between the rsil and wsr instructions. By using
- * a15 in the rsil, the machine is guaranteed to be in a state
+ * a14 in the rsil, the machine is guaranteed to be in a state
  * where no register reference will cause an overflow.
  */
 
@@ -46,7 +43,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
 
 /**
  * atomic_set - set atomic variable
@@ -55,344 +52,218 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v,i)		((v)->counter = (i))
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-	"1:     l32i    %1, %3, 0\n"
-	"       wsr     %1, scompare1\n"
-	"       add     %0, %1, %2\n"
-	"       s32c1i  %0, %3, 0\n"
-	"       bne     %0, %1, 1b\n"
-	: "=&a" (result), "=&a" (tmp)
-	: "a" (i), "a" (v)
-	: "memory"
-	);
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-	"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
-	"       l32i    %0, %2, 0\n"
-	"       add     %0, %0, %1\n"
-	"       s32i    %0, %2, 0\n"
-	"       wsr     a15, ps\n"
-	"       rsync\n"
-	: "=&a" (vval)
-	: "a" (i), "a" (v)
-	: "a15", "memory"
-	);
-#endif
+#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
+
+#if XCHAL_HAVE_EXCLUSIVE
+#define ATOMIC_OP(op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+	unsigned long tmp; \
+	int result; \
+	\
+	__asm__ __volatile__( \
+	"1:     l32ex   %[tmp], %[addr]\n" \
+	"       " #op " %[result], %[tmp], %[i]\n" \
+	"       s32ex   %[result], %[addr]\n" \
+	"       getex   %[result]\n" \
+	"       beqz    %[result], 1b\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp) \
+	: [i] "a" (i), [addr] "a" (v) \
+	: "memory" \
+	); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+	unsigned long tmp; \
+	int result; \
+	\
+	__asm__ __volatile__( \
+	"1:     l32ex   %[tmp], %[addr]\n" \
+	"       " #op " %[result], %[tmp], %[i]\n" \
+	"       s32ex   %[result], %[addr]\n" \
+	"       getex   %[result]\n" \
+	"       beqz    %[result], 1b\n" \
+	"       " #op " %[result], %[tmp], %[i]\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp) \
+	: [i] "a" (i), [addr] "a" (v) \
+	: "memory" \
+	); \
+	\
+	return result; \
 }
 
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-	"1:     l32i    %1, %3, 0\n"
-	"       wsr     %1, scompare1\n"
-	"       sub     %0, %1, %2\n"
-	"       s32c1i  %0, %3, 0\n"
-	"       bne     %0, %1, 1b\n"
-	: "=&a" (result), "=&a" (tmp)
-	: "a" (i), "a" (v)
-	: "memory"
-	);
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-	"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
-	"       l32i    %0, %2, 0\n"
-	"       sub     %0, %0, %1\n"
-	"       s32i    %0, %2, 0\n"
-	"       wsr     a15, ps\n"
-	"       rsync\n"
-	: "=&a" (vval)
-	: "a" (i), "a" (v)
-	: "a15", "memory"
-	);
-#endif
+#define ATOMIC_FETCH_OP(op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+	unsigned long tmp; \
+	int result; \
+	\
+	__asm__ __volatile__( \
+	"1:     l32ex   %[tmp], %[addr]\n" \
+	"       " #op " %[result], %[tmp], %[i]\n" \
+	"       s32ex   %[result], %[addr]\n" \
+	"       getex   %[result]\n" \
+	"       beqz    %[result], 1b\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp) \
+	: [i] "a" (i), [addr] "a" (v) \
+	: "memory" \
+	); \
+	\
+	return tmp; \
 }
 
-/*
- * We use atomic_{add|sub}_return to define other functions.
- */
-
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-	"1:     l32i    %1, %3, 0\n"
-	"       wsr     %1, scompare1\n"
-	"       add     %0, %1, %2\n"
-	"       s32c1i  %0, %3, 0\n"
-	"       bne     %0, %1, 1b\n"
-	"       add     %0, %0, %2\n"
-	: "=&a" (result), "=&a" (tmp)
-	: "a" (i), "a" (v)
-	: "memory"
-	);
-
-	return result;
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-	"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-	"       l32i    %0, %2, 0\n"
-	"       add     %0, %0, %1\n"
-	"       s32i    %0, %2, 0\n"
-	"       wsr     a15, ps\n"
-	"       rsync\n"
-	: "=&a" (vval)
-	: "a" (i), "a" (v)
-	: "a15", "memory"
-	);
-
-	return vval;
-#endif
+#elif XCHAL_HAVE_S32C1I
+#define ATOMIC_OP(op) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
+{ \
+	unsigned long tmp; \
+	int result; \
+	\
+	__asm__ __volatile__( \
+	"1:     l32i    %[tmp], %[mem]\n" \
+	"       wsr     %[tmp], scompare1\n" \
+	"       " #op " %[result], %[tmp], %[i]\n" \
+	"       s32c1i  %[result], %[mem]\n" \
+	"       bne     %[result], %[tmp], 1b\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp), \
+	  [mem] "+m" (*v) \
+	: [i] "a" (i) \
+	: "memory" \
+	); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
+{ \
+	unsigned long tmp; \
+	int result; \
+	\
+	__asm__ __volatile__( \
+	"1:     l32i    %[tmp], %[mem]\n" \
+	"       wsr     %[tmp], scompare1\n" \
+	"       " #op " %[result], %[tmp], %[i]\n" \
+	"       s32c1i  %[result], %[mem]\n" \
+	"       bne     %[result], %[tmp], 1b\n" \
+	"       " #op " %[result], %[result], %[i]\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp), \
+	  [mem] "+m" (*v) \
+	: [i] "a" (i) \
+	: "memory" \
+	); \
+	\
+	return result; \
 }
 
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-	"1:     l32i    %1, %3, 0\n"
-	"       wsr     %1, scompare1\n"
-	"       sub     %0, %1, %2\n"
-	"       s32c1i  %0, %3, 0\n"
-	"       bne     %0, %1, 1b\n"
-	"       sub     %0, %0, %2\n"
-	: "=&a" (result), "=&a" (tmp)
-	: "a" (i), "a" (v)
-	: "memory"
-	);
-
-	return result;
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-	"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-	"       l32i    %0, %2, 0\n"
-	"       sub     %0, %0, %1\n"
-	"       s32i    %0, %2, 0\n"
-	"       wsr     a15, ps\n"
-	"       rsync\n"
-	: "=&a" (vval)
-	: "a" (i), "a" (v)
-	: "a15", "memory"
-	);
-
-	return vval;
-#endif
+#define ATOMIC_FETCH_OP(op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+	unsigned long tmp; \
+	int result; \
+	\
+	__asm__ __volatile__( \
+	"1:     l32i    %[tmp], %[mem]\n" \
+	"       wsr     %[tmp], scompare1\n" \
+	"       " #op " %[result], %[tmp], %[i]\n" \
+	"       s32c1i  %[result], %[mem]\n" \
+	"       bne     %[result], %[tmp], 1b\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp), \
+	  [mem] "+m" (*v) \
+	: [i] "a" (i) \
+	: "memory" \
+	); \
+	\
+	return result; \
 }
 
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc(v) atomic_add(1,(v))
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec(v) atomic_sub(1,(v))
-
-/**
- * atomic_dec_return - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
-
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c;
+#else /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OP(op) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
+{ \
+	unsigned int vval; \
+	\
+	__asm__ __volatile__( \
+	"       rsil    a14, "__stringify(TOPLEVEL)"\n" \
+	"       l32i    %[result], %[mem]\n" \
+	"       " #op " %[result], %[result], %[i]\n" \
+	"       s32i    %[result], %[mem]\n" \
+	"       wsr     a14, ps\n" \
+	"       rsync\n" \
+	: [result] "=&a" (vval), [mem] "+m" (*v) \
+	: [i] "a" (i) \
+	: "a14", "memory" \
+	); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
+{ \
+	unsigned int vval; \
+	\
+	__asm__ __volatile__( \
+	"       rsil    a14,"__stringify(TOPLEVEL)"\n" \
+	"       l32i    %[result], %[mem]\n" \
+	"       " #op " %[result], %[result], %[i]\n" \
+	"       s32i    %[result], %[mem]\n" \
+	"       wsr     a14, ps\n" \
+	"       rsync\n" \
+	: [result] "=&a" (vval), [mem] "+m" (*v) \
+	: [i] "a" (i) \
+	: "a14", "memory" \
+	); \
+	\
+	return vval; \
 }
 
+#define ATOMIC_FETCH_OP(op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+	unsigned int tmp, vval; \
+	\
+	__asm__ __volatile__( \
+	"       rsil    a14,"__stringify(TOPLEVEL)"\n" \
+	"       l32i    %[result], %[mem]\n" \
+	"       " #op " %[tmp], %[result], %[i]\n" \
+	"       s32i    %[tmp], %[mem]\n" \
+	"       wsr     a14, ps\n" \
+	"       rsync\n" \
+	: [result] "=&a" (vval), [tmp] "=&a" (tmp), \
+	  [mem] "+m" (*v) \
+	: [i] "a" (i) \
+	: "a14", "memory" \
+	); \
+	\
+	return vval; \
+}
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
+#endif /* XCHAL_HAVE_S32C1I */
 
-	__asm__ __volatile__(
-	"1:     l32i    %1, %3, 0\n"
-	"       wsr     %1, scompare1\n"
-	"       and     %0, %1, %2\n"
-	"       s32c1i  %0, %3, 0\n"
-	"       bne     %0, %1, 1b\n"
-	: "=&a" (result), "=&a" (tmp)
-	: "a" (~mask), "a" (v)
-	: "memory"
-	);
-#else
-	unsigned int all_f = -1;
-	unsigned int vval;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)
 
-	__asm__ __volatile__(
-	"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-	"       l32i    %0, %2, 0\n"
-	"       xor     %1, %4, %3\n"
-	"       and     %0, %0, %4\n"
-	"       s32i    %0, %2, 0\n"
-	"       wsr     a15, ps\n"
-	"       rsync\n"
-	: "=&a" (vval), "=a" (mask)
-	: "a" (v), "a" (all_f), "1" (mask)
-	: "a15", "memory"
-	);
-#endif
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
 
-	__asm__ __volatile__(
-	"1:     l32i    %1, %3, 0\n"
-	"       wsr     %1, scompare1\n"
-	"       or      %0, %1, %2\n"
-	"       s32c1i  %0, %3, 0\n"
-	"       bne     %0, %1, 1b\n"
-	: "=&a" (result), "=&a" (tmp)
-	: "a" (mask), "a" (v)
-	: "memory"
-	);
-#else
-	unsigned int vval;
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
 
-	__asm__ __volatile__(
-	"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-	"       l32i    %0, %2, 0\n"
-	"       or      %0, %0, %1\n"
-	"       s32i    %0, %2, 0\n"
-	"       wsr     a15, ps\n"
-	"       rsync\n"
-	: "=&a" (vval)
-	: "a" (mask), "a" (v)
-	: "a15", "memory"
-	);
-#endif
-}
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
 
-#endif /* __KERNEL__ */
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #endif /* _XTENSA_ATOMIC_H */
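
Note: on cores with XCHAL_HAVE_EXCLUSIVE, the templates above build each atomic operation from an exclusive load (l32ex), the operation itself, a conditional store (s32ex), and getex, which pulls the store's success flag into a register so that beqz can retry the sequence until the update commits without interference. A minimal C11 sketch of that load/modify/store-conditional/retry shape is shown below; the function name is hypothetical and not part of the patch, and compare_exchange_weak stands in for the s32ex/getex pair, including its ability to fail spuriously:

#include <stdatomic.h>

/* Hypothetical C11 analogue of the l32ex/s32ex/getex retry loop. */
static inline void xtensa_style_atomic_add(int i, _Atomic int *v)
{
	/* l32ex: observe the current value */
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* s32ex + getex + beqz: retry until the store succeeds;
	 * on failure 'old' is refreshed with the value seen in memory */
	while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;
}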
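
For each operation, ATOMIC_OPS(add) and friends instantiate up to three templates that differ only in their return value, which is why ATOMIC_OP_RETURN repeats #op after the loop (recomputing the new value from the old value held in tmp) while ATOMIC_FETCH_OP returns the pre-update value. A hand-expanded sketch with plain C stand-in bodies (the _sketch names are illustrative; the real bodies are the inline-assembly loops in the patch):

/* Stand-in for atomic_t, for illustration only. */
typedef struct { int counter; } atomic_sketch_t;

/* ATOMIC_OP: perform the update, return nothing. */
static inline void arch_atomic_add_sketch(int i, atomic_sketch_t *v)
{
	v->counter += i;	/* atomic in the real implementation */
}

/* ATOMIC_OP_RETURN: return the value after the update. */
static inline int arch_atomic_add_return_sketch(int i, atomic_sketch_t *v)
{
	return v->counter += i;
}

/* ATOMIC_FETCH_OP: return the value observed before the update. */
static inline int arch_atomic_fetch_add_sketch(int i, atomic_sketch_t *v)
{
	int old = v->counter;
	v->counter = old + i;
	return old;
}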
