Diffstat (limited to 'arch/xtensa/include/asm/atomic.h')
-rw-r--r--	arch/xtensa/include/asm/atomic.h	200
1 file changed, 132 insertions(+), 68 deletions(-)
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 7de0149e1cf7..7308b7f777d7 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -15,29 +15,25 @@
 #include <linux/stringify.h>
 #include <linux/types.h>
-
-#ifdef __KERNEL__
 #include <asm/processor.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i)	{ (i) }
-
 /*
  * This Xtensa implementation assumes that the right mechanism
  * for exclusion is for locking interrupts to level EXCM_LEVEL.
  *
  * Locking interrupts looks like this:
  *
- *    rsil a15, TOPLEVEL
+ *    rsil a14, TOPLEVEL
  *    <code>
- *    wsr  a15, PS
+ *    wsr  a14, PS
  *    rsync
  *
- * Note that a15 is used here because the register allocation
+ * Note that a14 is used here because the register allocation
  * done by the compiler is not guaranteed and a window overflow
  * may not occur between the rsil and wsr instructions. By using
- * a15 in the rsil, the machine is guaranteed to be in a state
+ * a14 in the rsil, the machine is guaranteed to be in a state
  * where no register reference will cause an overflow.
  */
@@ -47,7 +43,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		READ_ONCE((v)->counter)
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
 
 /**
  * atomic_set - set atomic variable
@@ -56,42 +52,42 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
 
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
 
 #define ATOMIC_OP(op)							\
-static inline void atomic_##op(int i, atomic_t * v)			\
+static inline void arch_atomic_##op(int i, atomic_t *v)			\
 {									\
 	unsigned long tmp;						\
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-			"1:     l32i    %1, %3, 0\n"			\
-			"       wsr     %1, scompare1\n"		\
-			"       " #op " %0, %1, %2\n"			\
-			"       s32c1i  %0, %3, 0\n"			\
-			"       bne     %0, %1, 1b\n"			\
-			: "=&a" (result), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
+			"1:     l32ex   %[tmp], %[addr]\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32ex   %[result], %[addr]\n"		\
+			"       getex   %[result]\n"			\
+			"       beqz    %[result], 1b\n"		\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
+			: [i] "a" (i), [addr] "a" (v)			\
 			: "memory"					\
 			);						\
 }									\
 
 #define ATOMIC_OP_RETURN(op)						\
-static inline int atomic_##op##_return(int i, atomic_t * v)		\
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
 {									\
 	unsigned long tmp;						\
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-			"1:     l32i    %1, %3, 0\n"			\
-			"       wsr     %1, scompare1\n"		\
-			"       " #op " %0, %1, %2\n"			\
-			"       s32c1i  %0, %3, 0\n"			\
-			"       bne     %0, %1, 1b\n"			\
-			"       " #op " %0, %0, %2\n"			\
-			: "=&a" (result), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
+			"1:     l32ex   %[tmp], %[addr]\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32ex   %[result], %[addr]\n"		\
+			"       getex   %[result]\n"			\
+			"       beqz    %[result], 1b\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
+			: [i] "a" (i), [addr] "a" (v)			\
 			: "memory"					\
 			);						\
 									\
@@ -99,19 +95,82 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
 }
 
 #define ATOMIC_FETCH_OP(op)						\
-static inline int atomic_fetch_##op(int i, atomic_t * v)		\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	__asm__ __volatile__(						\
+			"1:     l32ex   %[tmp], %[addr]\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32ex   %[result], %[addr]\n"		\
+			"       getex   %[result]\n"			\
+			"       beqz    %[result], 1b\n"		\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
+			: [i] "a" (i), [addr] "a" (v)			\
+			: "memory"					\
+			);						\
+									\
+	return tmp;							\
+}
+
+#elif XCHAL_HAVE_S32C1I
+
+#define ATOMIC_OP(op)							\
+static inline void arch_atomic_##op(int i, atomic_t * v)		\
 {									\
 	unsigned long tmp;						\
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-			"1:     l32i    %1, %3, 0\n"			\
-			"       wsr     %1, scompare1\n"		\
-			"       " #op " %0, %1, %2\n"			\
-			"       s32c1i  %0, %3, 0\n"			\
-			"       bne     %0, %1, 1b\n"			\
-			: "=&a" (result), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
+			"1:     l32i    %[tmp], %[mem]\n"		\
+			"       wsr     %[tmp], scompare1\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32c1i  %[result], %[mem]\n"		\
+			"       bne     %[result], %[tmp], 1b\n"	\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
+			  [mem] "+m" (*v)				\
+			: [i] "a" (i)					\
+			: "memory"					\
+			);						\
+}									\
+
+#define ATOMIC_OP_RETURN(op)						\
+static inline int arch_atomic_##op##_return(int i, atomic_t * v)	\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	__asm__ __volatile__(						\
+			"1:     l32i    %[tmp], %[mem]\n"		\
+			"       wsr     %[tmp], scompare1\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32c1i  %[result], %[mem]\n"		\
+			"       bne     %[result], %[tmp], 1b\n"	\
+			"       " #op " %[result], %[result], %[i]\n"	\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
+			  [mem] "+m" (*v)				\
+			: [i] "a" (i)					\
+			: "memory"					\
+			);						\
+									\
+	return result;							\
+}
+
+#define ATOMIC_FETCH_OP(op)						\
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v)		\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	__asm__ __volatile__(						\
+			"1:     l32i    %[tmp], %[mem]\n"		\
+			"       wsr     %[tmp], scompare1\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32c1i  %[result], %[mem]\n"		\
+			"       bne     %[result], %[tmp], 1b\n"	\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
+			  [mem] "+m" (*v)				\
+			: [i] "a" (i)					\
 			: "memory"					\
 			);						\
 									\
@@ -121,58 +180,59 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
 #else /* XCHAL_HAVE_S32C1I */
 
 #define ATOMIC_OP(op)							\
-static inline void atomic_##op(int i, atomic_t * v)			\
+static inline void arch_atomic_##op(int i, atomic_t * v)		\
 {									\
 	unsigned int vval;						\
 									\
 	__asm__ __volatile__(						\
-			"       rsil    a15, "__stringify(TOPLEVEL)"\n"\
-			"       l32i    %0, %2, 0\n"			\
-			"       " #op " %0, %0, %1\n"			\
-			"       s32i    %0, %2, 0\n"			\
-			"       wsr     a15, ps\n"			\
+			"       rsil    a14, "__stringify(TOPLEVEL)"\n"\
+			"       l32i    %[result], %[mem]\n"		\
+			"       " #op " %[result], %[result], %[i]\n"	\
+			"       s32i    %[result], %[mem]\n"		\
+			"       wsr     a14, ps\n"			\
 			"       rsync\n"				\
-			: "=&a" (vval)					\
-			: "a" (i), "a" (v)				\
-			: "a15", "memory"				\
+			: [result] "=&a" (vval), [mem] "+m" (*v)	\
+			: [i] "a" (i)					\
+			: "a14", "memory"				\
 			);						\
 }									\
 
 #define ATOMIC_OP_RETURN(op)						\
-static inline int atomic_##op##_return(int i, atomic_t * v)		\
+static inline int arch_atomic_##op##_return(int i, atomic_t * v)	\
 {									\
 	unsigned int vval;						\
 									\
 	__asm__ __volatile__(						\
-			"       rsil    a15,"__stringify(TOPLEVEL)"\n"	\
-			"       l32i    %0, %2, 0\n"			\
-			"       " #op " %0, %0, %1\n"			\
-			"       s32i    %0, %2, 0\n"			\
-			"       wsr     a15, ps\n"			\
+			"       rsil    a14,"__stringify(TOPLEVEL)"\n"	\
+			"       l32i    %[result], %[mem]\n"		\
+			"       " #op " %[result], %[result], %[i]\n"	\
+			"       s32i    %[result], %[mem]\n"		\
+			"       wsr     a14, ps\n"			\
 			"       rsync\n"				\
-			: "=&a" (vval)					\
-			: "a" (i), "a" (v)				\
-			: "a15", "memory"				\
+			: [result] "=&a" (vval), [mem] "+m" (*v)	\
+			: [i] "a" (i)					\
+			: "a14", "memory"				\
 			);						\
 									\
 	return vval;							\
 }
 
 #define ATOMIC_FETCH_OP(op)						\
-static inline int atomic_fetch_##op(int i, atomic_t * v)		\
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v)		\
 {									\
 	unsigned int tmp, vval;						\
 									\
 	__asm__ __volatile__(						\
-			"       rsil    a15,"__stringify(TOPLEVEL)"\n"	\
-			"       l32i    %0, %3, 0\n"			\
-			"       " #op " %1, %0, %2\n"			\
-			"       s32i    %1, %3, 0\n"			\
-			"       wsr     a15, ps\n"			\
+			"       rsil    a14,"__stringify(TOPLEVEL)"\n"	\
+			"       l32i    %[result], %[mem]\n"		\
+			"       " #op " %[tmp], %[result], %[i]\n"	\
+			"       s32i    %[tmp], %[mem]\n"		\
+			"       wsr     a14, ps\n"			\
 			"       rsync\n"				\
-			: "=&a" (vval), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
-			: "a15", "memory"				\
+			: [result] "=&a" (vval), [tmp] "=&a" (tmp),	\
+			  [mem] "+m" (*v)				\
+			: [i] "a" (i)					\
+			: "a14", "memory"				\
 			);						\
 									\
 	return vval;							\
@@ -185,6 +245,11 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define arch_atomic_add_return		arch_atomic_add_return
+#define arch_atomic_sub_return		arch_atomic_sub_return
+#define arch_atomic_fetch_add		arch_atomic_fetch_add
+#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
+
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
 
@@ -192,14 +257,13 @@ ATOMIC_OPS(and)
 ATOMIC_OPS(or)
 ATOMIC_OPS(xor)
 
+#define arch_atomic_fetch_and		arch_atomic_fetch_and
+#define arch_atomic_fetch_or		arch_atomic_fetch_or
+#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
+
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#endif /* __KERNEL__ */
-
 #endif /* _XTENSA_ATOMIC_H */
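For readers unpacking the macro machinery: under XCHAL_HAVE_EXCLUSIVE, ATOMIC_OP_RETURN(add) expands to roughly the following. This is a hand-expanded sketch of the hunks above, not verbatim preprocessor output.

/* Hand-expanded from ATOMIC_OP_RETURN(add) above; illustrative only. */
static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32ex   %[tmp], %[addr]\n"	/* load *v and mark it for exclusive access */
			"       add     %[result], %[tmp], %[i]\n"
			"       s32ex   %[result], %[addr]\n"	/* store back; succeeds only if still exclusive */
			"       getex   %[result]\n"		/* fetch the store-success flag */
			"       beqz    %[result], 1b\n"	/* zero means the store lost a race: retry */
			"       add     %[result], %[tmp], %[i]\n" /* recompute the return value from tmp */
			: [result] "=&a" (result), [tmp] "=&a" (tmp)
			: [i] "a" (i), [addr] "a" (v)
			: "memory");

	return result;
}

The loop retries until s32ex succeeds; getex moves the store-exclusive result into a register so beqz can test it, and the final add recomputes old + i since %[result] was clobbered by the success flag.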
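Likewise, the XCHAL_HAVE_S32C1I path is a classic compare-and-swap retry loop. Here is a hand-expansion of ATOMIC_FETCH_OP(add), again a sketch only; the return statement comes from the unchanged context between the hunks.

/* Hand-expanded from ATOMIC_FETCH_OP(add) above; illustrative only. */
static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %[tmp], %[mem]\n"	/* read the current value */
			"       wsr     %[tmp], scompare1\n"	/* expected value for s32c1i */
			"       add     %[result], %[tmp], %[i]\n"
			"       s32c1i  %[result], %[mem]\n"	/* store iff *v still equals scompare1;
							   %[result] receives the old memory value */
			"       bne     %[result], %[tmp], 1b\n" /* mismatch: another CPU won, retry */
			: [result] "=&a" (result), [tmp] "=&a" (tmp),
			  [mem] "+m" (*v)
			: [i] "a" (i)
			: "memory");

	/* on success result == tmp, i.e. the value before the add */
	return result;
}

Because s32c1i always leaves the prior memory contents in %[result], a successful iteration ends with result equal to tmp, which is exactly the fetch_add return value.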
