Diffstat (limited to 'arch/riscv/include/asm/atomic.h')
-rw-r--r--	arch/riscv/include/asm/atomic.h	72
1 file changed, 0 insertions(+), 72 deletions(-)
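The hunk below drops the RISC-V-specific ATOMIC_OP() wrappers that defined arch_atomic{,64}_xchg*() and arch_atomic{,64}_cmpxchg*() on top of __xchg*()/__cmpxchg*(). With those wrappers gone, the architecture presumably picks up the common definitions that the generic atomic fallback layer builds from arch_xchg()/arch_cmpxchg(). A minimal, hypothetical sketch of that fallback shape (simplified; not the generated include/linux/atomic/atomic-arch-fallback.h verbatim):

/*
 * Illustrative sketch only: when an architecture does not provide
 * arch_atomic_xchg()/arch_atomic_cmpxchg() itself, the generic fallback
 * layer defines them in terms of arch_xchg()/arch_cmpxchg() on the
 * counter member, which is what makes the per-size ATOMIC_OP() wrappers
 * removed below redundant.
 */
#ifndef arch_atomic_xchg
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg
#endif

#ifndef arch_atomic_cmpxchg
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#endif

The full-barrier requirement noted in the removed comment is then inherited from the fully ordered arch_xchg()/arch_cmpxchg() implementations rather than being restated per atomic type.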
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index bba472928b53..f5dfef6c2153 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -238,78 +238,6 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
-/*
- * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
- * {cmp,}xchg and the operations that return, so they need a full barrier.
- */
-#define ATOMIC_OP(c_t, prefix, size) \
-static __always_inline \
-c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
-{ \
- return __xchg_relaxed(&(v->counter), n, size); \
-} \
-static __always_inline \
-c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
-{ \
- return __xchg_acquire(&(v->counter), n, size); \
-} \
-static __always_inline \
-c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
-{ \
- return __xchg_release(&(v->counter), n, size); \
-} \
-static __always_inline \
-c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
-{ \
- return __arch_xchg(&(v->counter), n, size); \
-} \
-static __always_inline \
-c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
- c_t o, c_t n) \
-{ \
- return __cmpxchg_relaxed(&(v->counter), o, n, size); \
-} \
-static __always_inline \
-c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
- c_t o, c_t n) \
-{ \
- return __cmpxchg_acquire(&(v->counter), o, n, size); \
-} \
-static __always_inline \
-c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
- c_t o, c_t n) \
-{ \
- return __cmpxchg_release(&(v->counter), o, n, size); \
-} \
-static __always_inline \
-c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
-{ \
- return __cmpxchg(&(v->counter), o, n, size); \
-}
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS() \
- ATOMIC_OP(int, , 4)
-#else
-#define ATOMIC_OPS() \
- ATOMIC_OP(int, , 4) \
- ATOMIC_OP(s64, 64, 8)
-#endif
-
-ATOMIC_OPS()
-
-#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
-#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
-#define arch_atomic_xchg_release arch_atomic_xchg_release
-#define arch_atomic_xchg arch_atomic_xchg
-#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
-#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP
-
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
int prev, rc;