From d12157efc8e083c77d054675fcdd594f54cc7e2b Mon Sep 17 00:00:00 2001
From: Mark Rutland <mark.rutland@arm.com>
Date: Mon, 5 Jun 2023 08:01:01 +0100
Subject: locking/atomic: make atomic*_{cmp,}xchg optional

Most architectures define the atomic/atomic64 xchg and cmpxchg
operations in terms of arch_xchg and arch_cmpxchg respectively.

Add fallbacks for these cases and remove the trivial cases from arch
code. On some architectures the existing definitions are kept as these
are used to build other arch_atomic*() operations.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-5-mark.rutland@arm.com
---
 arch/ia64/include/asm/atomic.h | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'arch/ia64/include')

diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 266c429b9137..6540a628d257 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -207,13 +207,6 @@ ATOMIC64_FETCH_OP(xor, ^)
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
-#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
-#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), new))
-
-#define arch_atomic64_cmpxchg(v, old, new) \
-	(arch_cmpxchg(&((v)->counter), old, new))
-#define arch_atomic64_xchg(v, new)	(arch_xchg(&((v)->counter), new))
-
 #define arch_atomic_add(i,v)		(void)arch_atomic_add_return((i), (v))
 #define arch_atomic_sub(i,v)		(void)arch_atomic_sub_return((i), (v))
 
--
cgit
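
Note: for context, the ia64 macros removed above are covered by the generic
fallbacks this series adds (generated into the shared fallback header,
include/linux/atomic/atomic-arch-fallback.h). A minimal sketch of the
fallback shape, assuming the usual #ifndef-guarded pattern used by the
generated headers (layout here is illustrative, not copied verbatim from
the generated code):

#ifndef arch_atomic_xchg
/* Fall back to the architecture's plain xchg on the counter word. */
static __always_inline int
arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg
#endif

#ifndef arch_atomic_cmpxchg
/* Fall back to the architecture's plain cmpxchg on the counter word. */
static __always_inline int
arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#endif

With fallbacks of this shape in place, an architecture such as ia64 only
needs to provide arch_xchg()/arch_cmpxchg(); the atomic_t and atomic64_t
wrappers are generated unless the architecture supplies its own.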