Diffstat (limited to 'arch/sparc/include/asm/cmpxchg_64.h')
-rw-r--r--  arch/sparc/include/asm/cmpxchg_64.h | 104
1 file changed, 83 insertions(+), 21 deletions(-)
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 4adefe8e2885..3de25262c411 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* 64-bit atomic xchg() and cmpxchg() definitions.
  *
  * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
@@ -6,6 +7,17 @@
 #ifndef __ARCH_SPARC64_CMPXCHG__
 #define __ARCH_SPARC64_CMPXCHG__
 
+static inline unsigned long
+__cmpxchg_u32(volatile int *m, int old, int new)
+{
+        __asm__ __volatile__("cas [%2], %3, %0"
+                             : "=&r" (new)
+                             : "0" (new), "r" (m), "r" (old)
+                             : "memory");
+
+        return new;
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
         unsigned long tmp1, tmp2;
@@ -40,14 +52,47 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
         return val;
 }
 
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define arch_xchg(ptr,x)                                                \
+({      __typeof__(*(ptr)) __ret;                                       \
+        __ret = (__typeof__(*(ptr)))                                    \
+                __arch_xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+        __ret;                                                          \
+})
+
+void __xchg_called_with_bad_pointer(void);
 
-extern void __xchg_called_with_bad_pointer(void);
+/*
+ * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order.
+ */
+static inline unsigned long
+xchg16(__volatile__ unsigned short *m, unsigned short val)
+{
+        unsigned long maddr = (unsigned long)m;
+        int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
+        unsigned int mask = 0xffff << bit_shift;
+        unsigned int *ptr = (unsigned int *) (maddr & ~2);
+        unsigned int old32, new32, load32;
+
+        /* Read the old value */
+        load32 = *ptr;
+
+        do {
+                old32 = load32;
+                new32 = (load32 & (~mask)) | val << bit_shift;
+                load32 = __cmpxchg_u32(ptr, old32, new32);
+        } while (load32 != old32);
+
+        return (load32 & mask) >> bit_shift;
+}
 
-static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
-                                   int size)
+static __always_inline unsigned long
+__arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
         switch (size) {
+        case 2:
+                return xchg16(ptr, x);
         case 4:
                 return xchg32(ptr, x);
         case 8:
@@ -65,12 +110,11 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
 
 #include <asm-generic/cmpxchg-local.h>
 
-#define __HAVE_ARCH_CMPXCHG 1
 
 static inline unsigned long
-__cmpxchg_u32(volatile int *m, int old, int new)
+__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 {
-        __asm__ __volatile__("cas [%2], %3, %0"
+        __asm__ __volatile__("casx [%2], %3, %0"
                              : "=&r" (new)
                              : "0" (new), "r" (m), "r" (old)
                              : "memory");
@@ -78,25 +122,43 @@ __cmpxchg_u32(volatile int *m, int old, int new)
         return new;
 }
 
+/*
+ * Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order
+ */
 static inline unsigned long
-__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
 {
-        __asm__ __volatile__("casx [%2], %3, %0"
-                             : "=&r" (new)
-                             : "0" (new), "r" (m), "r" (old)
-                             : "memory");
-
-        return new;
+        unsigned long maddr = (unsigned long)m;
+        int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
+        unsigned int mask = 0xff << bit_shift;
+        unsigned int *ptr = (unsigned int *) (maddr & ~3);
+        unsigned int old32, new32, load;
+        unsigned int load32 = *ptr;
+
+        do {
+                new32 = (load32 & ~mask) | (new << bit_shift);
+                old32 = (load32 & ~mask) | (old << bit_shift);
+                load32 = __cmpxchg_u32(ptr, old32, new32);
+                if (load32 == old32)
+                        return old;
+                load = (load32 & mask) >> bit_shift;
+        } while (load == old);
+
+        return load;
 }
 
 /* This function doesn't exist, so you'll get a linker error
    if something tries to do an invalid cmpxchg().  */
-extern void __cmpxchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
 
 static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
         switch (size) {
+        case 1:
+                return __cmpxchg_u8(ptr, old, new);
         case 4:
                 return __cmpxchg_u32(ptr, old, new);
         case 8:
@@ -106,7 +168,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
         return old;
 }
 
-#define cmpxchg(ptr,o,n)                                                 \
+#define arch_cmpxchg(ptr,o,n)                                            \
   ({                                                                     \
      __typeof__(*(ptr)) _o_ = (o);                                       \
      __typeof__(*(ptr)) _n_ = (n);                                       \
@@ -127,20 +189,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
         case 4:
         case 8: return __cmpxchg(ptr, old, new, size);
         default:
-                return __cmpxchg_local_generic(ptr, old, new, size);
+                return __generic_cmpxchg_local(ptr, old, new, size);
         }
 
         return old;
 }
 
-#define cmpxchg_local(ptr, o, n)                                        \
+#define arch_cmpxchg_local(ptr, o, n)                                   \
         ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
                         (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n)                                      \
+#define arch_cmpxchg64_local(ptr, o, n)                                 \
   ({                                                                    \
         BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-        cmpxchg_local((ptr), (o), (n));                                 \
+        arch_cmpxchg_local((ptr), (o), (n));                            \
   })
-#define cmpxchg64(ptr, o, n)    cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n)       arch_cmpxchg64_local((ptr), (o), (n))
 
 #endif /* __ARCH_SPARC64_CMPXCHG__ */
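
The byte-shift arithmetic in xchg16() and __cmpxchg_u8() is worth unpacking. sparc64 is big-endian, so the byte at the lowest address is the most significant byte of its containing 32-bit word: for (addr & 3) == 0 the shift is ((0 ^ 3) << 3) = 24, and for (addr & 3) == 3 it is 0. The sketch below mirrors the __cmpxchg_u8() loop in user-space C, purely as an illustration of the technique in this patch, not as kernel code: the names cas32() and cmpxchg_u8_emulated() are invented here, and the GCC/Clang builtin __atomic_compare_exchange_n stands in for the sparc64 cas instruction.

#include <stdint.h>

/* Word-wide CAS that returns the value actually found at *p, like cas. */
static unsigned int cas32(volatile unsigned int *p, unsigned int old,
                          unsigned int new)
{
        __atomic_compare_exchange_n(p, &old, new, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return old;     /* on failure the builtin wrote the found value here */
}

static unsigned char cmpxchg_u8_emulated(volatile unsigned char *m,
                                         unsigned char old, unsigned char new)
{
        uintptr_t maddr = (uintptr_t)m;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        /* Byte 0 is the most significant byte: the XOR reverses the position. */
        int bit_shift = ((maddr & 3) ^ 3) << 3;
#else
        /* Little-endian hosts skip the XOR so the sketch stays correct. */
        int bit_shift = (maddr & 3) << 3;
#endif
        unsigned int mask = 0xffu << bit_shift;
        volatile unsigned int *ptr =
                (volatile unsigned int *)(maddr & ~(uintptr_t)3);
        unsigned int old32, new32, load32 = *ptr;
        unsigned char load;

        do {
                /* Splice the expected and new bytes into the loaded word. */
                new32 = (load32 & ~mask) | ((unsigned int)new << bit_shift);
                old32 = (load32 & ~mask) | ((unsigned int)old << bit_shift);
                load32 = cas32(ptr, old32, new32);
                if (load32 == old32)
                        return old;     /* our byte swapped */
                load = (load32 & mask) >> bit_shift;
        } while (load == old);  /* only a neighbouring byte changed: retry */

        return load;    /* target byte no longer matches 'old': fail */
}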
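
A quick way to exercise both exit paths of that loop (a hypothetical test driver for the sketch above, not part of the patch): a successful swap returns the expected old byte, a failed one returns the byte actually found. The loop condition exists because the word-wide CAS can also fail when one of the other three bytes of the word changes underneath it, which must be retried rather than reported as a cmpxchg failure.

#include <stdio.h>

/* Word-aligned so (maddr & ~3) covers exactly these four bytes. */
static unsigned char buf[4] __attribute__((aligned(4))) =
        { 0x11, 0x22, 0x33, 0x44 };

int main(void)
{
        /* buf[2] holds 0x33, so the swap succeeds and 0x33 comes back. */
        printf("%#x\n", cmpxchg_u8_emulated(&buf[2], 0x33, 0xaa));

        /* buf[2] is now 0xaa: the swap fails and the current 0xaa comes back. */
        printf("%#x\n", cmpxchg_u8_emulated(&buf[2], 0x33, 0xbb));
        return 0;
}

In both calls the mask confines the update to the addressed byte, so buf[0], buf[1] and buf[3] are never modified.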
