Diffstat (limited to 'arch/xtensa/include/asm/cmpxchg.h')
-rw-r--r--	arch/xtensa/include/asm/cmpxchg.h	26	++++++++++++++------------
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 3699e2818efb..b6db4838b175 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -11,10 +11,11 @@
#ifndef _XTENSA_CMPXCHG_H
#define _XTENSA_CMPXCHG_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bits.h>
#include <linux/stringify.h>
+#include <linux/cmpxchg-emu.h>
/*
* cmpxchg
@@ -52,16 +53,16 @@ __cmpxchg_u32(volatile int *p, int old, int new)
return new;
#else
__asm__ __volatile__(
- " rsil a15, "__stringify(TOPLEVEL)"\n"
+ " rsil a14, "__stringify(TOPLEVEL)"\n"
" l32i %[old], %[mem]\n"
" bne %[old], %[cmp], 1f\n"
" s32i %[new], %[mem]\n"
"1:\n"
- " wsr a15, ps\n"
+ " wsr a14, ps\n"
" rsync\n"
: [old] "=&a" (old), [mem] "+m" (*p)
: [cmp] "a" (old), [new] "r" (new)
- : "a15", "memory");
+ : "a14", "memory");
return old;
#endif
}
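The hunk above is the file's interrupt-masking fallback: rsil raises the
interrupt level, the load/compare/store runs with interrupts off, and wsr
restores PS. For reference, the contract this must satisfy (return the
prior value of *p, storing new only when the prior value equals old) can
be written portably with C11 atomics. A sketch for illustration only, not
part of the patch:

#include <stdatomic.h>

static int cmpxchg_u32_sketch(_Atomic int *p, int old, int new)
{
	/* On failure, C11 writes the observed value back into 'old',
	 * which is exactly the value a cmpxchg is defined to return. */
	atomic_compare_exchange_strong(p, &old, new);
	return old;
}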
@@ -74,6 +75,7 @@ static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
switch (size) {
+ case 1: return cmpxchg_emu_u8(ptr, old, new);
case 4: return __cmpxchg_u32(ptr, old, new);
default: __cmpxchg_called_with_bad_pointer();
return old;
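The new case 1 routes byte-sized operands to cmpxchg_emu_u8() from the
generic emulation layer (lib/cmpxchg-emu.c, pulled in above via
<linux/cmpxchg-emu.h>), which synthesizes a u8 cmpxchg from a 32-bit one.
A user-space sketch of that technique, with illustrative names and
little-endian byte placement assumed (the kernel helper handles
endianness itself):

#include <stdatomic.h>
#include <stdint.h>

static uint8_t cmpxchg_u8_sketch(uint8_t *p, uint8_t old, uint8_t new)
{
	/* The naturally aligned 32-bit word containing *p. */
	_Atomic uint32_t *word = (_Atomic uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
	unsigned int shift = ((uintptr_t)p & 3) * 8;	/* little-endian */
	uint32_t mask = (uint32_t)0xff << shift;
	uint32_t cur = atomic_load(word);
	uint32_t repl;

	do {
		/* Fail as soon as the target byte no longer matches 'old'. */
		if ((uint8_t)((cur & mask) >> shift) != old)
			return (uint8_t)((cur & mask) >> shift);
		repl = (cur & ~mask) | ((uint32_t)new << shift);
		/* Retry if an unrelated byte of the word changed meanwhile. */
	} while (!atomic_compare_exchange_weak(word, &cur, repl));

	return old;
}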
@@ -116,10 +118,10 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
/*
* xchg_u32
*
- * Note that a15 is used here because the register allocation
+ * Note that a14 is used here because the register allocation
* done by the compiler is not guaranteed and a window overflow
* may not occur between the rsil and wsr instructions. By using
- * a15 in the rsil, the machine is guaranteed to be in a state
+ * a14 in the rsil, the machine is guaranteed to be in a state
* where no register reference will cause an overflow.
*/
@@ -157,20 +159,20 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#else
unsigned long tmp;
__asm__ __volatile__(
- " rsil a15, "__stringify(TOPLEVEL)"\n"
+ " rsil a14, "__stringify(TOPLEVEL)"\n"
" l32i %[tmp], %[mem]\n"
" s32i %[val], %[mem]\n"
- " wsr a15, ps\n"
+ " wsr a14, ps\n"
" rsync\n"
: [tmp] "=&a" (tmp), [mem] "+m" (*m)
: [val] "a" (val)
- : "a15", "memory");
+ : "a14", "memory");
return tmp;
#endif
}
#define arch_xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+ ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
{
@@ -203,7 +205,7 @@ static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
extern void __xchg_called_with_bad_pointer(void);
static __inline__ unsigned long
-__xchg(unsigned long x, volatile void * ptr, int size)
+__arch_xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 1:
@@ -218,6 +220,6 @@ __xchg(unsigned long x, volatile void * ptr, int size)
}
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _XTENSA_CMPXCHG_H */
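With the emulation wired in, a byte-sized cmpxchg from generic kernel
code now resolves through __cmpxchg() instead of falling into the
__cmpxchg_called_with_bad_pointer() link error. A hypothetical call
site, variable names made up for illustration (cmpxchg() is the generic
wrapper that funnels into __cmpxchg() above):

	u8 flag = 0;
	u8 seen = cmpxchg(&flag, 0, 1);	/* seen == 0: we made the 0->1 transition */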