author		Will Deacon <will.deacon@arm.com>	2015-04-23 20:08:49 +0100
committer	Will Deacon <will.deacon@arm.com>	2015-07-27 15:28:51 +0100
commit		c342f78217e822d2178265b0b1de232eeb717149 (patch)
tree		2c1de7284bb6671aa9cb4e8a0f85a119d64de3a2 /arch/arm64/include/asm/atomic_lse.h
parent		c8366ba0fb65063b6b4f69c7af1ea74152435590 (diff)
arm64: cmpxchg: patch in lse instructions when supported by the CPU
On CPUs which support the LSE atomic instructions introduced in ARMv8.1,
it makes sense to use them in preference to ll/sc sequences. This patch
introduces runtime patching of our cmpxchg primitives so that the LSE cas
instruction is used instead.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
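As a rough illustration (not part of the patch): once the boot-time
alternatives framework has selected the LSE path, a 4-byte cmpxchg boils
down to a single CAS instruction. A minimal standalone sketch, assuming a
toolchain targeting armv8.1-a or later so the assembler accepts CAS:

/*
 * Illustrative sketch only: what the patched-in LSE path of a 32-bit
 * cmpxchg amounts to. Build with -march=armv8.1-a (or later).
 */
static inline unsigned int cmpxchg32_lse(volatile unsigned int *ptr,
					 unsigned int old,
					 unsigned int new)
{
	unsigned int ret = old;

	/* CAS: if (*ptr == ret) *ptr = new; ret receives the prior value */
	asm volatile("	cas	%w[ret], %w[new], %[v]"
		     : [ret] "+r" (ret), [v] "+Q" (*ptr)
		     : [new] "r" (new));

	return ret;	/* the exchange happened iff ret == old */
}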
Diffstat (limited to 'arch/arm64/include/asm/atomic_lse.h')
-rw-r--r--	arch/arm64/include/asm/atomic_lse.h	39
1 file changed, 39 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 6e21b5e0c9d6..b39ae4c1451a 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -349,4 +349,43 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
#undef __LL_SC_ATOMIC64
+#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)
+
+#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
+static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
+ unsigned long old, \
+ unsigned long new) \
+{ \
+ register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
+ register unsigned long x1 asm ("x1") = old; \
+ register unsigned long x2 asm ("x2") = new; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ "nop\n" \
+ __LL_SC_CMPXCHG(name) \
+ "nop", \
+ /* LSE atomics */ \
+ " mov " #w "30, %" #w "[old]\n" \
+ " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
+ " mov %" #w "[ret], " #w "30") \
+ : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
+ : [old] "r" (x1), [new] "r" (x2) \
+ : "x30" , ##cl); \
+ \
+ return x0; \
+}
+
+__CMPXCHG_CASE(w, b,     1,   )
+__CMPXCHG_CASE(w, h,     2,   )
+__CMPXCHG_CASE(w,  ,     4,   )
+__CMPXCHG_CASE(x,  ,     8,   )
+__CMPXCHG_CASE(w, b, mb_1, al, "memory")
+__CMPXCHG_CASE(w, h, mb_2, al, "memory")
+__CMPXCHG_CASE(w,  , mb_4, al, "memory")
+__CMPXCHG_CASE(x,  , mb_8, al, "memory")
+
+#undef __LL_SC_CMPXCHG
+#undef __CMPXCHG_CASE
+
#endif /* __ASM_ATOMIC_LSE_H */
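
For reference, expanding __CMPXCHG_CASE(w,  , mb_4, al, "memory") by hand
yields approximately the function below (a sketch: whitespace tidied, with
__LL_SC_CMPXCHG expanded to the __LL_SC_CALL it resolves to). The two nops
pad the LL/SC side so both alternatives occupy the same three instructions,
letting the patcher overwrite one with the other in place; "casal" is the
fully-ordered (acquire + release) form of CAS selected by the "al" suffix.

/* Hand expansion of __CMPXCHG_CASE(w,  , mb_4, al, "memory") -- a sketch */
static inline unsigned long __cmpxchg_case_mb_4(volatile void *ptr,
						unsigned long old,
						unsigned long new)
{
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;
	register unsigned long x1 asm ("x1") = old;
	register unsigned long x2 asm ("x2") = new;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC: call the out-of-line ll/sc variant, padded with nops */
	"nop\n"
	__LL_SC_CALL(__cmpxchg_case_mb_4)
	"nop",
	/* LSE: casal = CAS with both acquire and release semantics */
	"	mov	w30, %w[old]\n"
	"	casal	w30, %w[new], %[v]\n"
	"	mov	%w[ret], w30")
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)
	: [old] "r" (x1), [new] "r" (x2)
	: "x30", "memory");

	return x0;
}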