Diffstat (limited to 'arch/riscv/include/asm/cmpxchg.h')
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h  80
 1 file changed, 70 insertions(+), 10 deletions(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index ddb002ed89de..ebbce134917c 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -8,9 +8,12 @@
#include <linux/bug.h>
+#include <asm/alternative-macros.h>
#include <asm/fence.h>
+#include <asm/hwcap.h>
+#include <asm/insn-def.h>
-#define __arch_xchg_masked(prepend, append, r, p, n) \
+#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \
({ \
u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
@@ -25,7 +28,7 @@
"0: lr.w %0, %2\n" \
" and %1, %0, %z4\n" \
" or %1, %1, %z3\n" \
- " sc.w %1, %1, %2\n" \
+ " sc.w" sc_sfx " %1, %1, %2\n" \
" bnez %1, 0b\n" \
append \
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
@@ -46,7 +49,8 @@
: "memory"); \
})
-#define _arch_xchg(ptr, new, sfx, prepend, append) \
+#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend, \
+ sc_append, swap_append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(__ptr)) __new = (new); \
@@ -55,15 +59,15 @@
switch (sizeof(*__ptr)) { \
case 1: \
case 2: \
- __arch_xchg_masked(prepend, append, \
+ __arch_xchg_masked(sc_sfx, prepend, sc_append, \
__ret, __ptr, __new); \
break; \
case 4: \
- __arch_xchg(".w" sfx, prepend, append, \
+ __arch_xchg(".w" swap_sfx, prepend, swap_append, \
__ret, __ptr, __new); \
break; \
case 8: \
- __arch_xchg(".d" sfx, prepend, append, \
+ __arch_xchg(".d" swap_sfx, prepend, swap_append, \
__ret, __ptr, __new); \
break; \
default: \
@@ -73,16 +77,17 @@
})
#define arch_xchg_relaxed(ptr, x) \
- _arch_xchg(ptr, x, "", "", "")
+ _arch_xchg(ptr, x, "", "", "", "", "")
#define arch_xchg_acquire(ptr, x) \
- _arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)
+ _arch_xchg(ptr, x, "", "", "", \
+ RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER)
#define arch_xchg_release(ptr, x) \
- _arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")
+ _arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "")
#define arch_xchg(ptr, x) \
- _arch_xchg(ptr, x, ".aqrl", "", "")
+ _arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "")
#define xchg32(ptr, x) \
({ \
@@ -221,4 +226,59 @@
arch_cmpxchg_release((ptr), (o), (n)); \
})
+#ifdef CONFIG_RISCV_ISA_ZAWRS
+/*
+ * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to
+ * @val we expect it to still terminate within a "reasonable" amount of time
+ * for an implementation-specific other reason, a pending, locally-enabled
+ * interrupt, or because it has been configured to raise an illegal
+ * instruction exception.
+ */
+static __always_inline void __cmpwait(volatile void *ptr,
+ unsigned long val,
+ int size)
+{
+ unsigned long tmp;
+
+ asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop",
+ 0, RISCV_ISA_EXT_ZAWRS, 1)
+ : : : : no_zawrs);
+
+ switch (size) {
+ case 4:
+ asm volatile(
+ " lr.w %0, %1\n"
+ " xor %0, %0, %2\n"
+ " bnez %0, 1f\n"
+ ZAWRS_WRS_NTO "\n"
+ "1:"
+ : "=&r" (tmp), "+A" (*(u32 *)ptr)
+ : "r" (val));
+ break;
+#if __riscv_xlen == 64
+ case 8:
+ asm volatile(
+ " lr.d %0, %1\n"
+ " xor %0, %0, %2\n"
+ " bnez %0, 1f\n"
+ ZAWRS_WRS_NTO "\n"
+ "1:"
+ : "=&r" (tmp), "+A" (*(u64 *)ptr)
+ : "r" (val));
+ break;
+#endif
+ default:
+ BUILD_BUG();
+ }
+
+ return;
+
+no_zawrs:
+ asm volatile(RISCV_PAUSE : : : "memory");
+}
+
+#define __cmpwait_relaxed(ptr, val) \
+ __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
+#endif
+
#endif /* _ASM_RISCV_CMPXCHG_H */
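
For reference, a rough sketch of what the fully-ordered arch_xchg() path expands to after this change, assuming RISCV_FULL_BARRIER from <asm/fence.h> emits a full "fence rw, rw" (register names and operand spelling are illustrative, not taken from the diff):

    /* 1- and 2-byte xchg: LR/SC on the containing aligned word, with the
     * release bit on the SC (sc_sfx = ".rl") and the trailing full fence
     * supplied through sc_append. */
    0:  lr.w      a0, (aligned_word)
        and       a1, a0, ~mask          /* clear the target byte/halfword */
        or        a1, a1, new << shift   /* splice in the new value        */
        sc.w.rl   a1, a1, (aligned_word)
        bnez      a1, 0b
        fence     rw, rw

    /* 4- and 8-byte xchg: a single AMO carrying both ordering bits,
     * via swap_sfx = ".aqrl". */
        amoswap.w.aqrl  a0, a1, (ptr)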
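
The new __cmpwait_relaxed() helper is meant to back wait-style polling loops: instead of spinning, the hart can stall in wrs.nto until the watched location changes (or an interrupt arrives). A minimal sketch of how a caller such as smp_cond_load_relaxed() could use it, following the generic pattern other architectures apply; the actual RISC-V wiring lives in asm/barrier.h, outside this diff:

/* Hypothetical illustration only: re-read *ptr until cond_expr holds,
 * parking the hart via __cmpwait_relaxed() whenever the value has not
 * changed since the last read. */
#define smp_cond_load_relaxed(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})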