| author | Alexandre Ghiti <alexghiti@rivosinc.com> | 2024-11-03 15:51:43 +0100 |
|---|---|---|
| committer | Palmer Dabbelt <palmer@rivosinc.com> | 2024-11-11 07:33:10 -0800 |
| commit | 38acdee32d23f789e866488c99867fd497d43c86 (patch) | |
| tree | f64aabe557031bb7e65be738a935858583227530 /arch/riscv/include/asm/cmpxchg.h | |
| parent | af042c457db07db4bc1baa5c22d089cab69cfc5b (diff) | |
riscv: Implement cmpxchg32/64() using Zacas
This adds runtime support for Zacas in cmpxchg operations.
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Reviewed-by: Andrea Parri <parri.andrea@gmail.com>
Link: https://lore.kernel.org/r/20241103145153.105097-4-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
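For readers who want the mechanics outside of the macro soup: the patch replaces the `lr`/`sc` retry loop with a single `amocas` instruction whenever the Zacas extension is detected at runtime. Below is a minimal standalone sketch of the two paths, distilled from the diff that follows. The function names are ours, not the kernel's, and the ordering fences (the macros' `prepend`/`append` arguments) are omitted, so both variants shown are the fully relaxed form; the Zacas variant needs a toolchain and `-march` string that accept `zacas`.

```c
/*
 * Hedged sketch, not kernel code: a 32-bit compare-and-swap both ways.
 * Both functions return the value found at *p; the swap happened iff
 * that return value equals 'old'. Fences are omitted (relaxed order).
 */

/* Zacas path: one instruction. rd (%0) carries the expected value in
 * and the observed value out; rs2 (%z2) is the replacement value. */
static inline unsigned int cas_amocas_w(unsigned int *p,
					unsigned int old, unsigned int new)
{
	unsigned int r = old;

	__asm__ __volatile__ (
		"	amocas.w %0, %z2, %1\n"
		: "+&r" (r), "+A" (*p)
		: "rJ" (new)
		: "memory");
	return r;
}

/* Fallback path: the load-reserved/store-conditional retry loop used
 * on cores without Zacas. */
static inline unsigned int cas_lr_sc_w(unsigned int *p,
				       unsigned int old, unsigned int new)
{
	unsigned int r;
	unsigned int rc;

	__asm__ __volatile__ (
		"0:	lr.w %0, %2\n"		/* load + take reservation */
		"	bne  %0, %z3, 1f\n"	/* value changed: bail out */
		"	bnez %1, 0b\n"		/* (see sc.w below) */
		: "=&r" (r), "=&r" (rc), "+A" (*p)
		: "rJ" (old), "rJ" (new)
		: "memory");
	return r;
}
```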
Diffstat (limited to 'arch/riscv/include/asm/cmpxchg.h')
-rw-r--r-- | arch/riscv/include/asm/cmpxchg.h | 48
1 file changed, 31 insertions(+), 17 deletions(-)
```diff
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index ac1d7df898ef..39c1daf39f6a 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -12,6 +12,7 @@
 #include <asm/fence.h>
 #include <asm/hwcap.h>
 #include <asm/insn-def.h>
+#include <asm/cpufeature-macros.h>
 
 #define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n)		\
 ({									\
@@ -137,24 +138,37 @@
 	r = (__typeof__(*(p)))((__retx & __mask) >> __s);		\
 })
 
-#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n)	\
+#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n)	\
 ({									\
-	register unsigned int __rc;					\
+	if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&			\
+	    riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) {	\
+		r = o;							\
 									\
-	__asm__ __volatile__ (						\
-		prepend							\
-		"0:	lr" lr_sfx " %0, %2\n"				\
-		"	bne  %0, %z3, 1f\n"				\
-		"	sc" sc_sfx " %1, %z4, %2\n"			\
-		"	bnez %1, 0b\n"					\
-		append							\
-		"1:\n"							\
-		: "=&r" (r), "=&r" (__rc), "+A" (*(p))			\
-		: "rJ" (co o), "rJ" (n)					\
-		: "memory");						\
+		__asm__ __volatile__ (					\
+			prepend						\
+			"	amocas" sc_cas_sfx " %0, %z2, %1\n"	\
+			append						\
+			: "+&r" (r), "+A" (*(p))			\
+			: "rJ" (n)					\
+			: "memory");					\
+	} else {							\
+		register unsigned int __rc;				\
+									\
+		__asm__ __volatile__ (					\
+			prepend						\
+			"0:	lr" lr_sfx " %0, %2\n"			\
+			"	bne  %0, %z3, 1f\n"			\
+			"	sc" sc_cas_sfx " %1, %z4, %2\n"		\
+			"	bnez %1, 0b\n"				\
+			append						\
+			"1:\n"						\
+			: "=&r" (r), "=&r" (__rc), "+A" (*(p))		\
+			: "rJ" (co o), "rJ" (n)			\
+			: "memory");					\
+	}								\
 })
 
-#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append)		\
+#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append)	\
 ({									\
 	__typeof__(ptr) __ptr = (ptr);					\
 	__typeof__(*(__ptr)) __old = (old);				\
@@ -164,15 +178,15 @@
 	switch (sizeof(*__ptr)) {					\
 	case 1:								\
 	case 2:								\
-		__arch_cmpxchg_masked(sc_sfx, prepend, append,		\
+		__arch_cmpxchg_masked(sc_cas_sfx, prepend, append,	\
 				      __ret, __ptr, __old, __new);	\
 		break;							\
 	case 4:								\
-		__arch_cmpxchg(".w", ".w" sc_sfx, prepend, append,	\
+		__arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append,	\
 			       __ret, __ptr, (long), __old, __new);	\
 		break;							\
 	case 8:								\
-		__arch_cmpxchg(".d", ".d" sc_sfx, prepend, append,	\
+		__arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append,	\
 			       __ret, __ptr, /**/, __old, __new);	\
 		break;							\
 	default:							\
```
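Nothing changes for callers: the generic `cmpxchg()` wrapper still routes through `_arch_cmpxchg()`, and the Zacas versus `lr`/`sc` choice is made underneath it. A hedged usage sketch follows (hypothetical kernel code; `event_count` and `event_count_inc()` are our own names, not from the patch):

```c
#include <linux/atomic.h>	/* cmpxchg() */
#include <linux/compiler.h>	/* READ_ONCE() */

static unsigned long event_count;

/*
 * Lock-free increment: retry until our cmpxchg() wins the race.
 * sizeof(long) is 8 on riscv64, so this hits the ".d" case of
 * _arch_cmpxchg(): one amocas.d per attempt on Zacas hardware,
 * an lr.d/sc.d loop everywhere else.
 */
static void event_count_inc(void)
{
	unsigned long old, new;

	do {
		old = READ_ONCE(event_count);
		new = old + 1;
	} while (cmpxchg(&event_count, old, new) != old);
}
```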