author		Matthew Wilcox (Oracle) <willy@infradead.org>	2023-10-04 17:53:12 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-10-18 14:34:17 -0700
commit		2a667285b53c58d72f8bdb736c040f0f36bff58a (patch)
tree		910a25dfd4786d6a5a770332bee8d95a3e22068f /arch/riscv/include/asm/bitops.h
parent		51a752c28bcf901618bbc25a43f84ef539f9e682 (diff)
riscv: implement xor_unlock_is_negative_byte
Inspired by the riscv clear_bit_unlock(), this will surely be more efficient
than the generic one defined in filemap.c.

Link: https://lkml.kernel.org/r/20231004165317.1061855-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
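For reference, the contract this helper has to meet is: atomically XOR `mask`
into the flags word with release ordering and report whether bit 7 (the sign
bit of the low byte, hence "is_negative_byte") is set afterwards. A minimal
portable sketch of those semantics, written with C11 atomics rather than the
kernel's atomic API (the function name and headers below are illustrative
only, not the filemap.c fallback):

#include <stdatomic.h>
#include <stdbool.h>

/* Sketch only: XOR `mask` into *addr with release ordering and report
 * whether bit 7 is set.  Callers never include bit 7 in `mask`, so
 * testing the old value returned by the fetch-XOR is equivalent to
 * testing the updated word. */
static bool xor_unlock_is_negative_byte_sketch(unsigned long mask,
					       _Atomic unsigned long *addr)
{
	unsigned long old = atomic_fetch_xor_explicit(addr, mask,
						      memory_order_release);
	return (old & (1UL << 7)) != 0;
}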
Diffstat (limited to 'arch/riscv/include/asm/bitops.h')
-rw-r--r--	arch/riscv/include/asm/bitops.h	13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 3540b690944b..15e3044298a2 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -191,6 +191,19 @@ static inline void __clear_bit_unlock(
 	clear_bit_unlock(nr, addr);
 }
 
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+		volatile unsigned long *addr)
+{
+	unsigned long res;
+	__asm__ __volatile__ (
+		__AMO(xor) ".rl %0, %2, %1"
+		: "=r" (res), "+A" (*addr)
+		: "r" (__NOP(mask))
+		: "memory");
+	return (res & BIT(7)) != 0;
+}
+#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
+
 #undef __test_and_op_bit
 #undef __op_bit
 #undef __NOP
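Note on the hunk above: earlier in this header (not shown in this hunk),
__AMO(op) builds the AMO mnemonic ("amoxor.d" on RV64, "amoxor.w" on RV32)
and __NOP(x) passes its argument through unchanged, so on RV64 the inline
asm amounts to roughly the following expansion (illustrative, assuming those
macro definitions):

	/* amoxor.d.rl atomically XORs the mask into *addr with release
	 * ("rl") ordering and places the old value of the word in res;
	 * the caller then tests bit 7 of that old value. */
	__asm__ __volatile__ (
		"amoxor.d.rl %0, %2, %1"
		: "=r" (res), "+A" (*addr)
		: "r" (mask)
		: "memory");

No separate barrier is needed because the release ordering is carried by the
AMO itself, mirroring the clear_bit_unlock() that the commit message cites as
inspiration.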