author      Heiko Carstens <hca@linux.ibm.com>         2024-12-04 12:30:58 +0100
committer   Alexander Gordeev <agordeev@linux.ibm.com> 2024-12-15 15:13:42 +0100
commit      d809df72b5a583f9fa6e0a722e4e7cb8b28b19fc (patch)
tree        ff002f38df66a89348e80e3da6c0bef395710dea
parent      7ad0075005088d931a39321dcbb5cb4df8801a37 (diff)
s390/atomic: Implement arch_atomic_inc() / arch_atomic_dec()
Implement arch_atomic_inc() / arch_atomic_dec() functions which result
in a single instruction if compiled for z196 or newer architectures.

Reduces the kernel image size by ~6K (defconfig), per bloat-o-meter:

add/remove: 0/0 grow/shrink: 12/1005 up/down: 106/-6404 (-6298)

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
-rw-r--r--  arch/s390/include/asm/atomic.h | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+), 0 deletions(-)
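Why a single instruction: on z196 and newer machines the
interlocked-access facility guarantees that the add-immediate
instructions ASI (32-bit) and AGSI (64-bit) update storage atomically,
so __atomic_add_const() needs no load/compare-and-swap loop. Below is a
minimal hand-written sketch of the 32-bit case; it is an illustrative
assumption of what __atomic_add_const() boils down to, not a verbatim
copy of the kernel macro, and the name atomic_inc_sketch is hypothetical:

/* Sketch: one-instruction atomic increment on z196 or newer. */
static inline void atomic_inc_sketch(int *counter)
{
	asm volatile(
		/* ASI: add signed 8-bit immediate 1 to *counter, interlocked. */
		"	asi	%[cnt],1\n"
		: [cnt] "+QS" (*counter)
		:
		: "cc", "memory");
}

Since the immediate operand is a signed byte, the same form covers both
inc (+1) and dec (-1).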
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 6723fca64018..c1a4a06e8340 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -45,6 +45,18 @@ static __always_inline void arch_atomic_add(int i, atomic_t *v)
}
#define arch_atomic_add arch_atomic_add

+static __always_inline void arch_atomic_inc(atomic_t *v)
+{
+ __atomic_add_const(1, &v->counter);
+}
+#define arch_atomic_inc arch_atomic_inc
+
+static __always_inline void arch_atomic_dec(atomic_t *v)
+{
+ __atomic_add_const(-1, &v->counter);
+}
+#define arch_atomic_dec arch_atomic_dec
+
#define arch_atomic_sub(_i, _v) arch_atomic_add(-(int)(_i), _v)
#define arch_atomic_sub_return(_i, _v) arch_atomic_add_return(-(int)(_i), _v)
#define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)
@@ -122,6 +134,18 @@ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
}
#define arch_atomic64_add arch_atomic64_add

+static __always_inline void arch_atomic64_inc(atomic64_t *v)
+{
+ __atomic64_add_const(1, (long *)&v->counter);
+}
+#define arch_atomic64_inc arch_atomic64_inc
+
+static __always_inline void arch_atomic64_dec(atomic64_t *v)
+{
+ __atomic64_add_const(-1, (long *)&v->counter);
+}
+#define arch_atomic64_dec arch_atomic64_dec
+
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
return arch_xchg(&v->counter, new);
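Note on the #define lines in the patch: the generic atomic layer
(include/linux/atomic/atomic-arch-fallback.h) only emits its fallback
when the architecture has not defined the macro, roughly like this
(paraphrased, not verbatim):

#ifndef arch_atomic_inc
/* Fallback: open-code inc as add(1) when the arch has no native inc. */
static __always_inline void
arch_atomic_inc(atomic_t *v)
{
	arch_atomic_add(1, v);
}
#define arch_atomic_inc arch_atomic_inc
#endif

Defining arch_atomic_inc in the s390 header therefore routes all
atomic_inc() callers to the ASI-based version, which is where the
bloat-o-meter shrinkage quoted in the commit message comes from.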