author		Heiko Carstens <heiko.carstens@de.ibm.com>	2019-10-04 12:28:43 +0200
committer	Vasily Gorbik <gor@linux.ibm.com>		2019-10-04 16:37:33 +0200
commit		b4fd5a0a9295d87da629c72378a7f4967a277030 (patch)
tree		af454e0b4a48d597480cdda245aef45206717dc3 /arch
parent		51ce02216d4ad4e8f6a58de81d6e803cf04c418e (diff)
s390/atomic,bitops: mark function(s) __always_inline
Always inline asm inlines with variable operands for "i" constraints,
since they won't compile if the compiler decides not to inline them.

Reported-by: Michal Kubecek <mkubecek@suse.cz>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
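The failure mode the commit message describes can be seen in a small sketch (illustrative only, not part of the patch; the function name is made up, but the asm is modeled on the constant-nr fast path of arch_set_bit() in the bitops.h being changed): an operand with an "i" constraint must be a compile-time immediate, which is only guaranteed when the body is inlined into a call site where the argument is a constant.

	/*
	 * Illustrative sketch, not from the patch. The "i" constraint
	 * requires an immediate known at compile time. If the compiler
	 * emits this function out of line, `nr` is a runtime value, the
	 * constraint cannot be satisfied, and the build fails. Plain
	 * `inline` is only a hint, so that can happen; __always_inline
	 * forces inlining, keeping the operand constant-foldable.
	 */
	static __always_inline void __set_bit_imm(unsigned long nr,
						  unsigned char *caddr)
	{
		if (__builtin_constant_p(nr)) {
			asm volatile(
				"oi	%0,%b1\n"	/* OR Immediate (s390) */
				: "+Q" (*caddr)
				: "i" (1 << (nr & 7))
				: "cc", "memory");
			return;
		}
		*caddr |= 1 << (nr & 7);	/* non-constant fallback */
	}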
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/include/asm/atomic_ops.h	2
-rw-r--r--	arch/s390/include/asm/bitops.h		8
2 files changed, 5 insertions, 5 deletions
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index d3f09526ee19..61467b9eecc7 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -41,7 +41,7 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
 #undef __ATOMIC_OP
 
 #define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier)	\
-static inline void op_name(op_type val, op_type *ptr)			\
+static __always_inline void op_name(op_type val, op_type *ptr)		\
 {									\
 	asm volatile(							\
 		op_string "	%[ptr],%[val]\n"			\
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index b8833ac983fa..eb7eed43e780 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -56,7 +56,7 @@ __bitops_byte(unsigned long nr, volatile unsigned long *ptr)
 	return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
 }
 
-static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
 	unsigned long mask;
@@ -77,7 +77,7 @@ static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
 	__atomic64_or(mask, (long *)addr);
 }
 
-static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
 	unsigned long mask;
@@ -98,8 +98,8 @@ static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	__atomic64_and(mask, (long *)addr);
 }
 
-static inline void arch_change_bit(unsigned long nr,
-				   volatile unsigned long *ptr)
+static __always_inline void arch_change_bit(unsigned long nr,
+					    volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
 	unsigned long mask;