author    Heiko Carstens <hca@linux.ibm.com>  2021-04-13 11:23:30 +0200
committer Heiko Carstens <hca@linux.ibm.com>  2021-04-15 17:47:41 +0200
commit    13525f0a62cc258b2b2266478cc5fec0a45d1e71 (patch)
tree      1b2356d157dd983e5d457bd6c53cb5431cf50ae4 /arch/s390
parent    9d42a4d3e27db3cabad82483ed876d4c8b8bed65 (diff)
s390/cmpxchg: use unsigned long values instead of void pointers
gcc and clang warn about incompatible pointer types due to the recent
cmpxchg changes:

drivers/gpu/drm/drm_lock.c:75:10: error: passing 'typeof (lock)' (aka
'volatile unsigned int *') to parameter of type 'void *' discards
qualifiers [-Werror,-Wincompatible-pointer-types-discards-qualifiers]
        prev = cmpxchg(lock, old, new);
               ^~~~~~~~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: expanded from
macro 'cmpxchg'
        arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
        ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To avoid this, simply cast the pointers to unsigned long and use those
values instead of void pointers. This allows us to stay with functions,
instead of using complex defines and having to deal with all their
potential side effects.

Reported-by: kernel test robot <lkp@intel.com>
Fixes: d2b1f6d2d350 ("s390/cmpxchg: get rid of gcc atomic builtins")
Link: https://lore.kernel.org/linux-s390/202104130131.sMmSqpb5-lkp@intel.com/
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
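[Editor's note] As an illustration only (not part of the patch), the following stand-alone C sketch reproduces the pattern the commit message describes: a helper taking void * discards the volatile qualifier of the caller's pointer, while a helper taking unsigned long accepts any pointer via a plain cast and still stays a type-checked inline function. The names helper_voidp(), helper_ulong() and my_xchg() are made up for this example, and helper_ulong() is a non-atomic stand-in for the real compare-and-swap loop.

/*
 * Illustrative sketch only; compiles with gcc or clang (uses the GNU
 * statement-expression extension, as the kernel macros do).
 */
#include <assert.h>
#include <stdio.h>

static inline unsigned long helper_voidp(unsigned long x, void *ptr, int size)
{
	/* Body elided; only the parameter type matters for the warning. */
	(void)ptr;
	(void)size;
	return x;
}

static inline unsigned long helper_ulong(unsigned long x, unsigned long address,
					 int size)
{
	/*
	 * Non-atomic stand-in for the real compare-and-swap loop, 4-byte
	 * case only; enough to show the calling convention.
	 */
	volatile unsigned int *p = (volatile unsigned int *)address;
	unsigned int old;

	assert(size == 4);
	old = *p;
	*p = (unsigned int)x;
	return old;
}

#define my_xchg(ptr, x)							\
({									\
	__typeof__(*(ptr)) __ret;					\
									\
	__ret = (__typeof__(*(ptr)))					\
		helper_ulong((unsigned long)(x), (unsigned long)(ptr),	\
			     sizeof(*(ptr)));				\
	__ret;								\
})

int main(void)
{
	volatile unsigned int lock = 1;
	unsigned int old;

	/*
	 * Passing &lock (a volatile unsigned int *) to the void * parameter
	 * discards the volatile qualifier and triggers
	 * -Wincompatible-pointer-types-discards-qualifiers with clang
	 * (-Wdiscarded-qualifiers with gcc):
	 */
	/* helper_voidp(2, &lock, sizeof(lock)); */

	/* The unsigned long variant takes any pointer via a plain cast. */
	old = my_xchg(&lock, 2u);
	printf("old=%u new=%u\n", old, lock);
	return 0;
}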
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/cmpxchg.h  49
1 file changed, 23 insertions(+), 26 deletions(-)
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 6def9722124e..6ae4e8a288a2 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -14,16 +14,15 @@
void __xchg_called_with_bad_pointer(void);
-static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
+static inline unsigned long __xchg(unsigned long x, unsigned long address, int size)
{
- unsigned long addr, old;
+ unsigned long old;
int shift;
switch (size) {
case 1:
- addr = (unsigned long) ptr;
- shift = (3 ^ (addr & 3)) << 3;
- addr ^= addr & 3;
+ shift = (3 ^ (address & 3)) << 3;
+ address ^= address & 3;
asm volatile(
" l %0,%1\n"
"0: lr 0,%0\n"
@@ -31,14 +30,13 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
" or 0,%2\n"
" cs %0,0,%1\n"
" jl 0b\n"
- : "=&d" (old), "+Q" (*(int *) addr)
+ : "=&d" (old), "+Q" (*(int *) address)
: "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
: "memory", "cc", "0");
return old >> shift;
case 2:
- addr = (unsigned long) ptr;
- shift = (2 ^ (addr & 2)) << 3;
- addr ^= addr & 2;
+ shift = (2 ^ (address & 2)) << 3;
+ address ^= address & 2;
asm volatile(
" l %0,%1\n"
"0: lr 0,%0\n"
@@ -46,7 +44,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
" or 0,%2\n"
" cs %0,0,%1\n"
" jl 0b\n"
- : "=&d" (old), "+Q" (*(int *) addr)
+ : "=&d" (old), "+Q" (*(int *) address)
: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
: "memory", "cc", "0");
return old >> shift;
@@ -55,7 +53,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
" l %0,%1\n"
"0: cs %0,%2,%1\n"
" jl 0b\n"
- : "=&d" (old), "+Q" (*(int *) ptr)
+ : "=&d" (old), "+Q" (*(int *) address)
: "d" (x)
: "memory", "cc");
return old;
@@ -64,7 +62,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
" lg %0,%1\n"
"0: csg %0,%2,%1\n"
" jl 0b\n"
- : "=&d" (old), "+S" (*(long *) ptr)
+ : "=&d" (old), "+S" (*(long *) address)
: "d" (x)
: "memory", "cc");
return old;
@@ -78,23 +76,23 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
__typeof__(*(ptr)) __ret; \
\
__ret = (__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr))); \
+ __xchg((unsigned long)(x), (unsigned long)(ptr), \
+ sizeof(*(ptr))); \
__ret; \
})
void __cmpxchg_called_with_bad_pointer(void);
-static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
+static inline unsigned long __cmpxchg(unsigned long address, unsigned long old,
unsigned long new, int size)
{
- unsigned long addr, prev, tmp;
+ unsigned long prev, tmp;
int shift;
switch (size) {
case 1:
- addr = (unsigned long) ptr;
- shift = (3 ^ (addr & 3)) << 3;
- addr ^= addr & 3;
+ shift = (3 ^ (address & 3)) << 3;
+ address ^= address & 3;
asm volatile(
" l %0,%2\n"
"0: nr %0,%5\n"
@@ -107,16 +105,15 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
" nr %1,%5\n"
" jnz 0b\n"
"1:"
- : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
+ : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address)
: "d" ((old & 0xff) << shift),
"d" ((new & 0xff) << shift),
"d" (~(0xff << shift))
: "memory", "cc");
return prev >> shift;
case 2:
- addr = (unsigned long) ptr;
- shift = (2 ^ (addr & 2)) << 3;
- addr ^= addr & 2;
+ shift = (2 ^ (address & 2)) << 3;
+ address ^= address & 2;
asm volatile(
" l %0,%2\n"
"0: nr %0,%5\n"
@@ -129,7 +126,7 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
" nr %1,%5\n"
" jnz 0b\n"
"1:"
- : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
+ : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address)
: "d" ((old & 0xffff) << shift),
"d" ((new & 0xffff) << shift),
"d" (~(0xffff << shift))
@@ -138,14 +135,14 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
case 4:
asm volatile(
" cs %0,%3,%1\n"
- : "=&d" (prev), "+Q" (*(int *) ptr)
+ : "=&d" (prev), "+Q" (*(int *) address)
: "0" (old), "d" (new)
: "memory", "cc");
return prev;
case 8:
asm volatile(
" csg %0,%3,%1\n"
- : "=&d" (prev), "+S" (*(long *) ptr)
+ : "=&d" (prev), "+S" (*(long *) address)
: "0" (old), "d" (new)
: "memory", "cc");
return prev;
@@ -159,7 +156,7 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
__typeof__(*(ptr)) __ret; \
\
__ret = (__typeof__(*(ptr))) \
- __cmpxchg((ptr), (unsigned long)(o), \
+ __cmpxchg((unsigned long)(ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))); \
__ret; \
})
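[Editor's note] For reference only (not part of the patch), the sketch below walks through the address arithmetic of the 1-byte case above. s390 is big-endian, so the byte at offset 0 of an aligned 4-byte word is the most significant one; the 2-byte case is analogous with mask 2 instead of 3. The compare-and-swap loop itself is s390 assembly and is not reproduced here.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (unsigned long address = 0x1000; address < 0x1004; address++) {
		/* byte offset within the aligned 4-byte word */
		unsigned long offset = address & 3;

		/* 3 ^ offset equals 3 - offset for offset in 0..3 */
		int shift = (3 ^ offset) << 3;		  /* bit position of the byte */
		unsigned long aligned = address ^ offset; /* round down to word boundary */

		assert(shift == (int)((3 - offset) * 8));
		assert(aligned == (address & ~3UL));

		printf("address=%#lx aligned=%#lx shift=%2d\n",
		       address, aligned, shift);
	}
	return 0;
}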