author     Peter Zijlstra <peterz@infradead.org>    2020-01-24 22:13:03 +0100
committer  Thomas Gleixner <tglx@linutronix.de>     2020-06-11 08:03:24 +0200
commit     37f8173dd84936ea78000ed1cad24f8b18d48ebb (patch)
tree       0b715066a7f5c16a71988e176627c46b61481b3c /arch
parent     765dcd209947e7b3666c08fb109ab8b879f7a471 (diff)
locking/atomics: Flip fallbacks and instrumentation
Currently, instrumentation of atomic primitives is done at the architecture
level, while composites or fallbacks are provided at the generic level. The
result is that there are no uninstrumented variants of the fallbacks.

Since there is now a need for such variants to isolate text poke from any
form of instrumentation, invert this ordering.

Doing this means moving the instrumentation into the generic code as well
as having (for now) two variants of the fallbacks.

Notes:

 - the various *cond_read* primitives are not proper fallbacks and got
   moved into linux/atomic.h. No arch_ variants are generated because
   the base primitives smp_cond_load*() are instrumented.

 - once all architectures are moved over to arch_atomic_, one of the
   fallback variants can be removed and some 2300 lines reclaimed.

 - atomic_{read,set}*() are no longer double-instrumented.

Reported-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lkml.kernel.org/r/20200505134058.769149955@linutronix.de
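For context, the flipped layering means the instrumented atomic_*() API now
lives in the generic layer and wraps the uninstrumented arch_ primitive,
instead of the arch headers including asm-generic/atomic-instrumented.h
themselves. A minimal sketch of that shape, assuming the hook names from
<linux/instrumented.h> (the real wrappers are script-generated into
asm-generic/atomic-instrumented.h, so this is illustrative only):

	#include <linux/instrumented.h>

	/* Instrumented entry point: sanitizer hook, then the raw arch op. */
	static __always_inline int atomic_add_return(int i, atomic_t *v)
	{
		instrument_atomic_write(v, sizeof(*v));
		return arch_atomic_add_return(i, v);
	}

	/* noinstr / text-poke code calls arch_atomic_add_return() directly. */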
Diffstat (limited to 'arch')
 arch/arm64/include/asm/atomic.h    |  6
 arch/x86/include/asm/atomic.h      | 17
 arch/x86/include/asm/atomic64_32.h |  9
 arch/x86/include/asm/atomic64_64.h | 15
 4 files changed, 37 insertions(+), 10 deletions(-)
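The repeated "#define arch_atomic_foo arch_atomic_foo" lines added below let
the generic fallback layer detect which operations the architecture provides:
a fallback is only emitted when the corresponding macro is absent. A rough
sketch of that guard pattern, using arch_atomic_inc() as an example (an
assumption about the shape of the generated linux/atomic-arch-fallback.h, not
a quote of it):

	#ifndef arch_atomic_inc
	/* No native increment: build it from arch_atomic_add(). */
	static __always_inline void arch_atomic_inc(atomic_t *v)
	{
		arch_atomic_add(1, v);
	}
	#define arch_atomic_inc arch_atomic_inc
	#endif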
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 9543b5e0534d..a08890da696c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -101,8 +101,8 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define ATOMIC_INIT(i) { (i) }
-#define arch_atomic_read(v) READ_ONCE((v)->counter)
-#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) __READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
@@ -225,6 +225,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
#endif /* __ASM_ATOMIC_H */
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 115127c7ad28..a9ae58826074 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -28,7 +28,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
* Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
* it's non-inlined function that increases binary size and stack usage.
*/
- return READ_ONCE((v)->counter);
+ return __READ_ONCE((v)->counter);
}
/**
@@ -40,7 +40,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
*/
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
- WRITE_ONCE(v->counter, i);
+ __WRITE_ONCE(v->counter, i);
}
/**
@@ -166,6 +166,7 @@ static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
+#define arch_atomic_add_return arch_atomic_add_return
/**
* arch_atomic_sub_return - subtract integer and return
@@ -178,32 +179,37 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
return arch_atomic_add_return(-i, v);
}
+#define arch_atomic_sub_return arch_atomic_sub_return
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd(&v->counter, i);
}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
return xadd(&v->counter, -i);
}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return arch_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return try_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
return arch_xchg(&v->counter, new);
}
+#define arch_atomic_xchg arch_atomic_xchg
static inline void arch_atomic_and(int i, atomic_t *v)
{
@@ -221,6 +227,7 @@ static inline int arch_atomic_fetch_and(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
static inline void arch_atomic_or(int i, atomic_t *v)
{
@@ -238,6 +245,7 @@ static inline int arch_atomic_fetch_or(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
static inline void arch_atomic_xor(int i, atomic_t *v)
{
@@ -255,6 +263,7 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
@@ -262,6 +271,6 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
# include <asm/atomic64_64.h>
#endif
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
#endif /* _ASM_X86_ATOMIC_H */
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 52cfaecb13f9..5efd01b548d1 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -75,6 +75,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
return arch_cmpxchg64(&v->counter, o, n);
}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
/**
* arch_atomic64_xchg - xchg atomic64 variable
@@ -94,6 +95,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
: "memory");
return o;
}
+#define arch_atomic64_xchg arch_atomic64_xchg
/**
* arch_atomic64_set - set atomic64 variable
@@ -138,6 +140,7 @@ static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
+#define arch_atomic64_add_return arch_atomic64_add_return
/*
* Other variants with different arithmetic operators:
@@ -149,6 +152,7 @@ static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
@@ -242,6 +246,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
"S" (v) : "memory");
return (int)a;
}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
@@ -281,6 +286,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@@ -299,6 +305,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@@ -317,6 +324,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
@@ -327,6 +335,7 @@ static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v))
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 95c6ceac66b9..809bd010a751 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -19,7 +19,7 @@
*/
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
- return READ_ONCE((v)->counter);
+ return __READ_ONCE((v)->counter);
}
/**
@@ -31,7 +31,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
*/
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
- WRITE_ONCE(v->counter, i);
+ __WRITE_ONCE(v->counter, i);
}
/**
@@ -159,37 +159,43 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return i + xadd(&v->counter, i);
}
+#define arch_atomic64_add_return arch_atomic64_add_return
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return(-i, v);
}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return xadd(&v->counter, i);
}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
return xadd(&v->counter, -i);
}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return arch_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
return try_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
return arch_xchg(&v->counter, new);
}
+#define arch_atomic64_xchg arch_atomic64_xchg
static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
@@ -207,6 +213,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
return val;
}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@@ -224,6 +231,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
return val;
}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@@ -241,5 +249,6 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
return val;
}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif /* _ASM_X86_ATOMIC64_64_H */
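With both arch headers now defining ARCH_ATOMIC instead of pulling in
asm-generic/atomic-instrumented.h, the choice between the two fallback
variants moves to the generic layer. A sketch of how linux/atomic.h could key
off that switch (an assumption about this point in the series; file names and
layout are illustrative):

	#ifdef ARCH_ATOMIC
	#include <linux/atomic-arch-fallback.h>		/* arch_atomic_*() fallbacks */
	#include <asm-generic/atomic-instrumented.h>	/* instrumented atomic_*() wrappers */
	#else
	#include <linux/atomic-fallback.h>		/* legacy uninstrumented fallbacks */
	#endif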