Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/assembler.h     17
-rw-r--r--  arch/arm/include/asm/atomic.h        15
-rw-r--r--  arch/arm/include/asm/sync_bitops.h   29
3 files changed, 53 insertions, 8 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 505a306e0271..aebe2c8f6a68 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -394,6 +394,23 @@ ALT_UP_B(.L0_\@)
#endif
.endm
+/*
+ * Raw SMP data memory barrier
+ */
+	.macro	__smp_dmb mode
+#if __LINUX_ARM_ARCH__ >= 7
+	.ifeqs "\mode","arm"
+	dmb	ish
+	.else
+	W(dmb)	ish
+	.endif
+#elif __LINUX_ARM_ARCH__ == 6
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#else
+	.error "Incompatible SMP platform"
+#endif
+	.endm
+
#if defined(CONFIG_CPU_V7M)
/*
* setmode is used to assert to be in svc mode during boot. For v7-M
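
The new __smp_dmb macro picks the strongest SMP data memory barrier the target can encode: a real "dmb ish" on ARMv7 and later (via W() so the 32-bit encoding is used when assembling Thumb code), and the CP15 c7, c10, 5 coprocessor write on ARMv6, which predates the dedicated instruction. A rough C-level sketch of the same selection, in the spirit of arch/arm/include/asm/barrier.h; the name smp_dmb_ish() is hypothetical and not part of this patch:

/*
 * Hypothetical C-level analogue of the __smp_dmb selection above.
 * smp_dmb_ish() is made up for illustration; it is not in this patch.
 */
#ifndef __LINUX_ARM_ARCH__
#define __LINUX_ARM_ARCH__ 7	/* assume ARMv7 for this sketch */
#endif

#if __LINUX_ARM_ARCH__ >= 7
/* ARMv7 and later have a dedicated DMB instruction. */
#define smp_dmb_ish()	__asm__ __volatile__("dmb ish" : : : "memory")
#elif __LINUX_ARM_ARCH__ == 6
/* ARMv6 only has the CP15 "Data Memory Barrier" coprocessor write. */
#define smp_dmb_ish()						\
	__asm__ __volatile__("mcr p15, 0, %0, c7, c10, 5"	\
			     : : "r" (0) : "memory")
#else
#error "Incompatible SMP platform"
#endif
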
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index db8512d9a918..f0e3b01afa74 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -197,6 +197,16 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
return val; \
}
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -210,8 +220,7 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#endif /* __LINUX_ARM_ARCH__ */
@@ -240,8 +249,6 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
s64 counter;
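
The self-referential #defines above are how an architecture tells the generic atomic layer (include/linux/atomic/atomic-arch-fallback.h) which operations it implements: the generic headers test each name with #ifdef and only generate a fallback when the arch did not provide one. Dropping the arch_atomic_xchg define likewise hands that operation back to the generic fallback. A self-contained user-space sketch of the same pattern, with hypothetical names, assuming a relaxed compare-and-swap is the only primitive available:

/*
 * User-space sketch (not kernel code) of the fallback pattern these
 * self-referential #defines drive: if the "arch" did not advertise
 * fetch_add, build it from a compare-and-swap retry loop.
 */
#include <stdatomic.h>

#ifndef arch_fetch_add_demo
static inline int arch_fetch_add_demo(_Atomic int *v, int i)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* Retry until the CAS installs old + i over the value we read. */
	while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;
	return old;	/* fetch_add returns the pre-add value */
}
#define arch_fetch_add_demo arch_fetch_add_demo
#endif
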
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 6f5d627c44a3..f46b3c570f92 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -14,14 +14,35 @@
* ops which are SMP safe even on a UP kernel.
*/
+/*
+ * Unordered
+ */
+
#define sync_set_bit(nr, p) _set_bit(nr, p)
#define sync_clear_bit(nr, p) _clear_bit(nr, p)
#define sync_change_bit(nr, p) _change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define arch_sync_cmpxchg arch_cmpxchg
+/*
+ * Fully ordered
+ */
+
+int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p)
+
+int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p)
+
+int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p)
+
+#define arch_sync_cmpxchg(ptr, old, new)				\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__smp_mb__before_atomic();					\
+	__ret = arch_cmpxchg_relaxed((ptr), (old), (new));		\
+	__smp_mb__after_atomic();					\
+	__ret;								\
+})
#endif
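
The test-and-modify sync_* ops become out-of-line, fully ordered functions, and arch_sync_cmpxchg is rebuilt as a relaxed cmpxchg bracketed by __smp_mb__before_atomic()/__smp_mb__after_atomic(), so it stays fully ordered regardless of how the kernel's regular atomics are implemented. A user-space C11 sketch of the same "barrier, relaxed RMW, barrier" recipe; sync_cmpxchg_demo is a hypothetical name, not kernel code:

/*
 * User-space C11 sketch of the recipe used by arch_sync_cmpxchg above:
 * a relaxed compare-and-swap bracketed by two full fences.
 */
#include <stdatomic.h>

static inline int sync_cmpxchg_demo(_Atomic int *ptr, int old, int new)
{
	atomic_thread_fence(memory_order_seq_cst);  /* __smp_mb__before_atomic() */
	atomic_compare_exchange_strong_explicit(ptr, &old, new,
						memory_order_relaxed,
						memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* __smp_mb__after_atomic() */
	return old;	/* the value actually observed at ptr */
}

On failure, compare_exchange_strong writes the observed value back into old, so returning old mirrors cmpxchg()'s convention of returning whatever was found at the pointer, whether or not the swap happened.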