author    John David Anglin <dave.anglin@bell.net>  2018-08-05 13:30:31 -0400
committer Helge Deller <deller@gmx.de>              2018-08-08 22:13:32 +0200
commit    fedb8da96355f5f64353625bf96dc69423ad1826 (patch)
tree      36c305623c2a88863801b2f6b5f407af392099eb  /arch/parisc/include/asm/barrier.h
parent    66509a276c8c1d19ee3f661a41b418d101c57d29 (diff)
parisc: Define mb() and add memory barriers to assembler unlock sequences
For years I thought all parisc machines executed loads and stores in
order. However, Jeff Law recently indicated on gcc-patches that this
is not correct. There are various degrees of out-of-order execution
all the way back to the PA7xxx processor series (hit-under-miss). The
PA8xxx series has full out-of-order execution for both integer
operations, and loads and stores.

This is described in the following article:
http://web.archive.org/web/20040214092531/http://www.cpus.hp.com/technical_references/advperf.shtml

For this reason, we need to define mb() and to insert a memory barrier
before the store unlocking spinlocks. This ensures that all memory
accesses are complete prior to unlocking. The ldcw instruction performs
the same function on entry.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # 4.0+
Signed-off-by: Helge Deller <deller@gmx.de>
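As context for the change below, the companion unlock paths gain an
mb() before the store that releases the lock. A simplified C-level
sketch of the lock/unlock pair (not the literal spinlock.h hunk from
this series; __ldcw_align() and __ldcw() are the existing parisc
helpers from <asm/ldcw.h>, and cpu_relax() comes from the processor
headers):

	/* Sketch: where the new mb() slots into the spinlock paths. */
	static inline void arch_spin_lock(arch_spinlock_t *x)
	{
		volatile unsigned int *a = __ldcw_align(x);

		/* ldcw atomically zeroes the lock word and orders all
		 * later accesses after it, so entry needs no extra sync. */
		while (__ldcw(a) == 0)
			while (*a == 0)
				cpu_relax();
	}

	static inline void arch_spin_unlock(arch_spinlock_t *x)
	{
		volatile unsigned int *a = __ldcw_align(x);

		/* New in this series: complete all prior loads and stores
		 * ("sync" on SMP) before the store releasing the lock
		 * becomes visible to other CPUs. */
		mb();
		*a = 1;
	}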
Diffstat (limited to 'arch/parisc/include/asm/barrier.h')
-rw-r--r--  arch/parisc/include/asm/barrier.h | 32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644
index 000000000000..dbaaca84f27f
--- /dev/null
+++ b/arch/parisc/include/asm/barrier.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+ which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb() do { synchronize_caches(); } while (0)
+#define rmb() mb()
+#define wmb() mb()
+#define dma_rmb() mb()
+#define dma_wmb() mb()
+#else
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define dma_rmb() barrier()
+#define dma_wmb() barrier()
+#endif
+
+#define __smp_mb() mb()
+#define __smp_rmb() mb()
+#define __smp_wmb() mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
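For illustration, a hypothetical pairing example using the macros
defined above (the names data, ready, producer and consumer are
invented for this sketch, not part of the patch): on an out-of-order
PA8xxx machine, the barriers keep a flag handoff between two CPUs
correct.

	static int data;
	static int ready;

	void producer(void)	/* runs on CPU 0 */
	{
		data = 42;
		wmb();		/* "sync" on SMP: data visible before ready */
		ready = 1;
	}

	int consumer(void)	/* runs on CPU 1 */
	{
		while (!ready)	/* real kernel code would use READ_ONCE() */
			;
		rmb();		/* pairs with the wmb(): data can't be read early */
		return data;
	}

Without the wmb()/rmb() pair, the consumer could observe ready == 1
yet still read a stale data value, which is exactly the class of
reordering the commit message describes.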