author     Brian Brooks <brian.brooks@linaro.org>  2018-07-25 16:08:19 -0500
committer  Daniel Borkmann <daniel@iogearbox.net>  2018-07-27 03:49:02 +0200
commit     598135e7444c121f11c8c16495ba1e6ab122678f (patch)
tree       6e13ac796fad3cad2a7a1662a8364c93e70bcf98 /samples/bpf
parent     08a852528e9678f0854af331f19747f2b2a73c06 (diff)
samples/bpf: xdpsock: order memory on AArch64
Define u_smp_rmb() and u_smp_wmb() as the respective barrier instructions. This ensures the processor orders accesses to the queue indices against accesses to the queue ring entries.

Signed-off-by: Brian Brooks <brian.brooks@linaro.org>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'samples/bpf')
-rw-r--r--  samples/bpf/xdpsock_user.c  5
1 file changed, 5 insertions, 0 deletions
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 5904b1543831..1e82f7c617c3 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -145,8 +145,13 @@ static void dump_stats(void);
} while (0)
#define barrier() __asm__ __volatile__("": : :"memory")
+#ifdef __aarch64__
+#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
+#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
+#else
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
+#endif
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
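
For context, a minimal, self-contained sketch (not part of the patch) of the producer/consumer ring pattern these barriers protect. The u_smp_rmb()/u_smp_wmb() definitions are copied from the diff above; struct my_ring and the ring_enqueue()/ring_dequeue() helpers are hypothetical placeholders, not the sample's actual queue code.

#include <stdint.h>

#define barrier() __asm__ __volatile__("": : :"memory")
#ifdef __aarch64__
#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
#else
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#endif

struct my_ring {
	volatile uint32_t producer;	/* advanced by the producer */
	volatile uint32_t consumer;	/* advanced by the consumer */
	uint64_t ring[256];		/* power-of-two sized descriptor array */
};

/* Consumer side: read the producer index, then the entries it covers. */
static inline int ring_dequeue(struct my_ring *r, uint64_t *desc)
{
	if (r->consumer == r->producer)
		return 0;			/* ring empty */

	/* Order the index load before the entry load
	 * (dmb ishld on AArch64, compiler barrier elsewhere). */
	u_smp_rmb();

	*desc = r->ring[r->consumer & 255];
	r->consumer++;
	return 1;
}

/* Producer side: fill the entry, then publish it by bumping the index
 * (full-ring check omitted for brevity). */
static inline void ring_enqueue(struct my_ring *r, uint64_t desc)
{
	r->ring[r->producer & 255] = desc;

	/* Order the entry store before the index store
	 * (dmb ishst on AArch64, compiler barrier elsewhere). */
	u_smp_wmb();

	r->producer++;
}

The asymmetry mirrors the patch: dmb ishld orders prior loads against later loads and stores, while the cheaper dmb ishst only orders stores against stores, which is all the producer side needs before publishing the new index.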