author:    Brendan Jackman <jackmanb@google.com>  2021-02-16 12:53:07 +0000
committer: Daniel Borkmann <daniel@iogearbox.net> 2021-02-22 18:03:11 +0100
commit:    b29dd96b905f3dd543f4ca729447286adf934dd6
tree:      3c54d592962495bd1a2638fccb8f4ffce5e0a94b /tools/testing/selftests/bpf/verifier
parent:    3a2eb515d1367c0f667b76089a6e727279c688b8
bpf, x86: Fix BPF_FETCH atomic and/or/xor with r0 as src
This code generates a CMPXCHG loop in order to implement atomic_fetch
bitwise operations. Because CMPXCHG is hard-coded to use rax (which holds
the BPF r0 value), it saves the _real_ r0 value into the internal "ax"
temporary register and restores it once the loop is complete.

In the middle of the loop, the actual bitwise operation is performed
using src_reg. The bug occurs when src_reg is r0: as described above, r0
has been clobbered and the real r0 value is in the ax register.

Therefore, perform this operation on the ax register instead, when
src_reg is r0.

Fixes: 981f94c3e921 ("bpf: Add bitwise atomic instructions")
Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: KP Singh <kpsingh@kernel.org>
Link: https://lore.kernel.org/bpf/20210216125307.1406237-1-jackmanb@google.com
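To make the mechanism concrete, here is a minimal C sketch of the logic
the JIT emits for BPF_AND | BPF_FETCH. This is illustrative only, not the
kernel's actual emitter in arch/x86/net/bpf_jit_comp.c; the function name
fetch_and_sketch and the use of the __sync builtin are stand-ins chosen
to keep the example self-contained:

	#include <stdint.h>

	/*
	 * Sketch of the CMPXCHG loop generated for
	 * r0 = atomic_fetch_and(addr, src).
	 * __sync_val_compare_and_swap() stands in for LOCK CMPXCHG, which
	 * implicitly uses rax -- the register that also backs BPF r0. The
	 * JIT therefore stashes the real r0 in its internal "ax" scratch
	 * register; the fix makes it read the bitwise operand from that
	 * saved copy, too, whenever src_reg is r0.
	 */
	static uint64_t fetch_and_sketch(uint64_t *addr, uint64_t src)
	{
		uint64_t old = *addr;       /* load lands in rax, clobbering r0 */

		for (;;) {
			uint64_t new_val = old & src;  /* src must be the *saved* r0 */
			uint64_t seen = __sync_val_compare_and_swap(addr, old, new_val);
			if (seen == old)
				break;      /* CMPXCHG succeeded */
			old = seen;         /* lost the race: retry with new old */
		}
		return old;                 /* BPF_FETCH result */
	}

Before the fix, a src_reg of r0 resolved to rax inside the loop, which by
then held the freshly loaded old value, so the JIT computed old & old
instead of old & r0.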
Diffstat (limited to 'tools/testing/selftests/bpf/verifier')
-rw-r--r--  tools/testing/selftests/bpf/verifier/atomic_and.c | 23
1 file changed, 23 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/verifier/atomic_and.c b/tools/testing/selftests/bpf/verifier/atomic_and.c
index 1bdc8e6684f7..fe4bb70eb9c5 100644
--- a/tools/testing/selftests/bpf/verifier/atomic_and.c
+++ b/tools/testing/selftests/bpf/verifier/atomic_and.c
@@ -75,3 +75,26 @@
},
.result = ACCEPT,
},
+{
+ "BPF_ATOMIC_AND with fetch - r0 as source reg",
+ .insns = {
+ /* val = 0x110; */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
+ /* old = atomic_fetch_and(&val, 0x011); */
+ BPF_MOV64_IMM(BPF_REG_0, 0x011),
+ BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_0, -8),
+ /* if (old != 0x110) exit(3); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x110, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ /* if (val != 0x010) exit(2); */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
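As I read it, the new test pins down exactly the case the fix addresses:
the AND operand 0x011 is placed in r0, which is then used as the src_reg
of the fetching atomic. The verifier accepts the program (.result =
ACCEPT), and test_verifier also runs accepted programs, expecting a zero
return value when no explicit .retval is given. On an unfixed x86 JIT,
the AND would be performed against the clobbered rax (holding the
just-loaded 0x110) rather than the saved 0x011, leaving val at 0x110 and
sending the program down the val-check failure path. (Note that the
exit(2) branch writes 2 into r1 rather than r0, but r0 is still 0x110
and thus nonzero at that point, so the failure is detected either way.)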