author		Xu Kuohai <xukuohai@huawei.com>		2022-03-21 11:28:51 -0400
committer	Daniel Borkmann <daniel@iogearbox.net>	2022-04-01 00:27:48 +0200
commit		f516420f683d147d8f4cfd83bbc7c3c6ad1c61b5 (patch)
tree		386121ba68e9330c55b61efc84d6fd385a2c468a
parent		5b3d19b9bd4080d7f5e260f91ce8f639e19eb499 (diff)
bpf, tests: Add tests for BPF_LDX/BPF_STX with different offsets
This patch adds tests to verify the behavior of BPF_LDX/BPF_STX +
BPF_B/BPF_H/BPF_W/BPF_DW with negative offset, small positive offset,
large positive offset, and misaligned offset.

Tested on both big-endian and little-endian arm64 qemu, result:

 test_bpf: Summary: 1026 PASSED, 0 FAILED, [1014/1014 JIT'ed]
 test_bpf: test_tail_calls: Summary: 8 PASSED, 0 FAILED, [8/8 JIT'ed]
 test_bpf: test_skb_segment: Summary: 2 PASSED, 0 FAILED

Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220321152852.2334294-5-xukuohai@huawei.com
-rw-r--r--	lib/test_bpf.c	285
1 files changed, 281 insertions, 4 deletions
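
Each of the added cases follows the same store/load/compare pattern: write a
known 64-bit value through R1 at the offset under test, read it back with the
matching BPF_LDX size, and return 0 only if the (possibly truncated) value
matches. For orientation, here is one of the new cases from the diff below
(BPF_W, negative offset) with editorial comments added; the comments are not
part of the patch:

	{
		"BPF_LDX_MEM | BPF_W, negative offset",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x8182838485868788ULL),	/* value to store */
			BPF_LD_IMM64(R3, 0x0000000085868788ULL),	/* expected 32-bit result */
			BPF_ALU64_IMM(BPF_ADD, R1, 512),	/* move R1 to the end of the 512-byte buffer */
			BPF_STX_MEM(BPF_W, R1, R2, -256),	/* store a word at R1 - 256 */
			BPF_LDX_MEM(BPF_W, R0, R1, -256),	/* load it back into R0 */
			BPF_JMP_REG(BPF_JNE, R0, R3, 1),	/* on mismatch, exit with the bad value in R0 */
			BPF_ALU64_IMM(BPF_MOV, R0, 0),
			BPF_EXIT_INSN(),
		},
		INTERNAL | FLAG_LARGE_MEM,	/* R1 points to a plain kmalloc'ed buffer, not an skb */
		{ },
		{ { 512, 0 } },	/* 512-byte data buffer, expected return value 0 */
		.stack_depth = 0,
	},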
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0c5cb2d6436a..aa0c7c68b2be 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -53,6 +53,7 @@
#define FLAG_EXPECTED_FAIL BIT(1)
#define FLAG_SKB_FRAG BIT(2)
#define FLAG_VERIFIER_ZEXT BIT(3)
+#define FLAG_LARGE_MEM BIT(4)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
@@ -7838,7 +7839,7 @@ static struct bpf_test tests[] = {
},
/* BPF_LDX_MEM B/H/W/DW */
{
- "BPF_LDX_MEM | BPF_B",
+ "BPF_LDX_MEM | BPF_B, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000008ULL),
@@ -7878,7 +7879,56 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
},
{
- "BPF_LDX_MEM | BPF_H",
+ "BPF_LDX_MEM | BPF_B, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_B, R1, R2, -256),
+ BPF_LDX_MEM(BPF_B, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 256),
+ BPF_LDX_MEM(BPF_B, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 4096),
+ BPF_LDX_MEM(BPF_B, R0, R1, 4096),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 4096 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000708ULL),
@@ -7918,7 +7968,72 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
},
{
- "BPF_LDX_MEM | BPF_W",
+ "BPF_LDX_MEM | BPF_H, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_H, R1, R2, -256),
+ BPF_LDX_MEM(BPF_H, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 256),
+ BPF_LDX_MEM(BPF_H, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 8192),
+ BPF_LDX_MEM(BPF_H, R0, R1, 8192),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 8192 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 13),
+ BPF_LDX_MEM(BPF_H, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000005060708ULL),
@@ -7957,6 +8072,162 @@ static struct bpf_test tests[] = {
{ { 0, 0 } },
.stack_depth = 8,
},
+ {
+ "BPF_LDX_MEM | BPF_W, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_W, R1, R2, -256),
+ BPF_LDX_MEM(BPF_W, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 256),
+ BPF_LDX_MEM(BPF_W, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 16384),
+ BPF_LDX_MEM(BPF_W, R0, R1, 16384),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 16384 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 13),
+ BPF_LDX_MEM(BPF_W, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_DW, R1, R2, -256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 32760),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 32760),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32768, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 13),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
/* BPF_STX_MEM B/H/W/DW */
{
"BPF_STX_MEM | BPF_B",
@@ -14094,6 +14365,9 @@ static void *generate_test_data(struct bpf_test *test, int sub)
if (test->aux & FLAG_NO_DATA)
return NULL;
+ if (test->aux & FLAG_LARGE_MEM)
+ return kmalloc(test->test[sub].data_size, GFP_KERNEL);
+
/* Test case expects an skb, so populate one. Various
* subtests generate skbs of different sizes based on
* the same data.
@@ -14137,7 +14411,10 @@ static void release_test_data(const struct bpf_test *test, void *data)
if (test->aux & FLAG_NO_DATA)
return;
- kfree_skb(data);
+ if (test->aux & FLAG_LARGE_MEM)
+ kfree(data);
+ else
+ kfree_skb(data);
}
static int filter_length(int which)