author    Jakub Kicinski <kuba@kernel.org>    2021-11-01 19:59:45 -0700
committer Jakub Kicinski <kuba@kernel.org>    2021-11-01 19:59:46 -0700
commit    b7b98f868987cd3e86c9bd9a6db048614933d7a0 (patch)
tree      8651e9d44726348ea56692d988b26c273e129c7a /lib
parent    52fa3ee0cce60a04739f4a5ca1c9d5c2a8ee1578 (diff)
parent    0b170456e0dda92b8925d40e217461fcc4e1efc9 (diff)
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-11-01

We've added 181 non-merge commits during the last 28 day(s) which contain
a total of 280 files changed, 11791 insertions(+), 5879 deletions(-).

The main changes are:

1) Fix bpf verifier propagation of 64-bit bounds, from Alexei.
2) Parallelize bpf test_progs, from Yucong and Andrii.
3) Deprecate various libbpf apis including af_xdp, from Andrii, Hengqi, Magnus.
4) Improve bpf selftests on s390, from Ilya.
5) bloomfilter bpf map type, from Joanne.
6) Big improvements to JIT tests especially on Mips, from Johan.
7) Support kernel module function calls from bpf, from Kumar.
8) Support typeless and weak ksym in light skeleton, from Kumar.
9) Disallow unprivileged bpf by default, from Pawan.
10) BTF_KIND_DECL_TAG support, from Yonghong.
11) Various bpftool cleanups, from Quentin.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (181 commits)
  libbpf: Deprecate AF_XDP support
  kbuild: Unify options for BTF generation for vmlinux and modules
  selftests/bpf: Add a testcase for 64-bit bounds propagation issue.
  bpf: Fix propagation of signed bounds from 64-bit min/max into 32-bit.
  bpf: Fix propagation of bounds from 64-bit min/max into 32-bit and var_off.
  selftests/bpf: Fix also no-alu32 strobemeta selftest
  bpf: Add missing map_delete_elem method to bloom filter map
  selftests/bpf: Add bloom map success test for userspace calls
  bpf: Add alignment padding for "map_extra" + consolidate holes
  bpf: Bloom filter map naming fixups
  selftests/bpf: Add test cases for struct_ops prog
  bpf: Add dummy BPF STRUCT_OPS for test purpose
  bpf: Factor out helpers for ctx access checking
  bpf: Factor out a helper to prepare trampoline for struct_ops prog
  selftests, bpf: Fix broken riscv build
  riscv, libbpf: Add RISC-V (RV64) support to bpf_tracing.h
  tools, build: Add RISC-V to HOSTARCH parsing
  riscv, bpf: Increase the maximum number of iterations
  selftests, bpf: Add one test for sockmap with strparser
  selftests, bpf: Fix test_txmsg_ingress_parser error
  ...
====================

Link: https://lore.kernel.org/r/20211102013123.9005-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/test_bpf.c  332
1 files changed, 252 insertions, 80 deletions
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index b9fc330fc83b..adae39567264 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -2134,7 +2134,7 @@ static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self)
* of the immediate value. This is often the case if the native instruction
* immediate field width is narrower than 32 bits.
*/
-static int bpf_fill_ld_imm64(struct bpf_test *self)
+static int bpf_fill_ld_imm64_magn(struct bpf_test *self)
{
int block = 64; /* Increase for more tests per MSB position */
int len = 3 + 8 * 63 * block * 2;
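
As the comment above notes, a JIT whose native immediate field is narrower
than the BPF immediate must synthesize the constant from several native
instructions. A rough userspace sketch of such a piecewise construction
(purely illustrative; the 16-bit piece width and helper name are assumptions,
not taken from this patch):

    #include <stdint.h>

    /* Illustrative only: rebuild a 64-bit immediate from 16-bit pieces,
     * mimicking the load-upper/shift/or sequence a JIT with 16-bit native
     * immediates might emit for BPF_LD | BPF_IMM | BPF_DW.
     */
    static uint64_t build_imm64_from_pieces(uint64_t imm)
    {
            uint64_t v = (imm >> 48) & 0xffff;              /* topmost piece */

            v = (v << 16) | ((imm >> 32) & 0xffff);         /* shift, or in next */
            v = (v << 16) | ((imm >> 16) & 0xffff);
            v = (v << 16) | (imm & 0xffff);
            return v;                                       /* equals imm */
    }

    int main(void)
    {
            return build_imm64_from_pieces(0x0123456789abcdefULL) ==
                   0x0123456789abcdefULL ? 0 : 1;
    }

The magnitude test below sweeps the MSB position of the immediate so that
every such piece boundary gets exercised.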
@@ -2181,6 +2181,88 @@ static int bpf_fill_ld_imm64(struct bpf_test *self)
}
/*
+ * Test the two-instruction 64-bit immediate load operation for different
+ * combinations of bytes. Each byte in the 64-bit word is constructed as
+ * (base & mask) | (rand() & ~mask), where rand() is a deterministic LCG.
+ * All patterns (base1, mask1) and (base2, mask2) bytes are tested.
+ */
+static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self,
+ u8 base1, u8 mask1,
+ u8 base2, u8 mask2)
+{
+ struct bpf_insn *insn;
+ int len = 3 + 8 * BIT(8);
+ int pattern, index;
+ u32 rand = 1;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (pattern = 0; pattern < BIT(8); pattern++) {
+ u64 imm = 0;
+
+ for (index = 0; index < 8; index++) {
+ int byte;
+
+ if (pattern & BIT(index))
+ byte = (base1 & mask1) | (rand & ~mask1);
+ else
+ byte = (base2 & mask2) | (rand & ~mask2);
+ imm = (imm << 8) | byte;
+ }
+
+ /* Update our LCG */
+ rand = rand * 1664525 + 1013904223;
+
+ /* Perform operation */
+ i += __bpf_ld_imm64(&insn[i], R1, imm);
+
+ /* Load reference */
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, (u32)(imm >> 32));
+ insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
+ insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);
+
+ /* Check result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+static int bpf_fill_ld_imm64_checker(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 0, 0xff, 0xff, 0xff);
+}
+
+static int bpf_fill_ld_imm64_pos_neg(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0x80, 0x80);
+}
+
+static int bpf_fill_ld_imm64_pos_zero(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0, 0xff);
+}
+
+static int bpf_fill_ld_imm64_neg_zero(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 0x80, 0x80, 0, 0xff);
+}
+
+/*
* Exhaustive tests of JMP operations for all combinations of power-of-two
* magnitudes of the operands, both for positive and negative values. The
* test is designed to verify e.g. the JMP and JMP32 operations for JITs that
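
To make the byte-pattern construction in __bpf_fill_ld_imm64_bytes() above
concrete, here is a minimal userspace sketch (not part of the patch),
instantiated with the bpf_fill_ld_imm64_pos_neg() parameters base1 = 1,
mask1 = 0x81, base2 = 0x80, mask2 = 0x80; the uint8_t truncation stands in
for the kernel helper's byte handling:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t rand = 1;      /* same LCG seed as the patch */
            int pattern, index;

            for (pattern = 0; pattern < 256; pattern++) {
                    uint64_t imm = 0;

                    for (index = 0; index < 8; index++) {
                            uint8_t byte;

                            if (pattern & (1 << index))
                                    byte = (1 & 0x81) | (rand & ~0x81);    /* sign bit clear */
                            else
                                    byte = (0x80 & 0x80) | (rand & ~0x80); /* sign bit set */
                            imm = (imm << 8) | byte;
                    }
                    /* Same LCG constants as the patch */
                    rand = rand * 1664525U + 1013904223U;

                    /* Mirror the MOV32/MOV32/LSH/OR reference sequence:
                     * rebuilding imm from its 32-bit halves must be exact.
                     */
                    uint64_t lo = (uint32_t)imm;
                    uint64_t hi = (uint32_t)(imm >> 32);

                    if (((hi << 32) | lo) != imm)
                            return 1;

                    printf("%3d: 0x%016llx\n", pattern, (unsigned long long)imm);
            }
            return 0;
    }

Each pattern thus mixes bytes with the sign bit clear and bytes with the
sign bit set, the kind of immediate most likely to expose sign-extension
mistakes in a JIT's constant loading.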
@@ -12401,14 +12483,46 @@ static struct bpf_test tests[] = {
.fill_helper = bpf_fill_alu32_mod_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
- /* LD_IMM64 immediate magnitudes */
+ /* LD_IMM64 immediate magnitudes and byte patterns */
{
"LD_IMM64: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
- .fill_helper = bpf_fill_ld_imm64,
+ .fill_helper = bpf_fill_ld_imm64_magn,
+ },
+ {
+ "LD_IMM64: checker byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_checker,
+ },
+ {
+ "LD_IMM64: random positive and zero byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_pos_zero,
+ },
+ {
+ "LD_IMM64: random negative and zero byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_neg_zero,
+ },
+ {
+ "LD_IMM64: random positive and negative byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_pos_neg,
},
/* 64-bit ATOMIC register combinations */
{
@@ -14202,72 +14316,9 @@ module_param_string(test_name, test_name, sizeof(test_name), 0);
static int test_id = -1;
module_param(test_id, int, 0);
-static int test_range[2] = { 0, ARRAY_SIZE(tests) - 1 };
+static int test_range[2] = { 0, INT_MAX };
module_param_array(test_range, int, NULL, 0);
-static __init int find_test_index(const char *test_name)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (!strcmp(tests[i].descr, test_name))
- return i;
- }
- return -1;
-}
-
-static __init int prepare_bpf_tests(void)
-{
- if (test_id >= 0) {
- /*
- * if a test_id was specified, use test_range to
- * cover only that test.
- */
- if (test_id >= ARRAY_SIZE(tests)) {
- pr_err("test_bpf: invalid test_id specified.\n");
- return -EINVAL;
- }
-
- test_range[0] = test_id;
- test_range[1] = test_id;
- } else if (*test_name) {
- /*
- * if a test_name was specified, find it and setup
- * test_range to cover only that test.
- */
- int idx = find_test_index(test_name);
-
- if (idx < 0) {
- pr_err("test_bpf: no test named '%s' found.\n",
- test_name);
- return -EINVAL;
- }
- test_range[0] = idx;
- test_range[1] = idx;
- } else {
- /*
- * check that the supplied test_range is valid.
- */
- if (test_range[0] >= ARRAY_SIZE(tests) ||
- test_range[1] >= ARRAY_SIZE(tests) ||
- test_range[0] < 0 || test_range[1] < 0) {
- pr_err("test_bpf: test_range is out of bound.\n");
- return -EINVAL;
- }
-
- if (test_range[1] < test_range[0]) {
- pr_err("test_bpf: test_range is ending before it starts.\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static __init void destroy_bpf_tests(void)
-{
-}
-
static bool exclude_test(int test_id)
{
return test_id < test_range[0] || test_id > test_range[1];
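
Because the default range is now { 0, INT_MAX } rather than being tied to
ARRAY_SIZE(tests), the same exclude_test() gate can be reused verbatim by
all three suite loops, whatever their lengths. A standalone sketch of the
two pieces above (illustrative only):

    #include <limits.h>
    #include <stdio.h>

    static int test_range[2] = { 0, INT_MAX };

    static int exclude_test(int test_id)
    {
            return test_id < test_range[0] || test_id > test_range[1];
    }

    int main(void)
    {
            /* Defaults exclude nothing: prints "0 0" */
            printf("%d %d\n", exclude_test(0), exclude_test(12345));

            test_range[0] = 2;      /* e.g. test_range=2,4 at load time */
            test_range[1] = 4;
            /* Prints "1 0 1": only ids 2..4 run */
            printf("%d %d %d\n", exclude_test(1), exclude_test(3), exclude_test(5));
            return 0;
    }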
@@ -14439,6 +14490,10 @@ static __init int test_skb_segment(void)
for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
const struct skb_segment_test *test = &skb_segment_tests[i];
+ cond_resched();
+ if (exclude_test(i))
+ continue;
+
pr_info("#%d %s ", i, test->descr);
if (test_skb_segment_single(test)) {
@@ -14820,6 +14875,8 @@ static __init int test_tail_calls(struct bpf_array *progs)
int ret;
cond_resched();
+ if (exclude_test(i))
+ continue;
pr_info("#%d %s ", i, test->descr);
if (!fp) {
@@ -14852,29 +14909,144 @@ static __init int test_tail_calls(struct bpf_array *progs)
return err_cnt ? -EINVAL : 0;
}
+static char test_suite[32];
+module_param_string(test_suite, test_suite, sizeof(test_suite), 0);
+
+static __init int find_test_index(const char *test_name)
+{
+ int i;
+
+ if (!strcmp(test_suite, "test_bpf")) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!strcmp(tests[i].descr, test_name))
+ return i;
+ }
+ }
+
+ if (!strcmp(test_suite, "test_tail_calls")) {
+ for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
+ if (!strcmp(tail_call_tests[i].descr, test_name))
+ return i;
+ }
+ }
+
+ if (!strcmp(test_suite, "test_skb_segment")) {
+ for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
+ if (!strcmp(skb_segment_tests[i].descr, test_name))
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static __init int prepare_test_range(void)
+{
+ int valid_range;
+
+ if (!strcmp(test_suite, "test_bpf"))
+ valid_range = ARRAY_SIZE(tests);
+ else if (!strcmp(test_suite, "test_tail_calls"))
+ valid_range = ARRAY_SIZE(tail_call_tests);
+ else if (!strcmp(test_suite, "test_skb_segment"))
+ valid_range = ARRAY_SIZE(skb_segment_tests);
+ else
+ return 0;
+
+ if (test_id >= 0) {
+ /*
+ * if a test_id was specified, use test_range to
+ * cover only that test.
+ */
+ if (test_id >= valid_range) {
+ pr_err("test_bpf: invalid test_id specified for '%s' suite.\n",
+ test_suite);
+ return -EINVAL;
+ }
+
+ test_range[0] = test_id;
+ test_range[1] = test_id;
+ } else if (*test_name) {
+ /*
+ * if a test_name was specified, find it and setup
+ * test_range to cover only that test.
+ */
+ int idx = find_test_index(test_name);
+
+ if (idx < 0) {
+ pr_err("test_bpf: no test named '%s' found for '%s' suite.\n",
+ test_name, test_suite);
+ return -EINVAL;
+ }
+ test_range[0] = idx;
+ test_range[1] = idx;
+ } else if (test_range[0] != 0 || test_range[1] != INT_MAX) {
+ /*
+ * check that the supplied test_range is valid.
+ */
+ if (test_range[0] < 0 || test_range[1] >= valid_range) {
+ pr_err("test_bpf: test_range is out of bound for '%s' suite.\n",
+ test_suite);
+ return -EINVAL;
+ }
+
+ if (test_range[1] < test_range[0]) {
+ pr_err("test_bpf: test_range is ending before it starts.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int __init test_bpf_init(void)
{
struct bpf_array *progs = NULL;
int ret;
- ret = prepare_bpf_tests();
+ if (strlen(test_suite) &&
+ strcmp(test_suite, "test_bpf") &&
+ strcmp(test_suite, "test_tail_calls") &&
+ strcmp(test_suite, "test_skb_segment")) {
+ pr_err("test_bpf: invalid test_suite '%s' specified.\n", test_suite);
+ return -EINVAL;
+ }
+
+ /*
+ * if test_suite is not specified, but test_id, test_name or test_range
+ * is specified, set 'test_bpf' as the default test suite.
+ */
+ if (!strlen(test_suite) &&
+ (test_id != -1 || strlen(test_name) ||
+ (test_range[0] != 0 || test_range[1] != INT_MAX))) {
+ pr_info("test_bpf: set 'test_bpf' as the default test_suite.\n");
+ strscpy(test_suite, "test_bpf", sizeof(test_suite));
+ }
+
+ ret = prepare_test_range();
if (ret < 0)
return ret;
- ret = test_bpf();
- destroy_bpf_tests();
- if (ret)
- return ret;
+ if (!strlen(test_suite) || !strcmp(test_suite, "test_bpf")) {
+ ret = test_bpf();
+ if (ret)
+ return ret;
+ }
- ret = prepare_tail_call_tests(&progs);
- if (ret)
- return ret;
- ret = test_tail_calls(progs);
- destroy_tail_call_tests(progs);
- if (ret)
- return ret;
+ if (!strlen(test_suite) || !strcmp(test_suite, "test_tail_calls")) {
+ ret = prepare_tail_call_tests(&progs);
+ if (ret)
+ return ret;
+ ret = test_tail_calls(progs);
+ destroy_tail_call_tests(progs);
+ if (ret)
+ return ret;
+ }
+
+ if (!strlen(test_suite) || !strcmp(test_suite, "test_skb_segment"))
+ return test_skb_segment();
- return test_skb_segment();
+ return 0;
}
static void __exit test_bpf_exit(void)
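
With test_suite in place, individual suites and tests can be selected when
the module is loaded. A usage sketch (assuming test_bpf is built as a
module; the parameter names are those defined above):

    # run only the tail call suite
    modprobe test_bpf test_suite=test_tail_calls

    # run a single test by id; test_suite defaults to 'test_bpf'
    modprobe test_bpf test_id=5

    # run a sub-range of the skb segmentation tests
    modprobe test_bpf test_suite=test_skb_segment test_range=0,3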