Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool.rst | 2
-rw-r--r--  tools/include/uapi/linux/bpf.h | 29
-rw-r--r--  tools/include/uapi/linux/netdev.h | 36
-rw-r--r--  tools/lib/bpf/libbpf_common.h | 13
-rw-r--r--  tools/net/ynl/Makefile | 2
-rw-r--r--  tools/net/ynl/generated/netdev-user.c | 419
-rw-r--r--  tools/net/ynl/generated/netdev-user.h | 171
-rw-r--r--  tools/net/ynl/lib/ynl.h | 2
-rw-r--r--  tools/net/ynl/samples/.gitignore | 1
-rw-r--r--  tools/net/ynl/samples/Makefile | 4
-rw-r--r--  tools/net/ynl/samples/page-pool.c | 147
-rwxr-xr-x  tools/net/ynl/ynl-gen-c.py | 77
-rwxr-xr-x  tools/net/ynl/ynl-gen-rst.py | 388
-rwxr-xr-x  tools/net/ynl/ynl-regen.sh | 4
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.c | 116
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.h | 4
-rw-r--r--  tools/testing/selftests/bpf/config.aarch64 | 17
-rw-r--r--  tools/testing/selftests/bpf/config.s390x | 9
-rw-r--r--  tools/testing/selftests/bpf/config.vm | 12
-rw-r--r--  tools/testing/selftests/bpf/config.x86_64 | 12
-rw-r--r--  tools/testing/selftests/bpf/map_tests/map_percpu_stats.c | 39
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/align.c | 42
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bind_perm.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 87
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c | 204
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 48
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf.c | 1
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c | 158
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c | 33
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/log_buf.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/reg_bounds.c | 2124
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/spin_lock.c | 14
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tc_opts.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/vmlinux.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c | 5
-rw-r--r--  tools/testing/selftests/bpf/progs/exceptions_assert.c | 40
-rw-r--r--  tools/testing/selftests/bpf/progs/iters.c | 22
-rw-r--r--  tools/testing/selftests/bpf/progs/local_kptr_stash.c | 71
-rw-r--r--  tools/testing/selftests/bpf/progs/pyperf180.c | 22
-rw-r--r--  tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c | 19
-rw-r--r--  tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c | 71
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bounds.c | 2
-rw-r--r--  tools/testing/selftests/bpf/test_loader.c | 35
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c | 17
-rw-r--r--  tools/testing/selftests/bpf/test_maps.h | 5
-rw-r--r--  tools/testing/selftests/bpf/test_sock_addr.c | 2
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 2
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.c | 4
-rw-r--r--  tools/testing/selftests/bpf/veristat.c | 89
-rwxr-xr-x  tools/testing/selftests/bpf/vmtest.sh | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/pci_reset.sh | 58
-rw-r--r--  tools/testing/selftests/net/Makefile | 1
-rw-r--r--  tools/testing/selftests/net/cmsg_sender.c | 50
-rwxr-xr-x  tools/testing/selftests/net/fq_band_pktlimit.sh | 57
-rwxr-xr-x  tools/testing/selftests/net/mptcp/diag.sh | 23
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_connect.sh | 110
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh | 376
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_lib.sh | 91
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_sockopt.sh | 39
-rwxr-xr-x  tools/testing/selftests/net/mptcp/simult_flows.sh | 19
-rwxr-xr-x  tools/testing/selftests/net/mptcp/userspace_pm.sh | 143
-rwxr-xr-x  tools/testing/selftests/net/net_helper.sh | 22
-rwxr-xr-x  tools/testing/selftests/net/udpgro.sh | 13
-rwxr-xr-x  tools/testing/selftests/net/udpgro_bench.sh | 5
-rwxr-xr-x  tools/testing/selftests/net/udpgro_frglist.sh | 5
-rw-r--r--  tools/testing/selftests/tc-testing/Makefile | 29
-rw-r--r--  tools/testing/selftests/tc-testing/README | 2
-rw-r--r--  tools/testing/selftests/tc-testing/action-ebpf | bin 0 -> 856 bytes
-rw-r--r--  tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py | 67
-rw-r--r--  tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py | 210
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json | 14
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json | 10
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/filters/u32.json | 57
-rwxr-xr-x  tools/testing/selftests/tc-testing/tdc.py | 14
-rwxr-xr-x  tools/testing/selftests/tc-testing/tdc.sh | 69
77 files changed, 5061 insertions, 1084 deletions
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 6965c94dfdaf..09e4f2ff5658 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -20,7 +20,7 @@ SYNOPSIS
**bpftool** **version**
- *OBJECT* := { **map** | **program** | **link** | **cgroup** | **perf** | **net** | **feature** |
+ *OBJECT* := { **map** | **prog** | **link** | **cgroup** | **perf** | **net** | **feature** |
**btf** | **gen** | **struct_ops** | **iter** }
*OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| }
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 0f6cdf52b1da..7a5498242eaa 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1200,6 +1200,9 @@ enum bpf_perf_event_type {
*/
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
+/* The verifier internal test flag. Behavior is undefined */
+#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
+
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
@@ -4517,6 +4520,8 @@ union bpf_attr {
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
+ * Note: the user stack will only be populated if the *task* is
+ * the current task; all other tasks will return -EOPNOTSUPP.
* To achieve this, the helper needs *task*, which is a valid
* pointer to **struct task_struct**. To store the stacktrace, the
* bpf program provides *buf* with a nonnegative *size*.
@@ -4528,6 +4533,7 @@ union bpf_attr {
*
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
+ * The *task* must be the current task.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
@@ -7151,40 +7157,31 @@ struct bpf_spin_lock {
};
struct bpf_timer {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_head {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_node {
- __u64 :64;
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[3];
} __attribute__((aligned(8)));
struct bpf_rb_root {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_rb_node {
- __u64 :64;
- __u64 :64;
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[4];
} __attribute__((aligned(8)));
struct bpf_refcount {
- __u32 :32;
+ __u32 __opaque[1];
} __attribute__((aligned(4)));
struct bpf_sysctl {
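
The helper documentation added above constrains BPF_F_USER_STACK to the current task. Below is a minimal sketch of a task-iterator program respecting that rule; it is not part of this patch and is only loosely modeled on the bpf_iter_task_stack selftest touched later in the series.

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch, not part of the patch: per the documentation above, user
 * stacks from bpf_get_task_stack() are only available for the current task;
 * other tasks return -EOPNOTSUPP.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

__u64 entries[32];

SEC("iter/task")
int dump_task_stack(struct bpf_iter__task *ctx)
{
	struct task_struct *task = ctx->task;
	long ret;

	if (!task)
		return 0;

	/* kernel stack: works for any task */
	ret = bpf_get_task_stack(task, entries, sizeof(entries), 0);
	if (ret < 0)
		return 0;

	/* user stack: only valid when *task* is the current task */
	if (task == bpf_get_current_task_btf())
		bpf_get_task_stack(task, entries, sizeof(entries),
				   BPF_F_USER_STACK);
	return 0;
}
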
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index 2943a151d4f1..2b37233e00c0 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -65,15 +65,51 @@ enum {
};
enum {
+ NETDEV_A_PAGE_POOL_ID = 1,
+ NETDEV_A_PAGE_POOL_IFINDEX,
+ NETDEV_A_PAGE_POOL_NAPI_ID,
+ NETDEV_A_PAGE_POOL_INFLIGHT,
+ NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
+ NETDEV_A_PAGE_POOL_DETACH_TIME,
+
+ __NETDEV_A_PAGE_POOL_MAX,
+ NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
+};
+
+enum {
+ NETDEV_A_PAGE_POOL_STATS_INFO = 1,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
+
+ __NETDEV_A_PAGE_POOL_STATS_MAX,
+ NETDEV_A_PAGE_POOL_STATS_MAX = (__NETDEV_A_PAGE_POOL_STATS_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
NETDEV_CMD_DEV_CHANGE_NTF,
+ NETDEV_CMD_PAGE_POOL_GET,
+ NETDEV_CMD_PAGE_POOL_ADD_NTF,
+ NETDEV_CMD_PAGE_POOL_DEL_NTF,
+ NETDEV_CMD_PAGE_POOL_CHANGE_NTF,
+ NETDEV_CMD_PAGE_POOL_STATS_GET,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
};
#define NETDEV_MCGRP_MGMT "mgmt"
+#define NETDEV_MCGRP_PAGE_POOL "page-pool"
#endif /* _UAPI_LINUX_NETDEV_H */
diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h
index b7060f254486..8fe248e14eb6 100644
--- a/tools/lib/bpf/libbpf_common.h
+++ b/tools/lib/bpf/libbpf_common.h
@@ -79,11 +79,14 @@
*/
#define LIBBPF_OPTS_RESET(NAME, ...) \
do { \
- memset(&NAME, 0, sizeof(NAME)); \
- NAME = (typeof(NAME)) { \
- .sz = sizeof(NAME), \
- __VA_ARGS__ \
- }; \
+ typeof(NAME) ___##NAME = ({ \
+ memset(&___##NAME, 0, sizeof(NAME)); \
+ (typeof(NAME)) { \
+ .sz = sizeof(NAME), \
+ __VA_ARGS__ \
+ }; \
+ }); \
+ memcpy(&NAME, &___##NAME, sizeof(NAME)); \
} while (0)
#endif /* __LIBBPF_LIBBPF_COMMON_H */
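
Usage of the macro is unchanged by the rework above: the initializer is built in a scratch copy and then memcpy()ed over NAME. A hedged sketch of typical reuse of one opts struct across calls (the cgroup fd and attach type are placeholders, not taken from this patch):

#include <bpf/bpf.h>

static void query_cgroup_progs(int cgroup_fd)
{
	LIBBPF_OPTS(bpf_prog_query_opts, opts);

	/* first query with default options */
	bpf_prog_query_opts(cgroup_fd, BPF_CGROUP_INET_INGRESS, &opts);

	/* zero everything, restore .sz, then set only the listed fields */
	LIBBPF_OPTS_RESET(opts, .query_flags = BPF_F_QUERY_EFFECTIVE);
	bpf_prog_query_opts(cgroup_fd, BPF_CGROUP_INET_INGRESS, &opts);
}
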
diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile
index d664b36deb5b..da1aa10bbcc3 100644
--- a/tools/net/ynl/Makefile
+++ b/tools/net/ynl/Makefile
@@ -4,6 +4,8 @@ SUBDIRS = lib generated samples
all: $(SUBDIRS)
+samples: | lib generated
+
$(SUBDIRS):
@if [ -f "$@/Makefile" ] ; then \
$(MAKE) -C $@ ; \
diff --git a/tools/net/ynl/generated/netdev-user.c b/tools/net/ynl/generated/netdev-user.c
index b5ffe8cd1144..a7b7019d00f1 100644
--- a/tools/net/ynl/generated/netdev-user.c
+++ b/tools/net/ynl/generated/netdev-user.c
@@ -18,6 +18,11 @@ static const char * const netdev_op_strmap[] = {
[NETDEV_CMD_DEV_ADD_NTF] = "dev-add-ntf",
[NETDEV_CMD_DEV_DEL_NTF] = "dev-del-ntf",
[NETDEV_CMD_DEV_CHANGE_NTF] = "dev-change-ntf",
+ [NETDEV_CMD_PAGE_POOL_GET] = "page-pool-get",
+ [NETDEV_CMD_PAGE_POOL_ADD_NTF] = "page-pool-add-ntf",
+ [NETDEV_CMD_PAGE_POOL_DEL_NTF] = "page-pool-del-ntf",
+ [NETDEV_CMD_PAGE_POOL_CHANGE_NTF] = "page-pool-change-ntf",
+ [NETDEV_CMD_PAGE_POOL_STATS_GET] = "page-pool-stats-get",
};
const char *netdev_op_str(int op)
@@ -59,6 +64,16 @@ const char *netdev_xdp_rx_metadata_str(enum netdev_xdp_rx_metadata value)
}
/* Policies */
+struct ynl_policy_attr netdev_page_pool_info_policy[NETDEV_A_PAGE_POOL_MAX + 1] = {
+ [NETDEV_A_PAGE_POOL_ID] = { .name = "id", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest netdev_page_pool_info_nest = {
+ .max_attr = NETDEV_A_PAGE_POOL_MAX,
+ .table = netdev_page_pool_info_policy,
+};
+
struct ynl_policy_attr netdev_dev_policy[NETDEV_A_DEV_MAX + 1] = {
[NETDEV_A_DEV_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
[NETDEV_A_DEV_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
@@ -72,7 +87,85 @@ struct ynl_policy_nest netdev_dev_nest = {
.table = netdev_dev_policy,
};
+struct ynl_policy_attr netdev_page_pool_policy[NETDEV_A_PAGE_POOL_MAX + 1] = {
+ [NETDEV_A_PAGE_POOL_ID] = { .name = "id", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
+ [NETDEV_A_PAGE_POOL_NAPI_ID] = { .name = "napi-id", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_INFLIGHT] = { .name = "inflight", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_INFLIGHT_MEM] = { .name = "inflight-mem", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_DETACH_TIME] = { .name = "detach-time", .type = YNL_PT_UINT, },
+};
+
+struct ynl_policy_nest netdev_page_pool_nest = {
+ .max_attr = NETDEV_A_PAGE_POOL_MAX,
+ .table = netdev_page_pool_policy,
+};
+
+struct ynl_policy_attr netdev_page_pool_stats_policy[NETDEV_A_PAGE_POOL_STATS_MAX + 1] = {
+ [NETDEV_A_PAGE_POOL_STATS_INFO] = { .name = "info", .type = YNL_PT_NEST, .nest = &netdev_page_pool_info_nest, },
+ [NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST] = { .name = "alloc-fast", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW] = { .name = "alloc-slow", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER] = { .name = "alloc-slow-high-order", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY] = { .name = "alloc-empty", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL] = { .name = "alloc-refill", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE] = { .name = "alloc-waive", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED] = { .name = "recycle-cached", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL] = { .name = "recycle-cache-full", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING] = { .name = "recycle-ring", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL] = { .name = "recycle-ring-full", .type = YNL_PT_UINT, },
+ [NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT] = { .name = "recycle-released-refcnt", .type = YNL_PT_UINT, },
+};
+
+struct ynl_policy_nest netdev_page_pool_stats_nest = {
+ .max_attr = NETDEV_A_PAGE_POOL_STATS_MAX,
+ .table = netdev_page_pool_stats_policy,
+};
+
/* Common nested types */
+void netdev_page_pool_info_free(struct netdev_page_pool_info *obj)
+{
+}
+
+int netdev_page_pool_info_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct netdev_page_pool_info *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ if (obj->_present.id)
+ mnl_attr_put_uint(nlh, NETDEV_A_PAGE_POOL_ID, obj->id);
+ if (obj->_present.ifindex)
+ mnl_attr_put_u32(nlh, NETDEV_A_PAGE_POOL_IFINDEX, obj->ifindex);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int netdev_page_pool_info_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct netdev_page_pool_info *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == NETDEV_A_PAGE_POOL_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.id = 1;
+ dst->id = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_IFINDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ifindex = 1;
+ dst->ifindex = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return 0;
+}
+
/* ============== NETDEV_CMD_DEV_GET ============== */
/* NETDEV_CMD_DEV_GET - do */
void netdev_dev_get_req_free(struct netdev_dev_get_req *req)
@@ -197,6 +290,314 @@ void netdev_dev_get_ntf_free(struct netdev_dev_get_ntf *rsp)
free(rsp);
}
+/* ============== NETDEV_CMD_PAGE_POOL_GET ============== */
+/* NETDEV_CMD_PAGE_POOL_GET - do */
+void netdev_page_pool_get_req_free(struct netdev_page_pool_get_req *req)
+{
+ free(req);
+}
+
+void netdev_page_pool_get_rsp_free(struct netdev_page_pool_get_rsp *rsp)
+{
+ free(rsp);
+}
+
+int netdev_page_pool_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct netdev_page_pool_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == NETDEV_A_PAGE_POOL_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.id = 1;
+ dst->id = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_IFINDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ifindex = 1;
+ dst->ifindex = mnl_attr_get_u32(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_NAPI_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.napi_id = 1;
+ dst->napi_id = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_INFLIGHT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.inflight = 1;
+ dst->inflight = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_INFLIGHT_MEM) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.inflight_mem = 1;
+ dst->inflight_mem = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_DETACH_TIME) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.detach_time = 1;
+ dst->detach_time = mnl_attr_get_uint(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct netdev_page_pool_get_rsp *
+netdev_page_pool_get(struct ynl_sock *ys, struct netdev_page_pool_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct netdev_page_pool_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, NETDEV_CMD_PAGE_POOL_GET, 1);
+ ys->req_policy = &netdev_page_pool_nest;
+ yrs.yarg.rsp_policy = &netdev_page_pool_nest;
+
+ if (req->_present.id)
+ mnl_attr_put_uint(nlh, NETDEV_A_PAGE_POOL_ID, req->id);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = netdev_page_pool_get_rsp_parse;
+ yrs.rsp_cmd = NETDEV_CMD_PAGE_POOL_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ netdev_page_pool_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* NETDEV_CMD_PAGE_POOL_GET - dump */
+void netdev_page_pool_get_list_free(struct netdev_page_pool_get_list *rsp)
+{
+ struct netdev_page_pool_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp);
+ }
+}
+
+struct netdev_page_pool_get_list *
+netdev_page_pool_get_dump(struct ynl_sock *ys)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct netdev_page_pool_get_list);
+ yds.cb = netdev_page_pool_get_rsp_parse;
+ yds.rsp_cmd = NETDEV_CMD_PAGE_POOL_GET;
+ yds.rsp_policy = &netdev_page_pool_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, NETDEV_CMD_PAGE_POOL_GET, 1);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ netdev_page_pool_get_list_free(yds.first);
+ return NULL;
+}
+
+/* NETDEV_CMD_PAGE_POOL_GET - notify */
+void netdev_page_pool_get_ntf_free(struct netdev_page_pool_get_ntf *rsp)
+{
+ free(rsp);
+}
+
+/* ============== NETDEV_CMD_PAGE_POOL_STATS_GET ============== */
+/* NETDEV_CMD_PAGE_POOL_STATS_GET - do */
+void
+netdev_page_pool_stats_get_req_free(struct netdev_page_pool_stats_get_req *req)
+{
+ netdev_page_pool_info_free(&req->info);
+ free(req);
+}
+
+void
+netdev_page_pool_stats_get_rsp_free(struct netdev_page_pool_stats_get_rsp *rsp)
+{
+ netdev_page_pool_info_free(&rsp->info);
+ free(rsp);
+}
+
+int netdev_page_pool_stats_get_rsp_parse(const struct nlmsghdr *nlh,
+ void *data)
+{
+ struct netdev_page_pool_stats_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == NETDEV_A_PAGE_POOL_STATS_INFO) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.info = 1;
+
+ parg.rsp_policy = &netdev_page_pool_info_nest;
+ parg.data = &dst->info;
+ if (netdev_page_pool_info_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.alloc_fast = 1;
+ dst->alloc_fast = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.alloc_slow = 1;
+ dst->alloc_slow = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.alloc_slow_high_order = 1;
+ dst->alloc_slow_high_order = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.alloc_empty = 1;
+ dst->alloc_empty = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.alloc_refill = 1;
+ dst->alloc_refill = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.alloc_waive = 1;
+ dst->alloc_waive = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.recycle_cached = 1;
+ dst->recycle_cached = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.recycle_cache_full = 1;
+ dst->recycle_cache_full = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.recycle_ring = 1;
+ dst->recycle_ring = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.recycle_ring_full = 1;
+ dst->recycle_ring_full = mnl_attr_get_uint(attr);
+ } else if (type == NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.recycle_released_refcnt = 1;
+ dst->recycle_released_refcnt = mnl_attr_get_uint(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct netdev_page_pool_stats_get_rsp *
+netdev_page_pool_stats_get(struct ynl_sock *ys,
+ struct netdev_page_pool_stats_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct netdev_page_pool_stats_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, NETDEV_CMD_PAGE_POOL_STATS_GET, 1);
+ ys->req_policy = &netdev_page_pool_stats_nest;
+ yrs.yarg.rsp_policy = &netdev_page_pool_stats_nest;
+
+ if (req->_present.info)
+ netdev_page_pool_info_put(nlh, NETDEV_A_PAGE_POOL_STATS_INFO, &req->info);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = netdev_page_pool_stats_get_rsp_parse;
+ yrs.rsp_cmd = NETDEV_CMD_PAGE_POOL_STATS_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ netdev_page_pool_stats_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* NETDEV_CMD_PAGE_POOL_STATS_GET - dump */
+void
+netdev_page_pool_stats_get_list_free(struct netdev_page_pool_stats_get_list *rsp)
+{
+ struct netdev_page_pool_stats_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ netdev_page_pool_info_free(&rsp->obj.info);
+ free(rsp);
+ }
+}
+
+struct netdev_page_pool_stats_get_list *
+netdev_page_pool_stats_get_dump(struct ynl_sock *ys)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct netdev_page_pool_stats_get_list);
+ yds.cb = netdev_page_pool_stats_get_rsp_parse;
+ yds.rsp_cmd = NETDEV_CMD_PAGE_POOL_STATS_GET;
+ yds.rsp_policy = &netdev_page_pool_stats_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, NETDEV_CMD_PAGE_POOL_STATS_GET, 1);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ netdev_page_pool_stats_get_list_free(yds.first);
+ return NULL;
+}
+
static const struct ynl_ntf_info netdev_ntf_info[] = {
[NETDEV_CMD_DEV_ADD_NTF] = {
.alloc_sz = sizeof(struct netdev_dev_get_ntf),
@@ -216,6 +617,24 @@ static const struct ynl_ntf_info netdev_ntf_info[] = {
.policy = &netdev_dev_nest,
.free = (void *)netdev_dev_get_ntf_free,
},
+ [NETDEV_CMD_PAGE_POOL_ADD_NTF] = {
+ .alloc_sz = sizeof(struct netdev_page_pool_get_ntf),
+ .cb = netdev_page_pool_get_rsp_parse,
+ .policy = &netdev_page_pool_nest,
+ .free = (void *)netdev_page_pool_get_ntf_free,
+ },
+ [NETDEV_CMD_PAGE_POOL_DEL_NTF] = {
+ .alloc_sz = sizeof(struct netdev_page_pool_get_ntf),
+ .cb = netdev_page_pool_get_rsp_parse,
+ .policy = &netdev_page_pool_nest,
+ .free = (void *)netdev_page_pool_get_ntf_free,
+ },
+ [NETDEV_CMD_PAGE_POOL_CHANGE_NTF] = {
+ .alloc_sz = sizeof(struct netdev_page_pool_get_ntf),
+ .cb = netdev_page_pool_get_rsp_parse,
+ .policy = &netdev_page_pool_nest,
+ .free = (void *)netdev_page_pool_get_ntf_free,
+ },
};
const struct ynl_family ynl_netdev_family = {
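
With the notification table above registered, a YNL socket subscribed to the new "page-pool" multicast group receives add/del/change events. A hedged sketch follows; the helper names come from tools/net/ynl/lib/ynl.h, and the exact polling loop is an assumption rather than part of the patch.

#include <stdio.h>
#include <ynl.h>
#include "netdev-user.h"

static int watch_page_pools(struct ynl_sock *ys)
{
	if (ynl_subscribe(ys, NETDEV_MCGRP_PAGE_POOL))
		return -1;

	for (;;) {
		struct ynl_ntf_base_type *ntf;

		/* read pending netlink messages into the notification queue */
		if (ynl_ntf_check(ys) < 0)
			return -1;
		while ((ntf = ynl_ntf_dequeue(ys))) {
			printf("ntf: %s\n", netdev_op_str(ntf->cmd));
			ynl_ntf_free(ntf);
		}
	}
}
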
diff --git a/tools/net/ynl/generated/netdev-user.h b/tools/net/ynl/generated/netdev-user.h
index 4fafac879df3..4093602c9b6c 100644
--- a/tools/net/ynl/generated/netdev-user.h
+++ b/tools/net/ynl/generated/netdev-user.h
@@ -21,6 +21,16 @@ const char *netdev_xdp_act_str(enum netdev_xdp_act value);
const char *netdev_xdp_rx_metadata_str(enum netdev_xdp_rx_metadata value);
/* Common nested types */
+struct netdev_page_pool_info {
+ struct {
+ __u32 id:1;
+ __u32 ifindex:1;
+ } _present;
+
+ __u64 id;
+ __u32 ifindex;
+};
+
/* ============== NETDEV_CMD_DEV_GET ============== */
/* NETDEV_CMD_DEV_GET - do */
struct netdev_dev_get_req {
@@ -87,4 +97,165 @@ struct netdev_dev_get_ntf {
void netdev_dev_get_ntf_free(struct netdev_dev_get_ntf *rsp);
+/* ============== NETDEV_CMD_PAGE_POOL_GET ============== */
+/* NETDEV_CMD_PAGE_POOL_GET - do */
+struct netdev_page_pool_get_req {
+ struct {
+ __u32 id:1;
+ } _present;
+
+ __u64 id;
+};
+
+static inline struct netdev_page_pool_get_req *
+netdev_page_pool_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct netdev_page_pool_get_req));
+}
+void netdev_page_pool_get_req_free(struct netdev_page_pool_get_req *req);
+
+static inline void
+netdev_page_pool_get_req_set_id(struct netdev_page_pool_get_req *req, __u64 id)
+{
+ req->_present.id = 1;
+ req->id = id;
+}
+
+struct netdev_page_pool_get_rsp {
+ struct {
+ __u32 id:1;
+ __u32 ifindex:1;
+ __u32 napi_id:1;
+ __u32 inflight:1;
+ __u32 inflight_mem:1;
+ __u32 detach_time:1;
+ } _present;
+
+ __u64 id;
+ __u32 ifindex;
+ __u64 napi_id;
+ __u64 inflight;
+ __u64 inflight_mem;
+ __u64 detach_time;
+};
+
+void netdev_page_pool_get_rsp_free(struct netdev_page_pool_get_rsp *rsp);
+
+/*
+ * Get / dump information about Page Pools.
+(Only Page Pools associated with a net_device can be listed.)
+
+ */
+struct netdev_page_pool_get_rsp *
+netdev_page_pool_get(struct ynl_sock *ys, struct netdev_page_pool_get_req *req);
+
+/* NETDEV_CMD_PAGE_POOL_GET - dump */
+struct netdev_page_pool_get_list {
+ struct netdev_page_pool_get_list *next;
+ struct netdev_page_pool_get_rsp obj __attribute__((aligned(8)));
+};
+
+void netdev_page_pool_get_list_free(struct netdev_page_pool_get_list *rsp);
+
+struct netdev_page_pool_get_list *
+netdev_page_pool_get_dump(struct ynl_sock *ys);
+
+/* NETDEV_CMD_PAGE_POOL_GET - notify */
+struct netdev_page_pool_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct netdev_page_pool_get_ntf *ntf);
+ struct netdev_page_pool_get_rsp obj __attribute__((aligned(8)));
+};
+
+void netdev_page_pool_get_ntf_free(struct netdev_page_pool_get_ntf *rsp);
+
+/* ============== NETDEV_CMD_PAGE_POOL_STATS_GET ============== */
+/* NETDEV_CMD_PAGE_POOL_STATS_GET - do */
+struct netdev_page_pool_stats_get_req {
+ struct {
+ __u32 info:1;
+ } _present;
+
+ struct netdev_page_pool_info info;
+};
+
+static inline struct netdev_page_pool_stats_get_req *
+netdev_page_pool_stats_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct netdev_page_pool_stats_get_req));
+}
+void
+netdev_page_pool_stats_get_req_free(struct netdev_page_pool_stats_get_req *req);
+
+static inline void
+netdev_page_pool_stats_get_req_set_info_id(struct netdev_page_pool_stats_get_req *req,
+ __u64 id)
+{
+ req->_present.info = 1;
+ req->info._present.id = 1;
+ req->info.id = id;
+}
+static inline void
+netdev_page_pool_stats_get_req_set_info_ifindex(struct netdev_page_pool_stats_get_req *req,
+ __u32 ifindex)
+{
+ req->_present.info = 1;
+ req->info._present.ifindex = 1;
+ req->info.ifindex = ifindex;
+}
+
+struct netdev_page_pool_stats_get_rsp {
+ struct {
+ __u32 info:1;
+ __u32 alloc_fast:1;
+ __u32 alloc_slow:1;
+ __u32 alloc_slow_high_order:1;
+ __u32 alloc_empty:1;
+ __u32 alloc_refill:1;
+ __u32 alloc_waive:1;
+ __u32 recycle_cached:1;
+ __u32 recycle_cache_full:1;
+ __u32 recycle_ring:1;
+ __u32 recycle_ring_full:1;
+ __u32 recycle_released_refcnt:1;
+ } _present;
+
+ struct netdev_page_pool_info info;
+ __u64 alloc_fast;
+ __u64 alloc_slow;
+ __u64 alloc_slow_high_order;
+ __u64 alloc_empty;
+ __u64 alloc_refill;
+ __u64 alloc_waive;
+ __u64 recycle_cached;
+ __u64 recycle_cache_full;
+ __u64 recycle_ring;
+ __u64 recycle_ring_full;
+ __u64 recycle_released_refcnt;
+};
+
+void
+netdev_page_pool_stats_get_rsp_free(struct netdev_page_pool_stats_get_rsp *rsp);
+
+/*
+ * Get page pool statistics.
+ */
+struct netdev_page_pool_stats_get_rsp *
+netdev_page_pool_stats_get(struct ynl_sock *ys,
+ struct netdev_page_pool_stats_get_req *req);
+
+/* NETDEV_CMD_PAGE_POOL_STATS_GET - dump */
+struct netdev_page_pool_stats_get_list {
+ struct netdev_page_pool_stats_get_list *next;
+ struct netdev_page_pool_stats_get_rsp obj __attribute__((aligned(8)));
+};
+
+void
+netdev_page_pool_stats_get_list_free(struct netdev_page_pool_stats_get_list *rsp);
+
+struct netdev_page_pool_stats_get_list *
+netdev_page_pool_stats_get_dump(struct ynl_sock *ys);
+
#endif /* _LINUX_NETDEV_GEN_H */
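
The page-pool sample added later in this patch only exercises the dump path; here is a minimal sketch (not part of the patch) of the single-object "do" request using the generated API declared above:

#include <stdio.h>
#include <ynl.h>
#include "netdev-user.h"

static int print_one_pool(struct ynl_sock *ys, __u64 pool_id)
{
	struct netdev_page_pool_get_req *req;
	struct netdev_page_pool_get_rsp *rsp;

	req = netdev_page_pool_get_req_alloc();
	if (!req)
		return -1;
	netdev_page_pool_get_req_set_id(req, pool_id);

	rsp = netdev_page_pool_get(ys, req);
	netdev_page_pool_get_req_free(req);
	if (!rsp)
		return -1;

	if (rsp->_present.inflight)
		printf("pool %llu: %llu pages in flight\n",
		       (unsigned long long)rsp->id,
		       (unsigned long long)rsp->inflight);
	netdev_page_pool_get_rsp_free(rsp);
	return 0;
}
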
diff --git a/tools/net/ynl/lib/ynl.h b/tools/net/ynl/lib/ynl.h
index e974378e3b8c..075d868f3b57 100644
--- a/tools/net/ynl/lib/ynl.h
+++ b/tools/net/ynl/lib/ynl.h
@@ -239,7 +239,7 @@ int ynl_error_parse(struct ynl_parse_arg *yarg, const char *msg);
#ifndef MNL_HAS_AUTO_SCALARS
static inline uint64_t mnl_attr_get_uint(const struct nlattr *attr)
{
- if (mnl_attr_get_len(attr) == 4)
+ if (mnl_attr_get_payload_len(attr) == 4)
return mnl_attr_get_u32(attr);
return mnl_attr_get_u64(attr);
}
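
The one-line fix above matters because mnl_attr_get_len() returns the header-inclusive nla_len: for a 32-bit attribute that is 8 (4-byte attribute header plus 4-byte payload), so the old check never matched and u32 attributes were decoded as u64. A hedged illustration of the length arithmetic (not part of the patch):

#include <stdint.h>
#include <libmnl/libmnl.h>

/* For a u32 attribute: nla_len == MNL_ATTR_HDRLEN + 4 == 8,
 * while the payload length alone is 4.
 */
static int attr_is_u32(const struct nlattr *attr)
{
	return mnl_attr_get_len(attr) == MNL_ATTR_HDRLEN + sizeof(uint32_t) &&
	       mnl_attr_get_payload_len(attr) == sizeof(uint32_t);
}
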
diff --git a/tools/net/ynl/samples/.gitignore b/tools/net/ynl/samples/.gitignore
index 2aae60c4829f..49637b26c482 100644
--- a/tools/net/ynl/samples/.gitignore
+++ b/tools/net/ynl/samples/.gitignore
@@ -1,3 +1,4 @@
ethtool
devlink
netdev
+page-pool
\ No newline at end of file
diff --git a/tools/net/ynl/samples/Makefile b/tools/net/ynl/samples/Makefile
index 3dbb106e87d9..28bdb1557a54 100644
--- a/tools/net/ynl/samples/Makefile
+++ b/tools/net/ynl/samples/Makefile
@@ -18,7 +18,9 @@ include $(wildcard *.d)
all: $(BINS)
-$(BINS): ../lib/ynl.a ../generated/protos.a
+CFLAGS_page-pool=$(CFLAGS_netdev)
+
+$(BINS): ../lib/ynl.a ../generated/protos.a $(SRCS)
@echo -e '\tCC sample $@'
@$(COMPILE.c) $(CFLAGS_$@) $@.c -o $@.o
@$(LINK.c) $@.o -o $@ $(LDLIBS)
diff --git a/tools/net/ynl/samples/page-pool.c b/tools/net/ynl/samples/page-pool.c
new file mode 100644
index 000000000000..098b5190d0e5
--- /dev/null
+++ b/tools/net/ynl/samples/page-pool.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <string.h>
+
+#include <ynl.h>
+
+#include <net/if.h>
+
+#include "netdev-user.h"
+
+struct stat {
+ unsigned int ifc;
+
+ struct {
+ unsigned int cnt;
+ size_t refs, bytes;
+ } live[2];
+
+ size_t alloc_slow, alloc_fast, recycle_ring, recycle_cache;
+};
+
+struct stats_array {
+ unsigned int i, max;
+ struct stat *s;
+};
+
+static struct stat *find_ifc(struct stats_array *a, unsigned int ifindex)
+{
+ unsigned int i;
+
+ for (i = 0; i < a->i; i++) {
+ if (a->s[i].ifc == ifindex)
+ return &a->s[i];
+ }
+
+ a->i++;
+ if (a->i == a->max) {
+ a->max *= 2;
+ a->s = reallocarray(a->s, a->max, sizeof(*a->s));
+ }
+ a->s[i].ifc = ifindex;
+ return &a->s[i];
+}
+
+static void count(struct stat *s, unsigned int l,
+ struct netdev_page_pool_get_rsp *pp)
+{
+ s->live[l].cnt++;
+ if (pp->_present.inflight)
+ s->live[l].refs += pp->inflight;
+ if (pp->_present.inflight_mem)
+ s->live[l].bytes += pp->inflight_mem;
+}
+
+int main(int argc, char **argv)
+{
+ struct netdev_page_pool_stats_get_list *pp_stats;
+ struct netdev_page_pool_get_list *pools;
+ struct stats_array a = {};
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+
+ ys = ynl_sock_create(&ynl_netdev_family, &yerr);
+ if (!ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return 1;
+ }
+
+ a.max = 128;
+ a.s = calloc(a.max, sizeof(*a.s));
+ if (!a.s)
+ goto err_close;
+
+ pools = netdev_page_pool_get_dump(ys);
+ if (!pools)
+ goto err_free;
+
+ ynl_dump_foreach(pools, pp) {
+ struct stat *s = find_ifc(&a, pp->ifindex);
+
+ count(s, 1, pp);
+ if (pp->_present.detach_time)
+ count(s, 0, pp);
+ }
+ netdev_page_pool_get_list_free(pools);
+
+ pp_stats = netdev_page_pool_stats_get_dump(ys);
+ if (!pp_stats)
+ goto err_free;
+
+ ynl_dump_foreach(pp_stats, pp) {
+ struct stat *s = find_ifc(&a, pp->info.ifindex);
+
+ if (pp->_present.alloc_fast)
+ s->alloc_fast += pp->alloc_fast;
+ if (pp->_present.alloc_slow)
+ s->alloc_slow += pp->alloc_slow;
+ if (pp->_present.recycle_ring)
+ s->recycle_ring += pp->recycle_ring;
+ if (pp->_present.recycle_cached)
+ s->recycle_cache += pp->recycle_cached;
+ }
+ netdev_page_pool_stats_get_list_free(pp_stats);
+
+ for (unsigned int i = 0; i < a.i; i++) {
+ char ifname[IF_NAMESIZE];
+ struct stat *s = &a.s[i];
+ const char *name;
+ double recycle;
+
+ if (!s->ifc) {
+ name = "<orphan>\t";
+ } else {
+ name = if_indextoname(s->ifc, ifname);
+ if (name)
+ printf("%8s", name);
+ printf("[%d]\t", s->ifc);
+ }
+
+ printf("page pools: %u (zombies: %u)\n",
+ s->live[1].cnt, s->live[0].cnt);
+ printf("\t\trefs: %zu bytes: %zu (refs: %zu bytes: %zu)\n",
+ s->live[1].refs, s->live[1].bytes,
+ s->live[0].refs, s->live[0].bytes);
+
+ /* We don't know how many pages are sitting in cache and ring
+ * so we will under-count the recycling rate a bit.
+ */
+ recycle = (double)(s->recycle_ring + s->recycle_cache) /
+ (s->alloc_fast + s->alloc_slow) * 100;
+ printf("\t\trecycling: %.1lf%% (alloc: %zu:%zu recycle: %zu:%zu)\n",
+ recycle, s->alloc_slow, s->alloc_fast,
+ s->recycle_ring, s->recycle_cache);
+ }
+
+ ynl_sock_destroy(ys);
+ return 0;
+
+err_free:
+ free(a.s);
+err_close:
+ fprintf(stderr, "YNL: %s\n", ys->err.msg);
+ ynl_sock_destroy(ys);
+ return 2;
+}
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index 8337aa6de25e..266bc1629e58 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -67,9 +67,9 @@ class Type(SpecAttr):
if 'nested-attributes' in attr:
self.nested_attrs = attr['nested-attributes']
if self.nested_attrs == family.name:
- self.nested_render_name = f"{family.name}"
+ self.nested_render_name = c_lower(f"{family.name}")
else:
- self.nested_render_name = f"{family.name}_{c_lower(self.nested_attrs)}"
+ self.nested_render_name = c_lower(f"{family.name}_{self.nested_attrs}")
if self.nested_attrs in self.family.consts:
self.nested_struct_type = 'struct ' + self.nested_render_name + '_'
@@ -335,7 +335,7 @@ class TypeScalar(Type):
maybe_enum = not self.is_bitfield and 'enum' in self.attr
if maybe_enum and self.family.consts[self.attr['enum']].enum_name:
- self.type_name = f"enum {self.family.name}_{c_lower(self.attr['enum'])}"
+ self.type_name = c_lower(f"enum {self.family.name}_{self.attr['enum']}")
elif self.is_auto_scalar:
self.type_name = '__' + self.type[0] + '64'
else:
@@ -685,9 +685,9 @@ class Struct:
self.nested = type_list is None
if family.name == c_lower(space_name):
- self.render_name = f"{family.name}"
+ self.render_name = c_lower(family.name)
else:
- self.render_name = f"{family.name}_{c_lower(space_name)}"
+ self.render_name = c_lower(family.name + '-' + space_name)
self.struct_name = 'struct ' + self.render_name
if self.nested and space_name in family.consts:
self.struct_name += '_'
@@ -755,11 +755,17 @@ class EnumSet(SpecEnumSet):
if 'enum-name' in yaml:
if yaml['enum-name']:
self.enum_name = 'enum ' + c_lower(yaml['enum-name'])
+ self.user_type = self.enum_name
else:
self.enum_name = None
else:
self.enum_name = 'enum ' + self.render_name
+ if self.enum_name:
+ self.user_type = self.enum_name
+ else:
+ self.user_type = 'int'
+
self.value_pfx = yaml.get('name-prefix', f"{family.name}-{yaml['name']}-")
super().__init__(family, yaml)
@@ -841,7 +847,7 @@ class Operation(SpecOperation):
def __init__(self, family, yaml, req_value, rsp_value):
super().__init__(family, yaml, req_value, rsp_value)
- self.render_name = family.name + '_' + c_lower(self.name)
+ self.render_name = c_lower(family.name + '_' + self.name)
self.dual_policy = ('do' in yaml and 'request' in yaml['do']) and \
('dump' in yaml and 'request' in yaml['dump'])
@@ -1158,8 +1164,9 @@ class RenderInfo:
class CodeWriter:
- def __init__(self, nlib, out_file=None):
+ def __init__(self, nlib, out_file=None, overwrite=True):
self.nlib = nlib
+ self._overwrite = overwrite
self._nl = False
self._block_end = False
@@ -1180,8 +1187,9 @@ class CodeWriter:
return
# Avoid modifying the file if contents didn't change
self._out.flush()
- if os.path.isfile(self._out_file) and filecmp.cmp(self._out.name, self._out_file, shallow=False):
- return
+ if not self._overwrite and os.path.isfile(self._out_file):
+ if filecmp.cmp(self._out.name, self._out_file, shallow=False):
+ return
with open(self._out_file, 'w+') as out_file:
self._out.seek(0)
shutil.copyfileobj(self._out, out_file)
@@ -1431,7 +1439,7 @@ def op_prefix(ri, direction, deref=False):
suffix += '_rsp'
suffix += '_dump' if deref else '_list'
- return f"{ri.family['name']}{suffix}"
+ return f"{ri.family.c_name}{suffix}"
def type_name(ri, direction, deref=False):
@@ -1483,8 +1491,8 @@ def put_typol(cw, struct):
def _put_enum_to_str_helper(cw, render_name, map_name, arg_name, enum=None):
args = [f'int {arg_name}']
- if enum and not ('enum-name' in enum and not enum['enum-name']):
- args = [f'enum {render_name} {arg_name}']
+ if enum:
+ args = [enum.user_type + ' ' + arg_name]
cw.write_func_prot('const char *', f'{render_name}_str', args)
cw.block_start()
if enum and enum.type == 'flags':
@@ -1497,11 +1505,11 @@ def _put_enum_to_str_helper(cw, render_name, map_name, arg_name, enum=None):
def put_op_name_fwd(family, cw):
- cw.write_func_prot('const char *', f'{family.name}_op_str', ['int op'], suffix=';')
+ cw.write_func_prot('const char *', f'{family.c_name}_op_str', ['int op'], suffix=';')
def put_op_name(family, cw):
- map_name = f'{family.name}_op_strmap'
+ map_name = f'{family.c_name}_op_strmap'
cw.block_start(line=f"static const char * const {map_name}[] =")
for op_name, op in family.msgs.items():
if op.rsp_value:
@@ -1518,13 +1526,11 @@ def put_op_name(family, cw):
cw.block_end(line=';')
cw.nl()
- _put_enum_to_str_helper(cw, family.name + '_op', map_name, 'op')
+ _put_enum_to_str_helper(cw, family.c_name + '_op', map_name, 'op')
def put_enum_to_str_fwd(family, cw, enum):
- args = [f'enum {enum.render_name} value']
- if 'enum-name' in enum and not enum['enum-name']:
- args = ['int value']
+ args = [enum.user_type + ' value']
cw.write_func_prot('const char *', f'{enum.render_name}_str', args, suffix=';')
@@ -1838,7 +1844,7 @@ def _print_type(ri, direction, struct):
if ri.op_mode == 'dump':
suffix += '_dump'
- ri.cw.block_start(line=f"struct {ri.family['name']}{suffix}")
+ ri.cw.block_start(line=f"struct {ri.family.c_name}{suffix}")
meta_started = False
for _, attr in struct.member_list():
@@ -2068,12 +2074,13 @@ def print_kernel_policy_ranges(family, cw):
first = False
sign = '' if attr.type[0] == 'u' else '_signed'
+ suffix = 'ULL' if attr.type[0] == 'u' else 'LL'
cw.block_start(line=f'static const struct netlink_range_validation{sign} {c_lower(attr.enum_name)}_range =')
members = []
if 'min' in attr.checks:
- members.append(('min', attr.get_limit('min')))
+ members.append(('min', str(attr.get_limit('min')) + suffix))
if 'max' in attr.checks:
- members.append(('max', attr.get_limit('max')))
+ members.append(('max', str(attr.get_limit('max')) + suffix))
cw.write_struct_init(members)
cw.block_end(line=';')
cw.nl()
@@ -2103,7 +2110,7 @@ def print_kernel_op_table_fwd(family, cw, terminate):
cnt = len(family.ops)
qual = 'static const' if not exported else 'const'
- line = f"{qual} struct {struct_type} {family.name}_nl_ops[{cnt}]"
+ line = f"{qual} struct {struct_type} {family.c_name}_nl_ops[{cnt}]"
if terminate:
cw.p(f"extern {line};")
else:
@@ -2246,7 +2253,7 @@ def print_kernel_mcgrp_src(family, cw):
if not family.mcgrps['list']:
return
- cw.block_start('static const struct genl_multicast_group ' + family.name + '_nl_mcgrps[] =')
+ cw.block_start('static const struct genl_multicast_group ' + family.c_name + '_nl_mcgrps[] =')
for grp in family.mcgrps['list']:
name = grp['name']
grp_id = c_upper(f"{family.name}-nlgrp-{name}")
@@ -2259,7 +2266,7 @@ def print_kernel_family_struct_hdr(family, cw):
if not kernel_can_gen_family_struct(family):
return
- cw.p(f"extern struct genl_family {family.name}_nl_family;")
+ cw.p(f"extern struct genl_family {family.c_name}_nl_family;")
cw.nl()
@@ -2274,14 +2281,14 @@ def print_kernel_family_struct_src(family, cw):
cw.p('.parallel_ops\t= true,')
cw.p('.module\t\t= THIS_MODULE,')
if family.kernel_policy == 'per-op':
- cw.p(f'.ops\t\t= {family.name}_nl_ops,')
- cw.p(f'.n_ops\t\t= ARRAY_SIZE({family.name}_nl_ops),')
+ cw.p(f'.ops\t\t= {family.c_name}_nl_ops,')
+ cw.p(f'.n_ops\t\t= ARRAY_SIZE({family.c_name}_nl_ops),')
elif family.kernel_policy == 'split':
- cw.p(f'.split_ops\t= {family.name}_nl_ops,')
- cw.p(f'.n_split_ops\t= ARRAY_SIZE({family.name}_nl_ops),')
+ cw.p(f'.split_ops\t= {family.c_name}_nl_ops,')
+ cw.p(f'.n_split_ops\t= ARRAY_SIZE({family.c_name}_nl_ops),')
if family.mcgrps['list']:
- cw.p(f'.mcgrps\t\t= {family.name}_nl_mcgrps,')
- cw.p(f'.n_mcgrps\t= ARRAY_SIZE({family.name}_nl_mcgrps),')
+ cw.p(f'.mcgrps\t\t= {family.c_name}_nl_mcgrps,')
+ cw.p(f'.n_mcgrps\t= ARRAY_SIZE({family.c_name}_nl_mcgrps),')
cw.block_end(';')
@@ -2291,7 +2298,7 @@ def uapi_enum_start(family, cw, obj, ckey='', enum_name='enum-name'):
if obj[enum_name]:
start_line = 'enum ' + c_lower(obj[enum_name])
elif ckey and ckey in obj:
- start_line = 'enum ' + family.name + '_' + c_lower(obj[ckey])
+ start_line = 'enum ' + family.c_name + '_' + c_lower(obj[ckey])
cw.block_start(line=start_line)
@@ -2475,7 +2482,7 @@ def render_user_family(family, cw, prototype):
cw.nl()
cw.block_start(f'{symbol} = ')
- cw.p(f'.name\t\t= "{family.name}",')
+ cw.p(f'.name\t\t= "{family.c_name}",')
if family.ntfs:
cw.p(f".ntf_info\t= {family['name']}_ntf_info,")
cw.p(f".ntf_info_size\t= MNL_ARRAY_SIZE({family['name']}_ntf_info),")
@@ -2509,6 +2516,8 @@ def main():
parser.add_argument('--header', dest='header', action='store_true', default=None)
parser.add_argument('--source', dest='header', action='store_false')
parser.add_argument('--user-header', nargs='+', default=[])
+ parser.add_argument('--cmp-out', action='store_true', default=None,
+ help='Do not overwrite the output file if the new output is identical to the old')
parser.add_argument('--exclude-op', action='append', default=[])
parser.add_argument('-o', dest='out_file', type=str, default=None)
args = parser.parse_args()
@@ -2536,7 +2545,7 @@ def main():
print(f'Message enum-model {parsed.msg_id_model} not supported for {args.mode} generation')
os.sys.exit(1)
- cw = CodeWriter(BaseNlLib(), args.out_file)
+ cw = CodeWriter(BaseNlLib(), args.out_file, overwrite=(not args.cmp_out))
_, spec_kernel = find_kernel_root(args.spec)
if args.mode == 'uapi' or args.header:
@@ -2557,7 +2566,7 @@ def main():
render_uapi(parsed, cw)
return
- hdr_prot = f"_LINUX_{parsed.name.upper()}_GEN_H"
+ hdr_prot = f"_LINUX_{parsed.c_name.upper()}_GEN_H"
if args.header:
cw.p('#ifndef ' + hdr_prot)
cw.p('#define ' + hdr_prot)
diff --git a/tools/net/ynl/ynl-gen-rst.py b/tools/net/ynl/ynl-gen-rst.py
new file mode 100755
index 000000000000..b6292109e236
--- /dev/null
+++ b/tools/net/ynl/ynl-gen-rst.py
@@ -0,0 +1,388 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# -*- coding: utf-8; mode: python -*-
+
+"""
+ Script to auto generate the documentation for Netlink specifications.
+
+ :copyright: Copyright (C) 2023 Breno Leitao <leitao@debian.org>
+ :license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+ This script performs extensive parsing to the Linux kernel's netlink YAML
+ spec files, in an effort to avoid needing to heavily mark up the original
+ YAML file.
+
+ This code is split in three big parts:
+ 1) RST formatters: Use to convert a string to a RST output
+ 2) Parser helpers: Functions to parse the YAML data structure
+ 3) Main function and small helpers
+"""
+
+from typing import Any, Dict, List
+import os.path
+import sys
+import argparse
+import logging
+import yaml
+
+
+SPACE_PER_LEVEL = 4
+
+
+# RST Formatters
+# ==============
+def headroom(level: int) -> str:
+ """Return space to format"""
+ return " " * (level * SPACE_PER_LEVEL)
+
+
+def bold(text: str) -> str:
+ """Format bold text"""
+ return f"**{text}**"
+
+
+def inline(text: str) -> str:
+ """Format inline text"""
+ return f"``{text}``"
+
+
+def sanitize(text: str) -> str:
+ """Remove newlines and multiple spaces"""
+ # This is useful for some fields that are spread across multiple lines
+ return str(text).replace("\n", "").strip()
+
+
+def rst_fields(key: str, value: str, level: int = 0) -> str:
+ """Return a RST formatted field"""
+ return headroom(level) + f":{key}: {value}"
+
+
+def rst_definition(key: str, value: Any, level: int = 0) -> str:
+ """Format a single rst definition"""
+ return headroom(level) + key + "\n" + headroom(level + 1) + str(value)
+
+
+def rst_paragraph(paragraph: str, level: int = 0) -> str:
+ """Return a formatted paragraph"""
+ return headroom(level) + paragraph
+
+
+def rst_bullet(item: str, level: int = 0) -> str:
+ """Return a formatted a bullet"""
+ return headroom(level) + f" - {item}"
+
+
+def rst_subsection(title: str) -> str:
+ """Add a sub-section to the document"""
+ return f"{title}\n" + "-" * len(title)
+
+
+def rst_subsubsection(title: str) -> str:
+ """Add a sub-sub-section to the document"""
+ return f"{title}\n" + "~" * len(title)
+
+
+def rst_section(title: str) -> str:
+ """Add a section to the document"""
+ return f"\n{title}\n" + "=" * len(title)
+
+
+def rst_subtitle(title: str) -> str:
+ """Add a subtitle to the document"""
+ return "\n" + "-" * len(title) + f"\n{title}\n" + "-" * len(title) + "\n\n"
+
+
+def rst_title(title: str) -> str:
+ """Add a title to the document"""
+ return "=" * len(title) + f"\n{title}\n" + "=" * len(title) + "\n\n"
+
+
+def rst_list_inline(list_: List[str], level: int = 0) -> str:
+ """Format a list using inlines"""
+ return headroom(level) + "[" + ", ".join(inline(i) for i in list_) + "]"
+
+
+def rst_header() -> str:
+ """The headers for all the auto generated RST files"""
+ lines = []
+
+ lines.append(rst_paragraph(".. SPDX-License-Identifier: GPL-2.0"))
+ lines.append(rst_paragraph(".. NOTE: This document was auto-generated.\n\n"))
+
+ return "\n".join(lines)
+
+
+def rst_toctree(maxdepth: int = 2) -> str:
+ """Generate a toctree RST primitive"""
+ lines = []
+
+ lines.append(".. toctree::")
+ lines.append(f" :maxdepth: {maxdepth}\n\n")
+
+ return "\n".join(lines)
+
+
+# Parsers
+# =======
+
+
+def parse_mcast_group(mcast_group: List[Dict[str, Any]]) -> str:
+ """Parse 'multicast' group list and return a formatted string"""
+ lines = []
+ for group in mcast_group:
+ lines.append(rst_bullet(group["name"]))
+
+ return "\n".join(lines)
+
+
+def parse_do(do_dict: Dict[str, Any], level: int = 0) -> str:
+ """Parse 'do' section and return a formatted string"""
+ lines = []
+ for key in do_dict.keys():
+ lines.append(rst_paragraph(bold(key), level + 1))
+ lines.append(parse_do_attributes(do_dict[key], level + 1) + "\n")
+
+ return "\n".join(lines)
+
+
+def parse_do_attributes(attrs: Dict[str, Any], level: int = 0) -> str:
+ """Parse 'attributes' section"""
+ if "attributes" not in attrs:
+ return ""
+ lines = [rst_fields("attributes", rst_list_inline(attrs["attributes"]), level + 1)]
+
+ return "\n".join(lines)
+
+
+def parse_operations(operations: List[Dict[str, Any]]) -> str:
+ """Parse operations block"""
+ preprocessed = ["name", "doc", "title", "do", "dump"]
+ lines = []
+
+ for operation in operations:
+ lines.append(rst_section(operation["name"]))
+ lines.append(rst_paragraph(sanitize(operation["doc"])) + "\n")
+
+ for key in operation.keys():
+ if key in preprocessed:
+ # Skip the special fields
+ continue
+ lines.append(rst_fields(key, operation[key], 0))
+
+ if "do" in operation:
+ lines.append(rst_paragraph(":do:", 0))
+ lines.append(parse_do(operation["do"], 0))
+ if "dump" in operation:
+ lines.append(rst_paragraph(":dump:", 0))
+ lines.append(parse_do(operation["dump"], 0))
+
+ # New line after fields
+ lines.append("\n")
+
+ return "\n".join(lines)
+
+
+def parse_entries(entries: List[Dict[str, Any]], level: int) -> str:
+ """Parse a list of entries"""
+ lines = []
+ for entry in entries:
+ if isinstance(entry, dict):
+ # entries could be a list or a dictionary
+ lines.append(
+ rst_fields(entry.get("name", ""), sanitize(entry.get("doc", "")), level)
+ )
+ elif isinstance(entry, list):
+ lines.append(rst_list_inline(entry, level))
+ else:
+ lines.append(rst_bullet(inline(sanitize(entry)), level))
+
+ lines.append("\n")
+ return "\n".join(lines)
+
+
+def parse_definitions(defs: Dict[str, Any]) -> str:
+ """Parse definitions section"""
+ preprocessed = ["name", "entries", "members"]
+ ignored = ["render-max"] # This is not printed
+ lines = []
+
+ for definition in defs:
+ lines.append(rst_section(definition["name"]))
+ for k in definition.keys():
+ if k in preprocessed + ignored:
+ continue
+ lines.append(rst_fields(k, sanitize(definition[k]), 0))
+
+ # Field list needs to finish with a new line
+ lines.append("\n")
+ if "entries" in definition:
+ lines.append(rst_paragraph(":entries:", 0))
+ lines.append(parse_entries(definition["entries"], 1))
+ if "members" in definition:
+ lines.append(rst_paragraph(":members:", 0))
+ lines.append(parse_entries(definition["members"], 1))
+
+ return "\n".join(lines)
+
+
+def parse_attr_sets(entries: List[Dict[str, Any]]) -> str:
+ """Parse attribute from attribute-set"""
+ preprocessed = ["name", "type"]
+ ignored = ["checks"]
+ lines = []
+
+ for entry in entries:
+ lines.append(rst_section(entry["name"]))
+ for attr in entry["attributes"]:
+ type_ = attr.get("type")
+ attr_line = bold(attr["name"])
+ if type_:
+ # Add the attribute type in the same line
+ attr_line += f" ({inline(type_)})"
+
+ lines.append(rst_subsubsection(attr_line))
+
+ for k in attr.keys():
+ if k in preprocessed + ignored:
+ continue
+ lines.append(rst_fields(k, sanitize(attr[k]), 2))
+ lines.append("\n")
+
+ return "\n".join(lines)
+
+
+def parse_yaml(obj: Dict[str, Any]) -> str:
+ """Format the whole YAML into a RST string"""
+ lines = []
+
+ # Main header
+
+ lines.append(rst_header())
+
+ title = f"Family ``{obj['name']}`` netlink specification"
+ lines.append(rst_title(title))
+ lines.append(rst_paragraph(".. contents::\n"))
+
+ if "doc" in obj:
+ lines.append(rst_subtitle("Summary"))
+ lines.append(rst_paragraph(obj["doc"], 0))
+
+ # Operations
+ if "operations" in obj:
+ lines.append(rst_subtitle("Operations"))
+ lines.append(parse_operations(obj["operations"]["list"]))
+
+ # Multicast groups
+ if "mcast-groups" in obj:
+ lines.append(rst_subtitle("Multicast groups"))
+ lines.append(parse_mcast_group(obj["mcast-groups"]["list"]))
+
+ # Definitions
+ if "definitions" in obj:
+ lines.append(rst_subtitle("Definitions"))
+ lines.append(parse_definitions(obj["definitions"]))
+
+ # Attributes set
+ if "attribute-sets" in obj:
+ lines.append(rst_subtitle("Attribute sets"))
+ lines.append(parse_attr_sets(obj["attribute-sets"]))
+
+ return "\n".join(lines)
+
+
+# Main functions
+# ==============
+
+
+def parse_arguments() -> argparse.Namespace:
+ """Parse arguments from user"""
+ parser = argparse.ArgumentParser(description="Netlink RST generator")
+
+ parser.add_argument("-v", "--verbose", action="store_true")
+ parser.add_argument("-o", "--output", help="Output file name")
+
+ # Index and input are mutually exclusive
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument(
+ "-x", "--index", action="store_true", help="Generate the index page"
+ )
+ group.add_argument("-i", "--input", help="YAML file name")
+
+ args = parser.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+
+ if args.input and not os.path.isfile(args.input):
+ logging.warning("%s is not a valid file.", args.input)
+ sys.exit(-1)
+
+ if not args.output:
+ logging.error("No output file specified.")
+ sys.exit(-1)
+
+ if os.path.isfile(args.output):
+ logging.debug("%s already exists. Overwriting it.", args.output)
+
+ return args
+
+
+def parse_yaml_file(filename: str) -> str:
+ """Transform the YAML specified by filename into a rst-formmated string"""
+ with open(filename, "r", encoding="utf-8") as spec_file:
+ yaml_data = yaml.safe_load(spec_file)
+ content = parse_yaml(yaml_data)
+
+ return content
+
+
+def write_to_rstfile(content: str, filename: str) -> None:
+ """Write the generated content into an RST file"""
+ logging.debug("Saving RST file to %s", filename)
+
+ with open(filename, "w", encoding="utf-8") as rst_file:
+ rst_file.write(content)
+
+
+def generate_main_index_rst(output: str) -> None:
+ """Generate the `networking_spec/index` content and write to the file"""
+ lines = []
+
+ lines.append(rst_header())
+ lines.append(rst_title("Netlink Specification"))
+ lines.append(rst_toctree(1))
+
+ index_dir = os.path.dirname(output)
+ logging.debug("Looking for .rst files in %s", index_dir)
+ for filename in os.listdir(index_dir):
+ if not filename.endswith(".rst") or filename == "index.rst":
+ continue
+ lines.append(f" {filename.replace('.rst', '')}\n")
+
+ logging.debug("Writing an index file at %s", output)
+ write_to_rstfile("".join(lines), output)
+
+
+def main() -> None:
+ """Main function that reads the YAML files and generates the RST files"""
+
+ args = parse_arguments()
+
+ if args.input:
+ logging.debug("Parsing %s", args.input)
+ try:
+ content = parse_yaml_file(os.path.join(args.input))
+ except Exception as exception:
+ logging.warning("Failed to parse %s.", args.input)
+ logging.warning(exception)
+ sys.exit(-1)
+
+ write_to_rstfile(content, args.output)
+
+ if args.index:
+ # Generate the index RST file
+ generate_main_index_rst(args.output)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/net/ynl/ynl-regen.sh b/tools/net/ynl/ynl-regen.sh
index bdba24066cf1..a37304dcc88e 100755
--- a/tools/net/ynl/ynl-regen.sh
+++ b/tools/net/ynl/ynl-regen.sh
@@ -30,8 +30,8 @@ for f in $files; do
fi
echo -e "\tGEN ${params[2]}\t$f"
- $TOOL --mode ${params[2]} --${params[3]} --spec $KDIR/${params[0]} \
- $args -o $f
+ $TOOL --cmp-out --mode ${params[2]} --${params[3]} \
+ --spec $KDIR/${params[0]} $args -o $f
done
popd >>/dev/null
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index 5b1da2a32ea7..5aa133bf3688 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -45,9 +45,12 @@
#define format_parent_cgroup_path(buf, path) \
format_cgroup_path_pid(buf, path, getppid())
-#define format_classid_path(buf) \
- snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \
- CGROUP_WORK_DIR)
+#define format_classid_path_pid(buf, pid) \
+ snprintf(buf, sizeof(buf), "%s%s%d", NETCLS_MOUNT_PATH, \
+ CGROUP_WORK_DIR, pid)
+
+#define format_classid_path(buf) \
+ format_classid_path_pid(buf, getpid())
static __thread bool cgroup_workdir_mounted;
@@ -419,26 +422,23 @@ int create_and_get_cgroup(const char *relative_path)
}
/**
- * get_cgroup_id() - Get cgroup id for a particular cgroup path
- * @relative_path: The cgroup path, relative to the workdir, to join
+ * get_cgroup_id_from_path - Get cgroup id for a particular cgroup path
+ * @cgroup_workdir: The absolute cgroup path
*
* On success, it returns the cgroup id. On failure it returns 0,
* which is an invalid cgroup id.
* If there is a failure, it prints the error to stderr.
*/
-unsigned long long get_cgroup_id(const char *relative_path)
+unsigned long long get_cgroup_id_from_path(const char *cgroup_workdir)
{
int dirfd, err, flags, mount_id, fhsize;
union {
unsigned long long cgid;
unsigned char raw_bytes[8];
} id;
- char cgroup_workdir[PATH_MAX + 1];
struct file_handle *fhp, *fhp2;
unsigned long long ret = 0;
- format_cgroup_path(cgroup_workdir, relative_path);
-
dirfd = AT_FDCWD;
flags = 0;
fhsize = sizeof(*fhp);
@@ -474,6 +474,14 @@ free_mem:
return ret;
}
+unsigned long long get_cgroup_id(const char *relative_path)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_cgroup_path(cgroup_workdir, relative_path);
+ return get_cgroup_id_from_path(cgroup_workdir);
+}
+
int cgroup_setup_and_join(const char *path) {
int cg_fd;
@@ -523,10 +531,20 @@ int setup_classid_environment(void)
return 1;
}
- if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") &&
- errno != EBUSY) {
- log_err("mount cgroup net_cls");
- return 1;
+ if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls")) {
+ if (errno != EBUSY) {
+ log_err("mount cgroup net_cls");
+ return 1;
+ }
+
+ if (rmdir(NETCLS_MOUNT_PATH)) {
+ log_err("rmdir cgroup net_cls");
+ return 1;
+ }
+ if (umount(CGROUP_MOUNT_DFLT)) {
+ log_err("umount cgroup base");
+ return 1;
+ }
}
cleanup_classid_environment();
@@ -541,15 +559,16 @@ int setup_classid_environment(void)
/**
* set_classid() - Set a cgroupv1 net_cls classid
- * @id: the numeric classid
*
- * Writes the passed classid into the cgroup work dir's net_cls.classid
+ * Writes the classid into the cgroup work dir's net_cls.classid
* file in order to later on trigger socket tagging.
*
+ * We leverage the current pid as the classid, ensuring unique identification.
+ *
* On success, it returns 0, otherwise on failure it returns 1. If there
* is a failure, it prints the error to stderr.
*/
-int set_classid(unsigned int id)
+int set_classid(void)
{
char cgroup_workdir[PATH_MAX - 42];
char cgroup_classid_path[PATH_MAX + 1];
@@ -565,7 +584,7 @@ int set_classid(unsigned int id)
return 1;
}
- if (dprintf(fd, "%u\n", id) < 0) {
+ if (dprintf(fd, "%u\n", getpid()) < 0) {
log_err("Setting cgroup classid");
rc = 1;
}
@@ -607,3 +626,66 @@ void cleanup_classid_environment(void)
join_cgroup_from_top(NETCLS_MOUNT_PATH);
nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
}
+
+/**
+ * get_classid_cgroup_id - Get the cgroup id of a net_cls cgroup
+ */
+unsigned long long get_classid_cgroup_id(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_classid_path(cgroup_workdir);
+ return get_cgroup_id_from_path(cgroup_workdir);
+}
+
+/**
+ * get_cgroup1_hierarchy_id - Get the ID of a cgroup1 hierarchy from a cgroup1 subsys name
+ * @subsys_name: The cgroup1 subsys name, which can be retrieved from /proc/self/cgroup. It can be
+ * a named cgroup like "name=systemd", a single controller name like "net_cls", or multiple
+ * controllers like "net_cls,net_prio".
+ */
+int get_cgroup1_hierarchy_id(const char *subsys_name)
+{
+ char *c, *c2, *c3, *c4;
+ bool found = false;
+ char line[1024];
+ FILE *file;
+ int i, id;
+
+ if (!subsys_name)
+ return -1;
+
+ file = fopen("/proc/self/cgroup", "r");
+ if (!file) {
+ log_err("fopen /proc/self/cgroup");
+ return -1;
+ }
+
+ while (fgets(line, 1024, file)) {
+ i = 0;
+ for (c = strtok_r(line, ":", &c2); c && i < 2; c = strtok_r(NULL, ":", &c2)) {
+ if (i == 0) {
+ id = strtol(c, NULL, 10);
+ } else if (i == 1) {
+ if (!strcmp(c, subsys_name)) {
+ found = true;
+ break;
+ }
+
+ /* Multiple subsystems may share one single mount point */
+ for (c3 = strtok_r(c, ",", &c4); c3;
+ c3 = strtok_r(NULL, ",", &c4)) {
+ if (!strcmp(c3, subsys_name)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ i++;
+ }
+ if (found)
+ break;
+ }
+ fclose(file);
+ return found ? id : -1;
+}
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h
index 5c2cb9c8b546..ee053641c026 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.h
+++ b/tools/testing/selftests/bpf/cgroup_helpers.h
@@ -20,6 +20,7 @@ int get_root_cgroup(void);
int create_and_get_cgroup(const char *relative_path);
void remove_cgroup(const char *relative_path);
unsigned long long get_cgroup_id(const char *relative_path);
+int get_cgroup1_hierarchy_id(const char *subsys_name);
int join_cgroup(const char *relative_path);
int join_root_cgroup(void);
@@ -29,8 +30,9 @@ int setup_cgroup_environment(void);
void cleanup_cgroup_environment(void);
/* cgroupv1 related */
-int set_classid(unsigned int id);
+int set_classid(void);
int join_classid(void);
+unsigned long long get_classid_cgroup_id(void);
int setup_classid_environment(void);
void cleanup_classid_environment(void);
diff --git a/tools/testing/selftests/bpf/config.aarch64 b/tools/testing/selftests/bpf/config.aarch64
index 253821494884..29c8635c5722 100644
--- a/tools/testing/selftests/bpf/config.aarch64
+++ b/tools/testing/selftests/bpf/config.aarch64
@@ -1,4 +1,3 @@
-CONFIG_9P_FS=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
CONFIG_ARM_SMMU_V3=y
@@ -37,6 +36,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_REDUCED=n
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_NOTIFIERS=y
@@ -46,7 +46,6 @@ CONFIG_DEBUG_SG=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DEVTMPFS=y
-CONFIG_DRM_VIRTIO_GPU=y
CONFIG_DRM=y
CONFIG_DUMMY=y
CONFIG_EXPERT=y
@@ -67,7 +66,6 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HEADERS_INSTALL=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HUGETLBFS=y
-CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_HW_RANDOM=y
CONFIG_HZ_100=y
CONFIG_IDLE_PAGE_TRACKING=y
@@ -99,8 +97,6 @@ CONFIG_MEMCG=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_NAMESPACES=y
-CONFIG_NET_9P_VIRTIO=y
-CONFIG_NET_9P=y
CONFIG_NET_ACT_BPF=y
CONFIG_NET_ACT_GACT=y
CONFIG_NETDEVICES=y
@@ -140,7 +136,6 @@ CONFIG_SCHED_TRACER=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_VIRTIO=y
CONFIG_SCSI=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
@@ -167,16 +162,6 @@ CONFIG_UPROBES=y
CONFIG_USELIB=y
CONFIG_USER_NS=y
CONFIG_VETH=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_BLK=y
-CONFIG_VIRTIO_CONSOLE=y
-CONFIG_VIRTIO_FS=y
-CONFIG_VIRTIO_INPUT=y
-CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-CONFIG_VIRTIO_MMIO=y
-CONFIG_VIRTIO_NET=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_VSOCKETS_COMMON=y
CONFIG_VLAN_8021Q=y
CONFIG_VSOCKETS=y
CONFIG_VSOCKETS_LOOPBACK=y
diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x
index 2ba92167be35..e93330382849 100644
--- a/tools/testing/selftests/bpf/config.s390x
+++ b/tools/testing/selftests/bpf/config.s390x
@@ -1,4 +1,3 @@
-CONFIG_9P_FS=y
CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
CONFIG_AUDIT=y
CONFIG_BLK_CGROUP=y
@@ -84,8 +83,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_NAMESPACES=y
CONFIG_NET=y
-CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
CONFIG_NET_ACT_BPF=y
CONFIG_NET_ACT_GACT=y
CONFIG_NET_KEY=y
@@ -114,7 +111,6 @@ CONFIG_SAMPLE_SECCOMP=y
CONFIG_SAMPLES=y
CONFIG_SCHED_TRACER=y
CONFIG_SCSI=y
-CONFIG_SCSI_VIRTIO=y
CONFIG_SECURITY_NETWORK=y
CONFIG_STACK_TRACER=y
CONFIG_STATIC_KEYS_SELFTEST=y
@@ -136,11 +132,6 @@ CONFIG_UPROBES=y
CONFIG_USELIB=y
CONFIG_USER_NS=y
CONFIG_VETH=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_BLK=y
-CONFIG_VIRTIO_NET=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_VSOCKETS_COMMON=y
CONFIG_VLAN_8021Q=y
CONFIG_VSOCKETS=y
CONFIG_VSOCKETS_LOOPBACK=y
diff --git a/tools/testing/selftests/bpf/config.vm b/tools/testing/selftests/bpf/config.vm
new file mode 100644
index 000000000000..a9746ca78777
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.vm
@@ -0,0 +1,12 @@
+CONFIG_9P_FS=y
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_9P_FS_SECURITY=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_VSOCKETS_COMMON=y
diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
index 2e70a6048278..f7bfb2b09c82 100644
--- a/tools/testing/selftests/bpf/config.x86_64
+++ b/tools/testing/selftests/bpf/config.x86_64
@@ -1,6 +1,3 @@
-CONFIG_9P_FS=y
-CONFIG_9P_FS_POSIX_ACL=y
-CONFIG_9P_FS_SECURITY=y
CONFIG_AGP=y
CONFIG_AGP_AMD64=y
CONFIG_AGP_INTEL=y
@@ -45,7 +42,6 @@ CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPUSETS=y
CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_BLAKE2B=y
-CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_XXHASH=y
CONFIG_DCB=y
@@ -145,8 +141,6 @@ CONFIG_MEMORY_FAILURE=y
CONFIG_MINIX_SUBPARTITION=y
CONFIG_NAMESPACES=y
CONFIG_NET=y
-CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
CONFIG_NET_ACT_BPF=y
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_EMATCH=y
@@ -228,12 +222,6 @@ CONFIG_USER_NS=y
CONFIG_VALIDATE_FS_PARSER=y
CONFIG_VETH=y
CONFIG_VIRT_DRIVERS=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_BLK=y
-CONFIG_VIRTIO_CONSOLE=y
-CONFIG_VIRTIO_NET=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_VSOCKETS_COMMON=y
CONFIG_VLAN_8021Q=y
CONFIG_VSOCKETS=y
CONFIG_VSOCKETS_LOOPBACK=y
diff --git a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
index 8bf497a9843e..2ea36408816b 100644
--- a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
+++ b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
@@ -131,10 +131,17 @@ static bool is_lru(__u32 map_type)
map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
+static bool is_percpu(__u32 map_type)
+{
+ return map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
+}
+
struct upsert_opts {
__u32 map_type;
int map_fd;
__u32 n;
+ bool retry_for_nomem;
};
static int create_small_hash(void)
@@ -148,19 +155,38 @@ static int create_small_hash(void)
return map_fd;
}
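+
+/* Predicate for map_update_retriable() (declared in test_maps.h): presumably
+ * that helper keeps retrying bpf_map_update_elem() for as long as this
+ * callback accepts the returned errno, up to the attempt limit passed at the
+ * call site below.
+ */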
+static bool retry_for_nomem_fn(int err)
+{
+ return err == ENOMEM;
+}
+
static void *patch_map_thread(void *arg)
{
+ /* An 8KB blob covers per-CPU values for up to 1024 CPUs; it is shared between the N_THREADS updaters. */
+ static __u8 blob[8 << 10];
struct upsert_opts *opts = arg;
+ void *val_ptr;
int val;
int ret;
int i;
for (i = 0; i < opts->n; i++) {
- if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
+ if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
val = create_small_hash();
- else
+ val_ptr = &val;
+ } else if (is_percpu(opts->map_type)) {
+ val_ptr = blob;
+ } else {
val = rand();
- ret = bpf_map_update_elem(opts->map_fd, &i, &val, 0);
+ val_ptr = &val;
+ }
+
+ /* Retry up to 40 times on -ENOMEM (roughly 2 seconds in total), which should be enough. */
+ if (opts->retry_for_nomem)
+ ret = map_update_retriable(opts->map_fd, &i, val_ptr, 0,
+ 40, retry_for_nomem_fn);
+ else
+ ret = bpf_map_update_elem(opts->map_fd, &i, val_ptr, 0);
CHECK(ret < 0, "bpf_map_update_elem", "key=%d error: %s\n", i, strerror(errno));
if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
@@ -281,6 +307,13 @@ static void __test(int map_fd)
else
opts.n /= 2;
+ /* The per-cpu BPF memory allocator may fail to allocate a per-cpu
+ * pointer and may not refill its free llist in time, in which case
+ * bpf_map_update_elem() returns -ENOMEM. Retry to mitigate the
+ * problem for now.
+ */
+ opts.retry_for_nomem = is_percpu(opts.map_type) && (info.map_flags & BPF_F_NO_PREALLOC);
+
/*
* Upsert keys [0, n) under some competition: with random values from
* N_THREADS threads. Check values, then delete all elements and check
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
index 465c1c3a3d3c..4ebd0da898f5 100644
--- a/tools/testing/selftests/bpf/prog_tests/align.c
+++ b/tools/testing/selftests/bpf/prog_tests/align.c
@@ -40,7 +40,7 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1", "ctx(off=0,imm=0)"},
+ {0, "R1", "ctx()"},
{0, "R10", "fp0"},
{0, "R3_w", "2"},
{1, "R3_w", "4"},
@@ -68,7 +68,7 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1", "ctx(off=0,imm=0)"},
+ {0, "R1", "ctx()"},
{0, "R10", "fp0"},
{0, "R3_w", "1"},
{1, "R3_w", "2"},
@@ -97,7 +97,7 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1", "ctx(off=0,imm=0)"},
+ {0, "R1", "ctx()"},
{0, "R10", "fp0"},
{0, "R3_w", "4"},
{1, "R3_w", "8"},
@@ -119,7 +119,7 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1", "ctx(off=0,imm=0)"},
+ {0, "R1", "ctx()"},
{0, "R10", "fp0"},
{0, "R3_w", "7"},
{1, "R3_w", "7"},
@@ -162,13 +162,13 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {6, "R0_w", "pkt(off=8,r=8,imm=0)"},
+ {6, "R0_w", "pkt(off=8,r=8)"},
{6, "R3_w", "var_off=(0x0; 0xff)"},
{7, "R3_w", "var_off=(0x0; 0x1fe)"},
{8, "R3_w", "var_off=(0x0; 0x3fc)"},
{9, "R3_w", "var_off=(0x0; 0x7f8)"},
{10, "R3_w", "var_off=(0x0; 0xff0)"},
- {12, "R3_w", "pkt_end(off=0,imm=0)"},
+ {12, "R3_w", "pkt_end()"},
{17, "R4_w", "var_off=(0x0; 0xff)"},
{18, "R4_w", "var_off=(0x0; 0x1fe0)"},
{19, "R4_w", "var_off=(0x0; 0xff0)"},
@@ -235,11 +235,11 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {2, "R5_w", "pkt(off=0,r=0,imm=0)"},
- {4, "R5_w", "pkt(off=14,r=0,imm=0)"},
- {5, "R4_w", "pkt(off=14,r=0,imm=0)"},
- {9, "R2", "pkt(off=0,r=18,imm=0)"},
- {10, "R5", "pkt(off=14,r=18,imm=0)"},
+ {2, "R5_w", "pkt(r=0)"},
+ {4, "R5_w", "pkt(off=14,r=0)"},
+ {5, "R4_w", "pkt(off=14,r=0)"},
+ {9, "R2", "pkt(r=18)"},
+ {10, "R5", "pkt(off=14,r=18)"},
{10, "R4_w", "var_off=(0x0; 0xff)"},
{13, "R4_w", "var_off=(0x0; 0xffff)"},
{14, "R4_w", "var_off=(0x0; 0xffff)"},
@@ -299,7 +299,7 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w", "pkt(off=0,r=8,imm=0)"},
+ {6, "R2_w", "pkt(r=8)"},
{7, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
@@ -337,7 +337,7 @@ static struct bpf_align_test tests[] = {
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
- {26, "R5_w", "pkt(off=14,r=8,"},
+ {26, "R5_w", "pkt(off=14,r=8)"},
/* Variable offset is added to R5, resulting in a
* variable offset of (4n). See comment for insn #18
* for R4 = R5 trick.
@@ -397,7 +397,7 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w", "pkt(off=0,r=8,imm=0)"},
+ {6, "R2_w", "pkt(r=8)"},
{7, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
{8, "R6_w", "var_off=(0x2; 0x7fc)"},
@@ -459,7 +459,7 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
- {3, "R5_w", "pkt_end(off=0,imm=0)"},
+ {3, "R5_w", "pkt_end()"},
/* (ptr - ptr) << 2 == unknown, (4n) */
{5, "R5_w", "var_off=(0x0; 0xfffffffffffffffc)"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
@@ -513,7 +513,7 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w", "pkt(off=0,r=8,imm=0)"},
+ {6, "R2_w", "pkt(r=8)"},
{8, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
{9, "R6_w", "var_off=(0x2; 0x7fc)"},
@@ -566,7 +566,7 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w", "pkt(off=0,r=8,imm=0)"},
+ {6, "R2_w", "pkt(r=8)"},
{9, "R6_w", "var_off=(0x0; 0x3c)"},
/* Adding 14 makes R6 be (4n+2) */
{10, "R6_w", "var_off=(0x2; 0x7c)"},
@@ -659,14 +659,14 @@ static int do_test_single(struct bpf_align_test *test)
/* Check the next line as well in case the previous line
* did not have a corresponding bpf insn. Example:
* func#0 @0
- * 0: R1=ctx(off=0,imm=0) R10=fp0
+ * 0: R1=ctx() R10=fp0
* 0: (b7) r3 = 2 ; R3_w=2
*
* Sometimes it's actually two lines below, e.g. when
* searching for "6: R3_w=scalar(umax=255,var_off=(0x0; 0xff))":
- * from 4 to 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0
- * 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0
- * 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(off=0,r=8,imm=0) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
+ * from 4 to 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0
+ * 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0
+ * 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(r=8) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
*/
while (!(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
cur_line = -1;
diff --git a/tools/testing/selftests/bpf/prog_tests/bind_perm.c b/tools/testing/selftests/bpf/prog_tests/bind_perm.c
index a1766a298bb7..f7cd129cb82b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bind_perm.c
+++ b/tools/testing/selftests/bpf/prog_tests/bind_perm.c
@@ -9,8 +9,6 @@
#include "cap_helpers.h"
#include "bind_perm.skel.h"
-static int duration;
-
static int create_netns(void)
{
if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
@@ -27,7 +25,7 @@ void try_bind(int family, int port, int expected_errno)
int fd = -1;
fd = socket(family, SOCK_STREAM, 0);
- if (CHECK(fd < 0, "fd", "errno %d", errno))
+ if (!ASSERT_GE(fd, 0, "socket"))
goto close_socket;
if (family == AF_INET) {
@@ -60,7 +58,7 @@ void test_bind_perm(void)
return;
cgroup_fd = test__join_cgroup("/bind_perm");
- if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno))
+ if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
return;
skel = bind_perm__open_and_load();
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index e3498f607b49..618af9dfae9b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -34,8 +34,6 @@
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"
-static int duration;
-
static void test_btf_id_or_null(void)
{
struct bpf_iter_test_kern3 *skel;
@@ -64,7 +62,7 @@ static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_
/* not check contents, but ensure read() ends without error */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));
+ ASSERT_GE(len, 0, "read");
close(iter_fd);
@@ -334,6 +332,8 @@ static void test_task_stack(void)
do_dummy_read(skel->progs.dump_task_stack);
do_dummy_read(skel->progs.get_task_user_stacks);
+ ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");
+
bpf_iter_task_stack__destroy(skel);
}
@@ -413,7 +413,7 @@ static int do_btf_read(struct bpf_iter_task_btf *skel)
goto free_link;
}
- if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(err, 0, "read"))
goto free_link;
ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
@@ -526,11 +526,11 @@ static int do_read_with_fd(int iter_fd, const char *expected,
start = 0;
while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
start += len;
- if (CHECK(start >= 16, "read", "read len %d\n", len))
+ if (!ASSERT_LT(start, 16, "read"))
return -1;
read_buf_len = read_one_char ? 1 : 16 - start;
}
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
return -1;
if (!ASSERT_STREQ(buf, expected, "read"))
@@ -571,8 +571,7 @@ static int do_read(const char *path, const char *expected)
int err, iter_fd;
iter_fd = open(path, O_RDONLY);
- if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
- path, strerror(errno)))
+ if (!ASSERT_GE(iter_fd, 0, "open"))
return -1;
err = do_read_with_fd(iter_fd, expected, false);
@@ -600,7 +599,7 @@ static void test_file_iter(void)
unlink(path);
err = bpf_link__pin(link, path);
- if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
+ if (!ASSERT_OK(err, "pin_iter"))
goto free_link;
err = do_read(path, "abcd");
@@ -651,12 +650,10 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
* overflow and needs restart.
*/
map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
- if (CHECK(map1_fd < 0, "bpf_map_create",
- "map_creation failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(map1_fd, 0, "bpf_map_create"))
goto out;
map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
- if (CHECK(map2_fd < 0, "bpf_map_create",
- "map_creation failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(map2_fd, 0, "bpf_map_create"))
goto free_map1;
/* bpf_seq_printf kernel buffer is 8 pages, so one map
@@ -685,14 +682,12 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
/* setup filtering map_id in bpf program */
map_info_len = sizeof(map_info);
err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
- if (CHECK(err, "get_map_info", "get map info failed: %s\n",
- strerror(errno)))
+ if (!ASSERT_OK(err, "get_map_info"))
goto free_map2;
skel->bss->map1_id = map_info.id;
err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
- if (CHECK(err, "get_map_info", "get map info failed: %s\n",
- strerror(errno)))
+ if (!ASSERT_OK(err, "get_map_info"))
goto free_map2;
skel->bss->map2_id = map_info.id;
@@ -705,7 +700,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
goto free_link;
buf = malloc(expected_read_len);
- if (!buf)
+ if (!ASSERT_OK_PTR(buf, "malloc"))
goto close_iter;
/* do read */
@@ -714,16 +709,14 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
while ((len = read(iter_fd, buf, expected_read_len)) > 0)
total_read_len += len;
- CHECK(len != -1 || errno != E2BIG, "read",
- "expected ret -1, errno E2BIG, but get ret %d, error %s\n",
- len, strerror(errno));
+ ASSERT_EQ(len, -1, "read");
+ ASSERT_EQ(errno, E2BIG, "read");
goto free_buf;
} else if (!ret1) {
while ((len = read(iter_fd, buf, expected_read_len)) > 0)
total_read_len += len;
- if (CHECK(len < 0, "read", "read failed: %s\n",
- strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto free_buf;
} else {
do {
@@ -732,8 +725,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
total_read_len += len;
} while (len > 0 || len == -EAGAIN);
- if (CHECK(len < 0, "read", "read failed: %s\n",
- strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto free_buf;
}
@@ -836,7 +828,7 @@ static void test_bpf_hash_map(void)
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
@@ -878,6 +870,8 @@ static void test_bpf_percpu_hash_map(void)
skel->rodata->num_cpus = bpf_num_possible_cpus();
val = malloc(8 * bpf_num_possible_cpus());
+ if (!ASSERT_OK_PTR(val, "malloc"))
+ goto out;
err = bpf_iter_bpf_percpu_hash_map__load(skel);
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__load"))
@@ -917,7 +911,7 @@ static void test_bpf_percpu_hash_map(void)
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
@@ -983,17 +977,14 @@ static void test_bpf_array_map(void)
start = 0;
while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
start += len;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
res_first_key = *(__u32 *)buf;
res_first_val = *(__u64 *)(buf + sizeof(__u32));
- if (CHECK(res_first_key != 0 || res_first_val != first_val,
- "bpf_seq_write",
- "seq_write failure: first key %u vs expected 0, "
- " first value %llu vs expected %llu\n",
- res_first_key, res_first_val, first_val))
+ if (!ASSERT_EQ(res_first_key, 0, "bpf_seq_write") ||
+ !ASSERT_EQ(res_first_val, first_val, "bpf_seq_write"))
goto close_iter;
if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
@@ -1057,6 +1048,8 @@ static void test_bpf_percpu_array_map(void)
skel->rodata->num_cpus = bpf_num_possible_cpus();
val = malloc(8 * bpf_num_possible_cpus());
+ if (!ASSERT_OK_PTR(val, "malloc"))
+ goto out;
err = bpf_iter_bpf_percpu_array_map__load(skel);
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__load"))
@@ -1092,7 +1085,7 @@ static void test_bpf_percpu_array_map(void)
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
@@ -1131,6 +1124,7 @@ static void test_bpf_sk_storage_delete(void)
sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(sock_fd, 0, "socket"))
goto out;
+
err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
if (!ASSERT_OK(err, "map_update"))
goto out;
@@ -1151,14 +1145,19 @@ static void test_bpf_sk_storage_delete(void)
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
- if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
- "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
- goto close_iter;
+
+ /* Note: the following assertions ensure the value was deleted by
+ * checking that bpf_map_lookup_elem() fails with ENOENT, which may
+ * seem counterintuitive at first.
+ */
+ ASSERT_ERR(err, "bpf_map_lookup_elem");
+ ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem");
close_iter:
close(iter_fd);
@@ -1203,17 +1202,15 @@ static void test_bpf_sk_storage_get(void)
do_dummy_read(skel->progs.fill_socket_owner);
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
- if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
- "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
- getpid(), val, err))
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem") ||
+ !ASSERT_EQ(val, getpid(), "bpf_map_lookup_elem"))
goto close_socket;
do_dummy_read(skel->progs.negate_socket_local_storage);
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
- CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
- "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
- -getpid(), val, err);
+ ASSERT_OK(err, "bpf_map_lookup_elem");
+ ASSERT_EQ(val, -getpid(), "bpf_map_lookup_elem");
close_socket:
close(sock_fd);
@@ -1290,7 +1287,7 @@ static void test_bpf_sk_storage_map(void)
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
index 675b90b15280..f09d6ac2ef09 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
@@ -25,7 +25,7 @@ void serial_test_bpf_obj_id(void)
*/
__u32 map_ids[nr_iters + 1];
char jited_insns[128], xlated_insns[128], zeros[128], tp_name[128];
- __u32 i, next_id, info_len, nr_id_found, duration = 0;
+ __u32 i, next_id, info_len, nr_id_found;
struct timespec real_time_ts, boot_time_ts;
int err = 0;
__u64 array_value;
@@ -33,16 +33,16 @@ void serial_test_bpf_obj_id(void)
time_t now, load_time;
err = bpf_prog_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
+ ASSERT_LT(err, 0, "bpf_prog_get_fd_by_id");
+ ASSERT_EQ(errno, ENOENT, "bpf_prog_get_fd_by_id");
err = bpf_map_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
+ ASSERT_LT(err, 0, "bpf_map_get_fd_by_id");
+ ASSERT_EQ(errno, ENOENT, "bpf_map_get_fd_by_id");
err = bpf_link_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno);
+ ASSERT_LT(err, 0, "bpf_link_get_fd_by_id");
+ ASSERT_EQ(errno, ENOENT, "bpf_link_get_fd_by_id");
/* Check bpf_map_get_info_by_fd() */
bzero(zeros, sizeof(zeros));
@@ -53,25 +53,26 @@ void serial_test_bpf_obj_id(void)
/* test_obj_id.o is a dumb prog. It should never fail
* to load.
*/
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "bpf_prog_test_load"))
continue;
/* Insert a magic value to the map */
map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
- if (CHECK_FAIL(map_fds[i] < 0))
+ if (!ASSERT_GE(map_fds[i], 0, "bpf_find_map"))
goto done;
+
err = bpf_map_update_elem(map_fds[i], &array_key,
&array_magic_value, 0);
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
goto done;
- prog = bpf_object__find_program_by_name(objs[i],
- "test_obj_id");
- if (CHECK_FAIL(!prog))
+ prog = bpf_object__find_program_by_name(objs[i], "test_obj_id");
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto done;
+
links[i] = bpf_program__attach(prog);
err = libbpf_get_error(links[i]);
- if (CHECK(err, "prog_attach", "prog #%d, err %d\n", i, err)) {
+ if (!ASSERT_OK(err, "bpf_program__attach")) {
links[i] = NULL;
goto done;
}
@@ -81,24 +82,14 @@ void serial_test_bpf_obj_id(void)
bzero(&map_infos[i], info_len);
err = bpf_map_get_info_by_fd(map_fds[i], &map_infos[i],
&info_len);
- if (CHECK(err ||
- map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
- map_infos[i].key_size != sizeof(__u32) ||
- map_infos[i].value_size != sizeof(__u64) ||
- map_infos[i].max_entries != 1 ||
- map_infos[i].map_flags != 0 ||
- info_len != sizeof(struct bpf_map_info) ||
- strcmp((char *)map_infos[i].name, expected_map_name),
- "get-map-info(fd)",
- "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
- err, errno,
- map_infos[i].type, BPF_MAP_TYPE_ARRAY,
- info_len, sizeof(struct bpf_map_info),
- map_infos[i].key_size,
- map_infos[i].value_size,
- map_infos[i].max_entries,
- map_infos[i].map_flags,
- map_infos[i].name, expected_map_name))
+ if (!ASSERT_OK(err, "bpf_map_get_info_by_fd") ||
+ !ASSERT_EQ(map_infos[i].type, BPF_MAP_TYPE_ARRAY, "map_type") ||
+ !ASSERT_EQ(map_infos[i].key_size, sizeof(__u32), "key_size") ||
+ !ASSERT_EQ(map_infos[i].value_size, sizeof(__u64), "value_size") ||
+ !ASSERT_EQ(map_infos[i].max_entries, 1, "max_entries") ||
+ !ASSERT_EQ(map_infos[i].map_flags, 0, "map_flags") ||
+ !ASSERT_EQ(info_len, sizeof(struct bpf_map_info), "map_info_len") ||
+ !ASSERT_STREQ((char *)map_infos[i].name, expected_map_name, "map_name"))
goto done;
/* Check getting prog info */
@@ -112,48 +103,34 @@ void serial_test_bpf_obj_id(void)
prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
prog_infos[i].nr_map_ids = 2;
+
err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "clock_gettime"))
goto done;
+
err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "clock_gettime"))
goto done;
+
err = bpf_prog_get_info_by_fd(prog_fds[i], &prog_infos[i],
&info_len);
load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
+ (prog_infos[i].load_time / nsec_per_sec);
- if (CHECK(err ||
- prog_infos[i].type != BPF_PROG_TYPE_RAW_TRACEPOINT ||
- info_len != sizeof(struct bpf_prog_info) ||
- (env.jit_enabled && !prog_infos[i].jited_prog_len) ||
- (env.jit_enabled &&
- !memcmp(jited_insns, zeros, sizeof(zeros))) ||
- !prog_infos[i].xlated_prog_len ||
- !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
- load_time < now - 60 || load_time > now + 60 ||
- prog_infos[i].created_by_uid != my_uid ||
- prog_infos[i].nr_map_ids != 1 ||
- *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
- strcmp((char *)prog_infos[i].name, expected_prog_name),
- "get-prog-info(fd)",
- "err %d errno %d i %d type %d(%d) info_len %u(%zu) "
- "jit_enabled %d jited_prog_len %u xlated_prog_len %u "
- "jited_prog %d xlated_prog %d load_time %lu(%lu) "
- "uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) "
- "name %s(%s)\n",
- err, errno, i,
- prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
- info_len, sizeof(struct bpf_prog_info),
- env.jit_enabled,
- prog_infos[i].jited_prog_len,
- prog_infos[i].xlated_prog_len,
- !!memcmp(jited_insns, zeros, sizeof(zeros)),
- !!memcmp(xlated_insns, zeros, sizeof(zeros)),
- load_time, now,
- prog_infos[i].created_by_uid, my_uid,
- prog_infos[i].nr_map_ids, 1,
- *(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
- prog_infos[i].name, expected_prog_name))
+
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
+ !ASSERT_EQ(prog_infos[i].type, BPF_PROG_TYPE_RAW_TRACEPOINT, "prog_type") ||
+ !ASSERT_EQ(info_len, sizeof(struct bpf_prog_info), "prog_info_len") ||
+ !ASSERT_FALSE((env.jit_enabled && !prog_infos[i].jited_prog_len), "jited_prog_len") ||
+ !ASSERT_FALSE((env.jit_enabled && !memcmp(jited_insns, zeros, sizeof(zeros))),
+ "jited_insns") ||
+ !ASSERT_NEQ(prog_infos[i].xlated_prog_len, 0, "xlated_prog_len") ||
+ !ASSERT_NEQ(memcmp(xlated_insns, zeros, sizeof(zeros)), 0, "xlated_insns") ||
+ !ASSERT_GE(load_time, (now - 60), "load_time") ||
+ !ASSERT_LE(load_time, (now + 60), "load_time") ||
+ !ASSERT_EQ(prog_infos[i].created_by_uid, my_uid, "created_by_uid") ||
+ !ASSERT_EQ(prog_infos[i].nr_map_ids, 1, "nr_map_ids") ||
+ !ASSERT_EQ(*(int *)(long)prog_infos[i].map_ids, map_infos[i].id, "map_ids") ||
+ !ASSERT_STREQ((char *)prog_infos[i].name, expected_prog_name, "prog_name"))
goto done;
/* Check getting link info */
@@ -163,25 +140,12 @@ void serial_test_bpf_obj_id(void)
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
err = bpf_link_get_info_by_fd(bpf_link__fd(links[i]),
&link_infos[i], &info_len);
- if (CHECK(err ||
- link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
- link_infos[i].prog_id != prog_infos[i].id ||
- link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
- strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
- "sys_enter") ||
- info_len != sizeof(struct bpf_link_info),
- "get-link-info(fd)",
- "err %d errno %d info_len %u(%zu) type %d(%d) id %d "
- "prog_id %d (%d) tp_name %s(%s)\n",
- err, errno,
- info_len, sizeof(struct bpf_link_info),
- link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
- link_infos[i].id,
- link_infos[i].prog_id, prog_infos[i].id,
- (const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
- "sys_enter"))
+ if (!ASSERT_OK(err, "bpf_link_get_info_by_fd") ||
+ !ASSERT_EQ(link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type") ||
+ !ASSERT_EQ(link_infos[i].prog_id, prog_infos[i].id, "prog_id") ||
+ !ASSERT_EQ(link_infos[i].raw_tracepoint.tp_name, ptr_to_u64(&tp_name), "&tp_name") ||
+ !ASSERT_STREQ(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), "sys_enter", "tp_name"))
goto done;
-
}
/* Check bpf_prog_get_next_id() */
@@ -190,7 +154,7 @@ void serial_test_bpf_obj_id(void)
while (!bpf_prog_get_next_id(next_id, &next_id)) {
struct bpf_prog_info prog_info = {};
__u32 saved_map_id;
- int prog_fd;
+ int prog_fd, cmp_res;
info_len = sizeof(prog_info);
@@ -198,9 +162,7 @@ void serial_test_bpf_obj_id(void)
if (prog_fd < 0 && errno == ENOENT)
/* The bpf_prog is in the dead row */
continue;
- if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
- "prog_fd %d next_id %d errno %d\n",
- prog_fd, next_id, errno))
+ if (!ASSERT_GE(prog_fd, 0, "bpf_prog_get_fd_by_id"))
break;
for (i = 0; i < nr_iters; i++)
@@ -218,9 +180,8 @@ void serial_test_bpf_obj_id(void)
*/
prog_info.nr_map_ids = 1;
err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
- if (CHECK(!err || errno != EFAULT,
- "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
- err, errno, EFAULT))
+ if (!ASSERT_ERR(err, "bpf_prog_get_info_by_fd") ||
+ !ASSERT_EQ(errno, EFAULT, "bpf_prog_get_info_by_fd"))
break;
bzero(&prog_info, sizeof(prog_info));
info_len = sizeof(prog_info);
@@ -231,27 +192,22 @@ void serial_test_bpf_obj_id(void)
err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
prog_infos[i].jited_prog_insns = 0;
prog_infos[i].xlated_prog_insns = 0;
- CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
- memcmp(&prog_info, &prog_infos[i], info_len) ||
- *(int *)(long)prog_info.map_ids != saved_map_id,
- "get-prog-info(next_id->fd)",
- "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
- err, errno, info_len, sizeof(struct bpf_prog_info),
- memcmp(&prog_info, &prog_infos[i], info_len),
- *(int *)(long)prog_info.map_ids, saved_map_id);
+ cmp_res = memcmp(&prog_info, &prog_infos[i], info_len);
+
+ ASSERT_OK(err, "bpf_prog_get_info_by_fd");
+ ASSERT_EQ(info_len, sizeof(struct bpf_prog_info), "prog_info_len");
+ ASSERT_OK(cmp_res, "memcmp");
+ ASSERT_EQ(*(int *)(long)prog_info.map_ids, saved_map_id, "map_id");
close(prog_fd);
}
- CHECK(nr_id_found != nr_iters,
- "check total prog id found by get_next_id",
- "nr_id_found %u(%u)\n",
- nr_id_found, nr_iters);
+ ASSERT_EQ(nr_id_found, nr_iters, "prog_nr_id_found");
/* Check bpf_map_get_next_id() */
nr_id_found = 0;
next_id = 0;
while (!bpf_map_get_next_id(next_id, &next_id)) {
struct bpf_map_info map_info = {};
- int map_fd;
+ int map_fd, cmp_res;
info_len = sizeof(map_info);
@@ -259,9 +215,7 @@ void serial_test_bpf_obj_id(void)
if (map_fd < 0 && errno == ENOENT)
/* The bpf_map is in the dead row */
continue;
- if (CHECK(map_fd < 0, "get-map-fd(next_id)",
- "map_fd %d next_id %u errno %d\n",
- map_fd, next_id, errno))
+ if (!ASSERT_GE(map_fd, 0, "bpf_map_get_fd_by_id"))
break;
for (i = 0; i < nr_iters; i++)
@@ -274,25 +228,19 @@ void serial_test_bpf_obj_id(void)
nr_id_found++;
err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
goto done;
err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
- CHECK(err || info_len != sizeof(struct bpf_map_info) ||
- memcmp(&map_info, &map_infos[i], info_len) ||
- array_value != array_magic_value,
- "check get-map-info(next_id->fd)",
- "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
- err, errno, info_len, sizeof(struct bpf_map_info),
- memcmp(&map_info, &map_infos[i], info_len),
- array_value, array_magic_value);
+ cmp_res = memcmp(&map_info, &map_infos[i], info_len);
+ ASSERT_OK(err, "bpf_map_get_info_by_fd");
+ ASSERT_EQ(info_len, sizeof(struct bpf_map_info), "info_len");
+ ASSERT_OK(cmp_res, "memcmp");
+ ASSERT_EQ(array_value, array_magic_value, "array_value");
close(map_fd);
}
- CHECK(nr_id_found != nr_iters,
- "check total map id found by get_next_id",
- "nr_id_found %u(%u)\n",
- nr_id_found, nr_iters);
+ ASSERT_EQ(nr_id_found, nr_iters, "map_nr_id_found");
/* Check bpf_link_get_next_id() */
nr_id_found = 0;
@@ -308,9 +256,7 @@ void serial_test_bpf_obj_id(void)
if (link_fd < 0 && errno == ENOENT)
/* The bpf_link is in the dead row */
continue;
- if (CHECK(link_fd < 0, "get-link-fd(next_id)",
- "link_fd %d next_id %u errno %d\n",
- link_fd, next_id, errno))
+ if (!ASSERT_GE(link_fd, 0, "bpf_link_get_fd_by_id"))
break;
for (i = 0; i < nr_iters; i++)
@@ -325,17 +271,13 @@ void serial_test_bpf_obj_id(void)
err = bpf_link_get_info_by_fd(link_fd, &link_info, &info_len);
cmp_res = memcmp(&link_info, &link_infos[i],
offsetof(struct bpf_link_info, raw_tracepoint));
- CHECK(err || info_len != sizeof(link_info) || cmp_res,
- "check get-link-info(next_id->fd)",
- "err %d errno %d info_len %u(%zu) memcmp %d\n",
- err, errno, info_len, sizeof(struct bpf_link_info),
- cmp_res);
+ ASSERT_OK(err, "bpf_link_get_info_by_fd");
+ ASSERT_EQ(info_len, sizeof(link_info), "info_len");
+ ASSERT_OK(cmp_res, "memcmp");
close(link_fd);
}
- CHECK(nr_id_found != nr_iters,
- "check total link id found by get_next_id",
- "nr_id_found %u(%u)\n", nr_id_found, nr_iters);
+ ASSERT_EQ(nr_id_found, nr_iters, "link_nr_id_found");
done:
for (i = 0; i < nr_iters; i++) {
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 4aabeaa525d4..a88e6e07e4f5 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -20,15 +20,14 @@
static const unsigned int total_bytes = 10 * 1024 * 1024;
static int expected_stg = 0xeB9F;
-static int stop, duration;
+static int stop;
static int settcpca(int fd, const char *tcp_ca)
{
int err;
err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca));
- if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n",
- errno))
+ if (!ASSERT_NEQ(err, -1, "setsockopt"))
return -1;
return 0;
@@ -65,8 +64,7 @@ static void *server(void *arg)
bytes += nr_sent;
}
- CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n",
- bytes, total_bytes, nr_sent, errno);
+ ASSERT_EQ(bytes, total_bytes, "send");
done:
if (fd >= 0)
@@ -92,10 +90,11 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
WRITE_ONCE(stop, 0);
lfd = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(lfd == -1, "socket", "errno:%d\n", errno))
+ if (!ASSERT_NEQ(lfd, -1, "socket"))
return;
+
fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) {
+ if (!ASSERT_NEQ(fd, -1, "socket")) {
close(lfd);
return;
}
@@ -108,26 +107,27 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
sa6.sin6_family = AF_INET6;
sa6.sin6_addr = in6addr_loopback;
err = bind(lfd, (struct sockaddr *)&sa6, addrlen);
- if (CHECK(err == -1, "bind", "errno:%d\n", errno))
+ if (!ASSERT_NEQ(err, -1, "bind"))
goto done;
+
err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen);
- if (CHECK(err == -1, "getsockname", "errno:%d\n", errno))
+ if (!ASSERT_NEQ(err, -1, "getsockname"))
goto done;
+
err = listen(lfd, 1);
- if (CHECK(err == -1, "listen", "errno:%d\n", errno))
+ if (!ASSERT_NEQ(err, -1, "listen"))
goto done;
if (sk_stg_map) {
err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
&expected_stg, BPF_NOEXIST);
- if (CHECK(err, "bpf_map_update_elem(sk_stg_map)",
- "err:%d errno:%d\n", err, errno))
+ if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)"))
goto done;
}
/* connect to server */
err = connect(fd, (struct sockaddr *)&sa6, addrlen);
- if (CHECK(err == -1, "connect", "errno:%d\n", errno))
+ if (!ASSERT_NEQ(err, -1, "connect"))
goto done;
if (sk_stg_map) {
@@ -135,14 +135,13 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
&tmp_stg);
- if (CHECK(!err || errno != ENOENT,
- "bpf_map_lookup_elem(sk_stg_map)",
- "err:%d errno:%d\n", err, errno))
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") ||
+ !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)"))
goto done;
}
err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
- if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno))
+ if (!ASSERT_OK(err, "pthread_create"))
goto done;
/* recv total_bytes */
@@ -156,13 +155,12 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
bytes += nr_recv;
}
- CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n",
- bytes, total_bytes, nr_recv, errno);
+ ASSERT_EQ(bytes, total_bytes, "recv");
WRITE_ONCE(stop, 1);
pthread_join(srv_thread, &thread_ret);
- CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld",
- PTR_ERR(thread_ret));
+ ASSERT_OK(IS_ERR(thread_ret), "thread_ret");
+
done:
close(lfd);
close(fd);
@@ -174,7 +172,7 @@ static void test_cubic(void)
struct bpf_link *link;
cubic_skel = bpf_cubic__open_and_load();
- if (CHECK(!cubic_skel, "bpf_cubic__open_and_load", "failed\n"))
+ if (!ASSERT_OK_PTR(cubic_skel, "bpf_cubic__open_and_load"))
return;
link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
@@ -197,7 +195,7 @@ static void test_dctcp(void)
struct bpf_link *link;
dctcp_skel = bpf_dctcp__open_and_load();
- if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n"))
+ if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load"))
return;
link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
@@ -207,9 +205,7 @@ static void test_dctcp(void)
}
do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
- CHECK(dctcp_skel->bss->stg_result != expected_stg,
- "Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n",
- dctcp_skel->bss->stg_result, expected_stg);
+ ASSERT_EQ(dctcp_skel->bss->stg_result, expected_stg, "stg_result");
bpf_link__destroy(link);
bpf_dctcp__destroy(dctcp_skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index 731c343897d8..e770912fc1d2 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -35,7 +35,7 @@ static int check_load(const char *file, enum bpf_prog_type type)
}
bpf_program__set_type(prog, type);
- bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32);
+ bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS);
bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags);
err = bpf_object__load(obj);
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 92d51f377fe5..8fb4a04fbbc0 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -5265,6 +5265,7 @@ static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
#endif
assert(0);
+ return 0;
}
static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c
new file mode 100644
index 000000000000..74d6d7546f40
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "test_cgroup1_hierarchy.skel.h"
+
+static void bpf_cgroup1(struct test_cgroup1_hierarchy *skel)
+{
+ struct bpf_link *lsm_link, *fentry_link;
+ int err;
+
+ /* Attach LSM prog first */
+ lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run);
+ if (!ASSERT_OK_PTR(lsm_link, "lsm_attach"))
+ return;
+
+ /* LSM prog will be triggered when attaching fentry */
+ fentry_link = bpf_program__attach_trace(skel->progs.fentry_run);
+ ASSERT_NULL(fentry_link, "fentry_attach_fail");
+
+ err = bpf_link__destroy(lsm_link);
+ ASSERT_OK(err, "destroy_lsm");
+}
+
+static void bpf_cgroup1_sleepable(struct test_cgroup1_hierarchy *skel)
+{
+ struct bpf_link *lsm_link, *fentry_link;
+ int err;
+
+ /* Attach LSM prog first */
+ lsm_link = bpf_program__attach_lsm(skel->progs.lsm_s_run);
+ if (!ASSERT_OK_PTR(lsm_link, "lsm_attach"))
+ return;
+
+ /* LSM prog will be triggered when attaching fentry */
+ fentry_link = bpf_program__attach_trace(skel->progs.fentry_run);
+ ASSERT_NULL(fentry_link, "fentry_attach_fail");
+
+ err = bpf_link__destroy(lsm_link);
+ ASSERT_OK(err, "destroy_lsm");
+}
+
+static void bpf_cgroup1_invalid_id(struct test_cgroup1_hierarchy *skel)
+{
+ struct bpf_link *lsm_link, *fentry_link;
+ int err;
+
+ /* Attach LSM prog first */
+ lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run);
+ if (!ASSERT_OK_PTR(lsm_link, "lsm_attach"))
+ return;
+
+ /* LSM prog will be triggered when attaching fentry */
+ fentry_link = bpf_program__attach_trace(skel->progs.fentry_run);
+ if (!ASSERT_OK_PTR(fentry_link, "fentry_attach_success"))
+ goto cleanup;
+
+ err = bpf_link__destroy(fentry_link);
+ ASSERT_OK(err, "destroy_lsm");
+
+cleanup:
+ err = bpf_link__destroy(lsm_link);
+ ASSERT_OK(err, "destroy_fentry");
+}
+
+void test_cgroup1_hierarchy(void)
+{
+ struct test_cgroup1_hierarchy *skel;
+ __u64 current_cgid;
+ int hid, err;
+
+ skel = test_cgroup1_hierarchy__open();
+ if (!ASSERT_OK_PTR(skel, "open"))
+ return;
+
+ skel->bss->target_pid = getpid();
+
+ err = bpf_program__set_attach_target(skel->progs.fentry_run, 0, "bpf_fentry_test1");
+ if (!ASSERT_OK(err, "fentry_set_target"))
+ goto destroy;
+
+ err = test_cgroup1_hierarchy__load(skel);
+ if (!ASSERT_OK(err, "load"))
+ goto destroy;
+
+ /* Setup cgroup1 hierarchy */
+ err = setup_classid_environment();
+ if (!ASSERT_OK(err, "setup_classid_environment"))
+ goto destroy;
+
+ err = join_classid();
+ if (!ASSERT_OK(err, "join_cgroup1"))
+ goto cleanup;
+
+ current_cgid = get_classid_cgroup_id();
+ if (!ASSERT_GE(current_cgid, 0, "cgroup1 id"))
+ goto cleanup;
+
+ hid = get_cgroup1_hierarchy_id("net_cls");
+ if (!ASSERT_GE(hid, 0, "cgroup1 hierarchy id"))
+ goto cleanup;
+ skel->bss->target_hid = hid;
+
+ if (test__start_subtest("test_cgroup1_hierarchy")) {
+ skel->bss->target_ancestor_cgid = current_cgid;
+ bpf_cgroup1(skel);
+ }
+
+ if (test__start_subtest("test_root_cgid")) {
+ skel->bss->target_ancestor_cgid = 1;
+ skel->bss->target_ancestor_level = 0;
+ bpf_cgroup1(skel);
+ }
+
+ if (test__start_subtest("test_invalid_level")) {
+ skel->bss->target_ancestor_cgid = 1;
+ skel->bss->target_ancestor_level = 1;
+ bpf_cgroup1_invalid_id(skel);
+ }
+
+ if (test__start_subtest("test_invalid_cgid")) {
+ skel->bss->target_ancestor_cgid = 0;
+ bpf_cgroup1_invalid_id(skel);
+ }
+
+ if (test__start_subtest("test_invalid_hid")) {
+ skel->bss->target_ancestor_cgid = 1;
+ skel->bss->target_ancestor_level = 0;
+ skel->bss->target_hid = -1;
+ bpf_cgroup1_invalid_id(skel);
+ }
+
+ if (test__start_subtest("test_invalid_cgrp_name")) {
+ skel->bss->target_hid = get_cgroup1_hierarchy_id("net_cl");
+ skel->bss->target_ancestor_cgid = current_cgid;
+ bpf_cgroup1_invalid_id(skel);
+ }
+
+ if (test__start_subtest("test_invalid_cgrp_name2")) {
+ skel->bss->target_hid = get_cgroup1_hierarchy_id("net_cls,");
+ skel->bss->target_ancestor_cgid = current_cgid;
+ bpf_cgroup1_invalid_id(skel);
+ }
+
+ if (test__start_subtest("test_sleepable_prog")) {
+ skel->bss->target_hid = hid;
+ skel->bss->target_ancestor_cgid = current_cgid;
+ bpf_cgroup1_sleepable(skel);
+ }
+
+cleanup:
+ cleanup_classid_environment();
+destroy:
+ test_cgroup1_hierarchy__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
index 9026b42914d3..addf720428f7 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
@@ -71,7 +71,7 @@ void test_cgroup_v1v2(void)
}
ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only");
setup_classid_environment();
- set_classid(42);
+ set_classid();
ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2");
cleanup_classid_environment();
close(server_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
index b25b870f87ba..e6e50a394472 100644
--- a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
+++ b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
@@ -73,6 +73,37 @@ static void test_local_kptr_stash_unstash(void)
local_kptr_stash__destroy(skel);
}
+static void test_refcount_acquire_without_unstash(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct local_kptr_stash *skel;
+ int ret;
+
+ skel = local_kptr_stash__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
+ return;
+
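+ /* The expected retvals (2 before any node is stashed, 42 afterwards) are
+ * presumably defined by the BPF side in progs/local_kptr_stash.c, which is
+ * not part of this hunk.
+ */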
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash),
+ &opts);
+ ASSERT_OK(ret, "refcount_acquire_without_unstash run");
+ ASSERT_EQ(opts.retval, 2, "refcount_acquire_without_unstash retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_refcounted_node), &opts);
+ ASSERT_OK(ret, "stash_refcounted_node run");
+ ASSERT_OK(opts.retval, "stash_refcounted_node retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash),
+ &opts);
+ ASSERT_OK(ret, "refcount_acquire_without_unstash (2) run");
+ ASSERT_EQ(opts.retval, 42, "refcount_acquire_without_unstash (2) retval");
+
+ local_kptr_stash__destroy(skel);
+}
+
static void test_local_kptr_stash_fail(void)
{
RUN_TESTS(local_kptr_stash_fail);
@@ -86,6 +117,8 @@ void test_local_kptr_stash(void)
test_local_kptr_stash_plain();
if (test__start_subtest("local_kptr_stash_unstash"))
test_local_kptr_stash_unstash();
+ if (test__start_subtest("refcount_acquire_without_unstash"))
+ test_refcount_acquire_without_unstash();
if (test__start_subtest("local_kptr_stash_fail"))
test_local_kptr_stash_fail();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
index fe9a23e65ef4..0f7ea4d7d9f6 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_buf.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c
@@ -78,7 +78,7 @@ static void obj_load_log_buf(void)
ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
"libbpf_log_not_empty");
ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
- ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"),
+ ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx() R10=fp0"),
"good_log_verbose");
ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
"bad_log_not_empty");
@@ -175,7 +175,7 @@ static void bpf_prog_load_log_buf(void)
opts.log_level = 2;
fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
good_prog_insns, good_prog_insn_cnt, &opts);
- ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2");
+ ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx() R10=fp0"), "good_log_2");
ASSERT_GE(fd, 0, "good_fd2");
if (fd >= 0)
close(fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
new file mode 100644
index 000000000000..0c9abd279e18
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -0,0 +1,2124 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#define _GNU_SOURCE
+#include <limits.h>
+#include <test_progs.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+
+/* =================================
+ * SHORT AND CONSISTENT NUMBER TYPES
+ * =================================
+ */
+#define U64_MAX ((u64)UINT64_MAX)
+#define U32_MAX ((u32)UINT_MAX)
+#define U16_MAX ((u32)0xffffU)
+#define S64_MIN ((s64)INT64_MIN)
+#define S64_MAX ((s64)INT64_MAX)
+#define S32_MIN ((s32)INT_MIN)
+#define S32_MAX ((s32)INT_MAX)
+#define S16_MIN ((s16)0x8000)
+#define S16_MAX ((s16)0x7fff)
+
+typedef unsigned long long ___u64;
+typedef unsigned int ___u32;
+typedef long long ___s64;
+typedef int ___s32;
+
+/* avoid conflicts with already defined types in kernel headers */
+#define u64 ___u64
+#define u32 ___u32
+#define s64 ___s64
+#define s32 ___s32
+
+/* ==================================
+ * STRING BUF ABSTRACTION AND HELPERS
+ * ==================================
+ */
+struct strbuf {
+ size_t buf_sz;
+ int pos;
+ char buf[0];
+};
+
+#define DEFINE_STRBUF(name, N) \
+ struct { struct strbuf buf; char data[(N)]; } ___##name; \
+ struct strbuf *name = (___##name.buf.buf_sz = (N), ___##name.buf.pos = 0, &___##name.buf)
+
+__printf(2, 3)
+static inline void snappendf(struct strbuf *s, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ s->pos += vsnprintf(s->buf + s->pos,
+ s->pos < s->buf_sz ? s->buf_sz - s->pos : 0,
+ fmt, args);
+ va_end(args);
+}
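+
+/* Example (illustrative): with DEFINE_STRBUF(sb, 32), snappendf(sb, "x=%d", 5)
+ * leaves "x=5" in sb->buf; further calls keep appending, and once pos reaches
+ * buf_sz any extra output is silently dropped by the zero-size vsnprintf().
+ */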
+
+/* ==================================
+ * GENERIC NUMBER TYPE AND OPERATIONS
+ * ==================================
+ */
+enum num_t { U64, first_t = U64, U32, S64, S32, last_t = S32 };
+
+static __always_inline u64 min_t(enum num_t t, u64 x, u64 y)
+{
+ switch (t) {
+ case U64: return (u64)x < (u64)y ? (u64)x : (u64)y;
+ case U32: return (u32)x < (u32)y ? (u32)x : (u32)y;
+ case S64: return (s64)x < (s64)y ? (s64)x : (s64)y;
+ case S32: return (s32)x < (s32)y ? (s32)x : (s32)y;
+ default: printf("min_t!\n"); exit(1);
+ }
+}
+
+static __always_inline u64 max_t(enum num_t t, u64 x, u64 y)
+{
+ switch (t) {
+ case U64: return (u64)x > (u64)y ? (u64)x : (u64)y;
+ case U32: return (u32)x > (u32)y ? (u32)x : (u32)y;
+ case S64: return (s64)x > (s64)y ? (s64)x : (s64)y;
+ case S32: return (s32)x > (s32)y ? (u32)(s32)x : (u32)(s32)y;
+ default: printf("max_t!\n"); exit(1);
+ }
+}
+
+static __always_inline u64 cast_t(enum num_t t, u64 x)
+{
+ switch (t) {
+ case U64: return (u64)x;
+ case U32: return (u32)x;
+ case S64: return (s64)x;
+ case S32: return (u32)(s32)x;
+ default: printf("cast_t!\n"); exit(1);
+ }
+}
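+
+/* For instance, cast_t(U32, 0x100000005ULL) yields 5 and cast_t(S32, U64_MAX)
+ * yields 0xffffffff (i.e. -1 kept in its u32 encoding); min_t()/max_t() above
+ * compare their arguments after the same per-type narrowing.
+ */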
+
+static const char *t_str(enum num_t t)
+{
+ switch (t) {
+ case U64: return "u64";
+ case U32: return "u32";
+ case S64: return "s64";
+ case S32: return "s32";
+ default: printf("t_str!\n"); exit(1);
+ }
+}
+
+static bool t_is_32(enum num_t t)
+{
+ switch (t) {
+ case U64: return false;
+ case U32: return true;
+ case S64: return false;
+ case S32: return true;
+ default: printf("t_is_32!\n"); exit(1);
+ }
+}
+
+static enum num_t t_signed(enum num_t t)
+{
+ switch (t) {
+ case U64: return S64;
+ case U32: return S32;
+ case S64: return S64;
+ case S32: return S32;
+ default: printf("t_signed!\n"); exit(1);
+ }
+}
+
+static enum num_t t_unsigned(enum num_t t)
+{
+ switch (t) {
+ case U64: return U64;
+ case U32: return U32;
+ case S64: return U64;
+ case S32: return U32;
+ default: printf("t_unsigned!\n"); exit(1);
+ }
+}
+
+#define UNUM_MAX_DECIMAL U16_MAX
+#define SNUM_MAX_DECIMAL S16_MAX
+#define SNUM_MIN_DECIMAL S16_MIN
+
+static bool num_is_small(enum num_t t, u64 x)
+{
+ switch (t) {
+ case U64: return (u64)x <= UNUM_MAX_DECIMAL;
+ case U32: return (u32)x <= UNUM_MAX_DECIMAL;
+ case S64: return (s64)x >= SNUM_MIN_DECIMAL && (s64)x <= SNUM_MAX_DECIMAL;
+ case S32: return (s32)x >= SNUM_MIN_DECIMAL && (s32)x <= SNUM_MAX_DECIMAL;
+ default: printf("num_is_small!\n"); exit(1);
+ }
+}
+
+static void snprintf_num(enum num_t t, struct strbuf *sb, u64 x)
+{
+ bool is_small = num_is_small(t, x);
+
+ if (is_small) {
+ switch (t) {
+ case U64: return snappendf(sb, "%llu", (u64)x);
+ case U32: return snappendf(sb, "%u", (u32)x);
+ case S64: return snappendf(sb, "%lld", (s64)x);
+ case S32: return snappendf(sb, "%d", (s32)x);
+ default: printf("snprintf_num!\n"); exit(1);
+ }
+ } else {
+ switch (t) {
+ case U64:
+ if (x == U64_MAX)
+ return snappendf(sb, "U64_MAX");
+ else if (x >= U64_MAX - 256)
+ return snappendf(sb, "U64_MAX-%llu", U64_MAX - x);
+ else
+ return snappendf(sb, "%#llx", (u64)x);
+ case U32:
+ if ((u32)x == U32_MAX)
+ return snappendf(sb, "U32_MAX");
+ else if ((u32)x >= U32_MAX - 256)
+ return snappendf(sb, "U32_MAX-%u", U32_MAX - (u32)x);
+ else
+ return snappendf(sb, "%#x", (u32)x);
+ case S64:
+ if ((s64)x == S64_MAX)
+ return snappendf(sb, "S64_MAX");
+ else if ((s64)x >= S64_MAX - 256)
+ return snappendf(sb, "S64_MAX-%lld", S64_MAX - (s64)x);
+ else if ((s64)x == S64_MIN)
+ return snappendf(sb, "S64_MIN");
+ else if ((s64)x <= S64_MIN + 256)
+ return snappendf(sb, "S64_MIN+%lld", (s64)x - S64_MIN);
+ else
+ return snappendf(sb, "%#llx", (s64)x);
+ case S32:
+ if ((s32)x == S32_MAX)
+ return snappendf(sb, "S32_MAX");
+ else if ((s32)x >= S32_MAX - 256)
+ return snappendf(sb, "S32_MAX-%d", S32_MAX - (s32)x);
+ else if ((s32)x == S32_MIN)
+ return snappendf(sb, "S32_MIN");
+ else if ((s32)x <= S32_MIN + 256)
+ return snappendf(sb, "S32_MIN+%d", (s32)x - S32_MIN);
+ else
+ return snappendf(sb, "%#x", (s32)x);
+ default: printf("snprintf_num!\n"); exit(1);
+ }
+ }
+}
+
+/* ===================================
+ * GENERIC RANGE STRUCT AND OPERATIONS
+ * ===================================
+ */
+struct range {
+ u64 a, b;
+};
+
+static void snprintf_range(enum num_t t, struct strbuf *sb, struct range x)
+{
+ if (x.a == x.b)
+ return snprintf_num(t, sb, x.a);
+
+ snappendf(sb, "[");
+ snprintf_num(t, sb, x.a);
+ snappendf(sb, "; ");
+ snprintf_num(t, sb, x.b);
+ snappendf(sb, "]");
+}
+
+static void print_range(enum num_t t, struct range x, const char *sfx)
+{
+ DEFINE_STRBUF(sb, 128);
+
+ snprintf_range(t, sb, x);
+ printf("%s%s", sb->buf, sfx);
+}
+
+static const struct range unkn[] = {
+ [U64] = { 0, U64_MAX },
+ [U32] = { 0, U32_MAX },
+ [S64] = { (u64)S64_MIN, (u64)S64_MAX },
+ [S32] = { (u64)(u32)S32_MIN, (u64)(u32)S32_MAX },
+};
+
+static struct range unkn_subreg(enum num_t t)
+{
+ switch (t) {
+ case U64: return unkn[U32];
+ case U32: return unkn[U32];
+ case S64: return unkn[U32];
+ case S32: return unkn[S32];
+ default: printf("unkn_subreg!\n"); exit(1);
+ }
+}
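+
+/* unkn_subreg() reflects the fact that a value written through a 32-bit
+ * (sub-register) move has its upper 32 bits zeroed, so even the 64-bit views
+ * collapse to [0, U32_MAX] (i.e., unkn[U32]); only the s32 view stays fully
+ * unknown.
+ */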
+
+static struct range range(enum num_t t, u64 a, u64 b)
+{
+ switch (t) {
+ case U64: return (struct range){ (u64)a, (u64)b };
+ case U32: return (struct range){ (u32)a, (u32)b };
+ case S64: return (struct range){ (s64)a, (s64)b };
+ case S32: return (struct range){ (u32)(s32)a, (u32)(s32)b };
+ default: printf("range!\n"); exit(1);
+ }
+}
+
+static __always_inline u32 sign64(u64 x) { return (x >> 63) & 1; }
+static __always_inline u32 sign32(u64 x) { return ((u32)x >> 31) & 1; }
+static __always_inline u32 upper32(u64 x) { return (u32)(x >> 32); }
+static __always_inline u64 swap_low32(u64 x, u32 y) { return (x & 0xffffffff00000000ULL) | y; }
+
+static bool range_eq(struct range x, struct range y)
+{
+ return x.a == y.a && x.b == y.b;
+}
+
+static struct range range_cast_to_s32(struct range x)
+{
+ u64 a = x.a, b = x.b;
+
+ /* if upper 32 bits are constant, lower 32 bits should form a proper
+ * s32 range to be correct
+ */
+ if (upper32(a) == upper32(b) && (s32)a <= (s32)b)
+ return range(S32, a, b);
+
+ /* Special case where upper bits form a small sequence of two
+ * sequential numbers (in 32-bit unsigned space, so 0xffffffff to
+ * 0x00000000 is also valid), while lower bits form a proper s32 range
+ * going from negative numbers to positive numbers.
+ *
+	 * E.g.: [0xfffffff0ffffff00; 0xfffffff100000010]. Iterating
+	 * over the full 64-bit range will form a proper [-256, 16]
+	 * ([0xffffff00; 0x00000010]) range in its lower 32 bits.
+ */
+ if (upper32(a) + 1 == upper32(b) && (s32)a < 0 && (s32)b >= 0)
+ return range(S32, a, b);
+
+ /* otherwise we can't derive much meaningful information */
+ return unkn[S32];
+}
+
+static struct range range_cast_u64(enum num_t to_t, struct range x)
+{
+ u64 a = (u64)x.a, b = (u64)x.b;
+
+ switch (to_t) {
+ case U64:
+ return x;
+ case U32:
+ if (upper32(a) != upper32(b))
+ return unkn[U32];
+ return range(U32, a, b);
+ case S64:
+ if (sign64(a) != sign64(b))
+ return unkn[S64];
+ return range(S64, a, b);
+ case S32:
+ return range_cast_to_s32(x);
+ default: printf("range_cast_u64!\n"); exit(1);
+ }
+}
+
+static struct range range_cast_s64(enum num_t to_t, struct range x)
+{
+ s64 a = (s64)x.a, b = (s64)x.b;
+
+ switch (to_t) {
+ case U64:
+		/* equivalent to the (u64)a <= (u64)b check */
+ if (sign64(a) != sign64(b))
+ return unkn[U64];
+ return range(U64, a, b);
+ case U32:
+ if (upper32(a) != upper32(b) || sign32(a) != sign32(b))
+ return unkn[U32];
+ return range(U32, a, b);
+ case S64:
+ return x;
+ case S32:
+ return range_cast_to_s32(x);
+ default: printf("range_cast_s64!\n"); exit(1);
+ }
+}
+
+static struct range range_cast_u32(enum num_t to_t, struct range x)
+{
+ u32 a = (u32)x.a, b = (u32)x.b;
+
+ switch (to_t) {
+ case U64:
+ case S64:
+ /* u32 is always a valid zero-extended u64/s64 */
+ return range(to_t, a, b);
+ case U32:
+ return x;
+ case S32:
+ return range_cast_to_s32(range(U32, a, b));
+ default: printf("range_cast_u32!\n"); exit(1);
+ }
+}
+
+static struct range range_cast_s32(enum num_t to_t, struct range x)
+{
+ s32 a = (s32)x.a, b = (s32)x.b;
+
+ switch (to_t) {
+ case U64:
+ case U32:
+ case S64:
+ if (sign32(a) != sign32(b))
+ return unkn[to_t];
+ return range(to_t, a, b);
+ case S32:
+ return x;
+ default: printf("range_cast_s32!\n"); exit(1);
+ }
+}
+
+/* Reinterpret range in *from_t* domain as a range in *to_t* domain preserving
+ * all possible information. Worst case, it will be unknown range within
+ * *to_t* domain, if nothing more specific can be guaranteed during the
+ * conversion
+ */
+static struct range range_cast(enum num_t from_t, enum num_t to_t, struct range from)
+{
+ switch (from_t) {
+ case U64: return range_cast_u64(to_t, from);
+ case U32: return range_cast_u32(to_t, from);
+ case S64: return range_cast_s64(to_t, from);
+ case S32: return range_cast_s32(to_t, from);
+ default: printf("range_cast!\n"); exit(1);
+ }
+}
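+
+/* Worked example: range_cast(U64, S32, {0xffffffff, 0x100000001}) hits the
+ * "two sequential upper halves" special case in range_cast_to_s32() and
+ * yields the s32 range [-1, 1], while casting the same u64 range to U32
+ * yields unknown, since the upper 32 bits of the two bounds differ.
+ */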
+
+static bool is_valid_num(enum num_t t, u64 x)
+{
+ switch (t) {
+ case U64: return true;
+ case U32: return upper32(x) == 0;
+ case S64: return true;
+ case S32: return upper32(x) == 0;
+ default: printf("is_valid_num!\n"); exit(1);
+ }
+}
+
+static bool is_valid_range(enum num_t t, struct range x)
+{
+ if (!is_valid_num(t, x.a) || !is_valid_num(t, x.b))
+ return false;
+
+ switch (t) {
+ case U64: return (u64)x.a <= (u64)x.b;
+ case U32: return (u32)x.a <= (u32)x.b;
+ case S64: return (s64)x.a <= (s64)x.b;
+ case S32: return (s32)x.a <= (s32)x.b;
+ default: printf("is_valid_range!\n"); exit(1);
+ }
+}
+
+static struct range range_improve(enum num_t t, struct range old, struct range new)
+{
+ return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
+}
+
+static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
+{
+ struct range y_cast;
+
+ y_cast = range_cast(y_t, x_t, y);
+
+ /* the case when new range knowledge, *y*, is a 32-bit subregister
+ * range, while previous range knowledge, *x*, is a full register
+ * 64-bit range, needs special treatment to take into account upper 32
+ * bits of full register range
+ */
+ if (t_is_32(y_t) && !t_is_32(x_t)) {
+ struct range x_swap;
+
+		/* some combinations of upper 32 bits and the sign bit can lead
+		 * to invalid ranges; in such cases it's easier to detect them
+		 * after the cast/swap than to try to enumerate all the
+		 * conditions under which the transformation and knowledge
+		 * transfer are valid
+		 */
+ x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
+ if (!is_valid_range(x_t, x_swap))
+ return x;
+ return range_improve(x_t, x, x_swap);
+ }
+
+ /* otherwise, plain range cast and intersection works */
+ return range_improve(x_t, x, y_cast);
+}
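+
+/* Worked example: refining a u64 range x = [0, 0x1ffffffff] with the u32
+ * knowledge y = [0x80000000, 0x80000000] swaps the learned lower 32 bits
+ * into both bounds, giving x_swap = [0x80000000, 0x180000000]; that is a
+ * valid u64 range, so it becomes the improved result.
+ */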
+
+/* =======================
+ * GENERIC CONDITIONAL OPS
+ * =======================
+ */
+enum op { OP_LT, OP_LE, OP_GT, OP_GE, OP_EQ, OP_NE, first_op = OP_LT, last_op = OP_NE };
+
+static enum op complement_op(enum op op)
+{
+ switch (op) {
+ case OP_LT: return OP_GE;
+ case OP_LE: return OP_GT;
+ case OP_GT: return OP_LE;
+ case OP_GE: return OP_LT;
+ case OP_EQ: return OP_NE;
+ case OP_NE: return OP_EQ;
+ default: printf("complement_op!\n"); exit(1);
+ }
+}
+
+static const char *op_str(enum op op)
+{
+ switch (op) {
+ case OP_LT: return "<";
+ case OP_LE: return "<=";
+ case OP_GT: return ">";
+ case OP_GE: return ">=";
+ case OP_EQ: return "==";
+ case OP_NE: return "!=";
+ default: printf("op_str!\n"); exit(1);
+ }
+}
+
+/* Can a register with range [x.a, x.b] *EVER* satisfy
+ * OP (<, <=, >, >=, ==, !=) relation to
+ * a register with range [y.a, y.b]
+ * _in *num_t* domain_?
+ */
+static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op op)
+{
+#define range_canbe(T) do { \
+ switch (op) { \
+ case OP_LT: return (T)x.a < (T)y.b; \
+ case OP_LE: return (T)x.a <= (T)y.b; \
+ case OP_GT: return (T)x.b > (T)y.a; \
+ case OP_GE: return (T)x.b >= (T)y.a; \
+ case OP_EQ: return (T)max_t(t, x.a, y.a) <= (T)min_t(t, x.b, y.b); \
+ case OP_NE: return !((T)x.a == (T)x.b && (T)y.a == (T)y.b && (T)x.a == (T)y.a); \
+ default: printf("range_canbe op %d\n", op); exit(1); \
+ } \
+} while (0)
+
+ switch (t) {
+ case U64: { range_canbe(u64); }
+ case U32: { range_canbe(u32); }
+ case S64: { range_canbe(s64); }
+ case S32: { range_canbe(s32); }
+ default: printf("range_canbe!\n"); exit(1);
+ }
+#undef range_canbe
+}
+
+/* Does a register with range [x.a, x.b] *ALWAYS* satisfy
+ * OP (<, <=, >, >=, ==, !=) relation to
+ * a register with range [y.a, y.b]
+ * _in *num_t* domain_?
+ */
+static bool range_always_op(enum num_t t, struct range x, struct range y, enum op op)
+{
+ /* always op <=> ! canbe complement(op) */
+ return !range_canbe_op(t, x, y, complement_op(op));
+}
+
+/* Does a register with range [x.a, x.b] *NEVER* satisfy
+ * OP (<, <=, >, >=, ==, !=) relation to
+ * a register with range [y.a, y.b]
+ * _in *num_t* domain_?
+ */
+static bool range_never_op(enum num_t t, struct range x, struct range y, enum op op)
+{
+ return !range_canbe_op(t, x, y, op);
+}
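+
+/* Example (U64 domain): for x = [3, 5] and y = [5, 7],
+ * range_canbe_op(.., OP_GT) is false (x.b = 5 is not > y.a = 5), so
+ * range_never_op() reports that x > y can never be true; consequently
+ * range_always_op(.., OP_LE) is true, i.e., x <= y always holds.
+ */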
+
+/* similar to verifier's is_branch_taken():
+ * 1 - always taken;
+ * 0 - never taken,
+ * -1 - unsure.
+ */
+static int range_branch_taken_op(enum num_t t, struct range x, struct range y, enum op op)
+{
+ if (range_always_op(t, x, y, op))
+ return 1;
+ if (range_never_op(t, x, y, op))
+ return 0;
+ return -1;
+}
+
+/* What would be the new estimates for register x and y ranges assuming truthful
+ * OP comparison between them. I.e., (x OP y == true) => x <- newx, y <- newy.
+ *
+ * We assume "interesting" cases where ranges overlap. Cases where it's
+ * obvious that (x OP y) is either always true or false should be filtered with
+ * range_never and range_always checks.
+ */
+static void range_cond(enum num_t t, struct range x, struct range y,
+ enum op op, struct range *newx, struct range *newy)
+{
+ if (!range_canbe_op(t, x, y, op)) {
+ /* nothing to adjust, can't happen, return original values */
+ *newx = x;
+ *newy = y;
+ return;
+ }
+ switch (op) {
+ case OP_LT:
+ *newx = range(t, x.a, min_t(t, x.b, y.b - 1));
+ *newy = range(t, max_t(t, x.a + 1, y.a), y.b);
+ break;
+ case OP_LE:
+ *newx = range(t, x.a, min_t(t, x.b, y.b));
+ *newy = range(t, max_t(t, x.a, y.a), y.b);
+ break;
+ case OP_GT:
+ *newx = range(t, max_t(t, x.a, y.a + 1), x.b);
+ *newy = range(t, y.a, min_t(t, x.b - 1, y.b));
+ break;
+ case OP_GE:
+ *newx = range(t, max_t(t, x.a, y.a), x.b);
+ *newy = range(t, y.a, min_t(t, x.b, y.b));
+ break;
+ case OP_EQ:
+ *newx = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
+ *newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
+ break;
+ case OP_NE:
+ /* generic case, can't derive more information */
+ *newx = range(t, x.a, x.b);
+ *newy = range(t, y.a, y.b);
+ break;
+
+	/* The extended constant-elimination logic below is not supported by
+	 * the verifier just yet, so it is intentionally left unreachable
+	 * (it sits after the break above).
+	 */
+ if (x.a == x.b && x.a == y.a) {
+ /* X is a constant matching left side of Y */
+ *newx = range(t, x.a, x.b);
+ *newy = range(t, y.a + 1, y.b);
+ } else if (x.a == x.b && x.b == y.b) {
+		/* X is a constant matching right side of Y */
+ *newx = range(t, x.a, x.b);
+ *newy = range(t, y.a, y.b - 1);
+ } else if (y.a == y.b && x.a == y.a) {
+ /* Y is a constant matching left side of X */
+ *newx = range(t, x.a + 1, x.b);
+ *newy = range(t, y.a, y.b);
+ } else if (y.a == y.b && x.b == y.b) {
+		/* Y is a constant matching right side of X */
+ *newx = range(t, x.a, x.b - 1);
+ *newy = range(t, y.a, y.b);
+ } else {
+ /* generic case, can't derive more information */
+ *newx = range(t, x.a, x.b);
+ *newy = range(t, y.a, y.b);
+ }
+
+ break;
+ default:
+ break;
+ }
+}
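+
+/* Worked example (U64 domain): for x = [0, 10], y = [3, 5] and OP_LT, the
+ * "x < y is true" branch gives newx = [0, 4] (x has to stay strictly below
+ * y's maximum of 5), while newy stays [3, 5] (y only has to exceed x's
+ * minimum of 0, which it already does).
+ */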
+
+/* =======================
+ * REGISTER STATE HANDLING
+ * =======================
+ */
+struct reg_state {
+ struct range r[4]; /* indexed by enum num_t: U64, U32, S64, S32 */
+ bool valid;
+};
+
+static void print_reg_state(struct reg_state *r, const char *sfx)
+{
+ DEFINE_STRBUF(sb, 512);
+ enum num_t t;
+ int cnt = 0;
+
+ if (!r->valid) {
+ printf("<not found>%s", sfx);
+ return;
+ }
+
+ snappendf(sb, "scalar(");
+ for (t = first_t; t <= last_t; t++) {
+ snappendf(sb, "%s%s=", cnt++ ? "," : "", t_str(t));
+ snprintf_range(t, sb, r->r[t]);
+ }
+ snappendf(sb, ")");
+
+ printf("%s%s", sb->buf, sfx);
+}
+
+static void print_refinement(enum num_t s_t, struct range src,
+ enum num_t d_t, struct range old, struct range new,
+ const char *ctx)
+{
+ printf("REFINING (%s) (%s)SRC=", ctx, t_str(s_t));
+ print_range(s_t, src, "");
+ printf(" (%s)DST_OLD=", t_str(d_t));
+ print_range(d_t, old, "");
+ printf(" (%s)DST_NEW=", t_str(d_t));
+ print_range(d_t, new, "\n");
+}
+
+static void reg_state_refine(struct reg_state *r, enum num_t t, struct range x, const char *ctx)
+{
+ enum num_t d_t, s_t;
+ struct range old;
+ bool keep_going = false;
+
+again:
+ /* try to derive new knowledge from just learned range x of type t */
+ for (d_t = first_t; d_t <= last_t; d_t++) {
+ old = r->r[d_t];
+ r->r[d_t] = range_refine(d_t, r->r[d_t], t, x);
+ if (!range_eq(r->r[d_t], old)) {
+ keep_going = true;
+ if (env.verbosity >= VERBOSE_VERY)
+ print_refinement(t, x, d_t, old, r->r[d_t], ctx);
+ }
+ }
+
+ /* now see if we can derive anything new from updated reg_state's ranges */
+ for (s_t = first_t; s_t <= last_t; s_t++) {
+ for (d_t = first_t; d_t <= last_t; d_t++) {
+ old = r->r[d_t];
+ r->r[d_t] = range_refine(d_t, r->r[d_t], s_t, r->r[s_t]);
+ if (!range_eq(r->r[d_t], old)) {
+ keep_going = true;
+ if (env.verbosity >= VERBOSE_VERY)
+ print_refinement(s_t, r->r[s_t], d_t, old, r->r[d_t], ctx);
+ }
+ }
+ }
+
+ /* keep refining until we converge */
+ if (keep_going) {
+ keep_going = false;
+ goto again;
+ }
+}
+
+static void reg_state_set_const(struct reg_state *rs, enum num_t t, u64 val)
+{
+ enum num_t tt;
+
+ rs->valid = true;
+ for (tt = first_t; tt <= last_t; tt++)
+ rs->r[tt] = tt == t ? range(t, val, val) : unkn[tt];
+
+ reg_state_refine(rs, t, rs->r[t], "CONST");
+}
+
+static void reg_state_cond(enum num_t t, struct reg_state *x, struct reg_state *y, enum op op,
+ struct reg_state *newx, struct reg_state *newy, const char *ctx)
+{
+ char buf[32];
+ enum num_t ts[2];
+ struct reg_state xx = *x, yy = *y;
+ int i, t_cnt;
+ struct range z1, z2;
+
+ if (op == OP_EQ || op == OP_NE) {
+ /* OP_EQ and OP_NE are sign-agnostic, so we need to process
+ * both signed and unsigned domains at the same time
+ */
+ ts[0] = t_unsigned(t);
+ ts[1] = t_signed(t);
+ t_cnt = 2;
+ } else {
+ ts[0] = t;
+ t_cnt = 1;
+ }
+
+ for (i = 0; i < t_cnt; i++) {
+ t = ts[i];
+ z1 = x->r[t];
+ z2 = y->r[t];
+
+ range_cond(t, z1, z2, op, &z1, &z2);
+
+ if (newx) {
+ snprintf(buf, sizeof(buf), "%s R1", ctx);
+ reg_state_refine(&xx, t, z1, buf);
+ }
+ if (newy) {
+ snprintf(buf, sizeof(buf), "%s R2", ctx);
+ reg_state_refine(&yy, t, z2, buf);
+ }
+ }
+
+ if (newx)
+ *newx = xx;
+ if (newy)
+ *newy = yy;
+}
+
+static int reg_state_branch_taken_op(enum num_t t, struct reg_state *x, struct reg_state *y,
+ enum op op)
+{
+ if (op == OP_EQ || op == OP_NE) {
+ /* OP_EQ and OP_NE are sign-agnostic */
+ enum num_t tu = t_unsigned(t);
+ enum num_t ts = t_signed(t);
+ int br_u, br_s, br;
+
+ br_u = range_branch_taken_op(tu, x->r[tu], y->r[tu], op);
+ br_s = range_branch_taken_op(ts, x->r[ts], y->r[ts], op);
+
+ if (br_u >= 0 && br_s >= 0 && br_u != br_s)
+ ASSERT_FALSE(true, "branch taken inconsistency!\n");
+
+ /* if 64-bit ranges are indecisive, use 32-bit subranges to
+ * eliminate always/never taken branches, if possible
+ */
+ if (br_u == -1 && (t == U64 || t == S64)) {
+ br = range_branch_taken_op(U32, x->r[U32], y->r[U32], op);
+ /* we can only reject for OP_EQ, never take branch
+ * based on lower 32 bits
+ */
+ if (op == OP_EQ && br == 0)
+ return 0;
+			/* for OP_NE we can be conclusive only if lower 32 bits
+			 * differ and thus the inequality branch is always taken
+ */
+ if (op == OP_NE && br == 1)
+ return 1;
+
+ br = range_branch_taken_op(S32, x->r[S32], y->r[S32], op);
+ if (op == OP_EQ && br == 0)
+ return 0;
+ if (op == OP_NE && br == 1)
+ return 1;
+ }
+
+ return br_u >= 0 ? br_u : br_s;
+ }
+ return range_branch_taken_op(t, x->r[t], y->r[t], op);
+}
+
+/* =====================================
+ * BPF PROGS GENERATION AND VERIFICATION
+ * =====================================
+ */
+struct case_spec {
+ /* whether to init full register (r1) or sub-register (w1) */
+ bool init_subregs;
+ /* whether to establish initial value range on full register (r1) or
+ * sub-register (w1)
+ */
+ bool setup_subregs;
+ /* whether to establish initial value range using signed or unsigned
+ * comparisons (i.e., initialize umin/umax or smin/smax directly)
+ */
+ bool setup_signed;
+ /* whether to perform comparison on full registers or sub-registers */
+ bool compare_subregs;
+ /* whether to perform comparison using signed or unsigned operations */
+ bool compare_signed;
+};
+
+/* Generate test BPF program based on provided test ranges, operation, and
+ * specifications about register bitness and signedness.
+ */
+static int load_range_cmp_prog(struct range x, struct range y, enum op op,
+ int branch_taken, struct case_spec spec,
+ char *log_buf, size_t log_sz,
+ int *false_pos, int *true_pos)
+{
+#define emit(insn) ({ \
+ struct bpf_insn __insns[] = { insn }; \
+ int __i; \
+ for (__i = 0; __i < ARRAY_SIZE(__insns); __i++) \
+ insns[cur_pos + __i] = __insns[__i]; \
+ cur_pos += __i; \
+})
+#define JMP_TO(target) ((target) - cur_pos - 1)
+ int cur_pos = 0, exit_pos, fd, op_code;
+ struct bpf_insn insns[64];
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_level = 2,
+ .log_buf = log_buf,
+ .log_size = log_sz,
+ .prog_flags = BPF_F_TEST_REG_INVARIANTS,
+ );
+
+ /* ; skip exit block below
+ * goto +2;
+ */
+ emit(BPF_JMP_A(2));
+ exit_pos = cur_pos;
+ /* ; exit block for all the preparatory conditionals
+ * out:
+ * r0 = 0;
+ * exit;
+ */
+ emit(BPF_MOV64_IMM(BPF_REG_0, 0));
+ emit(BPF_EXIT_INSN());
+ /*
+ * ; assign r6/w6 and r7/w7 unpredictable u64/u32 value
+ * call bpf_get_current_pid_tgid;
+ * r6 = r0; | w6 = w0;
+ * call bpf_get_current_pid_tgid;
+ * r7 = r0; | w7 = w0;
+ */
+ emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid));
+ if (spec.init_subregs)
+ emit(BPF_MOV32_REG(BPF_REG_6, BPF_REG_0));
+ else
+ emit(BPF_MOV64_REG(BPF_REG_6, BPF_REG_0));
+ emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid));
+ if (spec.init_subregs)
+ emit(BPF_MOV32_REG(BPF_REG_7, BPF_REG_0));
+ else
+ emit(BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+ /* ; setup initial r6/w6 possible value range ([x.a, x.b])
+ * r1 = %[x.a] ll; | w1 = %[x.a];
+ * r2 = %[x.b] ll; | w2 = %[x.b];
+ * if r6 < r1 goto out; | if w6 < w1 goto out;
+ * if r6 > r2 goto out; | if w6 > w2 goto out;
+ */
+ if (spec.setup_subregs) {
+ emit(BPF_MOV32_IMM(BPF_REG_1, (s32)x.a));
+ emit(BPF_MOV32_IMM(BPF_REG_2, (s32)x.b));
+ emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
+ BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
+ emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
+ BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
+ } else {
+ emit(BPF_LD_IMM64(BPF_REG_1, x.a));
+ emit(BPF_LD_IMM64(BPF_REG_2, x.b));
+ emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
+ BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
+ emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
+ BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
+ }
+ /* ; setup initial r7/w7 possible value range ([y.a, y.b])
+ * r1 = %[y.a] ll; | w1 = %[y.a];
+ * r2 = %[y.b] ll; | w2 = %[y.b];
+ * if r7 < r1 goto out; | if w7 < w1 goto out;
+ * if r7 > r2 goto out; | if w7 > w2 goto out;
+ */
+ if (spec.setup_subregs) {
+ emit(BPF_MOV32_IMM(BPF_REG_1, (s32)y.a));
+ emit(BPF_MOV32_IMM(BPF_REG_2, (s32)y.b));
+ emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
+ BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
+ emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
+ BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
+ } else {
+ emit(BPF_LD_IMM64(BPF_REG_1, y.a));
+ emit(BPF_LD_IMM64(BPF_REG_2, y.b));
+ emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
+ BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
+ emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
+ BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
+ }
+ /* ; range test instruction
+ * if r6 <op> r7 goto +3; | if w6 <op> w7 goto +3;
+ */
+ switch (op) {
+ case OP_LT: op_code = spec.compare_signed ? BPF_JSLT : BPF_JLT; break;
+ case OP_LE: op_code = spec.compare_signed ? BPF_JSLE : BPF_JLE; break;
+ case OP_GT: op_code = spec.compare_signed ? BPF_JSGT : BPF_JGT; break;
+ case OP_GE: op_code = spec.compare_signed ? BPF_JSGE : BPF_JGE; break;
+ case OP_EQ: op_code = BPF_JEQ; break;
+ case OP_NE: op_code = BPF_JNE; break;
+ default:
+ printf("unrecognized op %d\n", op);
+ return -ENOTSUP;
+ }
+	/* ; BEFORE the conditional, r0/w0 = {r6/w6,r7/w7} is used to extract verifier state reliably;
+	 * ; this is done for debugging, as the verifier doesn't always print
+	 * ; register states as of the condition jump instruction (e.g., when
+	 * ; precision marking happens)
+ * r0 = r6; | w0 = w6;
+ * r0 = r7; | w0 = w7;
+ */
+ if (spec.compare_subregs) {
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
+ } else {
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
+ }
+ if (spec.compare_subregs)
+ emit(BPF_JMP32_REG(op_code, BPF_REG_6, BPF_REG_7, 3));
+ else
+ emit(BPF_JMP_REG(op_code, BPF_REG_6, BPF_REG_7, 3));
+	/* ; FALSE branch, r0/w0 = {r6/w6,r7/w7} is used to extract verifier state reliably
+ * r0 = r6; | w0 = w6;
+ * r0 = r7; | w0 = w7;
+ * exit;
+ */
+ *false_pos = cur_pos;
+ if (spec.compare_subregs) {
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
+ } else {
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
+ }
+ if (branch_taken == 1) /* false branch is never taken */
+ emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
+ else
+ emit(BPF_EXIT_INSN());
+	/* ; TRUE branch, r0/w0 = {r6/w6,r7/w7} is used to extract verifier state reliably
+ * r0 = r6; | w0 = w6;
+ * r0 = r7; | w0 = w7;
+ * exit;
+ */
+ *true_pos = cur_pos;
+ if (spec.compare_subregs) {
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
+ } else {
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
+ }
+ if (branch_taken == 0) /* true branch is never taken */
+ emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
+ emit(BPF_EXIT_INSN()); /* last instruction has to be exit */
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "reg_bounds_test",
+ "GPL", insns, cur_pos, &opts);
+ if (fd < 0)
+ return fd;
+
+ close(fd);
+ return 0;
+#undef emit
+#undef JMP_TO
+}
+
+#define str_has_pfx(str, pfx) (strncmp(str, pfx, strlen(pfx)) == 0)
+
+/* Parse register state from verifier log.
+ * `s` should point to the start of "Rx = ..." substring in the verifier log.
+ */
+static int parse_reg_state(const char *s, struct reg_state *reg)
+{
+ /* There are two generic forms for SCALAR register:
+ * - known constant: R6_rwD=P%lld
+ * - range: R6_rwD=scalar(id=1,...), where "..." is a comma-separated
+ * list of optional range specifiers:
+ * - umin=%llu, if missing, assumed 0;
+ * - umax=%llu, if missing, assumed U64_MAX;
+ * - smin=%lld, if missing, assumed S64_MIN;
+	 *   - smax=%lld, if missing, assumed S64_MAX;
+ * - umin32=%d, if missing, assumed 0;
+ * - umax32=%d, if missing, assumed U32_MAX;
+ * - smin32=%d, if missing, assumed S32_MIN;
+	 *   - smax32=%d, if missing, assumed S32_MAX;
+ * - var_off=(%#llx; %#llx), tnum part, we don't care about it.
+ *
+ * If some of the values are equal, they will be grouped (but min/max
+ * are not mixed together, and similarly negative values are not
+ * grouped with non-negative ones). E.g.:
+ *
+ * R6_w=Pscalar(smin=smin32=0, smax=umax=umax32=1000)
+ *
+ * _rwD part is optional (and any of the letters can be missing).
+ * P (precision mark) is optional as well.
+ *
+ * Anything inside scalar() is optional, including id, of course.
+ */
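+	/* For example (illustrative log fragments matching the format above):
+	 * "R6_w=P5" parses into all four ranges equal to [5, 5], while
+	 * "R6=scalar(umax=255)" parses into u64=[0, 255] with the remaining
+	 * ranges left at the defaults listed above.
+	 */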
+ struct {
+ const char *pfx;
+ u64 *dst, def;
+ bool is_32, is_set;
+ } *f, fields[8] = {
+ {"smin=", &reg->r[S64].a, S64_MIN},
+ {"smax=", &reg->r[S64].b, S64_MAX},
+ {"umin=", &reg->r[U64].a, 0},
+ {"umax=", &reg->r[U64].b, U64_MAX},
+ {"smin32=", &reg->r[S32].a, (u32)S32_MIN, true},
+ {"smax32=", &reg->r[S32].b, (u32)S32_MAX, true},
+ {"umin32=", &reg->r[U32].a, 0, true},
+ {"umax32=", &reg->r[U32].b, U32_MAX, true},
+ };
+ const char *p;
+ int i;
+
+ p = strchr(s, '=');
+ if (!p)
+ return -EINVAL;
+ p++;
+ if (*p == 'P')
+ p++;
+
+ if (!str_has_pfx(p, "scalar(")) {
+ long long sval;
+ enum num_t t;
+
+ if (p[0] == '0' && p[1] == 'x') {
+ if (sscanf(p, "%llx", &sval) != 1)
+ return -EINVAL;
+ } else {
+ if (sscanf(p, "%lld", &sval) != 1)
+ return -EINVAL;
+ }
+
+ reg->valid = true;
+ for (t = first_t; t <= last_t; t++) {
+ reg->r[t] = range(t, sval, sval);
+ }
+ return 0;
+ }
+
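+	/* sizeof("scalar") == strlen("scalar("), i.e., skip past "scalar(" */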
+ p += sizeof("scalar");
+ while (p) {
+ int midxs[ARRAY_SIZE(fields)], mcnt = 0;
+ u64 val;
+
+ for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ f = &fields[i];
+ if (!str_has_pfx(p, f->pfx))
+ continue;
+ midxs[mcnt++] = i;
+ p += strlen(f->pfx);
+ }
+
+ if (mcnt) {
+ /* populate all matched fields */
+ if (p[0] == '0' && p[1] == 'x') {
+ if (sscanf(p, "%llx", &val) != 1)
+ return -EINVAL;
+ } else {
+ if (sscanf(p, "%lld", &val) != 1)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mcnt; i++) {
+ f = &fields[midxs[i]];
+ f->is_set = true;
+ *f->dst = f->is_32 ? (u64)(u32)val : val;
+ }
+ } else if (str_has_pfx(p, "var_off")) {
+ /* skip "var_off=(0x0; 0x3f)" part completely */
+ p = strchr(p, ')');
+ if (!p)
+ return -EINVAL;
+ p++;
+ }
+
+		p = strpbrk(p, ",)");
+		if (!p)
+			break;
+		if (*p == ')')
+			break;
+		p++;
+ }
+
+ reg->valid = true;
+
+ for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ f = &fields[i];
+ if (!f->is_set)
+ *f->dst = f->def;
+ }
+
+ return 0;
+}
+
+/* Parse all register states (TRUE/FALSE branches and DST/SRC registers)
+ * out of the verifier log for a corresponding test case BPF program.
+ */
+static int parse_range_cmp_log(const char *log_buf, struct case_spec spec,
+ int false_pos, int true_pos,
+ struct reg_state *false1_reg, struct reg_state *false2_reg,
+ struct reg_state *true1_reg, struct reg_state *true2_reg)
+{
+ struct {
+ int insn_idx;
+ int reg_idx;
+ const char *reg_upper;
+ struct reg_state *state;
+ } specs[] = {
+ {false_pos, 6, "R6=", false1_reg},
+ {false_pos + 1, 7, "R7=", false2_reg},
+ {true_pos, 6, "R6=", true1_reg},
+ {true_pos + 1, 7, "R7=", true2_reg},
+ };
+ char buf[32];
+ const char *p = log_buf, *q;
+ int i, err;
+
+ for (i = 0; i < 4; i++) {
+ sprintf(buf, "%d: (%s) %s = %s%d", specs[i].insn_idx,
+ spec.compare_subregs ? "bc" : "bf",
+ spec.compare_subregs ? "w0" : "r0",
+ spec.compare_subregs ? "w" : "r", specs[i].reg_idx);
+
+ q = strstr(p, buf);
+ if (!q) {
+ *specs[i].state = (struct reg_state){.valid = false};
+ continue;
+ }
+ p = strstr(q, specs[i].reg_upper);
+ if (!p)
+ return -EINVAL;
+ err = parse_reg_state(p, specs[i].state);
+ if (err)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Validate ranges match, and print details if they don't */
+static bool assert_range_eq(enum num_t t, struct range x, struct range y,
+ const char *ctx1, const char *ctx2)
+{
+ DEFINE_STRBUF(sb, 512);
+
+ if (range_eq(x, y))
+ return true;
+
+ snappendf(sb, "MISMATCH %s.%s: ", ctx1, ctx2);
+ snprintf_range(t, sb, x);
+ snappendf(sb, " != ");
+ snprintf_range(t, sb, y);
+
+ printf("%s\n", sb->buf);
+
+ return false;
+}
+
+/* Validate that register states match, and print details if they don't */
+static bool assert_reg_state_eq(struct reg_state *r, struct reg_state *e, const char *ctx)
+{
+ bool ok = true;
+ enum num_t t;
+
+ if (r->valid != e->valid) {
+ printf("MISMATCH %s: actual %s != expected %s\n", ctx,
+ r->valid ? "<valid>" : "<invalid>",
+ e->valid ? "<valid>" : "<invalid>");
+ return false;
+ }
+
+ if (!r->valid)
+ return true;
+
+ for (t = first_t; t <= last_t; t++) {
+ if (!assert_range_eq(t, r->r[t], e->r[t], ctx, t_str(t)))
+ ok = false;
+ }
+
+ return ok;
+}
+
+/* Print verifier log, filtering out irrelevant noise */
+static void print_verifier_log(const char *buf)
+{
+ const char *p;
+
+ while (buf[0]) {
+ p = strchrnul(buf, '\n');
+
+ /* filter out irrelevant precision backtracking logs */
+ if (str_has_pfx(buf, "mark_precise: "))
+ goto skip_line;
+
+ printf("%.*s\n", (int)(p - buf), buf);
+
+skip_line:
+ buf = *p == '\0' ? p : p + 1;
+ }
+}
+
+/* Simulate provided test case purely with our own range-based logic.
+ * This is done to set up expectations for verifier's branch_taken logic and
+ * verifier's register states in the verifier log.
+ */
+static void sim_case(enum num_t init_t, enum num_t cond_t,
+ struct range x, struct range y, enum op op,
+ struct reg_state *fr1, struct reg_state *fr2,
+ struct reg_state *tr1, struct reg_state *tr2,
+ int *branch_taken)
+{
+ const u64 A = x.a;
+ const u64 B = x.b;
+ const u64 C = y.a;
+ const u64 D = y.b;
+ struct reg_state rc;
+ enum op rev_op = complement_op(op);
+ enum num_t t;
+
+ fr1->valid = fr2->valid = true;
+ tr1->valid = tr2->valid = true;
+ for (t = first_t; t <= last_t; t++) {
+ /* if we are initializing using 32-bit subregisters,
+ * full registers get upper 32 bits zeroed automatically
+ */
+ struct range z = t_is_32(init_t) ? unkn_subreg(t) : unkn[t];
+
+ fr1->r[t] = fr2->r[t] = tr1->r[t] = tr2->r[t] = z;
+ }
+
+ /* step 1: r1 >= A, r2 >= C */
+ reg_state_set_const(&rc, init_t, A);
+ reg_state_cond(init_t, fr1, &rc, OP_GE, fr1, NULL, "r1>=A");
+ reg_state_set_const(&rc, init_t, C);
+ reg_state_cond(init_t, fr2, &rc, OP_GE, fr2, NULL, "r2>=C");
+ *tr1 = *fr1;
+ *tr2 = *fr2;
+ if (env.verbosity >= VERBOSE_VERY) {
+ printf("STEP1 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n");
+ printf("STEP1 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n");
+ }
+
+ /* step 2: r1 <= B, r2 <= D */
+ reg_state_set_const(&rc, init_t, B);
+ reg_state_cond(init_t, fr1, &rc, OP_LE, fr1, NULL, "r1<=B");
+ reg_state_set_const(&rc, init_t, D);
+ reg_state_cond(init_t, fr2, &rc, OP_LE, fr2, NULL, "r2<=D");
+ *tr1 = *fr1;
+ *tr2 = *fr2;
+ if (env.verbosity >= VERBOSE_VERY) {
+ printf("STEP2 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n");
+ printf("STEP2 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n");
+ }
+
+ /* step 3: r1 <op> r2 */
+ *branch_taken = reg_state_branch_taken_op(cond_t, fr1, fr2, op);
+ fr1->valid = fr2->valid = false;
+ tr1->valid = tr2->valid = false;
+ if (*branch_taken != 1) { /* FALSE is possible */
+ fr1->valid = fr2->valid = true;
+ reg_state_cond(cond_t, fr1, fr2, rev_op, fr1, fr2, "FALSE");
+ }
+ if (*branch_taken != 0) { /* TRUE is possible */
+ tr1->valid = tr2->valid = true;
+ reg_state_cond(cond_t, tr1, tr2, op, tr1, tr2, "TRUE");
+ }
+ if (env.verbosity >= VERBOSE_VERY) {
+ printf("STEP3 (%s) FALSE R1:", t_str(cond_t)); print_reg_state(fr1, "\n");
+ printf("STEP3 (%s) FALSE R2:", t_str(cond_t)); print_reg_state(fr2, "\n");
+ printf("STEP3 (%s) TRUE R1:", t_str(cond_t)); print_reg_state(tr1, "\n");
+ printf("STEP3 (%s) TRUE R2:", t_str(cond_t)); print_reg_state(tr2, "\n");
+ }
+}
+
+/* ===============================
+ * HIGH-LEVEL TEST CASE VALIDATION
+ * ===============================
+ */
+static u32 upper_seeds[] = {
+ 0,
+ 1,
+ U32_MAX,
+ U32_MAX - 1,
+ S32_MAX,
+ (u32)S32_MIN,
+};
+
+static u32 lower_seeds[] = {
+ 0,
+ 1,
+ 2, (u32)-2,
+ 255, (u32)-255,
+ UINT_MAX,
+ UINT_MAX - 1,
+ INT_MAX,
+ (u32)INT_MIN,
+};
+
+struct ctx {
+ int val_cnt, subval_cnt, range_cnt, subrange_cnt;
+ u64 uvals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)];
+ s64 svals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)];
+ u32 usubvals[ARRAY_SIZE(lower_seeds)];
+ s32 ssubvals[ARRAY_SIZE(lower_seeds)];
+ struct range *uranges, *sranges;
+ struct range *usubranges, *ssubranges;
+ int max_failure_cnt, cur_failure_cnt;
+ int total_case_cnt, case_cnt;
+ int rand_case_cnt;
+ unsigned rand_seed;
+ __u64 start_ns;
+ char progress_ctx[64];
+};
+
+static void cleanup_ctx(struct ctx *ctx)
+{
+ free(ctx->uranges);
+ free(ctx->sranges);
+ free(ctx->usubranges);
+ free(ctx->ssubranges);
+}
+
+struct subtest_case {
+ enum num_t init_t;
+ enum num_t cond_t;
+ struct range x;
+ struct range y;
+ enum op op;
+};
+
+static void subtest_case_str(struct strbuf *sb, struct subtest_case *t, bool use_op)
+{
+ snappendf(sb, "(%s)", t_str(t->init_t));
+ snprintf_range(t->init_t, sb, t->x);
+ snappendf(sb, " (%s)%s ", t_str(t->cond_t), use_op ? op_str(t->op) : "<op>");
+ snprintf_range(t->init_t, sb, t->y);
+}
+
+/* Generate and validate test case based on specific combination of setup
+ * register ranges (including their expected num_t domain), and conditional
+ * operation to perform (including num_t domain in which it has to be
+ * performed)
+ */
+static int verify_case_op(enum num_t init_t, enum num_t cond_t,
+ struct range x, struct range y, enum op op)
+{
+ char log_buf[256 * 1024];
+ size_t log_sz = sizeof(log_buf);
+ int err, false_pos = 0, true_pos = 0, branch_taken;
+ struct reg_state fr1, fr2, tr1, tr2;
+ struct reg_state fe1, fe2, te1, te2;
+ bool failed = false;
+ struct case_spec spec = {
+ .init_subregs = (init_t == U32 || init_t == S32),
+ .setup_subregs = (init_t == U32 || init_t == S32),
+ .setup_signed = (init_t == S64 || init_t == S32),
+ .compare_subregs = (cond_t == U32 || cond_t == S32),
+ .compare_signed = (cond_t == S64 || cond_t == S32),
+ };
+
+ log_buf[0] = '\0';
+
+ sim_case(init_t, cond_t, x, y, op, &fe1, &fe2, &te1, &te2, &branch_taken);
+
+ err = load_range_cmp_prog(x, y, op, branch_taken, spec,
+ log_buf, log_sz, &false_pos, &true_pos);
+ if (err) {
+ ASSERT_OK(err, "load_range_cmp_prog");
+ failed = true;
+ }
+
+ err = parse_range_cmp_log(log_buf, spec, false_pos, true_pos,
+ &fr1, &fr2, &tr1, &tr2);
+ if (err) {
+ ASSERT_OK(err, "parse_range_cmp_log");
+ failed = true;
+ }
+
+ if (!assert_reg_state_eq(&fr1, &fe1, "false_reg1") ||
+ !assert_reg_state_eq(&fr2, &fe2, "false_reg2") ||
+ !assert_reg_state_eq(&tr1, &te1, "true_reg1") ||
+ !assert_reg_state_eq(&tr2, &te2, "true_reg2")) {
+ failed = true;
+ }
+
+ if (failed || env.verbosity >= VERBOSE_NORMAL) {
+ if (failed || env.verbosity >= VERBOSE_VERY) {
+ printf("VERIFIER LOG:\n========================\n");
+ print_verifier_log(log_buf);
+ printf("=====================\n");
+ }
+ printf("ACTUAL FALSE1: "); print_reg_state(&fr1, "\n");
+ printf("EXPECTED FALSE1: "); print_reg_state(&fe1, "\n");
+ printf("ACTUAL FALSE2: "); print_reg_state(&fr2, "\n");
+ printf("EXPECTED FALSE2: "); print_reg_state(&fe2, "\n");
+ printf("ACTUAL TRUE1: "); print_reg_state(&tr1, "\n");
+ printf("EXPECTED TRUE1: "); print_reg_state(&te1, "\n");
+ printf("ACTUAL TRUE2: "); print_reg_state(&tr2, "\n");
+ printf("EXPECTED TRUE2: "); print_reg_state(&te2, "\n");
+
+ return failed ? -EINVAL : 0;
+ }
+
+ return 0;
+}
+
+/* Given setup ranges and number types, go over all supported operations,
+ * generating individual subtest for each allowed combination
+ */
+static int verify_case_opt(struct ctx *ctx, enum num_t init_t, enum num_t cond_t,
+ struct range x, struct range y, bool is_subtest)
+{
+ DEFINE_STRBUF(sb, 256);
+ int err;
+ struct subtest_case sub = {
+ .init_t = init_t,
+ .cond_t = cond_t,
+ .x = x,
+ .y = y,
+ };
+
+ sb->pos = 0; /* reset position in strbuf */
+ subtest_case_str(sb, &sub, false /* ignore op */);
+ if (is_subtest && !test__start_subtest(sb->buf))
+ return 0;
+
+ for (sub.op = first_op; sub.op <= last_op; sub.op++) {
+ sb->pos = 0; /* reset position in strbuf */
+ subtest_case_str(sb, &sub, true /* print op */);
+
+ if (env.verbosity >= VERBOSE_NORMAL) /* this speeds up debugging */
+ printf("TEST CASE: %s\n", sb->buf);
+
+ err = verify_case_op(init_t, cond_t, x, y, sub.op);
+ if (err || env.verbosity >= VERBOSE_NORMAL)
+ ASSERT_OK(err, sb->buf);
+ if (err) {
+ ctx->cur_failure_cnt++;
+ if (ctx->cur_failure_cnt > ctx->max_failure_cnt)
+ return err;
+ return 0; /* keep testing other cases */
+ }
+ ctx->case_cnt++;
+ if ((ctx->case_cnt % 10000) == 0) {
+ double progress = (ctx->case_cnt + 0.0) / ctx->total_case_cnt;
+ u64 elapsed_ns = get_time_ns() - ctx->start_ns;
+ double remain_ns = elapsed_ns / progress * (1 - progress);
+
+ fprintf(env.stderr, "PROGRESS (%s): %d/%d (%.2lf%%), "
+ "elapsed %llu mins (%.2lf hrs), "
+ "ETA %.0lf mins (%.2lf hrs)\n",
+ ctx->progress_ctx,
+ ctx->case_cnt, ctx->total_case_cnt, 100.0 * progress,
+ elapsed_ns / 1000000000 / 60,
+ elapsed_ns / 1000000000.0 / 3600,
+ remain_ns / 1000000000.0 / 60,
+ remain_ns / 1000000000.0 / 3600);
+ }
+ }
+
+ return 0;
+}
+
+static int verify_case(struct ctx *ctx, enum num_t init_t, enum num_t cond_t,
+ struct range x, struct range y)
+{
+ return verify_case_opt(ctx, init_t, cond_t, x, y, true /* is_subtest */);
+}
+
+/* ================================
+ * GENERATED CASES FROM SEED VALUES
+ * ================================
+ */
+static int u64_cmp(const void *p1, const void *p2)
+{
+ u64 x1 = *(const u64 *)p1, x2 = *(const u64 *)p2;
+
+ return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
+}
+
+static int u32_cmp(const void *p1, const void *p2)
+{
+ u32 x1 = *(const u32 *)p1, x2 = *(const u32 *)p2;
+
+ return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
+}
+
+static int s64_cmp(const void *p1, const void *p2)
+{
+ s64 x1 = *(const s64 *)p1, x2 = *(const s64 *)p2;
+
+ return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
+}
+
+static int s32_cmp(const void *p1, const void *p2)
+{
+ s32 x1 = *(const s32 *)p1, x2 = *(const s32 *)p2;
+
+ return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
+}
+
+/* Generate valid unique constants from seeds, both signed and unsigned */
+static void gen_vals(struct ctx *ctx)
+{
+ int i, j, cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(upper_seeds); i++) {
+ for (j = 0; j < ARRAY_SIZE(lower_seeds); j++) {
+ ctx->uvals[cnt++] = (((u64)upper_seeds[i]) << 32) | lower_seeds[j];
+ }
+ }
+
+ /* sort and compact uvals (i.e., it's `sort | uniq`) */
+ qsort(ctx->uvals, cnt, sizeof(*ctx->uvals), u64_cmp);
+ for (i = 1, j = 0; i < cnt; i++) {
+ if (ctx->uvals[j] == ctx->uvals[i])
+ continue;
+ j++;
+ ctx->uvals[j] = ctx->uvals[i];
+ }
+ ctx->val_cnt = j + 1;
+
+ /* we have exactly the same number of s64 values, they are just in
+ * a different order than u64s, so just sort them differently
+ */
+ for (i = 0; i < ctx->val_cnt; i++)
+ ctx->svals[i] = ctx->uvals[i];
+ qsort(ctx->svals, ctx->val_cnt, sizeof(*ctx->svals), s64_cmp);
+
+ if (env.verbosity >= VERBOSE_SUPER) {
+ DEFINE_STRBUF(sb1, 256);
+ DEFINE_STRBUF(sb2, 256);
+
+ for (i = 0; i < ctx->val_cnt; i++) {
+ sb1->pos = sb2->pos = 0;
+ snprintf_num(U64, sb1, ctx->uvals[i]);
+ snprintf_num(S64, sb2, ctx->svals[i]);
+ printf("SEED #%d: u64=%-20s s64=%-20s\n", i, sb1->buf, sb2->buf);
+ }
+ }
+
+ /* 32-bit values are generated separately */
+ cnt = 0;
+ for (i = 0; i < ARRAY_SIZE(lower_seeds); i++) {
+ ctx->usubvals[cnt++] = lower_seeds[i];
+ }
+
+ /* sort and compact usubvals (i.e., it's `sort | uniq`) */
+ qsort(ctx->usubvals, cnt, sizeof(*ctx->usubvals), u32_cmp);
+ for (i = 1, j = 0; i < cnt; i++) {
+ if (ctx->usubvals[j] == ctx->usubvals[i])
+ continue;
+ j++;
+ ctx->usubvals[j] = ctx->usubvals[i];
+ }
+ ctx->subval_cnt = j + 1;
+
+ for (i = 0; i < ctx->subval_cnt; i++)
+ ctx->ssubvals[i] = ctx->usubvals[i];
+ qsort(ctx->ssubvals, ctx->subval_cnt, sizeof(*ctx->ssubvals), s32_cmp);
+
+ if (env.verbosity >= VERBOSE_SUPER) {
+ DEFINE_STRBUF(sb1, 256);
+ DEFINE_STRBUF(sb2, 256);
+
+ for (i = 0; i < ctx->subval_cnt; i++) {
+ sb1->pos = sb2->pos = 0;
+ snprintf_num(U32, sb1, ctx->usubvals[i]);
+ snprintf_num(S32, sb2, ctx->ssubvals[i]);
+ printf("SUBSEED #%d: u32=%-10s s32=%-10s\n", i, sb1->buf, sb2->buf);
+ }
+ }
+}
+
+/* Generate valid ranges from upper/lower seeds */
+static int gen_ranges(struct ctx *ctx)
+{
+ int i, j, cnt = 0;
+
+ for (i = 0; i < ctx->val_cnt; i++) {
+ for (j = i; j < ctx->val_cnt; j++) {
+ if (env.verbosity >= VERBOSE_SUPER) {
+ DEFINE_STRBUF(sb1, 256);
+ DEFINE_STRBUF(sb2, 256);
+
+ sb1->pos = sb2->pos = 0;
+ snprintf_range(U64, sb1, range(U64, ctx->uvals[i], ctx->uvals[j]));
+ snprintf_range(S64, sb2, range(S64, ctx->svals[i], ctx->svals[j]));
+ printf("RANGE #%d: u64=%-40s s64=%-40s\n", cnt, sb1->buf, sb2->buf);
+ }
+ cnt++;
+ }
+ }
+ ctx->range_cnt = cnt;
+
+ ctx->uranges = calloc(ctx->range_cnt, sizeof(*ctx->uranges));
+ if (!ASSERT_OK_PTR(ctx->uranges, "uranges_calloc"))
+ return -EINVAL;
+ ctx->sranges = calloc(ctx->range_cnt, sizeof(*ctx->sranges));
+ if (!ASSERT_OK_PTR(ctx->sranges, "sranges_calloc"))
+ return -EINVAL;
+
+ cnt = 0;
+ for (i = 0; i < ctx->val_cnt; i++) {
+ for (j = i; j < ctx->val_cnt; j++) {
+ ctx->uranges[cnt] = range(U64, ctx->uvals[i], ctx->uvals[j]);
+ ctx->sranges[cnt] = range(S64, ctx->svals[i], ctx->svals[j]);
+ cnt++;
+ }
+ }
+
+ cnt = 0;
+ for (i = 0; i < ctx->subval_cnt; i++) {
+ for (j = i; j < ctx->subval_cnt; j++) {
+ if (env.verbosity >= VERBOSE_SUPER) {
+ DEFINE_STRBUF(sb1, 256);
+ DEFINE_STRBUF(sb2, 256);
+
+ sb1->pos = sb2->pos = 0;
+ snprintf_range(U32, sb1, range(U32, ctx->usubvals[i], ctx->usubvals[j]));
+ snprintf_range(S32, sb2, range(S32, ctx->ssubvals[i], ctx->ssubvals[j]));
+ printf("SUBRANGE #%d: u32=%-20s s32=%-20s\n", cnt, sb1->buf, sb2->buf);
+ }
+ cnt++;
+ }
+ }
+ ctx->subrange_cnt = cnt;
+
+ ctx->usubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->usubranges));
+ if (!ASSERT_OK_PTR(ctx->usubranges, "usubranges_calloc"))
+ return -EINVAL;
+ ctx->ssubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->ssubranges));
+ if (!ASSERT_OK_PTR(ctx->ssubranges, "ssubranges_calloc"))
+ return -EINVAL;
+
+ cnt = 0;
+ for (i = 0; i < ctx->subval_cnt; i++) {
+ for (j = i; j < ctx->subval_cnt; j++) {
+ ctx->usubranges[cnt] = range(U32, ctx->usubvals[i], ctx->usubvals[j]);
+ ctx->ssubranges[cnt] = range(S32, ctx->ssubvals[i], ctx->ssubvals[j]);
+ cnt++;
+ }
+ }
+
+ return 0;
+}
+
+static int parse_env_vars(struct ctx *ctx)
+{
+ const char *s;
+
+ if ((s = getenv("REG_BOUNDS_MAX_FAILURE_CNT"))) {
+ errno = 0;
+ ctx->max_failure_cnt = strtol(s, NULL, 10);
+ if (errno || ctx->max_failure_cnt < 0) {
+ ASSERT_OK(-errno, "REG_BOUNDS_MAX_FAILURE_CNT");
+ return -EINVAL;
+ }
+ }
+
+ if ((s = getenv("REG_BOUNDS_RAND_CASE_CNT"))) {
+ errno = 0;
+ ctx->rand_case_cnt = strtol(s, NULL, 10);
+ if (errno || ctx->rand_case_cnt < 0) {
+ ASSERT_OK(-errno, "REG_BOUNDS_RAND_CASE_CNT");
+ return -EINVAL;
+ }
+ }
+
+ if ((s = getenv("REG_BOUNDS_RAND_SEED"))) {
+ errno = 0;
+ ctx->rand_seed = strtoul(s, NULL, 10);
+ if (errno) {
+ ASSERT_OK(-errno, "REG_BOUNDS_RAND_SEED");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
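+
+/* For example, to reproduce a specific randomized run one might use
+ * something like (hypothetical values, any decimal numbers work):
+ *
+ *	REG_BOUNDS_RAND_SEED=42 REG_BOUNDS_RAND_CASE_CNT=500 \
+ *		./test_progs -t reg_bounds_rand
+ */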
+
+static int prepare_gen_tests(struct ctx *ctx)
+{
+ const char *s;
+ int err;
+
+ if (!(s = getenv("SLOW_TESTS")) || strcmp(s, "1") != 0) {
+ test__skip();
+ return -ENOTSUP;
+ }
+
+ err = parse_env_vars(ctx);
+ if (err)
+ return err;
+
+ gen_vals(ctx);
+ err = gen_ranges(ctx);
+ if (err) {
+ ASSERT_OK(err, "gen_ranges");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Go over generated constants and ranges and validate various supported
+ * combinations of them
+ */
+static void validate_gen_range_vs_const_64(enum num_t init_t, enum num_t cond_t)
+{
+ struct ctx ctx;
+ struct range rconst;
+ const struct range *ranges;
+ const u64 *vals;
+ int i, j;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ if (prepare_gen_tests(&ctx))
+ goto cleanup;
+
+ ranges = init_t == U64 ? ctx.uranges : ctx.sranges;
+ vals = init_t == U64 ? ctx.uvals : (const u64 *)ctx.svals;
+
+ ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.range_cnt * ctx.val_cnt);
+ ctx.start_ns = get_time_ns();
+ snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
+ "RANGE x CONST, %s -> %s",
+ t_str(init_t), t_str(cond_t));
+
+ for (i = 0; i < ctx.val_cnt; i++) {
+ for (j = 0; j < ctx.range_cnt; j++) {
+ rconst = range(init_t, vals[i], vals[i]);
+
+ /* (u64|s64)(<range> x <const>) */
+ if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst))
+ goto cleanup;
+ /* (u64|s64)(<const> x <range>) */
+ if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j]))
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ cleanup_ctx(&ctx);
+}
+
+static void validate_gen_range_vs_const_32(enum num_t init_t, enum num_t cond_t)
+{
+ struct ctx ctx;
+ struct range rconst;
+ const struct range *ranges;
+ const u32 *vals;
+ int i, j;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ if (prepare_gen_tests(&ctx))
+ goto cleanup;
+
+ ranges = init_t == U32 ? ctx.usubranges : ctx.ssubranges;
+ vals = init_t == U32 ? ctx.usubvals : (const u32 *)ctx.ssubvals;
+
+ ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.subrange_cnt * ctx.subval_cnt);
+ ctx.start_ns = get_time_ns();
+ snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
+ "RANGE x CONST, %s -> %s",
+ t_str(init_t), t_str(cond_t));
+
+ for (i = 0; i < ctx.subval_cnt; i++) {
+ for (j = 0; j < ctx.subrange_cnt; j++) {
+ rconst = range(init_t, vals[i], vals[i]);
+
+ /* (u32|s32)(<range> x <const>) */
+ if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst))
+ goto cleanup;
+ /* (u32|s32)(<const> x <range>) */
+ if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j]))
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ cleanup_ctx(&ctx);
+}
+
+static void validate_gen_range_vs_range(enum num_t init_t, enum num_t cond_t)
+{
+ struct ctx ctx;
+ const struct range *ranges;
+ int i, j, rcnt;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ if (prepare_gen_tests(&ctx))
+ goto cleanup;
+
+ switch (init_t)
+ {
+ case U64:
+ ranges = ctx.uranges;
+ rcnt = ctx.range_cnt;
+ break;
+ case U32:
+ ranges = ctx.usubranges;
+ rcnt = ctx.subrange_cnt;
+ break;
+ case S64:
+ ranges = ctx.sranges;
+ rcnt = ctx.range_cnt;
+ break;
+ case S32:
+ ranges = ctx.ssubranges;
+ rcnt = ctx.subrange_cnt;
+ break;
+ default:
+ printf("validate_gen_range_vs_range!\n");
+ exit(1);
+ }
+
+ ctx.total_case_cnt = (last_op - first_op + 1) * (2 * rcnt * (rcnt + 1) / 2);
+ ctx.start_ns = get_time_ns();
+ snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
+ "RANGE x RANGE, %s -> %s",
+ t_str(init_t), t_str(cond_t));
+
+ for (i = 0; i < rcnt; i++) {
+ for (j = i; j < rcnt; j++) {
+ /* (<range> x <range>) */
+ if (verify_case(&ctx, init_t, cond_t, ranges[i], ranges[j]))
+ goto cleanup;
+ if (verify_case(&ctx, init_t, cond_t, ranges[j], ranges[i]))
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ cleanup_ctx(&ctx);
+}
+
+/* Go over thousands of test cases generated from initial seed values.
+ * Given this takes a long time, guard this behind the SLOW_TESTS=1 envvar.
+ * If the envvar is not set, these tests are skipped during test_progs runs.
+ *
+ * We split this up into smaller subsets based on initialization and
+ * conditional numeric domains to get easy parallelization with test_progs'
+ * -j argument.
+ */
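+
+/* E.g., a typical invocation could look something like:
+ *
+ *	SLOW_TESTS=1 ./test_progs -t reg_bounds_gen -j
+ */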
+
+/* RANGE x CONST, U64 initial range */
+void test_reg_bounds_gen_consts_u64_u64(void) { validate_gen_range_vs_const_64(U64, U64); }
+void test_reg_bounds_gen_consts_u64_s64(void) { validate_gen_range_vs_const_64(U64, S64); }
+void test_reg_bounds_gen_consts_u64_u32(void) { validate_gen_range_vs_const_64(U64, U32); }
+void test_reg_bounds_gen_consts_u64_s32(void) { validate_gen_range_vs_const_64(U64, S32); }
+/* RANGE x CONST, S64 initial range */
+void test_reg_bounds_gen_consts_s64_u64(void) { validate_gen_range_vs_const_64(S64, U64); }
+void test_reg_bounds_gen_consts_s64_s64(void) { validate_gen_range_vs_const_64(S64, S64); }
+void test_reg_bounds_gen_consts_s64_u32(void) { validate_gen_range_vs_const_64(S64, U32); }
+void test_reg_bounds_gen_consts_s64_s32(void) { validate_gen_range_vs_const_64(S64, S32); }
+/* RANGE x CONST, U32 initial range */
+void test_reg_bounds_gen_consts_u32_u64(void) { validate_gen_range_vs_const_32(U32, U64); }
+void test_reg_bounds_gen_consts_u32_s64(void) { validate_gen_range_vs_const_32(U32, S64); }
+void test_reg_bounds_gen_consts_u32_u32(void) { validate_gen_range_vs_const_32(U32, U32); }
+void test_reg_bounds_gen_consts_u32_s32(void) { validate_gen_range_vs_const_32(U32, S32); }
+/* RANGE x CONST, S32 initial range */
+void test_reg_bounds_gen_consts_s32_u64(void) { validate_gen_range_vs_const_32(S32, U64); }
+void test_reg_bounds_gen_consts_s32_s64(void) { validate_gen_range_vs_const_32(S32, S64); }
+void test_reg_bounds_gen_consts_s32_u32(void) { validate_gen_range_vs_const_32(S32, U32); }
+void test_reg_bounds_gen_consts_s32_s32(void) { validate_gen_range_vs_const_32(S32, S32); }
+
+/* RANGE x RANGE, U64 initial range */
+void test_reg_bounds_gen_ranges_u64_u64(void) { validate_gen_range_vs_range(U64, U64); }
+void test_reg_bounds_gen_ranges_u64_s64(void) { validate_gen_range_vs_range(U64, S64); }
+void test_reg_bounds_gen_ranges_u64_u32(void) { validate_gen_range_vs_range(U64, U32); }
+void test_reg_bounds_gen_ranges_u64_s32(void) { validate_gen_range_vs_range(U64, S32); }
+/* RANGE x RANGE, S64 initial range */
+void test_reg_bounds_gen_ranges_s64_u64(void) { validate_gen_range_vs_range(S64, U64); }
+void test_reg_bounds_gen_ranges_s64_s64(void) { validate_gen_range_vs_range(S64, S64); }
+void test_reg_bounds_gen_ranges_s64_u32(void) { validate_gen_range_vs_range(S64, U32); }
+void test_reg_bounds_gen_ranges_s64_s32(void) { validate_gen_range_vs_range(S64, S32); }
+/* RANGE x RANGE, U32 initial range */
+void test_reg_bounds_gen_ranges_u32_u64(void) { validate_gen_range_vs_range(U32, U64); }
+void test_reg_bounds_gen_ranges_u32_s64(void) { validate_gen_range_vs_range(U32, S64); }
+void test_reg_bounds_gen_ranges_u32_u32(void) { validate_gen_range_vs_range(U32, U32); }
+void test_reg_bounds_gen_ranges_u32_s32(void) { validate_gen_range_vs_range(U32, S32); }
+/* RANGE x RANGE, S32 initial range */
+void test_reg_bounds_gen_ranges_s32_u64(void) { validate_gen_range_vs_range(S32, U64); }
+void test_reg_bounds_gen_ranges_s32_s64(void) { validate_gen_range_vs_range(S32, S64); }
+void test_reg_bounds_gen_ranges_s32_u32(void) { validate_gen_range_vs_range(S32, U32); }
+void test_reg_bounds_gen_ranges_s32_s32(void) { validate_gen_range_vs_range(S32, S32); }
+
+#define DEFAULT_RAND_CASE_CNT 100
+
+#define RAND_21BIT_MASK ((1 << 21) - 1)
+
+static u64 rand_u64()
+{
+ /* RAND_MAX is guaranteed to be at least 1<<15, but in practice it
+ * seems to be 1<<31, so we need to call it thrice to get full u64;
+	 * we'll use roughly equal split: 22 + 21 + 21 bits
+ */
+ return ((u64)random() << 42) |
+ (((u64)random() & RAND_21BIT_MASK) << 21) |
+ (random() & RAND_21BIT_MASK);
+}
+
+static u64 rand_const(enum num_t t)
+{
+ return cast_t(t, rand_u64());
+}
+
+static struct range rand_range(enum num_t t)
+{
+ u64 x = rand_const(t), y = rand_const(t);
+
+ return range(t, min_t(t, x, y), max_t(t, x, y));
+}
+
+static void validate_rand_ranges(enum num_t init_t, enum num_t cond_t, bool const_range)
+{
+ struct ctx ctx;
+ struct range range1, range2;
+ int err, i;
+ u64 t;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ err = parse_env_vars(&ctx);
+ if (err) {
+ ASSERT_OK(err, "parse_env_vars");
+ return;
+ }
+
+ if (ctx.rand_case_cnt == 0)
+ ctx.rand_case_cnt = DEFAULT_RAND_CASE_CNT;
+ if (ctx.rand_seed == 0)
+ ctx.rand_seed = (unsigned)get_time_ns();
+
+ srandom(ctx.rand_seed);
+
+ ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.rand_case_cnt);
+ ctx.start_ns = get_time_ns();
+ snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
+ "[RANDOM SEED %u] RANGE x %s, %s -> %s",
+ ctx.rand_seed, const_range ? "CONST" : "RANGE",
+ t_str(init_t), t_str(cond_t));
+
+ for (i = 0; i < ctx.rand_case_cnt; i++) {
+ range1 = rand_range(init_t);
+ if (const_range) {
+ t = rand_const(init_t);
+ range2 = range(init_t, t, t);
+ } else {
+ range2 = rand_range(init_t);
+ }
+
+ /* <range1> x <range2> */
+ if (verify_case_opt(&ctx, init_t, cond_t, range1, range2, false /* !is_subtest */))
+ goto cleanup;
+ /* <range2> x <range1> */
+ if (verify_case_opt(&ctx, init_t, cond_t, range2, range1, false /* !is_subtest */))
+ goto cleanup;
+ }
+
+cleanup:
+ /* make sure we report random seed for reproducing */
+ ASSERT_TRUE(true, ctx.progress_ctx);
+ cleanup_ctx(&ctx);
+}
+
+/* [RANDOM] RANGE x CONST, U64 initial range */
+void test_reg_bounds_rand_consts_u64_u64(void) { validate_rand_ranges(U64, U64, true /* const */); }
+void test_reg_bounds_rand_consts_u64_s64(void) { validate_rand_ranges(U64, S64, true /* const */); }
+void test_reg_bounds_rand_consts_u64_u32(void) { validate_rand_ranges(U64, U32, true /* const */); }
+void test_reg_bounds_rand_consts_u64_s32(void) { validate_rand_ranges(U64, S32, true /* const */); }
+/* [RANDOM] RANGE x CONST, S64 initial range */
+void test_reg_bounds_rand_consts_s64_u64(void) { validate_rand_ranges(S64, U64, true /* const */); }
+void test_reg_bounds_rand_consts_s64_s64(void) { validate_rand_ranges(S64, S64, true /* const */); }
+void test_reg_bounds_rand_consts_s64_u32(void) { validate_rand_ranges(S64, U32, true /* const */); }
+void test_reg_bounds_rand_consts_s64_s32(void) { validate_rand_ranges(S64, S32, true /* const */); }
+/* [RANDOM] RANGE x CONST, U32 initial range */
+void test_reg_bounds_rand_consts_u32_u64(void) { validate_rand_ranges(U32, U64, true /* const */); }
+void test_reg_bounds_rand_consts_u32_s64(void) { validate_rand_ranges(U32, S64, true /* const */); }
+void test_reg_bounds_rand_consts_u32_u32(void) { validate_rand_ranges(U32, U32, true /* const */); }
+void test_reg_bounds_rand_consts_u32_s32(void) { validate_rand_ranges(U32, S32, true /* const */); }
+/* [RANDOM] RANGE x CONST, S32 initial range */
+void test_reg_bounds_rand_consts_s32_u64(void) { validate_rand_ranges(S32, U64, true /* const */); }
+void test_reg_bounds_rand_consts_s32_s64(void) { validate_rand_ranges(S32, S64, true /* const */); }
+void test_reg_bounds_rand_consts_s32_u32(void) { validate_rand_ranges(S32, U32, true /* const */); }
+void test_reg_bounds_rand_consts_s32_s32(void) { validate_rand_ranges(S32, S32, true /* const */); }
+
+/* [RANDOM] RANGE x RANGE, U64 initial range */
+void test_reg_bounds_rand_ranges_u64_u64(void) { validate_rand_ranges(U64, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_u64_s64(void) { validate_rand_ranges(U64, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_u64_u32(void) { validate_rand_ranges(U64, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_u64_s32(void) { validate_rand_ranges(U64, S32, false /* range */); }
+/* [RANDOM] RANGE x RANGE, S64 initial range */
+void test_reg_bounds_rand_ranges_s64_u64(void) { validate_rand_ranges(S64, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_s64_s64(void) { validate_rand_ranges(S64, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_s64_u32(void) { validate_rand_ranges(S64, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_s64_s32(void) { validate_rand_ranges(S64, S32, false /* range */); }
+/* [RANDOM] RANGE x RANGE, U32 initial range */
+void test_reg_bounds_rand_ranges_u32_u64(void) { validate_rand_ranges(U32, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_u32_s64(void) { validate_rand_ranges(U32, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_u32_u32(void) { validate_rand_ranges(U32, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_u32_s32(void) { validate_rand_ranges(U32, S32, false /* range */); }
+/* [RANDOM] RANGE x RANGE, S32 initial range */
+void test_reg_bounds_rand_ranges_s32_u64(void) { validate_rand_ranges(S32, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_s32_s64(void) { validate_rand_ranges(S32, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_s32_u32(void) { validate_rand_ranges(S32, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_s32_s32(void) { validate_rand_ranges(S32, S32, false /* range */); }
+
+/* A set of hard-coded "interesting" cases to validate as part of normal
+ * test_progs test runs
+ */
+static struct subtest_case crafted_cases[] = {
+ {U64, U64, {0, 0xffffffff}, {0, 0}},
+ {U64, U64, {0, 0x80000000}, {0, 0}},
+ {U64, U64, {0x100000000ULL, 0x100000100ULL}, {0, 0}},
+ {U64, U64, {0x100000000ULL, 0x180000000ULL}, {0, 0}},
+ {U64, U64, {0x100000000ULL, 0x1ffffff00ULL}, {0, 0}},
+ {U64, U64, {0x100000000ULL, 0x1ffffff01ULL}, {0, 0}},
+ {U64, U64, {0x100000000ULL, 0x1fffffffeULL}, {0, 0}},
+ {U64, U64, {0x100000001ULL, 0x1000000ffULL}, {0, 0}},
+
+ /* single point overlap, interesting BPF_EQ and BPF_NE interactions */
+ {U64, U64, {0, 1}, {1, 0x80000000}},
+ {U64, S64, {0, 1}, {1, 0x80000000}},
+ {U64, U32, {0, 1}, {1, 0x80000000}},
+ {U64, S32, {0, 1}, {1, 0x80000000}},
+
+ {U64, S64, {0, 0xffffffff00000000ULL}, {0, 0}},
+ {U64, S64, {0x7fffffffffffffffULL, 0xffffffff00000000ULL}, {0, 0}},
+ {U64, S64, {0x7fffffff00000001ULL, 0xffffffff00000000ULL}, {0, 0}},
+ {U64, S64, {0, 0xffffffffULL}, {1, 1}},
+ {U64, S64, {0, 0xffffffffULL}, {0x7fffffff, 0x7fffffff}},
+
+ {U64, U32, {0, 0x100000000}, {0, 0}},
+ {U64, U32, {0xfffffffe, 0x100000000}, {0x80000000, 0x80000000}},
+
+ {U64, S32, {0, 0xffffffff00000000ULL}, {0, 0}},
+ /* these are tricky cases where the lower 32 bits allow tightening the
+ * 64-bit boundaries based on the tightened lower 32-bit boundaries
+ */
+ {U64, S32, {0, 0x0ffffffffULL}, {0, 0}},
+ {U64, S32, {0, 0x100000000ULL}, {0, 0}},
+ {U64, S32, {0, 0x100000001ULL}, {0, 0}},
+ {U64, S32, {0, 0x180000000ULL}, {0, 0}},
+ {U64, S32, {0, 0x17fffffffULL}, {0, 0}},
+ {U64, S32, {0, 0x180000001ULL}, {0, 0}},
+
+ /* verifier knows about [-1, 0] range for s32 for this case already */
+ {S64, S64, {0xffffffffffffffffULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}},
+ /* but didn't know about these cases initially */
+ {U64, U64, {0xffffffff, 0x100000000ULL}, {0, 0}}, /* s32: [-1, 0] */
+ {U64, U64, {0xffffffff, 0x100000001ULL}, {0, 0}}, /* s32: [-1, 1] */
+
+ /* longer convergence case: learning from u64 -> s64 -> u64 -> u32,
+ * arriving at u32: [1, U32_MAX] (instead of more pessimistic [0, U32_MAX])
+ */
+ {S64, U64, {0xffffffff00000001ULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}},
+
+ {U32, U32, {1, U32_MAX}, {0, 0}},
+
+ {U32, S32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
+
+ {S32, U64, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}, {(u32)(s32)-255, 0}},
+ {S32, S64, {(u32)(s32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
+ {S32, S64, {0, 1}, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}},
+ {S32, U32, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}},
+};
+
+/* Go over crafted hard-coded cases. This is fast, so we do it as part of
+ * normal test_progs run.
+ */
+void test_reg_bounds_crafted(void)
+{
+ struct ctx ctx;
+ int i;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ for (i = 0; i < ARRAY_SIZE(crafted_cases); i++) {
+ struct subtest_case *c = &crafted_cases[i];
+
+ verify_case(&ctx, c->init_t, c->cond_t, c->x, c->y);
+ verify_case(&ctx, c->init_t, c->cond_t, c->y, c->x);
+ }
+
+ cleanup_ctx(&ctx);
+}
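The crafted cases above are pairs of ranges fed to verify_case(), which compares the verifier's post-branch ranges against the test's own range-tracking model. As a rough, user-space-only illustration of the kind of refinement being validated (this is not code from the patch; the names and structure are made up, and only one of the many U64/S64/U32/S32 x compare-op combinations is shown), here is how a taken unsigned "x < y" branch narrows x:

#include <stdio.h>
#include <stdint.h>

struct urange { uint64_t min, max; };

/* Narrow x's range assuming the "x < y" branch was taken (u64 semantics).
 * A full model would also narrow y and handle every comparison operator.
 */
static struct urange refine_ult_true(struct urange x, struct urange y)
{
        struct urange res = x;

        /* x must be strictly below some value in y, so x <= y.max - 1 */
        if (y.max > 0 && res.max > y.max - 1)
                res.max = y.max - 1;
        return res;
}

int main(void)
{
        struct urange x = { 0, 0xffffffffULL };
        struct urange c = { 0x80000000ULL, 0x80000000ULL }; /* constant */
        struct urange r = refine_ult_true(x, c);

        printf("x in [%#llx, %#llx] after 'x < %#llx' is taken\n",
               (unsigned long long)r.min, (unsigned long long)r.max,
               (unsigned long long)c.max);
        return 0;
}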
diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
index f29c08d93beb..18d451be57c8 100644
--- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
@@ -13,22 +13,22 @@ static struct {
const char *err_msg;
} spin_lock_fail_tests[] = {
{ "lock_id_kptr_preserve",
- "5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) "
- "R1_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
+ "5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2) "
+ "R1_w=ptr_foo(id=2,ref_obj_id=2) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=ptr_ expected=percpu_ptr_" },
{ "lock_id_global_zero",
- "; R1_w=map_value(off=0,ks=4,vs=4,imm=0)\n2: (85) call bpf_this_cpu_ptr#154\n"
+ "; R1_w=map_value(map=.data.A,ks=4,vs=4)\n2: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_mapval_preserve",
"[0-9]\\+: (bf) r1 = r0 ;"
- " R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)"
- " R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n"
+ " R0_w=map_value(id=1,map=array_map,ks=4,vs=8)"
+ " R1_w=map_value(id=1,map=array_map,ks=4,vs=8)\n"
"[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_innermapval_preserve",
"[0-9]\\+: (bf) r1 = r0 ;"
- " R0=map_value(id=2,off=0,ks=4,vs=8,imm=0)"
- " R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n"
+ " R0=map_value(id=2,ks=4,vs=8)"
+ " R1_w=map_value(id=2,ks=4,vs=8)\n"
"[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" },
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
index 51883ccb8020..196abf223465 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
@@ -2387,12 +2387,9 @@ static int generate_dummy_prog(void)
const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
LIBBPF_OPTS(bpf_prog_load_opts, opts);
const size_t log_buf_sz = 256;
- char *log_buf;
+ char log_buf[log_buf_sz];
int fd = -1;
- log_buf = malloc(log_buf_sz);
- if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
- return fd;
opts.log_buf = log_buf;
opts.log_size = log_buf_sz;
@@ -2402,7 +2399,6 @@ static int generate_dummy_prog(void)
prog_insns, prog_insn_cnt, &opts);
ASSERT_STREQ(log_buf, "", "log_0");
ASSERT_GE(fd, 0, "prog_fd");
- free(log_buf);
return fd;
}
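The hunk above drops the heap allocation and keeps the verifier log on the stack for the whole lifetime of the load call. For reference, a stand-alone sketch of the same libbpf pattern; it uses raw instruction encodings instead of the selftest's insn macros so it stays self-contained, and the program name and type are arbitrary here:

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Load a trivial "r0 = 0; exit" program with a stack-allocated log buffer. */
static int load_dummy_prog(void)
{
        const struct bpf_insn insns[] = {
                { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
                { .code = BPF_JMP | BPF_EXIT },
        };
        char log_buf[256];
        LIBBPF_OPTS(bpf_prog_load_opts, opts,
                .log_buf = log_buf,
                .log_size = sizeof(log_buf),
                .log_level = 1,
        );

        return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "dummy", "GPL",
                             insns, sizeof(insns) / sizeof(insns[0]), &opts);
}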
diff --git a/tools/testing/selftests/bpf/prog_tests/vmlinux.c b/tools/testing/selftests/bpf/prog_tests/vmlinux.c
index 72310cfc6474..6fb2217d940b 100644
--- a/tools/testing/selftests/bpf/prog_tests/vmlinux.c
+++ b/tools/testing/selftests/bpf/prog_tests/vmlinux.c
@@ -16,27 +16,27 @@ static void nsleep()
void test_vmlinux(void)
{
- int duration = 0, err;
+ int err;
struct test_vmlinux* skel;
struct test_vmlinux__bss *bss;
skel = test_vmlinux__open_and_load();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "test_vmlinux__open_and_load"))
return;
bss = skel->bss;
err = test_vmlinux__attach(skel);
- if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "test_vmlinux__attach"))
goto cleanup;
/* trigger everything */
nsleep();
- CHECK(!bss->tp_called, "tp", "not called\n");
- CHECK(!bss->raw_tp_called, "raw_tp", "not called\n");
- CHECK(!bss->tp_btf_called, "tp_btf", "not called\n");
- CHECK(!bss->kprobe_called, "kprobe", "not called\n");
- CHECK(!bss->fentry_called, "fentry", "not called\n");
+ ASSERT_TRUE(bss->tp_called, "tp");
+ ASSERT_TRUE(bss->raw_tp_called, "raw_tp");
+ ASSERT_TRUE(bss->tp_btf_called, "tp_btf");
+ ASSERT_TRUE(bss->kprobe_called, "kprobe");
+ ASSERT_TRUE(bss->fentry_called, "fentry");
cleanup:
test_vmlinux__destroy(skel);
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
index f2b8167b72a8..442f4ca39fd7 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
@@ -35,6 +35,8 @@ int dump_task_stack(struct bpf_iter__task *ctx)
return 0;
}
+int num_user_stacks = 0;
+
SEC("iter/task")
int get_task_user_stacks(struct bpf_iter__task *ctx)
{
@@ -51,6 +53,9 @@ int get_task_user_stacks(struct bpf_iter__task *ctx)
if (res <= 0)
return 0;
+ /* Only one task, the current one, should succeed */
+ ++num_user_stacks;
+
buf_sz += res;
/* If the verifier doesn't refine bpf_get_task_stack res, and instead
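The new num_user_stacks global lets the user-space side assert that exactly one task, the current one, produced a user stack. A hedged sketch of how such a global counter is typically read back through the generated skeleton; the real check on the prog_tests side differs in detail, and the skeleton name follows the usual bpftool naming convention:

#include <stdio.h>
#include "bpf_iter_task_stack.skel.h"

static void check_num_user_stacks(void)
{
        struct bpf_iter_task_stack *skel;

        skel = bpf_iter_task_stack__open_and_load();
        if (!skel)
                return;

        /* ... create the iterator link and read it to completion here ... */

        /* only the current task is expected to have a readable user stack */
        if (skel->bss->num_user_stacks != 1)
                fprintf(stderr, "unexpected num_user_stacks: %d\n",
                        skel->bss->num_user_stacks);

        bpf_iter_task_stack__destroy(skel);
}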
diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
index e1e5c54a6a11..49efaed143fc 100644
--- a/tools/testing/selftests/bpf/progs/exceptions_assert.c
+++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
@@ -18,48 +18,48 @@
return *(u64 *)num; \
}
-__msg(": R0_w=-2147483648 R10=fp0")
+__msg(": R0_w=0xffffffff80000000 R10=fp0")
check_assert(s64, eq, int_min, INT_MIN);
-__msg(": R0_w=2147483647 R10=fp0")
+__msg(": R0_w=0x7fffffff R10=fp0")
check_assert(s64, eq, int_max, INT_MAX);
__msg(": R0_w=0 R10=fp0")
check_assert(s64, eq, zero, 0);
-__msg(": R0_w=-9223372036854775808 R1_w=-9223372036854775808 R10=fp0")
+__msg(": R0_w=0x8000000000000000 R1_w=0x8000000000000000 R10=fp0")
check_assert(s64, eq, llong_min, LLONG_MIN);
-__msg(": R0_w=9223372036854775807 R1_w=9223372036854775807 R10=fp0")
+__msg(": R0_w=0x7fffffffffffffff R1_w=0x7fffffffffffffff R10=fp0")
check_assert(s64, eq, llong_max, LLONG_MAX);
-__msg(": R0_w=scalar(smax=2147483646) R10=fp0")
+__msg(": R0_w=scalar(smax=0x7ffffffe) R10=fp0")
check_assert(s64, lt, pos, INT_MAX);
-__msg(": R0_w=scalar(smax=-1,umin=9223372036854775808,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+__msg(": R0_w=scalar(smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, lt, zero, 0);
-__msg(": R0_w=scalar(smax=-2147483649,umin=9223372036854775808,umax=18446744071562067967,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+__msg(": R0_w=scalar(smax=0xffffffff7fffffff,umin=0x8000000000000000,umax=0xffffffff7fffffff,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, lt, neg, INT_MIN);
-__msg(": R0_w=scalar(smax=2147483647) R10=fp0")
+__msg(": R0_w=scalar(smax=0x7fffffff) R10=fp0")
check_assert(s64, le, pos, INT_MAX);
__msg(": R0_w=scalar(smax=0) R10=fp0")
check_assert(s64, le, zero, 0);
-__msg(": R0_w=scalar(smax=-2147483648,umin=9223372036854775808,umax=18446744071562067968,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+__msg(": R0_w=scalar(smax=0xffffffff80000000,umin=0x8000000000000000,umax=0xffffffff80000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, le, neg, INT_MIN);
-__msg(": R0_w=scalar(smin=umin=2147483648,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
+__msg(": R0_w=scalar(smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, gt, pos, INT_MAX);
-__msg(": R0_w=scalar(smin=umin=1,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
+__msg(": R0_w=scalar(smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, gt, zero, 0);
-__msg(": R0_w=scalar(smin=-2147483647) R10=fp0")
+__msg(": R0_w=scalar(smin=0xffffffff80000001) R10=fp0")
check_assert(s64, gt, neg, INT_MIN);
-__msg(": R0_w=scalar(smin=umin=2147483647,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
+__msg(": R0_w=scalar(smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, ge, pos, INT_MAX);
-__msg(": R0_w=scalar(smin=0,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
+__msg(": R0_w=scalar(smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
check_assert(s64, ge, zero, 0);
-__msg(": R0_w=scalar(smin=-2147483648) R10=fp0")
+__msg(": R0_w=scalar(smin=0xffffffff80000000) R10=fp0")
check_assert(s64, ge, neg, INT_MIN);
SEC("?tc")
__log_level(2) __failure
-__msg(": R0=0 R1=ctx(off=0,imm=0) R2=scalar(smin=smin32=-2147483646,smax=smax32=2147483645) R10=fp0")
+__msg(": R0=0 R1=ctx() R2=scalar(smin=0xffffffff80000002,smax=smax32=0x7ffffffd,smin32=0x80000002) R10=fp0")
int check_assert_range_s64(struct __sk_buff *ctx)
{
struct bpf_sock *sk = ctx->sk;
@@ -75,7 +75,7 @@ int check_assert_range_s64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
-__msg(": R1=ctx(off=0,imm=0) R2=scalar(smin=umin=smin32=umin32=4096,smax=umax=smax32=umax32=8192,var_off=(0x0; 0x3fff))")
+__msg(": R1=ctx() R2=scalar(smin=umin=smin32=umin32=4096,smax=umax=smax32=umax32=8192,var_off=(0x0; 0x3fff))")
int check_assert_range_u64(struct __sk_buff *ctx)
{
u64 num = ctx->len;
@@ -86,7 +86,7 @@ int check_assert_range_u64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
-__msg(": R0=0 R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
+__msg(": R0=0 R1=ctx() R2=4096 R10=fp0")
int check_assert_single_range_s64(struct __sk_buff *ctx)
{
struct bpf_sock *sk = ctx->sk;
@@ -103,7 +103,7 @@ int check_assert_single_range_s64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
-__msg(": R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
+__msg(": R1=ctx() R2=4096 R10=fp0")
int check_assert_single_range_u64(struct __sk_buff *ctx)
{
u64 num = ctx->len;
@@ -114,7 +114,7 @@ int check_assert_single_range_u64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
-__msg(": R1=pkt(off=64,r=64,imm=0) R2=pkt_end(off=0,imm=0) R6=pkt(off=0,r=64,imm=0) R10=fp0")
+__msg(": R1=pkt(off=64,r=64) R2=pkt_end() R6=pkt(r=64) R10=fp0")
int check_assert_generic(struct __sk_buff *ctx)
{
u8 *data_end = (void *)(long)ctx->data_end;
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index c20c4e38b71c..b2181f850d3e 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -1411,4 +1411,26 @@ __naked int checkpoint_states_deletion(void)
);
}
+struct {
+ int data[32];
+ int n;
+} loop_data;
+
+SEC("raw_tp")
+__success
+int iter_arr_with_actual_elem_count(const void *ctx)
+{
+ int i, n = loop_data.n, sum = 0;
+
+ if (n > ARRAY_SIZE(loop_data.data))
+ return 0;
+
+ bpf_for(i, 0, n) {
+ /* no rechecking of i against ARRAY_SIZE(loop_data.data) */
+ sum += loop_data.data[i];
+ }
+
+ return sum;
+}
+
char _license[] SEC("license") = "GPL";
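The new program verifies only because the n > ARRAY_SIZE(loop_data.data) check lets the verifier bound n, and therefore i, before the loop starts. For contrast, an illustrative variant (not part of the patch, reusing the same includes and loop_data) showing the per-access re-check that would otherwise be needed; whether a given variant is accepted ultimately depends on the exact bounds the verifier can derive:

SEC("raw_tp")
int iter_arr_unbounded_sketch(const void *ctx)
{
        int i, n = loop_data.n, sum = 0;

        bpf_for(i, 0, n) {
                /* without an upfront bound on n, each access needs its own
                 * re-check before indexing into the array
                 */
                if (i >= ARRAY_SIZE(loop_data.data))
                        break;
                sum += loop_data.data[i];
        }

        return sum;
}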
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
index b567a666d2b8..1769fdff6aea 100644
--- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
@@ -14,6 +14,24 @@ struct node_data {
struct bpf_rb_node node;
};
+struct refcounted_node {
+ long data;
+ struct bpf_rb_node rb_node;
+ struct bpf_refcount refcount;
+};
+
+struct stash {
+ struct bpf_spin_lock l;
+ struct refcounted_node __kptr *stashed;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct stash);
+ __uint(max_entries, 10);
+} refcounted_node_stash SEC(".maps");
+
struct plain_local {
long key;
long data;
@@ -38,6 +56,7 @@ struct map_value {
* Had to do the same w/ bpf_kfunc_call_test_release below
*/
struct node_data *just_here_because_btf_bug;
+struct refcounted_node *just_here_because_btf_bug2;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -132,4 +151,56 @@ long stash_test_ref_kfunc(void *ctx)
return 0;
}
+SEC("tc")
+long refcount_acquire_without_unstash(void *ctx)
+{
+ struct refcounted_node *p;
+ struct stash *s;
+ int ret = 0;
+
+ s = bpf_map_lookup_elem(&refcounted_node_stash, &ret);
+ if (!s)
+ return 1;
+
+ if (!s->stashed)
+ /* refcount_acquire failure is expected when no refcounted_node
+ * has been stashed before this program executes
+ */
+ return 2;
+
+ p = bpf_refcount_acquire(s->stashed);
+ if (!p)
+ return 3;
+
+ ret = s->stashed ? s->stashed->data : -1;
+ bpf_obj_drop(p);
+ return ret;
+}
+
+/* Helper for refcount_acquire_without_unstash test */
+SEC("tc")
+long stash_refcounted_node(void *ctx)
+{
+ struct refcounted_node *p;
+ struct stash *s;
+ int key = 0;
+
+ s = bpf_map_lookup_elem(&refcounted_node_stash, &key);
+ if (!s)
+ return 1;
+
+ p = bpf_obj_new(typeof(*p));
+ if (!p)
+ return 2;
+ p->data = 42;
+
+ p = bpf_kptr_xchg(&s->stashed, p);
+ if (p) {
+ bpf_obj_drop(p);
+ return 3;
+ }
+
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
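A hedged user-space sketch (not the actual prog_tests code) of how the two new programs are meant to be driven via BPF_PROG_TEST_RUN: stash a node first, then exercise bpf_refcount_acquire() on the stashed pointer. The pkt_v4 dummy packet and network_helpers.h are assumed from the selftests harness, and return values are only spot-checked:

#include <bpf/bpf.h>
#include "network_helpers.h"
#include "local_kptr_stash.skel.h"

static void run_stash_then_acquire(void)
{
        LIBBPF_OPTS(bpf_test_run_opts, opts,
                .data_in = &pkt_v4,             /* assumption: usual dummy packet */
                .data_size_in = sizeof(pkt_v4),
                .repeat = 1,
        );
        struct local_kptr_stash *skel;

        skel = local_kptr_stash__open_and_load();
        if (!skel)
                return;

        /* stash a refcounted node; opts.retval == 0 on success */
        bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_refcounted_node),
                               &opts);

        /* acquire it straight from the stash; opts.retval == 42 means the
         * stashed node's data was read back successfully
         */
        bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash),
                               &opts);

        local_kptr_stash__destroy(skel);
}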
diff --git a/tools/testing/selftests/bpf/progs/pyperf180.c b/tools/testing/selftests/bpf/progs/pyperf180.c
index c39f559d3100..42c4a8b62e36 100644
--- a/tools/testing/selftests/bpf/progs/pyperf180.c
+++ b/tools/testing/selftests/bpf/progs/pyperf180.c
@@ -1,4 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 180
+
+/* An llvm upstream commit in clang18,
+ * https://github.com/llvm/llvm-project/commit/1a2e77cf9e11dbf56b5720c607313a566eebb16e,
+ * changed inlining behavior and caused a compilation failure, as some branch
+ * target distances exceeded the 16-bit representation limit of cpu v1/v2/v3.
+ * The macro __BPF_CPU_VERSION__ was later implemented in clang18 to specify
+ * which cpu version is used for compilation, so a smaller UNROLL_COUNT can be
+ * set when __BPF_CPU_VERSION__ is less than 4, which reduces some branch
+ * target distances and resolves the compilation failure.
+ *
+ * To cover the case where a developer/CI uses clang18 but the corresponding
+ * llvm repo checkout does not yet have __BPF_CPU_VERSION__, a smaller
+ * UNROLL_COUNT is set as well to prevent potential compilation failures.
+ */
+#ifdef __BPF_CPU_VERSION__
+#if __BPF_CPU_VERSION__ < 4
+#define UNROLL_COUNT 90
+#endif
+#elif __clang_major__ == 18
+#define UNROLL_COUNT 90
+#endif
+
#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
index 1ef07f6ee580..1553b9c16aa7 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -54,6 +54,25 @@ long rbtree_refcounted_node_ref_escapes(void *ctx)
}
SEC("?tc")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+long refcount_acquire_maybe_null(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ /* Intentionally not testing !n
+ * it's MAYBE_NULL for refcount_acquire
+ */
+ m = bpf_refcount_acquire(n);
+ if (m)
+ bpf_obj_drop(m);
+ if (n)
+ bpf_obj_drop(n);
+
+ return 0;
+}
+
+SEC("?tc")
__failure __msg("Unreleased reference id=3 alloc_insn=9")
long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c b/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
new file mode 100644
index 000000000000..44628865fe1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+__u32 target_ancestor_level;
+__u64 target_ancestor_cgid;
+int target_pid, target_hid;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+static int bpf_link_create_verify(int cmd)
+{
+ struct cgroup *cgrp, *ancestor;
+ struct task_struct *task;
+ int ret = 0;
+
+ if (cmd != BPF_LINK_CREATE)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+
+ /* Then it can run in parallel with others */
+ if (task->pid != target_pid)
+ return 0;
+
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ /* Refuse it if its cgid or its ancestor's cgid is the target cgid */
+ if (cgrp->kn->id == target_ancestor_cgid)
+ ret = -1;
+
+ ancestor = bpf_cgroup_ancestor(cgrp, target_ancestor_level);
+ if (!ancestor)
+ goto out;
+
+ if (ancestor->kn->id == target_ancestor_cgid)
+ ret = -1;
+ bpf_cgroup_release(ancestor);
+
+out:
+ bpf_cgroup_release(cgrp);
+ return ret;
+}
+
+SEC("lsm/bpf")
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size)
+{
+ return bpf_link_create_verify(cmd);
+}
+
+SEC("lsm.s/bpf")
+int BPF_PROG(lsm_s_run, int cmd, union bpf_attr *attr, unsigned int size)
+{
+ return bpf_link_create_verify(cmd);
+}
+
+SEC("fentry")
+int BPF_PROG(fentry_run)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
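A hedged sketch, not the actual prog_tests code, of how the BPF side above gets wired up: fill in the target pid/hierarchy/cgroup-id globals before load, then attach the LSM hooks so the next BPF_LINK_CREATE issued by this task is checked against the cgroup1 hierarchy. The skeleton name follows the usual bpftool convention, and skipping the bare fentry program via autoload is an assumption made only to keep the sketch self-contained:

#include <unistd.h>
#include <bpf/libbpf.h>
#include "test_cgroup1_hierarchy.skel.h"

static int setup_and_attach_lsm(int hid, __u64 cgid, __u32 level)
{
        struct test_cgroup1_hierarchy *skel;
        int err;

        skel = test_cgroup1_hierarchy__open();
        if (!skel)
                return -1;

        skel->bss->target_pid = getpid();
        skel->bss->target_hid = hid;            /* cgroup v1 hierarchy id */
        skel->bss->target_ancestor_cgid = cgid; /* cgroup id to refuse */
        skel->bss->target_ancestor_level = level;

        /* assumption: the bare SEC("fentry") program is not needed here */
        bpf_program__set_autoload(skel->progs.fentry_run, false);

        err = test_cgroup1_hierarchy__load(skel);
        if (!err)
                err = test_cgroup1_hierarchy__attach(skel);
        if (err)
                test_cgroup1_hierarchy__destroy(skel);
        return err;
}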
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index c5588a14fe2e..ec430b71730b 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -965,6 +965,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("bound check with JMP_JSLT for crossing 64-bit signed boundary")
__success __retval(0)
+__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */
__naked void crossing_64_bit_signed_boundary_2(void)
{
asm volatile (" \
@@ -1046,6 +1047,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary")
__success __retval(0)
+__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */
__naked void crossing_32_bit_signed_boundary_2(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index 37ffa57f28a1..a350ecdfba4a 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -153,6 +153,14 @@ static int parse_retval(const char *str, int *val, const char *name)
return parse_int(str, val, name);
}
+static void update_flags(int *flags, int flag, bool clear)
+{
+ if (clear)
+ *flags &= ~flag;
+ else
+ *flags |= flag;
+}
+
/* Uses btf_decl_tag attributes to describe the expected test
* behavior, see bpf_misc.h for detailed description of each attribute
* and attribute combinations.
@@ -171,6 +179,7 @@ static int parse_test_spec(struct test_loader *tester,
memset(spec, 0, sizeof(*spec));
spec->prog_name = bpf_program__name(prog);
+ spec->prog_flags = BPF_F_TEST_REG_INVARIANTS; /* by default be strict */
btf = bpf_object__btf(obj);
if (!btf) {
@@ -187,7 +196,8 @@ static int parse_test_spec(struct test_loader *tester,
for (i = 1; i < btf__type_cnt(btf); i++) {
const char *s, *val, *msg;
const struct btf_type *t;
- int tmp;
+ bool clear;
+ int flags;
t = btf__type_by_id(btf, i);
if (!btf_is_decl_tag(t))
@@ -253,23 +263,30 @@ static int parse_test_spec(struct test_loader *tester,
goto cleanup;
} else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;
+
+ clear = val[0] == '!';
+ if (clear)
+ val++;
+
if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
- spec->prog_flags |= BPF_F_STRICT_ALIGNMENT;
+ update_flags(&spec->prog_flags, BPF_F_STRICT_ALIGNMENT, clear);
} else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) {
- spec->prog_flags |= BPF_F_ANY_ALIGNMENT;
+ update_flags(&spec->prog_flags, BPF_F_ANY_ALIGNMENT, clear);
} else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) {
- spec->prog_flags |= BPF_F_TEST_RND_HI32;
+ update_flags(&spec->prog_flags, BPF_F_TEST_RND_HI32, clear);
} else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) {
- spec->prog_flags |= BPF_F_TEST_STATE_FREQ;
+ update_flags(&spec->prog_flags, BPF_F_TEST_STATE_FREQ, clear);
} else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) {
- spec->prog_flags |= BPF_F_SLEEPABLE;
+ update_flags(&spec->prog_flags, BPF_F_SLEEPABLE, clear);
} else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
- spec->prog_flags |= BPF_F_XDP_HAS_FRAGS;
+ update_flags(&spec->prog_flags, BPF_F_XDP_HAS_FRAGS, clear);
+ } else if (strcmp(val, "BPF_F_TEST_REG_INVARIANTS") == 0) {
+ update_flags(&spec->prog_flags, BPF_F_TEST_REG_INVARIANTS, clear);
} else /* assume numeric value */ {
- err = parse_int(val, &tmp, "test prog flags");
+ err = parse_int(val, &flags, "test prog flags");
if (err)
goto cleanup;
- spec->prog_flags |= tmp;
+ update_flags(&spec->prog_flags, flags, clear);
}
}
}
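In effect, test specs now start from a strict default and individual tests can opt out with a leading '!', as the verifier_bounds.c hunks above do. A tiny stand-alone illustration of the resulting flag arithmetic (the helper is repeated from the patch so the example compiles on its own):

#include <stdbool.h>
#include <stdio.h>
#include <linux/bpf.h>

static void update_flags(int *flags, int flag, bool clear)
{
        if (clear)
                *flags &= ~flag;
        else
                *flags |= flag;
}

int main(void)
{
        int flags = BPF_F_TEST_REG_INVARIANTS;  /* strict default, as in parse_test_spec() */

        /* __flag(BPF_F_STRICT_ALIGNMENT) sets a flag ... */
        update_flags(&flags, BPF_F_STRICT_ALIGNMENT, false);
        /* ... while __flag(!BPF_F_TEST_REG_INVARIANTS) clears one */
        update_flags(&flags, BPF_F_TEST_REG_INVARIANTS, true);

        printf("prog_flags: %#x\n", (unsigned)flags); /* BPF_F_STRICT_ALIGNMENT only */
        return 0;
}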
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 7fc00e423e4d..767e0693df10 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1396,13 +1396,18 @@ static void test_map_stress(void)
#define MAX_DELAY_US 50000
#define MIN_DELAY_RANGE_US 5000
-static int map_update_retriable(int map_fd, const void *key, const void *value,
- int flags, int attempts)
+static bool retry_for_again_or_busy(int err)
+{
+ return (err == EAGAIN || err == EBUSY);
+}
+
+int map_update_retriable(int map_fd, const void *key, const void *value, int flags, int attempts,
+ retry_for_error_fn need_retry)
{
int delay = rand() % MIN_DELAY_RANGE_US;
while (bpf_map_update_elem(map_fd, key, value, flags)) {
- if (!attempts || (errno != EAGAIN && errno != EBUSY))
+ if (!attempts || !need_retry(errno))
return -errno;
if (delay <= MAX_DELAY_US / 2)
@@ -1445,11 +1450,13 @@ static void test_update_delete(unsigned int fn, void *data)
key = value = i;
if (do_update) {
- err = map_update_retriable(fd, &key, &value, BPF_NOEXIST, MAP_RETRIES);
+ err = map_update_retriable(fd, &key, &value, BPF_NOEXIST, MAP_RETRIES,
+ retry_for_again_or_busy);
if (err)
printf("error %d %d\n", err, errno);
assert(err == 0);
- err = map_update_retriable(fd, &key, &value, BPF_EXIST, MAP_RETRIES);
+ err = map_update_retriable(fd, &key, &value, BPF_EXIST, MAP_RETRIES,
+ retry_for_again_or_busy);
if (err)
printf("error %d %d\n", err, errno);
assert(err == 0);
diff --git a/tools/testing/selftests/bpf/test_maps.h b/tools/testing/selftests/bpf/test_maps.h
index f6fbca761732..e4ac704a536c 100644
--- a/tools/testing/selftests/bpf/test_maps.h
+++ b/tools/testing/selftests/bpf/test_maps.h
@@ -4,6 +4,7 @@
#include <stdio.h>
#include <stdlib.h>
+#include <stdbool.h>
#define CHECK(condition, tag, format...) ({ \
int __ret = !!(condition); \
@@ -16,4 +17,8 @@
extern int skips;
+typedef bool (*retry_for_error_fn)(int err);
+int map_update_retriable(int map_fd, const void *key, const void *value, int flags, int attempts,
+ retry_for_error_fn need_retry);
+
#endif
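With the retry predicate factored out, other map tests can reuse map_update_retriable() with their own notion of a transient error. A hedged example of the new hook; the extra error in the set and the attempt count are illustrative, not taken from the patch:

#include <errno.h>
#include <stdbool.h>
#include <bpf/bpf.h>
#include "test_maps.h"

static bool retry_on_transient_err(int err)
{
        return err == EAGAIN || err == EBUSY || err == ENOMEM;
}

static int update_with_retries(int map_fd, int key, long value)
{
        return map_update_retriable(map_fd, &key, &value, BPF_ANY,
                                    10 /* attempts */, retry_on_transient_err);
}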
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index 2c89674fc62c..b0068a9d2cfe 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -679,7 +679,7 @@ static int load_path(const struct sock_addr_test *test, const char *path)
bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
bpf_program__set_expected_attach_type(prog, test->expected_attach_type);
- bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32);
+ bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS);
err = bpf_object__load(obj);
if (err) {
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 98107e0452d3..f36e41435be7 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1588,7 +1588,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (fixup_skips != skips)
return;
- pflags = BPF_F_TEST_RND_HI32;
+ pflags = BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS;
if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
pflags |= BPF_F_STRICT_ALIGNMENT;
if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 8d994884c7b4..d2458c1b1671 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -276,7 +276,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
if (type != BPF_PROG_TYPE_UNSPEC && bpf_program__type(prog) != type)
bpf_program__set_type(prog, type);
- flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32;
+ flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS;
bpf_program__set_flags(prog, flags);
err = bpf_object__load(obj);
@@ -299,7 +299,7 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.kern_version = kern_version,
- .prog_flags = BPF_F_TEST_RND_HI32,
+ .prog_flags = BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS,
.log_level = extra_prog_load_log_flags,
.log_buf = log_buf,
.log_size = log_buf_sz,
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index 655095810d4a..1d418d66e375 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -18,6 +18,7 @@
#include <libelf.h>
#include <gelf.h>
#include <float.h>
+#include <math.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
@@ -99,6 +100,7 @@ struct stat_specs {
enum stat_id ids[ALL_STATS_CNT];
enum stat_variant variants[ALL_STATS_CNT];
bool asc[ALL_STATS_CNT];
+ bool abs[ALL_STATS_CNT];
int lens[ALL_STATS_CNT * 3]; /* 3x for comparison mode */
};
@@ -133,6 +135,7 @@ struct filter {
int stat_id;
enum stat_variant stat_var;
long value;
+ bool abs;
};
static struct env {
@@ -142,10 +145,12 @@ static struct env {
bool debug;
bool quiet;
bool force_checkpoints;
+ bool force_reg_invariants;
enum resfmt out_fmt;
bool show_version;
bool comparison_mode;
bool replay_mode;
+ int top_n;
int log_level;
int log_size;
@@ -210,8 +215,7 @@ static const struct argp_option opts[] = {
{ "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" },
{ "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" },
{ "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" },
- { "test-states", 't', NULL, 0,
- "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" },
+ { "top-n", 'n', "N", 0, "Emit only up to first N results." },
{ "quiet", 'q', NULL, 0, "Quiet mode" },
{ "emit", 'e', "SPEC", 0, "Specify stats to be emitted" },
{ "sort", 's', "SPEC", 0, "Specify sort order" },
@@ -219,6 +223,10 @@ static const struct argp_option opts[] = {
{ "compare", 'C', NULL, 0, "Comparison mode" },
{ "replay", 'R', NULL, 0, "Replay mode" },
{ "filter", 'f', "FILTER", 0, "Filter expressions (or @filename for file with expressions)." },
+ { "test-states", 't', NULL, 0,
+ "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" },
+ { "test-reg-invariants", 'r', NULL, 0,
+ "Force BPF verifier failure on register invariant violation (BPF_F_TEST_REG_INVARIANTS program flag)" },
{},
};
@@ -290,6 +298,16 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
case 't':
env.force_checkpoints = true;
break;
+ case 'r':
+ env.force_reg_invariants = true;
+ break;
+ case 'n':
+ errno = 0;
+ env.top_n = strtol(arg, NULL, 10);
+ if (errno) {
+ fprintf(stderr, "invalid top N specifier: %s\n", arg);
+ argp_usage(state);
+ }
+ break;
case 'C':
env.comparison_mode = true;
break;
@@ -455,7 +473,8 @@ static struct {
{ OP_EQ, "=" },
};
-static bool parse_stat_id_var(const char *name, size_t len, int *id, enum stat_variant *var);
+static bool parse_stat_id_var(const char *name, size_t len, int *id,
+ enum stat_variant *var, bool *is_abs);
static int append_filter(struct filter **filters, int *cnt, const char *str)
{
@@ -488,13 +507,14 @@ static int append_filter(struct filter **filters, int *cnt, const char *str)
long val;
const char *end = str;
const char *op_str;
+ bool is_abs;
op_str = operators[i].op_str;
p = strstr(str, op_str);
if (!p)
continue;
- if (!parse_stat_id_var(str, p - str, &id, &var)) {
+ if (!parse_stat_id_var(str, p - str, &id, &var, &is_abs)) {
fprintf(stderr, "Unrecognized stat name in '%s'!\n", str);
return -EINVAL;
}
@@ -533,6 +553,7 @@ static int append_filter(struct filter **filters, int *cnt, const char *str)
f->stat_id = id;
f->stat_var = var;
f->op = operators[i].op_kind;
+ f->abs = true;
f->value = val;
*cnt += 1;
@@ -657,7 +678,8 @@ static struct stat_def {
[MARK_READ_MAX_LEN] = { "Max mark read length", {"max_mark_read_len", "mark_read"}, },
};
-static bool parse_stat_id_var(const char *name, size_t len, int *id, enum stat_variant *var)
+static bool parse_stat_id_var(const char *name, size_t len, int *id,
+ enum stat_variant *var, bool *is_abs)
{
static const char *var_sfxs[] = {
[VARIANT_A] = "_a",
@@ -667,6 +689,14 @@ static bool parse_stat_id_var(const char *name, size_t len, int *id, enum stat_v
};
int i, j, k;
+ /* |<stat>| means we take absolute value of given stat */
+ *is_abs = false;
+ if (len > 2 && name[0] == '|' && name[len - 1] == '|') {
+ *is_abs = true;
+ name += 1;
+ len -= 2;
+ }
+
for (i = 0; i < ARRAY_SIZE(stat_defs); i++) {
struct stat_def *def = &stat_defs[i];
size_t alias_len, sfx_len;
@@ -722,7 +752,7 @@ static bool is_desc_sym(char c)
static int parse_stat(const char *stat_name, struct stat_specs *specs)
{
int id;
- bool has_order = false, is_asc = false;
+ bool has_order = false, is_asc = false, is_abs = false;
size_t len = strlen(stat_name);
enum stat_variant var;
@@ -737,7 +767,7 @@ static int parse_stat(const char *stat_name, struct stat_specs *specs)
len -= 1;
}
- if (!parse_stat_id_var(stat_name, len, &id, &var)) {
+ if (!parse_stat_id_var(stat_name, len, &id, &var, &is_abs)) {
fprintf(stderr, "Unrecognized stat name '%s'\n", stat_name);
return -ESRCH;
}
@@ -745,6 +775,7 @@ static int parse_stat(const char *stat_name, struct stat_specs *specs)
specs->ids[specs->spec_cnt] = id;
specs->variants[specs->spec_cnt] = var;
specs->asc[specs->spec_cnt] = has_order ? is_asc : stat_defs[id].asc_by_default;
+ specs->abs[specs->spec_cnt] = is_abs;
specs->spec_cnt++;
return 0;
@@ -997,6 +1028,8 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
if (env.force_checkpoints)
bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_STATE_FREQ);
+ if (env.force_reg_invariants)
+ bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_REG_INVARIANTS);
err = bpf_object__load(obj);
env.progs_processed++;
@@ -1103,7 +1136,7 @@ cleanup:
}
static int cmp_stat(const struct verif_stats *s1, const struct verif_stats *s2,
- enum stat_id id, bool asc)
+ enum stat_id id, bool asc, bool abs)
{
int cmp = 0;
@@ -1124,6 +1157,11 @@ static int cmp_stat(const struct verif_stats *s1, const struct verif_stats *s2,
long v1 = s1->stats[id];
long v2 = s2->stats[id];
+ if (abs) {
+ v1 = v1 < 0 ? -v1 : v1;
+ v2 = v2 < 0 ? -v2 : v2;
+ }
+
if (v1 != v2)
cmp = v1 < v2 ? -1 : 1;
break;
@@ -1142,7 +1180,8 @@ static int cmp_prog_stats(const void *v1, const void *v2)
int i, cmp;
for (i = 0; i < env.sort_spec.spec_cnt; i++) {
- cmp = cmp_stat(s1, s2, env.sort_spec.ids[i], env.sort_spec.asc[i]);
+ cmp = cmp_stat(s1, s2, env.sort_spec.ids[i],
+ env.sort_spec.asc[i], env.sort_spec.abs[i]);
if (cmp != 0)
return cmp;
}
@@ -1211,7 +1250,8 @@ static void fetch_join_stat_value(const struct verif_stats_join *s,
static int cmp_join_stat(const struct verif_stats_join *s1,
const struct verif_stats_join *s2,
- enum stat_id id, enum stat_variant var, bool asc)
+ enum stat_id id, enum stat_variant var,
+ bool asc, bool abs)
{
const char *str1 = NULL, *str2 = NULL;
double v1, v2;
@@ -1220,6 +1260,11 @@ static int cmp_join_stat(const struct verif_stats_join *s1,
fetch_join_stat_value(s1, id, var, &str1, &v1);
fetch_join_stat_value(s2, id, var, &str2, &v2);
+ if (abs) {
+ v1 = fabs(v1);
+ v2 = fabs(v2);
+ }
+
if (str1)
cmp = strcmp(str1, str2);
else if (v1 != v2)
@@ -1237,7 +1282,8 @@ static int cmp_join_stats(const void *v1, const void *v2)
cmp = cmp_join_stat(s1, s2,
env.sort_spec.ids[i],
env.sort_spec.variants[i],
- env.sort_spec.asc[i]);
+ env.sort_spec.asc[i],
+ env.sort_spec.abs[i]);
if (cmp != 0)
return cmp;
}
@@ -1720,6 +1766,9 @@ static bool is_join_stat_filter_matched(struct filter *f, const struct verif_sta
fetch_join_stat_value(stats, f->stat_id, f->stat_var, &str, &value);
+ if (f->abs)
+ value = fabs(value);
+
switch (f->op) {
case OP_EQ: return value > f->value - eps && value < f->value + eps;
case OP_NEQ: return value < f->value - eps || value > f->value + eps;
@@ -1766,7 +1815,7 @@ static int handle_comparison_mode(void)
struct stat_specs base_specs = {}, comp_specs = {};
struct stat_specs tmp_sort_spec;
enum resfmt cur_fmt;
- int err, i, j, last_idx;
+ int err, i, j, last_idx, cnt;
if (env.filename_cnt != 2) {
fprintf(stderr, "Comparison mode expects exactly two input CSV files!\n\n");
@@ -1879,7 +1928,7 @@ static int handle_comparison_mode(void)
env.join_stat_cnt += 1;
}
- /* now sort joined results accorsing to sort spec */
+ /* now sort joined results according to sort spec */
qsort(env.join_stats, env.join_stat_cnt, sizeof(*env.join_stats), cmp_join_stats);
/* for human-readable table output we need to do extra pass to
@@ -1896,16 +1945,22 @@ one_more_time:
output_comp_headers(cur_fmt);
last_idx = -1;
+ cnt = 0;
for (i = 0; i < env.join_stat_cnt; i++) {
const struct verif_stats_join *join = &env.join_stats[i];
if (!should_output_join_stats(join))
continue;
+ if (env.top_n && cnt >= env.top_n)
+ break;
+
if (cur_fmt == RESFMT_TABLE_CALCLEN)
last_idx = i;
output_comp_stats(join, cur_fmt, i == last_idx);
+
+ cnt++;
}
if (cur_fmt == RESFMT_TABLE_CALCLEN) {
@@ -1920,6 +1975,9 @@ static bool is_stat_filter_matched(struct filter *f, const struct verif_stats *s
{
long value = stats->stats[f->stat_id];
+ if (f->abs)
+ value = value < 0 ? -value : value;
+
switch (f->op) {
case OP_EQ: return value == f->value;
case OP_NEQ: return value != f->value;
@@ -1964,7 +2022,7 @@ static bool should_output_stats(const struct verif_stats *stats)
static void output_prog_stats(void)
{
const struct verif_stats *stats;
- int i, last_stat_idx = 0;
+ int i, last_stat_idx = 0, cnt = 0;
if (env.out_fmt == RESFMT_TABLE) {
/* calculate column widths */
@@ -1984,7 +2042,10 @@ static void output_prog_stats(void)
stats = &env.prog_stats[i];
if (!should_output_stats(stats))
continue;
+ if (env.top_n && cnt >= env.top_n)
+ break;
output_stats(stats, env.out_fmt, i == last_stat_idx);
+ cnt++;
}
}
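For comparison-mode output, the practical effect of the |stat| form is that sorting and filtering operate on magnitudes, so a -10 regression orders after a +3 one under an ascending sort. A stand-alone illustration of that ordering rule (not veristat code):

#include <stdio.h>
#include <stdlib.h>

static int cmp_abs(const void *a, const void *b)
{
        long v1 = labs(*(const long *)a), v2 = labs(*(const long *)b);

        return v1 < v2 ? -1 : v1 > v2 ? 1 : 0;
}

int main(void)
{
        long diffs[] = { -10, 3, -1 };

        qsort(diffs, 3, sizeof(diffs[0]), cmp_abs);
        printf("%ld %ld %ld\n", diffs[0], diffs[1], diffs[2]); /* -1 3 -10 */
        return 0;
}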
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index 685034528018..65d14f3bbe30 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -36,7 +36,9 @@ DEFAULT_COMMAND="./test_progs"
MOUNT_DIR="mnt"
ROOTFS_IMAGE="root.img"
OUTPUT_DIR="$HOME/.bpf_selftests"
-KCONFIG_REL_PATHS=("tools/testing/selftests/bpf/config" "tools/testing/selftests/bpf/config.${ARCH}")
+KCONFIG_REL_PATHS=("tools/testing/selftests/bpf/config"
+ "tools/testing/selftests/bpf/config.vm"
+ "tools/testing/selftests/bpf/config.${ARCH}")
INDEX_URL="https://raw.githubusercontent.com/libbpf/ci/master/INDEX"
NUM_COMPILE_JOBS="$(nproc)"
LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/pci_reset.sh b/tools/testing/selftests/drivers/net/mlxsw/pci_reset.sh
new file mode 100755
index 000000000000..fe0343b95e6c
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/pci_reset.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that PCI reset works correctly by verifying that only the expected reset
+# methods are supported and that after issuing the reset the ifindex of the
+# port changes.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ pci_reset_test
+"
+NUM_NETIFS=1
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+
+pci_reset_test()
+{
+ RET=0
+
+ local bus=$(echo $DEVLINK_DEV | cut -d '/' -f 1)
+ local bdf=$(echo $DEVLINK_DEV | cut -d '/' -f 2)
+
+ if [ $bus != "pci" ]; then
+ check_err 1 "devlink device is not a PCI device"
+ log_test "pci reset"
+ return
+ fi
+
+ if [ ! -f /sys/bus/pci/devices/$bdf/reset_method ]; then
+ check_err 1 "reset is not supported"
+ log_test "pci reset"
+ return
+ fi
+
+ [[ $(cat /sys/bus/pci/devices/$bdf/reset_method) == "bus" ]]
+ check_err $? "only \"bus\" reset method should be supported"
+
+ local ifindex_pre=$(ip -j link show dev $swp1 | jq '.[]["ifindex"]')
+
+ echo 1 > /sys/bus/pci/devices/$bdf/reset
+ check_err $? "reset failed"
+
+ # Wait for udev to rename newly created netdev.
+ udevadm settle
+
+ local ifindex_post=$(ip -j link show dev $swp1 | jq '.[]["ifindex"]')
+
+ [[ $ifindex_pre != $ifindex_post ]]
+ check_err $? "reset not performed"
+
+ log_test "pci reset"
+}
+
+swp1=${NETIFS[p1]}
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 5b2aca4c5f10..9274edfb76ff 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -91,6 +91,7 @@ TEST_PROGS += test_bridge_neigh_suppress.sh
TEST_PROGS += test_vxlan_nolocalbypass.sh
TEST_PROGS += test_bridge_backup_port.sh
TEST_PROGS += fdb_flush.sh
+TEST_PROGS += fq_band_pktlimit.sh
TEST_FILES := settings
diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
index 6ff3e732f449..c79e65581dc3 100644
--- a/tools/testing/selftests/net/cmsg_sender.c
+++ b/tools/testing/selftests/net/cmsg_sender.c
@@ -45,11 +45,13 @@ struct options {
const char *host;
const char *service;
unsigned int size;
+ unsigned int num_pkt;
struct {
unsigned int mark;
unsigned int dontfrag;
unsigned int tclass;
unsigned int hlimit;
+ unsigned int priority;
} sockopt;
struct {
unsigned int family;
@@ -72,6 +74,7 @@ struct options {
} v6;
} opt = {
.size = 13,
+ .num_pkt = 1,
.sock = {
.family = AF_UNSPEC,
.type = SOCK_DGRAM,
@@ -112,7 +115,7 @@ static void cs_parse_args(int argc, char *argv[])
{
int o;
- while ((o = getopt(argc, argv, "46sS:p:m:M:d:tf:F:c:C:l:L:H:")) != -1) {
+ while ((o = getopt(argc, argv, "46sS:p:P:m:M:n:d:tf:F:c:C:l:L:H:")) != -1) {
switch (o) {
case 's':
opt.silent_send = true;
@@ -138,7 +141,9 @@ static void cs_parse_args(int argc, char *argv[])
cs_usage(argv[0]);
}
break;
-
+ case 'P':
+ opt.sockopt.priority = atoi(optarg);
+ break;
case 'm':
opt.mark.ena = true;
opt.mark.val = atoi(optarg);
@@ -146,6 +151,9 @@ static void cs_parse_args(int argc, char *argv[])
case 'M':
opt.sockopt.mark = atoi(optarg);
break;
+ case 'n':
+ opt.num_pkt = atoi(optarg);
+ break;
case 'd':
opt.txtime.ena = true;
opt.txtime.delay = atoi(optarg);
@@ -410,6 +418,10 @@ static void ca_set_sockopts(int fd)
setsockopt(fd, SOL_IPV6, IPV6_UNICAST_HOPS,
&opt.sockopt.hlimit, sizeof(opt.sockopt.hlimit)))
error(ERN_SOCKOPT, errno, "setsockopt IPV6_HOPLIMIT");
+ if (opt.sockopt.priority &&
+ setsockopt(fd, SOL_SOCKET, SO_PRIORITY,
+ &opt.sockopt.priority, sizeof(opt.sockopt.priority)))
+ error(ERN_SOCKOPT, errno, "setsockopt SO_PRIORITY");
}
int main(int argc, char *argv[])
@@ -421,6 +433,7 @@ int main(int argc, char *argv[])
char cbuf[1024];
int err;
int fd;
+ int i;
cs_parse_args(argc, argv);
@@ -480,24 +493,27 @@ int main(int argc, char *argv[])
cs_write_cmsg(fd, &msg, cbuf, sizeof(cbuf));
- err = sendmsg(fd, &msg, 0);
- if (err < 0) {
- if (!opt.silent_send)
- fprintf(stderr, "send failed: %s\n", strerror(errno));
- err = ERN_SEND;
- goto err_out;
- } else if (err != (int)opt.size) {
- fprintf(stderr, "short send\n");
- err = ERN_SEND_SHORT;
- goto err_out;
- } else {
- err = ERN_SUCCESS;
+ for (i = 0; i < opt.num_pkt; i++) {
+ err = sendmsg(fd, &msg, 0);
+ if (err < 0) {
+ if (!opt.silent_send)
+ fprintf(stderr, "send failed: %s\n", strerror(errno));
+ err = ERN_SEND;
+ goto err_out;
+ } else if (err != (int)opt.size) {
+ fprintf(stderr, "short send\n");
+ err = ERN_SEND_SHORT;
+ goto err_out;
+ }
}
+ err = ERN_SUCCESS;
- /* Make sure all timestamps have time to loop back */
- usleep(opt.txtime.delay);
+ if (opt.ts.ena) {
+ /* Make sure all timestamps have time to loop back */
+ usleep(opt.txtime.delay);
- cs_read_cmsg(fd, &msg, cbuf, sizeof(cbuf));
+ cs_read_cmsg(fd, &msg, cbuf, sizeof(cbuf));
+ }
err_out:
close(fd);
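The two new knobs map to -P (SO_PRIORITY) and -n (packet count), which the fq test below depends on. A minimal stand-alone sketch of the same socket-level pattern; the destination, payload, and return handling are arbitrary here:

#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int send_burst(int fd, const struct sockaddr_in6 *dst,
                      unsigned int prio, unsigned int num_pkt)
{
        char payload[13] = "hello, fq!";
        unsigned int i;

        /* selects the priority (and hence the fq band) for all packets */
        if (setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
                return -errno;

        for (i = 0; i < num_pkt; i++)
                if (sendto(fd, payload, sizeof(payload), 0,
                           (const struct sockaddr *)dst, sizeof(*dst)) < 0)
                        return -errno;
        return 0;
}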
diff --git a/tools/testing/selftests/net/fq_band_pktlimit.sh b/tools/testing/selftests/net/fq_band_pktlimit.sh
new file mode 100755
index 000000000000..24b77bdf41ff
--- /dev/null
+++ b/tools/testing/selftests/net/fq_band_pktlimit.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Verify that FQ has a packet limit per band:
+#
+# 1. set the limit to 10 per band
+# 2. send 20 pkts on band A: verify that 10 are queued, 10 dropped
+# 3. send 20 pkts on band A: verify that 0 are queued, 20 dropped
+# 4. send 20 pkts on band B: verify that 10 are queued, 10 dropped
+#
+# Send packets with a 100ms delay to ensure that previously sent
+# packets are still queued when later ones are sent.
+# Use SO_TXTIME for this.
+
+die() {
+ echo "$1"
+ exit 1
+}
+
+# run inside private netns
+if [[ $# -eq 0 ]]; then
+ ./in_netns.sh "$0" __subprocess
+ exit
+fi
+
+ip link add type dummy
+ip link set dev dummy0 up
+ip -6 addr add fdaa::1/128 dev dummy0
+ip -6 route add fdaa::/64 dev dummy0
+tc qdisc replace dev dummy0 root handle 1: fq quantum 1514 initial_quantum 1514 limit 10
+
+./cmsg_sender -6 -p u -d 100000 -n 20 fdaa::2 8000
+OUT1="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
+
+./cmsg_sender -6 -p u -d 100000 -n 20 fdaa::2 8000
+OUT2="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
+
+./cmsg_sender -6 -p u -d 100000 -n 20 -P 7 fdaa::2 8000
+OUT3="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
+
+# Initial stats will report zero sent, as all packets are still
+# queued in FQ. Sleep for the delay period (100ms) and see that
+# twenty are now sent.
+sleep 0.1
+OUT4="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
+
+# Log the output after the test
+echo "${OUT1}"
+echo "${OUT2}"
+echo "${OUT3}"
+echo "${OUT4}"
+
+# Test the output for expected values
+echo "${OUT1}" | grep -q '0\ pkt\ (dropped\ 10' || die "unexpected drop count at 1"
+echo "${OUT2}" | grep -q '0\ pkt\ (dropped\ 30' || die "unexpected drop count at 2"
+echo "${OUT3}" | grep -q '0\ pkt\ (dropped\ 40' || die "unexpected drop count at 3"
+echo "${OUT4}" | grep -q '20\ pkt\ (dropped\ 40' || die "unexpected accept count at 4"
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 85a8ee9395b3..95b498efacd1 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -182,23 +182,6 @@ chk_msk_inuse()
__chk_nr get_msk_inuse $expected "$msg" 0
}
-# $1: ns, $2: port
-wait_local_port_listen()
-{
- local listener_ns="${1}"
- local port="${2}"
-
- local port_hex i
-
- port_hex="$(printf "%04X" "${port}")"
- for i in $(seq 10); do
- ip netns exec "${listener_ns}" cat /proc/net/tcp | \
- awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
- break
- sleep 0.1
- done
-}
-
wait_connected()
{
local listener_ns="${1}"
@@ -222,7 +205,7 @@ echo "a" | \
ip netns exec $ns \
./mptcp_connect -p 10000 -l -t ${timeout_poll} -w 20 \
0.0.0.0 >/dev/null &
-wait_local_port_listen $ns 10000
+mptcp_lib_wait_local_port_listen $ns 10000
chk_msk_nr 0 "no msk on netns creation"
chk_msk_listen 10000
@@ -245,7 +228,7 @@ echo "a" | \
ip netns exec $ns \
./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} -w 20 \
0.0.0.0 >/dev/null &
-wait_local_port_listen $ns 10001
+mptcp_lib_wait_local_port_listen $ns 10001
echo "b" | \
timeout ${timeout_test} \
ip netns exec $ns \
@@ -266,7 +249,7 @@ for I in `seq 1 $NR_CLIENTS`; do
./mptcp_connect -p $((I+10001)) -l -w 20 \
-t ${timeout_poll} 0.0.0.0 >/dev/null &
done
-wait_local_port_listen $ns $((NR_CLIENTS + 10001))
+mptcp_lib_wait_local_port_listen $ns $((NR_CLIENTS + 10001))
for I in `seq 1 $NR_CLIENTS`; do
echo "b" | \
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index b1fc8afd072d..7898d62fce0b 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -254,31 +254,6 @@ else
set_ethtool_flags "$ns4" ns4eth3 "$ethtool_args"
fi
-print_file_err()
-{
- ls -l "$1" 1>&2
- echo "Trailing bytes are: "
- tail -c 27 "$1"
-}
-
-check_transfer()
-{
- local in=$1
- local out=$2
- local what=$3
-
- cmp "$in" "$out" > /dev/null 2>&1
- if [ $? -ne 0 ] ;then
- echo "[ FAIL ] $what does not match (in, out):"
- print_file_err "$in"
- print_file_err "$out"
-
- return 1
- fi
-
- return 0
-}
-
check_mptcp_disabled()
{
local disabled_ns="ns_disabled-$rndh"
@@ -310,12 +285,6 @@ check_mptcp_disabled()
return 0
}
-# $1: IP address
-is_v6()
-{
- [ -z "${1##*:*}" ]
-}
-
do_ping()
{
local listener_ns="$1"
@@ -324,7 +293,7 @@ do_ping()
local ping_args="-q -c 1"
local rc=0
- if is_v6 "${connect_addr}"; then
+ if mptcp_lib_is_v6 "${connect_addr}"; then
$ipv6 || return 0
ping_args="${ping_args} -6"
fi
@@ -341,38 +310,6 @@ do_ping()
return 0
}
-# $1: ns, $2: MIB counter
-get_mib_counter()
-{
- local listener_ns="${1}"
- local mib="${2}"
-
- # strip the header
- ip netns exec "${listener_ns}" \
- nstat -z -a "${mib}" | \
- tail -n+2 | \
- while read a count c rest; do
- echo $count
- done
-}
-
-# $1: ns, $2: port
-wait_local_port_listen()
-{
- local listener_ns="${1}"
- local port="${2}"
-
- local port_hex i
-
- port_hex="$(printf "%04X" "${port}")"
- for i in $(seq 10); do
- ip netns exec "${listener_ns}" cat /proc/net/tcp* | \
- awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
- break
- sleep 0.1
- done
-}
-
do_transfer()
{
local listener_ns="$1"
@@ -441,12 +378,12 @@ do_transfer()
nstat -n
fi
- local stat_synrx_last_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
- local stat_ackrx_last_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
- local stat_cookietx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
- local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- local stat_csum_err_s=$(get_mib_counter "${listener_ns}" "MPTcpExtDataCsumErr")
- local stat_csum_err_c=$(get_mib_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+ local stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+ local stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+ local stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
+ local stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ local stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+ local stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
timeout ${timeout_test} \
ip netns exec ${listener_ns} \
@@ -454,7 +391,7 @@ do_transfer()
$extra_args $local_addr < "$sin" > "$sout" &
local spid=$!
- wait_local_port_listen "${listener_ns}" "${port}"
+ mptcp_lib_wait_local_port_listen "${listener_ns}" "${port}"
local start
start=$(date +%s%3N)
@@ -504,16 +441,16 @@ do_transfer()
return 1
fi
- check_transfer $sin $cout "file received by client"
+ mptcp_lib_check_transfer $sin $cout "file received by client"
retc=$?
- check_transfer $cin $sout "file received by server"
+ mptcp_lib_check_transfer $cin $sout "file received by server"
rets=$?
- local stat_synrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
- local stat_ackrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
- local stat_cookietx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
- local stat_cookierx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- local stat_ooo_now=$(get_mib_counter "${listener_ns}" "TcpExtTCPOFOQueue")
+ local stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+ local stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+ local stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
+ local stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ local stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
expect_synrx=$((stat_synrx_last_l))
expect_ackrx=$((stat_ackrx_last_l))
@@ -542,8 +479,8 @@ do_transfer()
fi
if $checksum; then
- local csum_err_s=$(get_mib_counter "${listener_ns}" "MPTcpExtDataCsumErr")
- local csum_err_c=$(get_mib_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+ local csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+ local csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
local csum_err_s_nr=$((csum_err_s - stat_csum_err_s))
if [ $csum_err_s_nr -gt 0 ]; then
@@ -613,9 +550,8 @@ make_file()
ksize=$((SIZE / 1024))
rem=$((SIZE - (ksize * 1024)))
- dd if=/dev/urandom of="$name" bs=1024 count=$ksize 2> /dev/null
- dd if=/dev/urandom conv=notrunc of="$name" bs=1 count=$rem 2> /dev/null
- echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
+ mptcp_lib_make_file $name 1024 $ksize
+ dd if=/dev/urandom conv=notrunc of="$name" oflag=append bs=1 count=$rem 2> /dev/null
echo "Created $name (size $(du -b "$name")) containing data sent by $who"
}
@@ -635,12 +571,12 @@ run_tests_lo()
fi
# skip if we don't want v6
- if ! $ipv6 && is_v6 "${connect_addr}"; then
+ if ! $ipv6 && mptcp_lib_is_v6 "${connect_addr}"; then
return 0
fi
local local_addr
- if is_v6 "${connect_addr}"; then
+ if mptcp_lib_is_v6 "${connect_addr}"; then
local_addr="::"
else
local_addr="0.0.0.0"
@@ -708,7 +644,7 @@ run_test_transparent()
TEST_GROUP="${msg}"
# skip if we don't want v6
- if ! $ipv6 && is_v6 "${connect_addr}"; then
+ if ! $ipv6 && mptcp_lib_is_v6 "${connect_addr}"; then
return 0
fi
@@ -741,7 +677,7 @@ EOF
fi
local local_addr
- if is_v6 "${connect_addr}"; then
+ if mptcp_lib_is_v6 "${connect_addr}"; then
local_addr="::"
r6flag="-6"
else
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 3c94f2f194d6..8362ea454af3 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -511,13 +511,6 @@ get_failed_tests_ids()
done | sort -n
}
-print_file_err()
-{
- ls -l "$1" 1>&2
- echo -n "Trailing bytes are: "
- tail -c 27 "$1"
-}
-
check_transfer()
{
local in=$1
@@ -548,8 +541,8 @@ check_transfer()
local sum=$((0${a} + 0${b}))
if [ $check_invert -eq 0 ] || [ $sum -ne $((0xff)) ]; then
fail_test "$what does not match (in, out):"
- print_file_err "$in"
- print_file_err "$out"
+ mptcp_lib_print_file_err "$in"
+ mptcp_lib_print_file_err "$out"
return 1
else
@@ -587,49 +580,9 @@ link_failure()
done
}
-# $1: IP address
-is_v6()
-{
- [ -z "${1##*:*}" ]
-}
-
-# $1: ns, $2: port
-wait_local_port_listen()
-{
- local listener_ns="${1}"
- local port="${2}"
-
- local port_hex
- port_hex="$(printf "%04X" "${port}")"
-
- local i
- for i in $(seq 10); do
- ip netns exec "${listener_ns}" cat /proc/net/tcp* | \
- awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
- break
- sleep 0.1
- done
-}
-
-# $1: ns ; $2: counter
-get_counter()
-{
- local ns="${1}"
- local counter="${2}"
- local count
-
- count=$(ip netns exec ${ns} nstat -asz "${counter}" | awk 'NR==1 {next} {print $2}')
- if [ -z "${count}" ]; then
- mptcp_lib_fail_if_expected_feature "${counter} counter"
- return 1
- fi
-
- echo "${count}"
-}
-
rm_addr_count()
{
- get_counter "${1}" "MPTcpExtRmAddr"
+ mptcp_lib_get_counter "${1}" "MPTcpExtRmAddr"
}
# $1: ns, $2: old rm_addr counter in $ns
@@ -649,7 +602,7 @@ wait_rm_addr()
rm_sf_count()
{
- get_counter "${1}" "MPTcpExtRmSubflow"
+ mptcp_lib_get_counter "${1}" "MPTcpExtRmSubflow"
}
# $1: ns, $2: old rm_sf counter in $ns
@@ -672,26 +625,20 @@ wait_mpj()
local ns="${1}"
local cnt old_cnt
- old_cnt=$(get_counter ${ns} "MPTcpExtMPJoinAckRx")
+ old_cnt=$(mptcp_lib_get_counter ${ns} "MPTcpExtMPJoinAckRx")
local i
for i in $(seq 10); do
- cnt=$(get_counter ${ns} "MPTcpExtMPJoinAckRx")
+ cnt=$(mptcp_lib_get_counter ${ns} "MPTcpExtMPJoinAckRx")
[ "$cnt" = "${old_cnt}" ] || break
sleep 0.1
done
}
-kill_wait()
-{
- kill $1 > /dev/null 2>&1
- wait $1 2>/dev/null
-}
-
kill_events_pids()
{
- kill_wait $evts_ns1_pid
- kill_wait $evts_ns2_pid
+ mptcp_lib_kill_wait $evts_ns1_pid
+ mptcp_lib_kill_wait $evts_ns2_pid
}
kill_tests_wait()
@@ -901,7 +848,7 @@ pm_nl_set_endpoint()
local id=10
while [ $add_nr_ns1 -gt 0 ]; do
local addr
- if is_v6 "${connect_addr}"; then
+ if mptcp_lib_is_v6 "${connect_addr}"; then
addr="dead:beef:$counter::1"
else
addr="10.0.$counter.1"
@@ -953,7 +900,7 @@ pm_nl_set_endpoint()
local id=20
while [ $add_nr_ns2 -gt 0 ]; do
local addr
- if is_v6 "${connect_addr}"; then
+ if mptcp_lib_is_v6 "${connect_addr}"; then
addr="dead:beef:$counter::2"
else
addr="10.0.$counter.2"
@@ -995,7 +942,7 @@ pm_nl_set_endpoint()
pm_nl_flush_endpoint ${connector_ns}
elif [ $rm_nr_ns2 -eq 9 ]; then
local addr
- if is_v6 "${connect_addr}"; then
+ if mptcp_lib_is_v6 "${connect_addr}"; then
addr="dead:beef:1::2"
else
addr="10.0.1.2"
@@ -1117,7 +1064,7 @@ do_transfer()
fi
local spid=$!
- wait_local_port_listen "${listener_ns}" "${port}"
+ mptcp_lib_wait_local_port_listen "${listener_ns}" "${port}"
extra_cl_args="$extra_args $extra_cl_args"
if [ "$test_linkfail" -eq 0 ];then
@@ -1199,8 +1146,7 @@ make_file()
local who=$2
local size=$3
- dd if=/dev/urandom of="$name" bs=1024 count=$size 2> /dev/null
- echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
+ mptcp_lib_make_file $name 1024 $size
print_info "Test file (size $size KB) for $who"
}
@@ -1284,7 +1230,7 @@ chk_csum_nr()
fi
print_check "sum"
- count=$(get_counter ${ns1} "MPTcpExtDataCsumErr")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr")
if [ "$count" != "$csum_ns1" ]; then
extra_msg="$extra_msg ns1=$count"
fi
@@ -1297,7 +1243,7 @@ chk_csum_nr()
print_ok
fi
print_check "csum"
- count=$(get_counter ${ns2} "MPTcpExtDataCsumErr")
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr")
if [ "$count" != "$csum_ns2" ]; then
extra_msg="$extra_msg ns2=$count"
fi
@@ -1341,7 +1287,7 @@ chk_fail_nr()
fi
print_check "ftx"
- count=$(get_counter ${ns_tx} "MPTcpExtMPFailTx")
+ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx")
if [ "$count" != "$fail_tx" ]; then
extra_msg="$extra_msg,tx=$count"
fi
@@ -1355,7 +1301,7 @@ chk_fail_nr()
fi
print_check "failrx"
- count=$(get_counter ${ns_rx} "MPTcpExtMPFailRx")
+ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx")
if [ "$count" != "$fail_rx" ]; then
extra_msg="$extra_msg,rx=$count"
fi
@@ -1388,7 +1334,7 @@ chk_fclose_nr()
fi
print_check "ctx"
- count=$(get_counter ${ns_tx} "MPTcpExtMPFastcloseTx")
+ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFastcloseTx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$fclose_tx" ]; then
@@ -1399,7 +1345,7 @@ chk_fclose_nr()
fi
print_check "fclzrx"
- count=$(get_counter ${ns_rx} "MPTcpExtMPFastcloseRx")
+ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFastcloseRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$fclose_rx" ]; then
@@ -1429,7 +1375,7 @@ chk_rst_nr()
fi
print_check "rtx"
- count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx")
+ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPRstTx")
if [ -z "$count" ]; then
print_skip
# accept more rst than expected except if we don't expect any
@@ -1441,7 +1387,7 @@ chk_rst_nr()
fi
print_check "rstrx"
- count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx")
+ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPRstRx")
if [ -z "$count" ]; then
print_skip
# accept more rst than expected except if we don't expect any
@@ -1462,7 +1408,7 @@ chk_infi_nr()
local count
print_check "itx"
- count=$(get_counter ${ns2} "MPTcpExtInfiniteMapTx")
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtInfiniteMapTx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$infi_tx" ]; then
@@ -1472,7 +1418,7 @@ chk_infi_nr()
fi
print_check "infirx"
- count=$(get_counter ${ns1} "MPTcpExtInfiniteMapRx")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtInfiniteMapRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$infi_rx" ]; then
@@ -1501,7 +1447,7 @@ chk_join_nr()
fi
print_check "syn"
- count=$(get_counter ${ns1} "MPTcpExtMPJoinSynRx")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$syn_nr" ]; then
@@ -1512,7 +1458,7 @@ chk_join_nr()
print_check "synack"
with_cookie=$(ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies)
- count=$(get_counter ${ns2} "MPTcpExtMPJoinSynAckRx")
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$syn_ack_nr" ]; then
@@ -1529,7 +1475,7 @@ chk_join_nr()
fi
print_check "ack"
- count=$(get_counter ${ns1} "MPTcpExtMPJoinAckRx")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinAckRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$ack_nr" ]; then
@@ -1562,8 +1508,8 @@ chk_stale_nr()
print_check "stale"
- stale_nr=$(get_counter ${ns} "MPTcpExtSubflowStale")
- recover_nr=$(get_counter ${ns} "MPTcpExtSubflowRecover")
+ stale_nr=$(mptcp_lib_get_counter ${ns} "MPTcpExtSubflowStale")
+ recover_nr=$(mptcp_lib_get_counter ${ns} "MPTcpExtSubflowRecover")
if [ -z "$stale_nr" ] || [ -z "$recover_nr" ]; then
print_skip
elif [ $stale_nr -lt $stale_min ] ||
@@ -1600,7 +1546,7 @@ chk_add_nr()
timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
print_check "add"
- count=$(get_counter ${ns2} "MPTcpExtAddAddr")
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtAddAddr")
if [ -z "$count" ]; then
print_skip
# if the test configured a short timeout tolerate greater then expected
@@ -1612,7 +1558,7 @@ chk_add_nr()
fi
print_check "echo"
- count=$(get_counter ${ns1} "MPTcpExtEchoAdd")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtEchoAdd")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$echo_nr" ]; then
@@ -1623,7 +1569,7 @@ chk_add_nr()
if [ $port_nr -gt 0 ]; then
print_check "pt"
- count=$(get_counter ${ns2} "MPTcpExtPortAdd")
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtPortAdd")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$port_nr" ]; then
@@ -1633,7 +1579,7 @@ chk_add_nr()
fi
print_check "syn"
- count=$(get_counter ${ns1} "MPTcpExtMPJoinPortSynRx")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortSynRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$syn_nr" ]; then
@@ -1644,7 +1590,7 @@ chk_add_nr()
fi
print_check "synack"
- count=$(get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx")
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$syn_ack_nr" ]; then
@@ -1655,7 +1601,7 @@ chk_add_nr()
fi
print_check "ack"
- count=$(get_counter ${ns1} "MPTcpExtMPJoinPortAckRx")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortAckRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$ack_nr" ]; then
@@ -1666,7 +1612,7 @@ chk_add_nr()
fi
print_check "syn"
- count=$(get_counter ${ns1} "MPTcpExtMismatchPortSynRx")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortSynRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$mis_syn_nr" ]; then
@@ -1677,7 +1623,7 @@ chk_add_nr()
fi
print_check "ack"
- count=$(get_counter ${ns1} "MPTcpExtMismatchPortAckRx")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortAckRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$mis_ack_nr" ]; then
@@ -1699,7 +1645,7 @@ chk_add_tx_nr()
timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
print_check "add TX"
- count=$(get_counter ${ns1} "MPTcpExtAddAddrTx")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtAddAddrTx")
if [ -z "$count" ]; then
print_skip
# if the test configured a short timeout tolerate greater then expected
@@ -1711,7 +1657,7 @@ chk_add_tx_nr()
fi
print_check "echo TX"
- count=$(get_counter ${ns2} "MPTcpExtEchoAddTx")
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtEchoAddTx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$echo_tx_nr" ]; then
@@ -1749,7 +1695,7 @@ chk_rm_nr()
fi
print_check "rm"
- count=$(get_counter ${addr_ns} "MPTcpExtRmAddr")
+ count=$(mptcp_lib_get_counter ${addr_ns} "MPTcpExtRmAddr")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$rm_addr_nr" ]; then
@@ -1759,13 +1705,13 @@ chk_rm_nr()
fi
print_check "rmsf"
- count=$(get_counter ${subflow_ns} "MPTcpExtRmSubflow")
+ count=$(mptcp_lib_get_counter ${subflow_ns} "MPTcpExtRmSubflow")
if [ -z "$count" ]; then
print_skip
elif [ -n "$simult" ]; then
local cnt suffix
- cnt=$(get_counter ${addr_ns} "MPTcpExtRmSubflow")
+ cnt=$(mptcp_lib_get_counter ${addr_ns} "MPTcpExtRmSubflow")
# in case of simult flush, the subflow removal count on each side is
# unreliable
@@ -1794,7 +1740,7 @@ chk_rm_tx_nr()
local rm_addr_tx_nr=$1
print_check "rm TX"
- count=$(get_counter ${ns2} "MPTcpExtRmAddrTx")
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtRmAddrTx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$rm_addr_tx_nr" ]; then
@@ -1811,7 +1757,7 @@ chk_prio_nr()
local count
print_check "ptx"
- count=$(get_counter ${ns1} "MPTcpExtMPPrioTx")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPPrioTx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$mp_prio_nr_tx" ]; then
@@ -1821,7 +1767,7 @@ chk_prio_nr()
fi
print_check "prx"
- count=$(get_counter ${ns1} "MPTcpExtMPPrioRx")
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPPrioRx")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$mp_prio_nr_rx" ]; then
@@ -1867,12 +1813,10 @@ chk_mptcp_info()
local cnt2
local dump_stats
- print_check "mptcp_info ${info1:0:8}=$exp1:$exp2"
+ print_check "mptcp_info ${info1:0:15}=$exp1:$exp2"
- cnt1=$(ss -N $ns1 -inmHM | grep "$info1:" |
- sed -n 's/.*\('"$info1"':\)\([[:digit:]]*\).*$/\2/p;q')
- cnt2=$(ss -N $ns2 -inmHM | grep "$info2:" |
- sed -n 's/.*\('"$info2"':\)\([[:digit:]]*\).*$/\2/p;q')
+ cnt1=$(ss -N $ns1 -inmHM | mptcp_lib_get_info_value "$info1" "$info1")
+ cnt2=$(ss -N $ns2 -inmHM | mptcp_lib_get_info_value "$info2" "$info2")
# 'ss' only display active connections and counters that are not 0.
[ -z "$cnt1" ] && cnt1=0
[ -z "$cnt2" ] && cnt2=0
@@ -1890,6 +1834,42 @@ chk_mptcp_info()
fi
}
+# $1: subflows in ns1 ; $2: subflows in ns2
+# number of all subflows, including the initial subflow.
+chk_subflows_total()
+{
+ local cnt1
+ local cnt2
+ local info="subflows_total"
+ local dump_stats
+
+ # if subflows_total counter is supported, use it:
+ if [ -n "$(ss -N $ns1 -inmHM | mptcp_lib_get_info_value $info $info)" ]; then
+ chk_mptcp_info $info $1 $info $2
+ return
+ fi
+
+ print_check "$info $1:$2"
+
+ # if not, count the TCP connections that are in fact MPTCP subflows
+ cnt1=$(ss -N $ns1 -ti state established state syn-sent state syn-recv |
+ grep -c tcp-ulp-mptcp)
+ cnt2=$(ss -N $ns2 -ti state established state syn-sent state syn-recv |
+ grep -c tcp-ulp-mptcp)
+
+ if [ "$1" != "$cnt1" ] || [ "$2" != "$cnt2" ]; then
+ fail_test "got subflows $cnt1:$cnt2 expected $1:$2"
+ dump_stats=1
+ else
+ print_ok
+ fi
+
+ if [ "$dump_stats" = 1 ]; then
+ ss -N $ns1 -ti
+ ss -N $ns2 -ti
+ fi
+}
+
chk_link_usage()
{
local ns=$1
@@ -1921,7 +1901,7 @@ wait_attempt_fail()
while [ $time -lt $timeout_ms ]; do
local cnt
- cnt=$(get_counter ${ns} "TcpAttemptFails")
+ cnt=$(mptcp_lib_get_counter ${ns} "TcpAttemptFails")
[ "$cnt" = 1 ] && return 1
time=$((time + 100))
@@ -2814,6 +2794,7 @@ backup_tests()
fi
}
+SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED
LISTENER_CLOSED=16 #MPTCP_EVENT_LISTENER_CLOSED
@@ -2848,13 +2829,13 @@ verify_listener_events()
return
fi
- type=$(grep "type:$e_type," $evt | sed -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q')
- family=$(grep "type:$e_type," $evt | sed -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q')
- sport=$(grep "type:$e_type," $evt | sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+ type=$(mptcp_lib_evts_get_info type "$evt" "$e_type")
+ family=$(mptcp_lib_evts_get_info family "$evt" "$e_type")
+ sport=$(mptcp_lib_evts_get_info sport "$evt" "$e_type")
if [ $family ] && [ $family = $AF_INET6 ]; then
- saddr=$(grep "type:$e_type," $evt | sed -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
+ saddr=$(mptcp_lib_evts_get_info saddr6 "$evt" "$e_type")
else
- saddr=$(grep "type:$e_type," $evt | sed -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q')
+ saddr=$(mptcp_lib_evts_get_info saddr4 "$evt" "$e_type")
fi
if [ $type ] && [ $type = $e_type ] &&
@@ -3249,8 +3230,7 @@ fastclose_tests()
pedit_action_pkts()
{
tc -n $ns2 -j -s action show action pedit index 100 | \
- grep "packets" | \
- sed 's/.*"packets":\([0-9]\+\),.*/\1/'
+ mptcp_lib_get_info_value \"packets\" packets
}
fail_tests()
@@ -3275,75 +3255,70 @@ fail_tests()
fi
}
+# $1: ns ; $2: addr ; $3: id
userspace_pm_add_addr()
{
- local addr=$1
- local id=$2
+ local evts=$evts_ns1
local tk
- tk=$(grep "type:1," "$evts_ns1" |
- sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
- ip netns exec $ns1 ./pm_nl_ctl ann $addr token $tk id $id
+ [ "$1" == "$ns2" ] && evts=$evts_ns2
+ tk=$(mptcp_lib_evts_get_info token "$evts")
+
+ ip netns exec $1 ./pm_nl_ctl ann $2 token $tk id $3
sleep 1
}
-userspace_pm_rm_sf_addr_ns1()
+# $1: ns ; $2: id
+userspace_pm_rm_addr()
{
- local addr=$1
- local id=$2
- local tk sp da dp
- local cnt_addr cnt_sf
-
- tk=$(grep "type:1," "$evts_ns1" |
- sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
- sp=$(grep "type:10" "$evts_ns1" |
- sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
- da=$(grep "type:10" "$evts_ns1" |
- sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
- dp=$(grep "type:10" "$evts_ns1" |
- sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
- cnt_addr=$(rm_addr_count ${ns1})
- cnt_sf=$(rm_sf_count ${ns1})
- ip netns exec $ns1 ./pm_nl_ctl rem token $tk id $id
- ip netns exec $ns1 ./pm_nl_ctl dsf lip "::ffff:$addr" \
- lport $sp rip $da rport $dp token $tk
- wait_rm_addr $ns1 "${cnt_addr}"
- wait_rm_sf $ns1 "${cnt_sf}"
+ local evts=$evts_ns1
+ local tk
+ local cnt
+
+ [ "$1" == "$ns2" ] && evts=$evts_ns2
+ tk=$(mptcp_lib_evts_get_info token "$evts")
+
+ cnt=$(rm_addr_count ${1})
+ ip netns exec $1 ./pm_nl_ctl rem token $tk id $2
+ wait_rm_addr $1 "${cnt}"
}
+# $1: ns ; $2: addr ; $3: id
userspace_pm_add_sf()
{
- local addr=$1
- local id=$2
+ local evts=$evts_ns1
local tk da dp
- tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
- da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
- dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
- ip netns exec $ns2 ./pm_nl_ctl csf lip $addr lid $id \
+ [ "$1" == "$ns2" ] && evts=$evts_ns2
+ tk=$(mptcp_lib_evts_get_info token "$evts")
+ da=$(mptcp_lib_evts_get_info daddr4 "$evts")
+ dp=$(mptcp_lib_evts_get_info dport "$evts")
+
+ ip netns exec $1 ./pm_nl_ctl csf lip $2 lid $3 \
rip $da rport $dp token $tk
sleep 1
}
-userspace_pm_rm_sf_addr_ns2()
+# $1: ns ; $2: addr ; $3: event type
+userspace_pm_rm_sf()
{
- local addr=$1
- local id=$2
+ local evts=$evts_ns1
+ local t=${3:-1}
+ local ip=4
local tk da dp sp
- local cnt_addr cnt_sf
-
- tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
- da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
- dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
- sp=$(grep "type:10" "$evts_ns2" |
- sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
- cnt_addr=$(rm_addr_count ${ns2})
- cnt_sf=$(rm_sf_count ${ns2})
- ip netns exec $ns2 ./pm_nl_ctl rem token $tk id $id
- ip netns exec $ns2 ./pm_nl_ctl dsf lip $addr lport $sp \
+ local cnt
+
+ [ "$1" == "$ns2" ] && evts=$evts_ns2
+ if mptcp_lib_is_v6 $2; then ip=6; fi
+ tk=$(mptcp_lib_evts_get_info token "$evts")
+ da=$(mptcp_lib_evts_get_info "daddr$ip" "$evts" $t)
+ dp=$(mptcp_lib_evts_get_info dport "$evts" $t)
+ sp=$(mptcp_lib_evts_get_info sport "$evts" $t)
+
+ cnt=$(rm_sf_count ${1})
+ ip netns exec $1 ./pm_nl_ctl dsf lip $2 lport $sp \
rip $da rport $dp token $tk
- wait_rm_addr $ns2 "${cnt_addr}"
- wait_rm_sf $ns2 "${cnt_sf}"
+ wait_rm_sf $1 "${cnt}"
}
userspace_tests()
@@ -3430,14 +3405,17 @@ userspace_tests()
run_tests $ns1 $ns2 10.0.1.1 &
local tests_pid=$!
wait_mpj $ns1
- userspace_pm_add_addr 10.0.2.1 10
+ userspace_pm_add_addr $ns1 10.0.2.1 10
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_mptcp_info subflows 1 subflows 1
+ chk_subflows_total 2 2
chk_mptcp_info add_addr_signal 1 add_addr_accepted 1
- userspace_pm_rm_sf_addr_ns1 10.0.2.1 10
+ userspace_pm_rm_addr $ns1 10
+ userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $SUB_ESTABLISHED
chk_rm_nr 1 1 invert
chk_mptcp_info subflows 0 subflows 0
+ chk_subflows_total 1 1
kill_events_pids
wait $tests_pid
fi
@@ -3451,12 +3429,84 @@ userspace_tests()
run_tests $ns1 $ns2 10.0.1.1 &
local tests_pid=$!
wait_mpj $ns2
- userspace_pm_add_sf 10.0.3.2 20
+ userspace_pm_add_sf $ns2 10.0.3.2 20
chk_join_nr 1 1 1
chk_mptcp_info subflows 1 subflows 1
- userspace_pm_rm_sf_addr_ns2 10.0.3.2 20
+ chk_subflows_total 2 2
+ userspace_pm_rm_addr $ns2 20
+ userspace_pm_rm_sf $ns2 10.0.3.2 $SUB_ESTABLISHED
chk_rm_nr 1 1
chk_mptcp_info subflows 0 subflows 0
+ chk_subflows_total 1 1
+ kill_events_pids
+ wait $tests_pid
+ fi
+
+ # userspace pm create id 0 subflow
+ if reset_with_events "userspace pm create id 0 subflow" &&
+ continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 0 1
+ speed=5 \
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
+ wait_mpj $ns2
+ chk_mptcp_info subflows 0 subflows 0
+ chk_subflows_total 1 1
+ userspace_pm_add_sf $ns2 10.0.3.2 0
+ chk_join_nr 1 1 1
+ chk_mptcp_info subflows 1 subflows 1
+ chk_subflows_total 2 2
+ kill_events_pids
+ wait $tests_pid
+ fi
+
+ # userspace pm remove initial subflow
+ if reset_with_events "userspace pm remove initial subflow" &&
+ continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 0 1
+ speed=5 \
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
+ wait_mpj $ns2
+ userspace_pm_add_sf $ns2 10.0.3.2 20
+ chk_join_nr 1 1 1
+ chk_mptcp_info subflows 1 subflows 1
+ chk_subflows_total 2 2
+ userspace_pm_rm_sf $ns2 10.0.1.2
+ # we don't look at the counter linked to the RM_ADDR but
+ # to the one linked to the subflows that have been removed
+ chk_rm_nr 0 1
+ chk_rst_nr 0 0 invert
+ chk_mptcp_info subflows 1 subflows 1
+ chk_subflows_total 1 1
+ kill_events_pids
+ wait $tests_pid
+ fi
+
+ # userspace pm send RM_ADDR for ID 0
+ if reset_with_events "userspace pm send RM_ADDR for ID 0" &&
+ continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ set_userspace_pm $ns1
+ pm_nl_set_limits $ns2 1 1
+ speed=5 \
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
+ wait_mpj $ns1
+ userspace_pm_add_addr $ns1 10.0.2.1 10
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+ chk_mptcp_info subflows 1 subflows 1
+ chk_subflows_total 2 2
+ chk_mptcp_info add_addr_signal 1 add_addr_accepted 1
+ userspace_pm_rm_addr $ns1 0
+ # we don't look at the counter linked to the subflows that
+ # have been removed but to the one linked to the RM_ADDR
+ chk_rm_nr 1 0 invert
+ chk_rst_nr 0 0 invert
+ chk_mptcp_info subflows 1 subflows 1
+ chk_subflows_total 1 1
kill_events_pids
wait $tests_pid
fi
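
For reference, the chk_subflows_total() fallback added above counts MPTCP subflows by looking for the tcp-ulp-mptcp marker in the socket dump. A stand-alone sketch of that check (namespace name and expected count are illustrative):

    ns="ns1"            # illustrative namespace
    expected=2
    cnt=$(ip netns exec "${ns}" ss -ti state established state syn-sent state syn-recv |
          grep -c tcp-ulp-mptcp)
    [ "${cnt}" = "${expected}" ] || echo "got ${cnt} subflows, expected ${expected}" 1>&2
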
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index 92a5befe8039..022262a2cfe0 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -207,3 +207,94 @@ mptcp_lib_result_print_all_tap() {
printf "%s\n" "${subtest}"
done
}
+
+# get the value of keyword $1 in the line marked by keyword $2
+mptcp_lib_get_info_value() {
+ grep "${2}" | sed -n 's/.*\('"${1}"':\)\([0-9a-f:.]*\).*$/\2/p;q'
+}
+
+# $1: info name ; $2: evts_ns ; $3: event type
+mptcp_lib_evts_get_info() {
+ mptcp_lib_get_info_value "${1}" "^type:${3:-1}," < "${2}"
+}
+
+# $1: PID
+mptcp_lib_kill_wait() {
+ [ "${1}" -eq 0 ] && return 0
+
+ kill -SIGUSR1 "${1}" > /dev/null 2>&1
+ kill "${1}" > /dev/null 2>&1
+ wait "${1}" 2>/dev/null
+}
+
+# $1: IP address
+mptcp_lib_is_v6() {
+ [ -z "${1##*:*}" ]
+}
+
+# $1: ns, $2: MIB counter
+mptcp_lib_get_counter() {
+ local ns="${1}"
+ local counter="${2}"
+ local count
+
+ count=$(ip netns exec "${ns}" nstat -asz "${counter}" |
+ awk 'NR==1 {next} {print $2}')
+ if [ -z "${count}" ]; then
+ mptcp_lib_fail_if_expected_feature "${counter} counter"
+ return 1
+ fi
+
+ echo "${count}"
+}
+
+mptcp_lib_make_file() {
+ local name="${1}"
+ local bs="${2}"
+ local size="${3}"
+
+ dd if=/dev/urandom of="${name}" bs="${bs}" count="${size}" 2> /dev/null
+ echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "${name}"
+}
+
+# $1: file
+mptcp_lib_print_file_err() {
+ ls -l "${1}" 1>&2
+ echo "Trailing bytes are: "
+ tail -c 27 "${1}"
+}
+
+# $1: input file ; $2: output file ; $3: what kind of file
+mptcp_lib_check_transfer() {
+ local in="${1}"
+ local out="${2}"
+ local what="${3}"
+
+ if ! cmp "$in" "$out" > /dev/null 2>&1; then
+ echo "[ FAIL ] $what does not match (in, out):"
+ mptcp_lib_print_file_err "$in"
+ mptcp_lib_print_file_err "$out"
+
+ return 1
+ fi
+
+ return 0
+}
+
+# $1: ns, $2: port
+mptcp_lib_wait_local_port_listen() {
+ local listener_ns="${1}"
+ local port="${2}"
+
+ local port_hex
+ port_hex="$(printf "%04X" "${port}")"
+
+ local _
+ for _ in $(seq 10); do
+ ip netns exec "${listener_ns}" cat /proc/net/tcp* | \
+ awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) \
+ {rc=0; exit}} END {exit rc}" &&
+ break
+ sleep 0.1
+ done
+}
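
A minimal usage sketch for the helpers consolidated into mptcp_lib.sh above (not part of the patch; the namespace, port and counter names are placeholders):

    #!/bin/bash
    . "$(dirname "${0}")/mptcp_lib.sh"

    ns="ns1"                                           # placeholder namespace
    mptcp_lib_wait_local_port_listen "${ns}" 10000     # poll /proc/net/tcp* instead of sleeping
    count=$(mptcp_lib_get_counter "${ns}" "MPTcpExtMPJoinSynRx") || count=0
    echo "MPJoinSynRx: ${count}"

    sleep 100 &                                        # stand-in for a background helper
    mptcp_lib_kill_wait "$!"                           # SIGUSR1 first, then SIGTERM, then wait
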
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index a817af6616ec..c643872ddf47 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -135,38 +135,6 @@ check_mark()
return 0
}
-print_file_err()
-{
- ls -l "$1" 1>&2
- echo "Trailing bytes are: "
- tail -c 27 "$1"
-}
-
-check_transfer()
-{
- local in=$1
- local out=$2
- local what=$3
-
- cmp "$in" "$out" > /dev/null 2>&1
- if [ $? -ne 0 ] ;then
- echo "[ FAIL ] $what does not match (in, out):"
- print_file_err "$in"
- print_file_err "$out"
- ret=1
-
- return 1
- fi
-
- return 0
-}
-
-# $1: IP address
-is_v6()
-{
- [ -z "${1##*:*}" ]
-}
-
do_transfer()
{
local listener_ns="$1"
@@ -183,7 +151,7 @@ do_transfer()
local mptcp_connect="./mptcp_connect -r 20"
local local_addr ip
- if is_v6 "${connect_addr}"; then
+ if mptcp_lib_is_v6 "${connect_addr}"; then
local_addr="::"
ip=ipv6
else
@@ -238,7 +206,7 @@ do_transfer()
check_mark $connector_ns 4 || retc=1
fi
- check_transfer $cin $sout "file received by server"
+ mptcp_lib_check_transfer $cin $sout "file received by server"
rets=$?
mptcp_lib_result_code "${retc}" "mark ${ip}"
@@ -257,8 +225,7 @@ make_file()
local who=$2
local size=$3
- dd if=/dev/urandom of="$name" bs=1024 count=$size 2> /dev/null
- echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
+ mptcp_lib_make_file $name 1024 $size
echo "Created $name (size $size KB) containing data sent by $who"
}
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index ce9203b817f8..ae8ad5d6fb9d 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -123,23 +123,6 @@ setup()
grep -q ' kmemleak_init$\| lockdep_init$\| kasan_init$\| prove_locking$' /proc/kallsyms && slack=$((slack+550))
}
-# $1: ns, $2: port
-wait_local_port_listen()
-{
- local listener_ns="${1}"
- local port="${2}"
-
- local port_hex i
-
- port_hex="$(printf "%04X" "${port}")"
- for i in $(seq 10); do
- ip netns exec "${listener_ns}" cat /proc/net/tcp* | \
- awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
- break
- sleep 0.1
- done
-}
-
do_transfer()
{
local cin=$1
@@ -179,7 +162,7 @@ do_transfer()
0.0.0.0 < "$sin" > "$sout" &
local spid=$!
- wait_local_port_listen "${ns3}" "${port}"
+ mptcp_lib_wait_local_port_listen "${ns3}" "${port}"
timeout ${timeout_test} \
ip netns exec ${ns1} \
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index b25a3e33eb25..6167837f48e1 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -108,15 +108,6 @@ test_fail()
mptcp_lib_result_fail "${test_name}"
}
-kill_wait()
-{
- [ $1 -eq 0 ] && return 0
-
- kill -SIGUSR1 $1 > /dev/null 2>&1
- kill $1 > /dev/null 2>&1
- wait $1 2>/dev/null
-}
-
# This function is used in the cleanup trap
#shellcheck disable=SC2317
cleanup()
@@ -128,7 +119,7 @@ cleanup()
for pid in $client4_pid $server4_pid $client6_pid $server6_pid\
$server_evts_pid $client_evts_pid
do
- kill_wait $pid
+ mptcp_lib_kill_wait $pid
done
local netns
@@ -173,22 +164,12 @@ print_title "Init"
print_test "Created network namespaces ns1, ns2"
test_pass
-make_file()
-{
- # Store a chunk of data in a file to transmit over an MPTCP connection
- local name=$1
- local ksize=1
-
- dd if=/dev/urandom of="$name" bs=2 count=$ksize 2> /dev/null
- echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
-}
-
make_connection()
{
if [ -z "$file" ]; then
file=$(mktemp)
fi
- make_file "$file" "client"
+ mptcp_lib_make_file "$file" 2 1
local is_v6=$1
local app_port=$app4_port
@@ -210,7 +191,7 @@ make_connection()
fi
:>"$client_evts"
if [ $client_evts_pid -ne 0 ]; then
- kill_wait $client_evts_pid
+ mptcp_lib_kill_wait $client_evts_pid
fi
ip netns exec "$ns2" ./pm_nl_ctl events >> "$client_evts" 2>&1 &
client_evts_pid=$!
@@ -219,7 +200,7 @@ make_connection()
fi
:>"$server_evts"
if [ $server_evts_pid -ne 0 ]; then
- kill_wait $server_evts_pid
+ mptcp_lib_kill_wait $server_evts_pid
fi
ip netns exec "$ns1" ./pm_nl_ctl events >> "$server_evts" 2>&1 &
server_evts_pid=$!
@@ -247,14 +228,11 @@ make_connection()
local server_token
local server_serverside
- client_token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
- client_port=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
- client_serverside=$(sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q'\
- "$client_evts")
- server_token=$(grep "type:1," "$server_evts" |
- sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
- server_serverside=$(grep "type:1," "$server_evts" |
- sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q')
+ client_token=$(mptcp_lib_evts_get_info token "$client_evts")
+ client_port=$(mptcp_lib_evts_get_info sport "$client_evts")
+ client_serverside=$(mptcp_lib_evts_get_info server_side "$client_evts")
+ server_token=$(mptcp_lib_evts_get_info token "$server_evts")
+ server_serverside=$(mptcp_lib_evts_get_info server_side "$server_evts")
print_test "Established IP${is_v6} MPTCP Connection ns2 => ns1"
if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
@@ -340,16 +318,16 @@ verify_announce_event()
local dport
local id
- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ type=$(mptcp_lib_evts_get_info type "$evt" $e_type)
+ token=$(mptcp_lib_evts_get_info token "$evt" $e_type)
if [ "$e_af" = "v6" ]
then
- addr=$(sed --unbuffered -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
+ addr=$(mptcp_lib_evts_get_info daddr6 "$evt" $e_type)
else
- addr=$(sed --unbuffered -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
+ addr=$(mptcp_lib_evts_get_info daddr4 "$evt" $e_type)
fi
- dport=$(sed --unbuffered -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- id=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ dport=$(mptcp_lib_evts_get_info dport "$evt" $e_type)
+ id=$(mptcp_lib_evts_get_info rem_id "$evt" $e_type)
check_expected "type" "token" "addr" "dport" "id"
}
@@ -367,7 +345,7 @@ test_announce()
$client_addr_id dev ns2eth1 > /dev/null 2>&1
local type
- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
+ type=$(mptcp_lib_evts_get_info type "$server_evts")
print_test "ADD_ADDR 10.0.2.2 (ns2) => ns1, invalid token"
if [ "$type" = "" ]
then
@@ -446,9 +424,9 @@ verify_remove_event()
local token
local id
- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- id=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ type=$(mptcp_lib_evts_get_info type "$evt" $e_type)
+ token=$(mptcp_lib_evts_get_info token "$evt" $e_type)
+ id=$(mptcp_lib_evts_get_info rem_id "$evt" $e_type)
check_expected "type" "token" "id"
}
@@ -466,7 +444,7 @@ test_remove()
$client_addr_id > /dev/null 2>&1
print_test "RM_ADDR id:${client_addr_id} ns2 => ns1, invalid token"
local type
- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
+ type=$(mptcp_lib_evts_get_info type "$server_evts")
if [ "$type" = "" ]
then
test_pass
@@ -479,7 +457,7 @@ test_remove()
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$invalid_id > /dev/null 2>&1
print_test "RM_ADDR id:${invalid_id} ns2 => ns1, invalid id"
- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
+ type=$(mptcp_lib_evts_get_info type "$server_evts")
if [ "$type" = "" ]
then
test_pass
@@ -583,19 +561,19 @@ verify_subflow_events()
fi
fi
- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- family=$(sed --unbuffered -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- dport=$(sed --unbuffered -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- locid=$(sed --unbuffered -n 's/.*\(loc_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
- remid=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ type=$(mptcp_lib_evts_get_info type "$evt" $e_type)
+ token=$(mptcp_lib_evts_get_info token "$evt" $e_type)
+ family=$(mptcp_lib_evts_get_info family "$evt" $e_type)
+ dport=$(mptcp_lib_evts_get_info dport "$evt" $e_type)
+ locid=$(mptcp_lib_evts_get_info loc_id "$evt" $e_type)
+ remid=$(mptcp_lib_evts_get_info rem_id "$evt" $e_type)
if [ "$family" = "$AF_INET6" ]
then
- saddr=$(sed --unbuffered -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
- daddr=$(sed --unbuffered -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
+ saddr=$(mptcp_lib_evts_get_info saddr6 "$evt" $e_type)
+ daddr=$(mptcp_lib_evts_get_info daddr6 "$evt" $e_type)
else
- saddr=$(sed --unbuffered -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
- daddr=$(sed --unbuffered -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
+ saddr=$(mptcp_lib_evts_get_info saddr4 "$evt" $e_type)
+ daddr=$(mptcp_lib_evts_get_info daddr4 "$evt" $e_type)
fi
check_expected "type" "token" "daddr" "dport" "family" "saddr" "locid" "remid"
@@ -627,10 +605,10 @@ test_subflows()
"10.0.2.2" "$client4_port" "23" "$client_addr_id" "ns1" "ns2"
# Delete the listener from the client ns, if one was created
- kill_wait $listener_pid
+ mptcp_lib_kill_wait $listener_pid
local sport
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
+ sport=$(mptcp_lib_evts_get_info sport "$server_evts" $SUB_ESTABLISHED)
# DESTROY_SUBFLOW from server to client machine
:>"$server_evts"
@@ -666,9 +644,9 @@ test_subflows()
"$client_addr_id" "ns1" "ns2"
# Delete the listener from the client ns, if one was created
- kill_wait $listener_pid
+ mptcp_lib_kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
+ sport=$(mptcp_lib_evts_get_info sport "$server_evts" $SUB_ESTABLISHED)
# DESTROY_SUBFLOW6 from server to client machine
:>"$server_evts"
@@ -705,9 +683,9 @@ test_subflows()
"$client_addr_id" "ns1" "ns2"
# Delete the listener from the client ns, if one was created
- kill_wait $listener_pid
+ mptcp_lib_kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
+ sport=$(mptcp_lib_evts_get_info sport "$server_evts" $SUB_ESTABLISHED)
# DESTROY_SUBFLOW from server to client machine
:>"$server_evts"
@@ -743,9 +721,9 @@ test_subflows()
"10.0.2.1" "$app4_port" "23" "$server_addr_id" "ns2" "ns1"
# Delete the listener from the server ns, if one was created
- kill_wait $listener_pid
+ mptcp_lib_kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+ sport=$(mptcp_lib_evts_get_info sport "$client_evts" $SUB_ESTABLISHED)
# DESTROY_SUBFLOW from client to server machine
:>"$client_evts"
@@ -782,9 +760,9 @@ test_subflows()
"$server_addr_id" "ns2" "ns1"
# Delete the listener from the server ns, if one was created
- kill_wait $listener_pid
+ mptcp_lib_kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+ sport=$(mptcp_lib_evts_get_info sport "$client_evts" $SUB_ESTABLISHED)
# DESTROY_SUBFLOW6 from client to server machine
:>"$client_evts"
@@ -819,9 +797,9 @@ test_subflows()
"10.0.2.2" "10.0.2.1" "$new4_port" "23" "$server_addr_id" "ns2" "ns1"
# Delete the listener from the server ns, if one was created
- kill_wait $listener_pid
+ mptcp_lib_kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+ sport=$(mptcp_lib_evts_get_info sport "$client_evts" $SUB_ESTABLISHED)
# DESTROY_SUBFLOW from client to server machine
:>"$client_evts"
@@ -865,9 +843,9 @@ test_subflows_v4_v6_mix()
"$server_addr_id" "ns2" "ns1"
# Delete the listener from the server ns, if one was created
- kill_wait $listener_pid
+ mptcp_lib_kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+ sport=$(mptcp_lib_evts_get_info sport "$client_evts" $SUB_ESTABLISHED)
# DESTROY_SUBFLOW from client to server machine
:>"$client_evts"
@@ -896,9 +874,10 @@ test_prio()
# Check TX
print_test "MP_PRIO TX"
- count=$(ip netns exec "$ns2" nstat -as | grep MPTcpExtMPPrioTx | awk '{print $2}')
- [ -z "$count" ] && count=0
- if [ $count != 1 ]; then
+ count=$(mptcp_lib_get_counter "$ns2" "MPTcpExtMPPrioTx")
+ if [ -z "$count" ]; then
+ test_skip
+ elif [ $count != 1 ]; then
test_fail "Count != 1: ${count}"
else
test_pass
@@ -906,9 +885,10 @@ test_prio()
# Check RX
print_test "MP_PRIO RX"
- count=$(ip netns exec "$ns1" nstat -as | grep MPTcpExtMPPrioRx | awk '{print $2}')
- [ -z "$count" ] && count=0
- if [ $count != 1 ]; then
+ count=$(mptcp_lib_get_counter "$ns1" "MPTcpExtMPPrioRx")
+ if [ -z "$count" ]; then
+ test_skip
+ elif [ $count != 1 ]; then
test_fail "Count != 1: ${count}"
else
test_pass
@@ -933,18 +913,13 @@ verify_listener_events()
print_test "CLOSE_LISTENER $e_saddr:$e_sport"
fi
- type=$(grep "type:$e_type," $evt |
- sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q')
- family=$(grep "type:$e_type," $evt |
- sed --unbuffered -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q')
- sport=$(grep "type:$e_type," $evt |
- sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+ type=$(mptcp_lib_evts_get_info type $evt $e_type)
+ family=$(mptcp_lib_evts_get_info family $evt $e_type)
+ sport=$(mptcp_lib_evts_get_info sport $evt $e_type)
if [ $family ] && [ $family = $AF_INET6 ]; then
- saddr=$(grep "type:$e_type," $evt |
- sed --unbuffered -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
+ saddr=$(mptcp_lib_evts_get_info saddr6 $evt $e_type)
else
- saddr=$(grep "type:$e_type," $evt |
- sed --unbuffered -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q')
+ saddr=$(mptcp_lib_evts_get_info saddr4 $evt $e_type)
fi
check_expected "type" "family" "saddr" "sport"
@@ -982,7 +957,7 @@ test_listener()
sleep 0.5
# Delete the listener from the client ns, if one was created
- kill_wait $listener_pid
+ mptcp_lib_kill_wait $listener_pid
sleep 0.5
verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
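
The mptcp_lib_evts_get_info() calls above all parse the dump written by './pm_nl_ctl events'. A hedged sketch of the calling convention, with an illustrative events file (path and values are made up):

    # $evts contains lines such as:
    #   type:1,token:823274047,server_side:1,...
    #   type:10,token:823274047,sport:10100,daddr4:10.0.2.1,dport:50002,...
    evts="/tmp/server_evts"                              # placeholder path
    token=$(mptcp_lib_evts_get_info token "${evts}")     # event type defaults to 1
    sport=$(mptcp_lib_evts_get_info sport "${evts}" 10)  # 10 = MPTCP_EVENT_SUB_ESTABLISHED
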
diff --git a/tools/testing/selftests/net/net_helper.sh b/tools/testing/selftests/net/net_helper.sh
new file mode 100755
index 000000000000..4fe0befa13fb
--- /dev/null
+++ b/tools/testing/selftests/net/net_helper.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Helper functions
+
+wait_local_port_listen()
+{
+ local listener_ns="${1}"
+ local port="${2}"
+ local protocol="${3}"
+ local port_hex
+ local i
+
+ port_hex="$(printf "%04X" "${port}")"
+ for i in $(seq 10); do
+ if ip netns exec "${listener_ns}" cat /proc/net/"${protocol}"* | \
+ grep -q "${port_hex}"; then
+ break
+ fi
+ sleep 0.1
+ done
+}
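
A minimal sketch of the calling convention for the new helper (the receiver and sender commands are placeholders; the real call sites in the udpgro scripts follow below):

    . ./net_helper.sh

    ip netns exec "${PEER_NS}" ./udp_receiver 8000 &   # placeholder: binds UDP port 8000 in the peer ns
    wait_local_port_listen "${PEER_NS}" 8000 udp       # poll /proc/net/udp* instead of a fixed sleep
    ./udp_sender "${PEER_ADDR}" 8000                   # placeholder sender
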
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index 0c743752669a..af5dc57c8ce9 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -3,6 +3,8 @@
#
# Run a series of udpgro functional tests.
+source net_helper.sh
+
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
BPF_FILE="../bpf/xdp_dummy.bpf.o"
@@ -51,8 +53,7 @@ run_one() {
echo "ok" || \
echo "failed" &
- # Hack: let bg programs complete the startup
- sleep 0.2
+ wait_local_port_listen ${PEER_NS} 8000 udp
./udpgso_bench_tx ${tx_args}
ret=$?
wait $(jobs -p)
@@ -97,7 +98,7 @@ run_one_nat() {
echo "ok" || \
echo "failed"&
- sleep 0.1
+ wait_local_port_listen "${PEER_NS}" 8000 udp
./udpgso_bench_tx ${tx_args}
ret=$?
kill -INT $pid
@@ -118,11 +119,9 @@ run_one_2sock() {
echo "ok" || \
echo "failed" &
- # Hack: let bg programs complete the startup
- sleep 0.2
+ wait_local_port_listen "${PEER_NS}" 12345 udp
./udpgso_bench_tx ${tx_args} -p 12345
- sleep 0.1
- # first UDP GSO socket should be closed at this point
+ wait_local_port_listen "${PEER_NS}" 8000 udp
./udpgso_bench_tx ${tx_args}
ret=$?
wait $(jobs -p)
diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh
index 894972877e8b..cb664679b434 100755
--- a/tools/testing/selftests/net/udpgro_bench.sh
+++ b/tools/testing/selftests/net/udpgro_bench.sh
@@ -3,6 +3,8 @@
#
# Run a series of udpgro benchmarks
+source net_helper.sh
+
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
BPF_FILE="../bpf/xdp_dummy.bpf.o"
@@ -40,8 +42,7 @@ run_one() {
ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r &
- # Hack: let bg programs complete the startup
- sleep 0.2
+ wait_local_port_listen "${PEER_NS}" 8000 udp
./udpgso_bench_tx ${tx_args}
}
diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
index 0a6359bed0b9..dd47fa96f6b3 100755
--- a/tools/testing/selftests/net/udpgro_frglist.sh
+++ b/tools/testing/selftests/net/udpgro_frglist.sh
@@ -3,6 +3,8 @@
#
# Run a series of udpgro benchmarks
+source net_helper.sh
+
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
BPF_FILE="../bpf/xdp_dummy.bpf.o"
@@ -45,8 +47,7 @@ run_one() {
echo ${rx_args}
ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
- # Hack: let bg programs complete the startup
- sleep 0.2
+ wait_local_port_listen "${PEER_NS}" 8000 udp
./udpgso_bench_tx ${tx_args}
}
diff --git a/tools/testing/selftests/tc-testing/Makefile b/tools/testing/selftests/tc-testing/Makefile
index b1fa2e177e2f..e8b3dde4fa16 100644
--- a/tools/testing/selftests/tc-testing/Makefile
+++ b/tools/testing/selftests/tc-testing/Makefile
@@ -1,31 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-include ../../../scripts/Makefile.include
-top_srcdir = $(abspath ../../../..)
-APIDIR := $(top_scrdir)/include/uapi
-TEST_GEN_FILES = action.o
+TEST_PROGS += ./tdc.sh
+TEST_FILES := action-ebpf tdc*.py Tdc*.py plugins plugin-lib tc-tests scripts
include ../lib.mk
-
-PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
-
-ifeq ($(PROBE),)
- CPU ?= probe
-else
- CPU ?= generic
-endif
-
-CLANG_SYS_INCLUDES := $(shell $(CLANG) -v -E - </dev/null 2>&1 \
- | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }')
-
-CLANG_FLAGS = -I. -I$(APIDIR) \
- $(CLANG_SYS_INCLUDES) \
- -Wno-compare-distinct-pointer-types
-
-$(OUTPUT)/%.o: %.c
- $(CLANG) $(CLANG_FLAGS) \
- -O2 --target=bpf -emit-llvm -c $< -o - | \
- $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
-
-TEST_PROGS += ./tdc.sh
-TEST_FILES := tdc*.py Tdc*.py plugins plugin-lib tc-tests scripts
diff --git a/tools/testing/selftests/tc-testing/README b/tools/testing/selftests/tc-testing/README
index be7b00799b3e..fc8e858ff119 100644
--- a/tools/testing/selftests/tc-testing/README
+++ b/tools/testing/selftests/tc-testing/README
@@ -195,8 +195,6 @@ directory:
and the other is a test whether the command leaked memory or not.
(This one is a preliminary version, it may not work quite right yet,
but the overall template is there and it should only need tweaks.)
- - buildebpfPlugin.py:
- builds all programs in $EBPFDIR.
ACKNOWLEDGEMENTS
diff --git a/tools/testing/selftests/tc-testing/action-ebpf b/tools/testing/selftests/tc-testing/action-ebpf
new file mode 100644
index 000000000000..4879479b2ee5
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/action-ebpf
Binary files differ
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
deleted file mode 100644
index d34fe06268d2..000000000000
--- a/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
+++ /dev/null
@@ -1,67 +0,0 @@
-'''
-build ebpf program
-'''
-
-import os
-import signal
-from string import Template
-import subprocess
-import time
-from TdcPlugin import TdcPlugin
-from tdc_config import *
-
-class SubPlugin(TdcPlugin):
- def __init__(self):
- self.sub_class = 'buildebpf/SubPlugin'
- self.tap = ''
- super().__init__()
-
- def pre_suite(self, testcount, testidlist):
- super().pre_suite(testcount, testidlist)
-
- if self.args.buildebpf:
- self._ebpf_makeall()
-
- def post_suite(self, index):
- super().post_suite(index)
-
- self._ebpf_makeclean()
-
- def add_args(self, parser):
- super().add_args(parser)
-
- self.argparser_group = self.argparser.add_argument_group(
- 'buildebpf',
- 'options for buildebpfPlugin')
- self.argparser_group.add_argument(
- '--nobuildebpf', action='store_false', default=True,
- dest='buildebpf',
- help='Don\'t build eBPF programs')
-
- return self.argparser
-
- def _ebpf_makeall(self):
- if self.args.buildebpf:
- self._make('all')
-
- def _ebpf_makeclean(self):
- if self.args.buildebpf:
- self._make('clean')
-
- def _make(self, target):
- command = 'make -C {} {}'.format(self.args.NAMES['EBPFDIR'], target)
- proc = subprocess.Popen(command,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=os.environ.copy())
- (rawout, serr) = proc.communicate()
-
- if proc.returncode != 0 and len(serr) > 0:
- foutput = serr.decode("utf-8")
- else:
- foutput = rawout.decode("utf-8")
-
- proc.stdout.close()
- proc.stderr.close()
- return proc, foutput
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
index b62429b0fcdb..bb19b8b76d3b 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
@@ -9,43 +9,13 @@ from TdcPlugin import TdcPlugin
from tdc_config import *
-def prepare_suite(obj, test):
- original = obj.args.NAMES
-
- if 'skip' in test and test['skip'] == 'yes':
- return
-
- if 'nsPlugin' not in test['plugins']:
- return
-
- shadow = {}
- shadow['IP'] = original['IP']
- shadow['TC'] = original['TC']
- shadow['NS'] = '{}-{}'.format(original['NS'], test['random'])
- shadow['DEV0'] = '{}id{}'.format(original['DEV0'], test['id'])
- shadow['DEV1'] = '{}id{}'.format(original['DEV1'], test['id'])
- shadow['DUMMY'] = '{}id{}'.format(original['DUMMY'], test['id'])
- shadow['DEV2'] = original['DEV2']
- obj.args.NAMES = shadow
-
- if obj.args.namespace:
- obj._ns_create()
- else:
- obj._ports_create()
-
- # Make sure the netns is visible in the fs
- while True:
- obj._proc_check()
- try:
- ns = obj.args.NAMES['NS']
- f = open('/run/netns/{}'.format(ns))
- f.close()
- break
- except:
- time.sleep(0.1)
- continue
-
- obj.args.NAMES = original
+try:
+ from pyroute2 import netns
+ from pyroute2 import IPRoute
+ netlink = True
+except ImportError:
+ netlink = False
+ print("!!! Consider installing pyroute2 !!!")
class SubPlugin(TdcPlugin):
def __init__(self):
@@ -53,64 +23,71 @@ class SubPlugin(TdcPlugin):
super().__init__()
def pre_suite(self, testcount, testlist):
- from itertools import cycle
-
super().pre_suite(testcount, testlist)
- print("Setting up namespaces and devices...")
+ def prepare_test(self, test):
+ if 'skip' in test and test['skip'] == 'yes':
+ return
- with Pool(self.args.mp) as p:
- it = zip(cycle([self]), testlist)
- p.starmap(prepare_suite, it)
+ if 'nsPlugin' not in test['plugins']:
+ return
- def pre_case(self, caseinfo, test_skip):
+ if netlink == True:
+ self._nl_ns_create()
+ else:
+ self._ipr2_ns_create()
+
+ # Make sure the netns is visible in the fs
+ ticks = 20
+ while True:
+ if ticks == 0:
+ raise TimeoutError
+ self._proc_check()
+ try:
+ ns = self.args.NAMES['NS']
+ f = open('/run/netns/{}'.format(ns))
+ f.close()
+ break
+ except:
+ time.sleep(0.1)
+ ticks -= 1
+ continue
+
+ def pre_case(self, test, test_skip):
if self.args.verbose:
print('{}.pre_case'.format(self.sub_class))
if test_skip:
return
+ self.prepare_test(test)
def post_case(self):
if self.args.verbose:
print('{}.post_case'.format(self.sub_class))
- if self.args.namespace:
- self._ns_destroy()
+ if netlink == True:
+ self._nl_ns_destroy()
else:
- self._ports_destroy()
+ self._ipr2_ns_destroy()
def post_suite(self, index):
if self.args.verbose:
print('{}.post_suite'.format(self.sub_class))
# Make sure we don't leak resources
- for f in os.listdir('/run/netns/'):
- cmd = self._replace_keywords("$IP netns del {}".format(f))
+ cmd = self._replace_keywords("$IP -a netns del")
- if self.args.verbose > 3:
- print('_exec_cmd: command "{}"'.format(cmd))
-
- subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ if self.args.verbose > 3:
+ print('_exec_cmd: command "{}"'.format(cmd))
- def add_args(self, parser):
- super().add_args(parser)
- self.argparser_group = self.argparser.add_argument_group(
- 'netns',
- 'options for nsPlugin(run commands in net namespace)')
- self.argparser_group.add_argument(
- '-N', '--no-namespace', action='store_false', default=True,
- dest='namespace', help='Don\'t run commands in namespace')
- return self.argparser
+ subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def adjust_command(self, stage, command):
super().adjust_command(stage, command)
cmdform = 'list'
cmdlist = list()
- if not self.args.namespace:
- return command
-
if self.args.verbose:
print('{}.adjust_command'.format(self.sub_class))
@@ -138,63 +115,90 @@ class SubPlugin(TdcPlugin):
print('adjust_command: return command [{}]'.format(command))
return command
- def _ports_create_cmds(self):
- cmds = []
-
- cmds.append(self._replace_keywords('link add $DEV0 type veth peer name $DEV1'))
- cmds.append(self._replace_keywords('link set $DEV0 up'))
- cmds.append(self._replace_keywords('link add $DUMMY type dummy'))
- if not self.args.namespace:
- cmds.append(self._replace_keywords('link set $DEV1 up'))
-
- return cmds
-
- def _ports_create(self):
- self._exec_cmd_batched('pre', self._ports_create_cmds())
+ def _nl_ns_create(self):
+ ns = self.args.NAMES["NS"];
+ dev0 = self.args.NAMES["DEV0"];
+ dev1 = self.args.NAMES["DEV1"];
+ dummy = self.args.NAMES["DUMMY"];
- def _ports_destroy_cmd(self):
- return self._replace_keywords('link del $DEV0')
-
- def _ports_destroy(self):
- self._exec_cmd('post', self._ports_destroy_cmd())
-
- def _ns_create_cmds(self):
+ if self.args.verbose:
+ print('{}._nl_ns_create'.format(self.sub_class))
+
+ netns.create(ns)
+ netns.pushns(newns=ns)
+ with IPRoute() as ip:
+ ip.link('add', ifname=dev1, kind='veth', peer={'ifname': dev0, 'net_ns_fd':'/proc/1/ns/net'})
+ ip.link('add', ifname=dummy, kind='dummy')
+ ticks = 20
+ while True:
+ if ticks == 0:
+ raise TimeoutError
+ try:
+ dev1_idx = ip.link_lookup(ifname=dev1)[0]
+ dummy_idx = ip.link_lookup(ifname=dummy)[0]
+ ip.link('set', index=dev1_idx, state='up')
+ ip.link('set', index=dummy_idx, state='up')
+ break
+ except:
+ time.sleep(0.1)
+ ticks -= 1
+ continue
+ netns.popns()
+
+ with IPRoute() as ip:
+ ticks = 20
+ while True:
+ if ticks == 0:
+ raise TimeoutError
+ try:
+ dev0_idx = ip.link_lookup(ifname=dev0)[0]
+ ip.link('set', index=dev0_idx, state='up')
+ break
+ except:
+ time.sleep(0.1)
+ ticks -= 1
+ continue
+
+ def _ipr2_ns_create_cmds(self):
cmds = []
- if self.args.namespace:
- ns = self.args.NAMES['NS']
+ ns = self.args.NAMES['NS']
- cmds.append(self._replace_keywords('netns add {}'.format(ns)))
- cmds.append(self._replace_keywords('link set $DEV1 netns {}'.format(ns)))
- cmds.append(self._replace_keywords('link set $DUMMY netns {}'.format(ns)))
- cmds.append(self._replace_keywords('netns exec {} $IP link set $DEV1 up'.format(ns)))
- cmds.append(self._replace_keywords('netns exec {} $IP link set $DUMMY up'.format(ns)))
+ cmds.append(self._replace_keywords('netns add {}'.format(ns)))
+ cmds.append(self._replace_keywords('link add $DEV1 type veth peer name $DEV0'))
+ cmds.append(self._replace_keywords('link set $DEV1 netns {}'.format(ns)))
+ cmds.append(self._replace_keywords('link add $DUMMY type dummy'.format(ns)))
+ cmds.append(self._replace_keywords('link set $DUMMY netns {}'.format(ns)))
+ cmds.append(self._replace_keywords('netns exec {} $IP link set $DEV1 up'.format(ns)))
+ cmds.append(self._replace_keywords('netns exec {} $IP link set $DUMMY up'.format(ns)))
+ cmds.append(self._replace_keywords('link set $DEV0 up'.format(ns)))
- if self.args.device:
- cmds.append(self._replace_keywords('link set $DEV2 netns {}'.format(ns)))
- cmds.append(self._replace_keywords('netns exec {} $IP link set $DEV2 up'.format(ns)))
+ if self.args.device:
+ cmds.append(self._replace_keywords('link set $DEV2 netns {}'.format(ns)))
+ cmds.append(self._replace_keywords('netns exec {} $IP link set $DEV2 up'.format(ns)))
return cmds
- def _ns_create(self):
+ def _ipr2_ns_create(self):
'''
Create the network namespace in which the tests will be run and set up
the required network devices for it.
'''
- self._ports_create()
- self._exec_cmd_batched('pre', self._ns_create_cmds())
+ self._exec_cmd_batched('pre', self._ipr2_ns_create_cmds())
+
+ def _nl_ns_destroy(self):
+ ns = self.args.NAMES['NS']
+ netns.remove(ns)
- def _ns_destroy_cmd(self):
+ def _ipr2_ns_destroy_cmd(self):
return self._replace_keywords('netns delete {}'.format(self.args.NAMES['NS']))
- def _ns_destroy(self):
+ def _ipr2_ns_destroy(self):
'''
Destroy the network namespace for testing (and any associated network
devices as well)
'''
- if self.args.namespace:
- self._exec_cmd('post', self._ns_destroy_cmd())
- self._ports_destroy()
+ self._exec_cmd('post', self._ipr2_ns_destroy_cmd())
@cached_property
def _proc(self):
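
When pyroute2 is not available, the _ipr2_ns_create_cmds() fallback above builds the per-test topology with plain iproute2. Hand-expanded with example names (the plugin substitutes $NS, $DEV0, $DEV1 and $DUMMY per test), it is roughly:

    ip netns add tcut-ns
    ip link add veth1 type veth peer name veth0
    ip link set veth1 netns tcut-ns
    ip link add dummy0 type dummy
    ip link set dummy0 netns tcut-ns
    ip netns exec tcut-ns ip link set veth1 up
    ip netns exec tcut-ns ip link set dummy0 up
    ip link set veth0 up
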
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
index 91832400ddbd..6e00bf32ef9a 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
@@ -54,9 +54,6 @@
"actions",
"bpf"
],
- "plugins": {
- "requires": "buildebpfPlugin"
- },
"setup": [
[
"$TC action flush action bpf",
@@ -65,10 +62,10 @@
255
]
],
- "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667",
+ "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action-ebpf section action-ok index 667",
"expExitCode": "0",
"verifyCmd": "$TC action get action bpf index 667",
- "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9].* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
+ "matchPattern": "action order [0-9]*: bpf action-ebpf:\\[action-ok\\] id [0-9].* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
"matchCount": "1",
"teardown": [
"$TC action flush action bpf"
@@ -81,9 +78,6 @@
"actions",
"bpf"
],
- "plugins": {
- "requires": "buildebpfPlugin"
- },
"setup": [
[
"$TC action flush action bpf",
@@ -92,10 +86,10 @@
255
]
],
- "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ko index 667",
+ "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action-ebpf section action-ko index 667",
"expExitCode": "255",
"verifyCmd": "$TC action get action bpf index 667",
- "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ko\\] id [0-9].*index 667 ref",
+ "matchPattern": "action order [0-9]*: bpf action-ebpf:\\[action-ko\\] id [0-9].*index 667 ref",
"matchCount": "0",
"teardown": [
[
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json
index 013fb983bc3f..725d406a30ac 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json
@@ -52,17 +52,16 @@
],
"plugins": {
"requires": [
- "buildebpfPlugin",
"nsPlugin"
]
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 100 bpf object-file $EBPFDIR/action.o section action-ok",
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 100 bpf object-file $EBPFDIR/action-ebpf section action-ok",
"expExitCode": "0",
"verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 100 bpf",
- "matchPattern": "filter parent ffff: protocol ip pref 100 bpf chain [0-9]+ handle 0x1 action.o:\\[action-ok\\].*tag [0-9a-f]{16}( jited)?",
+ "matchPattern": "filter parent ffff: protocol ip pref 100 bpf chain [0-9]+ handle 0x1 action-ebpf:\\[action-ok\\].*tag [0-9a-f]{16}( jited)?",
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
@@ -77,17 +76,16 @@
],
"plugins": {
"requires": [
- "buildebpfPlugin",
"nsPlugin"
]
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 100 bpf object-file $EBPFDIR/action.o section action-ko",
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 100 bpf object-file $EBPFDIR/action-ebpf section action-ko",
"expExitCode": "1",
"verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 100 bpf",
- "matchPattern": "filter parent ffff: protocol ip pref 100 bpf chain [0-9]+ handle 0x1 action.o:\\[action-ko\\].*tag [0-9a-f]{16}( jited)?",
+ "matchPattern": "filter parent ffff: protocol ip pref 100 bpf chain [0-9]+ handle 0x1 action-ebpf:\\[action-ko\\].*tag [0-9a-f]{16}( jited)?",
"matchCount": "0",
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json b/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json
index ddc7c355be0a..24bd0c2a3014 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json
@@ -272,5 +272,62 @@
"teardown": [
"$TC qdisc del dev $DEV1 parent root drr"
]
+ },
+ {
+ "id": "bd32",
+ "name": "Try to delete hashtable referenced by another u32 filter",
+ "category": [
+ "filter",
+ "u32"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 parent root handle 10: drr",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 handle 1: u32 divisor 1",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 u32 ht 800: match ip src any link 1:"
+ ],
+ "cmdUnderTest": "$TC filter delete dev $DEV1 parent 10: prio 2 handle 1: u32",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter show dev $DEV1",
+ "matchPattern": "protocol ip pref 2 u32 chain 0 fh 1:",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 parent root drr"
+ ]
+ },
+ {
+ "id": "4585",
+ "name": "Delete small tree of u32 hashtables and filters",
+ "category": [
+ "filter",
+ "u32"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 parent root handle 10: drr",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 handle 1: u32 divisor 1",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 handle 2: u32 divisor 1",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 handle 3: u32 divisor 2",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 handle 4: u32 divisor 1",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 u32 ht 1: match ip src any action drop",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 u32 ht 2: match ip src any action drop",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 u32 ht 3: match ip src any link 2:",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 u32 ht 3: match ip src any link 1:",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 u32 ht 4: match ip src any action drop",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 u32 ht 800: match ip src any link 3:",
+ "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 u32 ht 800: match ip src any link 4:"
+ ],
+ "cmdUnderTest": "$TC filter delete dev $DEV1 parent 10:",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1",
+ "matchPattern": "protocol ip pref 2 u32",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 parent root drr"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index a6718192aff3..caeacc691587 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -497,11 +497,6 @@ def prepare_run(pm, args, testlist):
pm.call_post_suite(1)
return emergency_exit_message
- if args.verbose:
- print('give test rig 2 seconds to stabilize')
-
- time.sleep(2)
-
def purge_run(pm, index):
pm.call_post_suite(index)
@@ -616,7 +611,7 @@ def test_runner_mp(pm, args, alltests):
batches.insert(0, serial)
print("Executing {} tests in parallel and {} in serial".format(len(parallel), len(serial)))
- print("Using {} batches".format(len(batches)))
+ print("Using {} batches and {} workers".format(len(batches), args.mp))
# We can't pickle these objects so workaround them
global mp_pm
@@ -1017,12 +1012,17 @@ def main():
parser = pm.call_add_args(parser)
(args, remaining) = parser.parse_known_args()
args.NAMES = NAMES
+ args.mp = min(args.mp, 4)
pm.set_args(args)
check_default_settings(args, remaining, pm)
if args.verbose > 2:
print('args is {}'.format(args))
- set_operation_mode(pm, parser, args, remaining)
+ try:
+ set_operation_mode(pm, parser, args, remaining)
+ except KeyboardInterrupt:
+ # Cleanup on Ctrl-C
+ pm.call_post_suite(None)
if __name__ == "__main__":
main()
diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh
index eb357bd7923c..407fa53822a0 100755
--- a/tools/testing/selftests/tc-testing/tdc.sh
+++ b/tools/testing/selftests/tc-testing/tdc.sh
@@ -1,7 +1,68 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-modprobe netdevsim
-modprobe sch_teql
-./tdc.py -c actions --nobuildebpf
-./tdc.py -c qdisc
+# If a required module was not compiled,
+# the test that needs it will fail anyway.
+try_modprobe() {
+ modprobe -q -R "$1"
+ if [ $? -ne 0 ]; then
+ echo "Module $1 not found... skipping."
+ else
+ modprobe "$1"
+ fi
+}
+
+try_modprobe netdevsim
+try_modprobe act_bpf
+try_modprobe act_connmark
+try_modprobe act_csum
+try_modprobe act_ct
+try_modprobe act_ctinfo
+try_modprobe act_gact
+try_modprobe act_gate
+try_modprobe act_ipt
+try_modprobe act_mirred
+try_modprobe act_mpls
+try_modprobe act_nat
+try_modprobe act_pedit
+try_modprobe act_police
+try_modprobe act_sample
+try_modprobe act_simple
+try_modprobe act_skbedit
+try_modprobe act_skbmod
+try_modprobe act_tunnel_key
+try_modprobe act_vlan
+try_modprobe cls_basic
+try_modprobe cls_bpf
+try_modprobe cls_cgroup
+try_modprobe cls_flow
+try_modprobe cls_flower
+try_modprobe cls_fw
+try_modprobe cls_matchall
+try_modprobe cls_route
+try_modprobe cls_u32
+try_modprobe em_canid
+try_modprobe em_cmp
+try_modprobe em_ipset
+try_modprobe em_ipt
+try_modprobe em_meta
+try_modprobe em_nbyte
+try_modprobe em_text
+try_modprobe em_u32
+try_modprobe sch_cake
+try_modprobe sch_cbs
+try_modprobe sch_choke
+try_modprobe sch_codel
+try_modprobe sch_drr
+try_modprobe sch_etf
+try_modprobe sch_ets
+try_modprobe sch_fq
+try_modprobe sch_fq_codel
+try_modprobe sch_fq_pie
+try_modprobe sch_gred
+try_modprobe sch_hfsc
+try_modprobe sch_hhf
+try_modprobe sch_htb
+try_modprobe sch_teql
+./tdc.py -J`nproc` -c actions
+./tdc.py -J`nproc` -c qdisc
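
For context, try_modprobe() relies on 'modprobe -q -R' only resolving the alias against modules.dep and returning non-zero when nothing matches, so module availability can be probed without loading anything. A small illustrative check, assuming the kmod modprobe:

    if modprobe -q -R sch_netem; then
            modprobe sch_netem
    else
            echo "Module sch_netem not found... skipping."
    fi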