Diffstat (limited to 'tools/testing/selftests/bpf')
-rw-r--r-- tools/testing/selftests/bpf/DENYLIST.aarch64 | 12
-rw-r--r-- tools/testing/selftests/bpf/DENYLIST.s390x | 26
-rw-r--r-- tools/testing/selftests/bpf/Makefile | 56
-rw-r--r-- tools/testing/selftests/bpf/bpf_experimental.h | 346
-rw-r--r-- tools/testing/selftests/bpf/bpf_kfuncs.h | 14
-rw-r--r-- tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 11
-rw-r--r-- tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h | 2
-rw-r--r-- tools/testing/selftests/bpf/cgroup_helpers.c | 38
-rw-r--r-- tools/testing/selftests/bpf/config | 3
-rw-r--r-- tools/testing/selftests/bpf/config.x86_64 | 2
-rw-r--r-- tools/testing/selftests/bpf/liburandom_read.map | 15
-rw-r--r-- tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c | 4
-rw-r--r-- tools/testing/selftests/bpf/map_tests/map_percpu_stats.c | 20
-rw-r--r-- tools/testing/selftests/bpf/netlink_helpers.c | 358
-rw-r--r-- tools/testing/selftests/bpf/netlink_helpers.h | 46
-rw-r--r-- tools/testing/selftests/bpf/network_helpers.c | 34
-rw-r--r-- tools/testing/selftests/bpf/network_helpers.h | 1
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/align.c | 241
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c | 4
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 44
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 2
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/btf.c | 4
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/cgroup_iter.c | 33
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/connect_ping.c | 4
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/empty_skb.c | 12
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/exceptions.c | 409
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/fib_lookup.c | 83
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/fill_link_info.c | 2
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/iters.c | 209
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c | 37
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c | 20
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/libbpf_str.c | 6
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/linked_list.c | 16
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/lwt_helpers.h | 3
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/missed.c | 138
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c | 5
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/percpu_alloc.c | 128
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c | 89
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/queue_stack_map.c | 2
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/ringbuf.c | 26
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c | 15
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/section_names.c | 45
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/sock_addr.c | 612
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/sockmap_basic.c | 59
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h | 2
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/sockmap_listen.c | 189
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/sockopt.c | 113
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/tailcalls.c | 353
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c | 11
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/tc_helpers.h | 20
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/tc_links.c | 64
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/tc_netkit.c | 687
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/tc_opts.c | 484
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/tc_redirect.c | 317
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c | 68
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/test_bpffs.c | 11
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/timer.c | 10
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/uprobe.c | 95
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/verifier.c | 4
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c | 61
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/xdp_metadata.c | 2
-rw-r--r-- tools/testing/selftests/bpf/progs/bpf_cubic.c | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c (renamed from tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/bpf_iter_tasks.c (renamed from tools/testing/selftests/bpf/progs/bpf_iter_task.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/bpf_loop_bench.c | 13
-rw-r--r-- tools/testing/selftests/bpf/progs/bpf_misc.h | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/cb_refs.c | 1
-rw-r--r-- tools/testing/selftests/bpf/progs/connect_unix_prog.c | 40
-rw-r--r-- tools/testing/selftests/bpf/progs/exceptions.c | 368
-rw-r--r-- tools/testing/selftests/bpf/progs/exceptions_assert.c | 135
-rw-r--r-- tools/testing/selftests/bpf/progs/exceptions_ext.c | 72
-rw-r--r-- tools/testing/selftests/bpf/progs/exceptions_fail.c | 349
-rw-r--r-- tools/testing/selftests/bpf/progs/getpeername_unix_prog.c | 39
-rw-r--r-- tools/testing/selftests/bpf/progs/getsockname_unix_prog.c | 39
-rw-r--r-- tools/testing/selftests/bpf/progs/iters.c | 695
-rw-r--r-- tools/testing/selftests/bpf/progs/iters_css.c | 72
-rw-r--r-- tools/testing/selftests/bpf/progs/iters_css_task.c | 102
-rw-r--r-- tools/testing/selftests/bpf/progs/iters_task.c | 41
-rw-r--r-- tools/testing/selftests/bpf/progs/iters_task_failure.c | 105
-rw-r--r-- tools/testing/selftests/bpf/progs/iters_task_vma.c | 44
-rw-r--r-- tools/testing/selftests/bpf/progs/kprobe_multi_override.c | 13
-rw-r--r-- tools/testing/selftests/bpf/progs/linked_list_fail.c | 4
-rw-r--r-- tools/testing/selftests/bpf/progs/missed_kprobe.c | 30
-rw-r--r-- tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c | 48
-rw-r--r-- tools/testing/selftests/bpf/progs/missed_tp_recursion.c | 41
-rw-r--r-- tools/testing/selftests/bpf/progs/percpu_alloc_array.c | 190
-rw-r--r-- tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c | 109
-rw-r--r-- tools/testing/selftests/bpf/progs/percpu_alloc_fail.c | 164
-rw-r--r-- tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c | 106
-rw-r--r-- tools/testing/selftests/bpf/progs/profiler.inc.h | 2
-rw-r--r-- tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c | 39
-rw-r--r-- tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c | 40
-rw-r--r-- tools/testing/selftests/bpf/progs/strobemeta.h | 78
-rw-r--r-- tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c | 18
-rw-r--r-- tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c | 18
-rw-r--r-- tools/testing/selftests/bpf/progs/tailcall_poke.c | 32
-rw-r--r-- tools/testing/selftests/bpf/progs/test_bpf_ma.c | 291
-rw-r--r-- tools/testing/selftests/bpf/progs/test_ldsx_insn.c | 10
-rw-r--r-- tools/testing/selftests/bpf/progs/test_sockmap_listen.c | 7
-rw-r--r-- tools/testing/selftests/bpf/progs/test_task_under_cgroup.c | 28
-rw-r--r-- tools/testing/selftests/bpf/progs/test_tc_link.c | 13
-rw-r--r-- tools/testing/selftests/bpf/progs/test_uprobe.c | 61
-rw-r--r-- tools/testing/selftests/bpf/progs/test_vmlinux.c | 4
-rw-r--r-- tools/testing/selftests/bpf/progs/timer.c | 63
-rw-r--r-- tools/testing/selftests/bpf/progs/timer_failure.c | 47
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_bswap.c | 5
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_cfg.c | 62
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_gotol.c | 5
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c | 242
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_ldsx.c | 153
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_loops1.c | 9
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_movsx.c | 5
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_precision.c | 133
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_sdiv.c | 5
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_subprog_precision.c | 86
-rw-r--r-- tools/testing/selftests/bpf/progs/xdp_hw_metadata.c | 2
-rw-r--r-- tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c | 88
-rw-r--r-- tools/testing/selftests/bpf/progs/xsk_xdp_progs.c | 22
-rwxr-xr-x tools/testing/selftests/bpf/test_bpftool_synctypes.py | 9
-rw-r--r-- tools/testing/selftests/bpf/test_loader.c | 4
-rw-r--r-- tools/testing/selftests/bpf/test_progs.c | 2
-rw-r--r-- tools/testing/selftests/bpf/test_progs.h | 2
-rw-r--r-- tools/testing/selftests/bpf/test_verifier.c | 2
-rwxr-xr-x tools/testing/selftests/bpf/test_xsk.sh | 40
-rw-r--r-- tools/testing/selftests/bpf/trace_helpers.c | 134
-rw-r--r-- tools/testing/selftests/bpf/trace_helpers.h | 8
-rw-r--r-- tools/testing/selftests/bpf/unpriv_helpers.c | 33
-rw-r--r-- tools/testing/selftests/bpf/urandom_read.c | 15
-rw-r--r-- tools/testing/selftests/bpf/urandom_read_lib1.c | 22
-rw-r--r-- tools/testing/selftests/bpf/verifier/bpf_st_mem.c | 32
-rw-r--r-- tools/testing/selftests/bpf/verifier/calls.c | 6
-rw-r--r-- tools/testing/selftests/bpf/verifier/ld_imm64.c | 8
-rw-r--r-- tools/testing/selftests/bpf/xdp_features.c | 4
-rw-r--r-- tools/testing/selftests/bpf/xdp_hw_metadata.c | 80
-rw-r--r-- tools/testing/selftests/bpf/xsk.c | 3
-rw-r--r-- tools/testing/selftests/bpf/xsk.h | 2
-rwxr-xr-x tools/testing/selftests/bpf/xsk_prereqs.sh | 10
-rw-r--r-- tools/testing/selftests/bpf/xsk_xdp_common.h | 12
-rw-r--r-- tools/testing/selftests/bpf/xsk_xdp_metadata.h | 5
-rw-r--r-- tools/testing/selftests/bpf/xskxceiver.c | 1037
-rw-r--r-- tools/testing/selftests/bpf/xskxceiver.h | 57
141 files changed, 10337 insertions, 1311 deletions
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 7f768d335698..5c2cc7e8c5d0 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -1,17 +1,13 @@
bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+exceptions # JIT does not support calling kfunc bpf_throw: -524
fexit_sleep # The test never returns. The remaining tests cannot start.
-kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test/attach_api_addrs # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test/attach_api_pattern # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test/attach_api_syms # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test/bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test/link_api_addrs # link_fd unexpected link_fd: actual -95 < expected 0
-kprobe_multi_test/link_api_syms # link_fd unexpected link_fd: actual -95 < expected 0
-kprobe_multi_test/skel_api # libbpf: failed to load BPF skeleton 'kprobe_multi': -3
+kprobe_multi_bench_attach # needs CONFIG_FPROBE
+kprobe_multi_test # needs CONFIG_FPROBE
module_attach # prog 'kprobe_multi': failed to auto-attach: -95
fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524
fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524
fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
+missed/kprobe_recursion # missed_kprobe_recursion__attach unexpected error: -95 (errno 95)
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index 5061d9e24c16..1a63996c0304 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -1,29 +1,5 @@
# TEMPORARY
# Alphabetical order
-bloom_filter_map # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
-bpf_cookie # failed to open_and_load program: -524 (trampoline)
-bpf_loop # attaches to __x64_sys_nanosleep
-cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
-dynptr/test_dynptr_skb_data
-dynptr/test_skb_readonly
-fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
+exceptions # JIT does not support calling kfunc bpf_throw (exceptions)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
-iters/testmod_seq* # s390x doesn't support kfuncs in modules yet
-kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test # relies on fentry
-ksyms_btf/weak_ksyms* # test_ksyms_weak__open_and_load unexpected error: -22 (kfunc)
-ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?)
-ksyms_module_libbpf # JIT does not support calling kernel function (kfunc)
-ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?)
-module_attach # skel_attach skeleton attach failed: -524 (trampoline)
-ringbuf # skel_load skeleton load failed (?)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
-test_lsm # attach unexpected error: -524 (trampoline)
-trace_printk # trace_printk__load unexpected error: -2 (errno 2) (?)
-trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?)
-unpriv_bpf_disabled # fentry
-user_ringbuf # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3 (?)
-verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
-xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
-xdp_metadata # JIT does not support calling kernel function (kfunc)
-test_task_under_cgroup # JIT does not support calling kernel function (kfunc)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index caede9b574cb..9c27b67bc7b1 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -27,7 +27,11 @@ endif
BPF_GCC ?= $(shell command -v bpf-gcc;)
SAN_CFLAGS ?=
SAN_LDFLAGS ?= $(SAN_CFLAGS)
-CFLAGS += -g -O0 -rdynamic -Wall -Werror $(GENFLAGS) $(SAN_CFLAGS) \
+RELEASE ?=
+OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0)
+CFLAGS += -g $(OPT_FLAGS) -rdynamic \
+ -Wall -Werror \
+ $(GENFLAGS) $(SAN_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT)
LDFLAGS += $(SAN_LDFLAGS)
@@ -104,7 +108,7 @@ TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
xdp_features
-TEST_GEN_FILES += liburandom_read.so urandom_read sign-file
+TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi
# Emit succinct information message describing current building step
# $1 - generic step name (e.g., CC, LINK, etc);
@@ -188,7 +192,7 @@ $(OUTPUT)/%:%.c
$(Q)$(LINK.c) $^ $(LDLIBS) -o $@
# LLVM's ld.lld doesn't support all the architectures, so use it only on x86
-ifeq ($(SRCARCH),x86)
+ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 riscv))
LLD := lld
else
LLD := ld
@@ -196,17 +200,20 @@ endif
# Filter out -static for liburandom_read.so and its dependent targets so that static builds
# do not fail. Static builds leave urandom_read relying on system-wide shared libraries.
-$(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c
+$(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c liburandom_read.map
$(call msg,LIB,,$@)
- $(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) \
- $^ $(filter-out -static,$(LDLIBS)) \
+ $(Q)$(CLANG) $(CLANG_TARGET_ARCH) \
+ $(filter-out -static,$(CFLAGS) $(LDFLAGS)) \
+ $(filter %.c,$^) $(filter-out -static,$(LDLIBS)) \
-fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
+ -Wl,--version-script=liburandom_read.map \
-fPIC -shared -o $@
$(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_read.so
$(call msg,BINARY,,$@)
- $(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
- -lurandom_read $(filter-out -static,$(LDLIBS)) -L$(OUTPUT) \
+ $(Q)$(CLANG) $(CLANG_TARGET_ARCH) \
+ $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
+ -lurandom_read $(filter-out -static,$(LDLIBS)) -L$(OUTPUT) \
-fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
-Wl,-rpath=. -o $@
@@ -238,7 +245,7 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf \
BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) \
- EXTRA_CFLAGS='-g -O0 $(SAN_CFLAGS)' \
+ EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS)' \
EXTRA_LDFLAGS='$(SAN_LDFLAGS)' && \
cp $(RUNQSLOWER_OUTPUT)runqslower $@
@@ -276,7 +283,7 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" \
- EXTRA_CFLAGS='-g -O0' \
+ EXTRA_CFLAGS='-g $(OPT_FLAGS)' \
OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \
@@ -287,7 +294,7 @@ $(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(BPFOBJ) | $(BUILD_DIR)/bpftool
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) \
- EXTRA_CFLAGS='-g -O0' \
+ EXTRA_CFLAGS='-g $(OPT_FLAGS)' \
OUTPUT=$(BUILD_DIR)/bpftool/ \
LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \
LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \
@@ -310,7 +317,7 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
| $(BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
- EXTRA_CFLAGS='-g -O0 $(SAN_CFLAGS)' \
+ EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS)' \
EXTRA_LDFLAGS='$(SAN_LDFLAGS)' \
DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
@@ -319,7 +326,7 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
| $(HOST_BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
- EXTRA_CFLAGS='-g -O0' ARCH= CROSS_COMPILE= \
+ EXTRA_CFLAGS='-g $(OPT_FLAGS)' ARCH= CROSS_COMPILE= \
OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
CC="$(HOSTCC)" LD="$(HOSTLD)" \
DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
@@ -578,11 +585,20 @@ endef
# Define test_progs test runner.
TRUNNER_TESTS_DIR := prog_tests
TRUNNER_BPF_PROGS_DIR := progs
-TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
- network_helpers.c testing_helpers.c \
- btf_helpers.c flow_dissector_load.h \
- cap_helpers.c test_loader.c xsk.c disasm.c \
- json_writer.c unpriv_helpers.c \
+TRUNNER_EXTRA_SOURCES := test_progs.c \
+ cgroup_helpers.c \
+ trace_helpers.c \
+ network_helpers.c \
+ testing_helpers.c \
+ btf_helpers.c \
+ cap_helpers.c \
+ unpriv_helpers.c \
+ netlink_helpers.c \
+ test_loader.c \
+ xsk.c \
+ disasm.c \
+ json_writer.c \
+ flow_dissector_load.h \
ip_check_defrag_frags.h
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
$(OUTPUT)/liburandom_read.so \
@@ -640,7 +656,9 @@ $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
-$(OUTPUT)/xskxceiver: xskxceiver.c xskxceiver.h $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT)
+# Include find_bit.c to compile xskxceiver.
+EXTRA_SRC := $(TOOLSDIR)/lib/find_bit.c
+$(OUTPUT)/xskxceiver: $(EXTRA_SRC) xskxceiver.c xskxceiver.h $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT)
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 209811b1993a..1386baf9ae4a 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -131,4 +131,350 @@ extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *nod
*/
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
+/* Description
+ * Allocates a percpu object of the type represented by 'local_type_id' in
+ * program BTF. User may use the bpf_core_type_id_local macro to pass the
+ * type ID of a struct in program BTF.
+ *
+ * The 'local_type_id' parameter must be a known constant.
+ * The 'meta' parameter is rewritten by the verifier; the BPF program
+ * does not need to set it.
+ * Returns
+ * A pointer to a percpu object of the type corresponding to the passed in
+ * 'local_type_id', or NULL on failure.
+ */
+extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
+#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))
+
+/* Description
+ * Free an allocated percpu object. All fields of the object that require
+ * destruction will be destructed before the storage is freed.
+ *
+ * The 'meta' parameter is rewritten by the verifier; the BPF program
+ * does not need to set it.
+ * Returns
+ * Void.
+ */
+extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;
+
+struct bpf_iter_task_vma;
+
+extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
+ struct task_struct *task,
+ unsigned long addr) __ksym;
+extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
+extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;
+
+/* Convenience macro to wrap over bpf_percpu_obj_drop_impl */
+#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)
+
+/* Description
+ * Throw a BPF exception from the program, immediately terminating its
+ * execution and unwinding the stack. The supplied 'cookie' parameter
+ * will be the return value of the program when an exception is thrown,
+ * and the default exception callback is used. Otherwise, if an exception
+ * callback is set using the '__exception_cb(callback)' declaration tag
+ * on the main program, the 'cookie' parameter will be the callback's only
+ * input argument.
+ *
+ * Thus, when the default exception callback is used, 'cookie' is subject
+ * to the constraints on the program's return value (as with R0 on exit).
+ * Otherwise, the return value of the marked exception callback is
+ * subject to the same checks.
+ *
+ * Note that throwing an exception with lingering resources (locks,
+ * references, etc.) will lead to a verification error.
+ *
+ * Note that callbacks *cannot* call this helper.
+ * Returns
+ * Never.
+ * Throws
+ * An exception with the specified 'cookie' value.
+ */
+extern void bpf_throw(u64 cookie) __ksym;
+
+/* This macro must be used to mark the exception callback corresponding to the
+ * main program. For example:
+ *
+ * int exception_cb(u64 cookie) {
+ * return cookie;
+ * }
+ *
+ * SEC("tc")
+ * __exception_cb(exception_cb)
+ * int main_prog(struct __sk_buff *ctx) {
+ * ...
+ * return TC_ACT_OK;
+ * }
+ *
+ * Here, the exception callback for the main program will be 'exception_cb'.
+ * Note that this attribute can only be used once; specifying multiple
+ * exception callbacks for the main program will lead to a verification error.
+ */
+#define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name)))
+
+#define __bpf_assert_signed(x) _Generic((x), \
+ unsigned long: 0, \
+ unsigned long long: 0, \
+ signed long: 1, \
+ signed long long: 1 \
+)
+
+#define __bpf_assert_check(LHS, op, RHS) \
+ _Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
+ _Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n"); \
+ _Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert"); \
+ _Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression")
+
+#define __bpf_assert(LHS, op, cons, RHS, VAL) \
+ ({ \
+ (void)bpf_throw; \
+ asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw" \
+ : : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : ); \
+ })
+
+#define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign) \
+ ({ \
+ __bpf_assert_check(LHS, op, RHS); \
+ if (__bpf_assert_signed(LHS) && !(supp_sign)) \
+ __bpf_assert(LHS, "s" #op, cons, RHS, VAL); \
+ else \
+ __bpf_assert(LHS, #op, cons, RHS, VAL); \
+ })
+
+#define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign) \
+ ({ \
+ if (sizeof(typeof(RHS)) == 8) { \
+ const typeof(RHS) rhs_var = (RHS); \
+ __bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign); \
+ } else { \
+ __bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign); \
+ } \
+ })
+
+/* Description
+ * Assert that a conditional expression is true.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert(cond) if (!(cond)) bpf_throw(0);
+
+/* Description
+ * Assert that a conditional expression is true.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);
+
+/* Description
+ * Assert that LHS is equal to RHS. This statement updates the known value
+ * of LHS during verification. Note that RHS must be a constant value, and
+ * must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_eq(LHS, RHS) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, ==, RHS, 0, true); \
+ })
+
+/* Description
+ * Assert that LHS is equal to RHS. This statement updates the known value
+ * of LHS during verification. Note that RHS must be a constant value, and
+ * must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_eq_with(LHS, RHS, value) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, ==, RHS, value, true); \
+ })
+
+/* Description
+ * Assert that LHS is less than RHS. This statement updates the known
+ * bounds of LHS during verification. Note that RHS must be a constant
+ * value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_lt(LHS, RHS) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, <, RHS, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is less than RHS. This statement updates the known
+ * bounds of LHS during verification. Note that RHS must be a constant
+ * value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_lt_with(LHS, RHS, value) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, <, RHS, value, false); \
+ })
+
+/* Description
+ * Assert that LHS is greater than RHS. This statement updates the known
+ * bounds of LHS during verification. Note that RHS must be a constant
+ * value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_gt(LHS, RHS) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >, RHS, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is greater than RHS. This statement updates the known
+ * bounds of LHS during verification. Note that RHS must be a constant
+ * value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_gt_with(LHS, RHS, value) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >, RHS, value, false); \
+ })
+
+/* Description
+ * Assert that LHS is less than or equal to RHS. This statement updates the
+ * known bounds of LHS during verification. Note that RHS must be a
+ * constant value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_le(LHS, RHS) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, <=, RHS, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is less than or equal to RHS. This statement updates the
+ * known bounds of LHS during verification. Note that RHS must be a
+ * constant value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_le_with(LHS, RHS, value) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, <=, RHS, value, false); \
+ })
+
+/* Description
+ * Assert that LHS is greater than or equal to RHS. This statement updates
+ * the known bounds of LHS during verification. Note that RHS must be a
+ * constant value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_ge(LHS, RHS) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >=, RHS, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is greater than or equal to RHS. This statement updates
+ * the known bounds of LHS during verification. Note that RHS must be a
+ * constant value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_ge_with(LHS, RHS, value) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >=, RHS, value, false); \
+ })
+
+/* Description
+ * Assert that LHS is in the range [BEG, END] (inclusive of both). This
+ * statement updates the known bounds of LHS during verification. Note
+ * that both BEG and END must be constant values, and must fit within the
+ * data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_range(LHS, BEG, END) \
+ ({ \
+ _Static_assert(BEG <= END, "BEG must be <= END"); \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >=, BEG, 0, false); \
+ __bpf_assert_op(LHS, <=, END, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is in the range [BEG, END] (inclusive of both). This
+ * statement updates the known bounds of LHS during verification. Note
+ * that both BEG and END must be constant values, and must fit within the
+ * data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_range_with(LHS, BEG, END, value) \
+ ({ \
+ _Static_assert(BEG <= END, "BEG must be <= END"); \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >=, BEG, value, false); \
+ __bpf_assert_op(LHS, <=, END, value, false); \
+ })
+
+struct bpf_iter_css_task;
+struct cgroup_subsys_state;
+extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
+ struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym;
+extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym;
+extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym;
+
+struct bpf_iter_task;
+extern int bpf_iter_task_new(struct bpf_iter_task *it,
+ struct task_struct *task, unsigned int flags) __weak __ksym;
+extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym;
+extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym;
+
+struct bpf_iter_css;
+extern int bpf_iter_css_new(struct bpf_iter_css *it,
+ struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym;
+extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
+extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
+
#endif
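
To make the new API surface concrete, here is a minimal sketch (not taken from the selftests; program and type names are illustrative) that combines the percpu allocator, an assertion, and an exception callback. As the DENYLIST updates above show, bpf_throw needs JIT support, so this only runs on architectures whose JIT implements it:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    struct val {
            int counter;
    };

    /* With __exception_cb, this callback's return value becomes the
     * program's return value whenever bpf_throw() fires in main_prog.
     */
    __noinline int exception_cb(u64 cookie)
    {
            return cookie;
    }

    SEC("tc")
    __exception_cb(exception_cb)
    int main_prog(struct __sk_buff *ctx)
    {
            struct val __percpu_kptr *v;
            u64 len = ctx->len;

            /* Throws cookie 0 on failure; on success the verifier now
             * knows 0 <= len <= 0xffff.
             */
            bpf_assert_range(len, 0, 0xffff);

            v = bpf_percpu_obj_new(struct val);
            if (!v)
                    bpf_throw(1); /* program returns 1 via exception_cb */

            bpf_percpu_obj_drop(v);
            return 0;
    }

    char _license[] SEC("license") = "GPL";
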
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index 642dda0e758a..5ca68ff0b59f 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -1,6 +1,8 @@
#ifndef __BPF_KFUNCS__
#define __BPF_KFUNCS__
+struct bpf_sock_addr_kern;
+
/* Description
* Initializes an skb-type dynptr
* Returns
@@ -41,4 +43,16 @@ extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym;
extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym;
extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym;
+/* Description
+ * Modify the address of an AF_UNIX sockaddr.
+ * Returns
+ * -EINVAL if the address size is too big, or 0 if the sockaddr was successfully modified.
+ */
+extern int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
+ const __u8 *sun_path, __u32 sun_path__sz) __ksym;
+
+void *bpf_cast_to_kern_ctx(void *) __ksym;
+
+void *bpf_rdonly_cast(void *obj, __u32 btf_id) __ksym;
+
#endif
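
As a usage sketch (loosely modeled on the new progs/*_unix_prog.c tests, with illustrative names), a cgroup/connect_unix program can combine bpf_cast_to_kern_ctx() with the new kfunc to rewrite an AF_UNIX destination:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include "bpf_kfuncs.h"

    /* Leading NUL selects the abstract unix namespace. */
    static const __u8 new_path[] = "\0rewritten_unix_addr";

    SEC("cgroup/connect_unix")
    int rewrite_connect_unix(struct bpf_sock_addr *ctx)
    {
            struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);

            /* Fails with -EINVAL if the path does not fit in sun_path. */
            bpf_sock_addr_set_sun_path(sa_kern, new_path, sizeof(new_path) - 1);
            return 1; /* allow the connect() */
    }

    char _license[] SEC("license") = "GPL";
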
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index cefc5dd72573..91907b321f91 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -39,9 +39,7 @@ struct bpf_testmod_struct_arg_4 {
int b;
};
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in bpf_testmod.ko BTF");
+__bpf_hook_start();
noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
@@ -138,6 +136,10 @@ __bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
it->cnt = 0;
}
+__bpf_kfunc void bpf_kfunc_common_test(void)
+{
+}
+
struct bpf_testmod_btf_type_tag_1 {
int a;
};
@@ -331,7 +333,7 @@ noinline int bpf_fentry_shadow_test(int a)
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
-__diag_pop();
+__bpf_hook_end();
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.attr = { .name = "bpf_testmod", .mode = 0666, },
@@ -343,6 +345,7 @@ BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
index f5c5b1375c24..7c664dd61059 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
@@ -104,4 +104,6 @@ void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p);
void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p);
void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p);
void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len);
+
+void bpf_kfunc_common_test(void) __ksym;
#endif /* _BPF_TESTMOD_KFUNC_H */
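
Once bpf_testmod.ko is loaded, calling the new module kfunc from a BPF program is a one-liner; a sketch (section choice illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include "../bpf_testmod/bpf_testmod_kfunc.h"

    SEC("tc")
    int call_common_kfunc(struct __sk_buff *ctx)
    {
            bpf_kfunc_common_test();
            return 0;
    }

    char _license[] SEC("license") = "GPL";
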
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index 2caee8423ee0..5b1da2a32ea7 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -49,6 +49,10 @@
snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \
CGROUP_WORK_DIR)
+static __thread bool cgroup_workdir_mounted;
+
+static void __cleanup_cgroup_environment(void);
+
static int __enable_controllers(const char *cgroup_path, const char *controllers)
{
char path[PATH_MAX + 1];
@@ -195,6 +199,11 @@ int setup_cgroup_environment(void)
format_cgroup_path(cgroup_workdir, "");
+ if (mkdir(CGROUP_MOUNT_PATH, 0777) && errno != EEXIST) {
+ log_err("mkdir mount");
+ return 1;
+ }
+
if (unshare(CLONE_NEWNS)) {
log_err("unshare");
return 1;
@@ -209,9 +218,10 @@ int setup_cgroup_environment(void)
log_err("mount cgroup2");
return 1;
}
+ cgroup_workdir_mounted = true;
/* Cleanup existing failed runs, now that the environment is setup */
- cleanup_cgroup_environment();
+ __cleanup_cgroup_environment();
if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
log_err("mkdir cgroup work dir");
@@ -306,10 +316,25 @@ int join_parent_cgroup(const char *relative_path)
}
/**
+ * __cleanup_cgroup_environment() - Delete temporary cgroups
+ *
+ * This is a helper for cleanup_cgroup_environment() that is responsible for
+ * deletion of all temporary cgroups that have been created during the test.
+ */
+static void __cleanup_cgroup_environment(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_cgroup_path(cgroup_workdir, "");
+ join_cgroup_from_top(CGROUP_MOUNT_PATH);
+ nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+}
+
+/**
* cleanup_cgroup_environment() - Cleanup Cgroup Testing Environment
*
* This is an idempotent function to delete all temporary cgroups that
- * have been created during the test, including the cgroup testing work
+ * have been created during the test and unmount the cgroup testing work
* directory.
*
* At call time, it moves the calling process to the root cgroup, and then
@@ -320,11 +345,10 @@ int join_parent_cgroup(const char *relative_path)
*/
void cleanup_cgroup_environment(void)
{
- char cgroup_workdir[PATH_MAX + 1];
-
- format_cgroup_path(cgroup_workdir, "");
- join_cgroup_from_top(CGROUP_MOUNT_PATH);
- nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+ __cleanup_cgroup_environment();
+ if (cgroup_workdir_mounted && umount(CGROUP_MOUNT_PATH))
+ log_err("umount cgroup2");
+ cgroup_workdir_mounted = false;
}
/**
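
For context, a typical test wraps these helpers as sketched below (create_and_get_cgroup() is one of the existing helpers in cgroup_helpers.h). After this change, the final cleanup also unmounts CGROUP_MOUNT_PATH, but only in processes that actually performed the mount:

    #include <unistd.h>
    #include "cgroup_helpers.h"

    static void cgroup_test(void)
    {
            int cgroup_fd;

            if (setup_cgroup_environment()) /* mounts cgroup2, sets the flag */
                    return;
            cgroup_fd = create_and_get_cgroup("/test_cg");
            if (cgroup_fd >= 0) {
                    /* ... attach BPF programs and run checks ... */
                    close(cgroup_fd);
            }
            cleanup_cgroup_environment(); /* deletes cgroups, then unmounts */
    }
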
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 1c7584e8dd9e..3ec5927ec3e5 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -4,6 +4,7 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BPF=y
CONFIG_BPF_EVENTS=y
CONFIG_BPF_JIT=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_BPF_LIRC_MODE2=y
CONFIG_BPF_LSM=y
CONFIG_BPF_STREAM_PARSER=y
@@ -70,6 +71,7 @@ CONFIG_NETFILTER_SYNPROXY=y
CONFIG_NETFILTER_XT_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_TARGET_CT=y
+CONFIG_NETKIT=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_MARK=y
CONFIG_NF_DEFRAG_IPV4=y
@@ -83,3 +85,4 @@ CONFIG_USERFAULTFD=y
CONFIG_VXLAN=y
CONFIG_XDP_SOCKETS=y
CONFIG_XFRM_INTERFACE=y
+CONFIG_VSOCKETS=y
diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
index b650b2e617b8..49a29dbc1910 100644
--- a/tools/testing/selftests/bpf/config.x86_64
+++ b/tools/testing/selftests/bpf/config.x86_64
@@ -20,7 +20,6 @@ CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BONDING=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_BPF_JIT_ALWAYS_ON=y
-CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_BPF_PRELOAD=y
CONFIG_BPF_PRELOAD_UMD=y
CONFIG_BPFILTER=y
@@ -51,7 +50,6 @@ CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_XXHASH=y
CONFIG_DCB=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_CREDENTIALS=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_MEMORY_INIT=y
diff --git a/tools/testing/selftests/bpf/liburandom_read.map b/tools/testing/selftests/bpf/liburandom_read.map
new file mode 100644
index 000000000000..38a97a419a04
--- /dev/null
+++ b/tools/testing/selftests/bpf/liburandom_read.map
@@ -0,0 +1,15 @@
+LIBURANDOM_READ_1.0.0 {
+ global:
+ urandlib_api;
+ urandlib_api_sameoffset;
+ urandlib_read_without_sema;
+ urandlib_read_with_sema;
+ urandlib_read_with_sema_semaphore;
+ local:
+ *;
+};
+
+LIBURANDOM_READ_2.0.0 {
+ global:
+ urandlib_api;
+} LIBURANDOM_READ_1.0.0;
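
A version script only takes effect together with matching symbol definitions on the library side; a hypothetical sketch of binding two implementations to the two version nodes (the function bodies are illustrative; the real urandom_read_lib1.c changes are not shown here):

    /* Old and new implementations of the same exported symbol. */
    int urandlib_api_v1(void) { return 1; }
    int urandlib_api_v2(void) { return 2; }

    /* "@" binds a compat version, "@@" marks the default version. */
    __asm__(".symver urandlib_api_v1, urandlib_api@LIBURANDOM_READ_1.0.0");
    __asm__(".symver urandlib_api_v2, urandlib_api@@LIBURANDOM_READ_2.0.0");
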
diff --git a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c
index 16f1671e4bde..66191ae9863c 100644
--- a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c
+++ b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c
@@ -33,11 +33,11 @@ static void create_inner_maps(enum bpf_map_type map_type,
{
int map_fd, map_index, ret;
__u32 map_key = 0, map_id;
- char map_name[15];
+ char map_name[16];
for (map_index = 0; map_index < OUTER_MAP_ENTRIES; map_index++) {
memset(map_name, 0, sizeof(map_name));
- sprintf(map_name, "inner_map_fd_%d", map_index);
+ snprintf(map_name, sizeof(map_name), "inner_map_fd_%d", map_index);
map_fd = bpf_map_create(map_type, map_name, sizeof(__u32),
sizeof(__u32), 1, NULL);
CHECK(map_fd < 0,
diff --git a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
index 1a9eeefda9a8..8bf497a9843e 100644
--- a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
+++ b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
@@ -326,20 +326,14 @@ static int map_create(__u32 type, const char *name, struct bpf_map_create_opts *
static int create_hash(void)
{
- struct bpf_map_create_opts map_opts = {
- .sz = sizeof(map_opts),
- .map_flags = BPF_F_NO_PREALLOC,
- };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC);
return map_create(BPF_MAP_TYPE_HASH, "hash", &map_opts);
}
static int create_percpu_hash(void)
{
- struct bpf_map_create_opts map_opts = {
- .sz = sizeof(map_opts),
- .map_flags = BPF_F_NO_PREALLOC,
- };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC);
return map_create(BPF_MAP_TYPE_PERCPU_HASH, "percpu_hash", &map_opts);
}
@@ -356,21 +350,17 @@ static int create_percpu_hash_prealloc(void)
static int create_lru_hash(__u32 type, __u32 map_flags)
{
- struct bpf_map_create_opts map_opts = {
- .sz = sizeof(map_opts),
- .map_flags = map_flags,
- };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = map_flags);
return map_create(type, "lru_hash", &map_opts);
}
static int create_hash_of_maps(void)
{
- struct bpf_map_create_opts map_opts = {
- .sz = sizeof(map_opts),
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts,
.map_flags = BPF_F_NO_PREALLOC,
.inner_map_fd = create_small_hash(),
- };
+ );
int ret;
ret = map_create_opts(BPF_MAP_TYPE_HASH_OF_MAPS, "hash_of_maps",
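
These conversions are behavior-preserving: LIBBPF_OPTS (declared in libbpf's libbpf_common.h) zero-initializes the opts struct and fills in the mandatory .sz field, so

    LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC);

is equivalent to the open-coded initializer it replaces:

    struct bpf_map_create_opts map_opts = {
            .sz = sizeof(map_opts),
            .map_flags = BPF_F_NO_PREALLOC,
    };
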
diff --git a/tools/testing/selftests/bpf/netlink_helpers.c b/tools/testing/selftests/bpf/netlink_helpers.c
new file mode 100644
index 000000000000..caf36eb1d032
--- /dev/null
+++ b/tools/testing/selftests/bpf/netlink_helpers.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Taken & modified from iproute2's libnetlink.c
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <time.h>
+#include <sys/socket.h>
+
+#include "netlink_helpers.h"
+
+static int rcvbuf = 1024 * 1024;
+
+void rtnl_close(struct rtnl_handle *rth)
+{
+ if (rth->fd >= 0) {
+ close(rth->fd);
+ rth->fd = -1;
+ }
+}
+
+int rtnl_open_byproto(struct rtnl_handle *rth, unsigned int subscriptions,
+ int protocol)
+{
+ socklen_t addr_len;
+ int sndbuf = 32768;
+ int one = 1;
+
+ memset(rth, 0, sizeof(*rth));
+ rth->proto = protocol;
+ rth->fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol);
+ if (rth->fd < 0) {
+ perror("Cannot open netlink socket");
+ return -1;
+ }
+ if (setsockopt(rth->fd, SOL_SOCKET, SO_SNDBUF,
+ &sndbuf, sizeof(sndbuf)) < 0) {
+ perror("SO_SNDBUF");
+ goto err;
+ }
+ if (setsockopt(rth->fd, SOL_SOCKET, SO_RCVBUF,
+ &rcvbuf, sizeof(rcvbuf)) < 0) {
+ perror("SO_RCVBUF");
+ goto err;
+ }
+
+ /* Older kernels may not support extended ACK reporting */
+ setsockopt(rth->fd, SOL_NETLINK, NETLINK_EXT_ACK,
+ &one, sizeof(one));
+
+ memset(&rth->local, 0, sizeof(rth->local));
+ rth->local.nl_family = AF_NETLINK;
+ rth->local.nl_groups = subscriptions;
+
+ if (bind(rth->fd, (struct sockaddr *)&rth->local,
+ sizeof(rth->local)) < 0) {
+ perror("Cannot bind netlink socket");
+ goto err;
+ }
+ addr_len = sizeof(rth->local);
+ if (getsockname(rth->fd, (struct sockaddr *)&rth->local,
+ &addr_len) < 0) {
+ perror("Cannot getsockname");
+ goto err;
+ }
+ if (addr_len != sizeof(rth->local)) {
+ fprintf(stderr, "Wrong address length %d\n", addr_len);
+ goto err;
+ }
+ if (rth->local.nl_family != AF_NETLINK) {
+ fprintf(stderr, "Wrong address family %d\n",
+ rth->local.nl_family);
+ goto err;
+ }
+ rth->seq = time(NULL);
+ return 0;
+err:
+ rtnl_close(rth);
+ return -1;
+}
+
+int rtnl_open(struct rtnl_handle *rth, unsigned int subscriptions)
+{
+ return rtnl_open_byproto(rth, subscriptions, NETLINK_ROUTE);
+}
+
+static int __rtnl_recvmsg(int fd, struct msghdr *msg, int flags)
+{
+ int len;
+
+ do {
+ len = recvmsg(fd, msg, flags);
+ } while (len < 0 && (errno == EINTR || errno == EAGAIN));
+ if (len < 0) {
+ fprintf(stderr, "netlink receive error %s (%d)\n",
+ strerror(errno), errno);
+ return -errno;
+ }
+ if (len == 0) {
+ fprintf(stderr, "EOF on netlink\n");
+ return -ENODATA;
+ }
+ return len;
+}
+
+static int rtnl_recvmsg(int fd, struct msghdr *msg, char **answer)
+{
+ struct iovec *iov = msg->msg_iov;
+ char *buf;
+ int len;
+
+ iov->iov_base = NULL;
+ iov->iov_len = 0;
+
+ len = __rtnl_recvmsg(fd, msg, MSG_PEEK | MSG_TRUNC);
+ if (len < 0)
+ return len;
+ if (len < 32768)
+ len = 32768;
+ buf = malloc(len);
+ if (!buf) {
+ fprintf(stderr, "malloc error: not enough buffer\n");
+ return -ENOMEM;
+ }
+ iov->iov_base = buf;
+ iov->iov_len = len;
+ len = __rtnl_recvmsg(fd, msg, 0);
+ if (len < 0) {
+ free(buf);
+ return len;
+ }
+ if (answer)
+ *answer = buf;
+ else
+ free(buf);
+ return len;
+}
+
+static void rtnl_talk_error(struct nlmsghdr *h, struct nlmsgerr *err,
+ nl_ext_ack_fn_t errfn)
+{
+ fprintf(stderr, "RTNETLINK answers: %s\n",
+ strerror(-err->error));
+}
+
+static int __rtnl_talk_iov(struct rtnl_handle *rtnl, struct iovec *iov,
+ size_t iovlen, struct nlmsghdr **answer,
+ bool show_rtnl_err, nl_ext_ack_fn_t errfn)
+{
+ struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
+ struct iovec riov;
+ struct msghdr msg = {
+ .msg_name = &nladdr,
+ .msg_namelen = sizeof(nladdr),
+ .msg_iov = iov,
+ .msg_iovlen = iovlen,
+ };
+ unsigned int seq = 0;
+ struct nlmsghdr *h;
+ int i, status;
+ char *buf;
+
+ for (i = 0; i < iovlen; i++) {
+ h = iov[i].iov_base;
+ h->nlmsg_seq = seq = ++rtnl->seq;
+ if (answer == NULL)
+ h->nlmsg_flags |= NLM_F_ACK;
+ }
+ status = sendmsg(rtnl->fd, &msg, 0);
+ if (status < 0) {
+ perror("Cannot talk to rtnetlink");
+ return -1;
+ }
+ /* change msg to use the response iov */
+ msg.msg_iov = &riov;
+ msg.msg_iovlen = 1;
+ i = 0;
+ while (1) {
+next:
+ status = rtnl_recvmsg(rtnl->fd, &msg, &buf);
+ ++i;
+ if (status < 0)
+ return status;
+ if (msg.msg_namelen != sizeof(nladdr)) {
+ fprintf(stderr,
+ "Sender address length == %d!\n",
+ msg.msg_namelen);
+ exit(1);
+ }
+ for (h = (struct nlmsghdr *)buf; status >= sizeof(*h); ) {
+ int len = h->nlmsg_len;
+ int l = len - sizeof(*h);
+
+ if (l < 0 || len > status) {
+ if (msg.msg_flags & MSG_TRUNC) {
+ fprintf(stderr, "Truncated message!\n");
+ free(buf);
+ return -1;
+ }
+ fprintf(stderr,
+ "Malformed message: len=%d!\n",
+ len);
+ exit(1);
+ }
+ if (nladdr.nl_pid != 0 ||
+ h->nlmsg_pid != rtnl->local.nl_pid ||
+ h->nlmsg_seq > seq || h->nlmsg_seq < seq - iovlen) {
+ /* Don't forget to skip that message. */
+ status -= NLMSG_ALIGN(len);
+ h = (struct nlmsghdr *)((char *)h + NLMSG_ALIGN(len));
+ continue;
+ }
+ if (h->nlmsg_type == NLMSG_ERROR) {
+ struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(h);
+ int error = err->error;
+
+ if (l < sizeof(struct nlmsgerr)) {
+ fprintf(stderr, "ERROR truncated\n");
+ free(buf);
+ return -1;
+ }
+ if (error) {
+ errno = -error;
+ if (rtnl->proto != NETLINK_SOCK_DIAG &&
+ show_rtnl_err)
+ rtnl_talk_error(h, err, errfn);
+ }
+ if (i < iovlen) {
+ free(buf);
+ goto next;
+ }
+ if (error) {
+ free(buf);
+ return -i;
+ }
+ if (answer)
+ *answer = (struct nlmsghdr *)buf;
+ else
+ free(buf);
+ return 0;
+ }
+ if (answer) {
+ *answer = (struct nlmsghdr *)buf;
+ return 0;
+ }
+ fprintf(stderr, "Unexpected reply!\n");
+ status -= NLMSG_ALIGN(len);
+ h = (struct nlmsghdr *)((char *)h + NLMSG_ALIGN(len));
+ }
+ free(buf);
+ if (msg.msg_flags & MSG_TRUNC) {
+ fprintf(stderr, "Message truncated!\n");
+ continue;
+ }
+ if (status) {
+ fprintf(stderr, "Remnant of size %d!\n", status);
+ exit(1);
+ }
+ }
+}
+
+static int __rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n,
+ struct nlmsghdr **answer, bool show_rtnl_err,
+ nl_ext_ack_fn_t errfn)
+{
+ struct iovec iov = {
+ .iov_base = n,
+ .iov_len = n->nlmsg_len,
+ };
+
+ return __rtnl_talk_iov(rtnl, &iov, 1, answer, show_rtnl_err, errfn);
+}
+
+int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n,
+ struct nlmsghdr **answer)
+{
+ return __rtnl_talk(rtnl, n, answer, true, NULL);
+}
+
+int addattr(struct nlmsghdr *n, int maxlen, int type)
+{
+ return addattr_l(n, maxlen, type, NULL, 0);
+}
+
+int addattr8(struct nlmsghdr *n, int maxlen, int type, __u8 data)
+{
+ return addattr_l(n, maxlen, type, &data, sizeof(__u8));
+}
+
+int addattr16(struct nlmsghdr *n, int maxlen, int type, __u16 data)
+{
+ return addattr_l(n, maxlen, type, &data, sizeof(__u16));
+}
+
+int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data)
+{
+ return addattr_l(n, maxlen, type, &data, sizeof(__u32));
+}
+
+int addattr64(struct nlmsghdr *n, int maxlen, int type, __u64 data)
+{
+ return addattr_l(n, maxlen, type, &data, sizeof(__u64));
+}
+
+int addattrstrz(struct nlmsghdr *n, int maxlen, int type, const char *str)
+{
+ return addattr_l(n, maxlen, type, str, strlen(str)+1);
+}
+
+int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data,
+ int alen)
+{
+ int len = RTA_LENGTH(alen);
+ struct rtattr *rta;
+
+ if (NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len) > maxlen) {
+ fprintf(stderr, "%s: Message exceeded bound of %d\n",
+ __func__, maxlen);
+ return -1;
+ }
+ rta = NLMSG_TAIL(n);
+ rta->rta_type = type;
+ rta->rta_len = len;
+ if (alen)
+ memcpy(RTA_DATA(rta), data, alen);
+ n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len);
+ return 0;
+}
+
+int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len)
+{
+ if (NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len) > maxlen) {
+ fprintf(stderr, "%s: Message exceeded bound of %d\n",
+ __func__, maxlen);
+ return -1;
+ }
+
+ memcpy(NLMSG_TAIL(n), data, len);
+ memset((void *) NLMSG_TAIL(n) + len, 0, NLMSG_ALIGN(len) - len);
+ n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len);
+ return 0;
+}
+
+struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type)
+{
+ struct rtattr *nest = NLMSG_TAIL(n);
+
+ addattr_l(n, maxlen, type, NULL, 0);
+ return nest;
+}
+
+int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest)
+{
+ nest->rta_len = (void *)NLMSG_TAIL(n) - (void *)nest;
+ return n->nlmsg_len;
+}
diff --git a/tools/testing/selftests/bpf/netlink_helpers.h b/tools/testing/selftests/bpf/netlink_helpers.h
new file mode 100644
index 000000000000..68116818a47e
--- /dev/null
+++ b/tools/testing/selftests/bpf/netlink_helpers.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef NETLINK_HELPERS_H
+#define NETLINK_HELPERS_H
+
+#include <string.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+
+struct rtnl_handle {
+ int fd;
+ struct sockaddr_nl local;
+ struct sockaddr_nl peer;
+ __u32 seq;
+ __u32 dump;
+ int proto;
+ FILE *dump_fp;
+#define RTNL_HANDLE_F_LISTEN_ALL_NSID 0x01
+#define RTNL_HANDLE_F_SUPPRESS_NLERR 0x02
+#define RTNL_HANDLE_F_STRICT_CHK 0x04
+ int flags;
+};
+
+#define NLMSG_TAIL(nmsg) \
+ ((struct rtattr *) (((void *) (nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len)))
+
+typedef int (*nl_ext_ack_fn_t)(const char *errmsg, uint32_t off,
+ const struct nlmsghdr *inner_nlh);
+
+int rtnl_open(struct rtnl_handle *rth, unsigned int subscriptions)
+ __attribute__((warn_unused_result));
+void rtnl_close(struct rtnl_handle *rth);
+int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n,
+ struct nlmsghdr **answer)
+ __attribute__((warn_unused_result));
+
+int addattr(struct nlmsghdr *n, int maxlen, int type);
+int addattr8(struct nlmsghdr *n, int maxlen, int type, __u8 data);
+int addattr16(struct nlmsghdr *n, int maxlen, int type, __u16 data);
+int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data);
+int addattr64(struct nlmsghdr *n, int maxlen, int type, __u64 data);
+int addattrstrz(struct nlmsghdr *n, int maxlen, int type, const char *data);
+int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, int alen);
+int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len);
+struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type);
+int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest);
+#endif /* NETLINK_HELPERS_H */
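
Typical usage is sketched below, along the lines of how the new tc_netkit test creates its device (device name and buffer size are illustrative; rtnl_talk() adds NLM_F_ACK itself when no answer buffer is requested):

    #include <sys/socket.h>
    #include <linux/rtnetlink.h>
    #include "netlink_helpers.h"

    static int create_netkit_dev(void)
    {
            struct {
                    struct nlmsghdr n;
                    struct ifinfomsg i;
                    char buf[1024];
            } req = {
                    .n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
                    .n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL,
                    .n.nlmsg_type = RTM_NEWLINK,
                    .i.ifi_family = AF_UNSPEC,
            };
            struct rtnl_handle rth = { .fd = -1 };
            struct rtattr *linkinfo;
            int err;

            if (rtnl_open(&rth, 0) < 0)
                    return -1;
            addattrstrz(&req.n, sizeof(req), IFLA_IFNAME, "nk0");
            linkinfo = addattr_nest(&req.n, sizeof(req), IFLA_LINKINFO);
            addattrstrz(&req.n, sizeof(req), IFLA_INFO_KIND, "netkit");
            addattr_nest_end(&req.n, linkinfo);
            err = rtnl_talk(&rth, &req.n, NULL);
            rtnl_close(&rth);
            return err;
    }
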
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index da72a3a66230..6db27a9088e9 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -11,6 +11,7 @@
#include <arpa/inet.h>
#include <sys/mount.h>
#include <sys/stat.h>
+#include <sys/un.h>
#include <linux/err.h>
#include <linux/in.h>
@@ -257,6 +258,26 @@ static int connect_fd_to_addr(int fd,
return 0;
}
+int connect_to_addr(const struct sockaddr_storage *addr, socklen_t addrlen, int type)
+{
+ int fd;
+
+ fd = socket(addr->ss_family, type, 0);
+ if (fd < 0) {
+ log_err("Failed to create client socket");
+ return -1;
+ }
+
+ if (connect_fd_to_addr(fd, addr, addrlen, false))
+ goto error_close;
+
+ return fd;
+
+error_close:
+ save_errno_close(fd);
+ return -1;
+}
+
static const struct network_helper_opts default_opts;
int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
@@ -380,6 +401,19 @@ int make_sockaddr(int family, const char *addr_str, __u16 port,
if (len)
*len = sizeof(*sin6);
return 0;
+ } else if (family == AF_UNIX) {
+ /* Note that we always use abstract unix sockets to avoid having
+ * to clean up leftover files.
+ */
+ struct sockaddr_un *sun = (void *)addr;
+
+ memset(addr, 0, sizeof(*sun));
+ sun->sun_family = family;
+ sun->sun_path[0] = 0;
+ strcpy(sun->sun_path + 1, addr_str);
+ if (len)
+ *len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(addr_str);
+ return 0;
}
return -1;
}
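
Paired with connect_to_addr() above, the new AF_UNIX branch lets a test dial an abstract-namespace server in two calls; a sketch with an illustrative address string:

    #include <sys/socket.h>
    #include "network_helpers.h"

    static int dial_abstract_unix(void)
    {
            struct sockaddr_storage addr;
            socklen_t len;

            if (make_sockaddr(AF_UNIX, "my_abstract_socket", 0, &addr, &len))
                    return -1;
            return connect_to_addr(&addr, len, SOCK_STREAM); /* fd or -1 */
    }
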
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index 5eccc67d1a99..34f1200a781b 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -51,6 +51,7 @@ int *start_reuseport_server(int family, int type, const char *addr_str,
__u16 port, int timeout_ms,
unsigned int nr_listens);
void free_fds(int *fds, unsigned int nr_close_fds);
+int connect_to_addr(const struct sockaddr_storage *addr, socklen_t len, int type);
int connect_to_fd(int server_fd, int timeout_ms);
int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts);
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms);
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
index b92770592563..465c1c3a3d3c 100644
--- a/tools/testing/selftests/bpf/prog_tests/align.c
+++ b/tools/testing/selftests/bpf/prog_tests/align.c
@@ -6,6 +6,7 @@
struct bpf_reg_match {
unsigned int line;
+ const char *reg;
const char *match;
};
@@ -39,13 +40,13 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1=ctx(off=0,imm=0)"},
- {0, "R10=fp0"},
- {0, "R3_w=2"},
- {1, "R3_w=4"},
- {2, "R3_w=8"},
- {3, "R3_w=16"},
- {4, "R3_w=32"},
+ {0, "R1", "ctx(off=0,imm=0)"},
+ {0, "R10", "fp0"},
+ {0, "R3_w", "2"},
+ {1, "R3_w", "4"},
+ {2, "R3_w", "8"},
+ {3, "R3_w", "16"},
+ {4, "R3_w", "32"},
},
},
{
@@ -67,19 +68,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1=ctx(off=0,imm=0)"},
- {0, "R10=fp0"},
- {0, "R3_w=1"},
- {1, "R3_w=2"},
- {2, "R3_w=4"},
- {3, "R3_w=8"},
- {4, "R3_w=16"},
- {5, "R3_w=1"},
- {6, "R4_w=32"},
- {7, "R4_w=16"},
- {8, "R4_w=8"},
- {9, "R4_w=4"},
- {10, "R4_w=2"},
+ {0, "R1", "ctx(off=0,imm=0)"},
+ {0, "R10", "fp0"},
+ {0, "R3_w", "1"},
+ {1, "R3_w", "2"},
+ {2, "R3_w", "4"},
+ {3, "R3_w", "8"},
+ {4, "R3_w", "16"},
+ {5, "R3_w", "1"},
+ {6, "R4_w", "32"},
+ {7, "R4_w", "16"},
+ {8, "R4_w", "8"},
+ {9, "R4_w", "4"},
+ {10, "R4_w", "2"},
},
},
{
@@ -96,14 +97,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1=ctx(off=0,imm=0)"},
- {0, "R10=fp0"},
- {0, "R3_w=4"},
- {1, "R3_w=8"},
- {2, "R3_w=10"},
- {3, "R4_w=8"},
- {4, "R4_w=12"},
- {5, "R4_w=14"},
+ {0, "R1", "ctx(off=0,imm=0)"},
+ {0, "R10", "fp0"},
+ {0, "R3_w", "4"},
+ {1, "R3_w", "8"},
+ {2, "R3_w", "10"},
+ {3, "R4_w", "8"},
+ {4, "R4_w", "12"},
+ {5, "R4_w", "14"},
},
},
{
@@ -118,12 +119,12 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1=ctx(off=0,imm=0)"},
- {0, "R10=fp0"},
- {0, "R3_w=7"},
- {1, "R3_w=7"},
- {2, "R3_w=14"},
- {3, "R3_w=56"},
+ {0, "R1", "ctx(off=0,imm=0)"},
+ {0, "R10", "fp0"},
+ {0, "R3_w", "7"},
+ {1, "R3_w", "7"},
+ {2, "R3_w", "14"},
+ {3, "R3_w", "56"},
},
},
@@ -161,19 +162,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {6, "R0_w=pkt(off=8,r=8,imm=0)"},
- {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
- {7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
- {8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
- {9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
- {10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
- {12, "R3_w=pkt_end(off=0,imm=0)"},
- {17, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
- {18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"},
- {19, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
- {20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
- {21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
- {22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
+ {6, "R0_w", "pkt(off=8,r=8,imm=0)"},
+ {6, "R3_w", "var_off=(0x0; 0xff)"},
+ {7, "R3_w", "var_off=(0x0; 0x1fe)"},
+ {8, "R3_w", "var_off=(0x0; 0x3fc)"},
+ {9, "R3_w", "var_off=(0x0; 0x7f8)"},
+ {10, "R3_w", "var_off=(0x0; 0xff0)"},
+ {12, "R3_w", "pkt_end(off=0,imm=0)"},
+ {17, "R4_w", "var_off=(0x0; 0xff)"},
+ {18, "R4_w", "var_off=(0x0; 0x1fe0)"},
+ {19, "R4_w", "var_off=(0x0; 0xff0)"},
+ {20, "R4_w", "var_off=(0x0; 0x7f8)"},
+ {21, "R4_w", "var_off=(0x0; 0x3fc)"},
+ {22, "R4_w", "var_off=(0x0; 0x1fe)"},
},
},
{
@@ -194,16 +195,16 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
- {7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
- {8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
- {9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
- {10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
- {11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
- {12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
- {13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
- {14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
- {15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
+ {6, "R3_w", "var_off=(0x0; 0xff)"},
+ {7, "R4_w", "var_off=(0x0; 0xff)"},
+ {8, "R4_w", "var_off=(0x0; 0xff)"},
+ {9, "R4_w", "var_off=(0x0; 0xff)"},
+ {10, "R4_w", "var_off=(0x0; 0x1fe)"},
+ {11, "R4_w", "var_off=(0x0; 0xff)"},
+ {12, "R4_w", "var_off=(0x0; 0x3fc)"},
+ {13, "R4_w", "var_off=(0x0; 0xff)"},
+ {14, "R4_w", "var_off=(0x0; 0x7f8)"},
+ {15, "R4_w", "var_off=(0x0; 0xff0)"},
},
},
{
@@ -234,14 +235,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {2, "R5_w=pkt(off=0,r=0,imm=0)"},
- {4, "R5_w=pkt(off=14,r=0,imm=0)"},
- {5, "R4_w=pkt(off=14,r=0,imm=0)"},
- {9, "R2=pkt(off=0,r=18,imm=0)"},
- {10, "R5=pkt(off=14,r=18,imm=0)"},
- {10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
- {13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
- {14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
+ {2, "R5_w", "pkt(off=0,r=0,imm=0)"},
+ {4, "R5_w", "pkt(off=14,r=0,imm=0)"},
+ {5, "R4_w", "pkt(off=14,r=0,imm=0)"},
+ {9, "R2", "pkt(off=0,r=18,imm=0)"},
+ {10, "R5", "pkt(off=14,r=18,imm=0)"},
+ {10, "R4_w", "var_off=(0x0; 0xff)"},
+ {13, "R4_w", "var_off=(0x0; 0xffff)"},
+ {14, "R4_w", "var_off=(0x0; 0xffff)"},
},
},
{
@@ -298,20 +299,20 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w=pkt(off=0,r=8,imm=0)"},
- {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+ {6, "R2_w", "pkt(off=0,r=8,imm=0)"},
+ {7, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
*/
- {11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {11, "R5_w", "pkt(id=1,off=14,"},
/* At the time the word size load is performed from R5,
 * its total offset is NET_IP_ALIGN + reg->off (0) +
* reg->aux_off (14) which is 16. Then the variable
* offset is considered using reg->aux_off_align which
* is 4 and meets the load's requirements.
*/
- {15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
- {15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+ {15, "R4", "var_off=(0x0; 0x3fc)"},
+ {15, "R5", "var_off=(0x0; 0x3fc)"},
/* Variable offset is added to R5 packet pointer,
* resulting in auxiliary alignment of 4. To avoid BPF
* verifier's precision backtracking logging
@@ -319,46 +320,46 @@ static struct bpf_align_test tests[] = {
* instruction to validate R5 state. We also check
* that R4 is what it should be in such case.
*/
- {18, "R4_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
- {18, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {18, "R4_w", "var_off=(0x0; 0x3fc)"},
+ {18, "R5_w", "var_off=(0x0; 0x3fc)"},
/* Constant offset is added to R5, resulting in
* reg->off of 14.
*/
- {19, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {19, "R5_w", "pkt(id=2,off=14,"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off
* (14) which is 16. Then the variable offset is 4-byte
* aligned, so the total offset is 4-byte aligned and
* meets the load's requirements.
*/
- {24, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
- {24, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+ {24, "R4", "var_off=(0x0; 0x3fc)"},
+ {24, "R5", "var_off=(0x0; 0x3fc)"},
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
- {26, "R5_w=pkt(off=14,r=8"},
+ {26, "R5_w", "pkt(off=14,r=8,"},
/* Variable offset is added to R5, resulting in a
* variable offset of (4n). See comment for insn #18
* for R4 = R5 trick.
*/
- {28, "R4_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
- {28, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {28, "R4_w", "var_off=(0x0; 0x3fc)"},
+ {28, "R5_w", "var_off=(0x0; 0x3fc)"},
/* Constant is added to R5 again, setting reg->off to 18. */
- {29, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {29, "R5_w", "pkt(id=3,off=18,"},
/* And once more we add a variable; resulting var_off
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
- {31, "R4_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
- {31, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
+ {31, "R4_w", "var_off=(0x0; 0x7fc)"},
+ {31, "R5_w", "var_off=(0x0; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {35, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
- {35, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
+ {35, "R4", "var_off=(0x0; 0x7fc)"},
+ {35, "R5", "var_off=(0x0; 0x7fc)"},
},
},
{
@@ -396,36 +397,36 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w=pkt(off=0,r=8,imm=0)"},
- {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+ {6, "R2_w", "pkt(off=0,r=8,imm=0)"},
+ {7, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
- {8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
+ {8, "R6_w", "var_off=(0x2; 0x7fc)"},
/* Packet pointer has (4n+2) offset */
- {11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
- {12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
+ {11, "R5_w", "var_off=(0x2; 0x7fc)"},
+ {12, "R4", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
+ {15, "R5", "var_off=(0x2; 0x7fc)"},
/* Newly read value in R6 was shifted left by 2, so has
* known alignment of 4.
*/
- {17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+ {17, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Added (4n) to packet pointer's (4n+2) var_off, giving
* another (4n+2).
*/
- {19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
- {20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
+ {19, "R5_w", "var_off=(0x2; 0xffc)"},
+ {20, "R4", "var_off=(0x2; 0xffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
+ {23, "R5", "var_off=(0x2; 0xffc)"},
},
},
{
@@ -458,18 +459,18 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
- {3, "R5_w=pkt_end(off=0,imm=0)"},
+ {3, "R5_w", "pkt_end(off=0,imm=0)"},
/* (ptr - ptr) << 2 == unknown, (4n) */
- {5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
+ {5, "R5_w", "var_off=(0x0; 0xfffffffffffffffc)"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
* the add could overflow.
*/
- {6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+ {6, "R5_w", "var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>=0 */
- {9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {9, "R5", "var_off=(0x2; 0x7ffffffffffffffc)"},
/* packet pointer + nonnegative (4n+2) */
- {11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
- {12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {11, "R6_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
+ {12, "R4_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able
* to overflow if the packet pointer started in the
@@ -477,7 +478,7 @@ static struct bpf_align_test tests[] = {
* So we did not get a 'range' on R6, and the access
* attempt will fail.
*/
- {15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {15, "R6_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
}
},
{
@@ -512,24 +513,23 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w=pkt(off=0,r=8,imm=0)"},
- {8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+ {6, "R2_w", "pkt(off=0,r=8,imm=0)"},
+ {8, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
- {9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
+ {9, "R6_w", "var_off=(0x2; 0x7fc)"},
/* New unknown value in R7 is (4n) */
- {10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+ {10, "R7_w", "var_off=(0x0; 0x3fc)"},
/* Subtracting it from R6 blows our unsigned bounds */
- {11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+ {11, "R6", "var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>= 0 */
- {14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"},
+ {14, "R6", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"},
-
+ {20, "R5", "var_off=(0x2; 0x7fc)"},
},
},
{
@@ -566,23 +566,23 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w=pkt(off=0,r=8,imm=0)"},
- {9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"},
+ {6, "R2_w", "pkt(off=0,r=8,imm=0)"},
+ {9, "R6_w", "var_off=(0x0; 0x3c)"},
/* Adding 14 makes R6 be (4n+2) */
- {10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"},
+ {10, "R6_w", "var_off=(0x2; 0x7c)"},
/* Subtracting from packet pointer overflows ubounds */
- {13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
+ {13, "R5_w", "var_off=(0xffffffffffffff82; 0x7c)"},
/* New unknown value in R7 is (4n), >= 76 */
- {14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
+ {14, "R7_w", "var_off=(0x0; 0x7fc)"},
/* Adding it to packet pointer gives nice bounds again */
- {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
+ {16, "R5_w", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
+ {20, "R5", "var_off=(0x2; 0x7fc)"},
},
},
};
@@ -635,6 +635,7 @@ static int do_test_single(struct bpf_align_test *test)
line_ptr = strtok(bpf_vlog_copy, "\n");
for (i = 0; i < MAX_MATCHES; i++) {
struct bpf_reg_match m = test->matches[i];
+ const char *p;
int tmp;
if (!m.match)
@@ -649,8 +650,8 @@ static int do_test_single(struct bpf_align_test *test)
line_ptr = strtok(NULL, "\n");
}
if (!line_ptr) {
- printf("Failed to find line %u for match: %s\n",
- m.line, m.match);
+ printf("Failed to find line %u for match: %s=%s\n",
+ m.line, m.reg, m.match);
ret = 1;
printf("%s", bpf_vlog);
break;
@@ -667,15 +668,15 @@ static int do_test_single(struct bpf_align_test *test)
* 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0
* 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(off=0,r=8,imm=0) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
*/
- while (!strstr(line_ptr, m.match)) {
+ while (!(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
cur_line = -1;
line_ptr = strtok(NULL, "\n");
sscanf(line_ptr ?: "", "%u: ", &cur_line);
if (!line_ptr || cur_line != m.line)
break;
}
- if (cur_line != m.line || !line_ptr || !strstr(line_ptr, m.match)) {
- printf("Failed to find match %u: %s\n", m.line, m.match);
+ if (cur_line != m.line || !line_ptr || !(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
+ printf("Failed to find match %u: %s=%s\n", m.line, m.reg, m.match);
ret = 1;
printf("%s", bpf_vlog);
break;
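
The align.c rework splits every expectation into a register name plus a substring that must appear after it, so the tests survive the verifier adding or reordering fields in its register dumps. The matching idiom reduced to its core (simplified from the loop above):

#include <stdbool.h>
#include <string.h>

/* Find "reg" first, then require "match" somewhere after it on the same
 * verifier log line, e.g. reg="R3_w", match="var_off=(0x0; 0xff)".
 */
static bool line_matches(const char *line, const char *reg, const char *match)
{
	const char *p = strstr(line, reg);

	return p && strstr(p, match);
}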
diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
index d2d9e965eba5..053f4d6da77a 100644
--- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
@@ -193,8 +193,8 @@ error:
void test_bloom_filter_map(void)
{
- __u32 *rand_vals, nr_rand_vals;
- struct bloom_filter_map *skel;
+ __u32 *rand_vals = NULL, nr_rand_vals = 0;
+ struct bloom_filter_map *skel = NULL;
int err;
test_fail_cases();
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 1f02168103dd..e3498f607b49 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -7,10 +7,10 @@
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
-#include "bpf_iter_task.skel.h"
+#include "bpf_iter_tasks.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
-#include "bpf_iter_task_vma.skel.h"
+#include "bpf_iter_task_vmas.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
@@ -215,12 +215,12 @@ static void *do_nothing_wait(void *arg)
static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
int *num_unknown, int *num_known)
{
- struct bpf_iter_task *skel;
+ struct bpf_iter_tasks *skel;
pthread_t thread_id;
void *ret;
- skel = bpf_iter_task__open_and_load();
- if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
+ skel = bpf_iter_tasks__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
return;
ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");
@@ -239,7 +239,7 @@ static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
"pthread_join");
- bpf_iter_task__destroy(skel);
+ bpf_iter_tasks__destroy(skel);
}
static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
@@ -307,10 +307,10 @@ static void test_task_pidfd(void)
static void test_task_sleepable(void)
{
- struct bpf_iter_task *skel;
+ struct bpf_iter_tasks *skel;
- skel = bpf_iter_task__open_and_load();
- if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
+ skel = bpf_iter_tasks__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
return;
do_dummy_read(skel->progs.dump_task_sleepable);
@@ -320,7 +320,7 @@ static void test_task_sleepable(void)
ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
"num_success_copy_from_user_task");
- bpf_iter_task__destroy(skel);
+ bpf_iter_tasks__destroy(skel);
}
static void test_task_stack(void)
@@ -1399,19 +1399,19 @@ static void str_strip_first_line(char *str)
static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
int err, iter_fd = -1, proc_maps_fd = -1;
- struct bpf_iter_task_vma *skel;
+ struct bpf_iter_task_vmas *skel;
int len, read_size = 4;
char maps_path[64];
- skel = bpf_iter_task_vma__open();
- if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
+ skel = bpf_iter_task_vmas__open();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
return;
skel->bss->pid = getpid();
skel->bss->one_task = opts ? 1 : 0;
- err = bpf_iter_task_vma__load(skel);
- if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
+ err = bpf_iter_task_vmas__load(skel);
+ if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
goto out;
skel->links.proc_maps = bpf_program__attach_iter(
@@ -1462,25 +1462,25 @@ static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
out:
close(proc_maps_fd);
close(iter_fd);
- bpf_iter_task_vma__destroy(skel);
+ bpf_iter_task_vmas__destroy(skel);
}
static void test_task_vma_dead_task(void)
{
- struct bpf_iter_task_vma *skel;
+ struct bpf_iter_task_vmas *skel;
int wstatus, child_pid = -1;
time_t start_tm, cur_tm;
int err, iter_fd = -1;
int wait_sec = 3;
- skel = bpf_iter_task_vma__open();
- if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
+ skel = bpf_iter_task_vmas__open();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
return;
skel->bss->pid = getpid();
- err = bpf_iter_task_vma__load(skel);
- if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
+ err = bpf_iter_task_vmas__load(skel);
+ if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
goto out;
skel->links.proc_maps = bpf_program__attach_iter(
@@ -1533,7 +1533,7 @@ static void test_task_vma_dead_task(void)
out:
waitpid(child_pid, &wstatus, 0);
close(iter_fd);
- bpf_iter_task_vma__destroy(skel);
+ bpf_iter_task_vmas__destroy(skel);
}
void test_bpf_sockmap_map_iter_fd(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index a53c254c6058..4aabeaa525d4 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -185,6 +185,8 @@ static void test_cubic(void)
do_test("bpf_cubic", NULL);
+ ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called");
+
bpf_link__destroy(link);
bpf_cubic__destroy(cubic_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 4e0cdb593318..92d51f377fe5 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -7296,7 +7296,7 @@ static struct btf_dedup_test dedup_tests[] = {
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
- BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */
+ BTF_FUNC_ENC(NAME_NTH(4), 3), /* [4] */
/* tag -> t */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [6] */
@@ -7317,7 +7317,7 @@ static struct btf_dedup_test dedup_tests[] = {
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
- BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */
+ BTF_FUNC_ENC(NAME_NTH(4), 3), /* [4] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [6] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [7] */
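
Both dedup fixes repoint BTF_FUNC_ENC from type [2] to type [3]: a BTF_KIND_FUNC must reference a BTF_KIND_FUNC_PROTO, and in these raw-type arrays the prototype sits at index [3]. A paraphrased view of the layout the fixed tests assume (indices as in the hunks; the entries not shown here are assumptions):

/* [1] base type referenced by the proto's two named args
 * [2] some other type (not a FUNC_PROTO)
 * [3] BTF_FUNC_PROTO_ENC(0, 2) - the two-arg function prototype
 * [4] BTF_FUNC_ENC(NAME_NTH(4), 3) - a FUNC must point at [3], the
 *     FUNC_PROTO; pointing at [2] makes the BTF invalid for dedup
 */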
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
index e02feb5fae97..574d9a0cdc8e 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
@@ -4,6 +4,7 @@
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
+#include "iters_css_task.skel.h"
#include "cgroup_iter.skel.h"
#include "cgroup_helpers.h"
@@ -263,6 +264,35 @@ close_cgrp:
close(cgrp_fd);
}
+static void test_walk_self_only_css_task(void)
+{
+ struct iters_css_task *skel;
+ int err;
+
+ skel = iters_css_task__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.cgroup_id_printer, true);
+
+ err = iters_css_task__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = join_cgroup(cg_path[CHILD2]);
+ if (!ASSERT_OK(err, "join_cgroup"))
+ goto cleanup;
+
+ skel->bss->target_pid = getpid();
+ snprintf(expected_output, sizeof(expected_output),
+ PROLOGUE "%8llu\n" EPILOGUE, cg_id[CHILD2]);
+ read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[CHILD2],
+ BPF_CGROUP_ITER_SELF_ONLY, "test_walk_self_only_css_task");
+ ASSERT_EQ(skel->bss->css_task_cnt, 1, "css_task_cnt");
+cleanup:
+ iters_css_task__destroy(skel);
+}
+
void test_cgroup_iter(void)
{
struct cgroup_iter *skel = NULL;
@@ -293,6 +323,9 @@ void test_cgroup_iter(void)
test_walk_self_only(skel);
if (test__start_subtest("cgroup_iter__dead_self_only"))
test_walk_dead_self_only(skel);
+ if (test__start_subtest("cgroup_iter__self_only_css_task"))
+ test_walk_self_only_css_task();
+
out:
cgroup_iter__destroy(skel);
cleanup_cgroups();
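
The new subtest walks a single cgroup with BPF_CGROUP_ITER_SELF_ONLY and expects exactly one task to be counted. On the BPF side, the css_task open-coded iterator follows the usual new/next/destroy kfunc triple from this series; a hedged sketch of what cgroup_id_printer's counting might look like (the body is illustrative, not the selftest's source):

/* SPDX-License-Identifier: GPL-2.0 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

int css_task_cnt;

SEC("iter/cgroup")
int cgroup_id_printer(struct bpf_iter__cgroup *ctx)
{
	struct cgroup *cgrp = ctx->cgroup;
	struct bpf_iter_css_task it;
	struct task_struct *task;

	if (!cgrp)	/* end-of-iteration call */
		return 0;

	/* count every process attached to this cgroup's own css */
	bpf_iter_css_task_new(&it, &cgrp->self, CSS_TASK_ITER_PROCS);
	while ((task = bpf_iter_css_task_next(&it)))
		css_task_cnt++;
	bpf_iter_css_task_destroy(&it);
	return 0;
}

char _license[] SEC("license") = "GPL";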
diff --git a/tools/testing/selftests/bpf/prog_tests/connect_ping.c b/tools/testing/selftests/bpf/prog_tests/connect_ping.c
index 289218c2216c..40fe571f2fe7 100644
--- a/tools/testing/selftests/bpf/prog_tests/connect_ping.c
+++ b/tools/testing/selftests/bpf/prog_tests/connect_ping.c
@@ -28,9 +28,9 @@ static void subtest(int cgroup_fd, struct connect_ping *skel,
.sin6_family = AF_INET6,
.sin6_addr = IN6ADDR_LOOPBACK_INIT,
};
- struct sockaddr *sa;
+ struct sockaddr *sa = NULL;
socklen_t sa_len;
- int protocol;
+ int protocol = -1;
int sock_fd;
switch (family) {
diff --git a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
index 3b77d8a422db..261228eb68e8 100644
--- a/tools/testing/selftests/bpf/prog_tests/empty_skb.c
+++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
@@ -24,6 +24,7 @@ void test_empty_skb(void)
int *ifindex;
int err;
int ret;
+ int lwt_egress_ret; /* expected retval at lwt/egress */
bool success_on_tc;
} tests[] = {
/* Empty packets are always rejected. */
@@ -57,6 +58,7 @@ void test_empty_skb(void)
.data_size_in = sizeof(eth_hlen),
.ifindex = &veth_ifindex,
.ret = -ERANGE,
+ .lwt_egress_ret = -ERANGE,
.success_on_tc = true,
},
{
@@ -70,6 +72,7 @@ void test_empty_skb(void)
.data_size_in = sizeof(eth_hlen),
.ifindex = &ipip_ifindex,
.ret = -ERANGE,
+ .lwt_egress_ret = -ERANGE,
},
/* ETH_HLEN+1-sized packet should be redirected. */
@@ -79,6 +82,7 @@ void test_empty_skb(void)
.data_in = eth_hlen_pp,
.data_size_in = sizeof(eth_hlen_pp),
.ifindex = &veth_ifindex,
+ .lwt_egress_ret = 1, /* veth_xmit NET_XMIT_DROP */
},
{
.msg = "ipip ETH_HLEN+1 packet ingress",
@@ -108,8 +112,12 @@ void test_empty_skb(void)
for (i = 0; i < ARRAY_SIZE(tests); i++) {
bpf_object__for_each_program(prog, bpf_obj->obj) {
- char buf[128];
+ bool at_egress = strstr(bpf_program__name(prog), "egress") != NULL;
bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2);
+ int expected_ret;
+ char buf[128];
+
+ expected_ret = at_egress && !at_tc ? tests[i].lwt_egress_ret : tests[i].ret;
tattr.data_in = tests[i].data_in;
tattr.data_size_in = tests[i].data_size_in;
@@ -128,7 +136,7 @@ void test_empty_skb(void)
if (at_tc && tests[i].success_on_tc)
ASSERT_GE(bpf_obj->bss->ret, 0, buf);
else
- ASSERT_EQ(bpf_obj->bss->ret, tests[i].ret, buf);
+ ASSERT_EQ(bpf_obj->bss->ret, expected_ret, buf);
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/exceptions.c b/tools/testing/selftests/bpf/prog_tests/exceptions.c
new file mode 100644
index 000000000000..516f4a13013c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/exceptions.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "exceptions.skel.h"
+#include "exceptions_ext.skel.h"
+#include "exceptions_fail.skel.h"
+#include "exceptions_assert.skel.h"
+
+static char log_buf[1024 * 1024];
+
+static void test_exceptions_failure(void)
+{
+ RUN_TESTS(exceptions_fail);
+}
+
+static void test_exceptions_success(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, ropts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct exceptions_ext *eskel = NULL;
+ struct exceptions *skel;
+ int ret;
+
+ skel = exceptions__open();
+ if (!ASSERT_OK_PTR(skel, "exceptions__open"))
+ return;
+
+ ret = exceptions__load(skel);
+ if (!ASSERT_OK(ret, "exceptions__load"))
+ goto done;
+
+ if (!ASSERT_OK(bpf_map_update_elem(bpf_map__fd(skel->maps.jmp_table), &(int){0},
+ &(int){bpf_program__fd(skel->progs.exception_tail_call_target)}, BPF_ANY),
+ "bpf_map_update_elem jmp_table"))
+ goto done;
+
+#define RUN_SUCCESS(_prog, return_val) \
+ if (!test__start_subtest(#_prog)) goto _prog##_##return_val; \
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs._prog), &ropts); \
+ ASSERT_OK(ret, #_prog " prog run ret"); \
+ ASSERT_EQ(ropts.retval, return_val, #_prog " prog run retval"); \
+ _prog##_##return_val:
+
+ RUN_SUCCESS(exception_throw_always_1, 64);
+ RUN_SUCCESS(exception_throw_always_2, 32);
+ RUN_SUCCESS(exception_throw_unwind_1, 16);
+ RUN_SUCCESS(exception_throw_unwind_2, 32);
+ RUN_SUCCESS(exception_throw_default, 0);
+ RUN_SUCCESS(exception_throw_default_value, 5);
+ RUN_SUCCESS(exception_tail_call, 24);
+ RUN_SUCCESS(exception_ext, 0);
+ RUN_SUCCESS(exception_ext_mod_cb_runtime, 35);
+ RUN_SUCCESS(exception_throw_subprog, 1);
+ RUN_SUCCESS(exception_assert_nz_gfunc, 1);
+ RUN_SUCCESS(exception_assert_zero_gfunc, 1);
+ RUN_SUCCESS(exception_assert_neg_gfunc, 1);
+ RUN_SUCCESS(exception_assert_pos_gfunc, 1);
+ RUN_SUCCESS(exception_assert_negeq_gfunc, 1);
+ RUN_SUCCESS(exception_assert_poseq_gfunc, 1);
+ RUN_SUCCESS(exception_assert_nz_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_zero_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_neg_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_pos_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_negeq_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_poseq_gfunc_with, 1);
+ RUN_SUCCESS(exception_bad_assert_nz_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_zero_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_neg_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_pos_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_negeq_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_poseq_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_nz_gfunc_with, 100);
+ RUN_SUCCESS(exception_bad_assert_zero_gfunc_with, 105);
+ RUN_SUCCESS(exception_bad_assert_neg_gfunc_with, 200);
+ RUN_SUCCESS(exception_bad_assert_pos_gfunc_with, 0);
+ RUN_SUCCESS(exception_bad_assert_negeq_gfunc_with, 101);
+ RUN_SUCCESS(exception_bad_assert_poseq_gfunc_with, 99);
+ RUN_SUCCESS(exception_assert_range, 1);
+ RUN_SUCCESS(exception_assert_range_with, 1);
+ RUN_SUCCESS(exception_bad_assert_range, 0);
+ RUN_SUCCESS(exception_bad_assert_range_with, 10);
+
+#define RUN_EXT(load_ret, attach_err, expr, msg, after_link) \
+ { \
+ LIBBPF_OPTS(bpf_object_open_opts, o, .kernel_log_buf = log_buf, \
+ .kernel_log_size = sizeof(log_buf), \
+ .kernel_log_level = 2); \
+ exceptions_ext__destroy(eskel); \
+ eskel = exceptions_ext__open_opts(&o); \
+ struct bpf_program *prog = NULL; \
+ struct bpf_link *link = NULL; \
+ if (!ASSERT_OK_PTR(eskel, "exceptions_ext__open")) \
+ goto done; \
+ (expr); \
+ ASSERT_OK_PTR(bpf_program__name(prog), bpf_program__name(prog)); \
+ if (!ASSERT_EQ(exceptions_ext__load(eskel), load_ret, \
+ "exceptions_ext__load")) { \
+ printf("%s\n", log_buf); \
+ goto done; \
+ } \
+ if (load_ret != 0) { \
+ if (!ASSERT_OK_PTR(strstr(log_buf, msg), "strstr")) { \
+ printf("%s\n", log_buf); \
+ goto done; \
+ } \
+ } \
+ if (!load_ret && attach_err) { \
+ if (!ASSERT_ERR_PTR(link = bpf_program__attach(prog), "attach err")) \
+ goto done; \
+ } else if (!load_ret) { \
+ if (!ASSERT_OK_PTR(link = bpf_program__attach(prog), "attach ok")) \
+ goto done; \
+ (void)(after_link); \
+ bpf_link__destroy(link); \
+ } \
+ }
+
+ if (test__start_subtest("non-throwing fentry -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing fentry -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.throwing_fentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("non-throwing fexit -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing fexit -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.throwing_fexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing extension (with custom cb) -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.throwing_exception_cb_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "Extension programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing extension -> global func in exception_cb"))
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_exception_cb_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod_global"), "set_attach_target"))
+ goto done;
+ }), "", ({ RUN_SUCCESS(exception_ext_mod_cb_runtime, 131); }));
+
+ if (test__start_subtest("throwing extension (with custom cb) -> global func in exception_cb"))
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext),
+ "exception_ext_global"), "set_attach_target"))
+ goto done;
+ }), "", ({ RUN_SUCCESS(exception_ext, 128); }));
+
+ if (test__start_subtest("non-throwing fentry -> non-throwing subprog"))
+ /* non-throwing fentry -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fentry -> non-throwing subprog"))
+ /* throwing fentry -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing fentry -> throwing subprog"))
+ /* non-throwing fentry -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fentry -> throwing subprog"))
+ /* throwing fentry -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing fexit -> non-throwing subprog"))
+ /* non-throwing fexit -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fexit -> non-throwing subprog"))
+ /* throwing fexit -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing fexit -> throwing subprog"))
+ /* non-throwing fexit -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fexit -> throwing subprog"))
+ /* throwing fexit -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+	/* fmod_ret is not allowed for subprogs - check it so we remember to
+	 * handle its throwing specification's compatibility with the target
+	 * once fmod_ret is supported there.
+	 */
+ if (test__start_subtest("non-throwing fmod_ret -> non-throwing subprog"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfmod_ret;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "can't modify return codes of BPF program", 0);
+
+	/* fmod_ret is not allowed for subprogs - check it so we remember to
+	 * handle its throwing specification's compatibility with the target
+	 * once fmod_ret is supported there.
+	 */
+ if (test__start_subtest("non-throwing fmod_ret -> non-throwing global subprog"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfmod_ret;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "global_subprog"), "set_attach_target"))
+ goto done;
+ }), "can't modify return codes of BPF program", 0);
+
+ if (test__start_subtest("non-throwing extension -> non-throwing subprog"))
+ /* non-throwing extension -> non-throwing subprog : BAD (!global) */
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "subprog() is not a global function", 0);
+
+ if (test__start_subtest("non-throwing extension -> throwing subprog"))
+ /* non-throwing extension -> throwing subprog : BAD (!global) */
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "throwing_subprog() is not a global function", 0);
+
+ if (test__start_subtest("non-throwing extension -> non-throwing subprog"))
+ /* non-throwing extension -> non-throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing extension -> throwing global subprog"))
+ /* non-throwing extension -> throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing extension -> throwing global subprog"))
+ /* throwing extension -> throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing extension -> non-throwing global subprog"))
+ /* throwing extension -> non-throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing extension -> main subprog"))
+ /* non-throwing extension -> main subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "exception_throw_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing extension -> main subprog"))
+ /* throwing extension -> main subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "exception_throw_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+done:
+ exceptions_ext__destroy(eskel);
+ exceptions__destroy(skel);
+}
+
+static void test_exceptions_assertions(void)
+{
+ RUN_TESTS(exceptions_assert);
+}
+
+void test_exceptions(void)
+{
+ test_exceptions_success();
+ test_exceptions_failure();
+ test_exceptions_assertions();
+}
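
RUN_SUCCESS relies on token pasting to create a per-subtest label, so a deselected subtest can jump over its own run without needing a surrounding block; for example, RUN_SUCCESS(exception_throw_always_1, 64) expands to roughly:

if (!test__start_subtest("exception_throw_always_1"))
	goto exception_throw_always_1_64;	/* subtest filtered out: skip */
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.exception_throw_always_1), &ropts);
ASSERT_OK(ret, "exception_throw_always_1 prog run ret");
ASSERT_EQ(ropts.retval, 64, "exception_throw_always_1 prog run retval");
exception_throw_always_1_64:;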
diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
index 2fd05649bad1..4ad4cd69152e 100644
--- a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
@@ -11,9 +11,13 @@
#define NS_TEST "fib_lookup_ns"
#define IPV6_IFACE_ADDR "face::face"
+#define IPV6_IFACE_ADDR_SEC "cafe::cafe"
+#define IPV6_ADDR_DST "face::3"
#define IPV6_NUD_FAILED_ADDR "face::1"
#define IPV6_NUD_STALE_ADDR "face::2"
#define IPV4_IFACE_ADDR "10.0.0.254"
+#define IPV4_IFACE_ADDR_SEC "10.1.0.254"
+#define IPV4_ADDR_DST "10.2.0.254"
#define IPV4_NUD_FAILED_ADDR "10.0.0.1"
#define IPV4_NUD_STALE_ADDR "10.0.0.2"
#define IPV4_TBID_ADDR "172.0.0.254"
@@ -31,6 +35,7 @@ struct fib_lookup_test {
const char *desc;
const char *daddr;
int expected_ret;
+ const char *expected_src;
int lookup_flags;
__u32 tbid;
__u8 dmac[6];
@@ -69,6 +74,22 @@ static const struct fib_lookup_test tests[] = {
.daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
.lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100,
.dmac = DMAC_INIT2, },
+ { .desc = "IPv4 set src addr from netdev",
+ .daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_src = IPV4_IFACE_ADDR,
+ .lookup_flags = BPF_FIB_LOOKUP_SRC | BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv6 set src addr from netdev",
+ .daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_src = IPV6_IFACE_ADDR,
+ .lookup_flags = BPF_FIB_LOOKUP_SRC | BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv4 set prefsrc addr from route",
+ .daddr = IPV4_ADDR_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_src = IPV4_IFACE_ADDR_SEC,
+ .lookup_flags = BPF_FIB_LOOKUP_SRC | BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv6 set prefsrc addr route",
+ .daddr = IPV6_ADDR_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_src = IPV6_IFACE_ADDR_SEC,
+ .lookup_flags = BPF_FIB_LOOKUP_SRC | BPF_FIB_LOOKUP_SKIP_NEIGH, },
};
static int ifindex;
@@ -97,6 +118,13 @@ static int setup_netns(void)
SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR);
SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC);
+ /* Setup for prefsrc IP addr selection */
+ SYS(fail, "ip addr add %s/24 dev veth1", IPV4_IFACE_ADDR_SEC);
+ SYS(fail, "ip route add %s/32 dev veth1 src %s", IPV4_ADDR_DST, IPV4_IFACE_ADDR_SEC);
+
+ SYS(fail, "ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR_SEC);
+ SYS(fail, "ip route add %s/128 dev veth1 src %s", IPV6_ADDR_DST, IPV6_IFACE_ADDR_SEC);
+
/* Setup for tbid lookup tests */
SYS(fail, "ip addr add %s/24 dev veth2", IPV4_TBID_ADDR);
SYS(fail, "ip route del %s/24 dev veth2", IPV4_TBID_NET);
@@ -133,9 +161,12 @@ static int set_lookup_params(struct bpf_fib_lookup *params, const struct fib_loo
if (inet_pton(AF_INET6, test->daddr, params->ipv6_dst) == 1) {
params->family = AF_INET6;
- ret = inet_pton(AF_INET6, IPV6_IFACE_ADDR, params->ipv6_src);
- if (!ASSERT_EQ(ret, 1, "inet_pton(IPV6_IFACE_ADDR)"))
- return -1;
+ if (!(test->lookup_flags & BPF_FIB_LOOKUP_SRC)) {
+ ret = inet_pton(AF_INET6, IPV6_IFACE_ADDR, params->ipv6_src);
+ if (!ASSERT_EQ(ret, 1, "inet_pton(IPV6_IFACE_ADDR)"))
+ return -1;
+ }
+
return 0;
}
@@ -143,9 +174,12 @@ static int set_lookup_params(struct bpf_fib_lookup *params, const struct fib_loo
if (!ASSERT_EQ(ret, 1, "convert IP[46] address"))
return -1;
params->family = AF_INET;
- ret = inet_pton(AF_INET, IPV4_IFACE_ADDR, &params->ipv4_src);
- if (!ASSERT_EQ(ret, 1, "inet_pton(IPV4_IFACE_ADDR)"))
- return -1;
+
+ if (!(test->lookup_flags & BPF_FIB_LOOKUP_SRC)) {
+ ret = inet_pton(AF_INET, IPV4_IFACE_ADDR, &params->ipv4_src);
+ if (!ASSERT_EQ(ret, 1, "inet_pton(IPV4_IFACE_ADDR)"))
+ return -1;
+ }
return 0;
}
@@ -156,6 +190,40 @@ static void mac_str(char *b, const __u8 *mac)
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}
+static void assert_src_ip(struct bpf_fib_lookup *fib_params, const char *expected_src)
+{
+ int ret;
+ __u32 src6[4];
+ __be32 src4;
+
+ switch (fib_params->family) {
+ case AF_INET6:
+ ret = inet_pton(AF_INET6, expected_src, src6);
+ ASSERT_EQ(ret, 1, "inet_pton(expected_src)");
+
+ ret = memcmp(src6, fib_params->ipv6_src, sizeof(fib_params->ipv6_src));
+ if (!ASSERT_EQ(ret, 0, "fib_lookup ipv6 src")) {
+ char str_src6[64];
+
+ inet_ntop(AF_INET6, fib_params->ipv6_src, str_src6,
+ sizeof(str_src6));
+ printf("ipv6 expected %s actual %s ", expected_src,
+ str_src6);
+ }
+
+ break;
+ case AF_INET:
+ ret = inet_pton(AF_INET, expected_src, &src4);
+ ASSERT_EQ(ret, 1, "inet_pton(expected_src)");
+
+ ASSERT_EQ(fib_params->ipv4_src, src4, "fib_lookup ipv4 src");
+
+ break;
+ default:
+ PRINT_FAIL("invalid addr family: %d", fib_params->family);
+ }
+}
+
void test_fib_lookup(void)
{
struct bpf_fib_lookup *fib_params;
@@ -207,6 +275,9 @@ void test_fib_lookup(void)
ASSERT_EQ(skel->bss->fib_lookup_ret, tests[i].expected_ret,
"fib_lookup_ret");
+ if (tests[i].expected_src)
+ assert_src_ip(fib_params, tests[i].expected_src);
+
ret = memcmp(tests[i].dmac, fib_params->dmac, sizeof(tests[i].dmac));
if (!ASSERT_EQ(ret, 0, "dmac not match")) {
char expected[18], actual[18];
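
The new cases exercise BPF_FIB_LOOKUP_SRC, which makes bpf_fib_lookup() write the chosen source address (from the route's prefsrc, or else the egress device) back into the params struct instead of requiring the caller to supply one; that is why set_lookup_params() now skips pre-filling the source for those tests. A hedged BPF-side sketch of issuing such a lookup (the program body is illustrative, not the selftest's):

/* SPDX-License-Identifier: GPL-2.0 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

int fib_lookup_ret;
struct bpf_fib_lookup fib_params;

SEC("tc")
int do_lookup(struct __sk_buff *skb)
{
	/* userspace pre-fills family and daddr; on success the kernel
	 * fills ipv4_src/ipv6_src because of BPF_FIB_LOOKUP_SRC
	 */
	fib_lookup_ret = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params),
					BPF_FIB_LOOKUP_SRC |
					BPF_FIB_LOOKUP_SKIP_NEIGH);
	return 0;
}

char _license[] SEC("license") = "GPL";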
diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
index 9d768e083714..97142a4db374 100644
--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
@@ -308,7 +308,7 @@ void test_fill_link_info(void)
return;
/* load kallsyms to compare the addr */
- if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
goto cleanup;
kprobe_addr = ksym_get_addr(KPROBE_FUNC);
diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c
index 10804ae5ae97..bf84d4a1d9ae 100644
--- a/tools/testing/selftests/bpf/prog_tests/iters.c
+++ b/tools/testing/selftests/bpf/prog_tests/iters.c
@@ -1,13 +1,25 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <sys/syscall.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <malloc.h>
+#include <stdlib.h>
#include <test_progs.h>
+#include "cgroup_helpers.h"
#include "iters.skel.h"
#include "iters_state_safety.skel.h"
#include "iters_looping.skel.h"
#include "iters_num.skel.h"
#include "iters_testmod_seq.skel.h"
+#include "iters_task_vma.skel.h"
+#include "iters_task.skel.h"
+#include "iters_css_task.skel.h"
+#include "iters_css.skel.h"
+#include "iters_task_failure.skel.h"
static void subtest_num_iters(void)
{
@@ -90,11 +102,199 @@ cleanup:
iters_testmod_seq__destroy(skel);
}
+static void subtest_task_vma_iters(void)
+{
+ unsigned long start, end, bpf_iter_start, bpf_iter_end;
+ struct iters_task_vma *skel;
+ char rest_of_line[1000];
+ unsigned int seen;
+ FILE *f = NULL;
+ int err;
+
+ skel = iters_task_vma__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ skel->bss->target_pid = getpid();
+
+ err = iters_task_vma__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ getpgid(skel->bss->target_pid);
+ iters_task_vma__detach(skel);
+
+ if (!ASSERT_GT(skel->bss->vmas_seen, 0, "vmas_seen_gt_zero"))
+ goto cleanup;
+
+ f = fopen("/proc/self/maps", "r");
+ if (!ASSERT_OK_PTR(f, "proc_maps_fopen"))
+ goto cleanup;
+
+ seen = 0;
+ while (fscanf(f, "%lx-%lx %[^\n]\n", &start, &end, rest_of_line) == 3) {
+ /* [vsyscall] vma isn't _really_ part of task->mm vmas.
+ * /proc/PID/maps returns it when out of vmas - see get_gate_vma
+ * calls in fs/proc/task_mmu.c
+ */
+ if (strstr(rest_of_line, "[vsyscall]"))
+ continue;
+
+ bpf_iter_start = skel->bss->vm_ranges[seen].vm_start;
+ bpf_iter_end = skel->bss->vm_ranges[seen].vm_end;
+
+ ASSERT_EQ(bpf_iter_start, start, "vma->vm_start match");
+ ASSERT_EQ(bpf_iter_end, end, "vma->vm_end match");
+ seen++;
+ }
+
+ if (!ASSERT_EQ(skel->bss->vmas_seen, seen, "vmas_seen_eq"))
+ goto cleanup;
+
+cleanup:
+ if (f)
+ fclose(f);
+ iters_task_vma__destroy(skel);
+}
+
+static pthread_mutex_t do_nothing_mutex;
+
+static void *do_nothing_wait(void *arg)
+{
+ pthread_mutex_lock(&do_nothing_mutex);
+ pthread_mutex_unlock(&do_nothing_mutex);
+
+ pthread_exit(arg);
+}
+
+#define thread_num 2
+
+static void subtest_task_iters(void)
+{
+ struct iters_task *skel = NULL;
+ pthread_t thread_ids[thread_num];
+ void *ret;
+ int err;
+
+ skel = iters_task__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto cleanup;
+ skel->bss->target_pid = getpid();
+ err = iters_task__attach(skel);
+ if (!ASSERT_OK(err, "iters_task__attach"))
+ goto cleanup;
+ pthread_mutex_lock(&do_nothing_mutex);
+ for (int i = 0; i < thread_num; i++)
+ ASSERT_OK(pthread_create(&thread_ids[i], NULL, &do_nothing_wait, NULL),
+ "pthread_create");
+
+ syscall(SYS_getpgid);
+ iters_task__detach(skel);
+ ASSERT_EQ(skel->bss->procs_cnt, 1, "procs_cnt");
+ ASSERT_EQ(skel->bss->threads_cnt, thread_num + 1, "threads_cnt");
+ ASSERT_EQ(skel->bss->proc_threads_cnt, thread_num + 1, "proc_threads_cnt");
+ pthread_mutex_unlock(&do_nothing_mutex);
+ for (int i = 0; i < thread_num; i++)
+ ASSERT_OK(pthread_join(thread_ids[i], &ret), "pthread_join");
+cleanup:
+ iters_task__destroy(skel);
+}
+
+extern int stack_mprotect(void);
+
+static void subtest_css_task_iters(void)
+{
+ struct iters_css_task *skel = NULL;
+ int err, cg_fd, cg_id;
+ const char *cgrp_path = "/cg1";
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "setup_cgroup_environment"))
+ goto cleanup;
+ cg_fd = create_and_get_cgroup(cgrp_path);
+ if (!ASSERT_GE(cg_fd, 0, "create_and_get_cgroup"))
+ goto cleanup;
+ cg_id = get_cgroup_id(cgrp_path);
+ err = join_cgroup(cgrp_path);
+ if (!ASSERT_OK(err, "join_cgroup"))
+ goto cleanup;
+
+ skel = iters_css_task__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto cleanup;
+
+ skel->bss->target_pid = getpid();
+ skel->bss->cg_id = cg_id;
+ err = iters_css_task__attach(skel);
+ if (!ASSERT_OK(err, "iters_task__attach"))
+ goto cleanup;
+ err = stack_mprotect();
+ if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
+ !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+ goto cleanup;
+ iters_css_task__detach(skel);
+ ASSERT_EQ(skel->bss->css_task_cnt, 1, "css_task_cnt");
+
+cleanup:
+ cleanup_cgroup_environment();
+ iters_css_task__destroy(skel);
+}
+
+static void subtest_css_iters(void)
+{
+ struct iters_css *skel = NULL;
+ struct {
+ const char *path;
+ int fd;
+ } cgs[] = {
+ { "/cg1" },
+ { "/cg1/cg2" },
+ { "/cg1/cg2/cg3" },
+ { "/cg1/cg2/cg3/cg4" },
+ };
+ int err, cg_nr = ARRAY_SIZE(cgs);
+ int i;
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "setup_cgroup_environment"))
+ goto cleanup;
+ for (i = 0; i < cg_nr; i++) {
+ cgs[i].fd = create_and_get_cgroup(cgs[i].path);
+ if (!ASSERT_GE(cgs[i].fd, 0, "create_and_get_cgroup"))
+ goto cleanup;
+ }
+
+ skel = iters_css__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto cleanup;
+
+ skel->bss->target_pid = getpid();
+ skel->bss->root_cg_id = get_cgroup_id(cgs[0].path);
+ skel->bss->leaf_cg_id = get_cgroup_id(cgs[cg_nr - 1].path);
+ err = iters_css__attach(skel);
+
+ if (!ASSERT_OK(err, "iters_task__attach"))
+ goto cleanup;
+
+ syscall(SYS_getpgid);
+ ASSERT_EQ(skel->bss->pre_order_cnt, cg_nr, "pre_order_cnt");
+ ASSERT_EQ(skel->bss->first_cg_id, get_cgroup_id(cgs[0].path), "first_cg_id");
+
+ ASSERT_EQ(skel->bss->post_order_cnt, cg_nr, "post_order_cnt");
+ ASSERT_EQ(skel->bss->last_cg_id, get_cgroup_id(cgs[0].path), "last_cg_id");
+ ASSERT_EQ(skel->bss->tree_high, cg_nr - 1, "tree_high");
+ iters_css__detach(skel);
+cleanup:
+ cleanup_cgroup_environment();
+ iters_css__destroy(skel);
+}
+
void test_iters(void)
{
RUN_TESTS(iters_state_safety);
RUN_TESTS(iters_looping);
RUN_TESTS(iters);
+ RUN_TESTS(iters_css_task);
if (env.has_testmod)
RUN_TESTS(iters_testmod_seq);
@@ -103,4 +303,13 @@ void test_iters(void)
subtest_num_iters();
if (test__start_subtest("testmod_seq"))
subtest_testmod_seq_iters();
+ if (test__start_subtest("task_vma"))
+ subtest_task_vma_iters();
+ if (test__start_subtest("task"))
+ subtest_task_iters();
+ if (test__start_subtest("css_task"))
+ subtest_css_task_iters();
+ if (test__start_subtest("css"))
+ subtest_css_iters();
+ RUN_TESTS(iters_task_failure);
}
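
subtest_task_iters() expects the attached program to fill procs_cnt, threads_cnt and proc_threads_cnt via the task open-coded iterator added in this series. A hedged sketch of the BPF side using the new/next/destroy kfuncs (kfunc and flag names as introduced by the series; the attach point and the NULL-task convention for the all-procs walk are assumptions):

/* SPDX-License-Identifier: GPL-2.0 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

pid_t target_pid;
int procs_cnt, proc_threads_cnt;

SEC("raw_tp/sys_enter")
int count_tasks(const void *ctx)
{
	struct task_struct *cur = bpf_get_current_task_btf();
	struct task_struct *pos;
	struct bpf_iter_task it;

	if (cur->pid != target_pid)
		return 0;

	/* one visit per process in the system; count only the target */
	bpf_iter_task_new(&it, NULL, BPF_TASK_ITER_ALL_PROCS);
	while ((pos = bpf_iter_task_next(&it)))
		if (pos->pid == target_pid)
			procs_cnt++;
	bpf_iter_task_destroy(&it);

	/* every thread of the current process */
	bpf_iter_task_new(&it, cur, BPF_TASK_ITER_PROC_THREADS);
	while ((pos = bpf_iter_task_next(&it)))
		proc_threads_cnt++;
	bpf_iter_task_destroy(&it);
	return 0;
}

char _license[] SEC("license") = "GPL";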
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index 179fe300534f..4041cfa670eb 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -3,6 +3,7 @@
#include "kprobe_multi.skel.h"
#include "trace_helpers.h"
#include "kprobe_multi_empty.skel.h"
+#include "kprobe_multi_override.skel.h"
#include "bpf/libbpf_internal.h"
#include "bpf/hashmap.h"
@@ -453,6 +454,40 @@ cleanup:
}
}
+static void test_attach_override(void)
+{
+ struct kprobe_multi_override *skel = NULL;
+ struct bpf_link *link = NULL;
+
+ skel = kprobe_multi_override__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
+ goto cleanup;
+
+	/* The test_override program calls bpf_override_return, so it should
+	 * fail to attach to the bpf_fentry_test1 function, which is not on
+	 * the error injection list.
+ */
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
+ "bpf_fentry_test1", NULL);
+ if (!ASSERT_ERR_PTR(link, "override_attached_bpf_fentry_test1")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+	/* The should_fail_bio function is on the error injection list, so
+	 * the attach should succeed.
+ */
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
+ "should_fail_bio", NULL);
+ if (!ASSERT_OK_PTR(link, "override_attached_should_fail_bio"))
+ goto cleanup;
+
+ bpf_link__destroy(link);
+
+cleanup:
+ kprobe_multi_override__destroy(skel);
+}
+
void serial_test_kprobe_multi_bench_attach(void)
{
if (test__start_subtest("kernel"))
@@ -480,4 +515,6 @@ void test_kprobe_multi_test(void)
test_attach_api_syms();
if (test__start_subtest("attach_api_fails"))
test_attach_api_fails();
+ if (test__start_subtest("attach_override"))
+ test_attach_override();
}
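
test_attach_override depends on the kernel only accepting bpf_override_return() on functions tagged for error injection (ALLOW_ERROR_INJECTION), which is what makes the bpf_fentry_test1 attach fail while the should_fail_bio attach succeeds. A hedged sketch of what the test_override program might look like (the return value 123 and the body are illustrative):

/* SPDX-License-Identifier: GPL-2.0 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe.multi")
int test_override(struct pt_regs *ctx)
{
	/* force the probed function to return 123; the attach path only
	 * permits this on error-injection-listed functions
	 */
	bpf_override_return(ctx, 123);
	return 0;
}

char _license[] SEC("license") = "GPL";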
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
index 1fbe7e4ac00a..9d03528f05db 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
@@ -4,6 +4,8 @@
#include "trace_helpers.h"
#include "bpf/libbpf_internal.h"
+static struct ksyms *ksyms;
+
static void kprobe_multi_testmod_check(struct kprobe_multi *skel)
{
ASSERT_EQ(skel->bss->kprobe_testmod_test1_result, 1, "kprobe_test1_result");
@@ -50,12 +52,12 @@ static void test_testmod_attach_api_addrs(void)
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
unsigned long long addrs[3];
- addrs[0] = ksym_get_addr("bpf_testmod_fentry_test1");
- ASSERT_NEQ(addrs[0], 0, "ksym_get_addr");
- addrs[1] = ksym_get_addr("bpf_testmod_fentry_test2");
- ASSERT_NEQ(addrs[1], 0, "ksym_get_addr");
- addrs[2] = ksym_get_addr("bpf_testmod_fentry_test3");
- ASSERT_NEQ(addrs[2], 0, "ksym_get_addr");
+ addrs[0] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test1");
+ ASSERT_NEQ(addrs[0], 0, "ksym_get_addr_local");
+ addrs[1] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test2");
+ ASSERT_NEQ(addrs[1], 0, "ksym_get_addr_local");
+ addrs[2] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test3");
+ ASSERT_NEQ(addrs[2], 0, "ksym_get_addr_local");
opts.addrs = (const unsigned long *) addrs;
opts.cnt = ARRAY_SIZE(addrs);
@@ -79,11 +81,15 @@ static void test_testmod_attach_api_syms(void)
void serial_test_kprobe_multi_testmod_test(void)
{
- if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
+ ksyms = load_kallsyms_local();
+ if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
return;
if (test__start_subtest("testmod_attach_api_syms"))
test_testmod_attach_api_syms();
+
if (test__start_subtest("testmod_attach_api_addrs"))
test_testmod_attach_api_addrs();
+
+ free_kallsyms_local(ksyms);
}
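
The switch to the _local kallsyms API threads an explicit struct ksyms handle through each lookup, so a test that needs freshly loaded module symbols gets its own snapshot instead of mutating a shared global table. A usage sketch built only from the calls visible in the hunk above:

#include "trace_helpers.h"

static int lookup_testmod_sym(void)
{
	struct ksyms *ksyms;
	unsigned long long addr;

	ksyms = load_kallsyms_local();	/* private /proc/kallsyms snapshot */
	if (!ksyms)
		return -1;

	addr = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test1");
	free_kallsyms_local(ksyms);
	return addr ? 0 : -1;
}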
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
index efb8bd43653c..c440ea3311ed 100644
--- a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
@@ -142,10 +142,14 @@ static void test_libbpf_bpf_map_type_str(void)
/* Special case for map_type_name BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED
* where it and BPF_MAP_TYPE_CGROUP_STORAGE have the same enum value
* (map_type). For this enum value, libbpf_bpf_map_type_str() picks
- * BPF_MAP_TYPE_CGROUP_STORAGE.
+ * BPF_MAP_TYPE_CGROUP_STORAGE. The same for
+ * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED and
+ * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE.
*/
if (strcmp(map_type_name, "BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED") == 0)
continue;
+ if (strcmp(map_type_name, "BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED") == 0)
+ continue;
ASSERT_STREQ(buf, map_type_name, "exp_str_value");
}
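
The enum aliasing the comment above relies on can be checked at compile time; a small sketch, assuming a UAPI bpf.h new enough to carry the *_DEPRECATED names:

#include <linux/bpf.h>

/* The deprecated and current names share one enum value, so a
 * value-to-string helper can only ever return one of the two names.
 */
_Static_assert(BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED ==
	       BPF_MAP_TYPE_CGROUP_STORAGE, "aliased map types");
_Static_assert(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED ==
	       BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, "aliased map types");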
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 18cf7b17463d..2fb89de63bd2 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -65,8 +65,8 @@ static struct {
{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
- { "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
- { "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
+ { "obj_new_no_composite", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
+ { "obj_new_no_struct", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
{ "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
{ "obj_new_acq", "Unreleased reference id=" },
@@ -94,14 +94,8 @@ static struct {
{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
{ "incorrect_head_off1", "bpf_list_head not found at offset=25" },
{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },
- { "pop_front_off",
- "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
- "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
- "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
- { "pop_back_off",
- "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
- "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
- "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
+ { "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
+ { "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
};
static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
@@ -268,7 +262,7 @@ end:
static void list_and_rb_node_same_struct(bool refcount_field)
{
- int bpf_rb_node_btf_id, bpf_refcount_btf_id, foo_btf_id;
+ int bpf_rb_node_btf_id, bpf_refcount_btf_id = 0, foo_btf_id;
struct btf *btf;
int id, err;
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
index 61333f2a03f9..e9190574e79f 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
@@ -49,7 +49,8 @@ static int open_tuntap(const char *dev_name, bool need_mac)
return -1;
ifr.ifr_flags = IFF_NO_PI | (need_mac ? IFF_TAP : IFF_TUN);
- memcpy(ifr.ifr_name, dev_name, IFNAMSIZ);
+ strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1);
+ ifr.ifr_name[IFNAMSIZ - 1] = '\0';
err = ioctl(fd, TUNSETIFF, &ifr);
if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) {
diff --git a/tools/testing/selftests/bpf/prog_tests/missed.c b/tools/testing/selftests/bpf/prog_tests/missed.c
new file mode 100644
index 000000000000..70d90c43537c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/missed.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "missed_kprobe.skel.h"
+#include "missed_kprobe_recursion.skel.h"
+#include "missed_tp_recursion.skel.h"
+
+/*
+ * Attach a kprobe to bpf_fentry_test1; its BPF program calls the
+ * bpf_kfunc_common_test kfunc, which also has a kprobe. The latter is not
+ * triggered due to the kprobe recursion check, and the missed counter is bumped.
+ */
+static void test_missed_perf_kprobe(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_link_info info = {};
+ struct missed_kprobe *skel;
+ __u32 len = sizeof(info);
+ int err, prog_fd;
+
+ skel = missed_kprobe__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "missed_kprobe__open_and_load"))
+ goto cleanup;
+
+ err = missed_kprobe__attach(skel);
+ if (!ASSERT_OK(err, "missed_kprobe__attach"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ err = bpf_link_get_info_by_fd(bpf_link__fd(skel->links.test2), &info, &len);
+ if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
+ goto cleanup;
+
+ ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "info.type");
+ ASSERT_EQ(info.perf_event.type, BPF_PERF_EVENT_KPROBE, "info.perf_event.type");
+ ASSERT_EQ(info.perf_event.kprobe.missed, 1, "info.perf_event.kprobe.missed");
+
+cleanup:
+ missed_kprobe__destroy(skel);
+}
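
A sketch of what the missed_kprobe BPF side might look like; the program names follow the skeleton fields used above, everything else is assumed:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern void bpf_kfunc_common_test(void) __ksym;

char _license[] SEC("license") = "GPL";

SEC("fentry/bpf_fentry_test1")
int BPF_PROG(trigger)
{
	return 0;
}

SEC("kprobe/bpf_fentry_test1")
int test1(struct pt_regs *ctx)
{
	/* Runs in kprobe context, so the kprobe below is missed. */
	bpf_kfunc_common_test();
	return 0;
}

SEC("kprobe/bpf_kfunc_common_test")
int test2(struct pt_regs *ctx)
{
	return 0;
}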
+
+static __u64 get_missed_count(int fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+ int err;
+
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ return (__u64) -1;
+ return info.recursion_misses;
+}
+
+/*
+ * Attach kprobe.multi to bpf_fentry_test1; its BPF program calls the
+ * bpf_kfunc_common_test kfunc, which has 3 perf event kprobes and 1 kprobe.multi attached.
+ *
+ * Because fprobe (the kprobe.multi attach layer) does not have a strict
+ * recursion check, the kprobes' bpf_prog_active check is hit for test2-5.
+ */
+static void test_missed_kprobe_recursion(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct missed_kprobe_recursion *skel;
+ int err, prog_fd;
+
+ skel = missed_kprobe_recursion__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "missed_kprobe_recursion__open_and_load"))
+ goto cleanup;
+
+ err = missed_kprobe_recursion__attach(skel);
+ if (!ASSERT_OK(err, "missed_kprobe_recursion__attach"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test1)), 0, "test1_recursion_misses");
+ ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test2)), 1, "test2_recursion_misses");
+ ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
+ ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
+ ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test5)), 1, "test5_recursion_misses");
+
+cleanup:
+ missed_kprobe_recursion__destroy(skel);
+}
+
+/*
+ * Attach a kprobe to bpf_fentry_test1; its BPF program calls bpf_printk,
+ * which invokes the bpf_trace_printk tracepoint. The bpf_trace_printk
+ * tracepoint has the test[234] programs attached to it.
+ *
+ * Because the kprobe execution goes through the bpf_prog_active check,
+ * the programs attached to the tracepoint fail the recursion check and
+ * increment the recursion_misses stats.
+ */
+static void test_missed_tp_recursion(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct missed_tp_recursion *skel;
+ int err, prog_fd;
+
+ skel = missed_tp_recursion__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "missed_tp_recursion__open_and_load"))
+ goto cleanup;
+
+ err = missed_tp_recursion__attach(skel);
+ if (!ASSERT_OK(err, "missed_tp_recursion__attach"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test1)), 0, "test1_recursion_misses");
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test2)), 1, "test2_recursion_misses");
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
+
+cleanup:
+ missed_tp_recursion__destroy(skel);
+}
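
Similarly, a sketch of the missed_tp_recursion BPF side (the trigger program, and test3/test4, which mirror test2, are omitted; names are assumed from the skeleton):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("kprobe/bpf_fentry_test1")
int test1(struct pt_regs *ctx)
{
	/* Invokes the bpf_trace_printk tracepoint from kprobe context. */
	bpf_printk("trigger");
	return 0;
}

SEC("tp/bpf_trace/bpf_trace_printk")
int test2(void *ctx)
{
	return 0;
}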
+
+void test_missed(void)
+{
+ if (test__start_subtest("perf_kprobe"))
+ test_missed_perf_kprobe();
+ if (test__start_subtest("kprobe_recursion"))
+ test_missed_kprobe_recursion();
+ if (test__start_subtest("tp_recursion"))
+ test_missed_tp_recursion();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
index c7636e18b1eb..aa9f67eb1c95 100644
--- a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
+++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
@@ -61,6 +61,11 @@ void test_module_fentry_shadow(void)
int link_fd[2] = {};
__s32 btf_id[2] = {};
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
.expected_attach_type = BPF_TRACE_FENTRY,
);
diff --git a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
new file mode 100644
index 000000000000..343da65864d6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "percpu_alloc_array.skel.h"
+#include "percpu_alloc_cgrp_local_storage.skel.h"
+#include "percpu_alloc_fail.skel.h"
+
+static void test_array(void)
+{
+ struct percpu_alloc_array *skel;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = percpu_alloc_array__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.test_array_map_1, true);
+ bpf_program__set_autoload(skel->progs.test_array_map_2, true);
+ bpf_program__set_autoload(skel->progs.test_array_map_3, true);
+ bpf_program__set_autoload(skel->progs.test_array_map_4, true);
+
+ skel->bss->my_pid = getpid();
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ err = percpu_alloc_array__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+ goto out;
+
+ err = percpu_alloc_array__attach(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.test_array_map_1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run array_map 1-4");
+ ASSERT_EQ(topts.retval, 0, "test_run array_map 1-4");
+ ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+ ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+out:
+ percpu_alloc_array__destroy(skel);
+}
+
+static void test_array_sleepable(void)
+{
+ struct percpu_alloc_array *skel;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = percpu_alloc_array__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc__open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.test_array_map_10, true);
+
+ skel->bss->my_pid = getpid();
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ err = percpu_alloc_array__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+ goto out;
+
+ err = percpu_alloc_array__attach(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.test_array_map_10);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run array_map_10");
+ ASSERT_EQ(topts.retval, 0, "test_run array_map_10");
+ ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+ ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+out:
+ percpu_alloc_array__destroy(skel);
+}
+
+static void test_cgrp_local_storage(void)
+{
+ struct percpu_alloc_cgrp_local_storage *skel;
+ int err, cgroup_fd, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ cgroup_fd = test__join_cgroup("/percpu_alloc");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /percpu_alloc"))
+ return;
+
+ skel = percpu_alloc_cgrp_local_storage__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc_cgrp_local_storage__open"))
+ goto close_fd;
+
+ skel->bss->my_pid = getpid();
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ err = percpu_alloc_cgrp_local_storage__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__load"))
+ goto destroy_skel;
+
+ err = percpu_alloc_cgrp_local_storage__attach(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__attach"))
+ goto destroy_skel;
+
+ prog_fd = bpf_program__fd(skel->progs.test_cgrp_local_storage_1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run cgrp_local_storage 1-3");
+ ASSERT_EQ(topts.retval, 0, "test_run cgrp_local_storage 1-3");
+ ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+ ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+
+destroy_skel:
+ percpu_alloc_cgrp_local_storage__destroy(skel);
+close_fd:
+ close(cgroup_fd);
+}
+
+static void test_failure(void)
+{
+ RUN_TESTS(percpu_alloc_fail);
+}
+
+void test_percpu_alloc(void)
+{
+ if (test__start_subtest("array"))
+ test_array();
+ if (test__start_subtest("array_sleepable"))
+ test_array_sleepable();
+ if (test__start_subtest("cgrp_local_storage"))
+ test_cgrp_local_storage();
+ if (test__start_subtest("failure_tests"))
+ test_failure();
+}
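
These subtests drive the new bpf_percpu_obj_new()/bpf_percpu_obj_drop() kfunc pair declared in bpf_experimental.h (see the diffstat). A minimal BPF-side sketch under that assumption; the struct and program below are illustrative, not the actual progs/percpu_alloc_array.c:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_experimental.h"

struct val_t {
	long b, c, d;
};

char _license[] SEC("license") = "GPL";

SEC("?fentry/bpf_fentry_test1")
int BPF_PROG(test_percpu_alloc)
{
	struct val_t __percpu_kptr *p;

	p = bpf_percpu_obj_new(struct val_t);
	if (!p)
		return 0;
	/* A real test would xchg the pointer into a map kptr field and
	 * read per-CPU fields back; drop it to keep the sketch minimal.
	 */
	bpf_percpu_obj_drop(p);
	return 0;
}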
diff --git a/tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c b/tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c
new file mode 100644
index 000000000000..3a2ec3923fca
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <test_progs.h>
+
+#include "preempted_bpf_ma_op.skel.h"
+
+#define ALLOC_THREAD_NR 4
+#define ALLOC_LOOP_NR 512
+
+struct alloc_ctx {
+ /* output */
+ int run_err;
+ /* input */
+ int fd;
+ bool *nomem_err;
+};
+
+static void *run_alloc_prog(void *data)
+{
+ struct alloc_ctx *ctx = data;
+ cpu_set_t cpu_set;
+ int i;
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(0, &cpu_set);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
+
+ for (i = 0; i < ALLOC_LOOP_NR && !*ctx->nomem_err; i++) {
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err;
+
+ err = bpf_prog_test_run_opts(ctx->fd, &topts);
+ ctx->run_err |= err | topts.retval;
+ }
+
+ return NULL;
+}
+
+void test_preempted_bpf_ma_op(void)
+{
+ struct alloc_ctx ctx[ALLOC_THREAD_NR];
+ struct preempted_bpf_ma_op *skel;
+ pthread_t tid[ALLOC_THREAD_NR];
+ int i, err;
+
+ skel = preempted_bpf_ma_op__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ err = preempted_bpf_ma_op__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(ctx); i++) {
+ struct bpf_program *prog;
+ char name[8];
+
+ snprintf(name, sizeof(name), "test%d", i);
+ prog = bpf_object__find_program_by_name(skel->obj, name);
+ if (!ASSERT_OK_PTR(prog, "no test prog"))
+ goto out;
+
+ ctx[i].run_err = 0;
+ ctx[i].fd = bpf_program__fd(prog);
+ ctx[i].nomem_err = &skel->bss->nomem_err;
+ }
+
+ memset(tid, 0, sizeof(tid));
+ for (i = 0; i < ARRAY_SIZE(tid); i++) {
+ err = pthread_create(&tid[i], NULL, run_alloc_prog, &ctx[i]);
+ if (!ASSERT_OK(err, "pthread_create"))
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tid); i++) {
+ if (!tid[i])
+ break;
+ pthread_join(tid[i], NULL);
+ ASSERT_EQ(ctx[i].run_err, 0, "run prog err");
+ }
+
+ ASSERT_FALSE(skel->bss->nomem_err, "ENOMEM");
+out:
+ preempted_bpf_ma_op__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
index 722c5f2a7776..a043af9cd6d9 100644
--- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
@@ -14,7 +14,7 @@ static void test_queue_stack_map_by_type(int type)
int i, err, prog_fd, map_in_fd, map_out_fd;
char file[32], buf[128];
struct bpf_object *obj;
- struct iphdr iph;
+ struct iphdr iph = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index ac104dc652e3..48c5695b7abf 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -91,6 +91,9 @@ static void ringbuf_subtest(void)
int err, cnt, rb_fd;
int page_size = getpagesize();
void *mmap_ptr, *tmp_ptr;
+ struct ring *ring;
+ int map_fd;
+ unsigned long avail_data, ring_size, cons_pos, prod_pos;
skel = test_ringbuf_lskel__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
@@ -162,6 +165,13 @@ static void ringbuf_subtest(void)
trigger_samples();
+ ring = ring_buffer__ring(ringbuf, 0);
+ if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
+ goto cleanup;
+
+ map_fd = ring__map_fd(ring);
+ ASSERT_EQ(map_fd, skel->maps.ringbuf.map_fd, "ring_map_fd");
+
/* 2 submitted + 1 discarded records */
CHECK(skel->bss->avail_data != 3 * rec_sz,
"err_avail_size", "exp %ld, got %ld\n",
@@ -176,6 +186,18 @@ static void ringbuf_subtest(void)
"err_prod_pos", "exp %ld, got %ld\n",
3L * rec_sz, skel->bss->prod_pos);
+ /* verify getting this data directly via the ring object yields the same
+ * results
+ */
+ avail_data = ring__avail_data_size(ring);
+ ASSERT_EQ(avail_data, 3 * rec_sz, "ring_avail_size");
+ ring_size = ring__size(ring);
+ ASSERT_EQ(ring_size, page_size, "ring_ring_size");
+ cons_pos = ring__consumer_pos(ring);
+ ASSERT_EQ(cons_pos, 0, "ring_cons_pos");
+ prod_pos = ring__producer_pos(ring);
+ ASSERT_EQ(prod_pos, 3 * rec_sz, "ring_prod_pos");
+
/* poll for samples */
err = ring_buffer__poll(ringbuf, -1);
@@ -282,6 +304,10 @@ static void ringbuf_subtest(void)
err = ring_buffer__consume(ringbuf);
CHECK(err < 0, "rb_consume", "failed: %d\b", err);
+ /* also consume using ring__consume to make sure it works the same */
+ err = ring__consume(ring);
+ ASSERT_GE(err, 0, "ring_consume");
+
/* 3 rounds, 2 samples each */
cnt = atomic_xchg(&sample_cnt, 0);
CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);
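
The ring__* getters exercised above belong to libbpf's ring introspection API; a self-contained usage sketch, assuming a libbpf that exports ring_buffer__ring() and the ring__* accessors:

#include <stdio.h>
#include <bpf/libbpf.h>

/* Dump position/size info for the first ring of an existing manager and
 * consume any pending records from just that ring.
 */
static int dump_and_consume_ring(struct ring_buffer *rb)
{
	struct ring *r = ring_buffer__ring(rb, 0);

	if (!r) /* index out of range */
		return -1;

	printf("map_fd=%d size=%zu avail=%zu cons=%lu prod=%lu\n",
	       ring__map_fd(r), ring__size(r), ring__avail_data_size(r),
	       ring__consumer_pos(r), ring__producer_pos(r));

	return ring__consume(r);
}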
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
index 1455911d9fcb..58522195081b 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
@@ -42,6 +42,8 @@ void test_ringbuf_multi(void)
{
struct test_ringbuf_multi *skel;
struct ring_buffer *ringbuf = NULL;
+ struct ring *ring_old;
+ struct ring *ring;
int err;
int page_size = getpagesize();
int proto_fd = -1;
@@ -84,11 +86,24 @@ void test_ringbuf_multi(void)
if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
goto cleanup;
+ /* verify ring_buffer__ring returns expected results */
+ ring = ring_buffer__ring(ringbuf, 0);
+ if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
+ goto cleanup;
+ ring_old = ring;
+ ring = ring_buffer__ring(ringbuf, 1);
+ ASSERT_ERR_PTR(ring, "ring_buffer__ring_idx_1");
+
err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2),
process_sample, (void *)(long)2);
if (CHECK(err, "ringbuf_add", "failed to add another ring\n"))
goto cleanup;
+ /* verify adding a new ring didn't invalidate our older pointer */
+ ring = ring_buffer__ring(ringbuf, 0);
+ if (!ASSERT_EQ(ring, ring_old, "ring_buffer__ring_again"))
+ goto cleanup;
+
err = test_ringbuf_multi__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/section_names.c b/tools/testing/selftests/bpf/prog_tests/section_names.c
index 8b571890c57e..c3d78846f31a 100644
--- a/tools/testing/selftests/bpf/prog_tests/section_names.c
+++ b/tools/testing/selftests/bpf/prog_tests/section_names.c
@@ -124,6 +124,11 @@ static struct sec_name_test tests[] = {
{0, BPF_CGROUP_INET6_CONNECT},
},
{
+ "cgroup/connect_unix",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT},
+ {0, BPF_CGROUP_UNIX_CONNECT},
+ },
+ {
"cgroup/sendmsg4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG},
{0, BPF_CGROUP_UDP4_SENDMSG},
@@ -134,6 +139,11 @@ static struct sec_name_test tests[] = {
{0, BPF_CGROUP_UDP6_SENDMSG},
},
{
+ "cgroup/sendmsg_unix",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG},
+ {0, BPF_CGROUP_UNIX_SENDMSG},
+ },
+ {
"cgroup/recvmsg4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
{0, BPF_CGROUP_UDP4_RECVMSG},
@@ -144,6 +154,11 @@ static struct sec_name_test tests[] = {
{0, BPF_CGROUP_UDP6_RECVMSG},
},
{
+ "cgroup/recvmsg_unix",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG},
+ {0, BPF_CGROUP_UNIX_RECVMSG},
+ },
+ {
"cgroup/sysctl",
{0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},
{0, BPF_CGROUP_SYSCTL},
@@ -158,6 +173,36 @@ static struct sec_name_test tests[] = {
{0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT},
{0, BPF_CGROUP_SETSOCKOPT},
},
+ {
+ "cgroup/getpeername4",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME},
+ {0, BPF_CGROUP_INET4_GETPEERNAME},
+ },
+ {
+ "cgroup/getpeername6",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME},
+ {0, BPF_CGROUP_INET6_GETPEERNAME},
+ },
+ {
+ "cgroup/getpeername_unix",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME},
+ {0, BPF_CGROUP_UNIX_GETPEERNAME},
+ },
+ {
+ "cgroup/getsockname4",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME},
+ {0, BPF_CGROUP_INET4_GETSOCKNAME},
+ },
+ {
+ "cgroup/getsockname6",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME},
+ {0, BPF_CGROUP_INET6_GETSOCKNAME},
+ },
+ {
+ "cgroup/getsockname_unix",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME},
+ {0, BPF_CGROUP_UNIX_GETSOCKNAME},
+ },
};
static void test_prog_type_by_name(const struct sec_name_test *test)
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_addr.c b/tools/testing/selftests/bpf/prog_tests/sock_addr.c
new file mode 100644
index 000000000000..5fd617718991
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_addr.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/un.h>
+
+#include "test_progs.h"
+
+#include "connect_unix_prog.skel.h"
+#include "sendmsg_unix_prog.skel.h"
+#include "recvmsg_unix_prog.skel.h"
+#include "getsockname_unix_prog.skel.h"
+#include "getpeername_unix_prog.skel.h"
+#include "network_helpers.h"
+
+#define SERVUN_ADDRESS "bpf_cgroup_unix_test"
+#define SERVUN_REWRITE_ADDRESS "bpf_cgroup_unix_test_rewrite"
+#define SRCUN_ADDRESS "bpf_cgroup_unix_test_src"
+
+enum sock_addr_test_type {
+ SOCK_ADDR_TEST_BIND,
+ SOCK_ADDR_TEST_CONNECT,
+ SOCK_ADDR_TEST_SENDMSG,
+ SOCK_ADDR_TEST_RECVMSG,
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ SOCK_ADDR_TEST_GETPEERNAME,
+};
+
+typedef void *(*load_fn)(int cgroup_fd);
+typedef void (*destroy_fn)(void *skel);
+
+struct sock_addr_test {
+ enum sock_addr_test_type type;
+ const char *name;
+ /* BPF prog properties */
+ load_fn loadfn;
+ destroy_fn destroyfn;
+ /* Socket properties */
+ int socket_family;
+ int socket_type;
+ /* IP:port pairs for BPF prog to override */
+ const char *requested_addr;
+ unsigned short requested_port;
+ const char *expected_addr;
+ unsigned short expected_port;
+ const char *expected_src_addr;
+};
+
+static void *connect_unix_prog_load(int cgroup_fd)
+{
+ struct connect_unix_prog *skel;
+
+ skel = connect_unix_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ skel->links.connect_unix_prog = bpf_program__attach_cgroup(
+ skel->progs.connect_unix_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.connect_unix_prog, "prog_attach"))
+ goto cleanup;
+
+ return skel;
+cleanup:
+ connect_unix_prog__destroy(skel);
+ return NULL;
+}
+
+static void connect_unix_prog_destroy(void *skel)
+{
+ connect_unix_prog__destroy(skel);
+}
+
+static void *sendmsg_unix_prog_load(int cgroup_fd)
+{
+ struct sendmsg_unix_prog *skel;
+
+ skel = sendmsg_unix_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ skel->links.sendmsg_unix_prog = bpf_program__attach_cgroup(
+ skel->progs.sendmsg_unix_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.sendmsg_unix_prog, "prog_attach"))
+ goto cleanup;
+
+ return skel;
+cleanup:
+ sendmsg_unix_prog__destroy(skel);
+ return NULL;
+}
+
+static void sendmsg_unix_prog_destroy(void *skel)
+{
+ sendmsg_unix_prog__destroy(skel);
+}
+
+static void *recvmsg_unix_prog_load(int cgroup_fd)
+{
+ struct recvmsg_unix_prog *skel;
+
+ skel = recvmsg_unix_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ skel->links.recvmsg_unix_prog = bpf_program__attach_cgroup(
+ skel->progs.recvmsg_unix_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.recvmsg_unix_prog, "prog_attach"))
+ goto cleanup;
+
+ return skel;
+cleanup:
+ recvmsg_unix_prog__destroy(skel);
+ return NULL;
+}
+
+static void recvmsg_unix_prog_destroy(void *skel)
+{
+ recvmsg_unix_prog__destroy(skel);
+}
+
+static void *getsockname_unix_prog_load(int cgroup_fd)
+{
+ struct getsockname_unix_prog *skel;
+
+ skel = getsockname_unix_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ skel->links.getsockname_unix_prog = bpf_program__attach_cgroup(
+ skel->progs.getsockname_unix_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.getsockname_unix_prog, "prog_attach"))
+ goto cleanup;
+
+ return skel;
+cleanup:
+ getsockname_unix_prog__destroy(skel);
+ return NULL;
+}
+
+static void getsockname_unix_prog_destroy(void *skel)
+{
+ getsockname_unix_prog__destroy(skel);
+}
+
+static void *getpeername_unix_prog_load(int cgroup_fd)
+{
+ struct getpeername_unix_prog *skel;
+
+ skel = getpeername_unix_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ skel->links.getpeername_unix_prog = bpf_program__attach_cgroup(
+ skel->progs.getpeername_unix_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.getpeername_unix_prog, "prog_attach"))
+ goto cleanup;
+
+ return skel;
+cleanup:
+ getpeername_unix_prog__destroy(skel);
+ return NULL;
+}
+
+static void getpeername_unix_prog_destroy(void *skel)
+{
+ getpeername_unix_prog__destroy(skel);
+}
+
+static struct sock_addr_test tests[] = {
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect_unix",
+ connect_unix_prog_load,
+ connect_unix_prog_destroy,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg_unix",
+ sendmsg_unix_prog_load,
+ sendmsg_unix_prog_destroy,
+ AF_UNIX,
+ SOCK_DGRAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ },
+ {
+ SOCK_ADDR_TEST_RECVMSG,
+ "recvmsg_unix-dgram",
+ recvmsg_unix_prog_load,
+ recvmsg_unix_prog_destroy,
+ AF_UNIX,
+ SOCK_DGRAM,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ SERVUN_ADDRESS,
+ },
+ {
+ SOCK_ADDR_TEST_RECVMSG,
+ "recvmsg_unix-stream",
+ recvmsg_unix_prog_load,
+ recvmsg_unix_prog_destroy,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ SERVUN_ADDRESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname_unix",
+ getsockname_unix_prog_load,
+ getsockname_unix_prog_destroy,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername_unix",
+ getpeername_unix_prog_load,
+ getpeername_unix_prog_destroy,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ },
+};
+
+typedef int (*info_fn)(int, struct sockaddr *, socklen_t *);
+
+static int cmp_addr(const struct sockaddr_storage *addr1, socklen_t addr1_len,
+ const struct sockaddr_storage *addr2, socklen_t addr2_len,
+ bool cmp_port)
+{
+ const struct sockaddr_in *four1, *four2;
+ const struct sockaddr_in6 *six1, *six2;
+ const struct sockaddr_un *un1, *un2;
+
+ if (addr1->ss_family != addr2->ss_family)
+ return -1;
+
+ if (addr1_len != addr2_len)
+ return -1;
+
+ if (addr1->ss_family == AF_INET) {
+ four1 = (const struct sockaddr_in *)addr1;
+ four2 = (const struct sockaddr_in *)addr2;
+ return !((four1->sin_port == four2->sin_port || !cmp_port) &&
+ four1->sin_addr.s_addr == four2->sin_addr.s_addr);
+ } else if (addr1->ss_family == AF_INET6) {
+ six1 = (const struct sockaddr_in6 *)addr1;
+ six2 = (const struct sockaddr_in6 *)addr2;
+ return !((six1->sin6_port == six2->sin6_port || !cmp_port) &&
+ !memcmp(&six1->sin6_addr, &six2->sin6_addr,
+ sizeof(struct in6_addr)));
+ } else if (addr1->ss_family == AF_UNIX) {
+ un1 = (const struct sockaddr_un *)addr1;
+ un2 = (const struct sockaddr_un *)addr2;
+ return memcmp(un1, un2, addr1_len);
+ }
+
+ return -1;
+}
+
+static int cmp_sock_addr(info_fn fn, int sock1,
+ const struct sockaddr_storage *addr2,
+ socklen_t addr2_len, bool cmp_port)
+{
+ struct sockaddr_storage addr1;
+ socklen_t len1 = sizeof(addr1);
+
+ memset(&addr1, 0, len1);
+ if (fn(sock1, (struct sockaddr *)&addr1, (socklen_t *)&len1) != 0)
+ return -1;
+
+ return cmp_addr(&addr1, len1, addr2, addr2_len, cmp_port);
+}
+
+static int cmp_local_addr(int sock1, const struct sockaddr_storage *addr2,
+ socklen_t addr2_len, bool cmp_port)
+{
+ return cmp_sock_addr(getsockname, sock1, addr2, addr2_len, cmp_port);
+}
+
+static int cmp_peer_addr(int sock1, const struct sockaddr_storage *addr2,
+ socklen_t addr2_len, bool cmp_port)
+{
+ return cmp_sock_addr(getpeername, sock1, addr2, addr2_len, cmp_port);
+}
+
+static void test_bind(struct sock_addr_test *test)
+{
+ struct sockaddr_storage expected_addr;
+ socklen_t expected_addr_len = sizeof(struct sockaddr_storage);
+ int serv = -1, client = -1, err;
+
+ serv = start_server(test->socket_family, test->socket_type,
+ test->requested_addr, test->requested_port, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ err = make_sockaddr(test->socket_family,
+ test->expected_addr, test->expected_port,
+ &expected_addr, &expected_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ err = cmp_local_addr(serv, &expected_addr, expected_addr_len, true);
+ if (!ASSERT_EQ(err, 0, "cmp_local_addr"))
+ goto cleanup;
+
+ /* Try to connect to server just in case */
+ client = connect_to_addr(&expected_addr, expected_addr_len, test->socket_type);
+ if (!ASSERT_GE(client, 0, "connect_to_addr"))
+ goto cleanup;
+
+cleanup:
+ if (client != -1)
+ close(client);
+ if (serv != -1)
+ close(serv);
+}
+
+static void test_connect(struct sock_addr_test *test)
+{
+ struct sockaddr_storage addr, expected_addr, expected_src_addr;
+ socklen_t addr_len = sizeof(struct sockaddr_storage),
+ expected_addr_len = sizeof(struct sockaddr_storage),
+ expected_src_addr_len = sizeof(struct sockaddr_storage);
+ int serv = -1, client = -1, err;
+
+ serv = start_server(test->socket_family, test->socket_type,
+ test->expected_addr, test->expected_port, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ err = make_sockaddr(test->socket_family, test->requested_addr, test->requested_port,
+ &addr, &addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ client = connect_to_addr(&addr, addr_len, test->socket_type);
+ if (!ASSERT_GE(client, 0, "connect_to_addr"))
+ goto cleanup;
+
+ err = make_sockaddr(test->socket_family, test->expected_addr, test->expected_port,
+ &expected_addr, &expected_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ if (test->expected_src_addr) {
+ err = make_sockaddr(test->socket_family, test->expected_src_addr, 0,
+ &expected_src_addr, &expected_src_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+ }
+
+ err = cmp_peer_addr(client, &expected_addr, expected_addr_len, true);
+ if (!ASSERT_EQ(err, 0, "cmp_peer_addr"))
+ goto cleanup;
+
+ if (test->expected_src_addr) {
+ err = cmp_local_addr(client, &expected_src_addr, expected_src_addr_len, false);
+ if (!ASSERT_EQ(err, 0, "cmp_local_addr"))
+ goto cleanup;
+ }
+cleanup:
+ if (client != -1)
+ close(client);
+ if (serv != -1)
+ close(serv);
+}
+
+static void test_xmsg(struct sock_addr_test *test)
+{
+ struct sockaddr_storage addr, src_addr;
+ socklen_t addr_len = sizeof(struct sockaddr_storage),
+ src_addr_len = sizeof(struct sockaddr_storage);
+ struct msghdr hdr;
+ struct iovec iov;
+ char data = 'a';
+ int serv = -1, client = -1, err;
+
+ /* Unlike the other tests, here we test that we can rewrite the src addr
+ * with a recvmsg() hook.
+ */
+
+ serv = start_server(test->socket_family, test->socket_type,
+ test->expected_addr, test->expected_port, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ client = socket(test->socket_family, test->socket_type, 0);
+ if (!ASSERT_GE(client, 0, "socket"))
+ goto cleanup;
+
+ /* AF_UNIX sockets have to be bound to something to trigger the recvmsg bpf program. */
+ if (test->socket_family == AF_UNIX) {
+ err = make_sockaddr(AF_UNIX, SRCUN_ADDRESS, 0, &src_addr, &src_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ err = bind(client, (const struct sockaddr *) &src_addr, src_addr_len);
+ if (!ASSERT_OK(err, "bind"))
+ goto cleanup;
+ }
+
+ err = make_sockaddr(test->socket_family, test->requested_addr, test->requested_port,
+ &addr, &addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ if (test->socket_type == SOCK_DGRAM) {
+ memset(&iov, 0, sizeof(iov));
+ iov.iov_base = &data;
+ iov.iov_len = sizeof(data);
+
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.msg_name = (void *)&addr;
+ hdr.msg_namelen = addr_len;
+ hdr.msg_iov = &iov;
+ hdr.msg_iovlen = 1;
+
+ err = sendmsg(client, &hdr, 0);
+ if (!ASSERT_EQ(err, sizeof(data), "sendmsg"))
+ goto cleanup;
+ } else {
+ /* Testing with connection-oriented sockets is only valid for
+ * recvmsg() tests.
+ */
+ if (!ASSERT_EQ(test->type, SOCK_ADDR_TEST_RECVMSG, "recvmsg"))
+ goto cleanup;
+
+ err = connect(client, (const struct sockaddr *)&addr, addr_len);
+ if (!ASSERT_OK(err, "connect"))
+ goto cleanup;
+
+ err = send(client, &data, sizeof(data), 0);
+ if (!ASSERT_EQ(err, sizeof(data), "send"))
+ goto cleanup;
+
+ err = listen(serv, 0);
+ if (!ASSERT_OK(err, "listen"))
+ goto cleanup;
+
+ err = accept(serv, NULL, NULL);
+ if (!ASSERT_GE(err, 0, "accept"))
+ goto cleanup;
+
+ close(serv);
+ serv = err;
+ }
+
+ addr_len = src_addr_len = sizeof(struct sockaddr_storage);
+
+ err = recvfrom(serv, &data, sizeof(data), 0, (struct sockaddr *) &src_addr, &src_addr_len);
+ if (!ASSERT_EQ(err, sizeof(data), "recvfrom"))
+ goto cleanup;
+
+ ASSERT_EQ(data, 'a', "data mismatch");
+
+ if (test->expected_src_addr) {
+ err = make_sockaddr(test->socket_family, test->expected_src_addr, 0,
+ &addr, &addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ err = cmp_addr(&src_addr, src_addr_len, &addr, addr_len, false);
+ if (!ASSERT_EQ(err, 0, "cmp_addr"))
+ goto cleanup;
+ }
+
+cleanup:
+ if (client != -1)
+ close(client);
+ if (serv != -1)
+ close(serv);
+}
+
+static void test_getsockname(struct sock_addr_test *test)
+{
+ struct sockaddr_storage expected_addr;
+ socklen_t expected_addr_len = sizeof(struct sockaddr_storage);
+ int serv = -1, err;
+
+ serv = start_server(test->socket_family, test->socket_type,
+ test->requested_addr, test->requested_port, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ err = make_sockaddr(test->socket_family,
+ test->expected_addr, test->expected_port,
+ &expected_addr, &expected_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ err = cmp_local_addr(serv, &expected_addr, expected_addr_len, true);
+ if (!ASSERT_EQ(err, 0, "cmp_local_addr"))
+ goto cleanup;
+
+cleanup:
+ if (serv != -1)
+ close(serv);
+}
+
+static void test_getpeername(struct sock_addr_test *test)
+{
+ struct sockaddr_storage addr, expected_addr;
+ socklen_t addr_len = sizeof(struct sockaddr_storage),
+ expected_addr_len = sizeof(struct sockaddr_storage);
+ int serv = -1, client = -1, err;
+
+ serv = start_server(test->socket_family, test->socket_type,
+ test->requested_addr, test->requested_port, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ err = make_sockaddr(test->socket_family, test->requested_addr, test->requested_port,
+ &addr, &addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ client = connect_to_addr(&addr, addr_len, test->socket_type);
+ if (!ASSERT_GE(client, 0, "connect_to_addr"))
+ goto cleanup;
+
+ err = make_sockaddr(test->socket_family, test->expected_addr, test->expected_port,
+ &expected_addr, &expected_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ err = cmp_peer_addr(client, &expected_addr, expected_addr_len, true);
+ if (!ASSERT_EQ(err, 0, "cmp_peer_addr"))
+ goto cleanup;
+
+cleanup:
+ if (client != -1)
+ close(client);
+ if (serv != -1)
+ close(serv);
+}
+
+void test_sock_addr(void)
+{
+ int cgroup_fd = -1;
+ void *skel;
+
+ cgroup_fd = test__join_cgroup("/sock_addr");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+ goto cleanup;
+
+ for (size_t i = 0; i < ARRAY_SIZE(tests); ++i) {
+ struct sock_addr_test *test = &tests[i];
+
+ if (!test__start_subtest(test->name))
+ continue;
+
+ skel = test->loadfn(cgroup_fd);
+ if (!skel)
+ continue;
+
+ switch (test->type) {
+ /* Not exercised yet but we leave this code here for when the
+ * INET and INET6 sockaddr tests are migrated to this file in
+ * the future.
+ */
+ case SOCK_ADDR_TEST_BIND:
+ test_bind(test);
+ break;
+ case SOCK_ADDR_TEST_CONNECT:
+ test_connect(test);
+ break;
+ case SOCK_ADDR_TEST_SENDMSG:
+ case SOCK_ADDR_TEST_RECVMSG:
+ test_xmsg(test);
+ break;
+ case SOCK_ADDR_TEST_GETSOCKNAME:
+ test_getsockname(test);
+ break;
+ case SOCK_ADDR_TEST_GETPEERNAME:
+ test_getpeername(test);
+ break;
+ default:
+ ASSERT_TRUE(false, "Unknown sock addr test type");
+ break;
+ }
+
+ test->destroyfn(skel);
+ }
+
+cleanup:
+ if (cgroup_fd >= 0)
+ close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index 064cc5e8d9ad..f75f84d0b3d7 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -359,7 +359,7 @@ out:
static void test_sockmap_skb_verdict_shutdown(void)
{
struct epoll_event ev, events[MAX_EVENTS];
- int n, err, map, verdict, s, c1, p1;
+ int n, err, map, verdict, s, c1 = -1, p1 = -1;
struct test_sockmap_pass_prog *skel;
int epollfd;
int zero = 0;
@@ -414,9 +414,9 @@ out:
static void test_sockmap_skb_verdict_fionread(bool pass_prog)
{
int expected, zero = 0, sent, recvd, avail;
- int err, map, verdict, s, c0, c1, p0, p1;
- struct test_sockmap_pass_prog *pass;
- struct test_sockmap_drop_prog *drop;
+ int err, map, verdict, s, c0 = -1, c1 = -1, p0 = -1, p1 = -1;
+ struct test_sockmap_pass_prog *pass = NULL;
+ struct test_sockmap_drop_prog *drop = NULL;
char buf[256] = "0123456789";
if (pass_prog) {
@@ -475,6 +475,55 @@ out:
test_sockmap_drop_prog__destroy(drop);
}
+static void test_sockmap_skb_verdict_peek(void)
+{
+ int err, map, verdict, s, c1, p1, zero = 0, sent, recvd, avail;
+ struct test_sockmap_pass_prog *pass;
+ char snd[256] = "0123456789";
+ char rcv[256] = "0";
+
+ pass = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(pass, "open_and_load"))
+ return;
+ verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
+ map = bpf_map__fd(pass->maps.sock_map_rx);
+
+ err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+
+ s = socket_loopback(AF_INET, SOCK_STREAM);
+ if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
+ goto out;
+
+ err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
+ if (!ASSERT_OK(err, "create_pairs(s)"))
+ goto out;
+
+ err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
+ goto out_close;
+
+ sent = xsend(p1, snd, sizeof(snd), 0);
+ ASSERT_EQ(sent, sizeof(snd), "xsend(p1)");
+ recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK);
+ ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
+ err = ioctl(c1, FIONREAD, &avail);
+ ASSERT_OK(err, "ioctl(FIONREAD) error");
+ ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)");
+ recvd = recv(c1, rcv, sizeof(rcv), 0);
+ ASSERT_EQ(recvd, sizeof(rcv), "recv(p0)");
+ err = ioctl(c1, FIONREAD, &avail);
+ ASSERT_OK(err, "ioctl(FIONREAD) error");
+ ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)");
+
+out_close:
+ close(c1);
+ close(p1);
+out:
+ test_sockmap_pass_prog__destroy(pass);
+}
+
void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))
@@ -515,4 +564,6 @@ void test_sockmap_basic(void)
test_sockmap_skb_verdict_fionread(true);
if (test__start_subtest("sockmap skb_verdict fionread on drop"))
test_sockmap_skb_verdict_fionread(false);
+ if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
+ test_sockmap_skb_verdict_peek();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
index 36d829a65aa4..e880f97bc44d 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
@@ -378,7 +378,7 @@ static inline int enable_reuseport(int s, int progfd)
static inline int socket_loopback_reuseport(int family, int sotype, int progfd)
{
struct sockaddr_storage addr;
- socklen_t len;
+ socklen_t len = 0;
int err, s;
init_addr_loopback(family, &addr, &len);
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index 8df8cbb447f1..a92807bfcd13 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -73,7 +73,7 @@ static void test_insert_bound(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
- socklen_t len;
+ socklen_t len = 0;
u32 key = 0;
u64 value;
int err, s;
@@ -871,7 +871,7 @@ static void test_msg_redir_to_listening(struct test_sockmap_listen *skel,
static void redir_partial(int family, int sotype, int sock_map, int parser_map)
{
- int s, c0, c1, p0, p1;
+ int s, c0 = -1, c1 = -1, p0 = -1, p1 = -1;
int err, n, key, value;
char buf[] = "abc";
@@ -1336,53 +1336,66 @@ static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
}
}
-static void unix_redir_to_connected(int sotype, int sock_mapfd,
- int verd_mapfd, enum redir_mode mode)
+static void pairs_redir_to_connected(int cli0, int peer0, int cli1, int peer1,
+ int sock_mapfd, int nop_mapfd,
+ int verd_mapfd, enum redir_mode mode)
{
const char *log_prefix = redir_mode_str(mode);
- int c0, c1, p0, p1;
unsigned int pass;
int err, n;
- int sfd[2];
u32 key;
char b;
zero_verdict_count(verd_mapfd);
- if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd))
+ err = add_to_sockmap(sock_mapfd, peer0, peer1);
+ if (err)
return;
- c0 = sfd[0], p0 = sfd[1];
- if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd))
- goto close0;
- c1 = sfd[0], p1 = sfd[1];
-
- err = add_to_sockmap(sock_mapfd, p0, p1);
- if (err)
- goto close;
+ if (nop_mapfd >= 0) {
+ err = add_to_sockmap(nop_mapfd, cli0, cli1);
+ if (err)
+ return;
+ }
- n = write(c1, "a", 1);
+ n = write(cli1, "a", 1);
if (n < 0)
FAIL_ERRNO("%s: write", log_prefix);
if (n == 0)
FAIL("%s: incomplete write", log_prefix);
if (n < 1)
- goto close;
+ return;
key = SK_PASS;
err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
if (err)
- goto close;
+ return;
if (pass != 1)
FAIL("%s: want pass count 1, have %d", log_prefix, pass);
- n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
+ n = recv_timeout(mode == REDIR_INGRESS ? peer0 : cli0, &b, 1, 0, IO_TIMEOUT_SEC);
if (n < 0)
FAIL_ERRNO("%s: recv_timeout", log_prefix);
if (n == 0)
FAIL("%s: incomplete recv", log_prefix);
+}
+
+static void unix_redir_to_connected(int sotype, int sock_mapfd,
+ int verd_mapfd, enum redir_mode mode)
+{
+ int c0, c1, p0, p1;
+ int sfd[2];
+
+ if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd))
+ return;
+ c0 = sfd[0], p0 = sfd[1];
+
+ if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd))
+ goto close0;
+ c1 = sfd[0], p1 = sfd[1];
+
+ pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode);
-close:
xclose(c1);
xclose(p1);
close0:
@@ -1661,14 +1674,8 @@ close_peer0:
static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd,
enum redir_mode mode)
{
- const char *log_prefix = redir_mode_str(mode);
int c0, c1, p0, p1;
- unsigned int pass;
- int err, n;
- u32 key;
- char b;
-
- zero_verdict_count(verd_mapfd);
+ int err;
err = inet_socketpair(family, SOCK_DGRAM, &p0, &c0);
if (err)
@@ -1677,32 +1684,8 @@ static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd,
if (err)
goto close_cli0;
- err = add_to_sockmap(sock_mapfd, p0, p1);
- if (err)
- goto close_cli1;
-
- n = write(c1, "a", 1);
- if (n < 0)
- FAIL_ERRNO("%s: write", log_prefix);
- if (n == 0)
- FAIL("%s: incomplete write", log_prefix);
- if (n < 1)
- goto close_cli1;
-
- key = SK_PASS;
- err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
- if (err)
- goto close_cli1;
- if (pass != 1)
- FAIL("%s: want pass count 1, have %d", log_prefix, pass);
-
- n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
- if (n < 0)
- FAIL_ERRNO("%s: recv_timeout", log_prefix);
- if (n == 0)
- FAIL("%s: incomplete recv", log_prefix);
+ pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode);
-close_cli1:
xclose(c1);
xclose(p1);
close_cli0:
@@ -1747,15 +1730,9 @@ static void test_udp_redir(struct test_sockmap_listen *skel, struct bpf_map *map
static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd,
int verd_mapfd, enum redir_mode mode)
{
- const char *log_prefix = redir_mode_str(mode);
int c0, c1, p0, p1;
- unsigned int pass;
- int err, n;
int sfd[2];
- u32 key;
- char b;
-
- zero_verdict_count(verd_mapfd);
+ int err;
if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd))
return;
@@ -1765,32 +1742,8 @@ static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd,
if (err)
goto close;
- err = add_to_sockmap(sock_mapfd, p0, p1);
- if (err)
- goto close_cli1;
+ pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode);
- n = write(c1, "a", 1);
- if (n < 0)
- FAIL_ERRNO("%s: write", log_prefix);
- if (n == 0)
- FAIL("%s: incomplete write", log_prefix);
- if (n < 1)
- goto close_cli1;
-
- key = SK_PASS;
- err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
- if (err)
- goto close_cli1;
- if (pass != 1)
- FAIL("%s: want pass count 1, have %d", log_prefix, pass);
-
- n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
- if (n < 0)
- FAIL_ERRNO("%s: recv_timeout", log_prefix);
- if (n == 0)
- FAIL("%s: incomplete recv", log_prefix);
-
-close_cli1:
xclose(c1);
xclose(p1);
close:
@@ -1824,18 +1777,14 @@ static void inet_unix_skb_redir_to_connected(struct test_sockmap_listen *skel,
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
}
-static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
- int verd_mapfd, enum redir_mode mode)
+static void unix_inet_redir_to_connected(int family, int type,
+ int sock_mapfd, int nop_mapfd,
+ int verd_mapfd,
+ enum redir_mode mode)
{
- const char *log_prefix = redir_mode_str(mode);
int c0, c1, p0, p1;
- unsigned int pass;
- int err, n;
int sfd[2];
- u32 key;
- char b;
-
- zero_verdict_count(verd_mapfd);
+ int err;
err = inet_socketpair(family, SOCK_DGRAM, &p0, &c0);
if (err)
@@ -1845,32 +1794,9 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
goto close_cli0;
c1 = sfd[0], p1 = sfd[1];
- err = add_to_sockmap(sock_mapfd, p0, p1);
- if (err)
- goto close;
-
- n = write(c1, "a", 1);
- if (n < 0)
- FAIL_ERRNO("%s: write", log_prefix);
- if (n == 0)
- FAIL("%s: incomplete write", log_prefix);
- if (n < 1)
- goto close;
+ pairs_redir_to_connected(c0, p0, c1, p1,
+ sock_mapfd, nop_mapfd, verd_mapfd, mode);
- key = SK_PASS;
- err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
- if (err)
- goto close;
- if (pass != 1)
- FAIL("%s: want pass count 1, have %d", log_prefix, pass);
-
- n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
- if (n < 0)
- FAIL_ERRNO("%s: recv_timeout", log_prefix);
- if (n == 0)
- FAIL("%s: incomplete recv", log_prefix);
-
-close:
xclose(c1);
xclose(p1);
close_cli0:
@@ -1883,6 +1809,7 @@ static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family)
{
int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ int nop_map = bpf_map__fd(skel->maps.nop_map);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
@@ -1892,14 +1819,32 @@ static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel,
return;
skel->bss->test_ingress = false;
- unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map,
+ unix_inet_redir_to_connected(family, SOCK_DGRAM,
+ sock_map, -1, verdict_map,
+ REDIR_EGRESS);
+	unix_inet_redir_to_connected(family, SOCK_STREAM,
+ sock_map, -1, verdict_map,
+ REDIR_EGRESS);
+
+ unix_inet_redir_to_connected(family, SOCK_DGRAM,
+ sock_map, nop_map, verdict_map,
REDIR_EGRESS);
- unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map,
+ unix_inet_redir_to_connected(family, SOCK_STREAM,
+ sock_map, nop_map, verdict_map,
REDIR_EGRESS);
skel->bss->test_ingress = true;
- unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map,
+ unix_inet_redir_to_connected(family, SOCK_DGRAM,
+ sock_map, -1, verdict_map,
+ REDIR_INGRESS);
+ unix_inet_redir_to_connected(family, SOCK_STREAM,
+ sock_map, -1, verdict_map,
+ REDIR_INGRESS);
+
+ unix_inet_redir_to_connected(family, SOCK_DGRAM,
+ sock_map, nop_map, verdict_map,
REDIR_INGRESS);
- unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map,
+ unix_inet_redir_to_connected(family, SOCK_STREAM,
+ sock_map, nop_map, verdict_map,
REDIR_INGRESS);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c
index 9e6a5e3ed4de..5a4491d4edfe 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include <io_uring/mini_liburing.h>
#include "cgroup_helpers.h"
static char bpf_log_buf[4096];
@@ -38,6 +39,7 @@ static struct sockopt_test {
socklen_t get_optlen_ret;
enum sockopt_test_error error;
+ bool io_uring_support;
} tests[] = {
/* ==================== getsockopt ==================== */
@@ -251,7 +253,9 @@ static struct sockopt_test {
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
+ .get_level = SOL_SOCKET,
.get_optlen = 64,
+ .io_uring_support = true,
},
{
.descr = "getsockopt: deny bigger ctx->optlen",
@@ -276,6 +280,7 @@ static struct sockopt_test {
.get_optlen = 64,
.error = EFAULT_GETSOCKOPT,
+ .io_uring_support = true,
},
{
.descr = "getsockopt: ignore >PAGE_SIZE optlen",
@@ -318,6 +323,7 @@ static struct sockopt_test {
.get_optval = {}, /* the changes are ignored */
.get_optlen = PAGE_SIZE + 1,
.error = EOPNOTSUPP_GETSOCKOPT,
+ .io_uring_support = true,
},
{
.descr = "getsockopt: support smaller ctx->optlen",
@@ -337,8 +343,10 @@ static struct sockopt_test {
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
+ .get_level = SOL_SOCKET,
.get_optlen = 64,
.get_optlen_ret = 32,
+ .io_uring_support = true,
},
{
.descr = "getsockopt: deny writing to ctx->optval",
@@ -518,6 +526,7 @@ static struct sockopt_test {
.set_level = 123,
.set_optlen = 1,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: allow changing ctx->level",
@@ -572,6 +581,7 @@ static struct sockopt_test {
.set_optname = 123,
.set_optlen = 1,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: allow changing ctx->optname",
@@ -624,6 +634,7 @@ static struct sockopt_test {
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_optlen = 64,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: ctx->optlen == -1 is ok",
@@ -640,6 +651,7 @@ static struct sockopt_test {
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_optlen = 64,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: deny ctx->optlen < 0 (except -1)",
@@ -658,6 +670,7 @@ static struct sockopt_test {
.set_optlen = 4,
.error = EFAULT_SETSOCKOPT,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: deny ctx->optlen > input optlen",
@@ -675,6 +688,7 @@ static struct sockopt_test {
.set_optlen = 64,
.error = EFAULT_SETSOCKOPT,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: ignore >PAGE_SIZE optlen",
@@ -940,7 +954,89 @@ static int load_prog(const struct bpf_insn *insns,
return fd;
}
-static int run_test(int cgroup_fd, struct sockopt_test *test)
+/* Core function that handles io_uring ring initialization, sending an
+ * SQE with the sockopt command, and waiting for the CQE.
+ */
+static int uring_sockopt(int op, int fd, int level, int optname,
+ const void *optval, socklen_t optlen)
+{
+ struct io_uring_cqe *cqe;
+ struct io_uring_sqe *sqe;
+ struct io_uring ring;
+ int err;
+
+ err = io_uring_queue_init(1, &ring, 0);
+ if (!ASSERT_OK(err, "io_uring initialization"))
+ return err;
+
+ sqe = io_uring_get_sqe(&ring);
+ if (!ASSERT_NEQ(sqe, NULL, "Get an SQE")) {
+ err = -1;
+ goto fail;
+ }
+
+ io_uring_prep_cmd(sqe, op, fd, level, optname, optval, optlen);
+
+ err = io_uring_submit(&ring);
+ if (!ASSERT_EQ(err, 1, "Submit SQE"))
+ goto fail;
+
+ err = io_uring_wait_cqe(&ring, &cqe);
+ if (!ASSERT_OK(err, "Wait for CQE"))
+ goto fail;
+
+ err = cqe->res;
+
+fail:
+ io_uring_queue_exit(&ring);
+
+ return err;
+}
+
+static int uring_setsockopt(int fd, int level, int optname, const void *optval,
+ socklen_t optlen)
+{
+ return uring_sockopt(SOCKET_URING_OP_SETSOCKOPT, fd, level, optname,
+ optval, optlen);
+}
+
+static int uring_getsockopt(int fd, int level, int optname, void *optval,
+ socklen_t *optlen)
+{
+ int ret = uring_sockopt(SOCKET_URING_OP_GETSOCKOPT, fd, level, optname,
+ optval, *optlen);
+ if (ret < 0)
+ return ret;
+
+	/* Copy optlen back out to stay compatible with the system call
+	 * interface and to simplify the test.
+	 */
+ *optlen = ret;
+
+ return 0;
+}
+
+/* Execute the setsockopt operation */
+static int call_setsockopt(bool use_io_uring, int fd, int level, int optname,
+ const void *optval, socklen_t optlen)
+{
+ if (use_io_uring)
+ return uring_setsockopt(fd, level, optname, optval, optlen);
+
+ return setsockopt(fd, level, optname, optval, optlen);
+}
+
+/* Execute the getsockopt operation */
+static int call_getsockopt(bool use_io_uring, int fd, int level, int optname,
+ void *optval, socklen_t *optlen)
+{
+ if (use_io_uring)
+ return uring_getsockopt(fd, level, optname, optval, optlen);
+
+ return getsockopt(fd, level, optname, optval, optlen);
+}
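
A hypothetical caller of the two wrappers above, doing a set-then-get round trip through io_uring (the option used is illustrative only):

static void example_uring_sockopt(int sock_fd)
{
	int one = 1, val = 0;
	socklen_t len = sizeof(val);

	if (uring_setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
			     &one, sizeof(one)))
		return;

	if (!uring_getsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, &val, &len))
		ASSERT_EQ(val, 1, "SO_REUSEADDR via io_uring");
}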
+
+static int run_test(int cgroup_fd, struct sockopt_test *test, bool use_io_uring)
{
int sock_fd, err, prog_fd;
void *optval = NULL;
@@ -980,8 +1076,9 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
test->set_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
}
- err = setsockopt(sock_fd, test->set_level, test->set_optname,
- test->set_optval, test->set_optlen);
+ err = call_setsockopt(use_io_uring, sock_fd, test->set_level,
+ test->set_optname, test->set_optval,
+ test->set_optlen);
if (err) {
if (errno == EPERM && test->error == EPERM_SETSOCKOPT)
goto close_sock_fd;
@@ -1008,8 +1105,8 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
socklen_t expected_get_optlen = test->get_optlen_ret ?:
test->get_optlen;
- err = getsockopt(sock_fd, test->get_level, test->get_optname,
- optval, &optlen);
+ err = call_getsockopt(use_io_uring, sock_fd, test->get_level,
+ test->get_optname, optval, &optlen);
if (err) {
if (errno == EOPNOTSUPP && test->error == EOPNOTSUPP_GETSOCKOPT)
goto free_optval;
@@ -1063,7 +1160,11 @@ void test_sockopt(void)
if (!test__start_subtest(tests[i].descr))
continue;
- ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr);
+ ASSERT_OK(run_test(cgroup_fd, &tests[i], false),
+ tests[i].descr);
+ if (tests[i].io_uring_support)
+ ASSERT_OK(run_test(cgroup_fd, &tests[i], true),
+ tests[i].descr);
}
close(cgroup_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 58fe2c586ed7..59993fc9c0d7 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
+#include "tailcall_poke.skel.h"
+
/* test_tailcall_1 checks basic functionality by patching multiple locations
* in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -218,12 +221,14 @@ out:
bpf_object__close(obj);
}
-static void test_tailcall_count(const char *which)
+static void test_tailcall_count(const char *which, bool test_fentry,
+ bool test_fexit)
{
+ struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
+ struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
- struct bpf_object *obj;
char buff[128] = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = buff,
@@ -265,23 +270,105 @@ static void test_tailcall_count(const char *which)
if (CHECK_FAIL(err))
goto out;
+ if (test_fentry) {
+ fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
+ if (!ASSERT_OK_PTR(prog, "find fentry prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd,
+ "subprog_tail");
+ if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
+ goto out;
+
+ err = bpf_object__load(fentry_obj);
+ if (!ASSERT_OK(err, "load fentry_obj"))
+ goto out;
+
+ fentry_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
+ goto out;
+ }
+
+ if (test_fexit) {
+ fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
+ if (!ASSERT_OK_PTR(prog, "find fexit prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd,
+ "subprog_tail");
+ if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
+ goto out;
+
+ err = bpf_object__load(fexit_obj);
+ if (!ASSERT_OK(err, "load fexit_obj"))
+ goto out;
+
+ fexit_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
+ goto out;
+ }
+
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "tailcall count");
ASSERT_EQ(val, 33, "tailcall count");
+ if (test_fentry) {
+ data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fentry.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0,
+ "find tailcall_bpf2bpf_fentry.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "fentry count");
+ ASSERT_EQ(val, 33, "fentry count");
+ }
+
+ if (test_fexit) {
+ data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fexit.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0,
+ "find tailcall_bpf2bpf_fexit.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "fexit count");
+ ASSERT_EQ(val, 33, "fexit count");
+ }
+
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
@@ -291,6 +378,10 @@ static void test_tailcall_count(const char *which)
ASSERT_OK(err, "tailcall");
ASSERT_OK(topts.retval, "tailcall retval");
out:
+ bpf_link__destroy(fentry_link);
+ bpf_link__destroy(fexit_link);
+ bpf_object__close(fentry_obj);
+ bpf_object__close(fexit_obj);
bpf_object__close(obj);
}
@@ -299,7 +390,7 @@ out:
*/
static void test_tailcall_3(void)
{
- test_tailcall_count("tailcall3.bpf.o");
+ test_tailcall_count("tailcall3.bpf.o", false, false);
}
/* test_tailcall_6 checks that the count value of the tail call limit
@@ -307,7 +398,7 @@ static void test_tailcall_3(void)
*/
static void test_tailcall_6(void)
{
- test_tailcall_count("tailcall6.bpf.o");
+ test_tailcall_count("tailcall6.bpf.o", false, false);
}
/* test_tailcall_4 checks that the kernel properly selects indirect jump
@@ -352,11 +443,11 @@ static void test_tailcall_4(void)
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
@@ -442,11 +533,11 @@ static void test_tailcall_5(void)
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
@@ -631,11 +722,11 @@ static void test_tailcall_bpf2bpf_2(void)
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
@@ -805,11 +896,11 @@ static void test_tailcall_bpf2bpf_4(bool noise)
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
i = 0;
val.noise = noise;
@@ -872,7 +963,7 @@ static void test_tailcall_bpf2bpf_6(void)
ASSERT_EQ(topts.retval, 0, "tailcall retval");
data_fd = bpf_map__fd(obj->maps.bss);
- if (!ASSERT_GE(map_fd, 0, "bss map fd"))
+ if (!ASSERT_GE(data_fd, 0, "bss map fd"))
goto out;
i = 0;
@@ -884,6 +975,218 @@ out:
tailcall_bpf2bpf6__destroy(obj);
}
+/* test_tailcall_bpf2bpf_fentry checks that the tail call limit counter
+ * matches expectations when the tailcall is preceded by a bpf2bpf call,
+ * and the bpf2bpf call is traced by fentry.
+ */
+static void test_tailcall_bpf2bpf_fentry(void)
+{
+ test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
+}
+
+/* test_tailcall_bpf2bpf_fexit checks that the tail call limit counter
+ * matches expectations when the tailcall is preceded by a bpf2bpf call,
+ * and the bpf2bpf call is traced by fexit.
+ */
+static void test_tailcall_bpf2bpf_fexit(void)
+{
+ test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
+}
+
+/* test_tailcall_bpf2bpf_fentry_fexit checks that the tail call limit
+ * counter matches expectations when the tailcall is preceded by a bpf2bpf
+ * call, and the bpf2bpf call is traced by both fentry and fexit.
+ */
+static void test_tailcall_bpf2bpf_fentry_fexit(void)
+{
+ test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
+}
+
+/* test_tailcall_bpf2bpf_fentry_entry checks that the tail call limit
+ * counter matches expectations when the tailcall is preceded by a bpf2bpf
+ * call, and the bpf2bpf caller is traced by fentry.
+ */
+static void test_tailcall_bpf2bpf_fentry_entry(void)
+{
+ struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
+ int err, map_fd, prog_fd, data_fd, i, val;
+ struct bpf_map *prog_array, *data_map;
+ struct bpf_link *fentry_link = NULL;
+ struct bpf_program *prog;
+ char buff[128] = {};
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
+ BPF_PROG_TYPE_SCHED_CLS,
+ &tgt_obj, &prog_fd);
+ if (!ASSERT_OK(err, "load tgt_obj"))
+ return;
+
+ prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
+ if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
+ if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
+ fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
+ if (!ASSERT_OK_PTR(prog, "find fentry prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
+ if (!ASSERT_OK(err, "set_attach_target classifier_0"))
+ goto out;
+
+ err = bpf_object__load(fentry_obj);
+ if (!ASSERT_OK(err, "load fentry_obj"))
+ goto out;
+
+ fentry_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
+
+ data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "tailcall count");
+ ASSERT_EQ(val, 34, "tailcall count");
+
+ data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fentry.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0,
+ "find tailcall_bpf2bpf_fentry.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "fentry count");
+ ASSERT_EQ(val, 1, "fentry count");
+
+out:
+ bpf_link__destroy(fentry_link);
+ bpf_object__close(fentry_obj);
+ bpf_object__close(tgt_obj);
+}
+
+#define JMP_TABLE "/sys/fs/bpf/jmp_table"
+
+static int poke_thread_exit;
+
+static void *poke_update(void *arg)
+{
+ __u32 zero = 0, prog1_fd, prog2_fd, map_fd;
+ struct tailcall_poke *call = arg;
+
+ map_fd = bpf_map__fd(call->maps.jmp_table);
+ prog1_fd = bpf_program__fd(call->progs.call1);
+ prog2_fd = bpf_program__fd(call->progs.call2);
+
+ while (!poke_thread_exit) {
+ bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
+ bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
+ }
+
+ return NULL;
+}
+
+/*
+ * We are trying to hit a prog array update during another program load
+ * that shares the same prog array map.
+ *
+ * For that we share the jmp_table map between two skeleton instances
+ * by pinning the jmp_table to the same path. Then the first skeleton
+ * instance periodically updates the jmp_table in the 'poke update'
+ * thread while we load the second skeleton instance in the main thread.
+ */
+static void test_tailcall_poke(void)
+{
+ struct tailcall_poke *call, *test;
+ int err, cnt = 10;
+ pthread_t thread;
+
+ unlink(JMP_TABLE);
+
+ call = tailcall_poke__open_and_load();
+ if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
+ return;
+
+ err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
+ if (!ASSERT_OK(err, "bpf_map__pin"))
+ goto out;
+
+ err = pthread_create(&thread, NULL, poke_update, call);
+ if (!ASSERT_OK(err, "new toggler"))
+ goto out;
+
+ while (cnt--) {
+ test = tailcall_poke__open();
+ if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
+ break;
+
+ err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
+ if (!ASSERT_OK(err, "bpf_map__pin")) {
+ tailcall_poke__destroy(test);
+ break;
+ }
+
+ bpf_program__set_autoload(test->progs.test, true);
+ bpf_program__set_autoload(test->progs.call1, false);
+ bpf_program__set_autoload(test->progs.call2, false);
+
+ err = tailcall_poke__load(test);
+ tailcall_poke__destroy(test);
+ if (!ASSERT_OK(err, "tailcall_poke__load"))
+ break;
+ }
+
+ poke_thread_exit = 1;
+ ASSERT_OK(pthread_join(thread, NULL), "pthread_join");
+
+out:
+ bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
+ tailcall_poke__destroy(call);
+}
+
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -910,4 +1213,14 @@ void test_tailcalls(void)
test_tailcall_bpf2bpf_4(true);
if (test__start_subtest("tailcall_bpf2bpf_6"))
test_tailcall_bpf2bpf_6();
+ if (test__start_subtest("tailcall_bpf2bpf_fentry"))
+ test_tailcall_bpf2bpf_fentry();
+ if (test__start_subtest("tailcall_bpf2bpf_fexit"))
+ test_tailcall_bpf2bpf_fexit();
+ if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
+ test_tailcall_bpf2bpf_fentry_fexit();
+ if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
+ test_tailcall_bpf2bpf_fentry_entry();
+ if (test__start_subtest("tailcall_poke"))
+ test_tailcall_poke();
}
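For orientation, the counter the fentry/fexit variants check is maintained on the BPF side. Below is a condensed sketch of the pattern in tailcall_bpf2bpf2.bpf.o (modeled on the real selftest object, details abbreviated): the entry program bumps a .bss counter and tail calls itself through a bpf2bpf subprogram, so the kernel's tail call limit (MAX_TAIL_CALL_CNT, 33 in current kernels) bounds how far the counter can climb — which is where the expected values of 33 and 34 above come from.

// SPDX-License-Identifier: GPL-2.0
/* Sketch of the BPF object under test; not a verbatim copy. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

int count = 0; /* read back via the "tailcall.bss" map by the test */

static __noinline int subprog_tail(struct __sk_buff *skb)
{
	/* Tracing this subprogram with fentry/fexit must not disturb
	 * the tail call counter the kernel keeps across the call.
	 */
	bpf_tail_call_static(skb, &jmp_table, 0);
	return 0;
}

SEC("tc")
int classifier_0(struct __sk_buff *skb)
{
	count++;
	return subprog_tail(skb);
}

char __license[] SEC("license") = "GPL";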
diff --git a/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
index 4224727fb364..626d76fe43a2 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
@@ -30,8 +30,15 @@ void test_task_under_cgroup(void)
if (!ASSERT_OK(ret, "test_task_under_cgroup__load"))
goto cleanup;
- ret = test_task_under_cgroup__attach(skel);
- if (!ASSERT_OK(ret, "test_task_under_cgroup__attach"))
+ /* Attach the LSM program first; it is then triggered when the
+ * TP_BTF program is attached.
+ */
+ skel->links.lsm_run = bpf_program__attach_lsm(skel->progs.lsm_run);
+ if (!ASSERT_OK_PTR(skel->links.lsm_run, "attach_lsm"))
+ goto cleanup;
+
+ skel->links.tp_btf_run = bpf_program__attach_trace(skel->progs.tp_btf_run);
+ if (!ASSERT_OK_PTR(skel->links.tp_btf_run, "attach_tp_btf"))
goto cleanup;
pid = fork();
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
index 6c93215be8a3..924d0e25320c 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
@@ -4,6 +4,10 @@
#define TC_HELPERS
#include <test_progs.h>
+#ifndef loopback
+# define loopback 1
+#endif
+
static inline __u32 id_from_prog_fd(int fd)
{
struct bpf_prog_info prog_info = {};
@@ -45,7 +49,7 @@ static inline __u32 ifindex_from_link_fd(int fd)
return link_info.tcx.ifindex;
}
-static inline void __assert_mprog_count(int target, int expected, bool miniq, int ifindex)
+static inline void __assert_mprog_count(int target, int expected, int ifindex)
{
__u32 count = 0, attach_flags = 0;
int err;
@@ -53,20 +57,22 @@ static inline void __assert_mprog_count(int target, int expected, bool miniq, in
err = bpf_prog_query(ifindex, target, 0, &attach_flags,
NULL, &count);
ASSERT_EQ(count, expected, "count");
- if (!expected && !miniq)
- ASSERT_EQ(err, -ENOENT, "prog_query");
- else
- ASSERT_EQ(err, 0, "prog_query");
+ ASSERT_EQ(err, 0, "prog_query");
}
static inline void assert_mprog_count(int target, int expected)
{
- __assert_mprog_count(target, expected, false, loopback);
+ __assert_mprog_count(target, expected, loopback);
}
static inline void assert_mprog_count_ifindex(int ifindex, int target, int expected)
{
- __assert_mprog_count(target, expected, false, ifindex);
+ __assert_mprog_count(target, expected, ifindex);
+}
+
+static inline void tc_skel_reset_all_seen(struct test_tc_link *skel)
+{
+ memset(skel->bss, 0, sizeof(*skel->bss));
}
#endif /* TC_HELPERS */
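tc_skel_reset_all_seen() replaces the scattered seen_tcN = false resets in the callers below. Because skel->bss points at the skeleton's memory-mapped .bss, a single memset clears every flag at once and stays correct when new seen_* fields are added. The typical call site (illustrative):

tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");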
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c
index 74fc1fe9ee26..bc9841144685 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_links.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c
@@ -65,6 +65,7 @@ void serial_test_tc_links_basic(void)
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -97,6 +98,7 @@ void serial_test_tc_links_basic(void)
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -187,6 +189,7 @@ static void test_tc_links_before_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -194,9 +197,6 @@ static void test_tc_links_before_target(int target)
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
-
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
.relative_fd = bpf_program__fd(skel->progs.tc2),
@@ -246,6 +246,7 @@ static void test_tc_links_before_target(int target)
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -342,6 +343,7 @@ static void test_tc_links_after_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -349,9 +351,6 @@ static void test_tc_links_after_target(int target)
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
-
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER,
.relative_fd = bpf_program__fd(skel->progs.tc1),
@@ -401,6 +400,7 @@ static void test_tc_links_after_target(int target)
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -502,6 +502,7 @@ static void test_tc_links_revision_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -581,22 +582,20 @@ static void test_tc_chain_classic(int target, bool chain_tc_old)
assert_mprog_count(target, 2);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
err = bpf_link__detach(skel->links.tc2);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -707,16 +706,13 @@ static void test_tc_links_replace_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_REPLACE,
.relative_fd = bpf_program__fd(skel->progs.tc2),
@@ -781,16 +777,13 @@ static void test_tc_links_replace_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
err = bpf_link__detach(skel->links.tc2);
if (!ASSERT_OK(err, "link_detach"))
goto cleanup;
@@ -812,16 +805,13 @@ static void test_tc_links_replace_target(int target)
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
err = bpf_link__update_program(skel->links.tc1, skel->progs.tc1);
if (!ASSERT_OK(err, "link_update_self"))
goto cleanup;
@@ -843,6 +833,7 @@ static void test_tc_links_replace_target(int target)
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1254,6 +1245,7 @@ static void test_tc_links_prepend_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1261,9 +1253,6 @@ static void test_tc_links_prepend_target(int target)
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
-
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
);
@@ -1311,6 +1300,7 @@ static void test_tc_links_prepend_target(int target)
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1411,6 +1401,7 @@ static void test_tc_links_append_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1418,9 +1409,6 @@ static void test_tc_links_append_target(int target)
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
-
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER,
);
@@ -1468,6 +1456,7 @@ static void test_tc_links_append_target(int target)
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1637,38 +1626,33 @@ static void test_tc_chain_mixed(int target)
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
- skel->bss->seen_tc4 = false;
- skel->bss->seen_tc5 = false;
- skel->bss->seen_tc6 = false;
-
err = bpf_link__update_program(skel->links.tc6, skel->progs.tc4);
if (!ASSERT_OK(err, "link_update"))
goto cleanup;
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
- skel->bss->seen_tc4 = false;
- skel->bss->seen_tc5 = false;
- skel->bss->seen_tc6 = false;
-
err = bpf_link__detach(skel->links.tc6);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
- __assert_mprog_count(target, 0, true, loopback);
+ assert_mprog_count(target, 0);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
@@ -1758,22 +1742,20 @@ static void test_tc_links_ingress(int target, bool chain_tc_old,
assert_mprog_count(target, 2);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
err = bpf_link__detach(skel->links.tc2);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c
new file mode 100644
index 000000000000..15ee7b2fc410
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c
@@ -0,0 +1,687 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+#include <uapi/linux/if_link.h>
+#include <net/if.h>
+#include <test_progs.h>
+
+#define netkit_peer "nk0"
+#define netkit_name "nk1"
+
+#define ping_addr_neigh 0x0a000002 /* 10.0.0.2 */
+#define ping_addr_noneigh 0x0a000003 /* 10.0.0.3 */
+
+#include "test_tc_link.skel.h"
+#include "netlink_helpers.h"
+#include "tc_helpers.h"
+
+#define ICMP_ECHO 8
+
+struct icmphdr {
+ __u8 type;
+ __u8 code;
+ __sum16 checksum;
+ struct {
+ __be16 id;
+ __be16 sequence;
+ } echo;
+};
+
+struct iplink_req {
+ struct nlmsghdr n;
+ struct ifinfomsg i;
+ char buf[1024];
+};
+
+static int create_netkit(int mode, int policy, int peer_policy, int *ifindex,
+ bool same_netns)
+{
+ struct rtnl_handle rth = { .fd = -1 };
+ struct iplink_req req = {};
+ struct rtattr *linkinfo, *data;
+ const char *type = "netkit";
+ int err;
+
+ err = rtnl_open(&rth, 0);
+ if (!ASSERT_OK(err, "open_rtnetlink"))
+ return err;
+
+ memset(&req, 0, sizeof(req));
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ req.n.nlmsg_type = RTM_NEWLINK;
+ req.i.ifi_family = AF_UNSPEC;
+
+ addattr_l(&req.n, sizeof(req), IFLA_IFNAME, netkit_name,
+ strlen(netkit_name));
+ linkinfo = addattr_nest(&req.n, sizeof(req), IFLA_LINKINFO);
+ addattr_l(&req.n, sizeof(req), IFLA_INFO_KIND, type, strlen(type));
+ data = addattr_nest(&req.n, sizeof(req), IFLA_INFO_DATA);
+ addattr32(&req.n, sizeof(req), IFLA_NETKIT_POLICY, policy);
+ addattr32(&req.n, sizeof(req), IFLA_NETKIT_PEER_POLICY, peer_policy);
+ addattr32(&req.n, sizeof(req), IFLA_NETKIT_MODE, mode);
+ addattr_nest_end(&req.n, data);
+ addattr_nest_end(&req.n, linkinfo);
+
+ err = rtnl_talk(&rth, &req.n, NULL);
+ ASSERT_OK(err, "talk_rtnetlink");
+ rtnl_close(&rth);
+ *ifindex = if_nametoindex(netkit_name);
+
+ ASSERT_GT(*ifindex, 0, "retrieve_ifindex");
+ ASSERT_OK(system("ip netns add foo"), "create netns");
+ ASSERT_OK(system("ip link set dev " netkit_name " up"),
+ "up primary");
+ ASSERT_OK(system("ip addr add dev " netkit_name " 10.0.0.1/24"),
+ "addr primary");
+ if (same_netns) {
+ ASSERT_OK(system("ip link set dev " netkit_peer " up"),
+ "up peer");
+ ASSERT_OK(system("ip addr add dev " netkit_peer " 10.0.0.2/24"),
+ "addr peer");
+ } else {
+ ASSERT_OK(system("ip link set " netkit_peer " netns foo"),
+ "move peer");
+ ASSERT_OK(system("ip netns exec foo ip link set dev "
+ netkit_peer " up"), "up peer");
+ ASSERT_OK(system("ip netns exec foo ip addr add dev "
+ netkit_peer " 10.0.0.2/24"), "addr peer");
+ }
+ return err;
+}
+
+static void destroy_netkit(void)
+{
+ ASSERT_OK(system("ip link del dev " netkit_name), "del primary");
+ ASSERT_OK(system("ip netns del foo"), "delete netns");
+ ASSERT_EQ(if_nametoindex(netkit_name), 0, netkit_name "_ifindex");
+}
+
+static int __send_icmp(__u32 dest)
+{
+ struct sockaddr_in addr;
+ struct icmphdr icmp;
+ int sock, ret;
+
+ ret = write_sysctl("/proc/sys/net/ipv4/ping_group_range", "0 0");
+ if (!ASSERT_OK(ret, "write_sysctl(net.ipv4.ping_group_range)"))
+ return ret;
+
+ sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
+ if (!ASSERT_GE(sock, 0, "icmp_socket"))
+ return -errno;
+
+ ret = setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE,
+ netkit_name, strlen(netkit_name) + 1);
+ if (!ASSERT_OK(ret, "setsockopt(SO_BINDTODEVICE)"))
+ goto out;
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(dest);
+
+ memset(&icmp, 0, sizeof(icmp));
+ icmp.type = ICMP_ECHO;
+ icmp.echo.id = 1234;
+ icmp.echo.sequence = 1;
+
+ ret = sendto(sock, &icmp, sizeof(icmp), 0,
+ (struct sockaddr *)&addr, sizeof(addr));
+ if (!ASSERT_GE(ret, 0, "icmp_sendto"))
+ ret = -errno;
+ else
+ ret = 0;
+out:
+ close(sock);
+ return ret;
+}
+
+static int send_icmp(void)
+{
+ return __send_icmp(ping_addr_neigh);
+}
+
+void serial_test_tc_netkit_basic(void)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_netkit_opts, optl);
+ __u32 prog_ids[2], link_ids[2];
+ __u32 pid1, pid2, lid1, lid2;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex;
+
+ err = create_netkit(NETKIT_L2, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, false);
+ if (err)
+ return;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2,
+ BPF_NETKIT_PEER), 0, "tc2_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, BPF_NETKIT_PRIMARY, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ link = bpf_program__attach_netkit(skel->progs.tc2, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+ ASSERT_NEQ(lid1, lid2, "link_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 1);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, BPF_NETKIT_PEER, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+ destroy_netkit();
+}
+
+static void serial_test_tc_netkit_multi_links_target(int mode, int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_netkit_opts, optl);
+ __u32 prog_ids[3], link_ids[3];
+ __u32 pid1, pid2, lid1, lid2;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex;
+
+ err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, false);
+ if (err)
+ return;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ target), 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2,
+ target), 0, "tc2_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, false, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count_ifindex(ifindex, target, 1);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ link = bpf_program__attach_netkit(skel->progs.tc2, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+ ASSERT_NEQ(lid1, lid2, "link_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, target, 2);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+ destroy_netkit();
+}
+
+void serial_test_tc_netkit_multi_links(void)
+{
+ serial_test_tc_netkit_multi_links_target(NETKIT_L2, BPF_NETKIT_PRIMARY);
+ serial_test_tc_netkit_multi_links_target(NETKIT_L3, BPF_NETKIT_PRIMARY);
+ serial_test_tc_netkit_multi_links_target(NETKIT_L2, BPF_NETKIT_PEER);
+ serial_test_tc_netkit_multi_links_target(NETKIT_L3, BPF_NETKIT_PEER);
+}
+
+static void serial_test_tc_netkit_multi_opts_target(int mode, int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 pid1, pid2, fd1, fd2;
+ __u32 prog_ids[3];
+ struct test_tc_link *skel;
+ int err, ifindex;
+
+ err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, false);
+ if (err)
+ return;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+
+ pid1 = id_from_prog_fd(fd1);
+ pid2 = id_from_prog_fd(fd2);
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, false, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ err = bpf_prog_attach_opts(fd1, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count_ifindex(ifindex, target, 1);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_fd1;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd2, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_fd1;
+
+ assert_mprog_count_ifindex(ifindex, target, 2);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_fd2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+
+cleanup_fd2:
+ err = bpf_prog_detach_opts(fd2, ifindex, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count_ifindex(ifindex, target, 1);
+cleanup_fd1:
+ err = bpf_prog_detach_opts(fd1, ifindex, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count_ifindex(ifindex, target, 0);
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+ destroy_netkit();
+}
+
+void serial_test_tc_netkit_multi_opts(void)
+{
+ serial_test_tc_netkit_multi_opts_target(NETKIT_L2, BPF_NETKIT_PRIMARY);
+ serial_test_tc_netkit_multi_opts_target(NETKIT_L3, BPF_NETKIT_PRIMARY);
+ serial_test_tc_netkit_multi_opts_target(NETKIT_L2, BPF_NETKIT_PEER);
+ serial_test_tc_netkit_multi_opts_target(NETKIT_L3, BPF_NETKIT_PEER);
+}
+
+void serial_test_tc_netkit_device(void)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_netkit_opts, optl);
+ __u32 prog_ids[2], link_ids[2];
+ __u32 pid1, pid2, lid1;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex, ifindex2;
+
+ err = create_netkit(NETKIT_L3, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, true);
+ if (err)
+ return;
+
+ ifindex2 = if_nametoindex(netkit_peer);
+ ASSERT_NEQ(ifindex, ifindex2, "ifindex_1_2");
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2,
+ BPF_NETKIT_PEER), 0, "tc2_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3,
+ BPF_NETKIT_PRIMARY), 0, "tc3_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, BPF_NETKIT_PRIMARY, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex2, BPF_NETKIT_PRIMARY, &optq);
+ ASSERT_EQ(err, -EACCES, "prog_query_should_fail");
+
+ err = bpf_prog_query_opts(ifindex2, BPF_NETKIT_PEER, &optq);
+ ASSERT_EQ(err, -EACCES, "prog_query_should_fail");
+
+ link = bpf_program__attach_netkit(skel->progs.tc2, ifindex2, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ link = bpf_program__attach_netkit(skel->progs.tc3, ifindex2, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+ destroy_netkit();
+}
+
+static void serial_test_tc_netkit_neigh_links_target(int mode, int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_netkit_opts, optl);
+ __u32 prog_ids[2], link_ids[2];
+ __u32 pid1, lid1;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex;
+
+ err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, false);
+ if (err)
+ return;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, false, "seen_eth");
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count_ifindex(ifindex, target, 1);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(__send_icmp(ping_addr_noneigh), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true /* L2: ARP */, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, mode == NETKIT_L3, "seen_eth");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+ destroy_netkit();
+}
+
+void serial_test_tc_netkit_neigh_links(void)
+{
+ serial_test_tc_netkit_neigh_links_target(NETKIT_L2, BPF_NETKIT_PRIMARY);
+ serial_test_tc_netkit_neigh_links_target(NETKIT_L3, BPF_NETKIT_PRIMARY);
+}
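For comparison, create_netkit() above hand-builds the RTM_NEWLINK request so the test does not depend on a netkit-aware ip(8) binary being installed. On an iproute2 recent enough to know about netkit (syntax assumed from iproute2 >= 6.6, where the NETKIT_PASS policy corresponds to the "forward" keyword), the equivalent would be roughly:

/* Sketch only; the keyword syntax is an assumption about iproute2. */
ASSERT_OK(system("ip link add " netkit_name " type netkit mode l2"
		 " forward peer forward name " netkit_peer),
	  "add netkit");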
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
index 7a2ecd4eca5d..51883ccb8020 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
@@ -59,6 +59,7 @@ void serial_test_tc_opts_basic(void)
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -83,6 +84,7 @@ void serial_test_tc_opts_basic(void)
ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -163,6 +165,7 @@ static void test_tc_opts_before_target(int target)
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -219,6 +222,7 @@ static void test_tc_opts_before_target(int target)
ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -313,6 +317,7 @@ static void test_tc_opts_after_target(int target)
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -369,6 +374,7 @@ static void test_tc_opts_after_target(int target)
ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -514,6 +520,7 @@ static void test_tc_opts_revision_target(int target)
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -608,22 +615,20 @@ static void test_tc_chain_classic(int target, bool chain_tc_old)
assert_mprog_count(target, 2);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup_detach;
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -635,7 +640,7 @@ cleanup_detach:
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
- __assert_mprog_count(target, 0, chain_tc_old, loopback);
+ assert_mprog_count(target, 0);
cleanup:
if (tc_attached) {
tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
@@ -730,16 +735,13 @@ static void test_tc_opts_replace_target(int target)
ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = fd2,
@@ -767,16 +769,13 @@ static void test_tc_opts_replace_target(int target)
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE | BPF_F_BEFORE,
.replace_prog_fd = fd3,
@@ -805,6 +804,7 @@ static void test_tc_opts_replace_target(int target)
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1084,6 +1084,7 @@ static void test_tc_opts_prepend_target(int target)
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1124,6 +1125,7 @@ static void test_tc_opts_prepend_target(int target)
ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1222,6 +1224,7 @@ static void test_tc_opts_append_target(int target)
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1262,6 +1265,7 @@ static void test_tc_opts_append_target(int target)
ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -2250,7 +2254,7 @@ static void test_tc_opts_delete_empty(int target, bool chain_tc_old)
BPF_TC_INGRESS : BPF_TC_EGRESS;
err = bpf_tc_hook_create(&tc_hook);
ASSERT_OK(err, "bpf_tc_hook_create");
- __assert_mprog_count(target, 0, true, loopback);
+ assert_mprog_count(target, 0);
}
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_EQ(err, -ENOENT, "prog_detach");
@@ -2316,16 +2320,13 @@ static void test_tc_chain_mixed(int target)
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
- skel->bss->seen_tc4 = false;
- skel->bss->seen_tc5 = false;
- skel->bss->seen_tc6 = false;
-
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = fd3,
@@ -2339,21 +2340,19 @@ static void test_tc_chain_mixed(int target)
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
- skel->bss->seen_tc4 = false;
- skel->bss->seen_tc5 = false;
- skel->bss->seen_tc6 = false;
-
cleanup_opts:
err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
- __assert_mprog_count(target, 0, true, loopback);
+ assert_mprog_count(target, 0);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
@@ -2378,3 +2377,442 @@ void serial_test_tc_opts_chain_mixed(void)
test_tc_chain_mixed(BPF_TCX_INGRESS);
test_tc_chain_mixed(BPF_TCX_EGRESS);
}
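The hunks below keep exercising the tcx multi-prog (mprog) attach API, so a compact recap of the knobs involved: BPF_F_BEFORE/BPF_F_AFTER position the new program relative to relative_fd (or relative_id), BPF_F_REPLACE swaps it in place, and expected_revision makes the attach conditional on the list not having changed in the meantime. A minimal sketch (fds are illustrative):

/* Attach fd2 directly in front of fd1 on loopback ingress, but only
 * if the attach list is still at revision 2; on a revision mismatch
 * the mprog code is expected to fail the attach with -ESTALE.
 */
LIBBPF_OPTS(bpf_prog_attach_opts, opta,
	.flags = BPF_F_BEFORE,
	.relative_fd = fd1,
	.expected_revision = 2,
);

err = bpf_prog_attach_opts(fd2, loopback, BPF_TCX_INGRESS, &opta);
if (err == -ESTALE)
	/* someone raced us: re-query, recompute the position, retry */;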
+
+static int generate_dummy_prog(void)
+{
+ const struct bpf_insn prog_insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ const size_t log_buf_sz = 256;
+ char *log_buf;
+ int fd = -1;
+
+ log_buf = malloc(log_buf_sz);
+ if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
+ return fd;
+ opts.log_buf = log_buf;
+ opts.log_size = log_buf_sz;
+
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, "tcx_prog", "GPL",
+ prog_insns, prog_insn_cnt, &opts);
+ ASSERT_STREQ(log_buf, "", "log_0");
+ ASSERT_GE(fd, 0, "prog_fd");
+ free(log_buf);
+ return fd;
+}
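The two raw instructions in generate_dummy_prog() encode the smallest valid SCHED_CLS program — the hand-assembled equivalent of:

SEC("tc")
int tcx_prog(struct __sk_buff *skb)
{
	return 0; /* BPF_MOV64_IMM(BPF_REG_0, 0); BPF_EXIT_INSN(); */
}

Loading raw insns via bpf_prog_load() sidesteps skeleton setup, which keeps the 63-iteration attach loop in test_tc_opts_max_target() below cheap.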
+
+static void test_tc_opts_max_target(int target, int flags, bool relative)
+{
+ int err, ifindex, i, prog_fd, last_fd = -1;
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ const int max_progs = 63;
+
+ ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
+ ifindex = if_nametoindex("tcx_opts1");
+ ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+
+ for (i = 0; i < max_progs; i++) {
+ prog_fd = generate_dummy_prog();
+ if (!ASSERT_GE(prog_fd, 0, "dummy_prog"))
+ goto cleanup;
+ err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+ assert_mprog_count_ifindex(ifindex, target, i + 1);
+ if (i == max_progs - 1 && relative)
+ last_fd = prog_fd;
+ else
+ close(prog_fd);
+ }
+
+ prog_fd = generate_dummy_prog();
+ if (!ASSERT_GE(prog_fd, 0, "dummy_prog"))
+ goto cleanup;
+ opta.flags = flags;
+ if (last_fd > 0)
+ opta.relative_fd = last_fd;
+ err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta);
+ ASSERT_EQ(err, -ERANGE, "prog_64_attach");
+ assert_mprog_count_ifindex(ifindex, target, max_progs);
+ close(prog_fd);
+cleanup:
+ if (last_fd > 0)
+ close(last_fd);
+ ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
+ ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
+ ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
+}
+
+void serial_test_tc_opts_max(void)
+{
+ test_tc_opts_max_target(BPF_TCX_INGRESS, 0, false);
+ test_tc_opts_max_target(BPF_TCX_EGRESS, 0, false);
+
+ test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_BEFORE, false);
+ test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_BEFORE, true);
+
+ test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_AFTER, true);
+ test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_AFTER, false);
+}
+
+static void test_tc_opts_query_target(int target)
+{
+ const size_t attr_size = offsetofend(union bpf_attr, query);
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ union bpf_attr attr;
+ __u32 prog_ids[10];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 2,
+ );
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 3,
+ );
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 4,
+ );
+
+ err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(target, 4);
+
+ /* Test 1: Double query via libbpf API */
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids, NULL, "prog_ids");
+ ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.prog_ids = prog_ids;
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+ /* Test 2: Double query via bpf_attr & bpf(2) directly */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 3: Query with smaller prog_ids array */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+ attr.query.count = 2;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ ASSERT_EQ(err, -1, "prog_query_should_fail");
+ ASSERT_EQ(errno, ENOSPC, "prog_query_should_fail");
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 4: Query with larger prog_ids array */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+ attr.query.count = 10;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 5: Query with NULL prog_ids array but with count > 0 */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.count = sizeof(prog_ids);
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 6: Query with non-NULL prog_ids array but with count == 0 */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 7: Query with invalid flags */
+ attr.query.attach_flags = 0;
+ attr.query.query_flags = 1;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ ASSERT_EQ(err, -1, "prog_query_should_fail");
+ ASSERT_EQ(errno, EINVAL, "prog_query_should_fail");
+
+ attr.query.attach_flags = 1;
+ attr.query.query_flags = 0;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ ASSERT_EQ(err, -1, "prog_query_should_fail");
+ ASSERT_EQ(errno, EINVAL, "prog_query_should_fail");
+
+cleanup4:
+ err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 3);
+
+cleanup3:
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void serial_test_tc_opts_query(void)
+{
+ test_tc_opts_query_target(BPF_TCX_INGRESS);
+ test_tc_opts_query_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_opts_query_attach_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ struct test_tc_link *skel;
+ __u32 prog_ids[2];
+ __u32 fd1, id1;
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ id1 = id_from_prog_fd(fd1);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 0, "count");
+ ASSERT_EQ(optq.revision, 1, "revision");
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = optq.revision,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.prog_ids = prog_ids;
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup1;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void serial_test_tc_opts_query_attach(void)
+{
+ test_tc_opts_query_attach_target(BPF_TCX_INGRESS);
+ test_tc_opts_query_attach_target(BPF_TCX_EGRESS);
+}
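
[Editor's note] For context, the double-query in Test 1 above mirrors the two-pass pattern a regular tcx user would follow: query once with a NULL prog_ids buffer to learn the count, then query again with a buffer of that size. A minimal sketch, assuming a libbpf version with tcx/mprog query support (the function name and error handling are illustrative, not part of the patch):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <bpf/bpf.h>

static int dump_tcx_ingress_prog_ids(int ifindex)
{
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 *ids, i;
	int err;

	/* Pass 1: prog_ids stays NULL, the kernel only reports count/revision. */
	err = bpf_prog_query_opts(ifindex, BPF_TCX_INGRESS, &optq);
	if (err || !optq.count)
		return err;
	ids = calloc(optq.count, sizeof(*ids));
	if (!ids)
		return -ENOMEM;
	/* Pass 2: on input, optq.count doubles as the buffer capacity. */
	optq.prog_ids = ids;
	err = bpf_prog_query_opts(ifindex, BPF_TCX_INGRESS, &optq);
	if (!err)
		for (i = 0; i < optq.count; i++)
			printf("prog id %u\n", ids[i]);
	free(ids);
	return err;
}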
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index 6ee22c3b251a..518f143c5b0f 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -24,6 +24,7 @@
#include "test_progs.h"
#include "network_helpers.h"
+#include "netlink_helpers.h"
#include "test_tc_neigh_fib.skel.h"
#include "test_tc_neigh.skel.h"
#include "test_tc_peer.skel.h"
@@ -110,11 +111,17 @@ static void netns_setup_namespaces_nofail(const char *verb)
}
}
+enum dev_mode {
+ MODE_VETH,
+ MODE_NETKIT,
+};
+
struct netns_setup_result {
- int ifindex_veth_src;
- int ifindex_veth_src_fwd;
- int ifindex_veth_dst;
- int ifindex_veth_dst_fwd;
+ enum dev_mode dev_mode;
+ int ifindex_src;
+ int ifindex_src_fwd;
+ int ifindex_dst;
+ int ifindex_dst_fwd;
};
static int get_ifaddr(const char *name, char *ifaddr)
@@ -137,58 +144,110 @@ static int get_ifaddr(const char *name, char *ifaddr)
return 0;
}
+static int create_netkit(int mode, char *prim, char *peer)
+{
+ struct rtattr *linkinfo, *data, *peer_info;
+ struct rtnl_handle rth = { .fd = -1 };
+ const char *type = "netkit";
+ struct {
+ struct nlmsghdr n;
+ struct ifinfomsg i;
+ char buf[1024];
+ } req = {};
+ int err;
+
+ err = rtnl_open(&rth, 0);
+ if (!ASSERT_OK(err, "open_rtnetlink"))
+ return err;
+
+ memset(&req, 0, sizeof(req));
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ req.n.nlmsg_type = RTM_NEWLINK;
+ req.i.ifi_family = AF_UNSPEC;
+
+ addattr_l(&req.n, sizeof(req), IFLA_IFNAME, prim, strlen(prim));
+ linkinfo = addattr_nest(&req.n, sizeof(req), IFLA_LINKINFO);
+ addattr_l(&req.n, sizeof(req), IFLA_INFO_KIND, type, strlen(type));
+ data = addattr_nest(&req.n, sizeof(req), IFLA_INFO_DATA);
+ addattr32(&req.n, sizeof(req), IFLA_NETKIT_MODE, mode);
+ peer_info = addattr_nest(&req.n, sizeof(req), IFLA_NETKIT_PEER_INFO);
+ req.n.nlmsg_len += sizeof(struct ifinfomsg);
+ addattr_l(&req.n, sizeof(req), IFLA_IFNAME, peer, strlen(peer));
+ addattr_nest_end(&req.n, peer_info);
+ addattr_nest_end(&req.n, data);
+ addattr_nest_end(&req.n, linkinfo);
+
+ err = rtnl_talk(&rth, &req.n, NULL);
+ ASSERT_OK(err, "talk_rtnetlink");
+ rtnl_close(&rth);
+ return err;
+}
+
static int netns_setup_links_and_routes(struct netns_setup_result *result)
{
struct nstoken *nstoken = NULL;
- char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {};
-
- SYS(fail, "ip link add veth_src type veth peer name veth_src_fwd");
- SYS(fail, "ip link add veth_dst type veth peer name veth_dst_fwd");
+ char src_fwd_addr[IFADDR_STR_LEN+1] = {};
+ int err;
- SYS(fail, "ip link set veth_dst_fwd address " MAC_DST_FWD);
- SYS(fail, "ip link set veth_dst address " MAC_DST);
+ if (result->dev_mode == MODE_VETH) {
+ SYS(fail, "ip link add src type veth peer name src_fwd");
+ SYS(fail, "ip link add dst type veth peer name dst_fwd");
+
+ SYS(fail, "ip link set dst_fwd address " MAC_DST_FWD);
+ SYS(fail, "ip link set dst address " MAC_DST);
+ } else if (result->dev_mode == MODE_NETKIT) {
+ err = create_netkit(NETKIT_L3, "src", "src_fwd");
+ if (!ASSERT_OK(err, "create_ifindex_src"))
+ goto fail;
+ err = create_netkit(NETKIT_L3, "dst", "dst_fwd");
+ if (!ASSERT_OK(err, "create_ifindex_dst"))
+ goto fail;
+ }
- if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr))
+ if (get_ifaddr("src_fwd", src_fwd_addr))
goto fail;
- result->ifindex_veth_src = if_nametoindex("veth_src");
- if (!ASSERT_GT(result->ifindex_veth_src, 0, "ifindex_veth_src"))
+ result->ifindex_src = if_nametoindex("src");
+ if (!ASSERT_GT(result->ifindex_src, 0, "ifindex_src"))
goto fail;
- result->ifindex_veth_src_fwd = if_nametoindex("veth_src_fwd");
- if (!ASSERT_GT(result->ifindex_veth_src_fwd, 0, "ifindex_veth_src_fwd"))
+ result->ifindex_src_fwd = if_nametoindex("src_fwd");
+ if (!ASSERT_GT(result->ifindex_src_fwd, 0, "ifindex_src_fwd"))
goto fail;
- result->ifindex_veth_dst = if_nametoindex("veth_dst");
- if (!ASSERT_GT(result->ifindex_veth_dst, 0, "ifindex_veth_dst"))
+ result->ifindex_dst = if_nametoindex("dst");
+ if (!ASSERT_GT(result->ifindex_dst, 0, "ifindex_dst"))
goto fail;
- result->ifindex_veth_dst_fwd = if_nametoindex("veth_dst_fwd");
- if (!ASSERT_GT(result->ifindex_veth_dst_fwd, 0, "ifindex_veth_dst_fwd"))
+ result->ifindex_dst_fwd = if_nametoindex("dst_fwd");
+ if (!ASSERT_GT(result->ifindex_dst_fwd, 0, "ifindex_dst_fwd"))
goto fail;
- SYS(fail, "ip link set veth_src netns " NS_SRC);
- SYS(fail, "ip link set veth_src_fwd netns " NS_FWD);
- SYS(fail, "ip link set veth_dst_fwd netns " NS_FWD);
- SYS(fail, "ip link set veth_dst netns " NS_DST);
+ SYS(fail, "ip link set src netns " NS_SRC);
+ SYS(fail, "ip link set src_fwd netns " NS_FWD);
+ SYS(fail, "ip link set dst_fwd netns " NS_FWD);
+ SYS(fail, "ip link set dst netns " NS_DST);
/** setup in 'src' namespace */
nstoken = open_netns(NS_SRC);
if (!ASSERT_OK_PTR(nstoken, "setns src"))
goto fail;
- SYS(fail, "ip addr add " IP4_SRC "/32 dev veth_src");
- SYS(fail, "ip addr add " IP6_SRC "/128 dev veth_src nodad");
- SYS(fail, "ip link set dev veth_src up");
+ SYS(fail, "ip addr add " IP4_SRC "/32 dev src");
+ SYS(fail, "ip addr add " IP6_SRC "/128 dev src nodad");
+ SYS(fail, "ip link set dev src up");
- SYS(fail, "ip route add " IP4_DST "/32 dev veth_src scope global");
- SYS(fail, "ip route add " IP4_NET "/16 dev veth_src scope global");
- SYS(fail, "ip route add " IP6_DST "/128 dev veth_src scope global");
+ SYS(fail, "ip route add " IP4_DST "/32 dev src scope global");
+ SYS(fail, "ip route add " IP4_NET "/16 dev src scope global");
+ SYS(fail, "ip route add " IP6_DST "/128 dev src scope global");
- SYS(fail, "ip neigh add " IP4_DST " dev veth_src lladdr %s",
- veth_src_fwd_addr);
- SYS(fail, "ip neigh add " IP6_DST " dev veth_src lladdr %s",
- veth_src_fwd_addr);
+ if (result->dev_mode == MODE_VETH) {
+ SYS(fail, "ip neigh add " IP4_DST " dev src lladdr %s",
+ src_fwd_addr);
+ SYS(fail, "ip neigh add " IP6_DST " dev src lladdr %s",
+ src_fwd_addr);
+ }
close_netns(nstoken);
@@ -201,15 +260,15 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
* needs v4 one in order to start ARP probing. IP4_NET route is added
* to the endpoints so that the ARP processing will reply.
*/
- SYS(fail, "ip addr add " IP4_SLL "/32 dev veth_src_fwd");
- SYS(fail, "ip addr add " IP4_DLL "/32 dev veth_dst_fwd");
- SYS(fail, "ip link set dev veth_src_fwd up");
- SYS(fail, "ip link set dev veth_dst_fwd up");
+ SYS(fail, "ip addr add " IP4_SLL "/32 dev src_fwd");
+ SYS(fail, "ip addr add " IP4_DLL "/32 dev dst_fwd");
+ SYS(fail, "ip link set dev src_fwd up");
+ SYS(fail, "ip link set dev dst_fwd up");
- SYS(fail, "ip route add " IP4_SRC "/32 dev veth_src_fwd scope global");
- SYS(fail, "ip route add " IP6_SRC "/128 dev veth_src_fwd scope global");
- SYS(fail, "ip route add " IP4_DST "/32 dev veth_dst_fwd scope global");
- SYS(fail, "ip route add " IP6_DST "/128 dev veth_dst_fwd scope global");
+ SYS(fail, "ip route add " IP4_SRC "/32 dev src_fwd scope global");
+ SYS(fail, "ip route add " IP6_SRC "/128 dev src_fwd scope global");
+ SYS(fail, "ip route add " IP4_DST "/32 dev dst_fwd scope global");
+ SYS(fail, "ip route add " IP6_DST "/128 dev dst_fwd scope global");
close_netns(nstoken);
@@ -218,16 +277,18 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
if (!ASSERT_OK_PTR(nstoken, "setns dst"))
goto fail;
- SYS(fail, "ip addr add " IP4_DST "/32 dev veth_dst");
- SYS(fail, "ip addr add " IP6_DST "/128 dev veth_dst nodad");
- SYS(fail, "ip link set dev veth_dst up");
+ SYS(fail, "ip addr add " IP4_DST "/32 dev dst");
+ SYS(fail, "ip addr add " IP6_DST "/128 dev dst nodad");
+ SYS(fail, "ip link set dev dst up");
- SYS(fail, "ip route add " IP4_SRC "/32 dev veth_dst scope global");
- SYS(fail, "ip route add " IP4_NET "/16 dev veth_dst scope global");
- SYS(fail, "ip route add " IP6_SRC "/128 dev veth_dst scope global");
+ SYS(fail, "ip route add " IP4_SRC "/32 dev dst scope global");
+ SYS(fail, "ip route add " IP4_NET "/16 dev dst scope global");
+ SYS(fail, "ip route add " IP6_SRC "/128 dev dst scope global");
- SYS(fail, "ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD);
- SYS(fail, "ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+ if (result->dev_mode == MODE_VETH) {
+ SYS(fail, "ip neigh add " IP4_SRC " dev dst lladdr " MAC_DST_FWD);
+ SYS(fail, "ip neigh add " IP6_SRC " dev dst lladdr " MAC_DST_FWD);
+ }
close_netns(nstoken);
@@ -293,23 +354,23 @@ static int netns_load_bpf(const struct bpf_program *src_prog,
const struct bpf_program *chk_prog,
const struct netns_setup_result *setup_result)
{
- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src_fwd);
- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_src_fwd);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
int err;
- /* tc qdisc add dev veth_src_fwd clsact */
- QDISC_CLSACT_CREATE(&qdisc_veth_src_fwd, setup_result->ifindex_veth_src_fwd);
- /* tc filter add dev veth_src_fwd ingress bpf da src_prog */
- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS, src_prog, 0);
- /* tc filter add dev veth_src_fwd egress bpf da chk_prog */
- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS, chk_prog, 0);
+ /* tc qdisc add dev src_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_src_fwd, setup_result->ifindex_src_fwd);
+ /* tc filter add dev src_fwd ingress bpf da src_prog */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS, src_prog, 0);
+ /* tc filter add dev src_fwd egress bpf da chk_prog */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS, chk_prog, 0);
- /* tc qdisc add dev veth_dst_fwd clsact */
- QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
- /* tc filter add dev veth_dst_fwd ingress bpf da dst_prog */
- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, dst_prog, 0);
- /* tc filter add dev veth_dst_fwd egress bpf da chk_prog */
- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, chk_prog, 0);
+ /* tc qdisc add dev dst_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
+ /* tc filter add dev dst_fwd ingress bpf da dst_prog */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, dst_prog, 0);
+ /* tc filter add dev dst_fwd egress bpf da chk_prog */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, chk_prog, 0);
return 0;
fail:
@@ -539,10 +600,10 @@ done:
static int netns_load_dtime_bpf(struct test_tc_dtime *skel,
const struct netns_setup_result *setup_result)
{
- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src_fwd);
- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src);
- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_src_fwd);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_src);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst);
struct nstoken *nstoken;
int err;
@@ -550,58 +611,58 @@ static int netns_load_dtime_bpf(struct test_tc_dtime *skel,
nstoken = open_netns(NS_SRC);
if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC))
return -1;
- /* tc qdisc add dev veth_src clsact */
- QDISC_CLSACT_CREATE(&qdisc_veth_src, setup_result->ifindex_veth_src);
- /* tc filter add dev veth_src ingress bpf da ingress_host */
- XGRESS_FILTER_ADD(&qdisc_veth_src, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
- /* tc filter add dev veth_src egress bpf da egress_host */
- XGRESS_FILTER_ADD(&qdisc_veth_src, BPF_TC_EGRESS, skel->progs.egress_host, 0);
+ /* tc qdisc add dev src clsact */
+ QDISC_CLSACT_CREATE(&qdisc_src, setup_result->ifindex_src);
+ /* tc filter add dev src ingress bpf da ingress_host */
+ XGRESS_FILTER_ADD(&qdisc_src, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
+ /* tc filter add dev src egress bpf da egress_host */
+ XGRESS_FILTER_ADD(&qdisc_src, BPF_TC_EGRESS, skel->progs.egress_host, 0);
close_netns(nstoken);
/* setup ns_dst tc progs */
nstoken = open_netns(NS_DST);
if (!ASSERT_OK_PTR(nstoken, "setns " NS_DST))
return -1;
- /* tc qdisc add dev veth_dst clsact */
- QDISC_CLSACT_CREATE(&qdisc_veth_dst, setup_result->ifindex_veth_dst);
- /* tc filter add dev veth_dst ingress bpf da ingress_host */
- XGRESS_FILTER_ADD(&qdisc_veth_dst, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
- /* tc filter add dev veth_dst egress bpf da egress_host */
- XGRESS_FILTER_ADD(&qdisc_veth_dst, BPF_TC_EGRESS, skel->progs.egress_host, 0);
+ /* tc qdisc add dev dst clsact */
+ QDISC_CLSACT_CREATE(&qdisc_dst, setup_result->ifindex_dst);
+ /* tc filter add dev dst ingress bpf da ingress_host */
+ XGRESS_FILTER_ADD(&qdisc_dst, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
+ /* tc filter add dev dst egress bpf da egress_host */
+ XGRESS_FILTER_ADD(&qdisc_dst, BPF_TC_EGRESS, skel->progs.egress_host, 0);
close_netns(nstoken);
/* setup ns_fwd tc progs */
nstoken = open_netns(NS_FWD);
if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD))
return -1;
- /* tc qdisc add dev veth_dst_fwd clsact */
- QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
- /* tc filter add dev veth_dst_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS,
+ /* tc qdisc add dev dst_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
+ /* tc filter add dev dst_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS,
skel->progs.ingress_fwdns_prio100, 100);
- /* tc filter add dev veth_dst_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS,
+ /* tc filter add dev dst_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS,
skel->progs.ingress_fwdns_prio101, 101);
- /* tc filter add dev veth_dst_fwd egress prio 100 bpf da egress_fwdns_prio100 */
- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS,
+ /* tc filter add dev dst_fwd egress prio 100 bpf da egress_fwdns_prio100 */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS,
skel->progs.egress_fwdns_prio100, 100);
- /* tc filter add dev veth_dst_fwd egress prio 101 bpf da egress_fwdns_prio101 */
- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS,
+ /* tc filter add dev dst_fwd egress prio 101 bpf da egress_fwdns_prio101 */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS,
skel->progs.egress_fwdns_prio101, 101);
- /* tc qdisc add dev veth_src_fwd clsact */
- QDISC_CLSACT_CREATE(&qdisc_veth_src_fwd, setup_result->ifindex_veth_src_fwd);
- /* tc filter add dev veth_src_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS,
+ /* tc qdisc add dev src_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_src_fwd, setup_result->ifindex_src_fwd);
+ /* tc filter add dev src_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS,
skel->progs.ingress_fwdns_prio100, 100);
- /* tc filter add dev veth_src_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS,
+ /* tc filter add dev src_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS,
skel->progs.ingress_fwdns_prio101, 101);
- /* tc filter add dev veth_src_fwd egress prio 100 bpf da egress_fwdns_prio100 */
- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS,
+ /* tc filter add dev src_fwd egress prio 100 bpf da egress_fwdns_prio100 */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS,
skel->progs.egress_fwdns_prio100, 100);
- /* tc filter add dev veth_src_fwd egress prio 101 bpf da egress_fwdns_prio101 */
- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS,
+ /* tc filter add dev src_fwd egress prio 101 bpf da egress_fwdns_prio101 */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS,
skel->progs.egress_fwdns_prio101, 101);
close_netns(nstoken);
return 0;
@@ -777,8 +838,8 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open"))
return;
- skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
- skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+ skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
err = test_tc_dtime__load(skel);
if (!ASSERT_OK(err, "test_tc_dtime__load"))
@@ -868,8 +929,8 @@ static void test_tc_redirect_neigh(struct netns_setup_result *setup_result)
if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open"))
goto done;
- skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
- skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+ skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
err = test_tc_neigh__load(skel);
if (!ASSERT_OK(err, "test_tc_neigh__load"))
@@ -904,8 +965,8 @@ static void test_tc_redirect_peer(struct netns_setup_result *setup_result)
if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
goto done;
- skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
- skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+ skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
err = test_tc_peer__load(skel);
if (!ASSERT_OK(err, "test_tc_peer__load"))
@@ -996,7 +1057,7 @@ static int tun_relay_loop(int src_fd, int target_fd)
static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
{
LIBBPF_OPTS(bpf_tc_hook, qdisc_tun_fwd);
- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
struct test_tc_peer *skel = NULL;
struct nstoken *nstoken = NULL;
int err;
@@ -1045,7 +1106,7 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
goto fail;
skel->rodata->IFINDEX_SRC = ifindex;
- skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
err = test_tc_peer__load(skel);
if (!ASSERT_OK(err, "test_tc_peer__load"))
@@ -1053,19 +1114,19 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
/* Load "tc_src_l3" to the tun_fwd interface to redirect packets
* towards dst, and "tc_dst" to redirect packets
- * and "tc_chk" on veth_dst_fwd to drop non-redirected packets.
+ * and "tc_chk" on dst_fwd to drop non-redirected packets.
*/
/* tc qdisc add dev tun_fwd clsact */
QDISC_CLSACT_CREATE(&qdisc_tun_fwd, ifindex);
/* tc filter add dev tun_fwd ingress bpf da tc_src_l3 */
XGRESS_FILTER_ADD(&qdisc_tun_fwd, BPF_TC_INGRESS, skel->progs.tc_src_l3, 0);
- /* tc qdisc add dev veth_dst_fwd clsact */
- QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
- /* tc filter add dev veth_dst_fwd ingress bpf da tc_dst_l3 */
- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, skel->progs.tc_dst_l3, 0);
- /* tc filter add dev veth_dst_fwd egress bpf da tc_chk */
- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0);
+ /* tc qdisc add dev dst_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
+ /* tc filter add dev dst_fwd ingress bpf da tc_dst_l3 */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, skel->progs.tc_dst_l3, 0);
+ /* tc filter add dev dst_fwd egress bpf da tc_chk */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0);
/* Setup route and neigh tables */
SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24");
@@ -1074,17 +1135,17 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad");
SYS(fail, "ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad");
- SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global");
+ SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev src scope global");
SYS(fail, "ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD
" dev tun_src scope global");
- SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global");
- SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global");
+ SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev dst scope global");
+ SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev src scope global");
SYS(fail, "ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD
" dev tun_src scope global");
- SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global");
+ SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev dst scope global");
- SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
- SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+ SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev dst lladdr " MAC_DST_FWD);
+ SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev dst lladdr " MAC_DST_FWD);
if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
goto fail;
@@ -1106,9 +1167,9 @@ fail:
close_netns(nstoken);
}
-#define RUN_TEST(name) \
+#define RUN_TEST(name, mode) \
({ \
- struct netns_setup_result setup_result; \
+ struct netns_setup_result setup_result = { .dev_mode = mode, }; \
if (test__start_subtest(#name)) \
if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \
if (ASSERT_OK(netns_setup_links_and_routes(&setup_result), \
@@ -1122,11 +1183,13 @@ static void *test_tc_redirect_run_tests(void *arg)
{
netns_setup_namespaces_nofail("delete");
- RUN_TEST(tc_redirect_peer);
- RUN_TEST(tc_redirect_peer_l3);
- RUN_TEST(tc_redirect_neigh);
- RUN_TEST(tc_redirect_neigh_fib);
- RUN_TEST(tc_redirect_dtime);
+ RUN_TEST(tc_redirect_peer, MODE_VETH);
+ RUN_TEST(tc_redirect_peer, MODE_NETKIT);
+ RUN_TEST(tc_redirect_peer_l3, MODE_VETH);
+ RUN_TEST(tc_redirect_peer_l3, MODE_NETKIT);
+ RUN_TEST(tc_redirect_neigh, MODE_VETH);
+ RUN_TEST(tc_redirect_neigh_fib, MODE_VETH);
+ RUN_TEST(tc_redirect_dtime, MODE_VETH);
return NULL;
}
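
[Editor's note] As an aside, the RTM_NEWLINK request that create_netkit() above assembles has the following nested attribute layout (a schematic of the message built by the addattr_* calls, not extra test code):

RTM_NEWLINK (NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL)
  IFLA_IFNAME              = prim
  IFLA_LINKINFO
    IFLA_INFO_KIND         = "netkit"
    IFLA_INFO_DATA
      IFLA_NETKIT_MODE     = NETKIT_L3
      IFLA_NETKIT_PEER_INFO
        struct ifinfomsg   (peer device)
        IFLA_IFNAME        = peer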
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c
new file mode 100644
index 000000000000..d3491a84b3b9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <bpf/btf.h>
+#include <test_progs.h>
+
+#include "test_bpf_ma.skel.h"
+
+static void do_bpf_ma_test(const char *name)
+{
+ struct test_bpf_ma *skel;
+ struct bpf_program *prog;
+ struct btf *btf;
+ int i, err;
+
+ skel = test_bpf_ma__open();
+ if (!ASSERT_OK_PTR(skel, "open"))
+ return;
+
+ btf = bpf_object__btf(skel->obj);
+ if (!ASSERT_OK_PTR(btf, "btf"))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(skel->rodata->data_sizes); i++) {
+ char name[32];
+ int id;
+
+ snprintf(name, sizeof(name), "bin_data_%u", skel->rodata->data_sizes[i]);
+ id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);
+ if (!ASSERT_GT(id, 0, "bin_data"))
+ goto out;
+ skel->rodata->data_btf_ids[i] = id;
+ }
+
+ prog = bpf_object__find_program_by_name(skel->obj, name);
+ if (!ASSERT_OK_PTR(prog, "invalid prog name"))
+ goto out;
+ bpf_program__set_autoload(prog, true);
+
+ err = test_bpf_ma__load(skel);
+ if (!ASSERT_OK(err, "load"))
+ goto out;
+
+ err = test_bpf_ma__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto out;
+
+ skel->bss->pid = getpid();
+ usleep(1);
+ ASSERT_OK(skel->bss->err, "test error");
+out:
+ test_bpf_ma__destroy(skel);
+}
+
+void test_test_bpf_ma(void)
+{
+ if (test__start_subtest("batch_alloc_free"))
+ do_bpf_ma_test("test_batch_alloc_free");
+ if (test__start_subtest("free_through_map_free"))
+ do_bpf_ma_test("test_free_through_map_free");
+ if (test__start_subtest("batch_percpu_alloc_free"))
+ do_bpf_ma_test("test_batch_percpu_alloc_free");
+ if (test__start_subtest("percpu_free_through_map_free"))
+ do_bpf_ma_test("test_percpu_free_through_map_free");
+}
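
[Editor's note] The BTF-ID fixup in do_bpf_ma_test() works because of the skeleton open/load split: rodata is only writable between open and load, after which it is frozen into the read-only map. A minimal sketch of that pattern, using a hypothetical skeleton `foo` with a hypothetical rodata field `some_id`:

struct foo *skel = foo__open();        /* object parsed, maps not created yet */
if (!skel)
	return;
skel->rodata->some_id = resolve_id(); /* runtime value into rodata, pre-load */
if (foo__load(skel))                  /* verifier runs; rodata frozen from here */
	goto out;
/* ... attach and run ... */
out:
	foo__destroy(skel);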
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
index 214d9f4a94a5..ea933fd151c3 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
@@ -8,7 +8,8 @@
#include <sys/types.h>
#include <test_progs.h>
-#define TDIR "/sys/kernel/debug"
+/* TDIR must be in a location we can create a directory in. */
+#define TDIR "/tmp/test_bpffs_testdir"
static int read_iter(char *file)
{
@@ -43,8 +44,11 @@ static int fn(void)
if (!ASSERT_OK(err, "mount /"))
goto out;
- err = umount(TDIR);
- if (!ASSERT_OK(err, "umount " TDIR))
+ err = mkdir(TDIR, 0777);
+ /* If the directory already exists, we can carry on. It may be left
+ * over from a previous run.
+ */
+ if ((err && errno != EEXIST) && !ASSERT_OK(err, "mkdir " TDIR))
goto out;
err = mount("none", TDIR, "tmpfs", 0, NULL);
@@ -138,6 +142,7 @@ out:
rmdir(TDIR "/fs1");
rmdir(TDIR "/fs2");
umount(TDIR);
+ rmdir(TDIR);
exit(err);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c
index 290c21dbe65a..760ad96b4be0 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "timer.skel.h"
+#include "timer_failure.skel.h"
static int timer(struct timer *timer_skel)
{
@@ -14,6 +15,7 @@ static int timer(struct timer *timer_skel)
ASSERT_EQ(timer_skel->data->callback_check, 52, "callback_check1");
ASSERT_EQ(timer_skel->data->callback2_check, 52, "callback2_check1");
+ ASSERT_EQ(timer_skel->bss->pinned_callback_check, 0, "pinned_callback_check1");
prog_fd = bpf_program__fd(timer_skel->progs.test1);
err = bpf_prog_test_run_opts(prog_fd, &topts);
@@ -32,6 +34,9 @@ static int timer(struct timer *timer_skel)
/* check that timer_cb3() was executed twice */
ASSERT_EQ(timer_skel->bss->abs_data, 12, "abs_data");
+ /* check that timer_cb_pinned() was executed twice */
+ ASSERT_EQ(timer_skel->bss->pinned_callback_check, 2, "pinned_callback_check");
+
/* check that there were no errors in timer execution */
ASSERT_EQ(timer_skel->bss->err, 0, "err");
@@ -49,10 +54,11 @@ void serial_test_timer(void)
timer_skel = timer__open_and_load();
if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
- goto cleanup;
+ return;
err = timer(timer_skel);
ASSERT_OK(err, "timer");
-cleanup:
timer__destroy(timer_skel);
+
+ RUN_TESTS(timer_failure);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe.c b/tools/testing/selftests/bpf/prog_tests/uprobe.c
new file mode 100644
index 000000000000..cf3e0e7a64fa
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Hengqi Chen */
+
+#include <test_progs.h>
+#include "test_uprobe.skel.h"
+
+static FILE *urand_spawn(int *pid)
+{
+ FILE *f;
+
+ /* urandom_read's stdout is wired into f */
+ f = popen("./urandom_read 1 report-pid", "r");
+ if (!f)
+ return NULL;
+
+ if (fscanf(f, "%d", pid) != 1) {
+ pclose(f);
+ errno = EINVAL;
+ return NULL;
+ }
+
+ return f;
+}
+
+static int urand_trigger(FILE **urand_pipe)
+{
+ int exit_code;
+
+ /* pclose() waits for the child process to exit and returns its exit code */
+ exit_code = pclose(*urand_pipe);
+ *urand_pipe = NULL;
+
+ return exit_code;
+}
+
+void test_uprobe(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct test_uprobe *skel;
+ FILE *urand_pipe = NULL;
+ int urand_pid = 0, err;
+
+ skel = test_uprobe__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ urand_pipe = urand_spawn(&urand_pid);
+ if (!ASSERT_OK_PTR(urand_pipe, "urand_spawn"))
+ goto cleanup;
+
+ skel->bss->my_pid = urand_pid;
+
+ /* Manually attach a uprobe to urandlib_api.
+ * There are two `urandlib_api` symbols in the .dynsym section:
+ * - urandlib_api@LIBURANDOM_READ_1.0.0
+ * - urandlib_api@@LIBURANDOM_READ_2.0.0
+ * Both have global binding and would cause a conflict if the user
+ * specified the symbol name without a version suffix.
+ */
+ uprobe_opts.func_name = "urandlib_api";
+ skel->links.test4 = bpf_program__attach_uprobe_opts(skel->progs.test4,
+ urand_pid,
+ "./liburandom_read.so",
+ 0 /* offset */,
+ &uprobe_opts);
+ if (!ASSERT_ERR_PTR(skel->links.test4, "urandlib_api_attach_conflict"))
+ goto cleanup;
+
+ uprobe_opts.func_name = "urandlib_api@LIBURANDOM_READ_1.0.0";
+ skel->links.test4 = bpf_program__attach_uprobe_opts(skel->progs.test4,
+ urand_pid,
+ "./liburandom_read.so",
+ 0 /* offset */,
+ &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.test4, "urandlib_api_attach_ok"))
+ goto cleanup;
+
+ /* Auto-attach 3 u[ret]probes to urandlib_api_sameoffset */
+ err = test_uprobe__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger urandom_read */
+ ASSERT_OK(urand_trigger(&urand_pipe), "urand_exit_code");
+
+ ASSERT_EQ(skel->bss->test1_result, 1, "urandlib_api_sameoffset");
+ ASSERT_EQ(skel->bss->test2_result, 1, "urandlib_api_sameoffset@v1");
+ ASSERT_EQ(skel->bss->test3_result, 3, "urandlib_api_sameoffset@@v2");
+ ASSERT_EQ(skel->bss->test4_result, 1, "urandlib_api");
+
+cleanup:
+ if (urand_pipe)
+ pclose(urand_pipe);
+ test_uprobe__destroy(skel);
+}
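
[Editor's note] For reference, the versioned symbols this test depends on can be listed from the shared object's dynamic symbol table, e.g. with readelf --dyn-syms ./liburandom_read.so (illustrative invocation); the output should contain both urandlib_api@LIBURANDOM_READ_1.0.0 and the default-version urandlib_api@@LIBURANDOM_READ_2.0.0 referenced in the comment above.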
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index e3e68c97b40c..5cfa7a6316b6 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -31,6 +31,7 @@
#include "verifier_helper_restricted.skel.h"
#include "verifier_helper_value_access.skel.h"
#include "verifier_int_ptr.skel.h"
+#include "verifier_iterating_callbacks.skel.h"
#include "verifier_jeq_infer_not_null.skel.h"
#include "verifier_ld_ind.skel.h"
#include "verifier_ldsx.skel.h"
@@ -46,6 +47,7 @@
#include "verifier_movsx.skel.h"
#include "verifier_netfilter_ctx.skel.h"
#include "verifier_netfilter_retcode.skel.h"
+#include "verifier_precision.skel.h"
#include "verifier_prevent_map_lookup.skel.h"
#include "verifier_raw_stack.skel.h"
#include "verifier_raw_tp_writable.skel.h"
@@ -138,6 +140,7 @@ void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_acces
void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); }
void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); }
void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); }
+void test_verifier_iterating_callbacks(void) { RUN(verifier_iterating_callbacks); }
void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); }
void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
@@ -153,6 +156,7 @@ void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
void test_verifier_movsx(void) { RUN(verifier_movsx); }
void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
+void test_verifier_precision(void) { RUN(verifier_precision); }
void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c b/tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c
new file mode 100644
index 000000000000..7dd18c6d06c6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <net/if.h>
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#define LOCAL_NETNS "xdp_dev_bound_only_netns"
+
+static int load_dummy_prog(char *name, __u32 ifindex, __u32 flags)
+{
+ struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN() };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+
+ opts.prog_flags = flags;
+ opts.prog_ifindex = ifindex;
+ return bpf_prog_load(BPF_PROG_TYPE_XDP, name, "GPL", insns, ARRAY_SIZE(insns), &opts);
+}
+
+/* A test case for a bpf_offload_netdev->offload handling bug:
+ * - create a veth device (does not support offload);
+ * - create a device-bound XDP program with the BPF_F_XDP_DEV_BOUND_ONLY flag
+ * (such programs are not offloaded);
+ * - create a device-bound XDP program without flags (such programs are offloaded).
+ * This might lead to a 'BUG: kernel NULL pointer dereference'.
+ */
+void test_xdp_dev_bound_only_offdev(void)
+{
+ struct nstoken *tok = NULL;
+ __u32 ifindex;
+ int fd1 = -1;
+ int fd2 = -1;
+
+ SYS(out, "ip netns add " LOCAL_NETNS);
+ tok = open_netns(LOCAL_NETNS);
+ if (!ASSERT_OK_PTR(tok, "open_netns"))
+ goto out;
+ SYS(out, "ip link add eth42 type veth");
+ ifindex = if_nametoindex("eth42");
+ if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex")) {
+ perror("if_nametoindex");
+ goto out;
+ }
+ fd1 = load_dummy_prog("dummy1", ifindex, BPF_F_XDP_DEV_BOUND_ONLY);
+ if (!ASSERT_GE(fd1, 0, "load_dummy_prog #1")) {
+ perror("load_dummy_prog #1");
+ goto out;
+ }
+ /* A program with an ifindex is considered offloaded; however, veth
+ * does not support offload, so an error should be reported.
+ */
+ fd2 = load_dummy_prog("dummy2", ifindex, 0);
+ ASSERT_EQ(fd2, -EINVAL, "load_dummy_prog #2 (offloaded)");
+
+out:
+ close(fd1);
+ close(fd2);
+ close_netns(tok);
+ /* eth42 was added inside netns, removing the netns will
+ * also remove eth42 veth pair.
+ */
+ SYS_NOFAIL("ip netns del " LOCAL_NETNS);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
index 626c461fa34d..4439ba9392f8 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
@@ -226,7 +226,7 @@ static int verify_xsk_metadata(struct xsk *xsk)
__u64 comp_addr;
void *data;
__u64 addr;
- __u32 idx;
+ __u32 idx = 0;
int ret;
ret = recvfrom(xsk_socket__fd(xsk->socket), NULL, 0, MSG_DONTWAIT, NULL, NULL);
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index d9660e7200e2..c997e3e3d3fb 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -490,6 +490,8 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
}
}
+int bpf_cubic_acked_called = 0;
+
void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
const struct ack_sample *sample)
{
@@ -497,6 +499,7 @@ void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
struct bictcp *ca = inet_csk_ca(sk);
__u32 delay;
+ bpf_cubic_acked_called = 1;
/* Some calls are for duplicates without timestamps */
if (sample->rtt_us < 0)
return;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
index dd923dc637d5..dd923dc637d5 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
index 96131b9a1caa..96131b9a1caa 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
diff --git a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
index 4ce76eb064c4..d461746fd3c1 100644
--- a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
+++ b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
@@ -15,13 +15,16 @@ static int empty_callback(__u32 index, void *data)
return 0;
}
+static int outer_loop(__u32 index, void *data)
+{
+ bpf_loop(nr_loops, empty_callback, NULL, 0);
+ __sync_add_and_fetch(&hits, nr_loops);
+ return 0;
+}
+
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int benchmark(void *ctx)
{
- for (int i = 0; i < 1000; i++) {
- bpf_loop(nr_loops, empty_callback, NULL, 0);
-
- __sync_add_and_fetch(&hits, nr_loops);
- }
+ bpf_loop(1000, outer_loop, NULL, 0);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index 38a57a2e70db..799fff4995d8 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -99,6 +99,9 @@
#elif defined(__TARGET_ARCH_arm64)
#define SYSCALL_WRAPPER 1
#define SYS_PREFIX "__arm64_"
+#elif defined(__TARGET_ARCH_riscv)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__riscv_"
#else
#define SYSCALL_WRAPPER 0
#define SYS_PREFIX "__se_"
diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c
index 76d661b20e87..56c764df8196 100644
--- a/tools/testing/selftests/bpf/progs/cb_refs.c
+++ b/tools/testing/selftests/bpf/progs/cb_refs.c
@@ -33,6 +33,7 @@ int underflow_prog(void *ctx)
if (!p)
return 0;
bpf_for_each_map_elem(&array_map, cb1, &p, 0);
+ bpf_kfunc_call_test_release(p);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/connect_unix_prog.c b/tools/testing/selftests/bpf/progs/connect_unix_prog.c
new file mode 100644
index 000000000000..ca8aa2f116b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/connect_unix_prog.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/connect_unix")
+int connect_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ /* Rewrite destination. */
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 0;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 0;
+
+ sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
+ bpf_core_type_id_kernel(struct sockaddr_un));
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 0;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
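
[Editor's note] For orientation, the hook above is exercised by a plain AF_UNIX connect() issued from within the cgroup. A hedged sketch of the caller side; the original destination name here is hypothetical, the point being that cgroup/connect_unix rewrites whatever it is to SERVUN_REWRITE_ADDRESS before routing:

#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int connect_abstract(void)
{
	/* Leading NUL selects the abstract unix namespace; name is hypothetical. */
	static const char name[] = "\0bpf_cgroup_unix_test";
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	socklen_t len = offsetof(struct sockaddr_un, sun_path) + sizeof(name) - 1;
	int fd, err;

	memcpy(addr.sun_path, name, sizeof(name) - 1);
	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;
	/* The cgroup/connect_unix program runs here and may rewrite addr. */
	err = connect(fd, (struct sockaddr *)&addr, len);
	close(fd);
	return err;
}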
diff --git a/tools/testing/selftests/bpf/progs/exceptions.c b/tools/testing/selftests/bpf/progs/exceptions.c
new file mode 100644
index 000000000000..2811ee842b01
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+#ifndef ETH_P_IP
+#define ETH_P_IP 0x0800
+#endif
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 4);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static __noinline int static_func(u64 i)
+{
+ bpf_throw(32);
+ return i;
+}
+
+__noinline int global2static_simple(u64 i)
+{
+ static_func(i + 2);
+ return i - 1;
+}
+
+__noinline int global2static(u64 i)
+{
+ if (i == ETH_P_IP)
+ bpf_throw(16);
+ return static_func(i);
+}
+
+static __noinline int static2global(u64 i)
+{
+ return global2static(i) + i;
+}
+
+SEC("tc")
+int exception_throw_always_1(struct __sk_buff *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+/* In this case, the global func will never be seen executing after the call to
+ * the static subprog, hence the verifier will DCE the remaining instructions.
+ * Ensure we are resilient to that.
+ */
+SEC("tc")
+int exception_throw_always_2(struct __sk_buff *ctx)
+{
+ return global2static_simple(ctx->protocol);
+}
+
+SEC("tc")
+int exception_throw_unwind_1(struct __sk_buff *ctx)
+{
+ return static2global(bpf_ntohs(ctx->protocol));
+}
+
+SEC("tc")
+int exception_throw_unwind_2(struct __sk_buff *ctx)
+{
+ return static2global(bpf_ntohs(ctx->protocol) - 1);
+}
+
+SEC("tc")
+int exception_throw_default(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 1;
+}
+
+SEC("tc")
+int exception_throw_default_value(struct __sk_buff *ctx)
+{
+ bpf_throw(5);
+ return 1;
+}
+
+SEC("tc")
+int exception_tail_call_target(struct __sk_buff *ctx)
+{
+ bpf_throw(16);
+ return 0;
+}
+
+static __noinline
+int exception_tail_call_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 10;
+
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return ret;
+}
+
+SEC("tc")
+int exception_tail_call(struct __sk_buff *ctx) {
+ volatile int ret = 0;
+
+ ret = exception_tail_call_subprog(ctx);
+ return ret + 8;
+}
+
+__noinline int exception_ext_global(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ return ret;
+}
+
+static __noinline int exception_ext_static(struct __sk_buff *ctx)
+{
+ return exception_ext_global(ctx);
+}
+
+SEC("tc")
+int exception_ext(struct __sk_buff *ctx)
+{
+ return exception_ext_static(ctx);
+}
+
+__noinline int exception_cb_mod_global(u64 cookie)
+{
+ volatile int ret = 0;
+
+ return ret;
+}
+
+/* Example of how the exception callback supplied during verification can still
+ * introduce extensions by calling dummy global functions, and thereby alter
+ * runtime behavior.
+ *
+ * Right now we don't allow freplace attachment to the exception callback
+ * itself, but if the need arises this restriction is technically feasible to
+ * relax in the future.
+ */
+__noinline int exception_cb_mod(u64 cookie)
+{
+ return exception_cb_mod_global(cookie) + cookie + 10;
+}
+
+SEC("tc")
+__exception_cb(exception_cb_mod)
+int exception_ext_mod_cb_runtime(struct __sk_buff *ctx)
+{
+ bpf_throw(25);
+ return 0;
+}
+
+__noinline static int subprog(struct __sk_buff *ctx)
+{
+ return bpf_ktime_get_ns();
+}
+
+__noinline static int throwing_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->tstamp)
+ bpf_throw(0);
+ return bpf_ktime_get_ns();
+}
+
+__noinline int global_subprog(struct __sk_buff *ctx)
+{
+ return bpf_ktime_get_ns();
+}
+
+__noinline int throwing_global_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->tstamp)
+ bpf_throw(0);
+ return bpf_ktime_get_ns();
+}
+
+SEC("tc")
+int exception_throw_subprog(struct __sk_buff *ctx)
+{
+ switch (ctx->protocol) {
+ case 1:
+ return subprog(ctx);
+ case 2:
+ return global_subprog(ctx);
+ case 3:
+ return throwing_subprog(ctx);
+ case 4:
+ return throwing_global_subprog(ctx);
+ default:
+ break;
+ }
+ bpf_throw(1);
+ return 0;
+}
+
+__noinline int assert_nz_gfunc(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert(cookie != 0);
+ return 0;
+}
+
+__noinline int assert_zero_gfunc(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert_eq(cookie, 0);
+ return 0;
+}
+
+__noinline int assert_neg_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_lt(cookie, 0);
+ return 0;
+}
+
+__noinline int assert_pos_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_gt(cookie, 0);
+ return 0;
+}
+
+__noinline int assert_negeq_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_le(cookie, -1);
+ return 0;
+}
+
+__noinline int assert_poseq_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_ge(cookie, 1);
+ return 0;
+}
+
+__noinline int assert_nz_gfunc_with(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert_with(cookie != 0, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_zero_gfunc_with(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert_eq_with(cookie, 0, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_neg_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_lt_with(cookie, 0, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_pos_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_gt_with(cookie, 0, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_negeq_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_le_with(cookie, -1, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_poseq_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_ge_with(cookie, 1, cookie + 100);
+ return 0;
+}
+
+#define check_assert(name, cookie, tag) \
+SEC("tc") \
+int exception##tag##name(struct __sk_buff *ctx) \
+{ \
+ return name(cookie) + 1; \
+}
+
+check_assert(assert_nz_gfunc, 5, _);
+check_assert(assert_zero_gfunc, 0, _);
+check_assert(assert_neg_gfunc, -100, _);
+check_assert(assert_pos_gfunc, 100, _);
+check_assert(assert_negeq_gfunc, -1, _);
+check_assert(assert_poseq_gfunc, 1, _);
+
+check_assert(assert_nz_gfunc_with, 5, _);
+check_assert(assert_zero_gfunc_with, 0, _);
+check_assert(assert_neg_gfunc_with, -100, _);
+check_assert(assert_pos_gfunc_with, 100, _);
+check_assert(assert_negeq_gfunc_with, -1, _);
+check_assert(assert_poseq_gfunc_with, 1, _);
+
+check_assert(assert_nz_gfunc, 0, _bad_);
+check_assert(assert_zero_gfunc, 5, _bad_);
+check_assert(assert_neg_gfunc, 100, _bad_);
+check_assert(assert_pos_gfunc, -100, _bad_);
+check_assert(assert_negeq_gfunc, 1, _bad_);
+check_assert(assert_poseq_gfunc, -1, _bad_);
+
+check_assert(assert_nz_gfunc_with, 0, _bad_);
+check_assert(assert_zero_gfunc_with, 5, _bad_);
+check_assert(assert_neg_gfunc_with, 100, _bad_);
+check_assert(assert_pos_gfunc_with, -100, _bad_);
+check_assert(assert_negeq_gfunc_with, 1, _bad_);
+check_assert(assert_poseq_gfunc_with, -1, _bad_);
+
+SEC("tc")
+int exception_assert_range(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range(time, 0, ~0ULL);
+ return 1;
+}
+
+SEC("tc")
+int exception_assert_range_with(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range_with(time, 0, ~0ULL, 10);
+ return 1;
+}
+
+SEC("tc")
+int exception_bad_assert_range(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range(time, -100, 100);
+ return 1;
+}
+
+SEC("tc")
+int exception_bad_assert_range_with(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range_with(time, -1000, 1000, 10);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
new file mode 100644
index 000000000000..e1e5c54a6a11
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <limits.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+#define check_assert(type, op, name, value) \
+ SEC("?tc") \
+ __log_level(2) __failure \
+ int check_assert_##op##_##name(void *ctx) \
+ { \
+ type num = bpf_ktime_get_ns(); \
+ bpf_assert_##op(num, value); \
+ return *(u64 *)num; \
+ }
+
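+/* For instance, check_assert(s64, eq, int_min, INT_MIN) below becomes:
+ *
+ *   SEC("?tc")
+ *   __log_level(2) __failure
+ *   int check_assert_eq_int_min(void *ctx)
+ *   {
+ *           s64 num = bpf_ktime_get_ns();
+ *           bpf_assert_eq(num, INT_MIN);
+ *           return *(u64 *)num;
+ *   }
+ *
+ * The bogus dereference of num guarantees a verifier failure, and the
+ * __msg() placed before each instantiation checks the register state the
+ * assertion narrowed num down to in the level-2 verifier log.
+ */
+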
+__msg(": R0_w=-2147483648 R10=fp0")
+check_assert(s64, eq, int_min, INT_MIN);
+__msg(": R0_w=2147483647 R10=fp0")
+check_assert(s64, eq, int_max, INT_MAX);
+__msg(": R0_w=0 R10=fp0")
+check_assert(s64, eq, zero, 0);
+__msg(": R0_w=-9223372036854775808 R1_w=-9223372036854775808 R10=fp0")
+check_assert(s64, eq, llong_min, LLONG_MIN);
+__msg(": R0_w=9223372036854775807 R1_w=9223372036854775807 R10=fp0")
+check_assert(s64, eq, llong_max, LLONG_MAX);
+
+__msg(": R0_w=scalar(smax=2147483646) R10=fp0")
+check_assert(s64, lt, pos, INT_MAX);
+__msg(": R0_w=scalar(smax=-1,umin=9223372036854775808,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+check_assert(s64, lt, zero, 0);
+__msg(": R0_w=scalar(smax=-2147483649,umin=9223372036854775808,umax=18446744071562067967,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+check_assert(s64, lt, neg, INT_MIN);
+
+__msg(": R0_w=scalar(smax=2147483647) R10=fp0")
+check_assert(s64, le, pos, INT_MAX);
+__msg(": R0_w=scalar(smax=0) R10=fp0")
+check_assert(s64, le, zero, 0);
+__msg(": R0_w=scalar(smax=-2147483648,umin=9223372036854775808,umax=18446744071562067968,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+check_assert(s64, le, neg, INT_MIN);
+
+__msg(": R0_w=scalar(smin=umin=2147483648,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, gt, pos, INT_MAX);
+__msg(": R0_w=scalar(smin=umin=1,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, gt, zero, 0);
+__msg(": R0_w=scalar(smin=-2147483647) R10=fp0")
+check_assert(s64, gt, neg, INT_MIN);
+
+__msg(": R0_w=scalar(smin=umin=2147483647,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, ge, pos, INT_MAX);
+__msg(": R0_w=scalar(smin=0,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
+check_assert(s64, ge, zero, 0);
+__msg(": R0_w=scalar(smin=-2147483648) R10=fp0")
+check_assert(s64, ge, neg, INT_MIN);
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R0=0 R1=ctx(off=0,imm=0) R2=scalar(smin=smin32=-2147483646,smax=smax32=2147483645) R10=fp0")
+int check_assert_range_s64(struct __sk_buff *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ s64 num;
+
+ _Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
+ if (!sk)
+ return 0;
+ num = sk->rx_queue_mapping;
+ bpf_assert_range(num, INT_MIN + 2, INT_MAX - 2);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=ctx(off=0,imm=0) R2=scalar(smin=umin=smin32=umin32=4096,smax=umax=smax32=umax32=8192,var_off=(0x0; 0x3fff))")
+int check_assert_range_u64(struct __sk_buff *ctx)
+{
+ u64 num = ctx->len;
+
+ bpf_assert_range(num, 4096, 8192);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R0=0 R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
+int check_assert_single_range_s64(struct __sk_buff *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ s64 num;
+
+ _Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
+ if (!sk)
+ return 0;
+ num = sk->rx_queue_mapping;
+
+ bpf_assert_range(num, 4096, 4096);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
+int check_assert_single_range_u64(struct __sk_buff *ctx)
+{
+ u64 num = ctx->len;
+
+ bpf_assert_range(num, 4096, 4096);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=pkt(off=64,r=64,imm=0) R2=pkt_end(off=0,imm=0) R6=pkt(off=0,r=64,imm=0) R10=fp0")
+int check_assert_generic(struct __sk_buff *ctx)
+{
+ u8 *data_end = (void *)(long)ctx->data_end;
+ u8 *data = (void *)(long)ctx->data;
+
+ bpf_assert(data + 64 <= data_end);
+ return data[128];
+}
+
+SEC("?fentry/bpf_check")
+__failure __msg("At program exit the register R0 has value (0x40; 0x0)")
+int check_assert_with_return(void *ctx)
+{
+ bpf_assert_with(!ctx, 64);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exceptions_ext.c b/tools/testing/selftests/bpf/progs/exceptions_ext.c
new file mode 100644
index 000000000000..743c05185d9b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_ext.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+SEC("?fentry")
+int pfentry(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fentry")
+int throwing_fentry(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
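+/* Exception callback wired up via __exception_cb() below: the cookie passed
+ * to bpf_throw() arrives here, and the callback's return value becomes the
+ * program's return value, so throwing_extension's bpf_throw(64) should
+ * presumably yield 64 + 64 = 128 when run.
+ */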
+__noinline int exception_cb(u64 cookie)
+{
+ return cookie + 64;
+}
+
+SEC("?freplace")
+int extension(struct __sk_buff *ctx)
+{
+ return 0;
+}
+
+SEC("?freplace")
+__exception_cb(exception_cb)
+int throwing_exception_cb_extension(u64 cookie)
+{
+ bpf_throw(32);
+ return 0;
+}
+
+SEC("?freplace")
+__exception_cb(exception_cb)
+int throwing_extension(struct __sk_buff *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+SEC("?fexit")
+int pfexit(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fexit")
+int throwing_fexit(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?fmod_ret")
+int pfmod_ret(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fmod_ret")
+int throwing_fmod_ret(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c
new file mode 100644
index 000000000000..8c0ef2742208
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern void bpf_rcu_read_lock(void) __ksym;
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+struct foo {
+ struct bpf_rb_node node;
+};
+
+struct hmap_elem {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 64);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} hmap SEC(".maps");
+
+private(A) struct bpf_spin_lock lock;
+private(A) struct bpf_rb_root rbtree __contains(foo, node);
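+
+/* The private() macro above expands, for example, the lock declaration to:
+ *
+ *   SEC(".bss.A") __hidden __attribute__((aligned(8))) struct bpf_spin_lock lock;
+ *
+ * i.e. the lock and the rbtree root land in their own hidden .bss.A section,
+ * presumably a requirement for globals containing these special kernel types.
+ */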
+
+__noinline void *exception_cb_bad_ret_type(u64 cookie)
+{
+ return NULL;
+}
+
+__noinline int exception_cb_bad_arg_0(void)
+{
+ return 0;
+}
+
+__noinline int exception_cb_bad_arg_2(int a, int b)
+{
+ return 0;
+}
+
+__noinline int exception_cb_ok_arg_small(int a)
+{
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_ret_type)
+__failure __msg("Global function exception_cb_bad_ret_type() doesn't return scalar.")
+int reject_exception_cb_type_1(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_arg_0)
+__failure __msg("exception cb only supports single integer argument")
+int reject_exception_cb_type_2(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_arg_2)
+__failure __msg("exception cb only supports single integer argument")
+int reject_exception_cb_type_3(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_ok_arg_small)
+__success
+int reject_exception_cb_type_4(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline
+static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback subprog")
+int reject_async_callback_throw(struct __sk_buff *ctx)
+{
+ struct hmap_elem *elem;
+
+ elem = bpf_map_lookup_elem(&hmap, &(int){0});
+ if (!elem)
+ return 0;
+ return bpf_timer_set_callback(&elem->timer, timer_cb);
+}
+
+__noinline static int subprog_lock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_spin_lock(&lock);
+ if (ctx->len)
+ bpf_throw(0);
+ return ret;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_with_lock(void *ctx)
+{
+ bpf_spin_lock(&lock);
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_subprog_with_lock(void *ctx)
+{
+ return subprog_lock(ctx);
+}
+
+SEC("?tc")
+__failure __msg("bpf_rcu_read_unlock is missing")
+int reject_with_rcu_read_lock(void *ctx)
+{
+ bpf_rcu_read_lock();
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int throwing_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->len)
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_rcu_read_unlock is missing")
+int reject_subprog_with_rcu_read_lock(void *ctx)
+{
+ bpf_rcu_read_lock();
+ return throwing_subprog(ctx);
+}
+
+static bool rbless(struct bpf_rb_node *n1, const struct bpf_rb_node *n2)
+{
+ bpf_throw(0);
+ return true;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_with_rbtree_add_throw(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&rbtree, &f->node, rbless);
+ bpf_spin_unlock(&lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_reference(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int subprog_ref(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int subprog_cb_ref(u32 i, void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_cb_reference(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_loop(5, subprog_cb_ref, NULL, 0);
+ bpf_obj_drop(f);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_with_cb(void *ctx)
+{
+ bpf_loop(5, subprog_cb_ref, NULL, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_subprog_reference(void *ctx)
+{
+ return subprog_ref(ctx) + 1;
+}
+
+__noinline int throwing_exception_cb(u64 c)
+{
+ bpf_throw(0);
+ return c;
+}
+
+__noinline int exception_cb1(u64 c)
+{
+ return c;
+}
+
+__noinline int exception_cb2(u64 c)
+{
+ return c;
+}
+
+static __noinline int static_func(struct __sk_buff *ctx)
+{
+ return exception_cb1(ctx->tstamp);
+}
+
+__noinline int global_func(struct __sk_buff *ctx)
+{
+ return exception_cb1(ctx->tstamp);
+}
+
+SEC("?tc")
+__exception_cb(throwing_exception_cb)
+__failure __msg("cannot be called from callback subprog")
+int reject_throwing_exception_cb(struct __sk_buff *ctx)
+{
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__failure __msg("cannot call exception cb directly")
+int reject_exception_cb_call_global_func(struct __sk_buff *ctx)
+{
+ return global_func(ctx);
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__failure __msg("cannot call exception cb directly")
+int reject_exception_cb_call_static_func(struct __sk_buff *ctx)
+{
+ return static_func(ctx);
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__exception_cb(exception_cb2)
+__failure __msg("multiple exception callback tags for main subprog")
+int reject_multiple_exception_cb(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 16;
+}
+
+__noinline int exception_cb_bad_ret(u64 c)
+{
+ return c;
+}
+
+SEC("?fentry/bpf_check")
+__exception_cb(exception_cb_bad_ret)
+__failure __msg("At program exit the register R0 has unknown scalar value should")
+int reject_set_exception_cb_bad_ret1(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fentry/bpf_check")
+__failure __msg("At program exit the register R0 has value (0x40; 0x0) should")
+int reject_set_exception_cb_bad_ret2(void *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+__noinline static int loop_cb1(u32 index, int *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int loop_cb2(u32 index, int *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_exception_throw_cb(struct __sk_buff *ctx)
+{
+ bpf_loop(5, loop_cb1, NULL, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_exception_throw_cb_diff(struct __sk_buff *ctx)
+{
+ if (ctx->protocol)
+ bpf_loop(5, loop_cb1, NULL, 0);
+ else
+ bpf_loop(5, loop_cb2, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
new file mode 100644
index 000000000000..9c078f34bbb2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
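+/* The leading '\0' makes this an abstract unix socket address, one that
+ * lives in the abstract namespace rather than on the filesystem.
+ */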
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/getpeername_unix")
+int getpeername_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 1;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 1;
+
+ sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
+ bpf_core_type_id_kernel(struct sockaddr_un));
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
new file mode 100644
index 000000000000..ac7145111497
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/getsockname_unix")
+int getsockname_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 1;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 1;
+
+ sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
+ bpf_core_type_id_kernel(struct sockaddr_un));
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index 6b9b3c56f009..c20c4e38b71c 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -14,6 +14,13 @@ int my_pid;
int arr[256];
int small_arr[16] SEC(".data.small_arr");
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 10);
+ __type(key, int);
+ __type(value, int);
+} amap SEC(".maps");
+
#ifdef REAL_TEST
#define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0
#else
@@ -716,4 +723,692 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx)
return 0;
}
+SEC("?raw_tp")
+__failure
+__msg("R1 type=scalar expected=fp")
+__naked int delayed_read_mark(void)
+{
+ /* This is equivalent to C program below.
+ * The call to bpf_iter_num_next() is reachable with r7 values &fp[-16] and 0xdead.
+ * State with r7=&fp[-16] is visited first and follows the r6 == 42 ... continue branch.
+ * At this point the iterator next() call is reached with an r7 that has no read mark.
+ * The loop body with r7=0xdead would only be visited if the verifier decided to continue
+ * with a second loop iteration. Absence of a read mark on r7 might affect the state
+ * equivalence logic used for iterator convergence tracking.
+ *
+ * r7 = &fp[-16]
+ * fp[-16] = 0
+ * r6 = bpf_get_prandom_u32()
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * r6++
+ * if (r6 == 42) {
+ * r7 = 0xdead
+ * continue;
+ * }
+ * bpf_probe_read_user(r7, 8, 0xdeadbeef); // this is not safe
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "r7 = r10;"
+ "r7 += -16;"
+ "r0 = 0;"
+ "*(u64 *)(r7 + 0) = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 2f;"
+ "r6 += 1;"
+ "if r6 != 42 goto 3f;"
+ "r7 = 0xdead;"
+ "goto 1b;"
+ "3:"
+ "r1 = r7;"
+ "r2 = 8;"
+ "r3 = 0xdeadbeef;"
+ "call %[bpf_probe_read_user];"
+ "goto 1b;"
+ "2:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_probe_read_user)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__naked int delayed_precision_mark(void)
+{
+ /* This is equivalent to C program below.
+ * The test is similar to delayed_read_mark but verifies that incomplete
+ * precision marks don't fool the verifier.
+ * The call to bpf_iter_num_next() is reachable with r7 values -16 and -32.
+ * State with r7=-16 is visited first and follows the r6 == 42 ... continue branch.
+ * At this point the iterator next() call is reached with an r7 that has no read
+ * and precision marks.
+ * The loop body with r7=-32 would only be visited if the verifier decided to continue
+ * with a second loop iteration. Absence of a precision mark on r7 might affect the state
+ * equivalence logic used for iterator convergence tracking.
+ *
+ * r8 = 0
+ * fp[-16] = 0
+ * r7 = -16
+ * r6 = bpf_get_prandom_u32()
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (r6 == 42) {
+ * r7 = -32
+ * r6 = bpf_get_prandom_u32()
+ * continue;
+ * }
+ * r0 = r10
+ * r0 += r7
+ * r8 = *(u64 *)(r0 + 0) // this is not safe
+ * r6 = bpf_get_prandom_u32()
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return r8
+ */
+ asm volatile (
+ "r8 = 0;"
+ "*(u64 *)(r10 - 16) = r8;"
+ "r7 = -16;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ "r1 = r10;"
+ "r1 += -8;\n"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 2f;"
+ "if r6 != 42 goto 3f;"
+ "r7 = -32;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "goto 1b;\n"
+ "3:"
+ "r0 = r10;"
+ "r0 += r7;"
+ "r8 = *(u64 *)(r0 + 0);"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "goto 1b;\n"
+ "2:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = r8;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_probe_read_user)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked int loop_state_deps1(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * The case turns out to be tricky in the sense that:
+ * - states with c=-25 are explored only on a second iteration
+ * of the outer loop;
+ * - states with read+precise mark on c are explored only on
+ * second iteration of the inner loop and in a state which
+ * is pushed to states stack first.
+ *
+ * Depending on the details of the iterator convergence logic,
+ * the verifier might stop state traversal too early and miss the
+ * unsafe c=-25 memory access.
+ *
+ * j = iter_new(); // fp[-16]
+ * a = 0; // r6
+ * b = 0; // r7
+ * c = -24; // r8
+ * while (iter_next(j)) {
+ * i = iter_new(); // fp[-8]
+ * a = 0; // r6
+ * b = 0; // r7
+ * while (iter_next(i)) {
+ * if (a == 1) {
+ * a = 0;
+ * b = 1;
+ * } else if (a == 0) {
+ * a = 1;
+ * if (random() == 42)
+ * continue;
+ * if (b == 1) {
+ * *(r10 + c) = 7; // this is not safe
+ * iter_destroy(i);
+ * iter_destroy(j);
+ * return;
+ * }
+ * }
+ * }
+ * iter_destroy(i);
+ * a = 0;
+ * b = 0;
+ * c = -25;
+ * }
+ * iter_destroy(j);
+ * return;
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -16;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "r8 = -24;"
+ "j_loop_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto j_loop_end_%=;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "i_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto i_loop_end_%=;"
+ "check_one_r6_%=:"
+ "if r6 != 1 goto check_zero_r6_%=;"
+ "r6 = 0;"
+ "r7 = 1;"
+ "goto i_loop_%=;"
+ "check_zero_r6_%=:"
+ "if r6 != 0 goto i_loop_%=;"
+ "r6 = 1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto check_one_r7_%=;"
+ "goto i_loop_%=;"
+ "check_one_r7_%=:"
+ "if r7 != 1 goto i_loop_%=;"
+ "r0 = r10;"
+ "r0 += r8;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "i_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "r8 = -25;"
+ "goto j_loop_%=;"
+ "j_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked int loop_state_deps2(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * The case turns out to be tricky in the sense that:
+ * - states with read+precise mark on c are explored only on a second
+ * iteration of the first inner loop and in a state which is pushed to
+ * states stack first.
+ * - states with c=-25 are explored only on a second iteration of the
+ * second inner loop and in a state which is pushed to states stack
+ * first.
+ *
+ * Depending on the details of the iterator convergence logic,
+ * the verifier might stop state traversal too early and miss the
+ * unsafe c=-25 memory access.
+ *
+ * j = iter_new(); // fp[-16]
+ * a = 0; // r6
+ * b = 0; // r7
+ * c = -24; // r8
+ * while (iter_next(j)) {
+ * i = iter_new(); // fp[-8]
+ * a = 0; // r6
+ * b = 0; // r7
+ * while (iter_next(i)) {
+ * if (a == 1) {
+ * a = 0;
+ * b = 1;
+ * } else if (a == 0) {
+ * a = 1;
+ * if (random() == 42)
+ * continue;
+ * if (b == 1) {
+ * *(r10 + c) = 7; // this is not safe
+ * iter_destroy(i);
+ * iter_destroy(j);
+ * return;
+ * }
+ * }
+ * }
+ * iter_destroy(i);
+ * i = iter_new(); // fp[-8]
+ * a = 0; // r6
+ * b = 0; // r7
+ * while (iter_next(i)) {
+ * if (a == 1) {
+ * a = 0;
+ * b = 1;
+ * } else if (a == 0) {
+ * a = 1;
+ * if (random() == 42)
+ * continue;
+ * if (b == 1) {
+ * a = 0;
+ * c = -25;
+ * }
+ * }
+ * }
+ * iter_destroy(i);
+ * }
+ * iter_destroy(j);
+ * return;
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -16;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "r8 = -24;"
+ "j_loop_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto j_loop_end_%=;"
+
+ /* first inner loop */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "i_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto i_loop_end_%=;"
+ "check_one_r6_%=:"
+ "if r6 != 1 goto check_zero_r6_%=;"
+ "r6 = 0;"
+ "r7 = 1;"
+ "goto i_loop_%=;"
+ "check_zero_r6_%=:"
+ "if r6 != 0 goto i_loop_%=;"
+ "r6 = 1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto check_one_r7_%=;"
+ "goto i_loop_%=;"
+ "check_one_r7_%=:"
+ "if r7 != 1 goto i_loop_%=;"
+ "r0 = r10;"
+ "r0 += r8;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "i_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+
+ /* second inner loop */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "i2_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto i2_loop_end_%=;"
+ "check2_one_r6_%=:"
+ "if r6 != 1 goto check2_zero_r6_%=;"
+ "r6 = 0;"
+ "r7 = 1;"
+ "goto i2_loop_%=;"
+ "check2_zero_r6_%=:"
+ "if r6 != 0 goto i2_loop_%=;"
+ "r6 = 1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto check2_one_r7_%=;"
+ "goto i2_loop_%=;"
+ "check2_one_r7_%=:"
+ "if r7 != 1 goto i2_loop_%=;"
+ "r6 = 0;"
+ "r8 = -25;"
+ "goto i2_loop_%=;"
+ "i2_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+
+ "r6 = 0;"
+ "r7 = 0;"
+ "goto j_loop_%=;"
+ "j_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__success
+__naked int triple_continue(void)
+{
+ /* This is equivalent to C program below.
+ * High branching factor of the loop body turned out to be
+ * problematic for one of the iterator convergence tracking
+ * algorithms explored.
+ *
+ * r6 = bpf_get_prandom_u32()
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (bpf_get_prandom_u32() != 42)
+ * continue;
+ * if (bpf_get_prandom_u32() != 42)
+ * continue;
+ * if (bpf_get_prandom_u32() != 42)
+ * continue;
+ * r0 += 0;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto loop_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto loop_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto loop_%=;"
+ "r0 += 0;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__success
+__naked int widen_spill(void)
+{
+ /* This is equivalent to C program below.
+ * The counter is stored in fp[-16]; if this counter is not widened,
+ * the verifier states representing loop iterations would never converge.
+ *
+ * fp[-16] = 0
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * r0 = fp[-16];
+ * r0 += 1;
+ * fp[-16] = r0;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "r0 = 0;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "r0 = *(u64 *)(r10 - 16);"
+ "r0 += 1;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("raw_tp")
+__success
+__naked int checkpoint_states_deletion(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * int *a, *b, *c, *d, *e, *f;
+ * int i, sum = 0;
+ * bpf_for(i, 0, 10) {
+ * a = bpf_map_lookup_elem(&amap, &i);
+ * b = bpf_map_lookup_elem(&amap, &i);
+ * c = bpf_map_lookup_elem(&amap, &i);
+ * d = bpf_map_lookup_elem(&amap, &i);
+ * e = bpf_map_lookup_elem(&amap, &i);
+ * f = bpf_map_lookup_elem(&amap, &i);
+ * if (a) sum += 1;
+ * if (b) sum += 1;
+ * if (c) sum += 1;
+ * if (d) sum += 1;
+ * if (e) sum += 1;
+ * if (f) sum += 1;
+ * }
+ * return 0;
+ *
+ * The body of the loop spawns multiple simulation paths
+ * with different combinations of NULL/non-NULL information for a/b/c/d/e/f.
+ * Each combination is unique from the states_equal() point of view.
+ * An explored-states checkpoint is created after each iterator next call.
+ * Iterator convergence logic expects that eventually the current state
+ * becomes equal to one of the explored states, at which point loop
+ * exploration finishes (at least for that specific path).
+ * The verifier evicts explored states with a high miss-to-hit ratio
+ * to avoid comparing the current state with too many explored
+ * states per instruction.
+ * This test is designed to "stress test" the eviction policy defined by the formula:
+ *
+ * sl->miss_cnt > sl->hit_cnt * N + N // if true sl->state is evicted
+ *
+ * Currently N is set to 64, which allows for 6 variables in this test.
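+ * For example, a checkpoint that is never hit (hit_cnt == 0) is kept
+ * only until miss_cnt exceeds 0 * 64 + 64, i.e. for 64 misses.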
+ */
+ asm volatile (
+ "r6 = 0;" /* a */
+ "r7 = 0;" /* b */
+ "r8 = 0;" /* c */
+ "*(u64 *)(r10 - 24) = r6;" /* d */
+ "*(u64 *)(r10 - 32) = r6;" /* e */
+ "*(u64 *)(r10 - 40) = r6;" /* f */
+ "r9 = 0;" /* sum */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+
+ "*(u64 *)(r10 - 16) = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "r6 = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "r7 = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "r8 = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "*(u64 *)(r10 - 24) = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "*(u64 *)(r10 - 32) = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "*(u64 *)(r10 - 40) = r0;"
+
+ "if r6 == 0 goto +1;"
+ "r9 += 1;"
+ "if r7 == 0 goto +1;"
+ "r9 += 1;"
+ "if r8 == 0 goto +1;"
+ "r9 += 1;"
+ "r0 = *(u64 *)(r10 - 24);"
+ "if r0 == 0 goto +1;"
+ "r9 += 1;"
+ "r0 = *(u64 *)(r10 - 32);"
+ "if r0 == 0 goto +1;"
+ "r9 += 1;"
+ "r0 = *(u64 *)(r10 - 40);"
+ "if r0 == 0 goto +1;"
+ "r9 += 1;"
+
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm_addr(amap)
+ : __clobber_all
+ );
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters_css.c b/tools/testing/selftests/bpf/progs/iters_css.c
new file mode 100644
index 000000000000..ec1f6c2f590b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_css.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t target_pid;
+u64 root_cg_id, leaf_cg_id;
+u64 first_cg_id, last_cg_id;
+
+int pre_order_cnt, post_order_cnt, tree_high;
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
+int iter_css_for_each(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct cgroup_subsys_state *root_css, *leaf_css, *pos;
+ struct cgroup *root_cgrp, *leaf_cgrp, *cur_cgrp;
+
+ if (cur_task->pid != target_pid)
+ return 0;
+
+ root_cgrp = bpf_cgroup_from_id(root_cg_id);
+
+ if (!root_cgrp)
+ return 0;
+
+ leaf_cgrp = bpf_cgroup_from_id(leaf_cg_id);
+
+ if (!leaf_cgrp) {
+ bpf_cgroup_release(root_cgrp);
+ return 0;
+ }
+ root_css = &root_cgrp->self;
+ leaf_css = &leaf_cgrp->self;
+ pre_order_cnt = post_order_cnt = tree_high = 0;
+ first_cg_id = last_cg_id = 0;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
+ cur_cgrp = pos->cgroup;
+ post_order_cnt++;
+ last_cg_id = cur_cgrp->kn->id;
+ }
+
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_PRE) {
+ cur_cgrp = pos->cgroup;
+ pre_order_cnt++;
+ if (!first_cg_id)
+ first_cg_id = cur_cgrp->kn->id;
+ }
+
+ bpf_for_each(css, pos, leaf_css, BPF_CGROUP_ITER_ANCESTORS_UP)
+ tree_high++;
+
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_ANCESTORS_UP)
+ tree_high--;
+ bpf_rcu_read_unlock();
+ bpf_cgroup_release(root_cgrp);
+ bpf_cgroup_release(leaf_cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_css_task.c b/tools/testing/selftests/bpf/progs/iters_css_task.c
new file mode 100644
index 000000000000..9ac758649cb8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_css_task.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+
+pid_t target_pid;
+int css_task_cnt;
+u64 cg_id;
+
+SEC("lsm/file_mprotect")
+int BPF_PROG(iter_css_task_for_each, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot, int ret)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ if (cur_task->pid != target_pid)
+ return ret;
+
+ cgrp = bpf_cgroup_from_id(cg_id);
+
+ if (!cgrp)
+ return -EPERM;
+
+ css = &cgrp->self;
+ css_task_cnt = 0;
+
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS)
+ if (task->pid == target_pid)
+ css_task_cnt++;
+
+ bpf_cgroup_release(cgrp);
+
+ return -EPERM;
+}
+
+static inline u64 cgroup_id(struct cgroup *cgrp)
+{
+ return cgrp->kn->id;
+}
+
+SEC("?iter/cgroup")
+int cgroup_id_printer(struct bpf_iter__cgroup *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct cgroup *cgrp = ctx->cgroup;
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ /* epilogue */
+ if (cgrp == NULL) {
+ BPF_SEQ_PRINTF(seq, "epilogue\n");
+ return 0;
+ }
+
+ /* prologue */
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, "prologue\n");
+
+ BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp));
+
+ css = &cgrp->self;
+ css_task_cnt = 0;
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+ if (task->pid == target_pid)
+ css_task_cnt++;
+ }
+
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int BPF_PROG(iter_css_task_for_each_sleep)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cgrp_id);
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ if (cgrp == NULL)
+ return 0;
+ css = &cgrp->self;
+
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+
+ }
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_task.c b/tools/testing/selftests/bpf/progs/iters_task.c
new file mode 100644
index 000000000000..c9b4055cd410
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_task.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t target_pid;
+int procs_cnt, threads_cnt, proc_threads_cnt;
+
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
+int iter_task_for_each_sleep(void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct task_struct *pos;
+
+ if (cur_task->pid != target_pid)
+ return 0;
+ procs_cnt = threads_cnt = proc_threads_cnt = 0;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS)
+ if (pos->pid == target_pid)
+ procs_cnt++;
+
+ bpf_for_each(task, pos, cur_task, BPF_TASK_ITER_PROC_THREADS)
+ proc_threads_cnt++;
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_THREADS)
+ if (pos->tgid == target_pid)
+ threads_cnt++;
+ bpf_rcu_read_unlock();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_task_failure.c b/tools/testing/selftests/bpf/progs/iters_task_failure.c
new file mode 100644
index 000000000000..6b1588d70652
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_task_failure.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("expected an RCU CS when using bpf_iter_task_next")
+int BPF_PROG(iter_tasks_without_lock)
+{
+ struct task_struct *pos;
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
+
+ }
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("expected an RCU CS when using bpf_iter_css_next")
+int BPF_PROG(iter_css_without_lock)
+{
+ u64 cg_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+ struct cgroup_subsys_state *root_css, *pos;
+
+ if (!cgrp)
+ return 0;
+ root_css = &cgrp->self;
+
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
+
+ }
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("expected an RCU CS when using bpf_iter_task_next")
+int BPF_PROG(iter_tasks_lock_and_unlock)
+{
+ struct task_struct *pos;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ }
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("expected an RCU CS when using bpf_iter_css_next")
+int BPF_PROG(iter_css_lock_and_unlock)
+{
+ u64 cg_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+ struct cgroup_subsys_state *root_css, *pos;
+
+ if (!cgrp)
+ return 0;
+ root_css = &cgrp->self;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ }
+ bpf_rcu_read_unlock();
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+__failure __msg("css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs")
+int BPF_PROG(iter_css_task_for_each)
+{
+ u64 cg_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ if (cgrp == NULL)
+ return 0;
+ css = &cgrp->self;
+
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+
+ }
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_task_vma.c b/tools/testing/selftests/bpf/progs/iters_task_vma.c
new file mode 100644
index 000000000000..e085a51d153e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_task_vma.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+pid_t target_pid = 0;
+unsigned int vmas_seen = 0;
+
+struct {
+ __u64 vm_start;
+ __u64 vm_end;
+} vm_ranges[1000];
+
+SEC("raw_tp/sys_enter")
+int iter_task_vma_for_each(const void *ctx)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct vm_area_struct *vma;
+ unsigned int seen = 0;
+
+ if (task->pid != target_pid)
+ return 0;
+
+ if (vmas_seen)
+ return 0;
+
+ bpf_for_each(task_vma, vma, task, 0) {
+ if (seen >= 1000)
+ break;
+ barrier_var(seen); /* keep the compiler from optimizing away the bounds check above */
+
+ vm_ranges[seen].vm_start = vma->vm_start;
+ vm_ranges[seen].vm_end = vma->vm_end;
+ seen++;
+ }
+
+ vmas_seen = seen;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_override.c b/tools/testing/selftests/bpf/progs/kprobe_multi_override.c
new file mode 100644
index 000000000000..28f8487c9059
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_override.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe.multi")
+int test_override(struct pt_regs *ctx)
+{
+ bpf_override_return(ctx, 123);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
index f4c63daba229..6438982b928b 100644
--- a/tools/testing/selftests/bpf/progs/linked_list_fail.c
+++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
@@ -591,7 +591,9 @@ int pop_ptr_off(void *(*op)(void *head))
n = op(&p->head);
bpf_spin_unlock(&p->lock);
- bpf_this_cpu_ptr(n);
+ if (!n)
+ return 0;
+ bpf_spin_lock((void *)n);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe.c b/tools/testing/selftests/bpf/progs/missed_kprobe.c
new file mode 100644
index 000000000000..7f9ef701f5de
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_kprobe.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No checks in here; this program exists only to trigger
+ * 'bpf_fentry_test*' through the tracing test_run command.
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_kfunc_common_test();
+ return 0;
+}
+
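+/* test1 above calls bpf_kfunc_common_test() while already in kprobe context;
+ * a kprobe firing from within another kprobe handler is suppressed, so this
+ * program should accumulate missed-run counts, presumably what the missed
+ * prog_tests runner asserts on.
+ */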
+SEC("kprobe/bpf_kfunc_common_test")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
new file mode 100644
index 000000000000..8ea71cbd6c45
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No checks in here; this program exists only to trigger
+ * 'bpf_fentry_test*' through the tracing test_run command.
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe.multi/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_kfunc_common_test();
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test3(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test4(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.multi/bpf_kfunc_common_test")
+int test5(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/missed_tp_recursion.c b/tools/testing/selftests/bpf/progs/missed_tp_recursion.c
new file mode 100644
index 000000000000..762385f827c5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_tp_recursion.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No checks in here; this program exists only to trigger
+ * 'bpf_fentry_test*' through the tracing test_run command.
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_printk("test");
+ return 0;
+}
+
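+/* bpf_printk() in test1 fires the bpf_trace_printk tracepoint that the three
+ * programs below attach to; invoking them from inside a running BPF program
+ * trips recursion protection, so their missed-run counters should increment,
+ * assuming that is what the runner checks.
+ */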
+SEC("tp/bpf_trace/bpf_trace_printk")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("tp/bpf_trace/bpf_trace_printk")
+int test3(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("tp/bpf_trace/bpf_trace_printk")
+int test4(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_array.c b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
new file mode 100644
index 000000000000..37c2d2608ec0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
@@ -0,0 +1,190 @@
+#include "bpf_experimental.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
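+/* presumably set by the userspace part of the test before load; const
+ * volatile globals land in .rodata and can be filled in via the skeleton
+ */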
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test_array_map_1)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+/* Update percpu data */
+SEC("?fentry/bpf_fentry_test2")
+int BPF_PROG(test_array_map_2)
+{
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ return 0;
+ v->c = 1;
+ v->d = 2;
+
+ return 0;
+}
+
+int cpu0_field_d, sum_field_c;
+int my_pid;
+
+/* Summarize percpu data */
+SEC("?fentry/bpf_fentry_test3")
+int BPF_PROG(test_array_map_3)
+{
+ struct val_t __percpu_kptr *p;
+ int i, index = 0;
+ struct val_t *v;
+ struct elem *e;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ return 0;
+}
+
+/* Explicitly free allocated percpu data */
+SEC("?fentry/bpf_fentry_test4")
+int BPF_PROG(test_array_map_4)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ /* delete */
+ p = bpf_kptr_xchg(&e->pc, NULL);
+ if (p) {
+ bpf_percpu_obj_drop(p);
+ }
+
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+int BPF_PROG(test_array_map_10)
+{
+ struct val_t __percpu_kptr *p, *p1;
+ int i, index = 0;
+ struct val_t *v;
+ struct elem *e;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ bpf_rcu_read_lock();
+ p = e->pc;
+ if (!p) {
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ goto out;
+
+ p1 = bpf_kptr_xchg(&e->pc, p);
+ if (p1) {
+ /* lost a race with another instance; drop the object it installed */
+ bpf_percpu_obj_drop(p1);
+ }
+ }
+
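+ /* unlike bpf_per_cpu_ptr(), bpf_this_cpu_ptr() never returns NULL,
+ * so the result can be dereferenced without a check
+ */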
+ v = bpf_this_cpu_ptr(p);
+ v->c = 3;
+ v = bpf_this_cpu_ptr(p);
+ v->c = 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ goto out;
+ v->c = 1;
+ v->d = 2;
+
+ /* delete */
+ p1 = bpf_kptr_xchg(&e->pc, NULL);
+ if (!p1)
+ goto out;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ /* finally release p */
+ bpf_percpu_obj_drop(p1);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
new file mode 100644
index 000000000000..a2acf9aa6c24
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
@@ -0,0 +1,109 @@
+#include "bpf_experimental.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct elem);
+} cgrp SEC(".maps");
+
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test_cgrp_local_storage_1)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+/* Percpu data collection */
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG(test_cgrp_local_storage_2)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ return 0;
+ v->c = 1;
+ v->d = 2;
+ return 0;
+}
+
+int cpu0_field_d, sum_field_c;
+int my_pid;
+
+/* Summarize percpu data collection */
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG(test_cgrp_local_storage_3)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+ int i;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
new file mode 100644
index 000000000000..1a891d30f1fe
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
@@ -0,0 +1,164 @@
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct val2_t {
+ long b;
+};
+
+struct val_with_ptr_t {
+ char *p;
+};
+
+struct val_with_rb_root_t {
+ struct bpf_spin_lock lock;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+long ret;
+
+SEC("?fentry/bpf_fentry_test1")
+__failure __msg("store to referenced kptr disallowed")
+int BPF_PROG(test_array_map_1)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ e->pc = (struct val_t __percpu_kptr *)ret;
+ return 0;
+}
+
+SEC("?fentry/bpf_fentry_test1")
+__failure __msg("invalid kptr access, R2 type=percpu_ptr_val2_t expected=ptr_val_t")
+int BPF_PROG(test_array_map_2)
+{
+ struct val2_t __percpu_kptr *p2;
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p2 = bpf_percpu_obj_new(struct val2_t);
+ if (!p2)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p2);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("R1 type=scalar expected=percpu_ptr_, percpu_rcu_ptr_, percpu_trusted_ptr_")
+int BPF_PROG(test_array_map_3)
+{
+ struct val_t __percpu_kptr *p, *p1;
+ struct val_t *v;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p1 = bpf_kptr_xchg(&e->pc, p);
+ if (p1)
+ bpf_percpu_obj_drop(p1);
+
+ v = bpf_this_cpu_ptr(p);
+ ret = v->b;
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("arg#0 expected for bpf_percpu_obj_drop_impl()")
+int BPF_PROG(test_array_map_4)
+{
+ struct val_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ bpf_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("arg#0 expected for bpf_obj_drop_impl()")
+int BPF_PROG(test_array_map_5)
+{
+ struct val_t *p;
+
+ p = bpf_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type ID argument must be of a struct of scalars")
+int BPF_PROG(test_array_map_6)
+{
+ struct val_with_ptr_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_with_ptr_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type ID argument must not contain special fields")
+int BPF_PROG(test_array_map_7)
+{
+ struct val_with_rb_root_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_with_rb_root_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c b/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c
new file mode 100644
index 000000000000..55907ef961bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+
+struct bin_data {
+ char data[256];
+ struct bpf_spin_lock lock;
+};
+
+struct map_value {
+ struct bin_data __kptr * data;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2048);
+} array SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+bool nomem_err = false;
+
+static int del_array(unsigned int i, int *from)
+{
+ struct map_value *value;
+ struct bin_data *old;
+
+ value = bpf_map_lookup_elem(&array, from);
+ if (!value)
+ return 1;
+
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (old)
+ bpf_obj_drop(old);
+
+ (*from)++;
+ return 0;
+}
+
+static int add_array(unsigned int i, int *from)
+{
+ struct bin_data *old, *new;
+ struct map_value *value;
+
+ value = bpf_map_lookup_elem(&array, from);
+ if (!value)
+ return 1;
+
+ new = bpf_obj_new(typeof(*new));
+ if (!new) {
+ nomem_err = true;
+ return 1;
+ }
+
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old)
+ bpf_obj_drop(old);
+
+ (*from)++;
+ return 0;
+}
+
+static void del_then_add_array(int from)
+{
+ int i;
+
+ i = from;
+ bpf_loop(512, del_array, &i, 0);
+
+ i = from;
+ bpf_loop(512, add_array, &i, 0);
+}
+
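+/* The four programs below each churn a distinct 512-entry slice (offsets 0,
+ * 512, 1024 and 1536) of the 2048-entry array, so concurrent or preempted
+ * invocations exercise bpf_obj_new()/bpf_obj_drop() without touching the
+ * same slots.
+ */
+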
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG2(test0, int, a)
+{
+ del_then_add_array(0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG2(test1, int, a, u64, b)
+{
+ del_then_add_array(512);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG2(test2, char, a, int, b, u64, c)
+{
+ del_then_add_array(1024);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test4")
+int BPF_PROG2(test3, void *, a, char, b, int, c, u64, d)
+{
+ del_then_add_array(1536);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index f799d87e8700..897061930cb7 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -609,7 +609,7 @@ out:
}
SEC("tracepoint/syscalls/sys_enter_kill")
-int tracepoint__syscalls__sys_enter_kill(struct trace_event_raw_sys_enter* ctx)
+int tracepoint__syscalls__sys_enter_kill(struct syscall_trace_enter* ctx)
{
struct bpf_func_stats_ctx stats_ctx;
diff --git a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
new file mode 100644
index 000000000000..4dfbc8552558
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_ADDRESS[] = "\0bpf_cgroup_unix_test";
+
+SEC("cgroup/recvmsg_unix")
+int recvmsg_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_ADDRESS) - 1;
+ int ret;
+
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_ADDRESS,
+ sizeof(SERVUN_ADDRESS) - 1);
+ if (ret)
+ return 1;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 1;
+
+ sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
+ bpf_core_type_id_kernel(struct sockaddr_un));
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_ADDRESS,
+ sizeof(SERVUN_ADDRESS) - 1) != 0)
+ return 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
new file mode 100644
index 000000000000..1f67e832666e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/sendmsg_unix")
+int sendmsg_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ /* Rewrite destination. */
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 0;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 0;
+
+ sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
+ bpf_core_type_id_kernel(struct sockaddr_un));
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 0;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index e02cfd380746..40df2cc26eaf 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -24,9 +24,11 @@ struct task_struct {};
#define STACK_TABLE_EPOCH_SHIFT 20
#define STROBE_MAX_STR_LEN 1
#define STROBE_MAX_CFGS 32
+#define READ_MAP_VAR_PAYLOAD_CAP \
+ ((1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
#define STROBE_MAX_PAYLOAD \
(STROBE_MAX_STRS * STROBE_MAX_STR_LEN + \
- STROBE_MAX_MAPS * (1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
+ STROBE_MAX_MAPS * READ_MAP_VAR_PAYLOAD_CAP)
struct strobe_value_header {
/*
@@ -355,7 +357,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
size_t idx, void *tls_base,
struct strobe_value_generic *value,
struct strobemeta_payload *data,
- void *payload)
+ size_t off)
{
void *location;
uint64_t len;
@@ -366,7 +368,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
return 0;
bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, value->ptr);
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, value->ptr);
/*
* if bpf_probe_read_user_str returns error (<0), due to casting to
	 * unsigned int, it will become a big number, so the next check is
@@ -378,14 +380,14 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
return 0;
data->str_lens[idx] = len;
- return len;
+ return off + len;
}
-static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
- size_t idx, void *tls_base,
- struct strobe_value_generic *value,
- struct strobemeta_payload *data,
- void *payload)
+static __always_inline uint64_t read_map_var(struct strobemeta_cfg *cfg,
+ size_t idx, void *tls_base,
+ struct strobe_value_generic *value,
+ struct strobemeta_payload *data,
+ size_t off)
{
struct strobe_map_descr* descr = &data->map_descrs[idx];
struct strobe_map_raw map;
@@ -397,11 +399,11 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
location = calc_location(&cfg->map_locs[idx], tls_base);
if (!location)
- return payload;
+ return off;
bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr))
- return payload;
+ return off;
descr->id = map.id;
descr->cnt = map.cnt;
@@ -410,10 +412,10 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
data->req_meta_valid = 1;
}
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag);
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, map.tag);
if (len <= STROBE_MAX_STR_LEN) {
descr->tag_len = len;
- payload += len;
+ off += len;
}
#ifdef NO_UNROLL
@@ -426,22 +428,22 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
break;
descr->key_lens[i] = 0;
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN,
map.entries[i].key);
if (len <= STROBE_MAX_STR_LEN) {
descr->key_lens[i] = len;
- payload += len;
+ off += len;
}
descr->val_lens[i] = 0;
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN,
map.entries[i].val);
if (len <= STROBE_MAX_STR_LEN) {
descr->val_lens[i] = len;
- payload += len;
+ off += len;
}
}
- return payload;
+ return off;
}
#ifdef USE_BPF_LOOP
@@ -455,14 +457,20 @@ struct read_var_ctx {
struct strobemeta_payload *data;
void *tls_base;
struct strobemeta_cfg *cfg;
- void *payload;
+ size_t payload_off;
/* value gets mutated */
struct strobe_value_generic *value;
enum read_type type;
};
-static int read_var_callback(__u32 index, struct read_var_ctx *ctx)
+static int read_var_callback(__u64 index, struct read_var_ctx *ctx)
{
+	/* Lose precision info for ctx->payload_off: the verifier won't track
+	 * a double xor, so barrier_var() is needed to force clang to keep both xors.
+	 */
+ ctx->payload_off ^= index;
+ barrier_var(ctx->payload_off);
+ ctx->payload_off ^= index;
switch (ctx->type) {
case READ_INT_VAR:
if (index >= STROBE_MAX_INTS)
@@ -472,14 +480,18 @@ static int read_var_callback(__u32 index, struct read_var_ctx *ctx)
case READ_MAP_VAR:
if (index >= STROBE_MAX_MAPS)
return 1;
- ctx->payload = read_map_var(ctx->cfg, index, ctx->tls_base,
- ctx->value, ctx->data, ctx->payload);
+ if (ctx->payload_off > sizeof(ctx->data->payload) - READ_MAP_VAR_PAYLOAD_CAP)
+ return 1;
+ ctx->payload_off = read_map_var(ctx->cfg, index, ctx->tls_base,
+ ctx->value, ctx->data, ctx->payload_off);
break;
case READ_STR_VAR:
if (index >= STROBE_MAX_STRS)
return 1;
- ctx->payload += read_str_var(ctx->cfg, index, ctx->tls_base,
- ctx->value, ctx->data, ctx->payload);
+ if (ctx->payload_off > sizeof(ctx->data->payload) - STROBE_MAX_STR_LEN)
+ return 1;
+ ctx->payload_off = read_str_var(ctx->cfg, index, ctx->tls_base,
+ ctx->value, ctx->data, ctx->payload_off);
break;
}
return 0;
@@ -501,7 +513,8 @@ static void *read_strobe_meta(struct task_struct *task,
pid_t pid = bpf_get_current_pid_tgid() >> 32;
struct strobe_value_generic value = {0};
struct strobemeta_cfg *cfg;
- void *tls_base, *payload;
+ size_t payload_off;
+ void *tls_base;
cfg = bpf_map_lookup_elem(&strobemeta_cfgs, &pid);
if (!cfg)
@@ -509,7 +522,7 @@ static void *read_strobe_meta(struct task_struct *task,
data->int_vals_set_mask = 0;
data->req_meta_valid = 0;
- payload = data->payload;
+ payload_off = 0;
/*
* we don't have struct task_struct definition, it should be:
* tls_base = (void *)task->thread.fsbase;
@@ -522,7 +535,7 @@ static void *read_strobe_meta(struct task_struct *task,
.tls_base = tls_base,
.value = &value,
.data = data,
- .payload = payload,
+ .payload_off = 0,
};
int err;
@@ -540,6 +553,11 @@ static void *read_strobe_meta(struct task_struct *task,
err = bpf_loop(STROBE_MAX_MAPS, read_var_callback, &ctx, 0);
if (err != STROBE_MAX_MAPS)
return NULL;
+
+ payload_off = ctx.payload_off;
+	/* this should not really happen; here only to satisfy the verifier */
+ if (payload_off > sizeof(data->payload))
+ payload_off = sizeof(data->payload);
#else
#ifdef NO_UNROLL
#pragma clang loop unroll(disable)
@@ -555,7 +573,7 @@ static void *read_strobe_meta(struct task_struct *task,
#pragma unroll
#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_STRS; ++i) {
- payload += read_str_var(cfg, i, tls_base, &value, data, payload);
+ payload_off = read_str_var(cfg, i, tls_base, &value, data, payload_off);
}
#ifdef NO_UNROLL
#pragma clang loop unroll(disable)
@@ -563,7 +581,7 @@ static void *read_strobe_meta(struct task_struct *task,
#pragma unroll
#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_MAPS; ++i) {
- payload = read_map_var(cfg, i, tls_base, &value, data, payload);
+ payload_off = read_map_var(cfg, i, tls_base, &value, data, payload_off);
}
#endif /* USE_BPF_LOOP */
@@ -571,7 +589,7 @@ static void *read_strobe_meta(struct task_struct *task,
* return pointer right after end of payload, so it's possible to
* calculate exact amount of useful data that needs to be sent
*/
- return payload;
+ return &data->payload[payload_off];
}
SEC("raw_tracepoint/kfree_skb")
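The pointer-to-offset conversion above is what makes the payload writes verifiable: a bounded scalar offset can be clamped against the array size before each helper call, whereas a moving pointer cannot. A minimal sketch of the resulting clamp-then-index pattern (illustrative only; assumes the usual BPF helper headers and the STROBE_MAX_STR_LEN cap from this file):

char payload[4096];

static __always_inline size_t append_str(size_t off, const void *unsafe_ptr)
{
	long len;

	/* Clamp first so the verifier can prove that writes to
	 * &payload[off] .. &payload[off + STROBE_MAX_STR_LEN - 1]
	 * stay in bounds.
	 */
	if (off > sizeof(payload) - STROBE_MAX_STR_LEN)
		return off;
	len = bpf_probe_read_user_str(&payload[off], STROBE_MAX_STR_LEN,
				      unsafe_ptr);
	return len > 0 ? off + len : off;
}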
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c
new file mode 100644
index 000000000000..8436c6729167
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int count = 0;
+
+SEC("fentry/subprog_tail")
+int BPF_PROG(fentry, struct sk_buff *skb)
+{
+ count++;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c
new file mode 100644
index 000000000000..fe16412c6e6e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int count = 0;
+
+SEC("fexit/subprog_tail")
+int BPF_PROG(fexit, struct sk_buff *skb)
+{
+ count++;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_poke.c b/tools/testing/selftests/bpf/progs/tailcall_poke.c
new file mode 100644
index 000000000000..c78b94b75e83
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_poke.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test, int a)
+{
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(call1, int a)
+{
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(call2, int a)
+{
+ return 0;
+}
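These three programs give the runner something to poke: test holds a live tail-call site, and call1/call2 can be swapped into slot 0 of jmp_table while test keeps executing. A hedged userspace sketch of that update loop (the skeleton name follows libbpf convention; the iteration count and error handling are illustrative):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "tailcall_poke.skel.h"

int main(void)
{
	/* "?fentry" sections are not loaded by default; opt the program in. */
	struct tailcall_poke *skel = tailcall_poke__open();
	struct bpf_link *link;
	int key = 0, fd1, fd2, map_fd;

	bpf_program__set_autoload(skel->progs.test, true);
	tailcall_poke__load(skel);
	link = bpf_program__attach(skel->progs.test);

	fd1 = bpf_program__fd(skel->progs.call1);
	fd2 = bpf_program__fd(skel->progs.call2);
	map_fd = bpf_map__fd(skel->maps.jmp_table);
	for (int i = 0; i < 10000; i++) {
		/* Swap the live tail-call target back and forth. */
		bpf_map_update_elem(map_fd, &key, &fd1, BPF_ANY);
		bpf_map_update_elem(map_fd, &key, &fd2, BPF_ANY);
	}

	bpf_link__destroy(link);
	tailcall_poke__destroy(skel);
	return 0;
}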
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
new file mode 100644
index 000000000000..b685a4aba6bd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+struct generic_map_value {
+ void *data;
+};
+
+char _license[] SEC("license") = "GPL";
+
+const unsigned int data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
+const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
+
+int err = 0;
+int pid = 0;
+
+#define DEFINE_ARRAY_WITH_KPTR(_size) \
+ struct bin_data_##_size { \
+ char data[_size - sizeof(void *)]; \
+ }; \
+ struct map_value_##_size { \
+ struct bin_data_##_size __kptr * data; \
+ /* To emit BTF info for bin_data_xx */ \
+ struct bin_data_##_size not_used; \
+ }; \
+ struct { \
+ __uint(type, BPF_MAP_TYPE_ARRAY); \
+ __type(key, int); \
+ __type(value, struct map_value_##_size); \
+ __uint(max_entries, 128); \
+ } array_##_size SEC(".maps")
+
+#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
+ struct map_value_percpu_##_size { \
+ struct bin_data_##_size __percpu_kptr * data; \
+ }; \
+ struct { \
+ __uint(type, BPF_MAP_TYPE_ARRAY); \
+ __type(key, int); \
+ __type(value, struct map_value_percpu_##_size); \
+ __uint(max_entries, 128); \
+ } array_percpu_##_size SEC(".maps")
+
+static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old, *new;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 1;
+ return;
+ }
+ new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
+ if (!new) {
+ err = 2;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old) {
+ bpf_obj_drop(old);
+ err = 3;
+ return;
+ }
+ }
+}
+
+static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 4;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (!old) {
+ err = 5;
+ return;
+ }
+ bpf_obj_drop(old);
+ }
+}
+
+static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
+ unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old, *new;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 1;
+ return;
+ }
+ /* per-cpu allocator may not be able to refill in time */
+ new = bpf_percpu_obj_new_impl(data_btf_ids[idx], NULL);
+ if (!new)
+ continue;
+
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old) {
+ bpf_percpu_obj_drop(old);
+ err = 2;
+ return;
+ }
+ }
+}
+
+static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
+ unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 3;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (!old)
+ continue;
+ bpf_percpu_obj_drop(old);
+ }
+}
+
+#define CALL_BATCH_ALLOC(size, batch, idx) \
+ batch_alloc((struct bpf_map *)(&array_##size), batch, idx)
+
+#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
+ do { \
+ batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
+ batch_free((struct bpf_map *)(&array_##size), batch, idx); \
+ } while (0)
+
+#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
+ batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)
+
+#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
+ do { \
+ batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
+ batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
+ } while (0)
+
+DEFINE_ARRAY_WITH_KPTR(8);
+DEFINE_ARRAY_WITH_KPTR(16);
+DEFINE_ARRAY_WITH_KPTR(32);
+DEFINE_ARRAY_WITH_KPTR(64);
+DEFINE_ARRAY_WITH_KPTR(96);
+DEFINE_ARRAY_WITH_KPTR(128);
+DEFINE_ARRAY_WITH_KPTR(192);
+DEFINE_ARRAY_WITH_KPTR(256);
+DEFINE_ARRAY_WITH_KPTR(512);
+DEFINE_ARRAY_WITH_KPTR(1024);
+DEFINE_ARRAY_WITH_KPTR(2048);
+DEFINE_ARRAY_WITH_KPTR(4096);
+
+/* per-cpu kptr doesn't support bin_data_8, which is a zero-sized array */
+DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(1024);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(2048);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(4096);
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_batch_alloc_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+	/* Alloc 128 8-byte objects in batch to trigger refilling,
+	 * then free 128 8-byte objects in batch to trigger freeing.
+	 */
+ CALL_BATCH_ALLOC_FREE(8, 128, 0);
+ CALL_BATCH_ALLOC_FREE(16, 128, 1);
+ CALL_BATCH_ALLOC_FREE(32, 128, 2);
+ CALL_BATCH_ALLOC_FREE(64, 128, 3);
+ CALL_BATCH_ALLOC_FREE(96, 128, 4);
+ CALL_BATCH_ALLOC_FREE(128, 128, 5);
+ CALL_BATCH_ALLOC_FREE(192, 128, 6);
+ CALL_BATCH_ALLOC_FREE(256, 128, 7);
+ CALL_BATCH_ALLOC_FREE(512, 64, 8);
+ CALL_BATCH_ALLOC_FREE(1024, 32, 9);
+ CALL_BATCH_ALLOC_FREE(2048, 16, 10);
+ CALL_BATCH_ALLOC_FREE(4096, 8, 11);
+
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_free_through_map_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+	/* Alloc 128 8-byte objects in batch to trigger refilling,
+	 * then free these objects through map free.
+	 */
+ CALL_BATCH_ALLOC(8, 128, 0);
+ CALL_BATCH_ALLOC(16, 128, 1);
+ CALL_BATCH_ALLOC(32, 128, 2);
+ CALL_BATCH_ALLOC(64, 128, 3);
+ CALL_BATCH_ALLOC(96, 128, 4);
+ CALL_BATCH_ALLOC(128, 128, 5);
+ CALL_BATCH_ALLOC(192, 128, 6);
+ CALL_BATCH_ALLOC(256, 128, 7);
+ CALL_BATCH_ALLOC(512, 64, 8);
+ CALL_BATCH_ALLOC(1024, 32, 9);
+ CALL_BATCH_ALLOC(2048, 16, 10);
+ CALL_BATCH_ALLOC(4096, 8, 11);
+
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_batch_percpu_alloc_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+	/* Alloc 128 16-byte per-cpu objects in batch to trigger refilling,
+	 * then free 128 16-byte per-cpu objects in batch to trigger freeing.
+	 */
+ CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
+ CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
+ CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
+ CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
+ CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
+ CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
+ CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
+ CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
+ CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
+ CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
+ CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);
+
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_percpu_free_through_map_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+	/* Alloc 128 16-byte per-cpu objects in batch to trigger refilling,
+	 * then free these objects through map free.
+	 */
+ CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
+ CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
+ CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
+ CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
+ CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
+ CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
+ CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
+ CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
+ CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
+ CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
+ CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);
+
+ return 0;
+}
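bpf_obj_new_impl() and bpf_percpu_obj_new_impl() take a BTF type ID rather than a type name, so the const data_btf_ids array has to be filled in by the loader between open and load. A hedged sketch of how a runner might resolve the IDs from the object's own BTF (skeleton name assumed from the file name):

#include <stdio.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "test_bpf_ma.skel.h"

static int fill_btf_ids(struct test_bpf_ma *skel)
{
	/* Must run after open() and before load(): .rodata is frozen at load. */
	struct btf *btf = bpf_object__btf(skel->obj);
	const unsigned int sizes[] = {8, 16, 32, 64, 96, 128, 192, 256,
				      512, 1024, 2048, 4096};
	char name[32];

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int id;

		snprintf(name, sizeof(name), "bin_data_%u", sizes[i]);
		id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);
		if (id < 0)
			return id;
		skel->rodata->data_btf_ids[i] = id;
	}
	return 0;
}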
diff --git a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
index 67c14ba1e87b..2a2a942737d7 100644
--- a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
+++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
@@ -6,7 +6,9 @@
#include <bpf/bpf_tracing.h>
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_s390) || defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
const volatile int skip = 0;
#else
const volatile int skip = 1;
@@ -104,7 +106,11 @@ int _tc(volatile struct __sk_buff *skb)
"%[tmp_mark] = r1"
: [tmp_mark]"=r"(tmp_mark)
: [ctx]"r"(skb),
- [off_mark]"i"(offsetof(struct __sk_buff, mark))
+ [off_mark]"i"(offsetof(struct __sk_buff, mark)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ + sizeof(skb->mark) - 1
+#endif
+ )
: "r1");
#else
tmp_mark = (char)skb->mark;
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
index 464d35bd57c7..b7250eb9c30c 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -15,6 +15,13 @@ struct {
} sock_map SEC(".maps");
struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} nop_map SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 2);
__type(key, __u32);
diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
index 56cdc0a553f0..7e750309ce27 100644
--- a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
+++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
@@ -18,7 +18,7 @@ const volatile __u64 cgid;
int remote_pid;
SEC("tp_btf/task_newtask")
-int BPF_PROG(handle__task_newtask, struct task_struct *task, u64 clone_flags)
+int BPF_PROG(tp_btf_run, struct task_struct *task, u64 clone_flags)
{
struct cgroup *cgrp = NULL;
struct task_struct *acquired;
@@ -48,4 +48,30 @@ out:
return 0;
}
+SEC("lsm.s/bpf")
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size)
+{
+ struct cgroup *cgrp = NULL;
+ struct task_struct *task;
+ int ret = 0;
+
+ task = bpf_get_current_task_btf();
+ if (local_pid != task->pid)
+ return 0;
+
+ if (cmd != BPF_LINK_CREATE)
+ return 0;
+
+ /* 1 is the root cgroup */
+ cgrp = bpf_cgroup_from_id(1);
+ if (!cgrp)
+ goto out;
+ if (!bpf_task_under_cgroup(task, cgrp))
+ ret = -1;
+ bpf_cgroup_release(cgrp);
+
+out:
+ return ret;
+}
+
char _license[] SEC("license") = "GPL";
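The new lsm.s/bpf program fires on every bpf() syscall from the test pid but only acts on BPF_LINK_CREATE, so performing one more link-based attach from the test process is enough to drive it. A hedged sketch of the trigger (skeleton and global names assumed from this file):

#include <unistd.h>
#include <bpf/libbpf.h>
#include "test_task_under_cgroup.skel.h"

static int trigger_lsm_check(struct test_task_under_cgroup *skel)
{
	struct bpf_link *lsm_link, *tp_link;

	skel->bss->local_pid = getpid();
	lsm_link = bpf_program__attach(skel->progs.lsm_run);
	if (!lsm_link)
		return -1;
	/* This attach issues BPF_LINK_CREATE, which the LSM hook observes
	 * and uses to check that the task is under the root cgroup.
	 */
	tp_link = bpf_program__attach(skel->progs.tp_btf_run);
	bpf_link__destroy(tp_link);
	bpf_link__destroy(lsm_link);
	return 0;
}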
diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c
index 30e7124c49a1..992400acb957 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_link.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_link.c
@@ -1,7 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include <stdbool.h>
+
#include <linux/bpf.h>
+#include <linux/if_ether.h>
+
+#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
char LICENSE[] SEC("license") = "GPL";
@@ -12,10 +16,19 @@ bool seen_tc3;
bool seen_tc4;
bool seen_tc5;
bool seen_tc6;
+bool seen_eth;
SEC("tc/ingress")
int tc1(struct __sk_buff *skb)
{
+ struct ethhdr eth = {};
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IP))
+ goto out;
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
+ goto out;
+ seen_eth = eth.h_proto == bpf_htons(ETH_P_IP);
+out:
seen_tc1 = true;
return TCX_NEXT;
}
diff --git a/tools/testing/selftests/bpf/progs/test_uprobe.c b/tools/testing/selftests/bpf/progs/test_uprobe.c
new file mode 100644
index 000000000000..896c88a4960d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_uprobe.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Hengqi Chen */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+pid_t my_pid = 0;
+
+int test1_result = 0;
+int test2_result = 0;
+int test3_result = 0;
+int test4_result = 0;
+
+SEC("uprobe/./liburandom_read.so:urandlib_api_sameoffset")
+int BPF_UPROBE(test1)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ test1_result = 1;
+ return 0;
+}
+
+SEC("uprobe/./liburandom_read.so:urandlib_api_sameoffset@LIBURANDOM_READ_1.0.0")
+int BPF_UPROBE(test2)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ test2_result = 1;
+ return 0;
+}
+
+SEC("uretprobe/./liburandom_read.so:urandlib_api_sameoffset@@LIBURANDOM_READ_2.0.0")
+int BPF_URETPROBE(test3, int ret)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ test3_result = ret;
+ return 0;
+}
+
+SEC("uprobe")
+int BPF_UPROBE(test4)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ test4_result = 1;
+ return 0;
+}
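test1 binds by bare symbol name, test2/test3 pin the symbol to a specific version, and test4 uses a bare SEC("uprobe") that is never auto-attached and must be wired up manually. A hedged sketch of the manual attach using libbpf's versioned func_name resolution (library path, version string, and skeleton name are assumptions):

#include <bpf/libbpf.h>
#include "test_uprobe.skel.h"

static struct bpf_link *attach_test4(struct test_uprobe *skel)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts,
		    /* "sym@version" selects one of several same-name symbols */
		    .func_name = "urandlib_api_sameoffset@LIBURANDOM_READ_1.0.0");

	return bpf_program__attach_uprobe_opts(skel->progs.test4,
					       -1 /* any process */,
					       "./liburandom_read.so",
					       0 /* offset unused with func_name */,
					       &opts);
}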
diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
index 4b8e37f7fd06..78b23934d9f8 100644
--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c
+++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
@@ -16,12 +16,12 @@ bool kprobe_called = false;
bool fentry_called = false;
SEC("tp/syscalls/sys_enter_nanosleep")
-int handle__tp(struct trace_event_raw_sys_enter *args)
+int handle__tp(struct syscall_trace_enter *args)
{
struct __kernel_timespec *ts;
long tv_nsec;
- if (args->id != __NR_nanosleep)
+ if (args->nr != __NR_nanosleep)
return 0;
ts = (void *)args->args[0];
diff --git a/tools/testing/selftests/bpf/progs/timer.c b/tools/testing/selftests/bpf/progs/timer.c
index 9a16d95213e1..8b946c8188c6 100644
--- a/tools/testing/selftests/bpf/progs/timer.c
+++ b/tools/testing/selftests/bpf/progs/timer.c
@@ -51,7 +51,7 @@ struct {
__uint(max_entries, 1);
__type(key, int);
__type(value, struct elem);
-} abs_timer SEC(".maps");
+} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps");
__u64 bss_data;
__u64 abs_data;
@@ -59,6 +59,8 @@ __u64 err;
__u64 ok;
__u64 callback_check = 52;
__u64 callback2_check = 52;
+__u64 pinned_callback_check;
+__s32 pinned_cpu;
#define ARRAY 1
#define HTAB 2
@@ -329,3 +331,62 @@ int BPF_PROG2(test3, int, a)
return 0;
}
+
+/* callback for pinned timer */
+static int timer_cb_pinned(void *map, int *key, struct bpf_timer *timer)
+{
+ __s32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu != pinned_cpu)
+ err |= 16384;
+
+ pinned_callback_check++;
+ return 0;
+}
+
+static void test_pinned_timer(bool soft)
+{
+ int key = 0;
+ void *map;
+ struct bpf_timer *timer;
+ __u64 flags = BPF_F_TIMER_CPU_PIN;
+ __u64 start_time;
+
+ if (soft) {
+ map = &soft_timer_pinned;
+ start_time = 0;
+ } else {
+ map = &abs_timer_pinned;
+ start_time = bpf_ktime_get_boot_ns();
+ flags |= BPF_F_TIMER_ABS;
+ }
+
+ timer = bpf_map_lookup_elem(map, &key);
+ if (timer) {
+ if (bpf_timer_init(timer, map, CLOCK_BOOTTIME) != 0)
+ err |= 4096;
+ bpf_timer_set_callback(timer, timer_cb_pinned);
+ pinned_cpu = bpf_get_smp_processor_id();
+ bpf_timer_start(timer, start_time + 1000, flags);
+ } else {
+ err |= 8192;
+ }
+}
+
+SEC("fentry/bpf_fentry_test4")
+int BPF_PROG2(test4, int, a)
+{
+ bpf_printk("test4");
+ test_pinned_timer(true);
+
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test5")
+int BPF_PROG2(test5, int, a)
+{
+ bpf_printk("test5");
+ test_pinned_timer(false);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/timer_failure.c b/tools/testing/selftests/bpf/progs/timer_failure.c
new file mode 100644
index 000000000000..226d33b5a05c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_failure.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <time.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct elem {
+ struct bpf_timer t;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} timer_map SEC(".maps");
+
+static int timer_cb_ret1(void *map, int *key, struct bpf_timer *timer)
+{
+ if (bpf_get_smp_processor_id() % 2)
+ return 1;
+ else
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+__failure __msg("should have been in (0x0; 0x0)")
+int BPF_PROG2(test_ret_1, int, a)
+{
+ int key = 0;
+ struct bpf_timer *timer;
+
+ timer = bpf_map_lookup_elem(&timer_map, &key);
+ if (timer) {
+ bpf_timer_init(timer, &timer_map, CLOCK_BOOTTIME);
+ bpf_timer_set_callback(timer, timer_cb_ret1);
+ bpf_timer_start(timer, 1000, 0);
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_bswap.c b/tools/testing/selftests/bpf/progs/verifier_bswap.c
index 8893094725f0..e61755656e8d 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bswap.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bswap.c
@@ -5,7 +5,10 @@
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
SEC("socket")
__description("BSWAP, 16")
diff --git a/tools/testing/selftests/bpf/progs/verifier_cfg.c b/tools/testing/selftests/bpf/progs/verifier_cfg.c
index df7697b94007..c1f55e1d80a4 100644
--- a/tools/testing/selftests/bpf/progs/verifier_cfg.c
+++ b/tools/testing/selftests/bpf/progs/verifier_cfg.c
@@ -97,4 +97,66 @@ l0_%=: r2 = r0; \
" ::: __clobber_all);
}
+SEC("socket")
+__description("conditional loop (2)")
+__success
+__failure_unpriv __msg_unpriv("back-edge from insn 10 to 11")
+__naked void conditional_loop2(void)
+{
+ asm volatile (" \
+ r9 = 2 ll; \
+ r3 = 0x20 ll; \
+ r4 = 0x35 ll; \
+ r8 = r4; \
+ goto l1_%=; \
+l0_%=: r9 -= r3; \
+ r9 -= r4; \
+ r9 -= r8; \
+l1_%=: r8 += r4; \
+ if r8 < 0x64 goto l0_%=; \
+ r0 = r9; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unconditional loop after conditional jump")
+__failure __msg("infinite loop detected")
+__failure_unpriv __msg_unpriv("back-edge from insn 3 to 2")
+__naked void uncond_loop_after_cond_jmp(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ if r0 > 0 goto l1_%=; \
+l0_%=: r0 = 1; \
+ goto l0_%=; \
+l1_%=: exit; \
+" ::: __clobber_all);
+}
+
+__naked __noinline __used
+static unsigned long never_ending_subprog()
+{
+ asm volatile (" \
+ r0 = r1; \
+ goto -1; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unconditional loop after conditional jump")
+/* infinite loop is detected *after* check_cfg() */
+__failure __msg("infinite loop detected")
+__naked void uncond_loop_in_subprog_after_cond_jmp(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ if r0 > 0 goto l1_%=; \
+l0_%=: r0 += 1; \
+ call never_ending_subprog; \
+l1_%=: exit; \
+" ::: __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_gotol.c b/tools/testing/selftests/bpf/progs/verifier_gotol.c
index 2dae5322a18e..d1edbcff9a18 100644
--- a/tools/testing/selftests/bpf/progs/verifier_gotol.c
+++ b/tools/testing/selftests/bpf/progs/verifier_gotol.c
@@ -5,7 +5,10 @@
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
SEC("socket")
__description("gotol, small_imm")
diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
new file mode 100644
index 000000000000..5905e036e0ea
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 8);
+ __type(key, __u32);
+ __type(value, __u64);
+} map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
+ __uint(max_entries, 8);
+} ringbuf SEC(".maps");
+
+struct vm_area_struct;
+struct bpf_map;
+
+struct buf_context {
+ char *buf;
+};
+
+struct num_context {
+ __u64 i;
+ __u64 j;
+};
+
+__u8 choice_arr[2] = { 0, 1 };
+
+static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
+{
+ if (idx == 0) {
+ ctx->buf = (char *)(0xDEAD);
+ return 0;
+ }
+
+ if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
+ return 1;
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("R1 type=scalar expected=fp")
+int unsafe_on_2nd_iter(void *unused)
+{
+ char buf[4];
+ struct buf_context loop_ctx = { .buf = buf };
+
+ bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
+ return 0;
+}
+
+static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
+{
+ ctx->i = 0;
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
+int unsafe_on_zero_iter(void *unused)
+{
+ struct num_context loop_ctx = { .i = 32 };
+
+ bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
+ return choice_arr[loop_ctx.i];
+}
+
+static int widening_cb(__u32 idx, struct num_context *ctx)
+{
+ ++ctx->i;
+ return 0;
+}
+
+SEC("?raw_tp")
+__success
+int widening(void *unused)
+{
+ struct num_context loop_ctx = { .i = 0, .j = 1 };
+
+ bpf_loop(100, widening_cb, &loop_ctx, 0);
+	/* loop_ctx.j is not changed during callback iteration,
+	 * so the verifier should not apply widening to it.
+	 */
+ return choice_arr[loop_ctx.j];
+}
+
+static int loop_detection_cb(__u32 idx, struct num_context *ctx)
+{
+ for (;;) {}
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("infinite loop detected")
+int loop_detection(void *unused)
+{
+ struct num_context loop_ctx = { .i = 0 };
+
+ bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
+ return 0;
+}
+
+static __always_inline __u64 oob_state_machine(struct num_context *ctx)
+{
+ switch (ctx->i) {
+ case 0:
+ ctx->i = 1;
+ break;
+ case 1:
+ ctx->i = 32;
+ break;
+ }
+ return 0;
+}
+
+static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
+{
+ return oob_state_machine(data);
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
+int unsafe_for_each_map_elem(void *unused)
+{
+ struct num_context loop_ctx = { .i = 0 };
+
+ bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
+ return choice_arr[loop_ctx.i];
+}
+
+static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
+{
+ return oob_state_machine(data);
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
+int unsafe_ringbuf_drain(void *unused)
+{
+ struct num_context loop_ctx = { .i = 0 };
+
+ bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
+ return choice_arr[loop_ctx.i];
+}
+
+static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
+{
+ return oob_state_machine(data);
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
+int unsafe_find_vma(void *unused)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct num_context loop_ctx = { .i = 0 };
+
+ bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
+ return choice_arr[loop_ctx.i];
+}
+
+static int iter_limit_cb(__u32 idx, struct num_context *ctx)
+{
+ ctx->i++;
+ return 0;
+}
+
+SEC("?raw_tp")
+__success
+int bpf_loop_iter_limit_ok(void *unused)
+{
+ struct num_context ctx = { .i = 0 };
+
+ bpf_loop(1, iter_limit_cb, &ctx, 0);
+ return choice_arr[ctx.i];
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid access to map value, value_size=2 off=2 size=1")
+int bpf_loop_iter_limit_overflow(void *unused)
+{
+ struct num_context ctx = { .i = 0 };
+
+ bpf_loop(2, iter_limit_cb, &ctx, 0);
+ return choice_arr[ctx.i];
+}
+
+static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
+{
+ ctx->i += 100;
+ return 0;
+}
+
+static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
+{
+ ctx->i += 10;
+ return 0;
+}
+
+static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
+{
+ ctx->i += 1;
+ bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
+ bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
+ return 0;
+}
+
+/* Check that a path visiting every callback function once has been
+ * reached by the verifier. Variables 'ctx{1,2}.i' below serve as flags,
+ * with each decimal digit corresponding to a callback visit marker.
+ */
+SEC("socket")
+__success __retval(111111)
+int bpf_loop_iter_limit_nested(void *unused)
+{
+ struct num_context ctx1 = { .i = 0 };
+ struct num_context ctx2 = { .i = 0 };
+ __u64 a, b, c;
+
+ bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
+ bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
+ a = ctx1.i;
+ b = ctx2.i;
+ /* Force 'ctx1.i' and 'ctx2.i' precise. */
+ c = choice_arr[(a + b) % 2];
+	/* This makes 'c' zero, but neither clang nor the verifier knows it. */
+ c /= 10;
+ /* Make sure that verifier does not visit 'impossible' states:
+ * enumerate all possible callback visit masks.
+ */
+ if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
+ b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
+ asm volatile ("r0 /= 0;" ::: "r0");
+ return 1000 * a + b + c;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
index 0c638f45aaf1..d4427d8e1217 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -5,19 +5,26 @@
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
SEC("socket")
__description("LDSX, S8")
__success __success_unpriv __retval(-2)
__naked void ldsx_s8(void)
{
- asm volatile (" \
- r1 = 0x3fe; \
- *(u64 *)(r10 - 8) = r1; \
- r0 = *(s8 *)(r10 - 8); \
- exit; \
-" ::: __clobber_all);
+ asm volatile (
+ "r1 = 0x3fe;"
+ "*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(s8 *)(r10 - 8);"
+#else
+ "r0 = *(s8 *)(r10 - 1);"
+#endif
+ "exit;"
+ ::: __clobber_all);
}
SEC("socket")
@@ -25,12 +32,16 @@ __description("LDSX, S16")
__success __success_unpriv __retval(-2)
__naked void ldsx_s16(void)
{
- asm volatile (" \
- r1 = 0x3fffe; \
- *(u64 *)(r10 - 8) = r1; \
- r0 = *(s16 *)(r10 - 8); \
- exit; \
-" ::: __clobber_all);
+ asm volatile (
+ "r1 = 0x3fffe;"
+ "*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(s16 *)(r10 - 8);"
+#else
+ "r0 = *(s16 *)(r10 - 2);"
+#endif
+ "exit;"
+ ::: __clobber_all);
}
SEC("socket")
@@ -38,35 +49,43 @@ __description("LDSX, S32")
__success __success_unpriv __retval(-1)
__naked void ldsx_s32(void)
{
- asm volatile (" \
- r1 = 0xfffffffe; \
- *(u64 *)(r10 - 8) = r1; \
- r0 = *(s32 *)(r10 - 8); \
- r0 >>= 1; \
- exit; \
-" ::: __clobber_all);
+ asm volatile (
+ "r1 = 0xfffffffe;"
+ "*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(s32 *)(r10 - 8);"
+#else
+ "r0 = *(s32 *)(r10 - 4);"
+#endif
+ "r0 >>= 1;"
+ "exit;"
+ ::: __clobber_all);
}
SEC("socket")
__description("LDSX, S8 range checking, privileged")
__log_level(2) __success __retval(1)
-__msg("R1_w=scalar(smin=-128,smax=127)")
+__msg("R1_w=scalar(smin=smin32=-128,smax=smax32=127)")
__naked void ldsx_s8_range_priv(void)
{
- asm volatile (" \
- call %[bpf_get_prandom_u32]; \
- *(u64 *)(r10 - 8) = r0; \
- r1 = *(s8 *)(r10 - 8); \
- /* r1 with s8 range */ \
- if r1 s> 0x7f goto l0_%=; \
- if r1 s< -0x80 goto l0_%=; \
- r0 = 1; \
-l1_%=: \
- exit; \
-l0_%=: \
- r0 = 2; \
- goto l1_%=; \
-" :
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r1 = *(s8 *)(r10 - 8);"
+#else
+ "r1 = *(s8 *)(r10 - 1);"
+#endif
+ /* r1 with s8 range */
+ "if r1 s> 0x7f goto l0_%=;"
+ "if r1 s< -0x80 goto l0_%=;"
+ "r0 = 1;"
+"l1_%=:"
+ "exit;"
+"l0_%=:"
+ "r0 = 2;"
+ "goto l1_%=;"
+ :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
@@ -76,20 +95,24 @@ __description("LDSX, S16 range checking")
__success __success_unpriv __retval(1)
__naked void ldsx_s16_range(void)
{
- asm volatile (" \
- call %[bpf_get_prandom_u32]; \
- *(u64 *)(r10 - 8) = r0; \
- r1 = *(s16 *)(r10 - 8); \
- /* r1 with s16 range */ \
- if r1 s> 0x7fff goto l0_%=; \
- if r1 s< -0x8000 goto l0_%=; \
- r0 = 1; \
-l1_%=: \
- exit; \
-l0_%=: \
- r0 = 2; \
- goto l1_%=; \
-" :
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r1 = *(s16 *)(r10 - 8);"
+#else
+ "r1 = *(s16 *)(r10 - 2);"
+#endif
+ /* r1 with s16 range */
+ "if r1 s> 0x7fff goto l0_%=;"
+ "if r1 s< -0x8000 goto l0_%=;"
+ "r0 = 1;"
+"l1_%=:"
+ "exit;"
+"l0_%=:"
+ "r0 = 2;"
+ "goto l1_%=;"
+ :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
@@ -99,20 +122,24 @@ __description("LDSX, S32 range checking")
__success __success_unpriv __retval(1)
__naked void ldsx_s32_range(void)
{
- asm volatile (" \
- call %[bpf_get_prandom_u32]; \
- *(u64 *)(r10 - 8) = r0; \
- r1 = *(s32 *)(r10 - 8); \
- /* r1 with s16 range */ \
- if r1 s> 0x7fffFFFF goto l0_%=; \
- if r1 s< -0x80000000 goto l0_%=; \
- r0 = 1; \
-l1_%=: \
- exit; \
-l0_%=: \
- r0 = 2; \
- goto l1_%=; \
-" :
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r1 = *(s32 *)(r10 - 8);"
+#else
+ "r1 = *(s32 *)(r10 - 4);"
+#endif
+ /* r1 with s16 range */
+ "if r1 s> 0x7fffFFFF goto l0_%=;"
+ "if r1 s< -0x80000000 goto l0_%=;"
+ "r0 = 1;"
+"l1_%=:"
+ "exit;"
+"l0_%=:"
+ "r0 = 2;"
+ "goto l1_%=;"
+ :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c
index 5bc86af80a9a..71735dbf33d4 100644
--- a/tools/testing/selftests/bpf/progs/verifier_loops1.c
+++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c
@@ -75,9 +75,10 @@ l0_%=: r0 += 1; \
" ::: __clobber_all);
}
-SEC("tracepoint")
+SEC("socket")
__description("bounded loop, start in the middle")
-__failure __msg("back-edge")
+__success
+__failure_unpriv __msg_unpriv("back-edge")
__naked void loop_start_in_the_middle(void)
{
asm volatile (" \
@@ -136,7 +137,9 @@ l0_%=: exit; \
SEC("tracepoint")
__description("bounded recursion")
-__failure __msg("back-edge")
+__failure
+/* verifier limitation in detecting max stack depth */
+__msg("the call stack of 8 frames is too deep !")
__naked void bounded_recursion(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index 3c8ac2c57b1b..cbb9d6714f53 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -5,7 +5,10 @@
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
SEC("socket")
__description("MOV32SX, S8")
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
new file mode 100644
index 000000000000..6b564d4c0986
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 SUSE LLC */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
+__naked int bpf_neg(void)
+{
+ asm volatile (
+ "r2 = 8;"
+ "r2 = -r2;"
+ "if r2 != -8 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_le(void)
+{
+ asm volatile (
+ "r2 = 0;"
+ "r2 = le16 r2;"
+ "if r2 != 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_be(void)
+{
+ asm volatile (
+ "r2 = 0;"
+ "r2 = be16 r2;"
+ "if r2 != 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+ __clang_major__ >= 18
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_bswap(void)
+{
+ asm volatile (
+ "r2 = 0;"
+ "r2 = bswap16 r2;"
+ "if r2 != 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+#endif /* v4 instruction */
+
+SEC("?raw_tp")
+__success __log_level(2)
+/*
+ * Without the bug fix there will be no history between "last_idx 3 first_idx 3"
+ * and "parent state regs=" lines. "R0_w=6" parts are here to help anchor
+ * expected log messages to the one specific mark_chain_precision operation.
+ *
+ * This is quite fragile: if verifier checkpointing heuristic changes, this
+ * might need adjusting.
+ */
+__msg("2: (07) r0 += 1 ; R0_w=6")
+__msg("3: (35) if r0 >= 0xa goto pc+1")
+__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
+__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
+__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
+__msg("mark_precise: frame0: parent state regs= stack=: R0_rw=P4")
+__msg("3: R0_w=6")
+__naked int state_loop_first_last_equal(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "l0_%=:"
+ "r0 += 1;"
+ "r0 += 1;"
+ /* every few iterations we'll have a checkpoint here with
+ * first_idx == last_idx, potentially confusing precision
+ * backtracking logic
+ */
+ "if r0 >= 10 goto l1_%=;" /* checkpoint + mark_precise */
+ "goto l0_%=;"
+ "l1_%=:"
+ "exit;"
+ ::: __clobber_common
+ );
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
index 0990f8825675..2a2271cf0294 100644
--- a/tools/testing/selftests/bpf/progs/verifier_sdiv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
@@ -5,7 +5,10 @@
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 1")
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
index db6b3143338b..f61d623b1ce8 100644
--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -119,15 +119,41 @@ __naked int global_subprog_result_precise(void)
SEC("?raw_tp")
__success __log_level(2)
+/* The first simulated path does not include the callback body;
+ * r1 and r4 are always precise for bpf_loop() calls.
+ */
+__msg("9: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
+__msg("mark_precise: frame0: parent state regs=r4 stack=:")
+__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
+__msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0")
+__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
+__msg("mark_precise: frame0: parent state regs=r1 stack=:")
+__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
+__msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0")
+__msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0")
+__msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8")
+__msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+/* r6 precision propagation */
__msg("14: (0f) r1 += r6")
-__msg("mark_precise: frame0: last_idx 14 first_idx 10")
+__msg("mark_precise: frame0: last_idx 14 first_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
-__msg("mark_precise: frame0: parent state regs=r0 stack=:")
-__msg("mark_precise: frame0: last_idx 18 first_idx 0")
-__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit")
+__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
+/* State entering callback body popped from states stack */
+__msg("from 9 to 17: frame1:")
+__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("17: (b7) r0 = 0")
+__msg("18: (95) exit")
+__msg("returning from callee:")
+__msg("to caller at 9:")
+__msg("frame 0: propagating r1,r4")
+__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit")
+__msg("from 18 to 9: safe")
__naked int callback_result_precise(void)
{
asm volatile (
@@ -233,20 +259,36 @@ __naked int parent_callee_saved_reg_precise_global(void)
SEC("?raw_tp")
__success __log_level(2)
+/* First simulated path does not include callback body */
__msg("12: (0f) r1 += r6")
-__msg("mark_precise: frame0: last_idx 12 first_idx 10")
+__msg("mark_precise: frame0: last_idx 12 first_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs=r6 stack=:")
-__msg("mark_precise: frame0: last_idx 16 first_idx 0")
-__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) exit")
-__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
-__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+/* State entering callback body popped from states stack */
+__msg("from 9 to 15: frame1:")
+__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("15: (b7) r0 = 0")
+__msg("16: (95) exit")
+__msg("returning from callee:")
+__msg("to caller at 9:")
+/* r1, r4 are always precise for bpf_loop(),
+ * r6 was marked before backtracking to callback body.
+ */
+__msg("frame 0: propagating r1,r4,r6")
+__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
+__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
+__msg("mark_precise: frame0: parent state regs= stack=:")
+__msg("from 16 to 9: safe")
__naked int parent_callee_saved_reg_precise_with_callback(void)
{
asm volatile (
@@ -373,22 +415,38 @@ __naked int parent_stack_slot_precise_global(void)
SEC("?raw_tp")
__success __log_level(2)
+/* First simulated path does not include callback body */
__msg("14: (0f) r1 += r6")
-__msg("mark_precise: frame0: last_idx 14 first_idx 11")
+__msg("mark_precise: frame0: last_idx 14 first_idx 10")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
-__msg("mark_precise: frame0: last_idx 18 first_idx 0")
-__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
-__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
-__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+/* State entering callback body popped from states stack */
+__msg("from 10 to 17: frame1:")
+__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("17: (b7) r0 = 0")
+__msg("18: (95) exit")
+__msg("returning from callee:")
+__msg("to caller at 10:")
+/* r1, r4 are always precise for bpf_loop(),
+ * fp-8 was marked before backtracking to callback body.
+ */
+__msg("frame 0: propagating r1,r4,fp-8")
+__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
+__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: parent state regs= stack=:")
+__msg("from 18 to 10: safe")
__naked int parent_stack_slot_precise_with_callback(void)
{
asm volatile (
diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
index b2dfd7066c6e..f6d1cc9ad892 100644
--- a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
@@ -21,7 +21,7 @@ extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
enum xdp_rss_hash_type *rss_type) __ksym;
-SEC("xdp")
+SEC("xdp.frags")
int rx(struct xdp_md *ctx)
{
void *data, *data_meta, *data_end;
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index 07d786329105..80f620602d50 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -53,6 +53,8 @@
#define DEFAULT_TTL 64
#define MAX_ALLOWED_PORTS 8
+#define MAX_PACKET_OFF 0xffff
+
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
@@ -177,69 +179,82 @@ static __always_inline __u32 tcp_ns_to_ts(__u64 ns)
return ns / (NSEC_PER_SEC / TCP_TS_HZ);
}
-static __always_inline __u32 tcp_time_stamp_raw(void)
+static __always_inline __u32 tcp_clock_ms(void)
{
return tcp_ns_to_ts(tcp_clock_ns());
}
struct tcpopt_context {
- __u8 *ptr;
- __u8 *end;
+ void *data;
void *data_end;
__be32 *tsecr;
__u8 wscale;
bool option_timestamp;
bool option_sack;
+ __u32 off;
};
-static int tscookie_tcpopt_parse(struct tcpopt_context *ctx)
+static __always_inline u8 *next(struct tcpopt_context *ctx, __u32 sz)
{
- __u8 opcode, opsize;
+ __u64 off = ctx->off;
+ __u8 *data;
- if (ctx->ptr >= ctx->end)
- return 1;
- if (ctx->ptr >= ctx->data_end)
- return 1;
+ /* Verifier forbids access to packet when offset exceeds MAX_PACKET_OFF */
+ if (off > MAX_PACKET_OFF - sz)
+ return NULL;
- opcode = ctx->ptr[0];
+ data = ctx->data + off;
+ barrier_var(data);
+ if (data + sz >= ctx->data_end)
+ return NULL;
- if (opcode == TCPOPT_EOL)
- return 1;
- if (opcode == TCPOPT_NOP) {
- ++ctx->ptr;
- return 0;
- }
+ ctx->off += sz;
+ return data;
+}
- if (ctx->ptr + 1 >= ctx->end)
- return 1;
- if (ctx->ptr + 1 >= ctx->data_end)
+static int tscookie_tcpopt_parse(struct tcpopt_context *ctx)
+{
+ __u8 *opcode, *opsize, *wscale, *tsecr;
+ __u32 off = ctx->off;
+
+ opcode = next(ctx, 1);
+ if (!opcode)
return 1;
- opsize = ctx->ptr[1];
- if (opsize < 2)
+
+ if (*opcode == TCPOPT_EOL)
return 1;
+ if (*opcode == TCPOPT_NOP)
+ return 0;
- if (ctx->ptr + opsize > ctx->end)
+ opsize = next(ctx, 1);
+ if (!opsize || *opsize < 2)
return 1;
- switch (opcode) {
+ switch (*opcode) {
case TCPOPT_WINDOW:
- if (opsize == TCPOLEN_WINDOW && ctx->ptr + TCPOLEN_WINDOW <= ctx->data_end)
- ctx->wscale = ctx->ptr[2] < TCP_MAX_WSCALE ? ctx->ptr[2] : TCP_MAX_WSCALE;
+ wscale = next(ctx, 1);
+ if (!wscale)
+ return 1;
+ if (*opsize == TCPOLEN_WINDOW)
+ ctx->wscale = *wscale < TCP_MAX_WSCALE ? *wscale : TCP_MAX_WSCALE;
break;
case TCPOPT_TIMESTAMP:
- if (opsize == TCPOLEN_TIMESTAMP && ctx->ptr + TCPOLEN_TIMESTAMP <= ctx->data_end) {
+ tsecr = next(ctx, 4);
+ if (!tsecr)
+ return 1;
+ if (*opsize == TCPOLEN_TIMESTAMP) {
ctx->option_timestamp = true;
/* Client's tsval becomes our tsecr. */
- *ctx->tsecr = get_unaligned((__be32 *)(ctx->ptr + 2));
+ *ctx->tsecr = get_unaligned((__be32 *)tsecr);
}
break;
case TCPOPT_SACK_PERM:
- if (opsize == TCPOLEN_SACK_PERM)
+ if (*opsize == TCPOLEN_SACK_PERM)
ctx->option_sack = true;
break;
}
- ctx->ptr += opsize;
+ ctx->off = off + *opsize;
return 0;
}
@@ -256,16 +271,21 @@ static int tscookie_tcpopt_parse_batch(__u32 index, void *context)
static __always_inline bool tscookie_init(struct tcphdr *tcp_header,
__u16 tcp_len, __be32 *tsval,
- __be32 *tsecr, void *data_end)
+ __be32 *tsecr, void *data, void *data_end)
{
struct tcpopt_context loop_ctx = {
- .ptr = (__u8 *)(tcp_header + 1),
- .end = (__u8 *)tcp_header + tcp_len,
+ .data = data,
.data_end = data_end,
.tsecr = tsecr,
.wscale = TS_OPT_WSCALE_MASK,
.option_timestamp = false,
.option_sack = false,
+		/* Note: currently the verifier tracks .off as an unbound
+		 * scalar. If the verifier ever gets smarter and computes
+		 * a bounded value for this var, beware that it might
+		 * hinder bpf_loop() convergence validation.
+		 */
+ .off = (__u8 *)(tcp_header + 1) - (__u8 *)data,
};
u32 cookie;
@@ -274,7 +294,7 @@ static __always_inline bool tscookie_init(struct tcphdr *tcp_header,
if (!loop_ctx.option_timestamp)
return false;
- cookie = tcp_time_stamp_raw() & ~TSMASK;
+ cookie = tcp_clock_ms() & ~TSMASK;
cookie |= loop_ctx.wscale & TS_OPT_WSCALE_MASK;
if (loop_ctx.option_sack)
cookie |= TS_OPT_SACK;
@@ -635,7 +655,7 @@ static __always_inline int syncookie_handle_syn(struct header_pointers *hdr,
cookie = (__u32)value;
if (tscookie_init((void *)hdr->tcp, hdr->tcp_len,
- &tsopt_buf[0], &tsopt_buf[1], data_end))
+ &tsopt_buf[0], &tsopt_buf[1], data, data_end))
tsopt = tsopt_buf;
/* Check that there is enough space for a SYNACK. It also covers
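A caller-side sketch of the new accessor pattern (parse_mss() is hypothetical; next() and struct tcpopt_context are the ones defined in the hunk above): every load goes through a single clamped offset, so the verifier reasons about ctx->off once instead of a chain of pointer comparisons.

static __always_inline int parse_mss(struct tcpopt_context *ctx, __u16 *mss)
{
	/* next() bounds-checks against both MAX_PACKET_OFF and data_end */
	__u8 *val = next(ctx, 2);

	if (!val)
		return -1;
	*mss = (val[0] << 8) | val[1];	/* TCP option values are big-endian */
	return 0;
}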
diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
index 24369f242853..ccde6a4c6319 100644
--- a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
+++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
@@ -3,11 +3,12 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "xsk_xdp_metadata.h"
+#include <linux/if_ether.h>
+#include "xsk_xdp_common.h"
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
- __uint(max_entries, 1);
+ __uint(max_entries, 2);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
} xsk SEC(".maps");
@@ -52,4 +53,21 @@ SEC("xdp.frags") int xsk_xdp_populate_metadata(struct xdp_md *xdp)
return bpf_redirect_map(&xsk, 0, XDP_DROP);
}
+SEC("xdp") int xsk_xdp_shared_umem(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ void *data_end = (void *)(long)xdp->data_end;
+ struct ethhdr *eth = data;
+
+ if (eth + 1 > data_end)
+ return XDP_DROP;
+
+ /* Redirecting packets based on the destination MAC address */
+ idx = ((unsigned int)(eth->h_dest[5])) / 2;
+ if (idx > MAX_SOCKETS)
+ return XDP_DROP;
+
+ return bpf_redirect_map(&xsk, idx, XDP_DROP);
+}
+
char _license[] SEC("license") = "GPL";
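The index math above assumes the transmitter encodes the socket number into the last byte of the destination MAC (the xskxceiver.c changes later in this series assign dst_mac[5] = 2 * j + 1 to socket j). A standalone C illustration of that mapping, not part of the patch:

#include <stdio.h>

int main(void)
{
	for (unsigned int j = 0; j < 2; j++) {
		unsigned int last_byte = 2 * j + 1;	/* dst_mac[5] for socket j */

		/* 1 / 2 == 0 and 3 / 2 == 1, so each stream maps back to
		 * the XSKMAP slot of the socket that generated it.
		 */
		printf("socket %u: dst_mac[5]=%#x -> idx=%u\n",
		       j, last_byte, last_byte / 2);
	}
	return 0;
}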
diff --git a/tools/testing/selftests/bpf/test_bpftool_synctypes.py b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
index 0cfece7ff4f8..0ed67b6b31dd 100755
--- a/tools/testing/selftests/bpf/test_bpftool_synctypes.py
+++ b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
@@ -509,6 +509,15 @@ def main():
source_map_types.remove('cgroup_storage_deprecated')
source_map_types.add('cgroup_storage')
+    # The same applies to BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED and
+ # BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE which share the same enum value
+ # and source_map_types picks
+ # BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED/percpu_cgroup_storage_deprecated.
+ # Replace 'percpu_cgroup_storage_deprecated' with 'percpu_cgroup_storage'
+ # so it aligns with what `bpftool map help` shows.
+ source_map_types.remove('percpu_cgroup_storage_deprecated')
+ source_map_types.add('percpu_cgroup_storage')
+
help_map_types = map_info.get_map_help()
help_map_options = map_info.get_options()
map_info.close()
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index b4edd8454934..37ffa57f28a1 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -69,7 +69,7 @@ static int tester_init(struct test_loader *tester)
{
if (!tester->log_buf) {
tester->log_buf_sz = TEST_LOADER_LOG_BUF_SZ;
- tester->log_buf = malloc(tester->log_buf_sz);
+ tester->log_buf = calloc(tester->log_buf_sz, 1);
if (!ASSERT_OK_PTR(tester->log_buf, "tester_log_buf"))
return -ENOMEM;
}
@@ -538,7 +538,7 @@ void run_subtest(struct test_loader *tester,
bool unpriv)
{
struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
- struct bpf_program *tprog, *tprog_iter;
+ struct bpf_program *tprog = NULL, *tprog_iter;
struct test_spec *spec_iter;
struct cap_state caps = {};
struct bpf_object *tobj;
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 4d582cac2c09..1b9387890148 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -255,7 +255,7 @@ static void print_subtest_name(int test_num, int subtest_num,
const char *test_name, char *subtest_name,
char *result)
{
- char test_num_str[TEST_NUM_WIDTH + 1];
+ char test_num_str[32];
snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 77bd492c6024..2f9f6f250f17 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -417,6 +417,8 @@ int get_bpf_max_tramp_links(void);
#define SYS_NANOSLEEP_KPROBE_NAME "__s390x_sys_nanosleep"
#elif defined(__aarch64__)
#define SYS_NANOSLEEP_KPROBE_NAME "__arm64_sys_nanosleep"
+#elif defined(__riscv)
+#define SYS_NANOSLEEP_KPROBE_NAME "__riscv_sys_nanosleep"
#else
#define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep"
#endif
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 31f1c935cd07..98107e0452d3 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1880,7 +1880,7 @@ int main(int argc, char **argv)
}
}
- get_unpriv_disabled();
+ unpriv_disabled = get_unpriv_disabled();
if (unpriv && unpriv_disabled) {
printf("Cannot run as unprivileged user with sysctl %s.\n",
UNPRIV_SYSCTL);
diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh
index 2aa5a3445056..65aafe0003db 100755
--- a/tools/testing/selftests/bpf/test_xsk.sh
+++ b/tools/testing/selftests/bpf/test_xsk.sh
@@ -73,17 +73,33 @@
#
# Run test suite for physical device in loopback mode
# sudo ./test_xsk.sh -i IFACE
+#
+# Run test suite in a specific mode only [skb,drv,zc]
+# sudo ./test_xsk.sh -m MODE
+#
+# List available tests
+# ./test_xsk.sh -l
+#
+# Run a specific test from the test suite
+# sudo ./test_xsk.sh -t TEST_NAME
+#
+# Display the available command line options
+# ./test_xsk.sh -h
. xsk_prereqs.sh
ETH=""
-while getopts "vi:d" flag
+while getopts "vi:dm:lt:h" flag
do
case "${flag}" in
v) verbose=1;;
d) debug=1;;
i) ETH=${OPTARG};;
+ m) MODE=${OPTARG};;
+ l) list=1;;
+ t) TEST=${OPTARG};;
+ h) help=1;;
esac
done
@@ -131,6 +147,16 @@ setup_vethPairs() {
ip link set ${VETH0} up
}
+if [[ $list -eq 1 ]]; then
+ ./${XSKOBJ} -l
+ exit
+fi
+
+if [[ $help -eq 1 ]]; then
+ ./${XSKOBJ}
+ exit
+fi
+
if [ ! -z $ETH ]; then
VETH0=${ETH}
VETH1=${ETH}
@@ -153,6 +179,14 @@ if [[ $verbose -eq 1 ]]; then
ARGS+="-v "
fi
+if [ -n "$MODE" ]; then
+ ARGS+="-m ${MODE} "
+fi
+
+if [ -n "$TEST" ]; then
+ ARGS+="-t ${TEST} "
+fi
+
retval=$?
test_status $retval "${TEST_NAME}"
@@ -175,6 +209,10 @@ else
cleanup_iface ${ETH} ${MTU}
fi
+if [[ $list -eq 1 ]]; then
+ exit
+fi
+
TEST_NAME="XSK_SELFTESTS_${VETH0}_BUSY_POLL"
busy_poll=1
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index f83d9f65c65b..4faa898ff7fc 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -7,6 +7,7 @@
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
+#include <pthread.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/mman.h>
@@ -14,104 +15,165 @@
#include <linux/limits.h>
#include <libelf.h>
#include <gelf.h>
+#include "bpf/libbpf_internal.h"
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
-#define MAX_SYMS 400000
-static struct ksym syms[MAX_SYMS];
-static int sym_cnt;
+struct ksyms {
+ struct ksym *syms;
+ size_t sym_cap;
+ size_t sym_cnt;
+};
+
+static struct ksyms *ksyms;
+static pthread_mutex_t ksyms_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static int ksyms__add_symbol(struct ksyms *ksyms, const char *name,
+ unsigned long addr)
+{
+ void *tmp;
+
+ tmp = strdup(name);
+ if (!tmp)
+ return -ENOMEM;
+ ksyms->syms[ksyms->sym_cnt].addr = addr;
+ ksyms->syms[ksyms->sym_cnt].name = tmp;
+ ksyms->sym_cnt++;
+ return 0;
+}
+
+void free_kallsyms_local(struct ksyms *ksyms)
+{
+ unsigned int i;
+
+ if (!ksyms)
+ return;
+
+ if (!ksyms->syms) {
+ free(ksyms);
+ return;
+ }
+
+ for (i = 0; i < ksyms->sym_cnt; i++)
+ free(ksyms->syms[i].name);
+ free(ksyms->syms);
+ free(ksyms);
+}
static int ksym_cmp(const void *p1, const void *p2)
{
return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}
-int load_kallsyms_refresh(void)
+struct ksyms *load_kallsyms_local(void)
{
FILE *f;
char func[256], buf[256];
char symbol;
void *addr;
- int i = 0;
-
- sym_cnt = 0;
+ int ret;
+ struct ksyms *ksyms;
f = fopen("/proc/kallsyms", "r");
if (!f)
- return -ENOENT;
+ return NULL;
+
+ ksyms = calloc(1, sizeof(struct ksyms));
+ if (!ksyms) {
+ fclose(f);
+ return NULL;
+ }
while (fgets(buf, sizeof(buf), f)) {
if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
break;
if (!addr)
continue;
- if (i >= MAX_SYMS)
- return -EFBIG;
- syms[i].addr = (long) addr;
- syms[i].name = strdup(func);
- i++;
+ ret = libbpf_ensure_mem((void **) &ksyms->syms, &ksyms->sym_cap,
+ sizeof(struct ksym), ksyms->sym_cnt + 1);
+ if (ret)
+ goto error;
+ ret = ksyms__add_symbol(ksyms, func, (unsigned long)addr);
+ if (ret)
+ goto error;
}
fclose(f);
- sym_cnt = i;
- qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
- return 0;
+ qsort(ksyms->syms, ksyms->sym_cnt, sizeof(struct ksym), ksym_cmp);
+ return ksyms;
+
+error:
+ fclose(f);
+ free_kallsyms_local(ksyms);
+ return NULL;
}
int load_kallsyms(void)
{
- /*
- * This is called/used from multiplace places,
- * load symbols just once.
- */
- if (sym_cnt)
- return 0;
- return load_kallsyms_refresh();
+ pthread_mutex_lock(&ksyms_mutex);
+ if (!ksyms)
+ ksyms = load_kallsyms_local();
+ pthread_mutex_unlock(&ksyms_mutex);
+ return ksyms ? 0 : 1;
}
-struct ksym *ksym_search(long key)
+struct ksym *ksym_search_local(struct ksyms *ksyms, long key)
{
- int start = 0, end = sym_cnt;
+ int start = 0, end = ksyms->sym_cnt;
int result;
/* kallsyms not loaded. return NULL */
- if (sym_cnt <= 0)
+ if (ksyms->sym_cnt <= 0)
return NULL;
while (start < end) {
size_t mid = start + (end - start) / 2;
- result = key - syms[mid].addr;
+ result = key - ksyms->syms[mid].addr;
if (result < 0)
end = mid;
else if (result > 0)
start = mid + 1;
else
- return &syms[mid];
+ return &ksyms->syms[mid];
}
- if (start >= 1 && syms[start - 1].addr < key &&
- key < syms[start].addr)
+ if (start >= 1 && ksyms->syms[start - 1].addr < key &&
+ key < ksyms->syms[start].addr)
/* valid ksym */
- return &syms[start - 1];
+ return &ksyms->syms[start - 1];
/* out of range. return _stext */
- return &syms[0];
+ return &ksyms->syms[0];
}
-long ksym_get_addr(const char *name)
+struct ksym *ksym_search(long key)
+{
+ if (!ksyms)
+ return NULL;
+ return ksym_search_local(ksyms, key);
+}
+
+long ksym_get_addr_local(struct ksyms *ksyms, const char *name)
{
int i;
- for (i = 0; i < sym_cnt; i++) {
- if (strcmp(syms[i].name, name) == 0)
- return syms[i].addr;
+ for (i = 0; i < ksyms->sym_cnt; i++) {
+ if (strcmp(ksyms->syms[i].name, name) == 0)
+ return ksyms->syms[i].addr;
}
return 0;
}
+long ksym_get_addr(const char *name)
+{
+ if (!ksyms)
+ return 0;
+ return ksym_get_addr_local(ksyms, name);
+}
+
/* open kallsyms and read symbol addresses on the fly. Without caching all symbols,
* this is faster than load + find.
*/
diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h
index 876f3e711df6..04fd1da7079d 100644
--- a/tools/testing/selftests/bpf/trace_helpers.h
+++ b/tools/testing/selftests/bpf/trace_helpers.h
@@ -11,13 +11,17 @@ struct ksym {
long addr;
char *name;
};
+struct ksyms;
int load_kallsyms(void);
-int load_kallsyms_refresh(void);
-
struct ksym *ksym_search(long key);
long ksym_get_addr(const char *name);
+struct ksyms *load_kallsyms_local(void);
+struct ksym *ksym_search_local(struct ksyms *ksyms, long key);
+long ksym_get_addr_local(struct ksyms *ksyms, const char *name);
+void free_kallsyms_local(struct ksyms *ksyms);
+
/* open kallsyms and find addresses on the fly, faster than load + search. */
int kallsyms_find(const char *sym, unsigned long long *addr);
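A hypothetical call site for the new reentrant API (print_symbol_at() is illustrative; the four *_local functions are the ones declared above): each caller owns its own symbol table instead of sharing the old process-wide one.

#include <stdio.h>
#include "trace_helpers.h"

static int print_symbol_at(long addr)
{
	struct ksyms *ksyms = load_kallsyms_local();
	struct ksym *sym;

	if (!ksyms)
		return -1;

	sym = ksym_search_local(ksyms, addr);
	printf("%lx -> %s\n", addr, sym ? sym->name : "<none>");

	free_kallsyms_local(ksyms);	/* frees names, array, and handle */
	return 0;
}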
diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c
index 2a6efbd0401e..b6d016461fb0 100644
--- a/tools/testing/selftests/bpf/unpriv_helpers.c
+++ b/tools/testing/selftests/bpf/unpriv_helpers.c
@@ -4,9 +4,40 @@
#include <stdlib.h>
#include <error.h>
#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
#include "unpriv_helpers.h"
+static bool get_mitigations_off(void)
+{
+ char cmdline[4096], *c;
+ int fd, ret = false;
+
+ fd = open("/proc/cmdline", O_RDONLY);
+ if (fd < 0) {
+ perror("open /proc/cmdline");
+ return false;
+ }
+
+ if (read(fd, cmdline, sizeof(cmdline) - 1) < 0) {
+ perror("read /proc/cmdline");
+ goto out;
+ }
+
+ cmdline[sizeof(cmdline) - 1] = '\0';
+ for (c = strtok(cmdline, " \n"); c; c = strtok(NULL, " \n")) {
+ if (strncmp(c, "mitigations=off", strlen(c)))
+ continue;
+ ret = true;
+ break;
+ }
+out:
+ close(fd);
+ return ret;
+}
+
bool get_unpriv_disabled(void)
{
bool disabled;
@@ -22,5 +53,5 @@ bool get_unpriv_disabled(void)
disabled = true;
}
- return disabled;
+ return disabled ? true : get_mitigations_off();
}
diff --git a/tools/testing/selftests/bpf/urandom_read.c b/tools/testing/selftests/bpf/urandom_read.c
index e92644d0fa75..4ed795655b9f 100644
--- a/tools/testing/selftests/bpf/urandom_read.c
+++ b/tools/testing/selftests/bpf/urandom_read.c
@@ -11,6 +11,9 @@
#define _SDT_HAS_SEMAPHORES 1
#include "sdt.h"
+#define SHARED 1
+#include "bpf/libbpf_internal.h"
+
#define SEC(name) __attribute__((section(name), used))
#define BUF_SIZE 256
@@ -21,10 +24,14 @@ void urand_read_without_sema(int iter_num, int iter_cnt, int read_sz);
void urandlib_read_with_sema(int iter_num, int iter_cnt, int read_sz);
void urandlib_read_without_sema(int iter_num, int iter_cnt, int read_sz);
+int urandlib_api(void);
+COMPAT_VERSION(urandlib_api_old, urandlib_api, LIBURANDOM_READ_1.0.0)
+int urandlib_api_old(void);
+int urandlib_api_sameoffset(void);
+
unsigned short urand_read_with_sema_semaphore SEC(".probes");
-static __attribute__((noinline))
-void urandom_read(int fd, int count)
+static noinline void urandom_read(int fd, int count)
{
char buf[BUF_SIZE];
int i;
@@ -83,6 +90,10 @@ int main(int argc, char *argv[])
urandom_read(fd, count);
+ urandlib_api();
+ urandlib_api_old();
+ urandlib_api_sameoffset();
+
close(fd);
return 0;
}
diff --git a/tools/testing/selftests/bpf/urandom_read_lib1.c b/tools/testing/selftests/bpf/urandom_read_lib1.c
index 86186e24b740..8c1356d8b4ee 100644
--- a/tools/testing/selftests/bpf/urandom_read_lib1.c
+++ b/tools/testing/selftests/bpf/urandom_read_lib1.c
@@ -3,6 +3,9 @@
#define _SDT_HAS_SEMAPHORES 1
#include "sdt.h"
+#define SHARED 1
+#include "bpf/libbpf_internal.h"
+
#define SEC(name) __attribute__((section(name), used))
unsigned short urandlib_read_with_sema_semaphore SEC(".probes");
@@ -11,3 +14,22 @@ void urandlib_read_with_sema(int iter_num, int iter_cnt, int read_sz)
{
STAP_PROBE3(urandlib, read_with_sema, iter_num, iter_cnt, read_sz);
}
+
+COMPAT_VERSION(urandlib_api_v1, urandlib_api, LIBURANDOM_READ_1.0.0)
+int urandlib_api_v1(void)
+{
+ return 1;
+}
+
+DEFAULT_VERSION(urandlib_api_v2, urandlib_api, LIBURANDOM_READ_2.0.0)
+int urandlib_api_v2(void)
+{
+ return 2;
+}
+
+COMPAT_VERSION(urandlib_api_sameoffset, urandlib_api_sameoffset, LIBURANDOM_READ_1.0.0)
+DEFAULT_VERSION(urandlib_api_sameoffset, urandlib_api_sameoffset, LIBURANDOM_READ_2.0.0)
+int urandlib_api_sameoffset(void)
+{
+ return 3;
+}
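For readers unfamiliar with the two macros: with SHARED defined, as in the includes above, libbpf's libbpf_internal.h expands them (roughly, at the time of this series) to GNU .symver directives, where a single '@' marks a compat version of the symbol and '@@' marks the default picked at link time.

/* Approximate expansion, for illustration only: */
#define COMPAT_VERSION(internal_name, api_name, version)	\
	asm(".symver " #internal_name "," #api_name "@" #version);
#define DEFAULT_VERSION(internal_name, api_name, version)	\
	asm(".symver " #internal_name "," #api_name "@@" #version);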
diff --git a/tools/testing/selftests/bpf/verifier/bpf_st_mem.c b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
index 3af2501082b2..b616575c3b00 100644
--- a/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
+++ b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
@@ -65,3 +65,35 @@
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},
+{
+ "BPF_ST_MEM stack imm sign",
+ /* Check if verifier correctly reasons about sign of an
+ * immediate spilled to stack by BPF_ST instruction.
+ *
+ * fp[-8] = -44;
+ * r0 = fp[-8];
+ * if r0 s< 0 goto ret0;
+ * r0 = -1;
+ * exit;
+ * ret0:
+ * r0 = 0;
+ * exit;
+ */
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, -44),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ /* Use prog type that requires return value in range [0, 1] */
+ .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+ .expected_attach_type = BPF_SK_LOOKUP,
+ .result = VERBOSE_ACCEPT,
+ .runs = -1,
+ .errstr = "0: (7a) *(u64 *)(r10 -8) = -44 ; R10=fp0 fp-8_w=-44\
+ 2: (c5) if r0 s< 0x0 goto pc+2\
+ R0_w=-44",
+},
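The behavior under test hinges on BPF_ST's 32-bit immediate being sign-extended when spilled as a double word. A plain C illustration of the same arithmetic (not part of the test):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t imm = -44;
	uint64_t slot = (uint64_t)(int64_t)imm;	/* sign-extended 64-bit spill */

	/* Prints 0xffffffffffffffd4 / -44: the reload is negative,
	 * so the JSLT branch in the test above is taken.
	 */
	printf("%#llx / %lld\n", (unsigned long long)slot, (long long)slot);
	return 0;
}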
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 1bdf2b43e49e..3d5cd51071f0 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -442,7 +442,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn 0 to 0",
+ .errstr = "the call stack of 9 frames is too deep",
.result = REJECT,
},
{
@@ -799,7 +799,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge",
+ .errstr = "the call stack of 9 frames is too deep",
.result = REJECT,
},
{
@@ -811,7 +811,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge",
+ .errstr = "the call stack of 9 frames is too deep",
.result = REJECT,
},
{
diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
index f9297900cea6..78f19c255f20 100644
--- a/tools/testing/selftests/bpf/verifier/ld_imm64.c
+++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
@@ -9,8 +9,8 @@
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
- .errstr = "invalid BPF_LD_IMM insn",
- .errstr_unpriv = "R1 pointer comparison",
+ .errstr = "jump into the middle of ldimm64 insn 1",
+ .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
.result = REJECT,
},
{
@@ -23,8 +23,8 @@
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
- .errstr = "invalid BPF_LD_IMM insn",
- .errstr_unpriv = "R1 pointer comparison",
+ .errstr = "jump into the middle of ldimm64 insn 1",
+ .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
.result = REJECT,
},
{
diff --git a/tools/testing/selftests/bpf/xdp_features.c b/tools/testing/selftests/bpf/xdp_features.c
index b449788fbd39..595c79141cf3 100644
--- a/tools/testing/selftests/bpf/xdp_features.c
+++ b/tools/testing/selftests/bpf/xdp_features.c
@@ -360,9 +360,9 @@ static int recv_msg(int sockfd, void *buf, size_t bufsize, void *val,
static int dut_run(struct xdp_features *skel)
{
int flags = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_DRV_MODE;
- int state, err, *sockfd, ctrl_sockfd, echo_sockfd;
+ int state, err = 0, *sockfd, ctrl_sockfd, echo_sockfd;
struct sockaddr_storage ctrl_addr;
- pthread_t dut_thread;
+ pthread_t dut_thread = 0;
socklen_t addrlen;
sockfd = start_reuseport_server(AF_INET6, SOCK_STREAM, NULL,
diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
index 613321eb84c1..c3ba40d0b9de 100644
--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
@@ -26,6 +26,7 @@
#include <linux/sockios.h>
#include <sys/mman.h>
#include <net/if.h>
+#include <ctype.h>
#include <poll.h>
#include <time.h>
@@ -47,6 +48,7 @@ struct xsk {
};
struct xdp_hw_metadata *bpf_obj;
+__u16 bind_flags = XDP_COPY;
struct xsk *rx_xsk;
const char *ifname;
int ifindex;
@@ -60,7 +62,7 @@ static int open_xsk(int ifindex, struct xsk *xsk, __u32 queue_id)
const struct xsk_socket_config socket_config = {
.rx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
- .bind_flags = XDP_COPY,
+ .bind_flags = bind_flags,
};
const struct xsk_umem_config umem_config = {
.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
@@ -234,7 +236,7 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t
struct pollfd fds[rxq + 1];
__u64 comp_addr;
__u64 addr;
- __u32 idx;
+ __u32 idx = 0;
int ret;
int i;
@@ -263,11 +265,14 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t
verify_skb_metadata(server_fd);
for (i = 0; i < rxq; i++) {
+ bool first_seg = true;
+ bool is_eop = true;
+
if (fds[i].revents == 0)
continue;
struct xsk *xsk = &rx_xsk[i];
-
+peek:
ret = xsk_ring_cons__peek(&xsk->rx, 1, &idx);
printf("xsk_ring_cons__peek: %d\n", ret);
if (ret != 1)
@@ -276,12 +281,19 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t
rx_desc = xsk_ring_cons__rx_desc(&xsk->rx, idx);
comp_addr = xsk_umem__extract_addr(rx_desc->addr);
addr = xsk_umem__add_offset_to_addr(rx_desc->addr);
- printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n",
- xsk, idx, rx_desc->addr, addr, comp_addr);
- verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr),
- clock_id);
+ is_eop = !(rx_desc->options & XDP_PKT_CONTD);
+ printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx%s\n",
+ xsk, idx, rx_desc->addr, addr, comp_addr, is_eop ? " EoP" : "");
+ if (first_seg) {
+ verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr),
+ clock_id);
+ first_seg = false;
+ }
+
xsk_ring_cons__release(&xsk->rx, 1);
refill_rx(xsk, comp_addr);
+ if (!is_eop)
+ goto peek;
}
}
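The loop above keys entirely off XDP_PKT_CONTD: a descriptor with the bit set is continued by the next descriptor, and only the final fragment of a multi-buffer packet has it clear. A minimal predicate capturing that, as a sketch:

#include <stdbool.h>
#include <linux/if_xdp.h>

/* Sketch, not part of the patch: the last fragment is the one
 * without XDP_PKT_CONTD in its options field.
 */
static bool desc_is_eop(const struct xdp_desc *desc)
{
	return !(desc->options & XDP_PKT_CONTD);
}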
@@ -404,6 +416,53 @@ static void timestamping_enable(int fd, int val)
error(1, errno, "setsockopt(SO_TIMESTAMPING)");
}
+static void print_usage(void)
+{
+ const char *usage =
+ "Usage: xdp_hw_metadata [OPTIONS] [IFNAME]\n"
+ " -m Enable multi-buffer XDP for larger MTU\n"
+ " -h Display this help and exit\n\n"
+ "Generate test packets on the other machine with:\n"
+ " echo -n xdp | nc -u -q1 <dst_ip> 9091\n";
+
+ printf("%s", usage);
+}
+
+static void read_args(int argc, char *argv[])
+{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "mh")) != -1) {
+ switch (opt) {
+ case 'm':
+ bind_flags |= XDP_USE_SG;
+ break;
+ case 'h':
+ print_usage();
+ exit(0);
+ case '?':
+ if (isprint(optopt))
+ fprintf(stderr, "Unknown option: -%c\n", optopt);
+ fallthrough;
+ default:
+ print_usage();
+ error(-1, opterr, "Command line options error");
+ }
+ }
+
+ if (optind >= argc) {
+ fprintf(stderr, "No device name provided\n");
+ print_usage();
+ exit(-1);
+ }
+
+ ifname = argv[optind];
+ ifindex = if_nametoindex(ifname);
+
+	if (!ifindex)
+ error(-1, errno, "Invalid interface name");
+}
+
int main(int argc, char *argv[])
{
clockid_t clock_id = CLOCK_TAI;
@@ -413,13 +472,8 @@ int main(int argc, char *argv[])
struct bpf_program *prog;
- if (argc != 2) {
- fprintf(stderr, "pass device name\n");
- return -1;
- }
+ read_args(argc, argv);
- ifname = argv[1];
- ifindex = if_nametoindex(ifname);
rxq = rxq_num(ifname);
printf("rxq: %d\n", rxq);
diff --git a/tools/testing/selftests/bpf/xsk.c b/tools/testing/selftests/bpf/xsk.c
index d9fb2b730a2c..e574711eeb84 100644
--- a/tools/testing/selftests/bpf/xsk.c
+++ b/tools/testing/selftests/bpf/xsk.c
@@ -442,10 +442,9 @@ void xsk_clear_xskmap(struct bpf_map *map)
bpf_map_delete_elem(map_fd, &index);
}
-int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk)
+int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk, u32 index)
{
int map_fd, sock_fd;
- u32 index = 0;
map_fd = bpf_map__fd(map);
sock_fd = xsk_socket__fd(xsk);
diff --git a/tools/testing/selftests/bpf/xsk.h b/tools/testing/selftests/bpf/xsk.h
index d93200fdaa8d..771570bc3731 100644
--- a/tools/testing/selftests/bpf/xsk.h
+++ b/tools/testing/selftests/bpf/xsk.h
@@ -204,7 +204,7 @@ struct xsk_umem_config {
int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags);
void xsk_detach_xdp_program(int ifindex, u32 xdp_flags);
-int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk);
+int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk, u32 index);
void xsk_clear_xskmap(struct bpf_map *map);
bool xsk_is_in_mode(u32 ifindex, int mode);
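With the extra index parameter, a caller can fill one XSKMAP slot per socket; a hypothetical helper (illustrative name, building only on the prototype above) sketching that:

#include "xsk.h"

static int xskmap_fill(struct bpf_map *map, struct xsk_socket **xsks, u32 n)
{
	for (u32 i = 0; i < n; i++) {
		int err = xsk_update_xskmap(map, xsks[i], i);

		if (err)
			return err;	/* slot i could not be updated */
	}
	return 0;
}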
diff --git a/tools/testing/selftests/bpf/xsk_prereqs.sh b/tools/testing/selftests/bpf/xsk_prereqs.sh
index 29175682c44d..47c7b8064f38 100755
--- a/tools/testing/selftests/bpf/xsk_prereqs.sh
+++ b/tools/testing/selftests/bpf/xsk_prereqs.sh
@@ -83,9 +83,11 @@ exec_xskxceiver()
fi
./${XSKOBJ} -i ${VETH0} -i ${VETH1} ${ARGS}
-
retval=$?
- test_status $retval "${TEST_NAME}"
- statusList+=($retval)
- nameList+=(${TEST_NAME})
+
+ if [[ $list -ne 1 ]]; then
+ test_status $retval "${TEST_NAME}"
+ statusList+=($retval)
+ nameList+=(${TEST_NAME})
+ fi
}
diff --git a/tools/testing/selftests/bpf/xsk_xdp_common.h b/tools/testing/selftests/bpf/xsk_xdp_common.h
new file mode 100644
index 000000000000..5a6f36f07383
--- /dev/null
+++ b/tools/testing/selftests/bpf/xsk_xdp_common.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef XSK_XDP_COMMON_H_
+#define XSK_XDP_COMMON_H_
+
+#define MAX_SOCKETS 2
+
+struct xdp_info {
+ __u64 count;
+} __attribute__((aligned(32)));
+
+#endif /* XSK_XDP_COMMON_H_ */
diff --git a/tools/testing/selftests/bpf/xsk_xdp_metadata.h b/tools/testing/selftests/bpf/xsk_xdp_metadata.h
deleted file mode 100644
index 943133da378a..000000000000
--- a/tools/testing/selftests/bpf/xsk_xdp_metadata.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-struct xdp_info {
- __u64 count;
-} __attribute__((aligned(32)));
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 2827f2d7cf30..b604c570309a 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -80,6 +80,7 @@
#include <linux/if_ether.h>
#include <linux/mman.h>
#include <linux/netdev.h>
+#include <linux/bitmap.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <locale.h>
@@ -102,10 +103,12 @@
#include <bpf/bpf.h>
#include <linux/filter.h>
#include "../kselftest.h"
-#include "xsk_xdp_metadata.h"
+#include "xsk_xdp_common.h"
-static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
-static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
+static bool opt_verbose;
+static bool opt_print_tests;
+static enum test_mode opt_mode = TEST_MODE_ALL;
+static u32 opt_run_test = RUN_ALL_TESTS;
static void __exit_with_error(int error, const char *file, const char *func, int line)
{
@@ -154,10 +157,10 @@ static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
ptr[i] = htonl(pkt_nb << 16 | (i + start));
}
-static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr)
+static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
{
- memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN);
- memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN);
+ memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
+ memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
}
@@ -255,7 +258,7 @@ static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_i
cfg.bind_flags = ifobject->bind_flags;
if (shared)
cfg.bind_flags |= XDP_SHARED_UMEM;
- if (ifobject->pkt_stream && ifobject->mtu > MAX_ETH_PKT_SIZE)
+ if (ifobject->mtu > MAX_ETH_PKT_SIZE)
cfg.bind_flags |= XDP_USE_SG;
txr = ifobject->tx_on ? &xsk->tx : NULL;
@@ -310,19 +313,28 @@ static struct option long_options[] = {
{"interface", required_argument, 0, 'i'},
{"busy-poll", no_argument, 0, 'b'},
{"verbose", no_argument, 0, 'v'},
+ {"mode", required_argument, 0, 'm'},
+ {"list", no_argument, 0, 'l'},
+ {"test", required_argument, 0, 't'},
+ {"help", no_argument, 0, 'h'},
{0, 0, 0, 0}
};
-static void usage(const char *prog)
+static void print_usage(char **argv)
{
const char *str =
- " Usage: %s [OPTIONS]\n"
+ " Usage: xskxceiver [OPTIONS]\n"
" Options:\n"
" -i, --interface Use interface\n"
" -v, --verbose Verbose output\n"
- " -b, --busy-poll Enable busy poll\n";
+ " -b, --busy-poll Enable busy poll\n"
+ " -m, --mode Run only mode skb, drv, or zc\n"
+ " -l, --list List all available tests\n"
+ " -t, --test Run a specific test. Enter number from -l option.\n"
+ " -h, --help Display this help and exit\n";
- ksft_print_msg(str, prog);
+ ksft_print_msg(str, basename(argv[0]));
+ ksft_exit_xfail();
}
static bool validate_interface(struct ifobject *ifobj)
@@ -342,7 +354,7 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
opterr = 0;
for (;;) {
- c = getopt_long(argc, argv, "i:vb", long_options, &option_index);
+ c = getopt_long(argc, argv, "i:vbm:lt:", long_options, &option_index);
if (c == -1)
break;
@@ -371,9 +383,28 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
ifobj_tx->busy_poll = true;
ifobj_rx->busy_poll = true;
break;
+ case 'm':
+ if (!strncmp("skb", optarg, strlen(optarg)))
+ opt_mode = TEST_MODE_SKB;
+ else if (!strncmp("drv", optarg, strlen(optarg)))
+ opt_mode = TEST_MODE_DRV;
+ else if (!strncmp("zc", optarg, strlen(optarg)))
+ opt_mode = TEST_MODE_ZC;
+ else
+ print_usage(argv);
+ break;
+ case 'l':
+ opt_print_tests = true;
+ break;
+ case 't':
+ errno = 0;
+ opt_run_test = strtol(optarg, NULL, 0);
+ if (errno)
+ print_usage(argv);
+ break;
+ case 'h':
default:
- usage(basename(argv[0]));
- ksft_exit_xfail();
+ print_usage(argv);
}
}
}
@@ -396,11 +427,9 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
if (i == 0) {
ifobj->rx_on = false;
ifobj->tx_on = true;
- ifobj->pkt_stream = test->tx_pkt_stream_default;
} else {
ifobj->rx_on = true;
ifobj->tx_on = false;
- ifobj->pkt_stream = test->rx_pkt_stream_default;
}
memset(ifobj->umem, 0, sizeof(*ifobj->umem));
@@ -410,6 +439,15 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
for (j = 0; j < MAX_SOCKETS; j++) {
memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ if (i == 0)
+ ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
+ else
+ ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;
+
+ memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN);
+ memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN);
+ ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0);
+ ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1);
}
}
@@ -427,7 +465,8 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
}
static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
- struct ifobject *ifobj_rx, enum test_mode mode)
+ struct ifobject *ifobj_rx, enum test_mode mode,
+ const struct test_spec *test_to_run)
{
struct pkt_stream *tx_pkt_stream;
struct pkt_stream *rx_pkt_stream;
@@ -449,6 +488,8 @@ static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
ifobj->bind_flags |= XDP_COPY;
}
+ strncpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE);
+ test->test_func = test_to_run->test_func;
test->mode = mode;
__test_spec_init(test, ifobj_tx, ifobj_rx);
}
@@ -458,11 +499,6 @@ static void test_spec_reset(struct test_spec *test)
__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
}
-static void test_spec_set_name(struct test_spec *test, const char *name)
-{
- strncpy(test->name, name, MAX_TEST_NAME_SIZE);
-}
-
static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
struct bpf_map *xskmap_tx)
@@ -495,8 +531,10 @@ static int test_spec_set_mtu(struct test_spec *test, int mtu)
static void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
- if (pkt_stream)
+ if (pkt_stream) {
pkt_stream->current_pkt_nb = 0;
+ pkt_stream->nb_rx_pkts = 0;
+ }
}
static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
@@ -526,17 +564,17 @@ static void pkt_stream_delete(struct pkt_stream *pkt_stream)
static void pkt_stream_restore_default(struct test_spec *test)
{
- struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream;
- struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream;
+ struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream;
+ struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream;
if (tx_pkt_stream != test->tx_pkt_stream_default) {
- pkt_stream_delete(test->ifobj_tx->pkt_stream);
- test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default;
+ pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream);
+ test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
}
if (rx_pkt_stream != test->rx_pkt_stream_default) {
- pkt_stream_delete(test->ifobj_rx->pkt_stream);
- test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default;
+ pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
+ test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
}
}
@@ -596,14 +634,16 @@ static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pk
return nb_frags;
}
-static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len)
+static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
{
pkt->offset = offset;
pkt->len = len;
- if (len > MAX_ETH_JUMBO_SIZE)
+ if (len > MAX_ETH_JUMBO_SIZE) {
pkt->valid = false;
- else
+ } else {
pkt->valid = true;
+ pkt_stream->nb_valid_entries++;
+ }
}
static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
@@ -611,7 +651,7 @@ static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
return ceil_u32(len, umem->frame_size) * umem->frame_size;
}
-static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
+static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb_start, u32 nb_off)
{
struct pkt_stream *pkt_stream;
u32 i;
@@ -625,41 +665,45 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
for (i = 0; i < nb_pkts; i++) {
struct pkt *pkt = &pkt_stream->pkts[i];
- pkt_set(umem, pkt, 0, pkt_len);
- pkt->pkt_nb = i;
+ pkt_set(pkt_stream, pkt, 0, pkt_len);
+ pkt->pkt_nb = nb_start + i * nb_off;
}
return pkt_stream;
}
-static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem,
- struct pkt_stream *pkt_stream)
+static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
{
- return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
+ return __pkt_stream_generate(nb_pkts, pkt_len, 0, 1);
+}
+
+static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
+{
+ return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
}
static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
{
struct pkt_stream *pkt_stream;
- pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len);
- test->ifobj_tx->pkt_stream = pkt_stream;
- pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len);
- test->ifobj_rx->pkt_stream = pkt_stream;
+ pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
+ test->ifobj_tx->xsk->pkt_stream = pkt_stream;
+ pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream;
}
static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
int offset)
{
- struct xsk_umem_info *umem = ifobj->umem;
struct pkt_stream *pkt_stream;
u32 i;
- pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream);
- for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2)
- pkt_set(umem, &pkt_stream->pkts[i], offset, pkt_len);
+ pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
+ for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
+ pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);
- ifobj->pkt_stream = pkt_stream;
+ ifobj->xsk->pkt_stream = pkt_stream;
+ pkt_stream->nb_valid_entries /= 2;
}
static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
@@ -670,15 +714,34 @@ static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int off
static void pkt_stream_receive_half(struct test_spec *test)
{
- struct xsk_umem_info *umem = test->ifobj_rx->umem;
- struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream;
+ struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
u32 i;
- test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts,
- pkt_stream->pkts[0].len);
- pkt_stream = test->ifobj_rx->pkt_stream;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
+ pkt_stream->pkts[0].len);
+ pkt_stream = test->ifobj_rx->xsk->pkt_stream;
for (i = 1; i < pkt_stream->nb_pkts; i += 2)
pkt_stream->pkts[i].valid = false;
+
+ pkt_stream->nb_valid_entries /= 2;
+}
+
+static void pkt_stream_even_odd_sequence(struct test_spec *test)
+{
+ struct pkt_stream *pkt_stream;
+ u32 i;
+
+ for (i = 0; i < test->nb_sockets; i++) {
+ pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream;
+ pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
+ pkt_stream->pkts[0].len, i, 2);
+ test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream;
+
+ pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream;
+ pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
+ pkt_stream->pkts[0].len, i, 2);
+ test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream;
+ }
}
static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
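pkt_stream_even_odd_sequence() leans on the new nb_start/nb_off parameters of __pkt_stream_generate(): with nb_start = i and nb_off = 2, socket 0 receives packet numbers 0, 2, 4, ... and socket 1 receives 1, 3, 5, .... A standalone C illustration of the numbering (not part of the patch):

#include <stdio.h>

int main(void)
{
	for (unsigned int i = 0; i < 2; i++) {		/* socket index */
		printf("socket %u:", i);
		for (unsigned int n = 0; n < 4; n++)	/* first pkt_nb values */
			printf(" %u", i + n * 2);	/* nb_start + n * nb_off */
		printf("\n");
	}
	return 0;
}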
@@ -693,16 +756,16 @@ static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
pkt_stream->current_pkt_nb--;
}
-static void pkt_generate(struct ifobject *ifobject, u64 addr, u32 len, u32 pkt_nb,
- u32 bytes_written)
+static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
+ u32 pkt_nb, u32 bytes_written)
{
- void *data = xsk_umem__get_data(ifobject->umem->buffer, addr);
+ void *data = xsk_umem__get_data(umem->buffer, addr);
if (len < MIN_PKT_SIZE)
return;
if (!bytes_written) {
- gen_eth_hdr(ifobject, data);
+ gen_eth_hdr(xsk, data);
len -= PKT_HDR_SIZE;
data += PKT_HDR_SIZE;
@@ -747,8 +810,15 @@ static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, s
len = 0;
}
+ print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n",
+ pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb);
+
if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
pkt_stream->max_pkt_len = pkt->len;
+
+ if (pkt->valid)
+ pkt_stream->nb_valid_entries++;
+
pkt_nb++;
}
@@ -762,10 +832,10 @@ static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts,
struct pkt_stream *pkt_stream;
pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
- test->ifobj_tx->pkt_stream = pkt_stream;
+ test->ifobj_tx->xsk->pkt_stream = pkt_stream;
pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
- test->ifobj_rx->pkt_stream = pkt_stream;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream;
}
static void pkt_print_data(u32 *data, u32 cnt)
@@ -777,7 +847,7 @@ static void pkt_print_data(u32 *data, u32 cnt)
seqnum = ntohl(*data) & 0xffff;
pkt_nb = ntohl(*data) >> 16;
- fprintf(stdout, "%u:%u ", pkt_nb, seqnum);
+ ksft_print_msg("%u:%u ", pkt_nb, seqnum);
data++;
}
}
@@ -789,13 +859,13 @@ static void pkt_dump(void *pkt, u32 len, bool eth_header)
if (eth_header) {
/*extract L2 frame */
- fprintf(stdout, "DEBUG>> L2: dst mac: ");
+ ksft_print_msg("DEBUG>> L2: dst mac: ");
for (i = 0; i < ETH_ALEN; i++)
- fprintf(stdout, "%02X", ethhdr->h_dest[i]);
+ ksft_print_msg("%02X", ethhdr->h_dest[i]);
- fprintf(stdout, "\nDEBUG>> L2: src mac: ");
+ ksft_print_msg("\nDEBUG>> L2: src mac: ");
for (i = 0; i < ETH_ALEN; i++)
- fprintf(stdout, "%02X", ethhdr->h_source[i]);
+ ksft_print_msg("%02X", ethhdr->h_source[i]);
data = pkt + PKT_HDR_SIZE;
} else {
@@ -803,15 +873,15 @@ static void pkt_dump(void *pkt, u32 len, bool eth_header)
}
/*extract L5 frame */
- fprintf(stdout, "\nDEBUG>> L5: seqnum: ");
+ ksft_print_msg("\nDEBUG>> L5: seqnum: ");
pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
- fprintf(stdout, "....");
+ ksft_print_msg("....");
if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
- fprintf(stdout, "\n.... ");
+ ksft_print_msg("\n.... ");
pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
PKT_DUMP_NB_TO_PRINT);
}
- fprintf(stdout, "\n---------------------------------------\n");
+ ksft_print_msg("\n---------------------------------------\n");
}
static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
@@ -838,8 +908,9 @@ static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
struct xdp_info *meta = data - sizeof(struct xdp_info);
if (meta->count != pkt->pkt_nb) {
- ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n",
- __func__, pkt->pkt_nb, meta->count);
+ ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%llu]\n",
+ __func__, pkt->pkt_nb,
+ (unsigned long long)meta->count);
return false;
}
@@ -856,11 +927,13 @@ static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 exp
if (addr >= umem->num_frames * umem->frame_size ||
addr + len > umem->num_frames * umem->frame_size) {
- ksft_print_msg("Frag invalid addr: %llx len: %u\n", addr, len);
+ ksft_print_msg("Frag invalid addr: %llx len: %u\n",
+ (unsigned long long)addr, len);
return false;
}
if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
- ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n", addr, len);
+ ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n",
+ (unsigned long long)addr, len);
return false;
}
@@ -916,36 +989,42 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
return true;
}
-static void kick_tx(struct xsk_socket_info *xsk)
+static int kick_tx(struct xsk_socket_info *xsk)
{
int ret;
ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
if (ret >= 0)
- return;
+ return TEST_PASS;
if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
usleep(100);
- return;
+ return TEST_PASS;
}
- exit_with_error(errno);
+ return TEST_FAILURE;
}
-static void kick_rx(struct xsk_socket_info *xsk)
+static int kick_rx(struct xsk_socket_info *xsk)
{
int ret;
ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
if (ret < 0)
- exit_with_error(errno);
+ return TEST_FAILURE;
+
+ return TEST_PASS;
}
static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
{
unsigned int rcvd;
u32 idx;
+ int ret;
- if (xsk_ring_prod__needs_wakeup(&xsk->tx))
- kick_tx(xsk);
+ if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
+ ret = kick_tx(xsk);
+ if (ret)
+ return TEST_FAILURE;
+ }
rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
if (rcvd) {
@@ -953,7 +1032,8 @@ static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
ksft_print_msg("[%s] Too many packets completed\n", __func__);
- ksft_print_msg("Last completion address: %llx\n", addr);
+ ksft_print_msg("Last completion address: %llx\n",
+ (unsigned long long)addr);
return TEST_FAILURE;
}
@@ -964,153 +1044,207 @@ static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
return TEST_PASS;
}
-static int receive_pkts(struct test_spec *test, struct pollfd *fds)
+static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
{
- struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
- struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream;
- struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
+ u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
struct ifobject *ifobj = test->ifobj_rx;
struct xsk_umem_info *umem = xsk->umem;
+ struct pollfd fds = { };
struct pkt *pkt;
+ u64 first_addr = 0;
int ret;
- ret = gettimeofday(&tv_now, NULL);
- if (ret)
- exit_with_error(errno);
- timeradd(&tv_now, &tv_timeout, &tv_end);
+ fds.fd = xsk_socket__fd(xsk->xsk);
+ fds.events = POLLIN;
- pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
- while (pkt) {
- u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
- u64 first_addr;
+ ret = kick_rx(xsk);
+ if (ret)
+ return TEST_FAILURE;
- ret = gettimeofday(&tv_now, NULL);
- if (ret)
- exit_with_error(errno);
- if (timercmp(&tv_now, &tv_end, >)) {
- ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
+ if (ifobj->use_poll) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (ret < 0)
return TEST_FAILURE;
- }
-
- kick_rx(xsk);
- if (ifobj->use_poll) {
- ret = poll(fds, 1, POLL_TMOUT);
- if (ret < 0)
- exit_with_error(errno);
- if (!ret) {
- if (!is_umem_valid(test->ifobj_tx))
- return TEST_PASS;
-
- ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
- return TEST_FAILURE;
- }
+ if (!ret) {
+ if (!is_umem_valid(test->ifobj_tx))
+ return TEST_PASS;
- if (!(fds->revents & POLLIN))
- continue;
+ ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
+ return TEST_CONTINUE;
}
- rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
- if (!rcvd)
- continue;
+ if (!(fds.revents & POLLIN))
+ return TEST_CONTINUE;
+ }
- if (ifobj->use_fill_ring) {
- ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
- while (ret != rcvd) {
+ rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
+ if (!rcvd)
+ return TEST_CONTINUE;
+
+ if (ifobj->use_fill_ring) {
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ while (ret != rcvd) {
+ if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
+ ret = poll(&fds, 1, POLL_TMOUT);
if (ret < 0)
- exit_with_error(-ret);
- if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
- ret = poll(fds, 1, POLL_TMOUT);
- if (ret < 0)
- exit_with_error(errno);
- }
- ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ return TEST_FAILURE;
}
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
}
+ }
- while (frags_processed < rcvd) {
- const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
- u64 addr = desc->addr, orig;
+ while (frags_processed < rcvd) {
+ const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
+ u64 addr = desc->addr, orig;
- orig = xsk_umem__extract_addr(addr);
- addr = xsk_umem__add_offset_to_addr(addr);
+ orig = xsk_umem__extract_addr(addr);
+ addr = xsk_umem__add_offset_to_addr(addr);
+ if (!nb_frags) {
+ pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
if (!pkt) {
ksft_print_msg("[%s] received too many packets addr: %lx len %u\n",
__func__, addr, desc->len);
return TEST_FAILURE;
}
+ }
- if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
- !is_offset_correct(umem, pkt, addr) ||
- (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr)))
- return TEST_FAILURE;
+ print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
+ addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);
- if (!nb_frags++)
- first_addr = addr;
- frags_processed++;
- pkt_len += desc->len;
- if (ifobj->use_fill_ring)
- *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
+ if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
+ !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata &&
+ !is_metadata_correct(pkt, umem->buffer, addr)))
+ return TEST_FAILURE;
- if (pkt_continues(desc->options))
- continue;
+ if (!nb_frags++)
+ first_addr = addr;
+ frags_processed++;
+ pkt_len += desc->len;
+ if (ifobj->use_fill_ring)
+ *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
- /* The complete packet has been received */
- if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
- !is_offset_correct(umem, pkt, addr))
- return TEST_FAILURE;
+ if (pkt_continues(desc->options))
+ continue;
- pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
- nb_frags = 0;
- pkt_len = 0;
- }
+ /* The complete packet has been received */
+ if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
+ !is_offset_correct(umem, pkt, addr))
+ return TEST_FAILURE;
- if (nb_frags) {
- /* In the middle of a packet. Start over from beginning of packet. */
- idx_rx -= nb_frags;
- xsk_ring_cons__cancel(&xsk->rx, nb_frags);
- if (ifobj->use_fill_ring) {
- idx_fq -= nb_frags;
- xsk_ring_prod__cancel(&umem->fq, nb_frags);
- }
- frags_processed -= nb_frags;
+ pkt_stream->nb_rx_pkts++;
+ nb_frags = 0;
+ pkt_len = 0;
+ }
+
+ if (nb_frags) {
+ /* In the middle of a packet. Start over from beginning of packet. */
+ idx_rx -= nb_frags;
+ xsk_ring_cons__cancel(&xsk->rx, nb_frags);
+ if (ifobj->use_fill_ring) {
+ idx_fq -= nb_frags;
+ xsk_ring_prod__cancel(&umem->fq, nb_frags);
}
+ frags_processed -= nb_frags;
+ }
- if (ifobj->use_fill_ring)
- xsk_ring_prod__submit(&umem->fq, frags_processed);
- if (ifobj->release_rx)
- xsk_ring_cons__release(&xsk->rx, frags_processed);
+ if (ifobj->use_fill_ring)
+ xsk_ring_prod__submit(&umem->fq, frags_processed);
+ if (ifobj->release_rx)
+ xsk_ring_cons__release(&xsk->rx, frags_processed);
+
+ pthread_mutex_lock(&pacing_mutex);
+ pkts_in_flight -= pkts_sent;
+ pthread_mutex_unlock(&pacing_mutex);
+ pkts_sent = 0;
+
+	return TEST_CONTINUE;
+}
+
+static bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num,
+				 unsigned long *bitmap)
+{
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
+
+ if (!pkt_stream) {
+ __set_bit(sock_num, bitmap);
+ return false;
+ }
+
+ if (pkt_stream->nb_rx_pkts == pkt_stream->nb_valid_entries) {
+ __set_bit(sock_num, bitmap);
+ if (bitmap_full(bitmap, test->nb_sockets))
+ return true;
+ }
+
+ return false;
+}
+
+static int receive_pkts(struct test_spec *test)
+{
+ struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
+ DECLARE_BITMAP(bitmap, test->nb_sockets);
+ struct xsk_socket_info *xsk;
+ u32 sock_num = 0;
+	int res, ret;
+
+	bitmap_zero(bitmap, test->nb_sockets); /* DECLARE_BITMAP() leaves stack bitmaps uninitialized */
- pthread_mutex_lock(&pacing_mutex);
- pkts_in_flight -= pkts_sent;
- pthread_mutex_unlock(&pacing_mutex);
- pkts_sent = 0;
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ exit_with_error(errno);
+
+ timeradd(&tv_now, &tv_timeout, &tv_end);
+
+ while (1) {
+ xsk = &test->ifobj_rx->xsk_arr[sock_num];
+
+ if ((all_packets_received(test, xsk, sock_num, bitmap)))
+ break;
+
+ res = __receive_pkts(test, xsk);
+ if (!(res == TEST_PASS || res == TEST_CONTINUE))
+ return res;
+
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ exit_with_error(errno);
+
+ if (timercmp(&tv_now, &tv_end, >)) {
+ ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
+ return TEST_FAILURE;
+ }
+ sock_num = (sock_num + 1) % test->nb_sockets;
}
return TEST_PASS;
}
-static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout)
+static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
{
u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
- struct pkt_stream *pkt_stream = ifobject->pkt_stream;
- struct xsk_socket_info *xsk = ifobject->xsk;
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
struct xsk_umem_info *umem = ifobject->umem;
bool use_poll = ifobject->use_poll;
+ struct pollfd fds = { };
int ret;
buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
/* pkts_in_flight might be negative if many invalid packets are sent */
if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) {
- kick_tx(xsk);
+ ret = kick_tx(xsk);
+ if (ret)
+ return TEST_FAILURE;
return TEST_CONTINUE;
}
+ fds.fd = xsk_socket__fd(xsk->xsk);
+ fds.events = POLLOUT;
+
while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
if (use_poll) {
- ret = poll(fds, 1, POLL_TMOUT);
+ ret = poll(&fds, 1, POLL_TMOUT);
if (timeout) {
if (ret < 0) {
ksft_print_msg("ERROR: [%s] Poll error %d\n",
@@ -1161,10 +1295,13 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
tx_desc->options = 0;
}
if (pkt->valid)
- pkt_generate(ifobject, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
+ pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
bytes_written);
bytes_written += tx_desc->len;
+ print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n",
+ tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb);
+
if (nb_frags_left) {
i++;
if (pkt_stream->verbatim)
@@ -1186,7 +1323,7 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
xsk->outstanding_tx += valid_frags;
if (use_poll) {
- ret = poll(fds, 1, POLL_TMOUT);
+ ret = poll(&fds, 1, POLL_TMOUT);
if (ret <= 0) {
if (ret == 0 && timeout)
return TEST_PASS;
@@ -1207,33 +1344,67 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
return TEST_CONTINUE;
}
-static void wait_for_tx_completion(struct xsk_socket_info *xsk)
+static int wait_for_tx_completion(struct xsk_socket_info *xsk)
{
- while (xsk->outstanding_tx)
+ struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
+ int ret;
+
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ exit_with_error(errno);
+ timeradd(&tv_now, &tv_timeout, &tv_end);
+
+ while (xsk->outstanding_tx) {
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ exit_with_error(errno);
+ if (timercmp(&tv_now, &tv_end, >)) {
+ ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__);
+ return TEST_FAILURE;
+ }
+
complete_pkts(xsk, BATCH_SIZE);
+ }
+
+ return TEST_PASS;
+}
+
+static bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
+{
+ return bitmap_full(bitmap, test->nb_sockets);
}
static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
- struct pkt_stream *pkt_stream = ifobject->pkt_stream;
bool timeout = !is_umem_valid(test->ifobj_rx);
- struct pollfd fds = { };
- u32 ret;
+ DECLARE_BITMAP(bitmap, test->nb_sockets);
+ u32 i, ret;
- fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
- fds.events = POLLOUT;
+ bitmap_zero(bitmap, test->nb_sockets);
+
+ while (!all_packets_sent(test, bitmap)) {
+ for (i = 0; i < test->nb_sockets; i++) {
+ struct pkt_stream *pkt_stream;
- while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
- ret = __send_pkts(ifobject, &fds, timeout);
- if (ret == TEST_CONTINUE && !test->fail)
- continue;
- if ((ret || test->fail) && !timeout)
- return TEST_FAILURE;
- if (ret == TEST_PASS && timeout)
- return ret;
+ pkt_stream = ifobject->xsk_arr[i].pkt_stream;
+ if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
+ __set_bit(i, bitmap);
+ continue;
+ }
+ ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
+ if (ret == TEST_CONTINUE && !test->fail)
+ continue;
+
+ if ((ret || test->fail) && !timeout)
+ return TEST_FAILURE;
+
+ if (ret == TEST_PASS && timeout)
+ return ret;
+
+ ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
+ if (ret)
+ return TEST_FAILURE;
+ }
}
- wait_for_tx_completion(ifobject->xsk);
return TEST_PASS;
}
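
Note the tri-state contract here: __send_pkts() returns TEST_CONTINUE for transient backpressure (Tx ring or UMEM full), which the caller retries, while TEST_PASS and TEST_FAILURE are terminal. A condensed sketch of a caller honoring that contract (single socket, no timeout handling; the names are the selftest's own):

    static int drive_until_terminal(struct test_spec *test, struct ifobject *ifobj)
    {
        int ret;

        do {
            ret = __send_pkts(ifobj, &ifobj->xsk_arr[0], false);
        } while (ret == TEST_CONTINUE && !test->fail);  /* retry backpressure */

        return ret;  /* TEST_PASS or TEST_FAILURE */
    }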
@@ -1266,7 +1437,9 @@ static int validate_rx_dropped(struct ifobject *ifobject)
struct xdp_statistics stats;
int err;
- kick_rx(ifobject->xsk);
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
err = get_xsk_stats(xsk, &stats);
if (err)
@@ -1278,8 +1451,8 @@ static int validate_rx_dropped(struct ifobject *ifobject)
* packet being invalid). Since the last packet may or may not have
* been dropped already, both outcomes must be allowed.
*/
- if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 ||
- stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1)
+ if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 ||
+ stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1)
return TEST_PASS;
return TEST_FAILURE;
@@ -1292,7 +1465,9 @@ static int validate_rx_full(struct ifobject *ifobject)
int err;
usleep(1000);
- kick_rx(ifobject->xsk);
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
err = get_xsk_stats(xsk, &stats);
if (err)
@@ -1311,7 +1486,9 @@ static int validate_fill_empty(struct ifobject *ifobject)
int err;
usleep(1000);
- kick_rx(ifobject->xsk);
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
err = get_xsk_stats(xsk, &stats);
if (err)
@@ -1339,9 +1516,11 @@ static int validate_tx_invalid_descs(struct ifobject *ifobject)
return TEST_FAILURE;
}
- if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) {
- ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n",
- __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts);
+ if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
+ ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n",
+ __func__,
+ (unsigned long long)stats.tx_invalid_descs,
+ ifobject->xsk->pkt_stream->nb_pkts);
return TEST_FAILURE;
}
@@ -1433,6 +1612,7 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
LIBBPF_OPTS(bpf_xdp_query_opts, opts);
void *bufs;
int ret;
+ u32 i;
if (ifobject->umem->unaligned_mode)
mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
@@ -1455,11 +1635,14 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
if (!ifobject->rx_on)
return;
- xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring);
+ xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, ifobject->use_fill_ring);
- ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
- if (ret)
- exit_with_error(errno);
+ for (i = 0; i < test->nb_sockets; i++) {
+ ifobject->xsk = &ifobject->xsk_arr[i];
+ ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
+ if (ret)
+ exit_with_error(errno);
+ }
}
static void *worker_testapp_validate_tx(void *arg)
@@ -1475,8 +1658,6 @@ static void *worker_testapp_validate_tx(void *arg)
thread_common_ops_tx(test, ifobject);
}
- print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
- ifobject->ifname);
err = send_pkts(test, ifobject);
if (!err && ifobject->validation_func)
@@ -1491,26 +1672,23 @@ static void *worker_testapp_validate_rx(void *arg)
{
struct test_spec *test = (struct test_spec *)arg;
struct ifobject *ifobject = test->ifobj_rx;
- struct pollfd fds = { };
int err;
if (test->current_step == 1) {
thread_common_ops(test, ifobject);
} else {
xsk_clear_xskmap(ifobject->xskmap);
- err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
+ err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
if (err) {
- printf("Error: Failed to update xskmap, error %s\n", strerror(-err));
+ ksft_print_msg("Error: Failed to update xskmap, error %s\n",
+ strerror(-err));
exit_with_error(-err);
}
}
- fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
- fds.events = POLLIN;
-
pthread_barrier_wait(&barr);
- err = receive_pkts(test, &fds);
+ err = receive_pkts(test);
if (!err && ifobject->validation_func)
err = ifobject->validation_func(ifobject);
@@ -1564,7 +1742,7 @@ static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_pro
xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
if (err) {
- printf("Error attaching XDP program\n");
+ ksft_print_msg("Error attaching XDP program\n");
exit_with_error(-err);
}
@@ -1619,11 +1797,11 @@ static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *i
if (ifobj2) {
if (pthread_barrier_init(&barr, NULL, 2))
exit_with_error(errno);
- pkt_stream_reset(ifobj2->pkt_stream);
+ pkt_stream_reset(ifobj2->xsk->pkt_stream);
}
test->current_step++;
- pkt_stream_reset(ifobj1->pkt_stream);
+ pkt_stream_reset(ifobj1->xsk->pkt_stream);
pkts_in_flight = 0;
signal(SIGUSR1, handler);
@@ -1647,9 +1825,15 @@ static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *i
pthread_join(t0, NULL);
if (test->total_steps == test->current_step || test->fail) {
+ u32 i;
+
if (ifobj2)
- xsk_socket__delete(ifobj2->xsk->xsk);
- xsk_socket__delete(ifobj1->xsk->xsk);
+ for (i = 0; i < test->nb_sockets; i++)
+ xsk_socket__delete(ifobj2->xsk_arr[i].xsk);
+
+ for (i = 0; i < test->nb_sockets; i++)
+ xsk_socket__delete(ifobj1->xsk_arr[i].xsk);
+
testapp_clean_xsk_umem(ifobj1);
if (ifobj2 && !ifobj2->shared_umem)
testapp_clean_xsk_umem(ifobj2);
@@ -1682,7 +1866,6 @@ static int testapp_teardown(struct test_spec *test)
{
int i;
- test_spec_set_name(test, "TEARDOWN");
for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
if (testapp_validate_traffic(test))
return TEST_FAILURE;
@@ -1704,18 +1887,17 @@ static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
*ifobj2 = tmp_ifobj;
}
-static int testapp_bidi(struct test_spec *test)
+static int testapp_bidirectional(struct test_spec *test)
{
int res;
- test_spec_set_name(test, "BIDIRECTIONAL");
test->ifobj_tx->rx_on = true;
test->ifobj_rx->tx_on = true;
test->total_steps = 2;
if (testapp_validate_traffic(test))
return TEST_FAILURE;
- print_verbose("Switching Tx/Rx vectors\n");
+ print_verbose("Switching Tx/Rx direction\n");
swap_directions(&test->ifobj_rx, &test->ifobj_tx);
res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
@@ -1723,42 +1905,44 @@ static int testapp_bidi(struct test_spec *test)
return res;
}
-static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
+static int swap_xsk_resources(struct test_spec *test)
{
int ret;
- xsk_socket__delete(ifobj_tx->xsk->xsk);
- xsk_socket__delete(ifobj_rx->xsk->xsk);
- ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
- ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];
+ test->ifobj_tx->xsk_arr[0].pkt_stream = NULL;
+ test->ifobj_rx->xsk_arr[0].pkt_stream = NULL;
+ test->ifobj_tx->xsk_arr[1].pkt_stream = test->tx_pkt_stream_default;
+ test->ifobj_rx->xsk_arr[1].pkt_stream = test->rx_pkt_stream_default;
+ test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
+ test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];
- ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk);
+ ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
if (ret)
- exit_with_error(errno);
+ return TEST_FAILURE;
+
+ return TEST_PASS;
}
-static int testapp_bpf_res(struct test_spec *test)
+static int testapp_xdp_prog_cleanup(struct test_spec *test)
{
- test_spec_set_name(test, "BPF_RES");
test->total_steps = 2;
test->nb_sockets = 2;
if (testapp_validate_traffic(test))
return TEST_FAILURE;
- swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
+ if (swap_xsk_resources(test))
+ return TEST_FAILURE;
return testapp_validate_traffic(test);
}
static int testapp_headroom(struct test_spec *test)
{
- test_spec_set_name(test, "UMEM_HEADROOM");
test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
return testapp_validate_traffic(test);
}
static int testapp_stats_rx_dropped(struct test_spec *test)
{
- test_spec_set_name(test, "STAT_RX_DROPPED");
if (test->mode == TEST_MODE_ZC) {
ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
return TEST_SKIP;
@@ -1774,7 +1958,6 @@ static int testapp_stats_rx_dropped(struct test_spec *test)
static int testapp_stats_tx_invalid_descs(struct test_spec *test)
{
- test_spec_set_name(test, "STAT_TX_INVALID");
pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
test->ifobj_tx->validation_func = validate_tx_invalid_descs;
return testapp_validate_traffic(test);
@@ -1782,10 +1965,8 @@ static int testapp_stats_tx_invalid_descs(struct test_spec *test)
static int testapp_stats_rx_full(struct test_spec *test)
{
- test_spec_set_name(test, "STAT_RX_FULL");
pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
- test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
- DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
test->ifobj_rx->release_rx = false;
@@ -1795,19 +1976,16 @@ static int testapp_stats_rx_full(struct test_spec *test)
static int testapp_stats_fill_empty(struct test_spec *test)
{
- test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
- test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
- DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
test->ifobj_rx->use_fill_ring = false;
test->ifobj_rx->validation_func = validate_fill_empty;
return testapp_validate_traffic(test);
}
-static int testapp_unaligned(struct test_spec *test)
+static int testapp_send_receive_unaligned(struct test_spec *test)
{
- test_spec_set_name(test, "UNALIGNED_MODE");
test->ifobj_tx->umem->unaligned_mode = true;
test->ifobj_rx->umem->unaligned_mode = true;
/* Let half of the packets straddle a 4K buffer boundary */
@@ -1816,9 +1994,8 @@ static int testapp_unaligned(struct test_spec *test)
return testapp_validate_traffic(test);
}
-static int testapp_unaligned_mb(struct test_spec *test)
+static int testapp_send_receive_unaligned_mb(struct test_spec *test)
{
- test_spec_set_name(test, "UNALIGNED_MODE_9K");
test->mtu = MAX_ETH_JUMBO_SIZE;
test->ifobj_tx->umem->unaligned_mode = true;
test->ifobj_rx->umem->unaligned_mode = true;
@@ -1834,9 +2011,8 @@ static int testapp_single_pkt(struct test_spec *test)
return testapp_validate_traffic(test);
}
-static int testapp_multi_buffer(struct test_spec *test)
+static int testapp_send_receive_mb(struct test_spec *test)
{
- test_spec_set_name(test, "RUN_TO_COMPLETION_9K_PACKETS");
test->mtu = MAX_ETH_JUMBO_SIZE;
pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
@@ -1933,7 +2109,6 @@ static int testapp_xdp_drop(struct test_spec *test)
struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
- test_spec_set_name(test, "XDP_DROP_HALF");
test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
skel_rx->maps.xsk, skel_tx->maps.xsk);
@@ -1941,7 +2116,7 @@ static int testapp_xdp_drop(struct test_spec *test)
return testapp_validate_traffic(test);
}
-static int testapp_xdp_metadata_count(struct test_spec *test)
+static int testapp_xdp_metadata_copy(struct test_spec *test)
{
struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
@@ -1955,19 +2130,38 @@ static int testapp_xdp_metadata_count(struct test_spec *test)
test->ifobj_rx->use_metadata = true;
data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
- if (!data_map || !bpf_map__is_internal(data_map))
- exit_with_error(ENOMEM);
+ if (!data_map || !bpf_map__is_internal(data_map)) {
+ ksft_print_msg("Error: could not find bss section of XDP program\n");
+ return TEST_FAILURE;
+ }
- if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY))
- exit_with_error(errno);
+ if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) {
+ ksft_print_msg("Error: could not update count element\n");
+ return TEST_FAILURE;
+ }
return testapp_validate_traffic(test);
}
-static int testapp_poll_txq_tmout(struct test_spec *test)
+static int testapp_xdp_shared_umem(struct test_spec *test)
{
- test_spec_set_name(test, "POLL_TXQ_FULL");
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+
+ test->total_steps = 1;
+ test->nb_sockets = 2;
+
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_shared_umem,
+ skel_tx->progs.xsk_xdp_shared_umem,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+
+ pkt_stream_even_odd_sequence(test);
+
+ return testapp_validate_traffic(test);
+}
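
pkt_stream_even_odd_sequence() (added earlier in this patch) splits the default streams so socket 0 carries even-numbered packets and socket 1 odd-numbered ones, matching what the xsk_xdp_shared_umem program steers at the XDP level. A hypothetical sketch of such a split, assuming the pkt.valid flag is how a stream entry is disabled (not the patch's actual implementation):

    static void even_odd_split_sketch(struct ifobject *ifobj)
    {
        u32 i, n;

        for (i = 0; i < 2; i++) {
            struct pkt_stream *ps = ifobj->xsk_arr[i].pkt_stream;

            for (n = 0; n < ps->nb_pkts; n++)
                ps->pkts[n].valid = (n % 2 == i);  /* keep every other pkt */
        }
    }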
+
+static int testapp_poll_txq_tmout(struct test_spec *test)
+{
test->ifobj_tx->use_poll = true;
 /* create an invalid frame by setting umem frame_size and pkt length both to 2048 */
test->ifobj_tx->umem->frame_size = 2048;
@@ -1977,7 +2171,6 @@ static int testapp_poll_txq_tmout(struct test_spec *test)
static int testapp_poll_rxq_tmout(struct test_spec *test)
{
- test_spec_set_name(test, "POLL_RXQ_EMPTY");
test->ifobj_rx->use_poll = true;
return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
}
@@ -1987,7 +2180,6 @@ static int testapp_too_many_frags(struct test_spec *test)
struct pkt pkts[2 * XSK_DESC__MAX_SKB_FRAGS + 2] = {};
u32 max_frags, i;
- test_spec_set_name(test, "TOO_MANY_FRAGS");
if (test->mode == TEST_MODE_ZC)
max_frags = test->ifobj_tx->xdp_zc_max_segs;
else
@@ -2054,20 +2246,16 @@ static bool hugepages_present(void)
return true;
}
-static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
- thread_func_t func_ptr)
+static void init_iface(struct ifobject *ifobj, thread_func_t func_ptr)
{
LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
int err;
- memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
- memcpy(ifobj->src_mac, src_mac, ETH_ALEN);
-
ifobj->func_ptr = func_ptr;
err = xsk_load_xdp_programs(ifobj);
if (err) {
- printf("Error loading XDP program\n");
+ ksft_print_msg("Error loading XDP program\n");
exit_with_error(err);
}
@@ -2091,138 +2279,98 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *
}
}
-static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
-{
- int ret = TEST_SKIP;
-
- switch (type) {
- case TEST_TYPE_STATS_RX_DROPPED:
- ret = testapp_stats_rx_dropped(test);
- break;
- case TEST_TYPE_STATS_TX_INVALID_DESCS:
- ret = testapp_stats_tx_invalid_descs(test);
- break;
- case TEST_TYPE_STATS_RX_FULL:
- ret = testapp_stats_rx_full(test);
- break;
- case TEST_TYPE_STATS_FILL_EMPTY:
- ret = testapp_stats_fill_empty(test);
- break;
- case TEST_TYPE_TEARDOWN:
- ret = testapp_teardown(test);
- break;
- case TEST_TYPE_BIDI:
- ret = testapp_bidi(test);
- break;
- case TEST_TYPE_BPF_RES:
- ret = testapp_bpf_res(test);
- break;
- case TEST_TYPE_RUN_TO_COMPLETION:
- test_spec_set_name(test, "RUN_TO_COMPLETION");
- ret = testapp_validate_traffic(test);
- break;
- case TEST_TYPE_RUN_TO_COMPLETION_MB:
- ret = testapp_multi_buffer(test);
- break;
- case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
- test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
- ret = testapp_single_pkt(test);
- break;
- case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
- test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
- test->ifobj_tx->umem->frame_size = 2048;
- test->ifobj_rx->umem->frame_size = 2048;
- pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
- ret = testapp_validate_traffic(test);
- break;
- case TEST_TYPE_RX_POLL:
- test->ifobj_rx->use_poll = true;
- test_spec_set_name(test, "POLL_RX");
- ret = testapp_validate_traffic(test);
- break;
- case TEST_TYPE_TX_POLL:
- test->ifobj_tx->use_poll = true;
- test_spec_set_name(test, "POLL_TX");
- ret = testapp_validate_traffic(test);
- break;
- case TEST_TYPE_POLL_TXQ_TMOUT:
- ret = testapp_poll_txq_tmout(test);
- break;
- case TEST_TYPE_POLL_RXQ_TMOUT:
- ret = testapp_poll_rxq_tmout(test);
- break;
- case TEST_TYPE_ALIGNED_INV_DESC:
- test_spec_set_name(test, "ALIGNED_INV_DESC");
- ret = testapp_invalid_desc(test);
- break;
- case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
- test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
- test->ifobj_tx->umem->frame_size = 2048;
- test->ifobj_rx->umem->frame_size = 2048;
- ret = testapp_invalid_desc(test);
- break;
- case TEST_TYPE_UNALIGNED_INV_DESC:
- test_spec_set_name(test, "UNALIGNED_INV_DESC");
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
- ret = testapp_invalid_desc(test);
- break;
- case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: {
- u64 page_size, umem_size;
-
- test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE");
- /* Odd frame size so the UMEM doesn't end near a page boundary. */
- test->ifobj_tx->umem->frame_size = 4001;
- test->ifobj_rx->umem->frame_size = 4001;
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
- /* This test exists to test descriptors that staddle the end of
- * the UMEM but not a page.
- */
- page_size = sysconf(_SC_PAGESIZE);
- umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
- assert(umem_size % page_size > MIN_PKT_SIZE);
- assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
- ret = testapp_invalid_desc(test);
- break;
- }
- case TEST_TYPE_ALIGNED_INV_DESC_MB:
- test_spec_set_name(test, "ALIGNED_INV_DESC_MULTI_BUFF");
- ret = testapp_invalid_desc_mb(test);
- break;
- case TEST_TYPE_UNALIGNED_INV_DESC_MB:
- test_spec_set_name(test, "UNALIGNED_INV_DESC_MULTI_BUFF");
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
- ret = testapp_invalid_desc_mb(test);
- break;
- case TEST_TYPE_UNALIGNED:
- ret = testapp_unaligned(test);
- break;
- case TEST_TYPE_UNALIGNED_MB:
- ret = testapp_unaligned_mb(test);
- break;
- case TEST_TYPE_HEADROOM:
- ret = testapp_headroom(test);
- break;
- case TEST_TYPE_XDP_DROP_HALF:
- ret = testapp_xdp_drop(test);
- break;
- case TEST_TYPE_XDP_METADATA_COUNT:
- test_spec_set_name(test, "XDP_METADATA_COUNT");
- ret = testapp_xdp_metadata_count(test);
- break;
- case TEST_TYPE_XDP_METADATA_COUNT_MB:
- test_spec_set_name(test, "XDP_METADATA_COUNT_MULTI_BUFF");
- test->mtu = MAX_ETH_JUMBO_SIZE;
- ret = testapp_xdp_metadata_count(test);
- break;
- case TEST_TYPE_TOO_MANY_FRAGS:
- ret = testapp_too_many_frags(test);
- break;
- default:
- break;
- }
+static int testapp_send_receive(struct test_spec *test)
+{
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_send_receive_2k_frame(struct test_spec *test)
+{
+ test->ifobj_tx->umem->frame_size = 2048;
+ test->ifobj_rx->umem->frame_size = 2048;
+ pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_poll_rx(struct test_spec *test)
+{
+ test->ifobj_rx->use_poll = true;
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_poll_tx(struct test_spec *test)
+{
+ test->ifobj_tx->use_poll = true;
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_aligned_inv_desc(struct test_spec *test)
+{
+ return testapp_invalid_desc(test);
+}
+
+static int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
+{
+ test->ifobj_tx->umem->frame_size = 2048;
+ test->ifobj_rx->umem->frame_size = 2048;
+ return testapp_invalid_desc(test);
+}
+
+static int testapp_unaligned_inv_desc(struct test_spec *test)
+{
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ return testapp_invalid_desc(test);
+}
+
+static int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
+{
+ u64 page_size, umem_size;
+
+ /* Odd frame size so the UMEM doesn't end near a page boundary. */
+ test->ifobj_tx->umem->frame_size = 4001;
+ test->ifobj_rx->umem->frame_size = 4001;
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ /* This test exercises descriptors that straddle the end of
+ * the UMEM but not a page.
+ */
+ page_size = sysconf(_SC_PAGESIZE);
+ umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
+ assert(umem_size % page_size > MIN_PKT_SIZE);
+ assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
+
+ return testapp_invalid_desc(test);
+}
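
To see why the two assert()s hold, plug in a hypothetical num_frames of 1024 (the real value comes from the UMEM config): umem_size = 1024 * 4001 = 4,097,024 bytes, and with 4096-byte pages, umem_size % page_size = 1024, which lies strictly between MIN_PKT_SIZE (64) and page_size - MIN_PKT_SIZE (4032), so the UMEM ends mid-page with room for a packet on both sides of the boundary:

    /* Worked check for the hypothetical num_frames = 1024, 4 KiB pages: */
    _Static_assert(1024ULL * 4001 % 4096 == 1024, "UMEM ends 1024 B into a page");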
+
+static int testapp_aligned_inv_desc_mb(struct test_spec *test)
+{
+ return testapp_invalid_desc_mb(test);
+}
+
+static int testapp_unaligned_inv_desc_mb(struct test_spec *test)
+{
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ return testapp_invalid_desc_mb(test);
+}
+
+static int testapp_xdp_metadata(struct test_spec *test)
+{
+ return testapp_xdp_metadata_copy(test);
+}
+
+static int testapp_xdp_metadata_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ return testapp_xdp_metadata_copy(test);
+}
+
+static void run_pkt_test(struct test_spec *test)
+{
+ int ret;
+
+ ret = test->test_func(test);
if (ret == TEST_PASS)
ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
@@ -2290,13 +2438,56 @@ static bool is_xdp_supported(int ifindex)
return true;
}
+static const struct test_spec tests[] = {
+ {.name = "SEND_RECEIVE", .test_func = testapp_send_receive},
+ {.name = "SEND_RECEIVE_2K_FRAME", .test_func = testapp_send_receive_2k_frame},
+ {.name = "SEND_RECEIVE_SINGLE_PKT", .test_func = testapp_single_pkt},
+ {.name = "POLL_RX", .test_func = testapp_poll_rx},
+ {.name = "POLL_TX", .test_func = testapp_poll_tx},
+ {.name = "POLL_RXQ_FULL", .test_func = testapp_poll_rxq_tmout},
+ {.name = "POLL_TXQ_FULL", .test_func = testapp_poll_txq_tmout},
+ {.name = "SEND_RECEIVE_UNALIGNED", .test_func = testapp_send_receive_unaligned},
+ {.name = "ALIGNED_INV_DESC", .test_func = testapp_aligned_inv_desc},
+ {.name = "ALIGNED_INV_DESC_2K_FRAME_SIZE", .test_func = testapp_aligned_inv_desc_2k_frame},
+ {.name = "UNALIGNED_INV_DESC", .test_func = testapp_unaligned_inv_desc},
+ {.name = "UNALIGNED_INV_DESC_4001_FRAME_SIZE",
+ .test_func = testapp_unaligned_inv_desc_4001_frame},
+ {.name = "UMEM_HEADROOM", .test_func = testapp_headroom},
+ {.name = "TEARDOWN", .test_func = testapp_teardown},
+ {.name = "BIDIRECTIONAL", .test_func = testapp_bidirectional},
+ {.name = "STAT_RX_DROPPED", .test_func = testapp_stats_rx_dropped},
+ {.name = "STAT_TX_INVALID", .test_func = testapp_stats_tx_invalid_descs},
+ {.name = "STAT_RX_FULL", .test_func = testapp_stats_rx_full},
+ {.name = "STAT_FILL_EMPTY", .test_func = testapp_stats_fill_empty},
+ {.name = "XDP_PROG_CLEANUP", .test_func = testapp_xdp_prog_cleanup},
+ {.name = "XDP_DROP_HALF", .test_func = testapp_xdp_drop},
+ {.name = "XDP_SHARED_UMEM", .test_func = testapp_xdp_shared_umem},
+ {.name = "XDP_METADATA_COPY", .test_func = testapp_xdp_metadata},
+ {.name = "XDP_METADATA_COPY_MULTI_BUFF", .test_func = testapp_xdp_metadata_mb},
+ {.name = "SEND_RECEIVE_9K_PACKETS", .test_func = testapp_send_receive_mb},
+ {.name = "SEND_RECEIVE_UNALIGNED_9K_PACKETS",
+ .test_func = testapp_send_receive_unaligned_mb},
+ {.name = "ALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_aligned_inv_desc_mb},
+ {.name = "UNALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_unaligned_inv_desc_mb},
+ {.name = "TOO_MANY_FRAGS", .test_func = testapp_too_many_frags},
+};
+
+static void print_tests(void)
+{
+ u32 i;
+
+ printf("Tests:\n");
+ for (i = 0; i < ARRAY_SIZE(tests); i++)
+ printf("%u: %s\n", i, tests[i].name);
+}
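
Taken together, tests[], print_tests() and run_pkt_test() form a plain table-driven dispatcher: each entry pairs a name for reporting with a test_func_t callback, and main() indexes the table either exhaustively or via the chosen test number. A condensed sketch of the pattern (per-test re-initialization and mode handling elided):

    struct test_case {
        const char *name;
        int (*run)(struct test_spec *test);
    };

    static const struct test_case cases[] = {
        { "SEND_RECEIVE", testapp_send_receive },
        /* ... one entry per test ... */
    };

    static void run_all_sketch(struct test_spec *test)
    {
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(cases); i++)
            if (cases[i].run(test) != TEST_PASS)
                ksft_print_msg("FAIL: %s\n", cases[i].name);
    }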
+
int main(int argc, char **argv)
{
struct pkt_stream *rx_pkt_stream_default;
struct pkt_stream *tx_pkt_stream_default;
struct ifobject *ifobj_tx, *ifobj_rx;
+ u32 i, j, failed_tests = 0, nb_tests;
int modes = TEST_MODE_SKB + 1;
- u32 i, j, failed_tests = 0;
struct test_spec test;
bool shared_netdev;
@@ -2314,14 +2505,21 @@ int main(int argc, char **argv)
parse_command_line(ifobj_tx, ifobj_rx, argc, argv);
+ if (opt_print_tests) {
+ print_tests();
+ ksft_exit_xpass();
+ }
+ if (opt_run_test != RUN_ALL_TESTS && opt_run_test >= ARRAY_SIZE(tests)) {
+ ksft_print_msg("Error: test %u does not exist.\n", opt_run_test);
+ ksft_exit_xfail();
+ }
+
shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex);
ifobj_tx->shared_umem = shared_netdev;
ifobj_rx->shared_umem = shared_netdev;
- if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
- usage(basename(argv[0]));
- ksft_exit_xfail();
- }
+ if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx))
+ print_usage(argv);
if (is_xdp_supported(ifobj_tx->ifindex)) {
modes++;
@@ -2329,23 +2527,46 @@ int main(int argc, char **argv)
modes++;
}
- init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx);
- init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx);
+ init_iface(ifobj_rx, worker_testapp_validate_rx);
+ init_iface(ifobj_tx, worker_testapp_validate_tx);
- test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
- tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
- rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ test_spec_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]);
+ tx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ rx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
if (!tx_pkt_stream_default || !rx_pkt_stream_default)
exit_with_error(ENOMEM);
test.tx_pkt_stream_default = tx_pkt_stream_default;
test.rx_pkt_stream_default = rx_pkt_stream_default;
- ksft_set_plan(modes * TEST_TYPE_MAX);
+ if (opt_run_test == RUN_ALL_TESTS)
+ nb_tests = ARRAY_SIZE(tests);
+ else
+ nb_tests = 1;
+ if (opt_mode == TEST_MODE_ALL) {
+ ksft_set_plan(modes * nb_tests);
+ } else {
+ if (opt_mode == TEST_MODE_DRV && modes <= TEST_MODE_DRV) {
+ ksft_print_msg("Error: XDP_DRV mode not supported.\n");
+ ksft_exit_xfail();
+ }
+ if (opt_mode == TEST_MODE_ZC && modes <= TEST_MODE_ZC) {
+ ksft_print_msg("Error: zero-copy mode not supported.\n");
+ ksft_exit_xfail();
+ }
+
+ ksft_set_plan(nb_tests);
+ }
for (i = 0; i < modes; i++) {
- for (j = 0; j < TEST_TYPE_MAX; j++) {
- test_spec_init(&test, ifobj_tx, ifobj_rx, i);
- run_pkt_test(&test, i, j);
+ if (opt_mode != TEST_MODE_ALL && i != opt_mode)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(tests); j++) {
+ if (opt_run_test != RUN_ALL_TESTS && j != opt_run_test)
+ continue;
+
+ test_spec_init(&test, ifobj_tx, ifobj_rx, i, &tests[j]);
+ run_pkt_test(&test);
usleep(USLEEP_MAX);
if (test.fail)
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 233b66cef64a..f174df2d693f 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -5,7 +5,10 @@
#ifndef XSKXCEIVER_H_
#define XSKXCEIVER_H_
+#include <limits.h>
+
#include "xsk_xdp_progs.skel.h"
+#include "xsk_xdp_common.h"
#ifndef SOL_XDP
#define SOL_XDP 283
@@ -33,8 +36,7 @@
#define TEST_SKIP 2
#define MAX_INTERFACES 2
#define MAX_INTERFACE_NAME_CHARS 16
-#define MAX_SOCKETS 2
-#define MAX_TEST_NAME_SIZE 32
+#define MAX_TEST_NAME_SIZE 48
#define MAX_TEARDOWN_ITER 10
#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2) /* Just to align the data in the packet */
#define MIN_PKT_SIZE 64
@@ -56,6 +58,8 @@
#define XSK_DESC__MAX_SKB_FRAGS 18
#define HUGEPAGE_SIZE (2 * 1024 * 1024)
#define PKT_DUMP_NB_TO_PRINT 16
+#define RUN_ALL_TESTS UINT_MAX
+#define NUM_MAC_ADDRESSES 4
#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
@@ -63,43 +67,9 @@ enum test_mode {
TEST_MODE_SKB,
TEST_MODE_DRV,
TEST_MODE_ZC,
- TEST_MODE_MAX
-};
-
-enum test_type {
- TEST_TYPE_RUN_TO_COMPLETION,
- TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME,
- TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT,
- TEST_TYPE_RX_POLL,
- TEST_TYPE_TX_POLL,
- TEST_TYPE_POLL_RXQ_TMOUT,
- TEST_TYPE_POLL_TXQ_TMOUT,
- TEST_TYPE_UNALIGNED,
- TEST_TYPE_ALIGNED_INV_DESC,
- TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME,
- TEST_TYPE_UNALIGNED_INV_DESC,
- TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME,
- TEST_TYPE_HEADROOM,
- TEST_TYPE_TEARDOWN,
- TEST_TYPE_BIDI,
- TEST_TYPE_STATS_RX_DROPPED,
- TEST_TYPE_STATS_TX_INVALID_DESCS,
- TEST_TYPE_STATS_RX_FULL,
- TEST_TYPE_STATS_FILL_EMPTY,
- TEST_TYPE_BPF_RES,
- TEST_TYPE_XDP_DROP_HALF,
- TEST_TYPE_XDP_METADATA_COUNT,
- TEST_TYPE_XDP_METADATA_COUNT_MB,
- TEST_TYPE_RUN_TO_COMPLETION_MB,
- TEST_TYPE_UNALIGNED_MB,
- TEST_TYPE_ALIGNED_INV_DESC_MB,
- TEST_TYPE_UNALIGNED_INV_DESC_MB,
- TEST_TYPE_TOO_MANY_FRAGS,
- TEST_TYPE_MAX
+ TEST_MODE_ALL
};
-static bool opt_verbose;
-
struct xsk_umem_info {
struct xsk_ring_prod fq;
struct xsk_ring_cons cq;
@@ -118,8 +88,11 @@ struct xsk_socket_info {
struct xsk_ring_prod tx;
struct xsk_umem_info *umem;
struct xsk_socket *xsk;
+ struct pkt_stream *pkt_stream;
u32 outstanding_tx;
u32 rxqsize;
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
};
struct pkt {
@@ -135,12 +108,16 @@ struct pkt_stream {
u32 current_pkt_nb;
struct pkt *pkts;
u32 max_pkt_len;
+ u32 nb_rx_pkts;
+ u32 nb_valid_entries;
bool verbatim;
};
struct ifobject;
+struct test_spec;
typedef int (*validation_func_t)(struct ifobject *ifobj);
typedef void *(*thread_func_t)(void *arg);
+typedef int (*test_func_t)(struct test_spec *test);
struct ifobject {
char ifname[MAX_INTERFACE_NAME_CHARS];
@@ -149,7 +126,6 @@ struct ifobject {
struct xsk_umem_info *umem;
thread_func_t func_ptr;
validation_func_t validation_func;
- struct pkt_stream *pkt_stream;
struct xsk_xdp_progs *xdp_progs;
struct bpf_map *xskmap;
struct bpf_program *xdp_prog;
@@ -169,8 +145,6 @@ struct ifobject {
bool unaligned_supp;
bool multi_buff_supp;
bool multi_buff_zc_supp;
- u8 dst_mac[ETH_ALEN];
- u8 src_mac[ETH_ALEN];
};
struct test_spec {
@@ -182,6 +156,7 @@ struct test_spec {
struct bpf_program *xdp_prog_tx;
struct bpf_map *xskmap_rx;
struct bpf_map *xskmap_tx;
+ test_func_t test_func;
int mtu;
u16 total_steps;
u16 current_step;
@@ -196,4 +171,6 @@ pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;
int pkts_in_flight;
+static const u8 g_mac[ETH_ALEN] = {0x55, 0x44, 0x33, 0x22, 0x11, 0x00};
+
#endif /* XSKXCEIVER_H_ */
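
With the per-interface src_mac/dst_mac fields gone, g_mac is the single base address and NUM_MAC_ADDRESSES (4) covers two sockets on each of two interfaces. A plausible derivation sketch, assuming each slot offsets the base MAC's last octet (the patch's actual scheme may differ):

    #include <string.h>  /* memcpy */

    static void derive_mac_sketch(u8 *mac, u32 slot)
    {
        memcpy(mac, g_mac, ETH_ALEN);
        mac[5] += slot;  /* slot < NUM_MAC_ADDRESSES, last octet starts at 0x00 */
    }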