Diffstat (limited to 'tools/testing/selftests/bpf')
103 files changed, 5860 insertions, 462 deletions
diff --git a/tools/testing/selftests/bpf/DENYLIST b/tools/testing/selftests/bpf/DENYLIST
index 1789a61d0a9b..f748f2c33b22 100644
--- a/tools/testing/selftests/bpf/DENYLIST
+++ b/tools/testing/selftests/bpf/DENYLIST
@@ -1,6 +1,5 @@
 # TEMPORARY
 # Alphabetical order
-dynptr/test_probe_read_user_str_dynptr # disabled until https://patchwork.kernel.org/project/linux-mm/patch/20250422131449.57177-1-mykyta.yatsenko5@gmail.com/ makes it into the bpf-next
 get_stack_raw_tp # spams with kernel warnings until next bpf -> bpf-next merge
 stacktrace_build_id
 stacktrace_build_id_nmi
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
deleted file mode 100644
index 12e99c0277a8..000000000000
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ /dev/null
@@ -1 +0,0 @@
-tracing_struct/struct_many_args # struct_many_args:FAIL:tracing_struct_many_args__attach unexpected error: -524
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 910d8d6402ef..4863106034df 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -109,6 +109,7 @@ TEST_PROGS := test_kmod.sh \
 	test_xdping.sh \
 	test_bpftool_build.sh \
 	test_bpftool.sh \
+	test_bpftool_map.sh \
 	test_bpftool_metadata.sh \
 	test_doc_build.sh \
 	test_xsk.sh \
@@ -840,6 +841,11 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
	$(call msg,BINARY,,$@)
	$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@

+# This works around GCC warning about snprintf truncating strings like:
+#
+#   char a[PATH_MAX], b[PATH_MAX];
+#   snprintf(a, sizeof(a), "%s/foo", b);	// triggers -Wformat-truncation
+$(OUTPUT)/veristat.o: CFLAGS += -Wno-format-truncation
 $(OUTPUT)/veristat.o: $(BPFOBJ)
 $(OUTPUT)/veristat: $(OUTPUT)/veristat.o
	$(call msg,BINARY,,$@)
diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h
index 68a51dcc0669..16f8ce832004 100644
--- a/tools/testing/selftests/bpf/bpf_arena_common.h
+++ b/tools/testing/selftests/bpf/bpf_arena_common.h
@@ -46,8 +46,11 @@
 void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
				    int node_id, __u64 flags) __ksym __weak;
+int bpf_arena_reserve_pages(void *map, void __arena *addr, __u32 page_cnt) __ksym __weak;
 void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;

+#define arena_base(map) ((void __arena *)((struct bpf_arena *)(map))->user_vm_start)
+
 #else /* when compiled as user space code */

 #define __arena
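Side note on the arena additions above: bpf_arena_reserve_pages() lets a program claim a range of arena virtual addresses without populating pages, and arena_base() recovers the arena's user VM start from the map. A minimal sketch of how a BPF-side caller might use them; the map definition follows the arena selftests, the attach point is illustrative:

	/* Sketch only; bpf_arena_reserve_pages() and arena_base() come from
	 * the declarations above, everything else is illustrative.
	 */
	struct {
		__uint(type, BPF_MAP_TYPE_ARENA);
		__uint(map_flags, BPF_F_MMAPABLE);
		__uint(max_entries, 100);
	} arena SEC(".maps");

	SEC("syscall")
	int reserve_first_page(void *ctx)
	{
		void __arena *base = arena_base(&arena);

		/* Keep page 0 out of the allocator; returns 0 on success,
		 * a negative errno otherwise.
		 */
		return bpf_arena_reserve_pages(&arena, base, 1);
	}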
diff --git a/tools/testing/selftests/bpf/bpf_atomic.h b/tools/testing/selftests/bpf/bpf_atomic.h
index a9674e544322..c550e5711967 100644
--- a/tools/testing/selftests/bpf/bpf_atomic.h
+++ b/tools/testing/selftests/bpf/bpf_atomic.h
@@ -61,7 +61,7 @@ extern bool CONFIG_X86_64 __kconfig __weak;

 #define smp_mb()				\
	({					\
-		unsigned long __val;		\
+		volatile unsigned long __val;	\
		__sync_fetch_and_add(&__val, 0);\
	})
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index 8215c9b3115e..9386dfe8b884 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -69,7 +69,7 @@ extern int bpf_get_file_xattr(struct file *file, const char *name,
			      struct bpf_dynptr *value_ptr) __ksym;
 extern int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr *digest_ptr) __ksym;

-extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
+extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
 extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
 extern void bpf_key_put(struct bpf_key *key) __ksym;
 extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index e4535451322e..15f626014872 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -4,6 +4,7 @@
 #include <sys/mount.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include <sys/xattr.h>
 #include <linux/limits.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -319,6 +320,26 @@ int join_parent_cgroup(const char *relative_path)
 }

 /**
+ * set_cgroup_xattr() - Set an xattr on a cgroup dir
+ * @relative_path: The cgroup path, relative to the workdir, to set the xattr on
+ * @name: xattr name
+ * @value: xattr value
+ *
+ * This function sets an xattr on the cgroup dir.
+ *
+ * On success, it returns 0; on failure it returns -1.
+ */
+int set_cgroup_xattr(const char *relative_path,
+		     const char *name,
+		     const char *value)
+{
+	char cgroup_path[PATH_MAX + 1];
+
+	format_cgroup_path(cgroup_path, relative_path);
+	return setxattr(cgroup_path, name, value, strlen(value) + 1, 0);
+}
+
+/**
  * __cleanup_cgroup_environment() - Delete temporary cgroups
  *
  * This is a helper for cleanup_cgroup_environment() that is responsible for
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h
index 502845160d88..182e1ac36c95 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.h
+++ b/tools/testing/selftests/bpf/cgroup_helpers.h
@@ -26,6 +26,10 @@ int join_cgroup(const char *relative_path);
 int join_root_cgroup(void);
 int join_parent_cgroup(const char *relative_path);

+int set_cgroup_xattr(const char *relative_path,
+		     const char *name,
+		     const char *value);
+
 int setup_cgroup_environment(void);
 void cleanup_cgroup_environment(void);
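For orientation, the new helper is a thin setxattr() wrapper keyed off the cgroup workdir; usage from a test looks like this (the path assumes setup_cgroup_environment() or test__join_cgroup() already created the hierarchy):

	/* Illustrative only: tag a cgroup created by the test harness. */
	#include "cgroup_helpers.h"

	static int tag_cgroup(void)
	{
		/* "/foo" is relative to the cgroup workdir, as with
		 * join_cgroup(); returns 0 on success, -1 on failure.
		 */
		return set_cgroup_xattr("/foo", "user.bpf_test", "some_value");
	}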
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index f74e1ea0ad3b..8916ab814a3e 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -97,6 +97,9 @@ CONFIG_NF_TABLES_NETDEV=y
 CONFIG_NF_TABLES_IPV4=y
 CONFIG_NF_TABLES_IPV6=y
 CONFIG_NETFILTER_INGRESS=y
+CONFIG_IP_NF_IPTABLES_LEGACY=y
+CONFIG_IP6_NF_IPTABLES_LEGACY=y
+CONFIG_NETFILTER_XTABLES_LEGACY=y
 CONFIG_NF_FLOW_TABLE=y
 CONFIG_NF_FLOW_TABLE_INET=y
 CONFIG_NETFILTER_NETLINK=y
@@ -105,6 +108,7 @@ CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP6_NF_IPTABLES=y
 CONFIG_IP6_NF_FILTER=y
 CONFIG_NF_NAT=y
+CONFIG_PACKET=y
 CONFIG_RC_CORE=y
 CONFIG_SECURITY=y
 CONFIG_SECURITYFS=y
diff --git a/tools/testing/selftests/bpf/config.ppc64el b/tools/testing/selftests/bpf/config.ppc64el
new file mode 100644
index 000000000000..9acf389dc4ce
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.ppc64el
@@ -0,0 +1,93 @@
+CONFIG_ALTIVEC=y
+CONFIG_AUDIT=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BONDING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=hvc0 wg.success=hvc1 panic_on_warn=1"
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPUSETS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_FS=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEVTMPFS=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_FRAME_POINTER=y
+CONFIG_FRAME_WARN=1280
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HVC_CONSOLE=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KPROBES=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_VRF=y
+CONFIG_NET=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NONPORTABLE=y
+CONFIG_NR_CPUS=256
+CONFIG_PACKET=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PPC64=y
+CONFIG_PPC_OF_BOOT_TRAMPOLINE=y
+CONFIG_PPC_PSERIES=y
+CONFIG_PPC_RADIX_MMU=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SMP=y
+CONFIG_SOC_VIRT=y
+CONFIG_SYSVIPC=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_THREAD_SHIFT=14
+CONFIG_TLS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VSOCKETS_LOOPBACK=y
+CONFIG_VSX=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
index 67557cda2208..42b49870e520 100644
--- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
@@ -13,7 +13,7 @@ static void test_fail_cases(void)
 {
	LIBBPF_OPTS(bpf_map_create_opts, opts);
-	__u32 value;
+	__u32 value = 0;
	int fd, err;

	/* Invalid key size */
"get_link_info")) + return -1; + + if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type")) + return -1; + + ASSERT_EQ(info.raw_tracepoint.cookie, cookie, "raw_tp_cookie"); + + return 0; +} + static void raw_tp_subtest(struct test_bpf_cookie *skel) { __u64 cookie; - int prog_fd, link_fd = -1; + int err, prog_fd, link_fd = -1; struct bpf_link *link = NULL; LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts); LIBBPF_OPTS(bpf_raw_tracepoint_opts, opts); @@ -656,6 +697,11 @@ static void raw_tp_subtest(struct test_bpf_cookie *skel) goto cleanup; usleep(1); /* trigger */ + + err = verify_raw_tp_link_info(link_fd, cookie); + if (!ASSERT_OK(err, "verify_raw_tp_link_info")) + goto cleanup; + close(link_fd); /* detach */ link_fd = -1; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index add4a18c33bd..5225d69bf79b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -323,7 +323,7 @@ static void test_task_pidfd(void) static void test_task_sleepable(void) { struct bpf_iter_tasks *skel; - int pid, status, err, data_pipe[2], finish_pipe[2], c; + int pid, status, err, data_pipe[2], finish_pipe[2], c = 0; char *test_data = NULL; char *test_data_long = NULL; char *data[2]; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c index fe2c502e5089..ecc3d47919ad 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c @@ -78,7 +78,7 @@ static int test_setup_uffd(void *fault_addr) } uffd_register.range.start = (unsigned long)fault_addr; - uffd_register.range.len = 4096; + uffd_register.range.len = getpagesize(); uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING; if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) { close(uffd); diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index c0a776feec23..82903585c870 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -879,6 +879,122 @@ static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d, "static int bpf_cgrp_storage_busy = (int)2", 2); } +struct btf_dump_string_ctx { + struct btf *btf; + struct btf_dump *d; + char *str; + struct btf_dump_type_data_opts *opts; + int array_id; +}; + +static int btf_dump_one_string(struct btf_dump_string_ctx *ctx, + char *ptr, size_t ptr_sz, + const char *expected_val) +{ + size_t type_sz; + int ret; + + ctx->str[0] = '\0'; + type_sz = btf__resolve_size(ctx->btf, ctx->array_id); + ret = btf_dump__dump_type_data(ctx->d, ctx->array_id, ptr, ptr_sz, ctx->opts); + if (type_sz <= ptr_sz) { + if (!ASSERT_EQ(ret, type_sz, "failed/unexpected type_sz")) + return -EINVAL; + } + if (!ASSERT_STREQ(ctx->str, expected_val, "ensure expected/actual match")) + return -EFAULT; + return 0; +} + +static void btf_dump_strings(struct btf_dump_string_ctx *ctx) +{ + struct btf_dump_type_data_opts *opts = ctx->opts; + + opts->emit_strings = true; + + opts->compact = true; + opts->emit_zeroes = false; + + opts->skip_names = false; + btf_dump_one_string(ctx, "foo", 4, "(char[4])\"foo\""); + + opts->skip_names = true; + btf_dump_one_string(ctx, "foo", 4, "\"foo\""); + + /* This should have no effect. */ + opts->emit_zeroes = false; + btf_dump_one_string(ctx, "foo", 4, "\"foo\""); + + /* This should have no effect. 
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index c0a776feec23..82903585c870 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -879,6 +879,122 @@ static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d,
		 "static int bpf_cgrp_storage_busy = (int)2", 2);
 }

+struct btf_dump_string_ctx {
+	struct btf *btf;
+	struct btf_dump *d;
+	char *str;
+	struct btf_dump_type_data_opts *opts;
+	int array_id;
+};
+
+static int btf_dump_one_string(struct btf_dump_string_ctx *ctx,
+			       char *ptr, size_t ptr_sz,
+			       const char *expected_val)
+{
+	size_t type_sz;
+	int ret;
+
+	ctx->str[0] = '\0';
+	type_sz = btf__resolve_size(ctx->btf, ctx->array_id);
+	ret = btf_dump__dump_type_data(ctx->d, ctx->array_id, ptr, ptr_sz, ctx->opts);
+	if (type_sz <= ptr_sz) {
+		if (!ASSERT_EQ(ret, type_sz, "failed/unexpected type_sz"))
+			return -EINVAL;
+	}
+	if (!ASSERT_STREQ(ctx->str, expected_val, "ensure expected/actual match"))
+		return -EFAULT;
+	return 0;
+}
+
+static void btf_dump_strings(struct btf_dump_string_ctx *ctx)
+{
+	struct btf_dump_type_data_opts *opts = ctx->opts;
+
+	opts->emit_strings = true;
+
+	opts->compact = true;
+	opts->emit_zeroes = false;
+
+	opts->skip_names = false;
+	btf_dump_one_string(ctx, "foo", 4, "(char[4])\"foo\"");
+
+	opts->skip_names = true;
+	btf_dump_one_string(ctx, "foo", 4, "\"foo\"");
+
+	/* This should have no effect. */
+	opts->emit_zeroes = false;
+	btf_dump_one_string(ctx, "foo", 4, "\"foo\"");
+
+	/* This should have no effect. */
+	opts->compact = false;
+	btf_dump_one_string(ctx, "foo", 4, "\"foo\"");
+
+	/* Non-printable characters come out as hex. */
+	btf_dump_one_string(ctx, "fo\xff", 4, "\"fo\\xff\"");
+	btf_dump_one_string(ctx, "fo\x7", 4, "\"fo\\x07\"");
+
+	/*
+	 * Strings that are too long for the specified type ("char[4]")
+	 * should fall back to the current behavior.
+	 */
+	opts->compact = true;
+	btf_dump_one_string(ctx, "abcde", 6, "['a','b','c','d',]");
+
+	/*
+	 * Strings that are too short for the specified type ("char[4]")
+	 * should work normally.
+	 */
+	btf_dump_one_string(ctx, "ab", 3, "\"ab\"");
+
+	/* Non-NUL-terminated arrays don't get printed as strings. */
+	char food[4] = { 'f', 'o', 'o', 'd' };
+	char bye[3] = { 'b', 'y', 'e' };
+
+	btf_dump_one_string(ctx, food, 4, "['f','o','o','d',]");
+	btf_dump_one_string(ctx, bye, 3, "['b','y','e',]");
+
+	/* The embedded NUL should terminate the string. */
+	char embed[4] = { 'f', 'o', '\0', 'd' };
+
+	btf_dump_one_string(ctx, embed, 4, "\"fo\"");
+}
+
+static void test_btf_dump_string_data(void)
+{
+	struct test_ctx t = {};
+	char str[STRSIZE];
+	struct btf_dump *d;
+	DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts);
+	struct btf_dump_string_ctx ctx;
+	int char_id, int_id, array_id;
+
+	if (test_ctx__init(&t))
+		return;
+
+	d = btf_dump__new(t.btf, btf_dump_snprintf, str, NULL);
+	if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
+		return;
+
+	/* Generate BTF for a four-element char array. */
+	char_id = btf__add_int(t.btf, "char", 1, BTF_INT_CHAR);
+	ASSERT_EQ(char_id, 1, "char_id");
+	int_id = btf__add_int(t.btf, "int", 4, BTF_INT_SIGNED);
+	ASSERT_EQ(int_id, 2, "int_id");
+	array_id = btf__add_array(t.btf, int_id, char_id, 4);
+	ASSERT_EQ(array_id, 3, "array_id");
+
+	ctx.btf = t.btf;
+	ctx.d = d;
+	ctx.str = str;
+	ctx.opts = &opts;
+	ctx.array_id = array_id;
+
+	btf_dump_strings(&ctx);
+
+	btf_dump__free(d);
+	test_ctx__free(&t);
+}
+
 static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str,
			     const char *name, const char *expected_val,
			     void *data, size_t data_sz)
@@ -970,6 +1086,8 @@ void test_btf_dump() {
		test_btf_dump_struct_data(btf, d, str);
	if (test__start_subtest("btf_dump: var_data"))
		test_btf_dump_var_data(btf, d, str);
+	if (test__start_subtest("btf_dump: string_data"))
+		test_btf_dump_string_data();

	btf_dump__free(d);
	btf__free(btf);
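For orientation, the new subtest drives btf_dump's emit_strings mode; a trimmed user-space sketch of the call pattern (btf_dump_snprintf is the selftest's own print callback, and array_id is a char[4] type as constructed above):

	LIBBPF_OPTS(btf_dump_type_data_opts, opts,
		.compact = true,
		.skip_names = true,
		.emit_strings = true,	/* "foo" instead of ['f','o','o',] */
	);
	char buf[STRSIZE];
	struct btf_dump *d = btf_dump__new(btf, btf_dump_snprintf, buf, NULL);

	btf_dump__dump_type_data(d, array_id, "foo", 4, &opts);
	/* buf now holds "\"foo\"" */
	btf_dump__free(d);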
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c
new file mode 100644
index 000000000000..bb60704a3ef9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "cgroup_mprog.skel.h"
+
+static void assert_mprog_count(int cg, int atype, int expected)
+{
+	__u32 count = 0, attach_flags = 0;
+	int err;
+
+	err = bpf_prog_query(cg, atype, 0, &attach_flags,
+			     NULL, &count);
+	ASSERT_EQ(count, expected, "count");
+	ASSERT_EQ(err, 0, "prog_query");
+}
+
+static void test_prog_attach_detach(int atype)
+{
+	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+	LIBBPF_OPTS(bpf_prog_query_opts, optq);
+	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+	struct cgroup_mprog *skel;
+	__u32 prog_ids[10];
+	int cg, err;
+
+	cg = test__join_cgroup("/prog_attach_detach");
+	if (!ASSERT_GE(cg, 0, "join_cgroup /prog_attach_detach"))
+		return;
+
+	skel = cgroup_mprog__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
+		goto cleanup;
+
+	fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+	fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+	fd3 = bpf_program__fd(skel->progs.getsockopt_3);
+	fd4 = bpf_program__fd(skel->progs.getsockopt_4);
+
+	id1 = id_from_prog_fd(fd1);
+	id2 = id_from_prog_fd(fd2);
+	id3 = id_from_prog_fd(fd3);
+	id4 = id_from_prog_fd(fd4);
+
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
+		.expected_revision = 1,
+	);
+
+	/* ordering: [fd1] */
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup;
+
+	assert_mprog_count(cg, atype, 1);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE,
+		.expected_revision = 2,
+	);
+
+	/* ordering: [fd2, fd1] */
+	err = bpf_prog_attach_opts(fd2, cg, atype, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup1;
+
+	assert_mprog_count(cg, atype, 2);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+		.relative_fd = fd2,
+		.expected_revision = 3,
+	);
+
+	/* ordering: [fd2, fd3, fd1] */
+	err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup2;
+
+	assert_mprog_count(cg, atype, 3);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI,
+		.expected_revision = 4,
+	);
+
+	/* ordering: [fd2, fd3, fd1, fd4] */
+	err = bpf_prog_attach_opts(fd4, cg, atype, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup3;
+
+	assert_mprog_count(cg, atype, 4);
+
+	/* retrieve optq.prog_cnt */
+	err = bpf_prog_query_opts(cg, atype, &optq);
+	if (!ASSERT_OK(err, "prog_query"))
+		goto cleanup4;
+
+	/* optq.prog_cnt will be used in the query below */
+	memset(prog_ids, 0, sizeof(prog_ids));
+	optq.prog_ids = prog_ids;
+	err = bpf_prog_query_opts(cg, atype, &optq);
+	if (!ASSERT_OK(err, "prog_query"))
+		goto cleanup4;
+
+	ASSERT_EQ(optq.count, 4, "count");
+	ASSERT_EQ(optq.revision, 5, "revision");
+	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+	ASSERT_EQ(optq.prog_ids[2], id1, "prog_ids[2]");
+	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+	ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+cleanup4:
+	optd.expected_revision = 5;
+	err = bpf_prog_detach_opts(fd4, cg, atype, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+	LIBBPF_OPTS_RESET(optd);
+	err = bpf_prog_detach_opts(fd3, cg, atype, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(cg, atype, 2);
+
+	/* Check revision after two detach operations */
+	err = bpf_prog_query_opts(cg, atype, &optq);
+	ASSERT_OK(err, "prog_query");
+	ASSERT_EQ(optq.revision, 7, "revision");
+
+cleanup2:
+	err = bpf_prog_detach_opts(fd2, cg, atype, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+	err = bpf_prog_detach_opts(fd1, cg, atype, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(cg, atype, 0);
+
+cleanup:
+	cgroup_mprog__destroy(skel);
+	close(cg);
+}
+
+static void test_link_attach_detach(int atype)
+{
+	LIBBPF_OPTS(bpf_cgroup_opts, opta);
+	LIBBPF_OPTS(bpf_cgroup_opts, optd);
+	LIBBPF_OPTS(bpf_prog_query_opts, optq);
+	struct bpf_link *link1, *link2, *link3, *link4;
+	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+	struct cgroup_mprog *skel;
+	__u32 prog_ids[10];
+	int cg, err;
+
+	cg = test__join_cgroup("/link_attach_detach");
+	if (!ASSERT_GE(cg, 0, "join_cgroup /link_attach_detach"))
+		return;
+
+	skel = cgroup_mprog__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
+		goto cleanup;
+
+	fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+	fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+	fd3 = bpf_program__fd(skel->progs.getsockopt_3);
+	fd4 = bpf_program__fd(skel->progs.getsockopt_4);
+
+	id1 = id_from_prog_fd(fd1);
+	id2 = id_from_prog_fd(fd2);
+	id3 = id_from_prog_fd(fd3);
+	id4 = id_from_prog_fd(fd4);
+
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.expected_revision = 1,
+	);
+
+	/* ordering: [fd1] */
+	link1 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_1, cg, &opta);
+	if (!ASSERT_OK_PTR(link1, "link_attach"))
+		goto cleanup;
+
+	assert_mprog_count(cg, atype, 1);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_BEFORE | BPF_F_LINK,
+		.relative_id = id_from_link_fd(bpf_link__fd(link1)),
+		.expected_revision = 2,
+	);
+
+	/* ordering: [fd2, fd1] */
+	link2 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_2, cg, &opta);
+	if (!ASSERT_OK_PTR(link2, "link_attach"))
+		goto cleanup1;
+
+	assert_mprog_count(cg, atype, 2);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_AFTER | BPF_F_LINK,
+		.relative_fd = bpf_link__fd(link2),
+		.expected_revision = 3,
+	);
+
+	/* ordering: [fd2, fd3, fd1] */
+	link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+	if (!ASSERT_OK_PTR(link3, "link_attach"))
+		goto cleanup2;
+
+	assert_mprog_count(cg, atype, 3);
+
+	LIBBPF_OPTS_RESET(opta,
+		.expected_revision = 4,
+	);
+
+	/* ordering: [fd2, fd3, fd1, fd4] */
+	link4 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_4, cg, &opta);
+	if (!ASSERT_OK_PTR(link4, "link_attach"))
+		goto cleanup3;
+
+	assert_mprog_count(cg, atype, 4);
+
+	/* retrieve optq.prog_cnt */
+	err = bpf_prog_query_opts(cg, atype, &optq);
+	if (!ASSERT_OK(err, "prog_query"))
+		goto cleanup4;
+
+	/* optq.prog_cnt will be used in the query below */
+	memset(prog_ids, 0, sizeof(prog_ids));
+	optq.prog_ids = prog_ids;
+	err = bpf_prog_query_opts(cg, atype, &optq);
+	if (!ASSERT_OK(err, "prog_query"))
+		goto cleanup4;
+
+	ASSERT_EQ(optq.count, 4, "count");
+	ASSERT_EQ(optq.revision, 5, "revision");
+	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+	ASSERT_EQ(optq.prog_ids[2], id1, "prog_ids[2]");
+	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+	ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+cleanup4:
+	bpf_link__destroy(link4);
+	assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+	bpf_link__destroy(link3);
+	assert_mprog_count(cg, atype, 2);
+
+	/* Check revision after two detach operations */
+	err = bpf_prog_query_opts(cg, atype, &optq);
+	ASSERT_OK(err, "prog_query");
+	ASSERT_EQ(optq.revision, 7, "revision");
+
+cleanup2:
+	bpf_link__destroy(link2);
+	assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+	bpf_link__destroy(link1);
+	assert_mprog_count(cg, atype, 0);
+
+cleanup:
+	cgroup_mprog__destroy(skel);
+	close(cg);
+}
+
+static void test_preorder_prog_attach_detach(int atype)
+{
+	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+	__u32 fd1, fd2, fd3, fd4;
+	struct cgroup_mprog *skel;
+	int cg, err;
+
+	cg = test__join_cgroup("/preorder_prog_attach_detach");
+	if (!ASSERT_GE(cg, 0, "join_cgroup /preorder_prog_attach_detach"))
+		return;
+
+	skel = cgroup_mprog__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
+		goto cleanup;
+
+	fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+	fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+	fd3 = bpf_program__fd(skel->progs.getsockopt_3);
+	fd4 = bpf_program__fd(skel->progs.getsockopt_4);
+
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI,
+		.expected_revision = 1,
+	);
+
+	/* ordering: [fd1] */
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup;
+
+	assert_mprog_count(cg, atype, 1);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_PREORDER,
+		.expected_revision = 2,
+	);
+
+	/* ordering: [fd1, fd2] */
+	err = bpf_prog_attach_opts(fd2, cg, atype, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup1;
+
+	assert_mprog_count(cg, atype, 2);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+		.relative_fd = fd2,
+		.expected_revision = 3,
+	);
+
+	err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+	if (!ASSERT_EQ(err, -EINVAL, "prog_attach"))
+		goto cleanup2;
+
+	assert_mprog_count(cg, atype, 2);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER | BPF_F_PREORDER,
+		.relative_fd = fd2,
+		.expected_revision = 3,
+	);
+
+	/* ordering: [fd1, fd2, fd3] */
+	err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup2;
+
+	assert_mprog_count(cg, atype, 3);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI,
+		.expected_revision = 4,
+	);
+
+	/* ordering: [fd1, fd2, fd3, fd4] */
+	err = bpf_prog_attach_opts(fd4, cg, atype, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup3;
+
+	assert_mprog_count(cg, atype, 4);
+
+	err = bpf_prog_detach_opts(fd4, cg, atype, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+	err = bpf_prog_detach_opts(fd3, cg, atype, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(cg, atype, 2);
+
+cleanup2:
+	err = bpf_prog_detach_opts(fd2, cg, atype, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+	err = bpf_prog_detach_opts(fd1, cg, atype, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(cg, atype, 0);
+
+cleanup:
+	cgroup_mprog__destroy(skel);
+	close(cg);
+}
+
+static void test_preorder_link_attach_detach(int atype)
+{
+	LIBBPF_OPTS(bpf_cgroup_opts, opta);
+	struct bpf_link *link1, *link2, *link3, *link4;
+	struct cgroup_mprog *skel;
+	__u32 fd2;
+	int cg;
+
+	cg = test__join_cgroup("/preorder_link_attach_detach");
+	if (!ASSERT_GE(cg, 0, "join_cgroup /preorder_link_attach_detach"))
+		return;
+
+	skel = cgroup_mprog__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
+		goto cleanup;
+
+	fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.expected_revision = 1,
+	);
+
+	/* ordering: [fd1] */
+	link1 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_1, cg, &opta);
+	if (!ASSERT_OK_PTR(link1, "link_attach"))
+		goto cleanup;
+
+	assert_mprog_count(cg, atype, 1);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_PREORDER,
+		.expected_revision = 2,
+	);
+
+	/* ordering: [fd1, fd2] */
+	link2 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_2, cg, &opta);
+	if (!ASSERT_OK_PTR(link2, "link_attach"))
+		goto cleanup1;
+
+	assert_mprog_count(cg, atype, 2);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_AFTER,
+		.relative_fd = fd2,
+		.expected_revision = 3,
+	);
+
+	link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+	if (!ASSERT_ERR_PTR(link3, "link_attach"))
+		goto cleanup2;
+
+	assert_mprog_count(cg, atype, 2);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_AFTER | BPF_F_PREORDER | BPF_F_LINK,
+		.relative_fd = bpf_link__fd(link2),
+		.expected_revision = 3,
+	);
+
+	/* ordering: [fd1, fd2, fd3] */
+	link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+	if (!ASSERT_OK_PTR(link3, "link_attach"))
+		goto cleanup2;
+
+	assert_mprog_count(cg, atype, 3);
+
+	LIBBPF_OPTS_RESET(opta,
+		.expected_revision = 4,
+	);
+
+	/* ordering: [fd1, fd2, fd3, fd4] */
+	link4 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_4, cg, &opta);
+	if (!ASSERT_OK_PTR(link4, "link_attach"))
+		goto cleanup3;
+
+	assert_mprog_count(cg, atype, 4);
+
+	bpf_link__destroy(link4);
+	assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+	bpf_link__destroy(link3);
+	assert_mprog_count(cg, atype, 2);
+
+cleanup2:
+	bpf_link__destroy(link2);
+	assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+	bpf_link__destroy(link1);
+	assert_mprog_count(cg, atype, 0);
+
+cleanup:
+	cgroup_mprog__destroy(skel);
+	close(cg);
+}
+
+static void test_invalid_attach_detach(int atype)
+{
+	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+	__u32 fd1, fd2, id2;
+	struct cgroup_mprog *skel;
+	int cg, err;
+
+	cg = test__join_cgroup("/invalid_attach_detach");
+	if (!ASSERT_GE(cg, 0, "join_cgroup /invalid_attach_detach"))
+		return;
+
+	skel = cgroup_mprog__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
+		goto cleanup;
+
+	fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+	fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+
+	id2 = id_from_prog_fd(fd2);
+
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
+		.relative_id = id2,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	ASSERT_EQ(err, -EINVAL, "prog_attach");
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_ID,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	ASSERT_EQ(err, -ENOENT, "prog_attach");
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER | BPF_F_ID,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	ASSERT_EQ(err, -ENOENT, "prog_attach");
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
+		.relative_id = id2,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	ASSERT_EQ(err, -EINVAL, "prog_attach");
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_LINK,
+		.relative_id = id2,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	ASSERT_EQ(err, -EINVAL, "prog_attach");
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI,
+		.relative_id = id2,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	ASSERT_EQ(err, -EINVAL, "prog_attach");
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE,
+		.relative_fd = fd1,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	ASSERT_EQ(err, -ENOENT, "prog_attach");
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+		.relative_fd = fd1,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	ASSERT_EQ(err, -ENOENT, "prog_attach");
+	assert_mprog_count(cg, atype, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup;
+	assert_mprog_count(cg, atype, 1);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	ASSERT_EQ(err, -EINVAL, "prog_attach");
+	assert_mprog_count(cg, atype, 1);
+
+	LIBBPF_OPTS_RESET(opta,
+		.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE | BPF_F_AFTER,
+		.replace_prog_fd = fd1,
+	);
+
+	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+	ASSERT_EQ(err, -EINVAL, "prog_attach");
+	assert_mprog_count(cg, atype, 1);
+cleanup:
+	cgroup_mprog__destroy(skel);
+	close(cg);
+}
+
+void test_cgroup_mprog_opts(void)
+{
+	if (test__start_subtest("prog_attach_detach"))
+		test_prog_attach_detach(BPF_CGROUP_GETSOCKOPT);
+	if (test__start_subtest("link_attach_detach"))
+		test_link_attach_detach(BPF_CGROUP_GETSOCKOPT);
+	if (test__start_subtest("preorder_prog_attach_detach"))
+		test_preorder_prog_attach_detach(BPF_CGROUP_GETSOCKOPT);
+	if (test__start_subtest("preorder_link_attach_detach"))
+		test_preorder_link_attach_detach(BPF_CGROUP_GETSOCKOPT);
+	if (test__start_subtest("invalid_attach_detach"))
+		test_invalid_attach_detach(BPF_CGROUP_GETSOCKOPT);
+}
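To summarize the API exercised above: multi-prog cgroup attachments form an ordered chain, BPF_F_BEFORE/BPF_F_AFTER position a program relative to an anchor named by relative_fd or relative_id, and expected_revision makes the update fail if the chain changed since it was last queried. A hedged sketch (fd variables assumed to be loaded getsockopt programs; error handling elided):

	/* Sketch, not part of the patch: insert prog_b directly before
	 * prog_a in a cgroup's getsockopt chain, but only if the chain is
	 * still at revision 2.
	 */
	LIBBPF_OPTS(bpf_prog_attach_opts, opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE,
		.relative_fd = prog_a_fd,
		.expected_revision = 2,
	);

	err = bpf_prog_attach_opts(prog_b_fd, cg_fd, BPF_CGROUP_GETSOCKOPT, &opta);
	/* err is negative if the anchor is bad or the revision moved on. */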
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c
new file mode 100644
index 000000000000..a36d2e968bc5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "cgroup_preorder.skel.h"
+
+static int run_getsockopt_test(int cg_parent, int sock_fd, bool has_relative_fd)
+{
+	LIBBPF_OPTS(bpf_prog_attach_opts, opts);
+	enum bpf_attach_type prog_p_atype, prog_p2_atype;
+	int prog_p_fd, prog_p2_fd;
+	struct cgroup_preorder *skel = NULL;
+	struct bpf_program *prog;
+	__u8 *result, buf;
+	socklen_t optlen = 1;
+	int err = 0;
+
+	skel = cgroup_preorder__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "cgroup_preorder__open_and_load"))
+		return 0;
+
+	LIBBPF_OPTS_RESET(opts);
+	opts.flags = BPF_F_ALLOW_MULTI;
+	prog = skel->progs.parent;
+	prog_p_fd = bpf_program__fd(prog);
+	prog_p_atype = bpf_program__expected_attach_type(prog);
+	err = bpf_prog_attach_opts(prog_p_fd, cg_parent, prog_p_atype, &opts);
+	if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent"))
+		goto close_skel;
+
+	opts.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE;
+	if (has_relative_fd)
+		opts.relative_fd = prog_p_fd;
+	prog = skel->progs.parent_2;
+	prog_p2_fd = bpf_program__fd(prog);
+	prog_p2_atype = bpf_program__expected_attach_type(prog);
+	err = bpf_prog_attach_opts(prog_p2_fd, cg_parent, prog_p2_atype, &opts);
+	if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent_2"))
+		goto detach_parent;
+
+	err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
+	if (!ASSERT_OK(err, "getsockopt"))
+		goto detach_parent_2;
+
+	result = skel->bss->result;
+	ASSERT_TRUE(result[0] == 4 && result[1] == 3, "result values");
+
+detach_parent_2:
+	ASSERT_OK(bpf_prog_detach2(prog_p2_fd, cg_parent, prog_p2_atype),
+		  "bpf_prog_detach2-parent_2");
+detach_parent:
+	ASSERT_OK(bpf_prog_detach2(prog_p_fd, cg_parent, prog_p_atype),
+		  "bpf_prog_detach2-parent");
+close_skel:
+	cgroup_preorder__destroy(skel);
+	return err;
+}
+
+void test_cgroup_mprog_ordering(void)
+{
+	int cg_parent = -1, sock_fd = -1;
+
+	cg_parent = test__join_cgroup("/parent");
+	if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent"))
+		goto out;
+
+	sock_fd = socket(AF_INET, SOCK_STREAM, 0);
+	if (!ASSERT_GE(sock_fd, 0, "socket"))
+		goto out;
+
+	ASSERT_OK(run_getsockopt_test(cg_parent, sock_fd, false), "getsockopt_test_1");
+	ASSERT_OK(run_getsockopt_test(cg_parent, sock_fd, true), "getsockopt_test_2");
+
+out:
+	close(sock_fd);
+	close(cg_parent);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c b/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c
index 87978a0f7eb7..e0dd966e4a3e 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c
@@ -7,133 +7,60 @@
 #include <string.h>
 #include <unistd.h>
 #include <sys/socket.h>
-#include <sys/xattr.h>
-
 #include <test_progs.h>
+#include "cgroup_helpers.h"
 #include "read_cgroupfs_xattr.skel.h"
 #include "cgroup_read_xattr.skel.h"

-#define CGROUP_FS_ROOT "/sys/fs/cgroup/"
-#define CGROUP_FS_PARENT CGROUP_FS_ROOT "foo/"
+#define CGROUP_FS_PARENT "foo/"
 #define CGROUP_FS_CHILD CGROUP_FS_PARENT "bar/"
-
-static int move_pid_to_cgroup(const char *cgroup_folder, pid_t pid)
-{
-	char filename[128];
-	char pid_str[64];
-	int procs_fd;
-	int ret;
-
-	snprintf(filename, sizeof(filename), "%scgroup.procs", cgroup_folder);
-	snprintf(pid_str, sizeof(pid_str), "%d", pid);
-
-	procs_fd = open(filename, O_WRONLY | O_APPEND);
-	if (!ASSERT_OK_FD(procs_fd, "open"))
-		return -1;
-
-	ret = write(procs_fd, pid_str, strlen(pid_str));
-	close(procs_fd);
-	if (!ASSERT_GT(ret, 0, "write cgroup.procs"))
-		return -1;
-	return 0;
-}
-
-static void reset_cgroups_and_lo(void)
-{
-	rmdir(CGROUP_FS_CHILD);
-	rmdir(CGROUP_FS_PARENT);
-	system("ip addr del 1.1.1.1/32 dev lo");
-	system("ip link set dev lo down");
-}
+#define TMP_FILE "/tmp/selftests_cgroup_xattr"

 static const char xattr_value_a[] = "bpf_selftest_value_a";
 static const char xattr_value_b[] = "bpf_selftest_value_b";
 static const char xattr_name[] = "user.bpf_test";

-static int setup_cgroups_and_lo(void)
-{
-	int err;
-
-	err = mkdir(CGROUP_FS_PARENT, 0755);
-	if (!ASSERT_OK(err, "mkdir 1"))
-		goto error;
-	err = mkdir(CGROUP_FS_CHILD, 0755);
-	if (!ASSERT_OK(err, "mkdir 2"))
-		goto error;
-
-	err = setxattr(CGROUP_FS_PARENT, xattr_name, xattr_value_a,
-		       strlen(xattr_value_a) + 1, 0);
-	if (!ASSERT_OK(err, "setxattr 1"))
-		goto error;
-
-	err = setxattr(CGROUP_FS_CHILD, xattr_name, xattr_value_b,
-		       strlen(xattr_value_b) + 1, 0);
-	if (!ASSERT_OK(err, "setxattr 2"))
-		goto error;
-
-	err = system("ip link set dev lo up");
-	if (!ASSERT_OK(err, "lo up"))
-		goto error;
-
-	err = system("ip addr add 1.1.1.1 dev lo");
-	if (!ASSERT_OK(err, "lo addr v4"))
-		goto error;
-
-	err = write_sysctl("/proc/sys/net/ipv4/ping_group_range", "0 0");
-	if (!ASSERT_OK(err, "write_sysctl"))
-		goto error;
-
-	return 0;
-error:
-	reset_cgroups_and_lo();
-	return err;
-}
-
 static void test_read_cgroup_xattr(void)
 {
-	struct sockaddr_in sa4 = {
-		.sin_family = AF_INET,
-		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
-	};
+	int tmp_fd, parent_cgroup_fd = -1, child_cgroup_fd = -1;
	struct read_cgroupfs_xattr *skel = NULL;
-	pid_t pid = gettid();
-	int sock_fd = -1;
-	int connect_fd = -1;

-	if (!ASSERT_OK(setup_cgroups_and_lo(), "setup_cgroups_and_lo"))
+	parent_cgroup_fd = test__join_cgroup(CGROUP_FS_PARENT);
+	if (!ASSERT_OK_FD(parent_cgroup_fd, "create parent cgroup"))
		return;
-	if (!ASSERT_OK(move_pid_to_cgroup(CGROUP_FS_CHILD, pid),
-		       "move_pid_to_cgroup"))
+	if (!ASSERT_OK(set_cgroup_xattr(CGROUP_FS_PARENT, xattr_name, xattr_value_a),
+		       "set parent xattr"))
+		goto out;
+
+	child_cgroup_fd = test__join_cgroup(CGROUP_FS_CHILD);
+	if (!ASSERT_OK_FD(child_cgroup_fd, "create child cgroup"))
+		goto out;
+	if (!ASSERT_OK(set_cgroup_xattr(CGROUP_FS_CHILD, xattr_name, xattr_value_b),
+		       "set child xattr"))
		goto out;

	skel = read_cgroupfs_xattr__open_and_load();
	if (!ASSERT_OK_PTR(skel, "read_cgroupfs_xattr__open_and_load"))
		goto out;

-	skel->bss->target_pid = pid;
+	skel->bss->target_pid = gettid();

	if (!ASSERT_OK(read_cgroupfs_xattr__attach(skel),
		       "read_cgroupfs_xattr__attach"))
		goto out;

-	sock_fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
-	if (!ASSERT_OK_FD(sock_fd, "sock create"))
-		goto out;
-
-	connect_fd = connect(sock_fd, &sa4, sizeof(sa4));
-	if (!ASSERT_OK_FD(connect_fd, "connect 1"))
-		goto out;
-	close(connect_fd);
+	tmp_fd = open(TMP_FILE, O_RDONLY | O_CREAT, 0644);
+	ASSERT_OK_FD(tmp_fd, "open tmp file");
+	close(tmp_fd);

	ASSERT_TRUE(skel->bss->found_value_a, "found_value_a");
	ASSERT_TRUE(skel->bss->found_value_b, "found_value_b");

 out:
-	close(connect_fd);
-	close(sock_fd);
+	close(child_cgroup_fd);
+	close(parent_cgroup_fd);
	read_cgroupfs_xattr__destroy(skel);
-	move_pid_to_cgroup(CGROUP_FS_ROOT, pid);
-	reset_cgroups_and_lo();
+	unlink(TMP_FILE);
 }

 void test_cgroup_xattr(void)
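The BPF side of this test (read_cgroupfs_xattr.bpf.c, not part of this hunk) walks the triggering task's cgroup ancestors and reads the xattrs set above. A rough, hypothetical sketch of that pattern, assuming the bpf_cgroup_read_xattr() kfunc from the companion kernel series; the attach point and globals mirror the test's naming, not a stable API:

	/* Hypothetical sketch; bpf_cgroup_read_xattr() is assumed from the
	 * companion series, and the ancestor walk / RCU details are elided.
	 */
	__u32 target_pid;
	bool found_value_a;
	char buf[32];

	SEC("lsm.s/file_open")		/* attach point illustrative */
	int BPF_PROG(read_xattr, struct file *file)
	{
		struct bpf_dynptr value_ptr;
		struct cgroup *cgrp;

		if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
			return 0;

		bpf_dynptr_from_mem(buf, sizeof(buf), 0, &value_ptr);
		cgrp = bpf_get_current_task_btf()->cgroups->dfl_cgrp;
		if (bpf_cgroup_read_xattr(cgrp, "user.bpf_test", &value_ptr) > 0)
			found_value_a = true;
		return 0;
	}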
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index 62e7ec775f24..9b2d9ceda210 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -21,6 +21,14 @@ static struct {
	{"test_dynptr_data", SETUP_SYSCALL_SLEEP},
	{"test_dynptr_copy", SETUP_SYSCALL_SLEEP},
	{"test_dynptr_copy_xdp", SETUP_XDP_PROG},
+	{"test_dynptr_memset_zero", SETUP_SYSCALL_SLEEP},
+	{"test_dynptr_memset_notzero", SETUP_SYSCALL_SLEEP},
+	{"test_dynptr_memset_zero_offset", SETUP_SYSCALL_SLEEP},
+	{"test_dynptr_memset_zero_adjusted", SETUP_SYSCALL_SLEEP},
+	{"test_dynptr_memset_overflow", SETUP_SYSCALL_SLEEP},
+	{"test_dynptr_memset_overflow_offset", SETUP_SYSCALL_SLEEP},
+	{"test_dynptr_memset_readonly", SETUP_SKB_PROG},
+	{"test_dynptr_memset_xdp_chunks", SETUP_XDP_PROG},
	{"test_ringbuf", SETUP_SYSCALL_SLEEP},
	{"test_skb_readonly", SETUP_SKB_PROG},
	{"test_dynptr_skb_data", SETUP_SKB_PROG},
@@ -43,6 +51,8 @@ static struct {
	{"test_copy_from_user_task_str_dynptr", SETUP_SYSCALL_SLEEP},
 };

+#define PAGE_SIZE_64K 65536
+
 static void verify_success(const char *prog_name, enum test_setup_type setup_type)
 {
	char user_data[384] = {[0 ... 382] = 'a', '\0'};
@@ -138,14 +148,18 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ
	}
	case SETUP_XDP_PROG:
	{
-		char data[5000];
+		char data[90000];
		int err, prog_fd;
		LIBBPF_OPTS(bpf_test_run_opts, opts,
			    .data_in = &data,
-			    .data_size_in = sizeof(data),
			    .repeat = 1,
		);

+		if (getpagesize() == PAGE_SIZE_64K)
+			opts.data_size_in = sizeof(data);
+		else
+			opts.data_size_in = 5000;
+
		prog_fd = bpf_program__fd(prog);
		err = bpf_prog_test_run_opts(prog_fd, &opts);
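The new memset subtests above target a dynptr fill kfunc; a hedged sketch of the zero-fill case, with the (dynptr, offset, size, value) signature assumed from the companion progs rather than quoted from them:

	/* Sketch of the BPF side; bpf_dynptr_memset() is assumed from the
	 * series this test belongs to.
	 */
	int err;

	SEC("?tp/syscalls/sys_enter_nanosleep")
	int test_dynptr_memset_zero(void *ctx)
	{
		char buf[32];
		struct bpf_dynptr ptr;

		bpf_dynptr_from_mem(buf, sizeof(buf), 0, &ptr);

		/* Fill the whole dynptr with zeroes; the sibling subtests
		 * cover nonzero values, offsets, and overflow rejection.
		 */
		if (bpf_dynptr_memset(&ptr, 0, sizeof(buf), 0))
			err = 1;
		return 0;
	}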
diff --git a/tools/testing/selftests/bpf/prog_tests/fd_array.c b/tools/testing/selftests/bpf/prog_tests/fd_array.c
index 9add890c2d37..241b2c8c6e0f 100644
--- a/tools/testing/selftests/bpf/prog_tests/fd_array.c
+++ b/tools/testing/selftests/bpf/prog_tests/fd_array.c
@@ -312,7 +312,7 @@ static void check_fd_array_cnt__referenced_btfs(void)

	/* btf should still exist when original file descriptor is closed */
	err = get_btf_id_by_fd(extra_fds[0], &btf_id);
-	if (!ASSERT_GE(err, 0, "get_btf_id_by_fd"))
+	if (!ASSERT_EQ(err, 0, "get_btf_id_by_fd"))
		goto cleanup;

	Close(extra_fds[0]);
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_noreturns.c b/tools/testing/selftests/bpf/prog_tests/fexit_noreturns.c
deleted file mode 100644
index 568d3aa48a78..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/fexit_noreturns.c
+++ /dev/null
@@ -1,9 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <test_progs.h>
-#include "fexit_noreturns.skel.h"
-
-void test_fexit_noreturns(void)
-{
-	RUN_TESTS(fexit_noreturns);
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 5266c7022863..14c5a7ef0e87 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -72,7 +72,7 @@ static struct {
	{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
	{ "obj_new_acq", "Unreleased reference id=" },
	{ "use_after_drop", "invalid mem access 'scalar'" },
-	{ "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" },
+	{ "ptr_walk_scalar", "type=rdonly_untrusted_mem expected=percpu_ptr_" },
	{ "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_read_head", "direct access to bpf_list_head is disallowed" },
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
index 169ce689b97c..d6f14a232002 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_buf.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c
@@ -7,6 +7,10 @@
 #include "test_log_buf.skel.h"
 #include "bpf_util.h"

+#if !defined(__clang__)
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
 static size_t libbpf_log_pos;
 static char libbpf_log_buf[1024 * 1024];
 static bool libbpf_log_error;
diff --git a/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c
new file mode 100644
index 000000000000..40d4f687bd9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <test_progs.h>
+#include "mem_rdonly_untrusted.skel.h"
+
+void test_mem_rdonly_untrusted(void)
+{
+	RUN_TESTS(mem_rdonly_untrusted);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index 39d42271cc46..e261b0e872db 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -465,6 +465,20 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
		return range_improve(x_t, x, x_swap);
	}

+	if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
+		if (x_t == S64 && x.a > x.b) {
+			if (x.b < y.a && x.a <= y.b)
+				return range(x_t, x.a, y.b);
+			if (x.a > y.b && x.b >= y.a)
+				return range(x_t, y.a, x.b);
+		} else if (x_t == U64 && y.a > y.b) {
+			if (y.b < x.a && y.a <= x.b)
+				return range(x_t, y.a, x.b);
+			if (y.a > x.b && y.b >= x.a)
+				return range(x_t, x.a, y.b);
+		}
+	}
+
	/* otherwise, plain range cast and intersection works */
	return range_improve(x_t, x, y_cast);
 }
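One way to read the new branch in range_refine(): an s64 range whose a > b is the cast artifact of a u64 range crossing the signed boundary, so intersecting across signedness needs the explicit cases added above. A worked example (values chosen for exposition, not taken from the test tables):

	/*
	 * x: S64 view of u64 [0x7fffffffffffffff, 0x8000000000000001]
	 *    => x.a = INT64_MAX, x.b = INT64_MIN + 1, hence x.a > x.b
	 * y: U64 range [0x8000000000000000, 0x8000000000000002]
	 *    => as s64: y.a = INT64_MIN, y.b = INT64_MIN + 2
	 *
	 * The second S64 case matches (x.a > y.b and x.b >= y.a) and yields
	 * range(S64, y.a, x.b), i.e. u64 [0x8000000000000000,
	 * 0x8000000000000001], which is exactly the overlap of the two input
	 * ranges; a plain cast-and-intersect would have missed it.
	 */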
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index da430df45aa4..d1e4cb28a72c 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -97,7 +97,7 @@ static void ringbuf_write_subtest(void)
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

-	skel->maps.ringbuf.max_entries = 0x4000;
+	skel->maps.ringbuf.max_entries = 0x40000;

	err = test_ringbuf_write_lskel__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
@@ -108,7 +108,7 @@ static void ringbuf_write_subtest(void)
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
		goto cleanup;
-	*mmap_ptr = 0x3000;
+	*mmap_ptr = 0x30000;
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	skel->bss->pid = getpid();
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
index a4517bee34d5..27781df8f2fb 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
@@ -1,11 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0
 // Copyright (c) 2024 Meta

+#include <poll.h>
 #include <test_progs.h>
 #include "network_helpers.h"
 #include "sock_iter_batch.skel.h"

 #define TEST_NS "sock_iter_batch_netns"
+#define TEST_CHILD_NS "sock_iter_batch_child_netns"
 static const int init_batch_size = 16;
 static const int nr_soreuse = 4;

@@ -118,6 +120,45 @@ done:
	return nth_sock_idx;
 }

+static void destroy(int fd)
+{
+	struct sock_iter_batch *skel = NULL;
+	__u64 cookie = socket_cookie(fd);
+	struct bpf_link *link = NULL;
+	int iter_fd = -1;
+	int nread;
+	__u64 out;
+
+	skel = sock_iter_batch__open();
+	if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open"))
+		goto done;
+
+	skel->rodata->destroy_cookie = cookie;
+
+	if (!ASSERT_OK(sock_iter_batch__load(skel), "sock_iter_batch__load"))
+		goto done;
+
+	link = bpf_program__attach_iter(skel->progs.iter_tcp_destroy, NULL);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter"))
+		goto done;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create"))
+		goto done;
+
+	/* Delete matching socket. */
+	nread = read(iter_fd, &out, sizeof(out));
+	ASSERT_GE(nread, 0, "nread");
+	if (nread)
+		ASSERT_EQ(out, cookie, "cookie matches");
+done:
+	if (iter_fd >= 0)
+		close(iter_fd);
+	bpf_link__destroy(link);
+	sock_iter_batch__destroy(skel);
+	close(fd);
+}
+
 static int get_seen_count(int fd, struct sock_count counts[], int n)
 {
	__u64 cookie = socket_cookie(fd);
@@ -152,8 +193,71 @@ static void check_n_were_seen_once(int *fds, int fds_len, int n,
	ASSERT_EQ(seen_once, n, "seen_once");
 }

+static int accept_from_one(struct pollfd *server_poll_fds,
+			   int server_poll_fds_len)
+{
+	static const int poll_timeout_ms = 5000; /* 5s */
+	int ret;
+	int i;
+
+	ret = poll(server_poll_fds, server_poll_fds_len, poll_timeout_ms);
+	if (!ASSERT_EQ(ret, 1, "poll"))
+		return -1;
+
+	for (i = 0; i < server_poll_fds_len; i++)
+		if (server_poll_fds[i].revents & POLLIN)
+			return accept(server_poll_fds[i].fd, NULL, NULL);
+
+	return -1;
+}
+
+static int *connect_to_server(int family, int sock_type, const char *addr,
+			      __u16 port, int nr_connects, int *server_fds,
+			      int server_fds_len)
+{
+	struct pollfd *server_poll_fds = NULL;
+	int *established_socks = NULL;
+	int i;
+
+	server_poll_fds = calloc(server_fds_len, sizeof(*server_poll_fds));
+	if (!ASSERT_OK_PTR(server_poll_fds, "server_poll_fds"))
+		return NULL;
+
+	for (i = 0; i < server_fds_len; i++) {
+		server_poll_fds[i].fd = server_fds[i];
+		server_poll_fds[i].events = POLLIN;
+	}
+
+	i = 0;
+
+	established_socks = malloc(sizeof(*established_socks) * nr_connects * 2);
+	if (!ASSERT_OK_PTR(established_socks, "established_socks"))
+		goto error;
+
+	while (nr_connects--) {
+		established_socks[i] = connect_to_addr_str(family, sock_type,
+							   addr, port, NULL);
+		if (!ASSERT_OK_FD(established_socks[i], "connect_to_addr_str"))
+			goto error;
+		i++;
+		established_socks[i] = accept_from_one(server_poll_fds,
+						       server_fds_len);
+		if (!ASSERT_OK_FD(established_socks[i], "accept_from_one"))
+			goto error;
+		i++;
+	}
+
+	free(server_poll_fds);
+	return established_socks;
+error:
+	free_fds(established_socks, i);
+	free(server_poll_fds);
+	return NULL;
+}
+
 static void remove_seen(int family, int sock_type, const char *addr, __u16 port,
-			int *socks, int socks_len, struct sock_count *counts,
+			int *socks, int socks_len, int *established_socks,
+			int established_socks_len, struct sock_count *counts,
			int counts_len, struct bpf_link *link, int iter_fd)
 {
	int close_idx;
@@ -182,8 +286,46 @@ static void remove_seen(int family, int sock_type, const char *addr, __u16 port,
			       counts_len);
 }

+static void remove_seen_established(int family, int sock_type, const char *addr,
+				    __u16 port, int *listen_socks,
+				    int listen_socks_len, int *established_socks,
+				    int established_socks_len,
+				    struct sock_count *counts, int counts_len,
+				    struct bpf_link *link, int iter_fd)
+{
+	int close_idx;
+
+	/* Iterate through all listening sockets. */
+	read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+	/* Make sure we saw all listening sockets exactly once. */
+	check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+			       counts, counts_len);
+
+	/* Leave one established socket. */
+	read_n(iter_fd, established_socks_len - 1, counts, counts_len);
+
+	/* Close a socket we've already seen to remove it from the bucket. */
+	close_idx = get_nth_socket(established_socks, established_socks_len,
+				   link, listen_socks_len + 1);
+	if (!ASSERT_GE(close_idx, 0, "close_idx"))
+		return;
+	destroy(established_socks[close_idx]);
+	established_socks[close_idx] = -1;
+
+	/* Iterate through the rest of the sockets. */
+	read_n(iter_fd, -1, counts, counts_len);
+
+	/* Make sure the last socket wasn't skipped and that there were no
+	 * repeats.
+	 */
+	check_n_were_seen_once(established_socks, established_socks_len,
+			       established_socks_len - 1, counts, counts_len);
+}
+
 static void remove_unseen(int family, int sock_type, const char *addr,
			  __u16 port, int *socks, int socks_len,
+			  int *established_socks, int established_socks_len,
			  struct sock_count *counts, int counts_len,
			  struct bpf_link *link, int iter_fd)
 {
@@ -214,8 +356,54 @@ static void remove_unseen(int family, int sock_type, const char *addr,
			       counts_len);
 }

+static void remove_unseen_established(int family, int sock_type,
+				      const char *addr, __u16 port,
+				      int *listen_socks, int listen_socks_len,
+				      int *established_socks,
+				      int established_socks_len,
+				      struct sock_count *counts, int counts_len,
+				      struct bpf_link *link, int iter_fd)
+{
+	int close_idx;
+
+	/* Iterate through all listening sockets. */
+	read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+	/* Make sure we saw all listening sockets exactly once. */
+	check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+			       counts, counts_len);
+
+	/* Iterate through the first established socket. */
+	read_n(iter_fd, 1, counts, counts_len);
+
+	/* Make sure we saw one established socket. */
+	check_n_were_seen_once(established_socks, established_socks_len, 1,
+			       counts, counts_len);
+
+	/* Close what would be the next socket in the bucket to exercise the
+	 * condition where we need to skip past the first cookie we remembered.
+	 */
+	close_idx = get_nth_socket(established_socks, established_socks_len,
+				   link, listen_socks_len + 1);
+	if (!ASSERT_GE(close_idx, 0, "close_idx"))
+		return;
+
+	destroy(established_socks[close_idx]);
+	established_socks[close_idx] = -1;
+
+	/* Iterate through the rest of the sockets. */
+	read_n(iter_fd, -1, counts, counts_len);
+
+	/* Make sure the remaining sockets were seen exactly once and that we
+	 * didn't repeat the socket that was already seen.
+	 */
+	check_n_were_seen_once(established_socks, established_socks_len,
+			       established_socks_len - 1, counts, counts_len);
+}
+
 static void remove_all(int family, int sock_type, const char *addr,
		       __u16 port, int *socks, int socks_len,
+		       int *established_socks, int established_socks_len,
		       struct sock_count *counts, int counts_len,
		       struct bpf_link *link, int iter_fd)
 {
@@ -242,8 +430,57 @@ static void remove_all(int family, int sock_type, const char *addr,
	ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n");
 }

+static void remove_all_established(int family, int sock_type, const char *addr,
+				   __u16 port, int *listen_socks,
+				   int listen_socks_len, int *established_socks,
+				   int established_socks_len,
+				   struct sock_count *counts, int counts_len,
+				   struct bpf_link *link, int iter_fd)
+{
+	int *close_idx = NULL;
+	int i;
+
+	/* Iterate through all listening sockets. */
+	read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+	/* Make sure we saw all listening sockets exactly once. */
+	check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+			       counts, counts_len);
+
+	/* Iterate through the first established socket. */
+	read_n(iter_fd, 1, counts, counts_len);
+
+	/* Make sure we saw one established socket. */
+	check_n_were_seen_once(established_socks, established_socks_len, 1,
+			       counts, counts_len);
+
+	/* Close all remaining sockets to exhaust the list of saved cookies and
+	 * exit without putting any sockets into the batch on the next read.
+	 */
+	close_idx = malloc(sizeof(int) * (established_socks_len - 1));
+	if (!ASSERT_OK_PTR(close_idx, "close_idx malloc"))
+		return;
+	for (i = 0; i < established_socks_len - 1; i++) {
+		close_idx[i] = get_nth_socket(established_socks,
+					      established_socks_len, link,
+					      listen_socks_len + i);
+		if (!ASSERT_GE(close_idx[i], 0, "close_idx"))
+			return;
+	}
+
+	for (i = 0; i < established_socks_len - 1; i++) {
+		destroy(established_socks[close_idx[i]]);
+		established_socks[close_idx[i]] = -1;
+	}
+
+	/* Make sure there are no more sockets returned */
+	ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n");
+	free(close_idx);
+}
+
 static void add_some(int family, int sock_type, const char *addr, __u16 port,
-		     int *socks, int socks_len, struct sock_count *counts,
+		     int *socks, int socks_len, int *established_socks,
+		     int established_socks_len, struct sock_count *counts,
		     int counts_len, struct bpf_link *link, int iter_fd)
 {
	int *new_socks = NULL;
@@ -271,8 +508,52 @@ done:
	free_fds(new_socks, socks_len);
 }

+static void add_some_established(int family, int sock_type, const char *addr,
+				 __u16 port, int *listen_socks,
+				 int listen_socks_len, int *established_socks,
+				 int established_socks_len,
+				 struct sock_count *counts,
+				 int counts_len, struct bpf_link *link,
+				 int iter_fd)
+{
+	int *new_socks = NULL;
+
+	/* Iterate through all listening sockets. */
+	read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+	/* Make sure we saw all listening sockets exactly once. */
+	check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+			       counts, counts_len);
+
+	/* Iterate through the first established_socks_len - 1 sockets. */
+	read_n(iter_fd, established_socks_len - 1, counts, counts_len);
+
+	/* Make sure we saw established_socks_len - 1 sockets exactly once. */
+	check_n_were_seen_once(established_socks, established_socks_len,
+			       established_socks_len - 1, counts, counts_len);
+
+	/* Double the number of established sockets in the bucket. */
+	new_socks = connect_to_server(family, sock_type, addr, port,
+				      established_socks_len / 2, listen_socks,
+				      listen_socks_len);
+	if (!ASSERT_OK_PTR(new_socks, "connect_to_server"))
+		goto done;
+
+	/* Iterate through the rest of the sockets. */
+	read_n(iter_fd, -1, counts, counts_len);
+
+	/* Make sure each of the original sockets was seen exactly once. */
*/ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len, counts, counts_len); +done: + free_fds(new_socks, established_socks_len); +} + static void force_realloc(int family, int sock_type, const char *addr, __u16 port, int *socks, int socks_len, + int *established_socks, int established_socks_len, struct sock_count *counts, int counts_len, struct bpf_link *link, int iter_fd) { @@ -299,11 +580,32 @@ done: free_fds(new_socks, socks_len); } +static void force_realloc_established(int family, int sock_type, + const char *addr, __u16 port, + int *listen_socks, int listen_socks_len, + int *established_socks, + int established_socks_len, + struct sock_count *counts, int counts_len, + struct bpf_link *link, int iter_fd) +{ + /* Iterate through all sockets to trigger a realloc. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure each socket was seen exactly once. */ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len, counts, counts_len); +} + struct test_case { void (*test)(int family, int sock_type, const char *addr, __u16 port, - int *socks, int socks_len, struct sock_count *counts, + int *socks, int socks_len, int *established_socks, + int established_socks_len, struct sock_count *counts, int counts_len, struct bpf_link *link, int iter_fd); const char *description; + int ehash_buckets; + int connections; int init_socks; int max_socks; int sock_type; @@ -358,18 +660,140 @@ static struct test_case resume_tests[] = { .family = AF_INET6, .test = force_realloc, }, + { + .description = "tcp: resume after removing a seen socket (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_seen, + }, + { + .description = "tcp: resume after removing one unseen socket (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_unseen, + }, + { + .description = "tcp: resume after removing all unseen sockets (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_all, + }, + { + .description = "tcp: resume after adding a few sockets (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + /* Use AF_INET so that new sockets are added to the head of the + * bucket's list. + */ + .family = AF_INET, + .test = add_some, + }, + { + .description = "tcp: force a realloc to occur (listening)", + .init_socks = init_batch_size, + .max_socks = init_batch_size * 2, + .sock_type = SOCK_STREAM, + /* Use AF_INET6 so that new sockets are added to the tail of the + * bucket's list, needing to be added to the next batch to force + * a realloc. 
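+ * (AF_INET would prepend instead, placing the new sockets before the
+ * resume point where they could never overflow the follow-up batch.)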
+ */ + .family = AF_INET6, + .test = force_realloc, + }, + { + .description = "tcp: resume after removing a seen socket (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_seen_established, + }, + { + .description = "tcp: resume after removing one unseen socket (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_unseen_established, + }, + { + .description = "tcp: resume after removing all unseen sockets (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_all_established, + }, + { + .description = "tcp: resume after adding a few sockets (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = add_some_established, + }, + { + .description = "tcp: force a realloc to occur (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + /* Bucket size will need to double when going from listening to + * established sockets. 
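+ * (connections = init_batch_size produces init_batch_size * 2
+ * established sockets, all hashed into the single ehash bucket
+ * configured above, so a batch sized for the listening phase cannot
+ * hold them without growing.)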
+ */ + .connections = init_batch_size, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse + (init_batch_size * 2), + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = force_realloc_established, + }, }; static void do_resume_test(struct test_case *tc) { struct sock_iter_batch *skel = NULL; + struct sock_count *counts = NULL; static const __u16 port = 10001; + struct nstoken *nstoken = NULL; struct bpf_link *link = NULL; - struct sock_count *counts; + int *established_fds = NULL; int err, iter_fd = -1; const char *addr; int *fds = NULL; - int local_port; + + if (tc->ehash_buckets) { + SYS_NOFAIL("ip netns del " TEST_CHILD_NS); + SYS(done, "sysctl -wq net.ipv4.tcp_child_ehash_entries=%d", + tc->ehash_buckets); + SYS(done, "ip netns add %s", TEST_CHILD_NS); + SYS(done, "ip -net %s link set dev lo up", TEST_CHILD_NS); + nstoken = open_netns(TEST_CHILD_NS); + if (!ASSERT_OK_PTR(nstoken, "open_child_netns")) + goto done; + } counts = calloc(tc->max_socks, sizeof(*counts)); if (!ASSERT_OK_PTR(counts, "counts")) @@ -384,11 +808,18 @@ static void do_resume_test(struct test_case *tc) tc->init_socks); if (!ASSERT_OK_PTR(fds, "start_reuseport_server")) goto done; - local_port = get_socket_local_port(*fds); - if (!ASSERT_GE(local_port, 0, "get_socket_local_port")) - goto done; - skel->rodata->ports[0] = ntohs(local_port); + if (tc->connections) { + established_fds = connect_to_server(tc->family, tc->sock_type, + addr, port, + tc->connections, fds, + tc->init_socks); + if (!ASSERT_OK_PTR(established_fds, "connect_to_server")) + goto done; + } + skel->rodata->ports[0] = 0; + skel->rodata->ports[1] = 0; skel->rodata->sf = tc->family; + skel->rodata->ss = 0; err = sock_iter_batch__load(skel); if (!ASSERT_OK(err, "sock_iter_batch__load")) @@ -406,10 +837,15 @@ static void do_resume_test(struct test_case *tc) goto done; tc->test(tc->family, tc->sock_type, addr, port, fds, tc->init_socks, - counts, tc->max_socks, link, iter_fd); + established_fds, tc->connections*2, counts, tc->max_socks, + link, iter_fd); done: + close_netns(nstoken); + SYS_NOFAIL("ip netns del " TEST_CHILD_NS); + SYS_NOFAIL("sysctl -w net.ipv4.tcp_child_ehash_entries=0"); free(counts); free_fds(fds, tc->init_socks); + free_fds(established_fds, tc->connections*2); if (iter_fd >= 0) close(iter_fd); bpf_link__destroy(link); @@ -454,6 +890,8 @@ static void do_test(int sock_type, bool onebyone) skel->rodata->ports[i] = ntohs(local_port); } skel->rodata->sf = AF_INET6; + if (sock_type == SOCK_STREAM) + skel->rodata->ss = TCP_LISTEN; err = sock_iter_batch__load(skel); if (!ASSERT_OK(err, "sock_iter_batch__load")) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c index b6c471da5c28..b87e7f39e15a 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c @@ -314,6 +314,95 @@ out: test_sockmap_ktls__destroy(skel); } +static void test_sockmap_ktls_tx_pop(int family, int sotype) +{ + char msg[37] = "0123456789abcdefghijklmnopqrstuvwxyz\0"; + int c = 0, p = 0, one = 1, sent, recvd; + struct test_sockmap_ktls *skel; + int prog_fd, map_fd; + char rcv[50] = {0}; + int err; + int i, m, r; + + skel = test_sockmap_ktls__open_and_load(); + if (!ASSERT_TRUE(skel, "open ktls skel")) + return; + + err = create_pair(family, sotype, &c, &p); + if (!ASSERT_OK(err, "create_pair()")) + goto out; + + prog_fd = bpf_program__fd(skel->progs.prog_sk_policy); + map_fd = 
bpf_map__fd(skel->maps.sock_map); + + err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach sk msg")) + goto out; + + err = bpf_map_update_elem(map_fd, &one, &c, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(c)")) + goto out; + + err = init_ktls_pairs(c, p); + if (!ASSERT_OK(err, "init_ktls_pairs(c, p)")) + goto out; + + struct { + int pop_start; + int pop_len; + } pop_policy[] = { + /* trim the start */ + {0, 2}, + {0, 10}, + {1, 2}, + {1, 10}, + /* trim the end */ + {35, 2}, + /* New entries should be added before this line */ + {-1, -1}, + }; + + i = 0; + while (pop_policy[i].pop_start >= 0) { + skel->bss->pop_start = pop_policy[i].pop_start; + skel->bss->pop_end = pop_policy[i].pop_len; + + sent = send(c, msg, sizeof(msg), 0); + if (!ASSERT_EQ(sent, sizeof(msg), "send(msg)")) + goto out; + + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1); + if (!ASSERT_EQ(recvd, sizeof(msg) - pop_policy[i].pop_len, "pop len mismatch")) + goto out; + + /* verify the data + * msg: 0123456789a bcdefghij klmnopqrstuvwxyz + * | | + * popped data + */ + for (m = 0, r = 0; m < sizeof(msg);) { + /* skip checking the data that has been popped */ + if (m >= pop_policy[i].pop_start && + m <= pop_policy[i].pop_start + pop_policy[i].pop_len - 1) { + m++; + continue; + } + + if (!ASSERT_EQ(msg[m], rcv[r], "data mismatch")) + goto out; + m++; + r++; + } + i++; + } +out: + if (c) + close(c); + if (p) + close(p); + test_sockmap_ktls__destroy(skel); +} + static void run_tests(int family, enum bpf_map_type map_type) { int map; @@ -338,6 +427,8 @@ static void run_ktls_test(int family, int sotype) test_sockmap_ktls_tx_cork(family, sotype, true); if (test__start_subtest("tls tx egress with no buf")) test_sockmap_ktls_tx_no_buf(family, sotype, true); + if (test__start_subtest("tls tx with pop")) + test_sockmap_ktls_tx_pop(family, sotype); } void test_sockmap_ktls(void) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 1d98eee7a2c3..f1bdccc7e4e7 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -924,6 +924,8 @@ static void redir_partial(int family, int sotype, int sock_map, int parser_map) goto close; n = xsend(c1, buf, sizeof(buf), 0); + if (n == -1) + goto close; if (n < sizeof(buf)) FAIL("incomplete write"); diff --git a/tools/testing/selftests/bpf/prog_tests/stream.c b/tools/testing/selftests/bpf/prog_tests/stream.c new file mode 100644 index 000000000000..d9f0185dca61 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stream.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include <sys/mman.h> +#include <regex.h> + +#include "stream.skel.h" +#include "stream_fail.skel.h" + +void test_stream_failure(void) +{ + RUN_TESTS(stream_fail); +} + +void test_stream_success(void) +{ + RUN_TESTS(stream); + return; +} + +struct { + int prog_off; + const char *errstr; +} stream_error_arr[] = { + { + offsetof(struct stream, progs.stream_cond_break), + "ERROR: Timeout detected for may_goto instruction\n" + "CPU: [0-9]+ UID: 0 PID: [0-9]+ Comm: .*\n" + "Call trace:\n" + "([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n" + "|[ \t]+[^\n]+\n)*", + }, + { + offsetof(struct stream, progs.stream_deadlock), + "ERROR: AA or ABBA deadlock detected for bpf_res_spin_lock\n" + "Attempted lock = (0x[0-9a-fA-F]+)\n" + "Total held locks = 1\n" + "Held lock\\[ 0\\] = \\1\n" // Lock address must match + "CPU: [0-9]+ UID: 0 PID: [0-9]+ Comm: .*\n" + "Call trace:\n" + "([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n" + "|[ \t]+[^\n]+\n)*", + }, +}; + +static int match_regex(const char *pattern, const char *string) +{ + int err, rc; + regex_t re; + + err = regcomp(&re, pattern, REG_EXTENDED | REG_NEWLINE); + if (err) + return -1; + rc = regexec(&re, string, 0, NULL, 0); + regfree(&re); + return rc == 0 ? 1 : 0; +} + +void test_stream_errors(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts); + LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts); + struct stream *skel; + int ret, prog_fd; + char buf[1024]; + + skel = stream__open_and_load(); + if (!ASSERT_OK_PTR(skel, "stream__open_and_load")) + return; + + for (int i = 0; i < ARRAY_SIZE(stream_error_arr); i++) { + struct bpf_program **prog; + + prog = (struct bpf_program **)(((char *)skel) + stream_error_arr[i].prog_off); + prog_fd = bpf_program__fd(*prog); + ret = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_OK(ret, "ret"); + ASSERT_OK(opts.retval, "retval"); + +#if !defined(__x86_64__) + ASSERT_TRUE(1, "Timed may_goto unsupported, skip."); + if (i == 0) { + ret = bpf_prog_stream_read(prog_fd, 2, buf, sizeof(buf), &ropts); + ASSERT_EQ(ret, 0, "stream read"); + continue; + } +#endif + + ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDERR, buf, sizeof(buf), &ropts); + ASSERT_GT(ret, 0, "stream read"); + ASSERT_LE(ret, 1023, "len for buf"); + buf[ret] = '\0'; + + ret = match_regex(stream_error_arr[i].errstr, buf); + if (!ASSERT_TRUE(ret == 1, "regex match")) + fprintf(stderr, "Output from stream:\n%s\n", buf); + } + + stream__destroy(skel); +} + +void test_stream_syscall(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts); + LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts); + struct stream *skel; + int ret, prog_fd; + char buf[64]; + + skel = stream__open_and_load(); + if (!ASSERT_OK_PTR(skel, "stream__open_and_load")) + return; + + prog_fd = bpf_program__fd(skel->progs.stream_syscall); + ret = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_OK(ret, "ret"); + ASSERT_OK(opts.retval, "retval"); + + ASSERT_LT(bpf_prog_stream_read(0, BPF_STREAM_STDOUT, buf, sizeof(buf), &ropts), 0, "error"); + ret = -errno; + ASSERT_EQ(ret, -EINVAL, "bad prog_fd"); + + ASSERT_LT(bpf_prog_stream_read(prog_fd, 0, buf, sizeof(buf), &ropts), 0, "error"); + ret = -errno; + ASSERT_EQ(ret, -ENOENT, "bad stream id"); + + ASSERT_LT(bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, NULL, sizeof(buf), NULL), 0, "error"); + ret = -errno; + ASSERT_EQ(ret, -EFAULT, "bad stream buf"); + + ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 2, NULL); + ASSERT_EQ(ret, 2, "bytes"); + ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, 
buf, 2, NULL); + ASSERT_EQ(ret, 1, "bytes"); + ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 1, &ropts); + ASSERT_EQ(ret, 0, "no bytes stdout"); + ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDERR, buf, 1, &ropts); + ASSERT_EQ(ret, 0, "no bytes stderr"); + + stream__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c b/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c new file mode 100644 index 000000000000..35af8044d059 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include <test_progs.h> +#include "string_kfuncs_success.skel.h" +#include "string_kfuncs_failure1.skel.h" +#include "string_kfuncs_failure2.skel.h" +#include <sys/mman.h> + +static const char * const test_cases[] = { + "strcmp", + "strchr", + "strchrnul", + "strnchr", + "strrchr", + "strlen", + "strnlen", + "strspn_str", + "strspn_accept", + "strcspn_str", + "strcspn_reject", + "strstr", + "strnstr", +}; + +void run_too_long_tests(void) +{ + struct string_kfuncs_failure2 *skel; + struct bpf_program *prog; + char test_name[256]; + int err, i; + + skel = string_kfuncs_failure2__open_and_load(); + if (!ASSERT_OK_PTR(skel, "string_kfuncs_failure2__open_and_load")) + return; + + memset(skel->bss->long_str, 'a', sizeof(skel->bss->long_str)); + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + sprintf(test_name, "test_%s_too_long", test_cases[i]); + if (!test__start_subtest(test_name)) + continue; + + prog = bpf_object__find_program_by_name(skel->obj, test_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + LIBBPF_OPTS(bpf_test_run_opts, topts); + err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts); + if (!ASSERT_OK(err, "bpf_prog_test_run")) + goto cleanup; + + ASSERT_EQ(topts.retval, -E2BIG, "reading too long string fails with -E2BIG"); + } + +cleanup: + string_kfuncs_failure2__destroy(skel); +} + +void test_string_kfuncs(void) +{ + RUN_TESTS(string_kfuncs_success); + RUN_TESTS(string_kfuncs_failure1); + + run_too_long_tests(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index 66a900327f91..0ab36503c3b2 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -1195,7 +1195,7 @@ static void test_tailcall_hierarchy_count(const char *which, bool test_fentry, bool test_fexit, bool test_fentry_entry) { - int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val; + int err, map_fd, prog_fd, main_data_fd, fentry_data_fd = 0, fexit_data_fd = 0, i, val; struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL; struct bpf_link *fentry_link = NULL, *fexit_link = NULL; struct bpf_program *prog, *fentry_prog; diff --git a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h index 924d0e25320c..d52a62af77bf 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h @@ -8,34 +8,6 @@ # define loopback 1 #endif -static inline __u32 id_from_prog_fd(int fd) -{ - struct bpf_prog_info prog_info = {}; - __u32 prog_info_len = sizeof(prog_info); - int err; - - err = bpf_obj_get_info_by_fd(fd, &prog_info, &prog_info_len); - if (!ASSERT_OK(err, "id_from_prog_fd")) - return 0; - - ASSERT_NEQ(prog_info.id, 0, "prog_info.id"); - return prog_info.id; 
-} - -static inline __u32 id_from_link_fd(int fd) -{ - struct bpf_link_info link_info = {}; - __u32 link_info_len = sizeof(link_info); - int err; - - err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len); - if (!ASSERT_OK(err, "id_from_link_fd")) - return 0; - - ASSERT_NEQ(link_info.id, 0, "link_info.id"); - return link_info.id; -} - static inline __u32 ifindex_from_link_fd(int fd) { struct bpf_link_info link_info = {}; diff --git a/tools/testing/selftests/bpf/prog_tests/test_veristat.c b/tools/testing/selftests/bpf/prog_tests/test_veristat.c index 47b56c258f3f..367f47e4a936 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_veristat.c +++ b/tools/testing/selftests/bpf/prog_tests/test_veristat.c @@ -60,13 +60,19 @@ static void test_set_global_vars_succeeds(void) " -G \"var_s8 = -128\" "\ " -G \"var_u8 = 255\" "\ " -G \"var_ea = EA2\" "\ - " -G \"var_eb = EB2\" "\ - " -G \"var_ec = EC2\" "\ + " -G \"var_eb = EB2\" "\ + " -G \"var_ec=EC2\" "\ " -G \"var_b = 1\" "\ - " -G \"struct1.struct2.u.var_u8 = 170\" "\ + " -G \"struct1[2].struct2[1][2].u.var_u8[2]=170\" "\ " -G \"union1.struct3.var_u8_l = 0xaa\" "\ " -G \"union1.struct3.var_u8_h = 0xaa\" "\ - "-vl2 > %s", fix->veristat, fix->tmpfile); + " -G \"arr[3]= 171\" " \ + " -G \"arr[EA2] =172\" " \ + " -G \"enum_arr[EC2]=EA3\" " \ + " -G \"three_d[31][7][EA2]=173\"" \ + " -G \"struct1[2].struct2[1][2].u.mat[5][3]=174\" " \ + " -G \"struct11 [ 7 ] [ 5 ] .struct2[0][1].u.mat[3][0] = 175\" " \ + " -vl2 > %s", fix->veristat, fix->tmpfile); read(fix->fd, fix->output, fix->sz); __CHECK_STR("_w=0xf000000000000001 ", "var_s64 = 0xf000000000000001"); @@ -81,8 +87,14 @@ static void test_set_global_vars_succeeds(void) __CHECK_STR("_w=12 ", "var_eb = EB2"); __CHECK_STR("_w=13 ", "var_ec = EC2"); __CHECK_STR("_w=1 ", "var_b = 1"); - __CHECK_STR("_w=170 ", "struct1.struct2.u.var_u8 = 170"); + __CHECK_STR("_w=170 ", "struct1[2].struct2[1][2].u.var_u8[2]=170"); __CHECK_STR("_w=0xaaaa ", "union1.var_u16 = 0xaaaa"); + __CHECK_STR("_w=171 ", "arr[3]= 171"); + __CHECK_STR("_w=172 ", "arr[EA2] =172"); + __CHECK_STR("_w=10 ", "enum_arr[EC2]=EA3"); + __CHECK_STR("_w=173 ", "matrix[31][7][11]=173"); + __CHECK_STR("_w=174 ", "struct1[2].struct2[1][2].u.mat[5][3]=174"); + __CHECK_STR("_w=175 ", "struct11[7][5].struct2[0][1].u.mat[3][0]=175"); out: teardown_fixture(fix); @@ -129,6 +141,95 @@ out: teardown_fixture(fix); } +static void test_unsupported_ptr_array_type(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"ptr_arr[0] = 0\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + read(fix->fd, fix->output, fix->sz); + __CHECK_STR("Can't set ptr_arr[0]. 
Only ints and enums are supported", "ptr_arr"); + +out: + teardown_fixture(fix); +} + +static void test_array_out_of_bounds(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"arr[99] = 0\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + read(fix->fd, fix->output, fix->sz); + __CHECK_STR("Array index 99 is out of bounds", "arr[99]"); + +out: + teardown_fixture(fix); +} + +static void test_array_index_not_found(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"arr[EG2] = 0\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + read(fix->fd, fix->output, fix->sz); + __CHECK_STR("Can't resolve enum value EG2", "arr[EG2]"); + +out: + teardown_fixture(fix); +} + +static void test_array_index_for_non_array(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"var_b[0] = 1\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + pread(fix->fd, fix->output, fix->sz, 0); + __CHECK_STR("Array index is not expected for var_b", "var_b[0] = 1"); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"union1.struct3[0].var_u8_l=1\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + pread(fix->fd, fix->output, fix->sz, 0); + __CHECK_STR("Array index is not expected for struct3", "union1.struct3[0].var_u8_l=1"); + +out: + teardown_fixture(fix); +} + +static void test_no_array_index_for_array(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"arr = 1\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + pread(fix->fd, fix->output, fix->sz, 0); + __CHECK_STR("Can't set arr. Only ints and enums are supported", "arr = 1"); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"struct1[0].struct2.u.var_u8[2]=1\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + pread(fix->fd, fix->output, fix->sz, 0); + __CHECK_STR("Can't resolve field u for non-composite type", "struct1[0].struct2.u.var_u8[2]=1"); + +out: + teardown_fixture(fix); +} + void test_veristat(void) { if (test__start_subtest("set_global_vars_succeeds")) @@ -139,6 +240,22 @@ void test_veristat(void) if (test__start_subtest("set_global_vars_from_file_succeeds")) test_set_global_vars_from_file_succeeds(); + + if (test__start_subtest("test_unsupported_ptr_array_type")) + test_unsupported_ptr_array_type(); + + if (test__start_subtest("test_array_out_of_bounds")) + test_array_out_of_bounds(); + + if (test__start_subtest("test_array_index_not_found")) + test_array_index_not_found(); + + if (test__start_subtest("test_array_index_for_non_array")) + test_array_index_for_non_array(); + + if (test__start_subtest("test_no_array_index_for_array")) + test_no_array_index_for_array(); + } #undef __CHECK_STR diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c index f9392df23f8a..b81dde283052 100644 --- a/tools/testing/selftests/bpf/prog_tests/token.c +++ b/tools/testing/selftests/bpf/prog_tests/token.c @@ -115,7 +115,7 @@ static int create_bpffs_fd(void) static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts) { - int mnt_fd, err; + int err; /* set up token delegation mount options */ err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str); @@ -136,12 +136,7 @@ static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts) if (err < 0) return -errno; - /* create O_PATH fd for detached mount */ - mnt_fd = sys_fsmount(fs_fd, 0, 0); - if (err < 0) - return -errno; - - return mnt_fd; + return 0; } /* send FD over 
Unix domain (AF_UNIX) socket */
@@ -287,6 +282,7 @@ static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callba
 {
 int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1;
 struct token_lsm *lsm_skel = NULL;
+ char one;
 /* load and attach LSM "policy" before we go into unpriv userns */
 lsm_skel = token_lsm__open_and_load();
@@ -333,13 +329,19 @@ static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callba
 err = sendfd(sock_fd, fs_fd);
 if (!ASSERT_OK(err, "send_fs_fd"))
 goto cleanup;
- zclose(fs_fd);
+
+ /* wait until the parent reads the fd, does the fsconfig() calls,
+ * and sends us a signal that it is done
+ */
+ err = read(sock_fd, &one, sizeof(one));
+ if (!ASSERT_GE(err, 0, "read_one"))
+ goto cleanup;
 /* avoid mucking around with mount namespaces and mounting at
- * well-known path, just get detach-mounted BPF FS fd back from parent
+ * well-known path, just create O_PATH fd for detached mount
 */
- err = recvfd(sock_fd, &mnt_fd);
- if (!ASSERT_OK(err, "recv_mnt_fd"))
+ mnt_fd = sys_fsmount(fs_fd, 0, 0);
+ if (!ASSERT_OK_FD(mnt_fd, "mnt_fd"))
 goto cleanup;
 /* try to fspick() BPF FS and try to add some delegation options */
@@ -429,24 +431,24 @@ again:
 static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd)
 {
- int fs_fd = -1, mnt_fd = -1, token_fd = -1, err;
+ int fs_fd = -1, token_fd = -1, err;
+ char one = 1;
 err = recvfd(sock_fd, &fs_fd);
 if (!ASSERT_OK(err, "recv_bpffs_fd"))
 goto cleanup;
- mnt_fd = materialize_bpffs_fd(fs_fd, bpffs_opts);
- if (!ASSERT_GE(mnt_fd, 0, "materialize_bpffs_fd")) {
+ err = materialize_bpffs_fd(fs_fd, bpffs_opts);
+ if (!ASSERT_GE(err, 0, "materialize_bpffs_fd")) {
 err = -EINVAL;
 goto cleanup;
 }
- zclose(fs_fd);
- /* pass BPF FS context object to parent */
- err = sendfd(sock_fd, mnt_fd);
- if (!ASSERT_OK(err, "send_mnt_fd"))
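+ /* The resulting handshake between the two processes, end to end:
+ *
+ *   child:  sendfd(sock_fd, fs_fd)
+ *   parent: recvfd(), fsconfig() delegation options, write(sock_fd, &one, 1)
+ *   child:  read(sock_fd, &one, 1), then mnt_fd = sys_fsmount(fs_fd, 0, 0)
+ *
+ * so the superblock is fully configured before the child materializes
+ * the detached mount it will create BPF tokens from.
+ */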
+ /* notify the child that we did the fsconfig() calls and it can proceed. */
+ err = write(sock_fd, &one, sizeof(one));
+ if (!ASSERT_EQ(err, sizeof(one), "send_one"))
 goto cleanup;
- zclose(mnt_fd);
+ zclose(fs_fd);
 /* receive BPF token FD back from child for some extra tests */
 err = recvfd(sock_fd, &token_fd);
@@ -459,7 +461,6 @@ static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd)
 cleanup:
 zclose(sock_fd);
 zclose(fs_fd);
- zclose(mnt_fd);
 zclose(token_fd);
 if (child_pid > 0)
@@ -1046,6 +1047,41 @@ err_out:
 #define bit(n) (1ULL << (n))
+static int userns_bpf_token_info(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ int err, token_fd = -1;
+ struct bpf_token_info info;
+ u32 len = sizeof(struct bpf_token_info);
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ memset(&info, 0, len);
+ err = bpf_obj_get_info_by_fd(token_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_obj_get_token_info"))
+ goto cleanup;
+ if (!ASSERT_EQ(info.allowed_cmds, bit(BPF_MAP_CREATE), "token_info_cmds_map_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ if (!ASSERT_EQ(info.allowed_progs, bit(BPF_PROG_TYPE_XDP), "token_info_progs_xdp")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* The BPF_PROG_TYPE_EXT is not set in token */
+ if (!ASSERT_NEQ(info.allowed_progs, bit(BPF_PROG_TYPE_EXT), "token_info_progs_ext"))
+ err = -EINVAL;
+
+cleanup:
+ zclose(token_fd);
+ return err;
+}
+
 void test_token(void)
 {
 if (test__start_subtest("map_token")) {
@@ -1149,4 +1185,13 @@
 subtest_userns(&opts, userns_obj_priv_implicit_token_envvar);
 }
+ if (test__start_subtest("bpf_token_info")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_MAP_CREATE),
+ .progs = bit(BPF_PROG_TYPE_XDP),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_bpf_token_info);
+ }
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
index a222df765bc3..10e231965589 100644
--- a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
@@ -28,10 +28,62 @@ out:
 tracing_failure__destroy(skel);
 }
+static void test_tracing_fail_prog(const char *prog_name, const char *exp_msg)
+{
+ struct tracing_failure *skel;
+ struct bpf_program *prog;
+ char log_buf[256];
+ int err;
+
+ skel = tracing_failure__open();
+ if (!ASSERT_OK_PTR(skel, "tracing_failure__open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto out;
+
+ bpf_program__set_autoload(prog, true);
+ bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
+
+ err = tracing_failure__load(skel);
+ if (!ASSERT_ERR(err, "tracing_failure__load"))
+ goto out;
+
+ ASSERT_HAS_SUBSTR(log_buf, exp_msg, "log_buf");
+out:
+ tracing_failure__destroy(skel);
+}
+
+static void test_tracing_deny(void)
+{
+ int btf_id;
+
+ /* __rcu_read_lock depends on CONFIG_PREEMPT_RCU */
+ btf_id = libbpf_find_vmlinux_btf_id("__rcu_read_lock", BPF_TRACE_FENTRY);
+ if (btf_id <= 0) {
+ test__skip();
+ return;
+ }
+
+ test_tracing_fail_prog("tracing_deny",
+ "Attaching tracing programs to function '__rcu_read_lock' is rejected.");
+}
+
+static void test_fexit_noreturns(void)
+{
+ test_tracing_fail_prog("fexit_noreturns",
+ "Attaching fexit/fmod_ret to __noreturn function 'do_exit' is rejected.");
+}
+
 void test_tracing_failure(void)
 {
 if (test__start_subtest("bpf_spin_lock"))
 test_bpf_spin_lock(true);
 if (test__start_subtest("bpf_spin_unlock"))
 test_bpf_spin_lock(false);
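+ /* Both new subtests expect prog load to fail with a specific kernel
+ * message; test_tracing_deny() probes vmlinux BTF first because
+ * __rcu_read_lock only exists there on CONFIG_PREEMPT_RCU kernels.
+ */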
(test__start_subtest("bpf_spin_unlock")) test_bpf_spin_lock(false); + if (test__start_subtest("tracing_deny")) + test_tracing_deny(); + if (test__start_subtest("fexit_noreturns")) + test_fexit_noreturns(); } diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c index c397336fe1ed..b17dc39a23db 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c @@ -251,7 +251,7 @@ static void test_uretprobe_syscall_call(void) .retprobe = true, ); struct uprobe_syscall_executed *skel; - int pid, status, err, go[2], c; + int pid, status, err, go[2], c = 0; if (!ASSERT_OK(pipe(go), "pipe")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c index 495d66414b57..9057e983cc54 100644 --- a/tools/testing/selftests/bpf/prog_tests/usdt.c +++ b/tools/testing/selftests/bpf/prog_tests/usdt.c @@ -270,8 +270,16 @@ static void subtest_multispec_usdt(void) */ trigger_300_usdts(); - /* we'll reuse usdt_100 BPF program for usdt_300 test */ bpf_link__destroy(skel->links.usdt_100); + + bss->usdt_100_called = 0; + bss->usdt_100_sum = 0; + + /* If built with arm64/clang, there will be much less number of specs + * for usdt_300 call sites. + */ +#if !defined(__aarch64__) || !defined(__clang__) + /* we'll reuse usdt_100 BPF program for usdt_300 test */ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe", "test", "usdt_300", NULL); err = -errno; @@ -282,13 +290,11 @@ static void subtest_multispec_usdt(void) /* let's check that there are no "dangling" BPF programs attached due * to partial success of the above test:usdt_300 attachment */ - bss->usdt_100_called = 0; - bss->usdt_100_sum = 0; - f300(777); /* this is 301st instance of usdt_300 */ ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called"); ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum"); +#endif /* This time we have USDT with 400 inlined invocations, but arg specs * should be the same across all sites, so libbpf will only need to diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c index d424e7ecbd12..9fd3ae987321 100644 --- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c @@ -21,8 +21,7 @@ #include "../progs/test_user_ringbuf.h" static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ; -static const long c_ringbuf_size = 1 << 12; /* 1 small page */ -static const long c_max_entries = c_ringbuf_size / c_sample_size; +static long c_ringbuf_size, c_max_entries; static void drain_current_samples(void) { @@ -424,7 +423,9 @@ static void test_user_ringbuf_loop(void) uint32_t remaining_samples = total_samples; int err; - BUILD_BUG_ON(total_samples <= c_max_entries); + if (!ASSERT_LT(c_max_entries, total_samples, "compare_c_max_entries")) + return; + err = load_skel_create_user_ringbuf(&skel, &ringbuf); if (err) return; @@ -686,6 +687,9 @@ void test_user_ringbuf(void) { int i; + c_ringbuf_size = getpagesize(); /* 1 page */ + c_max_entries = c_ringbuf_size / c_sample_size; + for (i = 0; i < ARRAY_SIZE(success_tests); i++) { if (!test__start_subtest(success_tests[i].test_name)) continue; diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index c9da06741104..77ec95d4ffaa 100644 --- 
a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -85,6 +85,7 @@ #include "verifier_store_release.skel.h" #include "verifier_subprog_precision.skel.h" #include "verifier_subreg.skel.h" +#include "verifier_tailcall.skel.h" #include "verifier_tailcall_jit.skel.h" #include "verifier_typedef.skel.h" #include "verifier_uninit.skel.h" @@ -219,6 +220,7 @@ void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_store_release(void) { RUN(verifier_store_release); } void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); } void test_verifier_subreg(void) { RUN(verifier_subreg); } +void test_verifier_tailcall(void) { RUN(verifier_tailcall); } void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); } void test_verifier_typedef(void) { RUN(verifier_typedef); } void test_verifier_uninit(void) { RUN(verifier_uninit); } diff --git a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c index ab0f02faa80c..4d69d9d55e17 100644 --- a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c +++ b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c @@ -268,7 +268,7 @@ static void test_verify_pkcs7_sig_from_map(void) char *tmp_dir; struct test_verify_pkcs7_sig *skel = NULL; struct bpf_map *map; - struct data data; + struct data data = {}; int ret, zero = 0; /* Trigger creation of session keyring. */ diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index b2b2d85dbb1b..43264347e7d7 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -37,21 +37,26 @@ static void test_xdp_adjust_tail_shrink(void) bpf_object__close(obj); } -static void test_xdp_adjust_tail_grow(void) +static void test_xdp_adjust_tail_grow(bool is_64k_pagesize) { const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; struct bpf_object *obj; - char buf[4096]; /* avoid segfault: large buf to hold grow results */ + char buf[8192]; /* avoid segfault: large buf to hold grow results */ __u32 expect_sz; int err, prog_fd; LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = &pkt_v4, - .data_size_in = sizeof(pkt_v4), .data_out = buf, .data_size_out = sizeof(buf), .repeat = 1, ); + /* topts.data_size_in as a special signal to bpf prog */ + if (is_64k_pagesize) + topts.data_size_in = sizeof(pkt_v4) - 1; + else + topts.data_size_in = sizeof(pkt_v4); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow")) return; @@ -208,7 +213,7 @@ out: bpf_object__close(obj); } -static void test_xdp_adjust_frags_tail_grow(void) +static void test_xdp_adjust_frags_tail_grow_4k(void) { const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; __u32 exp_size; @@ -246,14 +251,20 @@ static void test_xdp_adjust_frags_tail_grow(void) ASSERT_EQ(topts.retval, XDP_TX, "9Kb+10b retval"); ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size"); - for (i = 0; i < 9000; i++) - ASSERT_EQ(buf[i], 1, "9Kb+10b-old"); + for (i = 0; i < 9000; i++) { + if (buf[i] != 1) + ASSERT_EQ(buf[i], 1, "9Kb+10b-old"); + } - for (i = 9000; i < 9010; i++) - ASSERT_EQ(buf[i], 0, "9Kb+10b-new"); + for (i = 9000; i < 9010; i++) { + if (buf[i] != 0) + ASSERT_EQ(buf[i], 0, "9Kb+10b-new"); + } - for (i = 9010; i < 16384; i++) - ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched"); + for (i = 9010; i < 16384; i++) { + if 
(buf[i] != 1) + ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched"); + } /* Test a too large grow */ memset(buf, 1, 16384); @@ -273,16 +284,93 @@ out: bpf_object__close(obj); } +static void test_xdp_adjust_frags_tail_grow_64k(void) +{ + const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; + __u32 exp_size; + struct bpf_program *prog; + struct bpf_object *obj; + int err, i, prog_fd; + __u8 *buf; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + goto out; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(262144); + if (!ASSERT_OK_PTR(buf, "alloc buf 256Kb")) + goto out; + + /* Test case add 10 bytes to last frag */ + memset(buf, 1, 262144); + exp_size = 90000 + 10; + + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = 90000; + topts.data_size_out = 262144; + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "90Kb+10b"); + ASSERT_EQ(topts.retval, XDP_TX, "90Kb+10b retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "90Kb+10b size"); + + for (i = 0; i < 90000; i++) { + if (buf[i] != 1) + ASSERT_EQ(buf[i], 1, "90Kb+10b-old"); + } + + for (i = 90000; i < 90010; i++) { + if (buf[i] != 0) + ASSERT_EQ(buf[i], 0, "90Kb+10b-new"); + } + + for (i = 90010; i < 262144; i++) { + if (buf[i] != 1) + ASSERT_EQ(buf[i], 1, "90Kb+10b-untouched"); + } + + /* Test a too large grow */ + memset(buf, 1, 262144); + exp_size = 90001; + + topts.data_in = topts.data_out = buf; + topts.data_size_in = 90001; + topts.data_size_out = 262144; + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "90Kb+10b"); + ASSERT_EQ(topts.retval, XDP_DROP, "90Kb+10b retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "90Kb+10b size"); + + free(buf); +out: + bpf_object__close(obj); +} + void test_xdp_adjust_tail(void) { + int page_size = getpagesize(); + if (test__start_subtest("xdp_adjust_tail_shrink")) test_xdp_adjust_tail_shrink(); if (test__start_subtest("xdp_adjust_tail_grow")) - test_xdp_adjust_tail_grow(); + test_xdp_adjust_tail_grow(page_size == 65536); if (test__start_subtest("xdp_adjust_tail_grow2")) test_xdp_adjust_tail_grow2(); if (test__start_subtest("xdp_adjust_frags_tail_shrink")) test_xdp_adjust_frags_tail_shrink(); - if (test__start_subtest("xdp_adjust_frags_tail_grow")) - test_xdp_adjust_frags_tail_grow(); + if (test__start_subtest("xdp_adjust_frags_tail_grow")) { + if (page_size == 65536) + test_xdp_adjust_frags_tail_grow_64k(); + else + test_xdp_adjust_frags_tail_grow_4k(); + } } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index 7dac044664ac..dd34b0cc4b4e 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -66,16 +66,25 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd) #else #define MAX_PKT_SIZE 3408 #endif + +#define PAGE_SIZE_4K 4096 +#define PAGE_SIZE_64K 65536 + static void test_max_pkt_size(int fd) { - char data[MAX_PKT_SIZE + 1] = {}; + char data[PAGE_SIZE_64K + 1] = {}; int err; DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &data, - .data_size_in = MAX_PKT_SIZE, .flags = BPF_F_TEST_XDP_LIVE_FRAMES, .repeat = 1, ); + + if (getpagesize() == PAGE_SIZE_64K) + opts.data_size_in = MAX_PKT_SIZE + PAGE_SIZE_64K - PAGE_SIZE_4K; + else + opts.data_size_in = MAX_PKT_SIZE; + err = bpf_prog_test_run_opts(fd, &opts); ASSERT_OK(err, 
"prog_run_max_size"); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c new file mode 100644 index 000000000000..2f20485e0de3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "vmlinux.h" +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +__u32 value_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_map_values(struct bpf_iter__bpf_map_elem *ctx) +{ + __u32 value = 0; + + if (ctx->value == (void *)0) + return 0; + + bpf_probe_read_kernel(&value, sizeof(value), ctx->value); + value_sum += value; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 6e208e24ba3b..530752ddde8e 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -83,9 +83,11 @@ * expect return value to match passed parameter: * - a decimal number * - a hexadecimal number, when starts from 0x - * - literal INT_MIN - * - literal POINTER_VALUE (see definition below) - * - literal TEST_DATA_LEN (see definition below) + * - a macro which expands to one of the above + * - literal _INT_MIN (expands to INT_MIN) + * In addition, two special macros are defined below: + * - POINTER_VALUE + * - TEST_DATA_LEN * __retval_unpriv Same, but load program in unprivileged mode. * * __description Text to be used instead of a program name for display @@ -125,8 +127,8 @@ #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) #define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag))) -#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="#val))) -#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="#val))) +#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="XSTR(val)))) +#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="XSTR(val)))) #define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary"))) #define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv"))) #define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path))) @@ -155,7 +157,7 @@ #define __imm_insn(name, expr) [name]"i"(*(long *)&(expr)) /* Magic constants used with __retval() */ -#define POINTER_VALUE 0xcafe4all +#define POINTER_VALUE 0xbadcafe #define TEST_DATA_LEN 64 #ifndef __used @@ -231,4 +233,12 @@ #define CAN_USE_LOAD_ACQ_STORE_REL #endif +#if defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) +#define SPEC_V1 +#endif + +#if defined(__TARGET_ARCH_x86) +#define SPEC_V4 +#endif + #endif diff --git a/tools/testing/selftests/bpf/progs/cgroup_mprog.c b/tools/testing/selftests/bpf/progs/cgroup_mprog.c new file mode 100644 index 000000000000..6a0ea02c4de2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_mprog.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +SEC("cgroup/getsockopt") +int getsockopt_1(struct bpf_sockopt *ctx) +{ + return 1; +} + +SEC("cgroup/getsockopt") +int getsockopt_2(struct bpf_sockopt *ctx) +{ + return 1; +} + +SEC("cgroup/getsockopt") +int getsockopt_3(struct bpf_sockopt *ctx) +{ + return 1; +} + +SEC("cgroup/getsockopt") +int getsockopt_4(struct bpf_sockopt *ctx) +{ + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/compute_live_registers.c b/tools/testing/selftests/bpf/progs/compute_live_registers.c index f3d79aecbf93..6884ab99a421 100644 --- a/tools/testing/selftests/bpf/progs/compute_live_registers.c +++ b/tools/testing/selftests/bpf/progs/compute_live_registers.c @@ -240,6 +240,22 @@ __naked void if2(void) ::: __clobber_all); } +/* Verifier misses that r2 is alive if jset is not handled properly */ +SEC("socket") +__log_level(2) +__msg("2: 012....... (45) if r1 & 0x7 goto pc+1") +__naked void if3_jset_bug(void) +{ + asm volatile ( + "r0 = 1;" + "r2 = 2;" + "if r1 & 0x7 goto +1;" + "exit;" + "r0 = r2;" + "exit;" + ::: __clobber_all); +} + SEC("socket") __log_level(2) __msg("0: .......... (b7) r1 = 0") diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index a0391f9da2d4..8315273cb900 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -9,6 +9,8 @@ #include "bpf_misc.h" #include "errno.h" +#define PAGE_SIZE_64K 65536 + char _license[] SEC("license") = "GPL"; int pid, err, val; @@ -611,11 +613,12 @@ int test_dynptr_copy_xdp(struct xdp_md *xdp) struct bpf_dynptr ptr_buf, ptr_xdp; char data[] = "qwertyuiopasdfghjkl"; char buf[32] = {'\0'}; - __u32 len = sizeof(data); + __u32 len = sizeof(data), xdp_data_size; int i, chunks = 200; /* ptr_xdp is backed by non-contiguous memory */ bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + xdp_data_size = bpf_dynptr_size(&ptr_xdp); bpf_ringbuf_reserve_dynptr(&ringbuf, len * chunks, 0, &ptr_buf); /* Destination dynptr is backed by non-contiguous memory */ @@ -673,7 +676,7 @@ int test_dynptr_copy_xdp(struct xdp_md *xdp) goto out; } - if (bpf_dynptr_copy(&ptr_xdp, 2000, &ptr_xdp, 0, len * chunks) != -E2BIG) + if (bpf_dynptr_copy(&ptr_xdp, xdp_data_size - 3000, &ptr_xdp, 0, len * chunks) != -E2BIG) err = 1; out: @@ -681,6 +684,173 @@ out: return XDP_DROP; } +char memset_zero_data[] = "data to be zeroed"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_zero(void *ctx) +{ + __u32 data_sz = sizeof(memset_zero_data); + char zeroes[32] = {'\0'}; + struct bpf_dynptr ptr; + + err = bpf_dynptr_from_mem(memset_zero_data, data_sz, 0, &ptr); + err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, 0); + err = err ?: bpf_memcmp(zeroes, memset_zero_data, data_sz); + + return 0; +} + +#define DYNPTR_MEMSET_VAL 42 + +char memset_notzero_data[] = "data to be overwritten"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_notzero(void *ctx) +{ + u32 data_sz = sizeof(memset_notzero_data); + struct bpf_dynptr ptr; + char expected[32]; + + __builtin_memset(expected, DYNPTR_MEMSET_VAL, data_sz); + + err = bpf_dynptr_from_mem(memset_notzero_data, data_sz, 0, &ptr); + err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, DYNPTR_MEMSET_VAL); + err = err ?: bpf_memcmp(expected, memset_notzero_data, data_sz); + + return 0; +} + +char memset_zero_offset_data[] = "data to be zeroed partially"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int 
test_dynptr_memset_zero_offset(void *ctx)
+{
+ char expected[] = "data to \0\0\0\0eroed partially";
+ __u32 data_sz = sizeof(memset_zero_offset_data);
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_offset_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_memset(&ptr, 8, 4, 0);
+ err = err ?: bpf_memcmp(expected, memset_zero_offset_data, data_sz);
+
+ return 0;
+}
+
+char memset_zero_adjusted_data[] = "data to be zeroed partially";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero_adjusted(void *ctx)
+{
+ char expected[] = "data\0\0\0\0be zeroed partially";
+ __u32 data_sz = sizeof(memset_zero_adjusted_data);
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_adjusted_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_adjust(&ptr, 4, 8);
+ err = err ?: bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
+ err = err ?: bpf_memcmp(expected, memset_zero_adjusted_data, data_sz);
+
+ return 0;
+}
+
+char memset_overflow_data[] = "memset overflow data";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_overflow(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_overflow_data);
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
+ ret = bpf_dynptr_memset(&ptr, 0, data_sz + 1, 0);
+ if (ret != -E2BIG)
+ err = 1;
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_overflow_offset(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_overflow_data);
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
+ ret = bpf_dynptr_memset(&ptr, 1, data_sz, 0);
+ if (ret != -E2BIG)
+ err = 1;
+
+ return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_memset_readonly(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ /* cgroup skbs are read only, memset should fail */
+ ret = bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
+ if (ret != -EINVAL)
+ err = 1;
+
+ return 0;
+}
+
+#define min_t(type, x, y) ({ \
+ type __x = (x); \
+ type __y = (y); \
+ __x < __y ? __x : __y; })
+
+SEC("xdp")
+int test_dynptr_memset_xdp_chunks(struct xdp_md *xdp)
+{
+ u32 data_sz, chunk_sz, offset = 0;
+ const int max_chunks = 200;
+ struct bpf_dynptr ptr_xdp;
+ char expected_buf[32];
+ char buf[32];
+ int i;
+
+ __builtin_memset(expected_buf, DYNPTR_MEMSET_VAL, sizeof(expected_buf));
+
+ /* ptr_xdp is backed by non-contiguous memory */
+ bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+ data_sz = bpf_dynptr_size(&ptr_xdp);
+
+ err = bpf_dynptr_memset(&ptr_xdp, 0, data_sz, DYNPTR_MEMSET_VAL);
+ if (err) {
+ /* bpf_dynptr_memset() eventually calls bpf_xdp_pointer(),
+ * which returns -EFAULT when data_sz is greater than 0xffff.
+ * For a 64K page size, data_sz exceeds 64K, so the error is
+ * expected here; zero it out and return success.
+ */
+ if (data_sz >= PAGE_SIZE_64K)
+ err = 0;
+ goto out;
+ }
+
+ bpf_for(i, 0, max_chunks) {
+ offset = i * sizeof(buf);
+ if (offset >= data_sz)
+ goto out;
+ chunk_sz = min_t(u32, sizeof(buf), data_sz - offset);
+ err = bpf_dynptr_read(&buf, chunk_sz, &ptr_xdp, offset, 0);
+ if (err)
+ goto out;
+ err = bpf_memcmp(buf, expected_buf, sizeof(buf));
+ if (err)
+ goto out;
+ }
+out:
+ return XDP_DROP;
+}
+
 void *user_ptr;
 /* Contains the copy of the data pointed by user_ptr.
* Size 384 to make it not fit into a single kernel chunk when copying diff --git a/tools/testing/selftests/bpf/progs/fexit_noreturns.c b/tools/testing/selftests/bpf/progs/fexit_noreturns.c deleted file mode 100644 index 54654539f550..000000000000 --- a/tools/testing/selftests/bpf/progs/fexit_noreturns.c +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include <linux/bpf.h> -#include <bpf/bpf_helpers.h> -#include <bpf/bpf_tracing.h> -#include "bpf_misc.h" - -char _license[] SEC("license") = "GPL"; - -SEC("fexit/do_exit") -__failure __msg("Attaching fexit/fmod_ret to __noreturn functions is rejected.") -int BPF_PROG(noreturns) -{ - return 0; -} diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index 76adf4a8f2da..7dd92a303bf6 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -1649,4 +1649,281 @@ int clean_live_states(const void *ctx) return 0; } +SEC("?raw_tp") +__flag(BPF_F_TEST_STATE_FREQ) +__failure __msg("misaligned stack access off 0+-31+0 size 8") +__naked int absent_mark_in_the_middle_state(void) +{ + /* This is equivalent to C program below. + * + * r8 = bpf_get_prandom_u32(); + * r6 = -32; + * bpf_iter_num_new(&fp[-8], 0, 10); + * if (unlikely(bpf_get_prandom_u32())) + * r6 = -31; + * while (bpf_iter_num_next(&fp[-8])) { + * if (unlikely(bpf_get_prandom_u32())) + * *(fp + r6) = 7; + * } + * bpf_iter_num_destroy(&fp[-8]) + * return 0 + */ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "r7 = 0;" + "r6 = -32;" + "r0 = 0;" + "*(u64 *)(r10 - 16) = r0;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto change_r6_%=;" + "loop_%=:" + "call noop;" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto use_r6_%=;" + "goto loop_%=;" + "loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + "use_r6_%=:" + "r0 = r10;" + "r0 += r6;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "goto loop_%=;" + "change_r6_%=:" + "r6 = -31;" + "goto loop_%=;" + : + : __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +__used __naked +static int noop(void) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ); +} + +SEC("?raw_tp") +__flag(BPF_F_TEST_STATE_FREQ) +__failure __msg("misaligned stack access off 0+-31+0 size 8") +__naked int absent_mark_in_the_middle_state2(void) +{ + /* This is equivalent to C program below. 
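+ * (Same shape as the previous test: fp[-32] is an aligned stack slot
+ * while fp[-31] is misaligned, and the expected "misaligned stack
+ * access" error proves the verifier kept the r6 = -31 path alive
+ * across the extra jump that lands in the middle of the loop.)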
+ * + * r8 = bpf_get_prandom_u32(); + * r6 = -32; + * bpf_iter_num_new(&fp[-8], 0, 10); + * if (unlikely(bpf_get_prandom_u32())) { + * r6 = -31; + * jump_into_loop: + * goto +0; + * goto loop; + * } + * if (unlikely(bpf_get_prandom_u32())) + * goto jump_into_loop; + * loop: + * while (bpf_iter_num_next(&fp[-8])) { + * if (unlikely(bpf_get_prandom_u32())) + * *(fp + r6) = 7; + * } + * bpf_iter_num_destroy(&fp[-8]) + * return 0 + */ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "r7 = 0;" + "r6 = -32;" + "r0 = 0;" + "*(u64 *)(r10 - 16) = r0;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto change_r6_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto jump_into_loop_%=;" + "loop_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto use_r6_%=;" + "goto loop_%=;" + "loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + "use_r6_%=:" + "r0 = r10;" + "r0 += r6;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "goto loop_%=;" + "change_r6_%=:" + "r6 = -31;" + "jump_into_loop_%=: " + "goto +0;" + "goto loop_%=;" + : + : __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +SEC("?raw_tp") +__flag(BPF_F_TEST_STATE_FREQ) +__failure __msg("misaligned stack access off 0+-31+0 size 8") +__naked int absent_mark_in_the_middle_state3(void) +{ + /* + * bpf_iter_num_new(&fp[-8], 0, 10) + * loop1(-32, &fp[-8]) + * loop1_wrapper(&fp[-8]) + * bpf_iter_num_destroy(&fp[-8]) + */ + asm volatile ( + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + /* call #1 */ + "r1 = -32;" + "r2 = r10;" + "r2 += -8;" + "call loop1;" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + /* call #2 */ + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "r1 = r10;" + "r1 += -8;" + "call loop1_wrapper;" + /* return */ + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + : + : __imm(bpf_iter_num_new), + __imm(bpf_iter_num_destroy), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +__used __naked +static int loop1(void) +{ + /* + * int loop1(num, iter) { + * r6 = num; + * r7 = iter; + * while (bpf_iter_num_next(r7)) { + * if (unlikely(bpf_get_prandom_u32())) + * *(fp + r6) = 7; + * } + * return 0 + * } + */ + asm volatile ( + "r6 = r1;" + "r7 = r2;" + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "loop_%=:" + "r1 = r7;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto use_r6_%=;" + "goto loop_%=;" + "loop_end_%=:" + "r0 = 0;" + "exit;" + "use_r6_%=:" + "r0 = r10;" + "r0 += r6;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "goto loop_%=;" + : + : __imm(bpf_iter_num_next), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +__used __naked +static int loop1_wrapper(void) +{ + /* + * int loop1_wrapper(iter) { + * r6 = -32; + * r7 = iter; + * if (unlikely(bpf_get_prandom_u32())) + * r6 = -31; + * loop1(r6, r7); + * return 0; + * } + */ + asm volatile ( + "r6 = -32;" + "r7 = r1;" + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto change_r6_%=;" + "loop_%=:" + "r1 = r6;" + "r2 = r7;" + "call loop1;" + "r0 = 0;" + "exit;" + "change_r6_%=:" + "r6 = -31;" + "goto loop_%=;" + : + : 
__imm(bpf_iter_num_next), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c new file mode 100644 index 000000000000..4f94c971ae86 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" + +SEC("tp_btf/sys_enter") +__success +__log_level(2) +__msg("r8 = *(u64 *)(r7 +0) ; R7_w=ptr_nameidata(off={{[0-9]+}}) R8_w=rdonly_untrusted_mem(sz=0)") +__msg("r9 = *(u8 *)(r8 +0) ; R8_w=rdonly_untrusted_mem(sz=0) R9_w=scalar") +int btf_id_to_ptr_mem(void *ctx) +{ + struct task_struct *task; + struct nameidata *idata; + u64 ret, off; + + task = bpf_get_current_task_btf(); + idata = task->nameidata; + off = bpf_core_field_offset(struct nameidata, pathname); + /* + * asm block to have reliable match target for __msg, equivalent of: + * ret = task->nameidata->pathname[0]; + */ + asm volatile ( + "r7 = %[idata];" + "r7 += %[off];" + "r8 = *(u64 *)(r7 + 0);" + "r9 = *(u8 *)(r8 + 0);" + "%[ret] = r9;" + : [ret]"=r"(ret) + : [idata]"r"(idata), + [off]"r"(off) + : "r7", "r8", "r9"); + return ret; +} + +SEC("socket") +__success +__retval(0) +int ldx_is_ok_bad_addr(void *ctx) +{ + char *p; + + if (!bpf_core_enum_value_exists(enum bpf_features, BPF_FEAT_RDONLY_CAST_TO_VOID)) + return 42; + + p = bpf_rdonly_cast(0, 0); + return p[0x7fff]; +} + +SEC("socket") +__success +__retval(1) +int ldx_is_ok_good_addr(void *ctx) +{ + int v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + return *p; +} + +SEC("socket") +__success +int offset_not_tracked(void *ctx) +{ + int *p, i, s; + + p = bpf_rdonly_cast(0, 0); + s = 0; + bpf_for(i, 0, 1000 * 1000 * 1000) { + p++; + s += *p; + } + return s; +} + +SEC("socket") +__failure +__msg("cannot write into rdonly_untrusted_mem") +int stx_not_ok(void *ctx) +{ + int v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + *p = 1; + return 0; +} + +SEC("socket") +__failure +__msg("cannot write into rdonly_untrusted_mem") +int atomic_not_ok(void *ctx) +{ + int v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + __sync_fetch_and_add(p, 1); + return 0; +} + +SEC("socket") +__failure +__msg("cannot write into rdonly_untrusted_mem") +int atomic_rmw_not_ok(void *ctx) +{ + long v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + return __sync_val_compare_and_swap(p, 0, 42); +} + +SEC("socket") +__failure +__msg("invalid access to memory, mem_size=0 off=0 size=4") +__msg("R1 min value is outside of the allowed memory range") +int kfunc_param_not_ok(void *ctx) +{ + int *p; + + p = bpf_rdonly_cast(0, 0); + bpf_kfunc_trusted_num_test(p); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure +__msg("R1 type=rdonly_untrusted_mem expected=") +int helper_param_not_ok(void *ctx) +{ + char *p; + + p = bpf_rdonly_cast(0, 0); + /* + * Any helper with ARG_CONST_SIZE_OR_ZERO constraint will do, + * the most permissive constraint + */ + bpf_copy_from_user(p, 0, (void *)42); + return 0; +} + +static __noinline u64 *get_some_addr(void) +{ + if (bpf_get_prandom_u32()) + return bpf_rdonly_cast(0, bpf_core_type_id_kernel(struct sock)); + else + return bpf_rdonly_cast(0, 0); +} + +SEC("socket") +__success +__retval(0) +int mixed_mem_type(void *ctx) +{ + u64 *p; + + /* Try to avoid compiler hoisting load to if branches by using __noinline 
func. */ + p = get_some_addr(); + return *p; +} + +__attribute__((__aligned__(8))) +u8 global[] = { + 0x11, 0x22, 0x33, 0x44, + 0x55, 0x66, 0x77, 0x88, + 0x99 +}; + +__always_inline +static u64 combine(void *p) +{ + u64 acc; + + acc = 0; +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + acc |= (*(u64 *)p >> 56) << 24; + acc |= (*(u32 *)p >> 24) << 16; + acc |= (*(u16 *)p >> 8) << 8; + acc |= *(u8 *)p; +#else + acc |= (*(u64 *)p & 0xff) << 24; + acc |= (*(u32 *)p & 0xff) << 16; + acc |= (*(u16 *)p & 0xff) << 8; + acc |= *(u8 *)p; +#endif + return acc; +} + +SEC("socket") +__retval(0x88442211) +int diff_size_access(void *ctx) +{ + return combine(bpf_rdonly_cast(&global, 0)); +} + +SEC("socket") +__retval(0x99553322) +int misaligned_access(void *ctx) +{ + return combine(bpf_rdonly_cast(&global, 0) + 1); +} + +__weak int return_one(void) +{ + return 1; +} + +SEC("socket") +__success +__retval(1) +int null_check(void *ctx) +{ + int *p; + + p = bpf_rdonly_cast(0, 0); + if (p == 0) + /* make this a function call to avoid compiler + * moving r0 assignment before check. + */ + return return_one(); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c index a3620c15c136..49fe93d7e059 100644 --- a/tools/testing/selftests/bpf/progs/rbtree.c +++ b/tools/testing/selftests/bpf/progs/rbtree.c @@ -61,19 +61,19 @@ static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock) } m->key = 1; - bpf_spin_lock(&glock); - bpf_rbtree_add(&groot, &n->node, less); - bpf_rbtree_add(&groot, &m->node, less); - bpf_spin_unlock(&glock); + bpf_spin_lock(lock); + bpf_rbtree_add(root, &n->node, less); + bpf_rbtree_add(root, &m->node, less); + bpf_spin_unlock(lock); n = bpf_obj_new(typeof(*n)); if (!n) return 3; n->key = 3; - bpf_spin_lock(&glock); - bpf_rbtree_add(&groot, &n->node, less); - bpf_spin_unlock(&glock); + bpf_spin_lock(lock); + bpf_rbtree_add(root, &n->node, less); + bpf_spin_unlock(lock); return 0; } diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c index 43637ee2cdcd..3a868a199349 100644 --- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c @@ -16,10 +16,11 @@ struct { __type(value, long); } map_a SEC(".maps"); -__u32 user_data, key_serial, target_pid; +__u32 user_data, target_pid; +__s32 key_serial; __u64 flags, task_storage_val, cgroup_id; -struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym; void bpf_key_put(struct bpf_key *key) __ksym; void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; diff --git a/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c index 855f85fc5522..405adbe5e8b0 100644 --- a/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c +++ b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c @@ -17,8 +17,8 @@ static const char expected_value_b[] = "bpf_selftest_value_b"; bool found_value_a; bool found_value_b; -SEC("lsm.s/socket_connect") -int BPF_PROG(test_socket_connect) +SEC("lsm.s/file_open") +int BPF_PROG(test_file_open) { u64 cgrp_id = bpf_get_current_cgroup_id(); struct cgroup_subsys_state *css, *tmp; diff --git a/tools/testing/selftests/bpf/progs/security_bpf_map.c b/tools/testing/selftests/bpf/progs/security_bpf_map.c new file mode 100644 index 
000000000000..7216b3450e96
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/security_bpf_map.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define EPERM 1 /* Operation not permitted */
+
+/* From include/linux/fs.h. */
+#define FMODE_WRITE 0x2
+
+struct map;
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, __u32);
+	__type(value, __u32);
+	__uint(max_entries, 1);
+} prot_status_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, __u32);
+	__type(value, __u32);
+	__uint(max_entries, 3);
+} prot_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, __u32);
+	__type(value, __u32);
+	__uint(max_entries, 3);
+} not_prot_map SEC(".maps");
+
+SEC("fmod_ret/security_bpf_map")
+int BPF_PROG(fmod_bpf_map, struct bpf_map *map, int fmode)
+{
+	__u32 key = 0;
+	__u32 *status_ptr = bpf_map_lookup_elem(&prot_status_map, &key);
+
+	if (!status_ptr || !*status_ptr)
+		return 0;
+
+	if (map == &prot_map) {
+		/* Allow read-only access */
+		if (fmode & FMODE_WRITE)
+			return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * This program keeps references to the maps above; this is needed to
+ * prevent the compiler from optimizing them out.
+ */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_dummy1, int a)
+{
+	__u32 key = 0;
+	__u32 val1 = a;
+	__u32 val2 = a + 1;
+
+	bpf_map_update_elem(&prot_map, &key, &val1, BPF_ANY);
+	bpf_map_update_elem(&not_prot_map, &key, &val2, BPF_ANY);
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/set_global_vars.c b/tools/testing/selftests/bpf/progs/set_global_vars.c
index 90f5656c3991..ebaef28b2cb3 100644
--- a/tools/testing/selftests/bpf/progs/set_global_vars.c
+++ b/tools/testing/selftests/bpf/progs/set_global_vars.c
@@ -7,22 +7,30 @@ char _license[] SEC("license") = "GPL";
 
-enum Enum { EA1 = 0, EA2 = 11 };
+typedef __s32 s32;
+typedef s32 i32;
+typedef __u8 u8;
+
+enum Enum { EA1 = 0, EA2 = 11, EA3 = 10 };
 enum Enumu64 {EB1 = 0llu, EB2 = 12llu };
 enum Enums64 { EC1 = 0ll, EC2 = 13ll };
 
 const volatile __s64 var_s64 = -1;
 const volatile __u64 var_u64 = 0;
-const volatile __s32 var_s32 = -1;
+const volatile i32 var_s32 = -1;
 const volatile __u32 var_u32 = 0;
 const volatile __s16 var_s16 = -1;
 const volatile __u16 var_u16 = 0;
 const volatile __s8 var_s8 = -1;
-const volatile __u8 var_u8 = 0;
+const volatile u8 var_u8 = 0;
 const volatile enum Enum var_ea = EA1;
 const volatile enum Enumu64 var_eb = EB1;
 const volatile enum Enums64 var_ec = EC1;
 const volatile bool var_b = false;
+const volatile i32 arr[32];
+const volatile enum Enum enum_arr[32];
+const volatile i32 three_d[47][19][17];
+const volatile i32 *ptr_arr[32];
 
 struct Struct {
 	int:16;
@@ -35,34 +43,38 @@ struct Struct {
 		volatile struct {
 			const int:1;
 			union {
-				const volatile __u8 var_u8;
+				const volatile u8 var_u8[3];
 				const volatile __s16 filler3;
 				const int:1;
+				s32 mat[7][5];
 			} u;
 		};
-	} struct2;
+	} struct2[2][4];
 };
 
 const volatile __u32 stru = 0; /* same prefix as below */
-const volatile struct Struct struct1 = {.struct2 = {.u = {.var_u8 = 1}}};
+const volatile struct Struct struct1[3];
+const volatile struct Struct struct11[11][7];
 
-union Union {
-	__u16 var_u16;
-	struct Struct3 {
-		struct {
-			__u8 var_u8_l;
-		};
+struct Struct3 {
+	struct {
+		u8 var_u8_l;
+	};
+	struct {
 		struct {
-			struct {
-				__u8 var_u8_h;
-			};
+			u8 var_u8_h;
 		};
-	} struct3;
+	};
 };
 
-const volatile union Union union1 = {.var_u16 = -1};
+typedef struct Struct3
Struct3_t; -char arr[4] = {0}; +union Union { + __u16 var_u16; + Struct3_t struct3; +}; + +const volatile union Union union1 = {.var_u16 = -1}; SEC("socket") int test_set_globals(void *ctx) @@ -81,8 +93,14 @@ int test_set_globals(void *ctx) a = var_eb; a = var_ec; a = var_b; - a = struct1.struct2.u.var_u8; + a = struct1[2].struct2[1][2].u.var_u8[2]; a = union1.var_u16; + a = arr[3]; + a = arr[EA2]; + a = enum_arr[EC2]; + a = three_d[31][7][EA2]; + a = struct1[2].struct2[1][2].u.mat[5][3]; + a = struct11[7][5].struct2[0][1].u.mat[3][0]; return a; } diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c index 8f483337e103..77966ded5467 100644 --- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c @@ -23,6 +23,7 @@ static bool ipv4_addr_loopback(__be32 a) } volatile const unsigned int sf; +volatile const unsigned int ss; volatile const __u16 ports[2]; unsigned int bucket[2]; @@ -42,16 +43,18 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) sock_cookie = bpf_get_socket_cookie(sk); sk = bpf_core_cast(sk, struct sock); if (sk->sk_family != sf || - sk->sk_state != TCP_LISTEN || - sk->sk_family == AF_INET6 ? + (ss && sk->sk_state != ss) || + (sk->sk_family == AF_INET6 ? !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) : - !ipv4_addr_loopback(sk->sk_rcv_saddr)) + !ipv4_addr_loopback(sk->sk_rcv_saddr))) return 0; if (sk->sk_num == ports[0]) idx = 0; else if (sk->sk_num == ports[1]) idx = 1; + else if (!ports[0] && !ports[1]) + idx = 0; else return 0; @@ -67,6 +70,27 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) return 0; } +volatile const __u64 destroy_cookie; + +SEC("iter/tcp") +int iter_tcp_destroy(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = (struct sock_common *)ctx->sk_common; + __u64 sock_cookie; + + if (!sk_common) + return 0; + + sock_cookie = bpf_get_socket_cookie(sk_common); + if (sock_cookie != destroy_cookie) + return 0; + + bpf_sock_destroy(sk_common); + bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie)); + + return 0; +} + #define udp_sk(ptr) container_of(ptr, struct udp_sock, inet.sk) SEC("iter/udp") @@ -83,15 +107,17 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx) sock_cookie = bpf_get_socket_cookie(sk); sk = bpf_core_cast(sk, struct sock); if (sk->sk_family != sf || - sk->sk_family == AF_INET6 ? + (sk->sk_family == AF_INET6 ? !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) : - !ipv4_addr_loopback(sk->sk_rcv_saddr)) + !ipv4_addr_loopback(sk->sk_rcv_saddr))) return 0; if (sk->sk_num == ports[0]) idx = 0; else if (sk->sk_num == ports[1]) idx = 1; + else if (!ports[0] && !ports[1]) + idx = 0; else return 0; diff --git a/tools/testing/selftests/bpf/progs/stream.c b/tools/testing/selftests/bpf/progs/stream.c new file mode 100644 index 000000000000..35790897dc87 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/stream.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
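+ *
+ * A note on stream_exhaust below: the loop bumps `size` by
+ * sizeof(_STR) - 1 per iteration and expects the first -ENOSPC at
+ * size == 99954. Assuming a 54-character _STR, 99954 = 54 * 1851,
+ * i.e. 1851 such messages fit in the stream and the 1852nd write is
+ * the one that overflows.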
*/ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct arr_elem { + struct bpf_res_spin_lock lock; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct arr_elem); +} arrmap SEC(".maps"); + +#define ENOSPC 28 +#define _STR "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + +int size; + +SEC("syscall") +__success __retval(0) +int stream_exhaust(void *ctx) +{ + /* Use global variable for loop convergence. */ + size = 0; + bpf_repeat(BPF_MAX_LOOPS) { + if (bpf_stream_printk(BPF_STDOUT, _STR) == -ENOSPC && size == 99954) + return 0; + size += sizeof(_STR) - 1; + } + return 1; +} + +SEC("syscall") +__success __retval(0) +int stream_cond_break(void *ctx) +{ + while (can_loop) + ; + return 0; +} + +SEC("syscall") +__success __retval(0) +int stream_deadlock(void *ctx) +{ + struct bpf_res_spin_lock *lock, *nlock; + + lock = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!lock) + return 1; + nlock = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!nlock) + return 1; + if (bpf_res_spin_lock(lock)) + return 1; + if (bpf_res_spin_lock(nlock)) { + bpf_res_spin_unlock(lock); + return 0; + } + bpf_res_spin_unlock(nlock); + bpf_res_spin_unlock(lock); + return 1; +} + +SEC("syscall") +__success __retval(0) +int stream_syscall(void *ctx) +{ + bpf_stream_printk(BPF_STDOUT, "foo"); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/stream_fail.c b/tools/testing/selftests/bpf/progs/stream_fail.c new file mode 100644 index 000000000000..b4a0d0cc8ec8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/stream_fail.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" + +SEC("syscall") +__failure __msg("Possibly NULL pointer passed") +int stream_vprintk_null_arg(void *ctx) +{ + bpf_stream_vprintk(BPF_STDOUT, "", NULL, 0, NULL); + return 0; +} + +SEC("syscall") +__failure __msg("R3 type=scalar expected=") +int stream_vprintk_scalar_arg(void *ctx) +{ + bpf_stream_vprintk(BPF_STDOUT, "", (void *)46, 0, NULL); + return 0; +} + +SEC("syscall") +__failure __msg("arg#1 doesn't point to a const string") +int stream_vprintk_string_arg(void *ctx) +{ + bpf_stream_vprintk(BPF_STDOUT, ctx, NULL, 0, NULL); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c new file mode 100644 index 000000000000..53af438bd998 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <linux/limits.h> +#include "bpf_misc.h" +#include "errno.h" + +char *user_ptr = (char *)1; +char *invalid_kern_ptr = (char *)-1; + +/* + * When passing userspace pointers, the error code differs based on arch: + * -ERANGE on arches with non-overlapping address spaces + * -EFAULT on other arches + */ +#if defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_loongarch) || \ + defined(__TARGET_ARCH_powerpc) || defined(__TARGET_ARCH_x86) +#define USER_PTR_ERR -ERANGE +#else +#define USER_PTR_ERR -EFAULT +#endif + +/* + * On s390, __get_kernel_nofault (used in string kfuncs) returns 0 for NULL and + * user_ptr (instead of causing an exception) so the below two groups of tests + * are not applicable. 
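+ *
+ * The USER_PTR_ERR split above follows from the same mechanism: the
+ * kfuncs probe memory via __get_kernel_nofault(), and on arches where
+ * kernel and user address ranges are disjoint a user address can
+ * presumably be rejected up front with -ERANGE, while elsewhere the
+ * access must actually fault and yields -EFAULT.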
+ */
+#ifndef __TARGET_ARCH_s390
+
+/* Passing NULL to string kfuncs (treated as a userspace ptr) */
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_null1(void *ctx) { return bpf_strcmp(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_null2(void *ctx) { return bpf_strcmp("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchr_null(void *ctx) { return bpf_strchr(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchrnul_null(void *ctx) { return bpf_strchrnul(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnchr_null(void *ctx) { return bpf_strnchr(NULL, 1, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strrchr_null(void *ctx) { return bpf_strrchr(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strlen_null(void *ctx) { return bpf_strlen(NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnlen_null(void *ctx) { return bpf_strnlen(NULL, 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_null1(void *ctx) { return bpf_strspn(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_null2(void *ctx) { return bpf_strspn("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_null1(void *ctx) { return bpf_strcspn(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_null2(void *ctx) { return bpf_strcspn("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_null1(void *ctx) { return bpf_strstr(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_null2(void *ctx) { return bpf_strstr("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_null1(void *ctx) { return bpf_strnstr(NULL, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_null2(void *ctx) { return bpf_strnstr("hello", NULL, 1); }
+
+/* Passing userspace ptr to string kfuncs */
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr1(void *ctx) { return bpf_strcmp(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr2(void *ctx) { return bpf_strcmp("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchr_user_ptr(void *ctx) { return bpf_strchr(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchrnul_user_ptr(void *ctx) { return bpf_strchrnul(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnchr_user_ptr(void *ctx) { return bpf_strnchr(user_ptr, 1, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strrchr_user_ptr(void *ctx) { return bpf_strrchr(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strlen_user_ptr(void *ctx) { return bpf_strlen(user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnlen_user_ptr(void *ctx) { return bpf_strnlen(user_ptr, 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr1(void *ctx) { return bpf_strspn(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr2(void *ctx) { return bpf_strspn("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr1(void *ctx) { return bpf_strcspn(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr2(void *ctx) { return bpf_strcspn("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr1(void *ctx) { return bpf_strstr(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr2(void *ctx) { return bpf_strstr("hello", user_ptr); }
+SEC("syscall")
__retval(USER_PTR_ERR) int test_strnstr_user_ptr1(void *ctx) { return bpf_strnstr(user_ptr, "hello", 1); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_user_ptr2(void *ctx) { return bpf_strnstr("hello", user_ptr, 1); } + +#endif /* __TARGET_ARCH_s390 */ + +/* Passing invalid kernel ptr to string kfuncs should always return -EFAULT */ +SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault1(void *ctx) { return bpf_strcmp(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault2(void *ctx) { return bpf_strcmp("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strchr_pagefault(void *ctx) { return bpf_strchr(invalid_kern_ptr, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strchrnul_pagefault(void *ctx) { return bpf_strchrnul(invalid_kern_ptr, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strnchr_pagefault(void *ctx) { return bpf_strnchr(invalid_kern_ptr, 1, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strrchr_pagefault(void *ctx) { return bpf_strrchr(invalid_kern_ptr, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strlen_pagefault(void *ctx) { return bpf_strlen(invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strnlen_pagefault(void *ctx) { return bpf_strnlen(invalid_kern_ptr, 1); } +SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault1(void *ctx) { return bpf_strspn(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault2(void *ctx) { return bpf_strspn("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault1(void *ctx) { return bpf_strcspn(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault2(void *ctx) { return bpf_strcspn("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault1(void *ctx) { return bpf_strstr(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault2(void *ctx) { return bpf_strstr("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault1(void *ctx) { return bpf_strnstr(invalid_kern_ptr, "hello", 1); } +SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault2(void *ctx) { return bpf_strnstr("hello", invalid_kern_ptr, 1); } + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c new file mode 100644 index 000000000000..89fb4669b0e9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <linux/limits.h> + +char long_str[XATTR_SIZE_MAX + 1]; + +SEC("syscall") int test_strcmp_too_long(void *ctx) { return bpf_strcmp(long_str, long_str); } +SEC("syscall") int test_strchr_too_long(void *ctx) { return bpf_strchr(long_str, 'b'); } +SEC("syscall") int test_strchrnul_too_long(void *ctx) { return bpf_strchrnul(long_str, 'b'); } +SEC("syscall") int test_strnchr_too_long(void *ctx) { return bpf_strnchr(long_str, sizeof(long_str), 'b'); } +SEC("syscall") int test_strrchr_too_long(void *ctx) { return bpf_strrchr(long_str, 'b'); } +SEC("syscall") int test_strlen_too_long(void *ctx) { return bpf_strlen(long_str); } +SEC("syscall") int test_strnlen_too_long(void *ctx) { return bpf_strnlen(long_str, sizeof(long_str)); } +SEC("syscall") int test_strspn_str_too_long(void *ctx) { return 
bpf_strspn(long_str, "a"); } +SEC("syscall") int test_strspn_accept_too_long(void *ctx) { return bpf_strspn("b", long_str); } +SEC("syscall") int test_strcspn_str_too_long(void *ctx) { return bpf_strcspn(long_str, "b"); } +SEC("syscall") int test_strcspn_reject_too_long(void *ctx) { return bpf_strcspn("b", long_str); } +SEC("syscall") int test_strstr_too_long(void *ctx) { return bpf_strstr(long_str, "hello"); } +SEC("syscall") int test_strnstr_too_long(void *ctx) { return bpf_strnstr(long_str, "hello", sizeof(long_str)); } + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_success.c b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c new file mode 100644 index 000000000000..46697f381878 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "errno.h" + +char str[] = "hello world"; + +#define __test(retval) SEC("syscall") __success __retval(retval) + +/* Functional tests */ +__test(0) int test_strcmp_eq(void *ctx) { return bpf_strcmp(str, "hello world"); } +__test(1) int test_strcmp_neq(void *ctx) { return bpf_strcmp(str, "hello"); } +__test(1) int test_strchr_found(void *ctx) { return bpf_strchr(str, 'e'); } +__test(11) int test_strchr_null(void *ctx) { return bpf_strchr(str, '\0'); } +__test(-ENOENT) int test_strchr_notfound(void *ctx) { return bpf_strchr(str, 'x'); } +__test(1) int test_strchrnul_found(void *ctx) { return bpf_strchrnul(str, 'e'); } +__test(11) int test_strchrnul_notfound(void *ctx) { return bpf_strchrnul(str, 'x'); } +__test(1) int test_strnchr_found(void *ctx) { return bpf_strnchr(str, 5, 'e'); } +__test(11) int test_strnchr_null(void *ctx) { return bpf_strnchr(str, 12, '\0'); } +__test(-ENOENT) int test_strnchr_notfound(void *ctx) { return bpf_strnchr(str, 5, 'w'); } +__test(9) int test_strrchr_found(void *ctx) { return bpf_strrchr(str, 'l'); } +__test(11) int test_strrchr_null(void *ctx) { return bpf_strrchr(str, '\0'); } +__test(-ENOENT) int test_strrchr_notfound(void *ctx) { return bpf_strrchr(str, 'x'); } +__test(11) int test_strlen(void *ctx) { return bpf_strlen(str); } +__test(11) int test_strnlen(void *ctx) { return bpf_strnlen(str, 12); } +__test(5) int test_strspn(void *ctx) { return bpf_strspn(str, "ehlo"); } +__test(2) int test_strcspn(void *ctx) { return bpf_strcspn(str, "lo"); } +__test(6) int test_strstr_found(void *ctx) { return bpf_strstr(str, "world"); } +__test(-ENOENT) int test_strstr_notfound(void *ctx) { return bpf_strstr(str, "hi"); } +__test(0) int test_strstr_empty(void *ctx) { return bpf_strstr(str, ""); } +__test(0) int test_strnstr_found(void *ctx) { return bpf_strnstr(str, "hello", 6); } +__test(-ENOENT) int test_strnstr_notfound(void *ctx) { return bpf_strnstr(str, "hi", 10); } +__test(0) int test_strnstr_empty(void *ctx) { return bpf_strnstr(str, "", 1); } + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c index 0e4d2ff63ab8..dbe646013811 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c @@ -7,7 +7,7 @@ char _license[] SEC("license") = "GPL"; -#if defined(__TARGET_ARCH_x86) +#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) bool skip 
__attribute((__section__(".data"))) = false; #else bool skip = true; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c index 58d5d8dc2235..3d89ad7cbe2a 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c @@ -7,7 +7,7 @@ char _license[] SEC("license") = "GPL"; -#if defined(__TARGET_ARCH_x86) +#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) bool skip __attribute((__section__(".data"))) = false; #else bool skip = true; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c index 31e58389bb8b..b1f6d7e5a8e5 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c @@ -7,7 +7,7 @@ char _license[] SEC("license") = "GPL"; -#if defined(__TARGET_ARCH_x86) +#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) bool skip __attribute((__section__(".data"))) = false; #else bool skip = true; diff --git a/tools/testing/selftests/bpf/progs/test_lookup_key.c b/tools/testing/selftests/bpf/progs/test_lookup_key.c index cdbbb12f1491..1f7e1e59b073 100644 --- a/tools/testing/selftests/bpf/progs/test_lookup_key.c +++ b/tools/testing/selftests/bpf/progs/test_lookup_key.c @@ -14,11 +14,11 @@ char _license[] SEC("license") = "GPL"; __u32 monitored_pid; -__u32 key_serial; +__s32 key_serial; __u32 key_id; __u64 flags; -extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym; extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; extern void bpf_key_put(struct bpf_key *key) __ksym; diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c index 350513c0e4c9..f063a0013f85 100644 --- a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c +++ b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c @@ -26,11 +26,11 @@ int test_ringbuf_write(void *ctx) if (cur_pid != pid) return 0; - sample1 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0); + sample1 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0); if (!sample1) return 0; /* first one can pass */ - sample2 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0); + sample2 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0); if (!sample2) { bpf_ringbuf_discard(sample1, 0); __sync_fetch_and_add(&discarded, 1); diff --git a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c index 8ef6b39335b6..34b30e2603f0 100644 --- a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c +++ b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c @@ -40,7 +40,7 @@ char digest[MAGIC_SIZE + SIZEOF_STRUCT_FSVERITY_DIGEST + SHA256_DIGEST_SIZE]; __u32 monitored_pid; char sig[MAX_SIG_SIZE]; __u32 sig_size; -__u32 user_keyring_serial; +__s32 user_keyring_serial; SEC("lsm.s/file_open") int BPF_PROG(test_file_open, struct file *f) diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c index 2796dd8545eb..1c7941a4ad00 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c +++ b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c @@ -1,8 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 /* 
Copyright (c) 2024 ByteDance */ -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> +#ifndef PAGE_SIZE +#define PAGE_SIZE __PAGE_SIZE +#endif +#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2) + struct { __uint(type, BPF_MAP_TYPE_SOCKMAP); __uint(max_entries, 1); @@ -31,7 +36,7 @@ int prog_skb_verdict(struct __sk_buff *skb) change_tail_ret = bpf_skb_change_tail(skb, skb->len + 1, 0); return SK_PASS; } else if (data[0] == 'E') { /* Error */ - change_tail_ret = bpf_skb_change_tail(skb, 65535, 0); + change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0); return SK_PASS; } return SK_PASS; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c index 8bdb9987c0c7..83df4919c224 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c +++ b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c @@ -7,6 +7,8 @@ int cork_byte; int push_start; int push_end; int apply_bytes; +int pop_start; +int pop_end; struct { __uint(type, BPF_MAP_TYPE_SOCKMAP); @@ -22,6 +24,8 @@ int prog_sk_policy(struct sk_msg_md *msg) bpf_msg_cork_bytes(msg, cork_byte); if (push_start > 0 && push_end > 0) bpf_msg_push_data(msg, push_start, push_end, 0); + if (pop_start >= 0 && pop_end > 0) + bpf_msg_pop_data(msg, pop_start, pop_end, 0); return SK_PASS; } diff --git a/tools/testing/selftests/bpf/progs/test_tc_change_tail.c b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c index 28edafe803f0..fcba8299f0bc 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_change_tail.c +++ b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> -#include <linux/if_ether.h> -#include <linux/in.h> -#include <linux/ip.h> -#include <linux/udp.h> -#include <linux/pkt_cls.h> + +#ifndef PAGE_SIZE +#define PAGE_SIZE __PAGE_SIZE +#endif +#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2) long change_tail_ret = 1; @@ -94,7 +94,7 @@ int change_tail(struct __sk_buff *skb) bpf_skb_change_tail(skb, len, 0); return TCX_PASS; } else if (payload[0] == 'E') { /* Error */ - change_tail_ret = bpf_skb_change_tail(skb, 65535, 0); + change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0); return TCX_PASS; } else if (payload[0] == 'Z') { /* Zero */ change_tail_ret = bpf_skb_change_tail(skb, 0, 0); diff --git a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c index e96d09e11115..ff8d755548b9 100644 --- a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c +++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c @@ -17,7 +17,7 @@ #define MAX_SIG_SIZE 1024 __u32 monitored_pid; -__u32 user_keyring_serial; +__s32 user_keyring_serial; __u64 system_keyring_id; struct data { diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c index dc74d8cf9e3f..5904f45cfbc4 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c @@ -19,7 +19,9 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp) /* Data length determine test case */ if (data_len == 54) { /* sizeof(pkt_v4) */ - offset = 4096; /* test too large offset */ + offset = 4096; /* test too large offset, 4k page size */ + } else if (data_len == 53) { /* sizeof(pkt_v4) - 1 */ + offset = 65536; /* test too large offset, 64k page size */ } else if (data_len 
== 74) { /* sizeof(pkt_v6) */ offset = 40; } else if (data_len == 64) { @@ -31,6 +33,10 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp) offset = 10; } else if (data_len == 9001) { offset = 4096; + } else if (data_len == 90000) { + offset = 10; /* test a small offset, 64k page size */ + } else if (data_len == 90001) { + offset = 65536; /* test too large offset, 64k page size */ } else { return XDP_ABORTED; /* No matching test */ } diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c index d41665d2ec8c..65e485c4468c 100644 --- a/tools/testing/selftests/bpf/progs/tracing_failure.c +++ b/tools/testing/selftests/bpf/progs/tracing_failure.c @@ -18,3 +18,15 @@ int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock) { return 0; } + +SEC("?fentry/__rcu_read_lock") +int BPF_PROG(tracing_deny) +{ + return 0; +} + +SEC("?fexit/do_exit") +int BPF_PROG(fexit_noreturns) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/verifier_and.c b/tools/testing/selftests/bpf/progs/verifier_and.c index e97e518516b6..2b4fdca162be 100644 --- a/tools/testing/selftests/bpf/progs/verifier_and.c +++ b/tools/testing/selftests/bpf/progs/verifier_and.c @@ -85,8 +85,14 @@ l0_%=: r0 = r0; \ SEC("socket") __description("check known subreg with unknown reg") -__success __failure_unpriv __msg_unpriv("R1 !read_ok") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if w0 < 0x1 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R1 !read_ok'` */ +__xlated_unpriv("goto pc-1") /* `r1 = *(u32*)(r1 + 512)`, sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void known_subreg_with_unknown_reg(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c index 67509c5d3982..7f4827eede3c 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena.c @@ -3,6 +3,7 @@ #define BPF_NO_KFUNC_PROTOTYPES #include <vmlinux.h> +#include <errno.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" @@ -114,6 +115,111 @@ int basic_alloc3(void *ctx) return 0; } +SEC("syscall") +__success __retval(0) +int basic_reserve1(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page) + return 1; + + page += __PAGE_SIZE; + + /* Reserve the second page */ + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret) + return 2; + + /* Try to explicitly allocate the reserved page. */ + page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0); + if (page) + return 3; + + /* Try to implicitly allocate the page (since there's only 2 of them). */ + page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (page) + return 4; +#endif + return 0; +} + +SEC("syscall") +__success __retval(0) +int basic_reserve2(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + page = arena_base(&arena); + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret) + return 1; + + page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0); + if ((u64)page) + return 2; +#endif + return 0; +} + +/* Reserve the same page twice, should return -EBUSY. 
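+ * A reservation claims the address range without populating it, so a
+ * second, overlapping reservation is expected to fail with -EBUSY just
+ * as it would for an already-allocated range.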
*/ +SEC("syscall") +__success __retval(0) +int reserve_twice(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + page = arena_base(&arena); + + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret) + return 1; + + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret != -EBUSY) + return 2; +#endif + return 0; +} + +/* Try to reserve past the end of the arena. */ +SEC("syscall") +__success __retval(0) +int reserve_invalid_region(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + /* Try a NULL pointer. */ + ret = bpf_arena_reserve_pages(&arena, NULL, 3); + if (ret != -EINVAL) + return 1; + + page = arena_base(&arena); + + ret = bpf_arena_reserve_pages(&arena, page, 3); + if (ret != -EINVAL) + return 2; + + ret = bpf_arena_reserve_pages(&arena, page, 4096); + if (ret != -EINVAL) + return 3; + + ret = bpf_arena_reserve_pages(&arena, page, (1ULL << 32) - 1); + if (ret != -EINVAL) + return 4; +#endif + return 0; +} + SEC("iter.s/bpf_map") __success __log_level(2) int iter_maps1(struct bpf_iter__bpf_map *ctx) diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c index f94f30cf1bb8..9dbdf123542d 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c @@ -67,6 +67,104 @@ int big_alloc1(void *ctx) return 0; } +/* Try to access a reserved page. Behavior should be identical with accessing unallocated pages. */ +SEC("syscall") +__success __retval(0) +int access_reserved(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile char __arena *page; + char __arena *base; + const size_t len = 4; + int ret, i; + + /* Get a separate region of the arena. */ + page = base = arena_base(&arena) + 16384 * PAGE_SIZE; + + ret = bpf_arena_reserve_pages(&arena, base, len); + if (ret) + return 1; + + /* Try to dirty reserved memory. */ + for (i = 0; i < len && can_loop; i++) + *page = 0x5a; + + for (i = 0; i < len && can_loop; i++) { + page = (volatile char __arena *)(base + i * PAGE_SIZE); + + /* + * Error out in case either the write went through, + * or the address has random garbage. + */ + if (*page == 0x5a) + return 2 + 2 * i; + + if (*page) + return 2 + 2 * i + 1; + } +#endif + return 0; +} + +/* Try to allocate a region overlapping with a reservation. */ +SEC("syscall") +__success __retval(0) +int request_partially_reserved(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile char __arena *page; + char __arena *base; + int ret; + + /* Add an arbitrary page offset. */ + page = base = arena_base(&arena) + 4096 * __PAGE_SIZE; + + ret = bpf_arena_reserve_pages(&arena, base + 3 * __PAGE_SIZE, 4); + if (ret) + return 1; + + page = bpf_arena_alloc_pages(&arena, base, 5, NUMA_NO_NODE, 0); + if ((u64)page != 0ULL) + return 2; +#endif + return 0; +} + +SEC("syscall") +__success __retval(0) +int free_reserved(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *addr; + char __arena *page; + int ret; + + /* Add an arbitrary page offset. */ + addr = arena_base(&arena) + 32768 * __PAGE_SIZE; + + page = bpf_arena_alloc_pages(&arena, addr, 2, NUMA_NO_NODE, 0); + if (!page) + return 1; + + ret = bpf_arena_reserve_pages(&arena, addr + 2 * __PAGE_SIZE, 2); + if (ret) + return 2; + + /* + * Reserved and allocated pages should be interchangeable for + * bpf_arena_free_pages(). Free a reserved and an allocated + * page with a single call. 
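+	 * Here addr + __PAGE_SIZE spans the second allocated page and the
+	 * first reserved page, so the single call below covers one page of
+	 * each kind.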
+ */ + bpf_arena_free_pages(&arena, addr + __PAGE_SIZE , 2); + + /* The free call above should have succeeded, so this allocation should too. */ + page = bpf_arena_alloc_pages(&arena, addr + __PAGE_SIZE, 2, NUMA_NO_NODE, 0); + if (!page) + return 3; +#endif + return 0; +} + #if defined(__BPF_FEATURE_ADDR_SPACE_CAST) #define PAGE_CNT 100 __u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */ diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c index 0eb33bb801b5..87a2c60d86e6 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bounds.c +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c @@ -2,6 +2,7 @@ /* Converted from tools/testing/selftests/bpf/verifier/bounds.c */ #include <linux/bpf.h> +#include <../../../include/linux/filter.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" @@ -620,8 +621,14 @@ l1_%=: exit; \ SEC("socket") __description("bounds check mixed 32bit and 64bit arithmetic. test1") -__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("exit") +#endif __naked void _32bit_and_64bit_arithmetic_test1(void) { asm volatile (" \ @@ -643,8 +650,14 @@ l1_%=: exit; \ SEC("socket") __description("bounds check mixed 32bit and 64bit arithmetic. test2") -__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("exit") +#endif __naked void _32bit_and_64bit_arithmetic_test2(void) { asm volatile (" \ @@ -691,9 +704,14 @@ l0_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg = 0, reg xor 1") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r1 != 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg_0_reg_xor_1(void) { asm volatile (" \ @@ -719,9 +737,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg32 = 0, reg32 xor 1") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if w1 != 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg32_0_reg32_xor_1(void) { asm volatile (" \ @@ -747,9 +770,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg = 2, reg xor 3") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r1 > 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") 
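+/*
+ * The switch from __failure_unpriv to __success_unpriv in the hunks
+ * above reflects Spectre v1 handling: rather than rejecting the
+ * program, the unprivileged verifier now inserts a nospec barrier and
+ * sanitizes the unreachable access as dead code, as the
+ * __xlated_unpriv patterns spell out.
+ */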
+#endif __naked void reg_2_reg_xor_3(void) { asm volatile (" \ @@ -829,9 +857,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg > 0, reg xor 3") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r1 >= 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg_0_reg_xor_3(void) { asm volatile (" \ @@ -858,9 +891,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg32 > 0, reg32 xor 3") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if w1 >= 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg32_0_reg32_xor_3(void) { asm volatile (" \ @@ -1028,7 +1066,7 @@ l0_%=: r0 = 0; \ SEC("xdp") __description("bound check with JMP_JSLT for crossing 64-bit signed boundary") __success __retval(0) -__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */ +__flag(BPF_F_TEST_REG_INVARIANTS) __naked void crossing_64_bit_signed_boundary_2(void) { asm volatile (" \ @@ -1334,4 +1372,300 @@ __naked void mult_sign_ovf(void) __imm(bpf_skb_store_bytes) : __clobber_all); } + +SEC("socket") +__description("64-bit addition, all outcomes overflow") +__success __log_level(2) +__msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)") +__retval(0) +__naked void add64_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r4 = r0;" + "r3 = 0xa000000000000000 ll;" + "r3 |= r4;" + "r3 += r3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("64-bit addition, partial overflow, result in unbounded reg") +__success __log_level(2) +__msg("4: (0f) r3 += r3 {{.*}} R3_w=scalar()") +__retval(0) +__naked void add64_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r4 = r0;" + "r3 = 2;" + "r3 |= r4;" + "r3 += r3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit addition overflow, all outcomes overflow") +__success __log_level(2) +__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void add32_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w4 = w0;" + "w3 = 0xa0000000;" + "w3 |= w4;" + "w3 += w3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit addition, partial overflow, result in unbounded u32 bounds") +__success __log_level(2) +__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void add32_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w4 = w0;" + "w3 = 2;" + "w3 |= w4;" + "w3 += w3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("64-bit subtraction, all outcomes underflow") +__success __log_level(2) +__msg("6: (1f) r3 -= 
r1 {{.*}} R3_w=scalar(umin=1,umax=0x8000000000000000)") +__retval(0) +__naked void sub64_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r1 = r0;" + "r2 = 0x8000000000000000 ll;" + "r1 |= r2;" + "r3 = 0;" + "r3 -= r1;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("64-bit subtraction, partial overflow, result in unbounded reg") +__success __log_level(2) +__msg("3: (1f) r3 -= r2 {{.*}} R3_w=scalar()") +__retval(0) +__naked void sub64_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r3 = r0;" + "r2 = 1;" + "r3 -= r2;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit subtraction overflow, all outcomes underflow") +__success __log_level(2) +__msg("5: (1c) w3 -= w1 {{.*}} R3_w=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void sub32_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w1 = w0;" + "w2 = 0x80000000;" + "w1 |= w2;" + "w3 = 0;" + "w3 -= w1;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit subtraction, partial overflow, result in unbounded u32 bounds") +__success __log_level(2) +__msg("3: (1c) w3 -= w2 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void sub32_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w3 = w0;" + "w2 = 1;" + "w3 -= w2;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("dead branch on jset, does not result in invariants violation error") +__success __log_level(2) +__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) +__naked void jset_range_analysis(void) +{ + asm volatile (" \ + call %[bpf_get_netns_cookie]; \ + if r0 == 0 goto l0_%=; \ + if r0 & 0xffffffff goto +0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges + * overlap on the negative side. At instruction 7, the ranges look as follows: + * + * 0 umin=0xfffffcf1 umax=0xff..ff6e U64_MAX + * | [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | + * |----------------------------|------------------------------| + * |xxxxxxxxxx] [xxxxxxxxxxxx| + * 0 smax=0xeffffeee smin=-655 -1 + * + * We should therefore deduce the following new bounds: + * + * 0 u64=[0xff..ffd71;0xff..ff6e] U64_MAX + * | [xxx] | + * |----------------------------|------------------------------| + * | [xxx] | + * 0 s64=[-655;-146] -1 + * + * Without the deduction cross sign boundary, we end up with an invariant + * violation error. 
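+ *
+ * As a sanity check on the constants: 0xfffffffffffffd71 is -655 and
+ * 0xffffffffffffff6e is -146 in two's complement, matching the
+ * smin/smax pair in the __msg expectation below.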
+ */ +SEC("socket") +__description("bounds deduction cross sign boundary, negative overlap") +__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS) +__msg("7: (1f) r0 -= r6 {{.*}} R0=scalar(smin=smin32=-655,smax=smax32=-146,umin=0xfffffffffffffd71,umax=0xffffffffffffff6e,umin32=0xfffffd71,umax32=0xffffff6e,var_off=(0xfffffffffffffc00; 0x3ff))") +__retval(0) +__naked void bounds_deduct_negative_overlap(void) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + w3 = w0; \ + w6 = (s8)w0; \ + r0 = (s8)r0; \ + if w6 >= 0xf0000000 goto l0_%=; \ + r0 += r6; \ + r6 += 400; \ + r0 -= r6; \ + if r3 < r0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges + * overlap on the positive side. At instruction 3, the ranges look as follows: + * + * 0 umin=0 umax=0xffffffffffffff00 U64_MAX + * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | + * |----------------------------|------------------------------| + * |xxxxxxxx] [xxxxxxxx| + * 0 smax=127 smin=-128 -1 + * + * We should therefore deduce the following new bounds: + * + * 0 u64=[0;127] U64_MAX + * [xxxxxxxx] | + * |----------------------------|------------------------------| + * [xxxxxxxx] | + * 0 s64=[0;127] -1 + * + * Without the deduction cross sign boundary, the program is rejected due to + * the frame pointer write. + */ +SEC("socket") +__description("bounds deduction cross sign boundary, positive overlap") +__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS) +__msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=127,var_off=(0x0; 0x7f))") +__retval(0) +__naked void bounds_deduct_positive_overlap(void) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + r0 = (s8)r0; \ + r1 = 0xffffffffffffff00; \ + if r0 > r1 goto l0_%=; \ + if r0 < 128 goto l0_%=; \ + r10 = 0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* This test is the same as above, but the s64 and u64 ranges overlap in two + * places. At instruction 3, the ranges look as follows: + * + * 0 umin=0 umax=0xffffffffffffff80 U64_MAX + * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | + * |----------------------------|------------------------------| + * |xxxxxxxx] [xxxxxxxx| + * 0 smax=127 smin=-128 -1 + * + * 0xffffffffffffff80 = (u64)-128. We therefore can't deduce anything new and + * the program should fail due to the frame pointer write. 
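+ * Both the [0; 127] and [smin; -1] pieces of the s64 range stay
+ * feasible under umax = 0xffffffffffffff80, so neither bound can be
+ * tightened and the r10 write remains reachable.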
+ */ +SEC("socket") +__description("bounds deduction cross sign boundary, two overlaps") +__failure __flag(BPF_F_TEST_REG_INVARIANTS) +__msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)") +__msg("frame pointer is read only") +__naked void bounds_deduct_two_overlaps(void) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + r0 = (s8)r0; \ + r1 = 0xffffffffffffff80; \ + if r0 > r1 goto l0_%=; \ + if r0 < 128 goto l0_%=; \ + r10 = 0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c index c506afbdd936..260a6df264e3 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c @@ -159,13 +159,16 @@ __failure_unpriv __naked void deducing_bounds_from_const_10(void) { asm volatile (" \ + r6 = r1; \ r0 = 0; \ if r0 s<= 0 goto l0_%=; \ -l0_%=: /* Marks reg as unknown. */ \ - r0 = -r0; \ - r0 -= r1; \ +l0_%=: /* Marks r0 as unknown. */ \ + call %[bpf_get_prandom_u32]; \ + r0 -= r6; \ exit; \ -" ::: __clobber_all); +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); } char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c index a83809a1dbbf..0450840c92d9 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ctx.c +++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c @@ -218,4 +218,29 @@ __naked void null_check_8_null_bind(void) : __clobber_all); } +#define narrow_load(type, ctx, field) \ + SEC(type) \ + __description("narrow load on field " #field " of " #ctx) \ + __failure __msg("invalid bpf_context access") \ + __naked void invalid_narrow_load##ctx##field(void) \ + { \ + asm volatile (" \ + r1 = *(u32 *)(r1 + %[off]); \ + r0 = 0; \ + exit;" \ + : \ + : __imm_const(off, offsetof(struct ctx, field) + 4) \ + : __clobber_all); \ + } + +narrow_load("cgroup/getsockopt", bpf_sockopt, sk); +narrow_load("cgroup/getsockopt", bpf_sockopt, optval); +narrow_load("cgroup/getsockopt", bpf_sockopt, optval_end); +narrow_load("tc", __sk_buff, sk); +narrow_load("cgroup/bind4", bpf_sock_addr, sk); +narrow_load("sockops", bpf_sock_ops, sk); +narrow_load("sockops", bpf_sock_ops, skb_data); +narrow_load("sockops", bpf_sock_ops, skb_data_end); +narrow_load("sockops", bpf_sock_ops, skb_hwtstamp); + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c index 458984da804c..34e0c012ee76 100644 --- a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c +++ b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c @@ -77,7 +77,7 @@ l0_%=: exit; \ SEC("tc") __description("MOD32 overflow, check 1") -__success __retval(INT_MIN) +__success __retval(_INT_MIN) __naked void mod32_overflow_check_1(void) { asm volatile (" \ @@ -92,7 +92,7 @@ __naked void mod32_overflow_check_1(void) SEC("tc") __description("MOD32 overflow, check 2") -__success __retval(INT_MIN) +__success __retval(_INT_MIN) __naked void mod32_overflow_check_2(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c index 4ab0ef18d7eb..181da86ba5f0 100644 --- 
diff --git a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c
index 458984da804c..34e0c012ee76 100644
--- a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c
+++ b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c
@@ -77,7 +77,7 @@ l0_%=:	exit;					\
 
 SEC("tc")
 __description("MOD32 overflow, check 1")
-__success __retval(INT_MIN)
+__success __retval(_INT_MIN)
 __naked void mod32_overflow_check_1(void)
 {
 	asm volatile ("					\
@@ -92,7 +92,7 @@ __naked void mod32_overflow_check_1(void)
 
 SEC("tc")
 __description("MOD32 overflow, check 2")
-__success __retval(INT_MIN)
+__success __retval(_INT_MIN)
 __naked void mod32_overflow_check_2(void)
 {
 	asm volatile ("					\
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
index 4ab0ef18d7eb..181da86ba5f0 100644
--- a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
+++ b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
@@ -179,4 +179,132 @@ int BPF_PROG(trusted_acq_rel, struct task_struct *task, u64 clone_flags)
 	return subprog_trusted_acq_rel(task);
 }
 
+__weak int subprog_untrusted_bad_tags(struct task_struct *task __arg_untrusted __arg_nullable)
+{
+	return task->pid;
+}
+
+SEC("tp_btf/sys_enter")
+__failure
+__msg("arg#0 untrusted cannot be combined with any other tags")
+int untrusted_bad_tags(void *ctx)
+{
+	return subprog_untrusted_bad_tags(0);
+}
+
+struct local_type_wont_be_accepted {};
+
+__weak int subprog_untrusted_bad_type(struct local_type_wont_be_accepted *p __arg_untrusted)
+{
+	return 0;
+}
+
+SEC("tp_btf/sys_enter")
+__failure
+__msg("arg#0 reference type('STRUCT local_type_wont_be_accepted') has no matches")
+int untrusted_bad_type(void *ctx)
+{
+	return subprog_untrusted_bad_type(bpf_rdonly_cast(0, 0));
+}
+
+__weak int subprog_untrusted(const volatile struct task_struct *restrict task __arg_untrusted)
+{
+	return task->pid;
+}
+
+SEC("tp_btf/sys_enter")
+__success
+__log_level(2)
+__msg("r1 = {{.*}}; {{.*}}R1_w=trusted_ptr_task_struct()")
+__msg("Func#1 ('subprog_untrusted') is global and assumed valid.")
+__msg("Validating subprog_untrusted() func#1...")
+__msg(": R1=untrusted_ptr_task_struct")
+int trusted_to_untrusted(void *ctx)
+{
+	return subprog_untrusted(bpf_get_current_task_btf());
+}
+
+char mem[16];
+u32 off;
+
+SEC("tp_btf/sys_enter")
+__success
+int anything_to_untrusted(void *ctx)
+{
+	/* untrusted to untrusted */
+	subprog_untrusted(bpf_core_cast(0, struct task_struct));
+	/* wrong type to untrusted */
+	subprog_untrusted((void *)bpf_core_cast(0, struct bpf_verifier_env));
+	/* map value to untrusted */
+	subprog_untrusted((void *)mem);
+	/* scalar to untrusted */
+	subprog_untrusted(0);
+	/* variable offset to untrusted (map) */
+	subprog_untrusted((void *)mem + off);
+	/* variable offset to untrusted (trusted) */
+	subprog_untrusted((void *)bpf_get_current_task_btf() + off);
+	return 0;
+}
+
+__weak int subprog_untrusted2(struct task_struct *task __arg_untrusted)
+{
+	return subprog_trusted_task_nullable(task);
+}
+
+SEC("tp_btf/sys_enter")
+__failure
+__msg("R1 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_")
+__msg("Caller passes invalid args into func#{{.*}} ('subprog_trusted_task_nullable')")
+int untrusted_to_trusted(void *ctx)
+{
+	return subprog_untrusted2(bpf_get_current_task_btf());
+}
+
+__weak int subprog_void_untrusted(void *p __arg_untrusted)
+{
+	return *(int *)p;
+}
+
+__weak int subprog_char_untrusted(char *p __arg_untrusted)
+{
+	return *(int *)p;
+}
+
+__weak int subprog_enum_untrusted(enum bpf_attach_type *p __arg_untrusted)
+{
+	return *(int *)p;
+}
+
+SEC("tp_btf/sys_enter")
+__success
+__log_level(2)
+__msg("r1 = {{.*}}; {{.*}}R1_w=trusted_ptr_task_struct()")
+__msg("Func#1 ('subprog_void_untrusted') is global and assumed valid.")
+__msg("Validating subprog_void_untrusted() func#1...")
+__msg(": R1=rdonly_untrusted_mem(sz=0)")
+int trusted_to_untrusted_mem(void *ctx)
+{
+	return subprog_void_untrusted(bpf_get_current_task_btf());
+}
+
+SEC("tp_btf/sys_enter")
+__success
+int anything_to_untrusted_mem(void *ctx)
+{
+	/* untrusted to untrusted mem */
+	subprog_void_untrusted(bpf_core_cast(0, struct task_struct));
+	/* map value to untrusted mem */
+	subprog_void_untrusted(mem);
+	/* scalar to untrusted mem */
+	subprog_void_untrusted(0);
+	/* variable offset to untrusted mem (map) */
+	subprog_void_untrusted((void *)mem + off);
+	/* variable offset to untrusted mem (trusted) */
+	subprog_void_untrusted(bpf_get_current_task_btf() + off);
+	/* variable offset to untrusted char/enum (map) */
+	subprog_char_untrusted(mem + off);
+	subprog_enum_untrusted((void *)mem + off);
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
index 7d088ba99ea5..16b761e510f0 100644
--- a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
@@ -139,4 +139,122 @@ __naked void on_the_inner_map_pointer(void)
 	: __clobber_all);
 }
 
+SEC("socket")
+__description("map_ptr is never null")
+__success
+__naked void map_ptr_is_never_null(void)
+{
+	asm volatile ("					\
+	r0 = 0;					\
+	r1 = %[map_in_map] ll;			\
+	if r1 != 0 goto l0_%=;			\
+	r10 = 42;				\
+l0_%=:	exit;					\
+"	:
+	: __imm(bpf_map_lookup_elem),
+	  __imm_addr(map_in_map)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("map_ptr is never null inner")
+__success
+__naked void map_ptr_is_never_null_inner(void)
+{
+	asm volatile ("					\
+	r1 = 0;					\
+	*(u32*)(r10 - 4) = r1;			\
+	r2 = r10;				\
+	r2 += -4;				\
+	r1 = %[map_in_map] ll;			\
+	call %[bpf_map_lookup_elem];		\
+	if r0 == 0 goto l0_%=;			\
+	if r0 != 0 goto l0_%=;			\
+	r10 = 42;				\
+l0_%=:	exit;					\
+"	:
+	: __imm(bpf_map_lookup_elem),
+	  __imm_addr(map_in_map)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("map_ptr is never null inner spill fill")
+__success
+__naked void map_ptr_is_never_null_inner_spill_fill(void)
+{
+	asm volatile ("					\
+	r1 = 0;					\
+	*(u32*)(r10 - 4) = r1;			\
+	r2 = r10;				\
+	r2 += -4;				\
+	r1 = %[map_in_map] ll;			\
+	call %[bpf_map_lookup_elem];		\
+	if r0 != 0 goto l0_%=;			\
+	exit;					\
+l0_%=:	*(u64 *)(r10 -16) = r0;			\
+	r1 = *(u64 *)(r10 -16);			\
+	if r1 == 0 goto l1_%=;			\
+	exit;					\
+l1_%=:	r10 = 42;				\
+	exit;					\
+"	:
+	: __imm(bpf_map_lookup_elem),
+	  __imm_addr(map_in_map)
+	: __clobber_all);
+}
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, int);
+	__array(values, struct {
+		__uint(type, BPF_MAP_TYPE_RINGBUF);
+		__uint(max_entries, 64 * 1024);
+	});
+} rb_in_map SEC(".maps");
+
+struct rb_ctx {
+	void *rb;
+	struct bpf_dynptr dptr;
+};
+
+static __always_inline struct rb_ctx __rb_event_reserve(__u32 sz)
+{
+	struct rb_ctx rb_ctx = {};
+	void *rb;
+	__u32 cpu = bpf_get_smp_processor_id();
+	__u32 rb_slot = cpu & 1;
+
+	rb = bpf_map_lookup_elem(&rb_in_map, &rb_slot);
+	if (!rb)
+		return rb_ctx;
+
+	rb_ctx.rb = rb;
+	bpf_ringbuf_reserve_dynptr(rb, sz, 0, &rb_ctx.dptr);
+
+	return rb_ctx;
+}
+
+static __noinline void __rb_event_submit(struct rb_ctx *ctx)
+{
+	if (!ctx->rb)
+		return;
+
+	/* If the verifier (incorrectly) concludes that ctx->rb can be
+	 * NULL at this point, we'll get "BPF_EXIT instruction in main
+	 * prog would lead to reference leak" error
+	 */
+	bpf_ringbuf_submit_dynptr(&ctx->dptr, 0);
+}
+
+SEC("socket")
+int map_ptr_is_never_null_rb(void *ctx)
+{
+	struct rb_ctx event_ctx = __rb_event_reserve(256);
+	__rb_event_submit(&event_ctx);
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index 994bbc346d25..a4d8814eb5ed 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -245,7 +245,13 @@ l0_%=:						\
 SEC("socket")
 __description("MOV32SX, S8, var_off not u32_max, positive after s8 extension")
 __success __retval(0)
-__failure_unpriv __msg_unpriv("frame pointer is read only")
+__success_unpriv
+#ifdef SPEC_V1
+__xlated_unpriv("w0 = 0")
+__xlated_unpriv("exit")
+__xlated_unpriv("nospec") /* inserted to prevent `frame pointer is read only` */
+__xlated_unpriv("goto pc-1")
+#endif
 __naked void mov64sx_s32_varoff_2(void)
 {
 	asm volatile ("					\
@@ -267,7 +273,13 @@ l0_%=:						\
 SEC("socket")
 __description("MOV32SX, S8, var_off not u32_max, negative after s8 extension")
 __success __retval(0)
-__failure_unpriv __msg_unpriv("frame pointer is read only")
+__success_unpriv
+#ifdef SPEC_V1
+__xlated_unpriv("w0 = 0")
+__xlated_unpriv("exit")
+__xlated_unpriv("nospec") /* inserted to prevent `frame pointer is read only` */
+__xlated_unpriv("goto pc-1")
+#endif
 __naked void mov64sx_s32_varoff_3(void)
 {
 	asm volatile ("					\
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
index 9fe5d255ee37..73fee2aec698 100644
--- a/tools/testing/selftests/bpf/progs/verifier_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -231,4 +231,74 @@ __naked void bpf_cond_op_not_r10(void)
 	::: __clobber_all);
 }
 
+SEC("lsm.s/socket_connect")
+__success __log_level(2)
+__msg("0: (b7) r0 = 1 ; R0_w=1")
+__msg("1: (84) w0 = -w0 ; R0_w=0xffffffff")
+__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 1: (84) w0 = -w0")
+__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
+__naked int bpf_neg_2(void)
+{
+	/*
+	 * lsm.s/socket_connect requires a return value within [-4095, 0].
+	 * Returning -1 is allowed
+	 */
+	asm volatile (
+	"r0 = 1;"
+	"w0 = -w0;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm.s/socket_connect")
+__failure __msg("At program exit the register R0 has")
+__naked int bpf_neg_3(void)
+{
+	/*
+	 * lsm.s/socket_connect requires a return value within [-4095, 0].
+	 * Returning -10000 is not allowed.
+	 */
+	asm volatile (
+	"r0 = 10000;"
+	"w0 = -w0;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm.s/socket_connect")
+__success __log_level(2)
+__msg("0: (b7) r0 = 1 ; R0_w=1")
+__msg("1: (87) r0 = -r0 ; R0_w=-1")
+__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 1: (87) r0 = -r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
+__naked int bpf_neg_4(void)
+{
+	/*
+	 * lsm.s/socket_connect requires a return value within [-4095, 0].
+	 * Returning -1 is allowed
+	 */
+	asm volatile (
+	"r0 = 1;"
+	"r0 = -r0;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm.s/socket_connect")
+__failure __msg("At program exit the register R0 has")
+__naked int bpf_neg_5(void)
+{
+	/*
+	 * lsm.s/socket_connect requires a return value within [-4095, 0].
+	 * Returning -10000 is not allowed.
+	 */
+	asm volatile (
+	"r0 = 10000;"
+	"r0 = -r0;"
+	"exit;"
+	::: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
index fc91b414364e..1ecd34ebde19 100644
--- a/tools/testing/selftests/bpf/progs/verifier_private_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
@@ -8,7 +8,7 @@
 /* From include/linux/filter.h */
 #define MAX_BPF_STACK 512
 
-#if defined(__TARGET_ARCH_x86)
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
 
 struct elem {
 	struct bpf_timer t;
@@ -30,6 +30,18 @@ __jited(" movabsq $0x{{.*}}, %r9")
 __jited(" addq %gs:{{.*}}, %r9")
 __jited(" movl $0x2a, %edi")
 __jited(" movq %rdi, -0x100(%r9)")
+__arch_arm64
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited("...")
+__jited(" ldp x25, x27, [sp], {{.*}}")
 __naked void private_stack_single_prog(void)
 {
 	asm volatile ("					\
@@ -45,6 +57,9 @@ __description("No private stack")
 __success
 __arch_x86_64
 __jited(" subq $0x8, %rsp")
+__arch_arm64
+__jited(" mov x25, sp")
+__jited(" sub sp, sp, #0x10")
 __naked void no_private_stack_nested(void)
 {
 	asm volatile ("					\
@@ -81,6 +96,19 @@ __jited(" pushq %r9")
 __jited(" callq 0x{{.*}}")
 __jited(" popq %r9")
 __jited(" xorl %eax, %eax")
+__arch_arm64
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl {{.*}}")
+__jited("...")
+__jited(" ldp x25, x27, [sp], {{.*}}")
 __naked void private_stack_nested_1(void)
 {
 	asm volatile ("					\
@@ -131,6 +159,24 @@ __jited(" movq %rdi, -0x200(%r9)")
 __jited(" pushq %r9")
 __jited(" callq")
 __jited(" popq %r9")
+__arch_arm64
+__jited("func #1")
+__jited("...")
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" mov x7, #0x0")
+__jited(" ldp x25, x27, [sp], {{.*}}")
 __naked void private_stack_callback(void)
 {
 	asm volatile ("					\
@@ -154,6 +200,28 @@ __arch_x86_64
 __jited(" pushq %r9")
 __jited(" callq")
 __jited(" popq %r9")
+__arch_arm64
+__jited(" stp x29, x30, [sp, #-0x10]!")
+__jited(" mov x29, sp")
+__jited(" stp xzr, x26, [sp, #-0x10]!")
+__jited(" mov x26, sp")
+__jited(" stp x19, x20, [sp, #-0x10]!")
+__jited(" stp x21, x22, [sp, #-0x10]!")
+__jited(" stp x23, x24, [sp, #-0x10]!")
+__jited(" stp x25, x26, [sp, #-0x10]!")
+__jited(" stp x27, x28, [sp, #-0x10]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" mov x0, #0x0")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" ldp x27, x28, [sp], #0x10")
 int private_stack_exception_main_prog(void)
 {
 	asm volatile ("					\
@@ -179,6 +247,19 @@ __jited(" movq %rdi, -0x200(%r9)")
 __jited(" pushq %r9")
 __jited(" callq")
 __jited(" popq %r9")
+__arch_arm64
+__jited(" stp x27, x28, [sp, #-0x10]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" ldp x27, x28, [sp], #0x10")
 int private_stack_exception_sub_prog(void)
 {
 	asm volatile ("					\
@@ -220,6 +301,10 @@ __description("Private stack, async callback, not nested")
 __success __retval(0)
 __arch_x86_64
 __jited(" movabsq $0x{{.*}}, %r9")
+__arch_arm64
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
 int private_stack_async_callback_1(void)
 {
 	struct bpf_timer *arr_timer;
@@ -241,6 +326,8 @@ __description("Private stack, async callback, potential nesting")
 __success __retval(0)
 __arch_x86_64
 __jited(" subq $0x100, %rsp")
+__arch_arm64
+__jited(" sub sp, sp, #0x100")
 int private_stack_async_callback_2(void)
 {
 	struct bpf_timer *arr_timer;
diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
index 683a882b3e6d..910365201f68 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
@@ -27,7 +27,7 @@ struct bpf_key {} __attribute__((preserve_access_index));
 
 extern void bpf_key_put(struct bpf_key *key) __ksym;
 extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
-extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
+extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
 
 /* BTF FUNC records are not generated for kfuncs referenced
  * from inline assembly. These records are necessary for
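The __u32 to __s32 change to bpf_lookup_user_key() matches the kernel's signed key_serial_t: the KEY_SPEC_* shortcut IDs are negative (KEY_SPEC_THREAD_KEYRING is -1, KEY_SPEC_PROCESS_KEYRING -2, KEY_SPEC_SESSION_KEYRING -3), so a program must be able to pass a negative serial. A sketch of such a lookup in a sleepable program (the hook choice is illustrative; the kfunc is sleepable-only):

extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
extern void bpf_key_put(struct bpf_key *key) __ksym;

SEC("lsm.s/socket_connect")
int BPF_PROG(peek_session_keyring)
{
	struct bpf_key *k;

	k = bpf_lookup_user_key(-3 /* KEY_SPEC_SESSION_KEYRING */, 0);
	if (k)
		bpf_key_put(k);	/* lookups return a reference that must be put */
	return 0;
}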
diff --git a/tools/testing/selftests/bpf/progs/verifier_tailcall.c b/tools/testing/selftests/bpf/progs/verifier_tailcall.c
new file mode 100644
index 000000000000..b4acce60fb9b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_tailcall.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u32);
+} map_array SEC(".maps");
+
+SEC("socket")
+__description("invalid map type for tail call")
+__failure __msg("expected prog array map for tail call")
+__failure_unpriv
+__naked void invalid_map_for_tail_call(void)
+{
+	asm volatile ("					\
+	r2 = %[map_array] ll;			\
+	r3 = 0;					\
+	call %[bpf_tail_call];			\
+	exit;					\
+"	:
+	: __imm(bpf_tail_call),
+	  __imm_addr(map_array)
+	: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
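For contrast with the rejected program above, the accepted pattern passes a BPF_MAP_TYPE_PROG_ARRAY to bpf_tail_call(). A minimal sketch using the standard libbpf idiom (map and program names are illustrative):

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} jmp_table SEC(".maps");

SEC("socket")
int do_tail_call(void *ctx)
{
	/* only returns here if slot 0 is empty or the call fails */
	bpf_tail_call(ctx, &jmp_table, 0);
	return 0;
}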
diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
index a4a5e2071604..28b4f7035ceb 100644
--- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
@@ -572,8 +572,14 @@ l0_%=:	exit;					\
 
 SEC("socket")
 __description("alu32: mov u32 const")
-__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r0 == 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R7 invalid mem access 'scalar'` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("exit")
+#endif
 __naked void alu32_mov_u32_const(void)
 {
 	asm volatile ("					\
@@ -619,12 +625,11 @@ __naked void pass_pointer_to_tail_call(void)
 
 SEC("socket")
 __description("unpriv: cmp map pointer with zero")
-__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
+__success __success_unpriv
+__retval(0)
 __naked void cmp_map_pointer_with_zero(void)
 {
 	asm volatile ("					\
-	r1 = 0;					\
 	r1 = %[map_hash_8b] ll;			\
 	if r1 == 0 goto l0_%=;			\
l0_%=:	r0 = 0;					\
 	exit;					\
"	:
	: __imm_addr(map_hash_8b)
	: __clobber_all);
 }
 
 SEC("socket")
@@ -635,6 +640,22 @@ l0_%=:	r0 = 0;					\
+__description("unpriv: cmp map pointer with const")
+__success __failure_unpriv __msg_unpriv("R1 pointer comparison prohibited")
+__retval(0)
+__naked void cmp_map_pointer_with_const(void)
+{
+	asm volatile ("					\
+	r1 = %[map_hash_8b] ll;			\
+	if r1 == 0x0000beef goto l0_%=;		\
+l0_%=:	r0 = 0;					\
+	exit;					\
+"	:
+	: __imm_addr(map_hash_8b)
+	: __clobber_all);
+}
+
+SEC("socket")
 __description("unpriv: write into frame pointer")
 __failure __msg("frame pointer is read only")
 __failure_unpriv
@@ -723,4 +744,210 @@ l0_%=:	r0 = 0;					\
 " ::: __clobber_all);
 }
 
+SEC("socket")
+__description("unpriv: Spectre v1 path-based type confusion of scalar as stack-ptr")
+__success __success_unpriv __retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r0 != 0x1 goto pc+2")
+/* This nospec prevents the exploit because it forces the mispredicted (not
+ * taken) `if r0 != 0x0 goto l0_%=` to resolve before r6 is used as a pointer.
+ * This causes the CPU to realize that `r6 = r9` should never have executed. It
+ * ensures that r6 always contains a readable stack slot ptr when the insn
+ * after the nospec executes.
+ */
+__xlated_unpriv("nospec")
+__xlated_unpriv("r9 = *(u8 *)(r6 +0)")
+#endif
+__naked void unpriv_spec_v1_type_confusion(void)
+{
+	asm volatile ("					\
+	r1 = 0;					\
+	*(u64*)(r10 - 8) = r1;			\
+	r2 = r10;				\
+	r2 += -8;				\
+	r1 = %[map_hash_8b] ll;			\
+	call %[bpf_map_lookup_elem];		\
+	if r0 == 0 goto l2_%=;			\
+	/* r0: pointer to a map array entry */	\
+	r2 = r10;				\
+	r2 += -8;				\
+	r1 = %[map_hash_8b] ll;			\
+	/* r1, r2: prepared call args */	\
+	r6 = r10;				\
+	r6 += -8;				\
+	/* r6: pointer to readable stack slot */ \
+	r9 = 0xffffc900;			\
+	r9 <<= 32;				\
+	/* r9: scalar controlled by attacker */	\
+	r0 = *(u64 *)(r0 + 0); /* cache miss */	\
+	if r0 != 0x0 goto l0_%=;		\
+	r6 = r9;				\
+l0_%=:	if r0 != 0x1 goto l1_%=;		\
+	r9 = *(u8 *)(r6 + 0);			\
+l1_%=:	/* leak r9 */				\
+	r9 &= 1;				\
+	r9 <<= 9;				\
+	*(u64*)(r10 - 8) = r9;			\
+	call %[bpf_map_lookup_elem];		\
+	if r0 == 0 goto l2_%=;			\
+	/* leak secret into is_cached(map[0|512]): */ \
+	r0 = *(u64 *)(r0 + 0);			\
+l2_%=:						\
+	r0 = 0;					\
+	exit;					\
+"	:
+	: __imm(bpf_map_lookup_elem),
+	  __imm_addr(map_hash_8b)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: ldimm64 before Spectre v4 barrier")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V4
+__xlated_unpriv("r1 = 0x2020200005642020") /* should not matter */
+__xlated_unpriv("*(u64 *)(r10 -8) = r1")
+__xlated_unpriv("nospec")
+#endif
+__naked void unpriv_ldimm64_spectre_v4(void)
+{
+	asm volatile ("					\
+	r1 = 0x2020200005642020 ll;		\
+	*(u64 *)(r10 -8) = r1;			\
+	r0 = 0;					\
+	exit;					\
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: Spectre v1 and v4 barrier")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+#ifdef SPEC_V4
+/* starts with r0 == r8 == r9 == 0 */
+__xlated_unpriv("if r8 != 0x0 goto pc+1")
+__xlated_unpriv("goto pc+2")
+__xlated_unpriv("if r9 == 0x0 goto pc+4")
+__xlated_unpriv("r2 = r0")
+/* The following nospec is required to prevent the dangerous `*(u64 *)(NOT_FP
+ * -64) = r1` from executing speculatively when `if r9 == 0 goto pc+4` is
+ * mispredicted because of Spectre v1. The test therefore ensures the
+ * Spectre-v4--induced nospec does not prevent the Spectre-v1--induced
+ * speculative path from being fully analyzed.
+ */
+__xlated_unpriv("nospec") /* Spectre v1 */
+__xlated_unpriv("*(u64 *)(r2 -64) = r1") /* could be used to leak r2 */
+__xlated_unpriv("nospec") /* Spectre v4 */
+#endif
+#endif
+__naked void unpriv_spectre_v1_and_v4(void)
+{
+	asm volatile ("					\
+	r1 = 0;					\
+	*(u64*)(r10 - 8) = r1;			\
+	r2 = r10;				\
+	r2 += -8;				\
+	r1 = %[map_hash_8b] ll;			\
+	call %[bpf_map_lookup_elem];		\
+	r8 = r0;				\
+	r2 = r10;				\
+	r2 += -8;				\
+	r1 = %[map_hash_8b] ll;			\
+	call %[bpf_map_lookup_elem];		\
+	r9 = r0;				\
+	r0 = r10;				\
+	r1 = 0;					\
+	r2 = r10;				\
+	if r8 != 0 goto l0_%=;			\
+	if r9 != 0 goto l0_%=;			\
+	r0 = 0;					\
+l0_%=:	if r8 != 0 goto l1_%=;			\
+	goto l2_%=;				\
+l1_%=:	if r9 == 0 goto l3_%=;			\
+	r2 = r0;				\
+l2_%=:	*(u64 *)(r2 -64) = r1;			\
+l3_%=:	r0 = 0;					\
+	exit;					\
+"	:
+	: __imm(bpf_map_lookup_elem),
+	  __imm_addr(map_hash_8b)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: Spectre v1 and v4 barrier (simple)")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+#ifdef SPEC_V4
+__xlated_unpriv("if r8 != 0x0 goto pc+1")
+__xlated_unpriv("goto pc+2")
+__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
+__xlated_unpriv("goto pc-1") /* r2 = r0 */
+__xlated_unpriv("nospec")
+__xlated_unpriv("*(u64 *)(r2 -64) = r1")
+__xlated_unpriv("nospec")
+#endif
+#endif
+__naked void unpriv_spectre_v1_and_v4_simple(void)
+{
+	asm volatile ("					\
+	r8 = 0;					\
+	r9 = 0;					\
+	r0 = r10;				\
+	r1 = 0;					\
+	r2 = r10;				\
+	if r8 != 0 goto l0_%=;			\
+	if r9 != 0 goto l0_%=;			\
+	r0 = 0;					\
+l0_%=:	if r8 != 0 goto l1_%=;			\
+	goto l2_%=;				\
+l1_%=:	if r9 == 0 goto l3_%=;			\
+	r2 = r0;				\
+l2_%=:	*(u64 *)(r2 -64) = r1;			\
+l3_%=:	r0 = 0;					\
+	exit;					\
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: ldimm64 before Spectre v1 and v4 barrier (simple)")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+#ifdef SPEC_V4
+__xlated_unpriv("if r8 != 0x0 goto pc+1")
+__xlated_unpriv("goto pc+4")
+__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
+__xlated_unpriv("goto pc-1") /* r2 = r0 */
+__xlated_unpriv("goto pc-1") /* r1 = 0x2020200005642020 ll */
+__xlated_unpriv("goto pc-1") /* second part of ldimm64 */
+__xlated_unpriv("nospec")
+__xlated_unpriv("*(u64 *)(r2 -64) = r1")
+__xlated_unpriv("nospec")
+#endif
+#endif
+__naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void)
+{
+	asm volatile ("					\
+	r8 = 0;					\
+	r9 = 0;					\
+	r0 = r10;				\
+	r1 = 0;					\
+	r2 = r10;				\
+	if r8 != 0 goto l0_%=;			\
+	if r9 != 0 goto l0_%=;			\
+	r0 = 0;					\
+l0_%=:	if r8 != 0 goto l1_%=;			\
+	goto l2_%=;				\
+l1_%=:	if r9 == 0 goto l3_%=;			\
+	r2 = r0;				\
+	r1 = 0x2020200005642020 ll;		\
+l2_%=:	*(u64 *)(r2 -64) = r1;			\
+l3_%=:	r0 = 0;					\
+	exit;					\
+" ::: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
index 5ba6e53571c8..af7938ce56cb 100644
--- a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
@@ -231,6 +231,10 @@ __retval(1)
 __naked void ptr_unknown_vs_unknown_lt(void)
 {
 	asm volatile ("					\
+	r8 = r1;				\
+	call %[bpf_get_prandom_u32];		\
+	r9 = r0;				\
+	r1 = r8;				\
 	r0 = *(u32*)(r1 + %[__sk_buff_len]);	\
 	r1 = 0;					\
 	*(u64*)(r10 - 8) = r1;			\
@@ -245,11 +249,11 @@ l1_%=:	call %[bpf_map_lookup_elem];		\
 	r4 = *(u8*)(r0 + 0);			\
 	if r4 == 1 goto l3_%=;			\
 	r1 = 6;					\
-	r1 = -r1;				\
+	r1 = r9;				\
 	r1 &= 0x3;				\
 	goto l4_%=;				\
 l3_%=:	r1 = 6;					\
-	r1 = -r1;				\
+	r1 = r9;				\
 	r1 &= 0x7;				\
 l4_%=:	r1 += r0;				\
 	r0 = *(u8*)(r1 + 0);			\
@@ -259,7 +263,8 @@ l2_%=:	r0 = 1;					\
 	: __imm(bpf_map_lookup_elem),
 	  __imm_addr(map_array_48b),
 	  __imm_addr(map_hash_16b),
-	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+	  __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
 
@@ -271,6 +276,10 @@ __retval(1)
 __naked void ptr_unknown_vs_unknown_gt(void)
 {
 	asm volatile ("					\
+	r8 = r1;				\
+	call %[bpf_get_prandom_u32];		\
+	r9 = r0;				\
+	r1 = r8;				\
 	r0 = *(u32*)(r1 + %[__sk_buff_len]);	\
 	r1 = 0;					\
 	*(u64*)(r10 - 8) = r1;			\
@@ -285,11 +294,11 @@ l1_%=:	call %[bpf_map_lookup_elem];		\
 	r4 = *(u8*)(r0 + 0);			\
 	if r4 == 1 goto l3_%=;			\
 	r1 = 6;					\
-	r1 = -r1;				\
+	r1 = r9;				\
 	r1 &= 0x7;				\
 	goto l4_%=;				\
 l3_%=:	r1 = 6;					\
-	r1 = -r1;				\
+	r1 = r9;				\
 	r1 &= 0x3;				\
 l4_%=:	r1 += r0;				\
 	r0 = *(u8*)(r1 + 0);			\
@@ -299,7 +308,8 @@ l2_%=:	r0 = 1;					\
 	: __imm(bpf_map_lookup_elem),
 	  __imm_addr(map_array_48b),
 	  __imm_addr(map_hash_16b),
-	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+	  __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
 
@@ -398,7 +408,8 @@ l2_%=:	r0 = 1;					\
 
 SEC("socket")
 __description("map access: mixing value pointer and scalar, 1")
-__success __failure_unpriv __msg_unpriv("R2 pointer comparison prohibited")
+__success __failure_unpriv
+__msg_unpriv("R2 tried to add from different maps, paths or scalars, pointer arithmetic with it prohibited for !root")
 __retval(0)
 __naked void value_pointer_and_scalar_1(void)
 {
@@ -433,6 +444,7 @@ l2_%=:	/* common instruction */		\
 l3_%=:	/* branch B */				\
 	r0 = 0x13371337;			\
 	/* verifier follows fall-through */	\
+	/* unpriv: nospec (inserted to prevent `R2 pointer comparison prohibited`) */\
 	if r2 != 0x100000 goto l4_%=;		\
 	r0 = 0;					\
 	exit;					\
@@ -450,7 +462,8 @@ l4_%=:	/* fake-dead code; targeted from branch A to	\
 
 SEC("socket")
 __description("map access: mixing value pointer and scalar, 2")
-__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'")
+__success __failure_unpriv
+__msg_unpriv("R2 tried to add from different maps, paths or scalars, pointer arithmetic with it prohibited for !root")
 __retval(0)
 __naked void value_pointer_and_scalar_2(void)
 {
@@ -492,6 +505,7 @@ l4_%=:	/* fake-dead code; targeted from branch A to	\
 	 * prevent dead code sanitization, rejected	\
 	 * via branch B however				\
 	 */						\
+	/* unpriv: nospec (inserted to prevent `R0 invalid mem access 'scalar'`) */\
 	r0 = *(u8*)(r0 + 0);			\
 	r0 = 0;					\
 	exit;					\
@@ -1296,9 +1310,13 @@ l0_%=:	r0 = 1;					\
 
 SEC("socket")
 __description("map access: value_ptr -= unknown scalar, 2")
-__success __failure_unpriv
-__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
+__success __success_unpriv
 __retval(1)
+#ifdef SPEC_V1
+__xlated_unpriv("r1 &= 7")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 pointer arithmetic of map value goes out of range` */
+__xlated_unpriv("r0 -= r1")
+#endif
 __naked void value_ptr_unknown_scalar_2_2(void)
 {
 	asm volatile ("					\
diff --git a/tools/testing/selftests/bpf/test_bpftool_map.sh b/tools/testing/selftests/bpf/test_bpftool_map.sh
new file mode 100755
index 000000000000..515b1df0501e
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_bpftool_map.sh
@@ -0,0 +1,398 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+TESTNAME="bpftool_map"
+BPF_FILE="security_bpf_map.bpf.o"
+BPF_ITER_FILE="bpf_iter_map_elem.bpf.o"
+PROTECTED_MAP_NAME="prot_map"
+NOT_PROTECTED_MAP_NAME="not_prot_map"
+BPF_FS_TMP_PARENT="/tmp"
+BPF_FS_PARENT=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts)
+BPF_FS_PARENT=${BPF_FS_PARENT:-$BPF_FS_TMP_PARENT}
+# bpftool will mount bpf file system under BPF_DIR if it is not mounted
+# under BPF_FS_PARENT.
+BPF_DIR="$BPF_FS_PARENT/test_$TESTNAME"
+SCRIPT_DIR=$(dirname $(realpath "$0"))
+BPF_FILE_PATH="$SCRIPT_DIR/$BPF_FILE"
+BPF_ITER_FILE_PATH="$SCRIPT_DIR/$BPF_ITER_FILE"
+BPFTOOL_PATH="bpftool"
+# Assume the script is located under tools/testing/selftests/bpf/
+KDIR_ROOT_DIR=$(realpath "$SCRIPT_DIR"/../../../../)
+
+_cleanup()
+{
+	set +eu
+
+	# If BPF_DIR is a mount point this will not remove the mount point itself.
+	[ -d "$BPF_DIR" ] && rm -rf "$BPF_DIR" 2> /dev/null
+
+	# Unmount if BPF filesystem was temporarily created.
+	if [ "$BPF_FS_PARENT" = "$BPF_FS_TMP_PARENT" ]; then
+		# A loop and recursive unmount are required as bpftool might
+		# create multiple mounts. For example, a bind mount of the directory
+		# to itself. The bind mount is created to change mount propagation
+		# flags on an actual mount point.
+		max_attempts=3
+		attempt=0
+		while mountpoint -q "$BPF_DIR" && [ $attempt -lt $max_attempts ]; do
+			umount -R "$BPF_DIR" 2>/dev/null
+			attempt=$((attempt+1))
+		done
+
+		# The directory may still exist. Remove it now.
+		[ -d "$BPF_DIR" ] && rm -rf "$BPF_DIR" 2>/dev/null
+	fi
+}
+
+cleanup_skip()
+{
+	echo "selftests: $TESTNAME [SKIP]"
+	_cleanup
+
+	exit $ksft_skip
+}
+
+cleanup()
+{
+	if [ "$?" = 0 ]; then
+		echo "selftests: $TESTNAME [PASS]"
+	else
+		echo "selftests: $TESTNAME [FAILED]"
+	fi
+	_cleanup
+}
+
+check_root_privileges() {
+	if [ $(id -u) -ne 0 ]; then
+		echo "Need root privileges"
+		exit $ksft_skip
+	fi
+}
+
+# Function to verify bpftool path.
+# Parameters:
+#   $1: bpftool path
+verify_bpftool_path() {
+	local bpftool_path="$1"
+	if ! "$bpftool_path" version > /dev/null 2>&1; then
+		echo "Could not run test without bpftool"
+		exit $ksft_skip
+	fi
+}
+
+# Function to verify BTF support.
+# The test requires BTF support for fmod_ret programs.
+verify_btf_support() {
+	if [ ! -f /sys/kernel/btf/vmlinux ]; then
+		echo "Could not run test without BTF support"
+		exit $ksft_skip
+	fi
+}
+
+# Function to initialize map entries with keys [0..2] and values set to 0.
+# Parameters:
+#   $1: Map name
+#   $2: bpftool path
+initialize_map_entries() {
+	local map_name="$1"
+	local bpftool_path="$2"
+
+	for key in 0 1 2; do
+		"$bpftool_path" map update name "$map_name" key $key 0 0 0 value 0 0 0 $key
+	done
+}
+
+# Test read access to the map.
+# Parameters:
+#   $1: Name command (name/pinned)
+#   $2: Map name
+#   $3: bpftool path
+#   $4: key
+access_for_read() {
+	local name_cmd="$1"
+	local map_name="$2"
+	local bpftool_path="$3"
+	local key="$4"
+
+	# Test read access to the map.
+	if ! "$bpftool_path" map lookup "$name_cmd" "$map_name" key $key 1>/dev/null; then
+		echo " Read access to $key in $map_name failed"
+		exit 1
+	fi
+
+	# Test read access to map's BTF data.
+	if ! "$bpftool_path" btf dump map "$name_cmd" "$map_name" 1>/dev/null; then
+		echo " Read access to $map_name for BTF data failed"
+		exit 1
+	fi
+}
+
+# Test write access to the map.
+# Parameters:
+#   $1: Name command (name/pinned)
+#   $2: Map name
+#   $3: bpftool path
+#   $4: key
+#   $5: Whether write should succeed (true/false)
+access_for_write() {
+	local name_cmd="$1"
+	local map_name="$2"
+	local bpftool_path="$3"
+	local key="$4"
+	local write_should_succeed="$5"
+	local value="1 1 1 1"
+
+	if "$bpftool_path" map update "$name_cmd" "$map_name" key $key value \
+		$value 2>/dev/null; then
+		if [ "$write_should_succeed" = "false" ]; then
+			echo " Write access to $key in $map_name succeeded but should have failed"
+			exit 1
+		fi
+	else
+		if [ "$write_should_succeed" = "true" ]; then
+			echo " Write access to $key in $map_name failed but should have succeeded"
+			exit 1
+		fi
+	fi
+}
+
+# Test entry deletion for the map.
+# Parameters:
+#   $1: Name command (name/pinned)
+#   $2: Map name
+#   $3: bpftool path
+#   $4: key
+#   $5: Whether write should succeed (true/false)
+access_for_deletion() {
+	local name_cmd="$1"
+	local map_name="$2"
+	local bpftool_path="$3"
+	local key="$4"
+	local write_should_succeed="$5"
+	local value="1 1 1 1"
+
+	# Test deletion by key for the map.
+	# Before deleting, check the key exists.
+	if ! "$bpftool_path" map lookup "$name_cmd" "$map_name" key $key 1>/dev/null; then
+		echo " Key $key does not exist in $map_name"
+		exit 1
+	fi
+
+	# Delete by key.
+	if "$bpftool_path" map delete "$name_cmd" "$map_name" key $key 2>/dev/null; then
+		if [ "$write_should_succeed" = "false" ]; then
+			echo " Deletion for $key in $map_name succeeded but should have failed"
+			exit 1
+		fi
+	else
+		if [ "$write_should_succeed" = "true" ]; then
+			echo " Deletion for $key in $map_name failed but should have succeeded"
+			exit 1
+		fi
+	fi
+
+	# After deleting, check the entry existence according to the expected status.
+	if "$bpftool_path" map lookup "$name_cmd" "$map_name" key $key 1>/dev/null; then
+		if [ "$write_should_succeed" = "true" ]; then
+			echo " Key $key for $map_name was not deleted but should have been deleted"
+			exit 1
+		fi
+	else
+		if [ "$write_should_succeed" = "false" ]; then
+			echo " Key $key for $map_name was deleted but should not have been deleted"
+			exit 1
+		fi
+	fi
+
+	# Test creation of map's deleted entry, if deletion was successful.
+	# Otherwise, the entry exists.
+	if "$bpftool_path" map update "$name_cmd" "$map_name" key $key value \
+		$value 2>/dev/null; then
+		if [ "$write_should_succeed" = "false" ]; then
+			echo " Write access to $key in $map_name succeeded after deletion attempt but should have failed"
+			exit 1
+		fi
+	else
+		if [ "$write_should_succeed" = "true" ]; then
+			echo " Write access to $key in $map_name failed after deletion attempt but should have succeeded"
+			exit 1
+		fi
+	fi
+}
+
+# Test map elements iterator.
+# Parameters:
+#   $1: Name command (name/pinned)
+#   $2: Map name
+#   $3: bpftool path
+#   $4: BPF_DIR
+#   $5: bpf iterator object file path
+iterate_map_elem() {
+	local name_cmd="$1"
+	local map_name="$2"
+	local bpftool_path="$3"
+	local bpf_dir="$4"
+	local bpf_file="$5"
+	local pin_path="$bpf_dir/map_iterator"
+
+	"$bpftool_path" iter pin "$bpf_file" "$pin_path" map "$name_cmd" "$map_name"
+	if [ ! -f "$pin_path" ]; then
+		echo " Failed to pin iterator to $pin_path"
+		exit 1
+	fi
+
+	cat "$pin_path" 1>/dev/null
+	rm "$pin_path" 2>/dev/null
+}
+
+# Function to test map access with configurable write expectations
+# Parameters:
+#   $1: Name command (name/pinned)
+#   $2: Map name
+#   $3: bpftool path
+#   $4: key for rw
+#   $5: key to delete
+#   $6: Whether write should succeed (true/false)
+#   $7: BPF_DIR
+#   $8: bpf iterator object file path
+access_map() {
+	local name_cmd="$1"
+	local map_name="$2"
+	local bpftool_path="$3"
+	local key_for_rw="$4"
+	local key_to_del="$5"
+	local write_should_succeed="$6"
+	local bpf_dir="$7"
+	local bpf_iter_file_path="$8"
+
+	access_for_read "$name_cmd" "$map_name" "$bpftool_path" "$key_for_rw"
+	access_for_write "$name_cmd" "$map_name" "$bpftool_path" "$key_for_rw" \
+		"$write_should_succeed"
+	access_for_deletion "$name_cmd" "$map_name" "$bpftool_path" "$key_to_del" \
+		"$write_should_succeed"
+	iterate_map_elem "$name_cmd" "$map_name" "$bpftool_path" "$bpf_dir" \
+		"$bpf_iter_file_path"
+}
+
+# Function to test map access with configurable write expectations
+# Parameters:
+#   $1: Map name
+#   $2: bpftool path
+#   $3: BPF_DIR
+#   $4: Whether write should succeed (true/false)
+#   $5: bpf iterator object file path
+test_map_access() {
+	local map_name="$1"
+	local bpftool_path="$2"
+	local bpf_dir="$3"
+	local pin_path="$bpf_dir/${map_name}_pinned"
+	local write_should_succeed="$4"
+	local bpf_iter_file_path="$5"
+
+	# Test access to the map by name.
+	access_map "name" "$map_name" "$bpftool_path" "0 0 0 0" "1 0 0 0" \
+		"$write_should_succeed" "$bpf_dir" "$bpf_iter_file_path"
+
+	# Pin the map to the BPF filesystem
+	"$bpftool_path" map pin name "$map_name" "$pin_path"
+	if [ ! -e "$pin_path" ]; then
+		echo " Failed to pin $map_name"
+		exit 1
+	fi
+
+	# Test access to the pinned map.
+	access_map "pinned" "$pin_path" "$bpftool_path" "0 0 0 0" "2 0 0 0" \
+		"$write_should_succeed" "$bpf_dir" "$bpf_iter_file_path"
+}
+
+# Function to test map creation and map-of-maps
+# Parameters:
+#   $1: bpftool path
+#   $2: BPF_DIR
+test_map_creation_and_map_of_maps() {
+	local bpftool_path="$1"
+	local bpf_dir="$2"
+	local outer_map_name="outer_map_tt"
+	local inner_map_name="inner_map_tt"
+
+	"$bpftool_path" map create "$bpf_dir/$inner_map_name" type array key 4 \
+		value 4 entries 4 name "$inner_map_name"
+	if [ ! -f "$bpf_dir/$inner_map_name" ]; then
+		echo " Failed to create inner map file at $bpf_dir/$inner_map_name"
+		return 1
+	fi
+
+	"$bpftool_path" map create "$bpf_dir/$outer_map_name" type hash_of_maps \
+		key 4 value 4 entries 2 name "$outer_map_name" inner_map name "$inner_map_name"
+	if [ ! -f "$bpf_dir/$outer_map_name" ]; then
+		echo " Failed to create outer map file at $bpf_dir/$outer_map_name"
+		return 1
+	fi
+
+	# Add entries to the outer map by name and by pinned path.
+	"$bpftool_path" map update pinned "$bpf_dir/$outer_map_name" key 0 0 0 0 \
+		value pinned "$bpf_dir/$inner_map_name"
+	"$bpftool_path" map update name "$outer_map_name" key 1 0 0 0 value \
+		name "$inner_map_name"
+
+	# The outer map should be full by now.
+	# The following map update command is expected to fail.
+	if "$bpftool_path" map update name "$outer_map_name" key 2 0 0 0 value name \
+		"$inner_map_name" 2>/dev/null; then
+		echo " Update for $outer_map_name succeeded but should have failed"
+		exit 1
+	fi
+}
+
+# Function to test map access with the btf list command
+# Parameters:
+#   $1: bpftool path
+test_map_access_with_btf_list() {
+	local bpftool_path="$1"
+
+	# The btf list command iterates over maps for
+	# loaded BPF programs.
+	if ! "$bpftool_path" btf list 1>/dev/null; then
+		echo " Failed to access btf data"
+		exit 1
+	fi
+}
+
+set -eu
+
+trap cleanup_skip EXIT
+
+check_root_privileges
+
+verify_bpftool_path "$BPFTOOL_PATH"
+
+verify_btf_support
+
+trap cleanup EXIT
+
+# Load and attach the BPF programs that control map access.
+"$BPFTOOL_PATH" prog loadall "$BPF_FILE_PATH" "$BPF_DIR" autoattach
+
+initialize_map_entries "$PROTECTED_MAP_NAME" "$BPFTOOL_PATH"
+initialize_map_entries "$NOT_PROTECTED_MAP_NAME" "$BPFTOOL_PATH"
+
+# Activate the map protection mechanism. Protection status is controlled
+# by a value stored in the prot_status_map at index 0.
+"$BPFTOOL_PATH" map update name prot_status_map key 0 0 0 0 value 1 0 0 0
+
+# Test protected map (write should fail).
+test_map_access "$PROTECTED_MAP_NAME" "$BPFTOOL_PATH" "$BPF_DIR" "false" \
+	"$BPF_ITER_FILE_PATH"
+
+# Test not protected map (write should succeed).
+test_map_access "$NOT_PROTECTED_MAP_NAME" "$BPFTOOL_PATH" "$BPF_DIR" "true" \
+	"$BPF_ITER_FILE_PATH"
+
+test_map_creation_and_map_of_maps "$BPFTOOL_PATH" "$BPF_DIR"
+
+test_map_access_with_btf_list "$BPFTOOL_PATH"
+
+exit 0
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index 9551d8d5f8f9..78423cf89e01 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -40,7 +40,7 @@
 #define TEST_TAG_LOAD_MODE_PFX "comment:load_mode="
 
 /* Warning: duplicated in bpf_misc.h */
-#define POINTER_VALUE 0xcafe4all
+#define POINTER_VALUE 0xbadcafe
 #define TEST_DATA_LEN 64
 
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -318,20 +318,14 @@ static int parse_caps(const char *str, __u64 *val, const char *name)
 
 static int parse_retval(const char *str, int *val, const char *name)
 {
-	struct {
-		char *name;
-		int val;
-	} named_values[] = {
-		{ "INT_MIN" , INT_MIN },
-		{ "POINTER_VALUE", POINTER_VALUE },
-		{ "TEST_DATA_LEN", TEST_DATA_LEN },
-	};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(named_values); ++i) {
-		if (strcmp(str, named_values[i].name) != 0)
-			continue;
-		*val = named_values[i].val;
+	/*
+	 * INT_MIN is defined as (-INT_MAX -1), i.e. it doesn't expand to a
+	 * single int and cannot be parsed with strtol, so we handle it
+	 * separately here. In addition, it expands to different expressions in
+	 * different compilers so we use a prefixed _INT_MIN instead.
+	 */
+	if (strcmp(str, "_INT_MIN") == 0) {
+		*val = INT_MIN;
 		return 0;
 	}
 
@@ -1103,9 +1097,9 @@ void run_subtest(struct test_loader *tester,
 		}
 	}
 
-	do_prog_test_run(bpf_program__fd(tprog), &retval,
-			 bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false);
-	if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
+	err = do_prog_test_run(bpf_program__fd(tprog), &retval,
+			       bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false);
+	if (!err && retval != subspec->retval && subspec->retval != POINTER_VALUE) {
 		PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
 		goto tobj_cleanup;
 	}
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 986ce32b113a..3fae9ce46ca9 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -535,7 +535,7 @@ static void test_devmap_hash(unsigned int task, void *data)
 static void test_queuemap(unsigned int task, void *data)
 {
 	const int MAP_SIZE = 32;
-	__u32 vals[MAP_SIZE + MAP_SIZE/2], val;
+	__u32 vals[MAP_SIZE + MAP_SIZE/2], val = 0;
 	int fd, i;
 
 	/* Fill test values to be used */
@@ -591,7 +591,7 @@ static void test_queuemap(unsigned int task, void *data)
 static void test_stackmap(unsigned int task, void *data)
 {
 	const int MAP_SIZE = 32;
-	__u32 vals[MAP_SIZE + MAP_SIZE/2], val;
+	__u32 vals[MAP_SIZE + MAP_SIZE/2], val = 0;
 	int fd, i;
 
 	/* Fill test values to be used */
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 870694f2a359..df2222a1806f 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -460,6 +460,34 @@ static inline void *u64_to_ptr(__u64 ptr)
 	return (void *) (unsigned long) ptr;
 }
 
+static inline __u32 id_from_prog_fd(int fd)
+{
+	struct bpf_prog_info prog_info = {};
+	__u32 prog_info_len = sizeof(prog_info);
+	int err;
+
+	err = bpf_obj_get_info_by_fd(fd, &prog_info, &prog_info_len);
+	if (!ASSERT_OK(err, "id_from_prog_fd"))
+		return 0;
+
+	ASSERT_NEQ(prog_info.id, 0, "prog_info.id");
+	return prog_info.id;
+}
+
+static inline __u32 id_from_link_fd(int fd)
+{
+	struct bpf_link_info link_info = {};
+	__u32 link_info_len = sizeof(link_info);
+	int err;
+
+	err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len);
+	if (!ASSERT_OK(err, "id_from_link_fd"))
+		return 0;
+
+	ASSERT_NEQ(link_info.id, 0, "link_info.id");
+	return link_info.id;
+}
+
 int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
 int compare_map_keys(int map1_fd, int map2_fd);
 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
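A sketch of how a test might consume the two new helpers; the skeleton and link variable names are illustrative, not from the patch. Both helpers ASSERT internally and return 0 on failure, so callers only need a zero check:

	__u32 prog_id = id_from_prog_fd(bpf_program__fd(skel->progs.my_prog));
	__u32 link_id = id_from_link_fd(bpf_link__fd(link));

	if (!prog_id || !link_id)	/* the helpers already reported the failure */
		goto cleanup;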
perror("gzgets /proc/config.gz"); + else if (err != Z_OK) + fprintf(stderr, "gzgets /proc/config.gz: %s", msg); + gzclose(config); + return -1; + } + n = strlen(buf); + if (buf[n - 1] == '\n') + buf[n - 1] = 0; + if (strcmp(buf, pat) == 0) { + gzclose(config); + return 1; + } + } + gzclose(config); + return 0; +} + +static bool cmdline_contains(const char *pat) { char cmdline[4096], *c; int fd, ret = false; @@ -27,7 +88,7 @@ static bool get_mitigations_off(void) cmdline[sizeof(cmdline) - 1] = '\0'; for (c = strtok(cmdline, " \n"); c; c = strtok(NULL, " \n")) { - if (strncmp(c, "mitigations=off", strlen(c))) + if (strncmp(c, pat, strlen(c))) continue; ret = true; break; @@ -37,8 +98,21 @@ out: return ret; } +static int get_mitigations_off(void) +{ + int enabled_in_config; + + if (cmdline_contains("mitigations=off")) + return 1; + enabled_in_config = config_contains("CONFIG_CPU_MITIGATIONS=y"); + if (enabled_in_config < 0) + return -1; + return !enabled_in_config; +} + bool get_unpriv_disabled(void) { + int mitigations_off; bool disabled; char buf[2]; FILE *fd; @@ -52,5 +126,19 @@ bool get_unpriv_disabled(void) disabled = true; } - return disabled ? true : get_mitigations_off(); + if (disabled) + return true; + + /* + * Some unpriv tests rely on spectre mitigations being on. + * If mitigations are off or status can't be determined + * assume that unpriv tests are disabled. + */ + mitigations_off = get_mitigations_off(); + if (mitigations_off < 0) { + fprintf(stderr, + "Can't determine if mitigations are enabled, disabling unpriv tests."); + return true; + } + return mitigations_off; } diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 18596ae0b0c1..f3492efc8834 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -2409,3 +2409,27 @@ .errstr_unpriv = "", .prog_type = BPF_PROG_TYPE_CGROUP_SKB, }, +{ + "calls: several args with ref_obj_id", + .insns = { + /* Reserve at least sizeof(struct iphdr) bytes in the ring buffer. + * With a smaller size, the verifier would reject the call to + * bpf_tcp_raw_gen_syncookie_ipv4 before we can reach the + * ref_obj_id error. 
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 18596ae0b0c1..f3492efc8834 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -2409,3 +2409,27 @@
 	.errstr_unpriv = "",
 	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 },
+{
+	"calls: several args with ref_obj_id",
+	.insns = {
+	/* Reserve at least sizeof(struct iphdr) bytes in the ring buffer.
+	 * With a smaller size, the verifier would reject the call to
+	 * bpf_tcp_raw_gen_syncookie_ipv4 before we can reach the
+	 * ref_obj_id error.
+	 */
+	BPF_MOV64_IMM(BPF_REG_2, 20),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+	/* if r0 == 0 goto <exit> */
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tcp_raw_gen_syncookie_ipv4),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_ringbuf = { 2 },
+	.result = REJECT,
+	.errstr = "more than one arg with ref_obj_id",
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
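The rule this test exercises: at most one helper argument may carry a ref_obj_id (a reference the program is obliged to release). The accepted counterpart, sketched in C rather than raw instructions, passes the reserved pointer once and releases it exactly once (map name is illustrative):

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} rb SEC(".maps");

SEC("tc")
int ringbuf_single_ref(struct __sk_buff *skb)
{
	void *p = bpf_ringbuf_reserve(&rb, 20, 0);	/* acquires one reference */

	if (!p)
		return 0;
	bpf_ringbuf_submit(p, 0);			/* releases that reference */
	return 0;
}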
'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -496,11 +490,10 @@ BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -567,11 +560,10 @@ BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -638,11 +630,10 @@ BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -709,11 +700,10 @@ BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -780,11 +770,10 @@ BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c index 11fc68da735e..e901eefd774a 100644 --- a/tools/testing/selftests/bpf/verifier/jset.c +++ b/tools/testing/selftests/bpf/verifier/jset.c @@ -78,12 +78,11 @@ .insns = { BPF_MOV64_IMM(BPF_REG_0, 1), BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "R9 !read_ok", - .result_unpriv = REJECT, .retval = 1, .result = ACCEPT, }, @@ -136,13 +135,12 @@ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2), BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "R9 !read_ok", - .result_unpriv = REJECT, .result = ACCEPT, }, { @@ -154,16 +152,16 @@ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff), BPF_JMP_IMM(BPF_JSET, 
BPF_REG_1, 0xf0, 3), BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "R9 !read_ok", - .result_unpriv = REJECT, .result = ACCEPT, }, diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index b2bb20b00952..d532dd82a3a8 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -23,6 +23,7 @@ #include <float.h> #include <math.h> #include <limits.h> +#include <assert.h> #ifndef ARRAY_SIZE #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) @@ -49,6 +50,7 @@ enum stat_id { STACK, PROG_TYPE, ATTACH_TYPE, + MEMORY_PEAK, FILE_NAME, PROG_NAME, @@ -155,13 +157,27 @@ struct filter { bool abs; }; -struct var_preset { - char *name; +struct rvalue { enum { INTEGRAL, ENUMERATOR } type; union { long long ivalue; char *svalue; }; +}; + +struct field_access { + enum { FIELD_NAME, ARRAY_INDEX } type; + union { + char *name; + struct rvalue index; + }; +}; + +struct var_preset { + struct field_access *atoms; + int atom_count; + char *full_name; + struct rvalue value; bool applied; }; @@ -208,6 +224,9 @@ static struct env { int top_src_lines; struct var_preset *presets; int npresets; + char orig_cgroup[PATH_MAX]; + char stat_cgroup[PATH_MAX]; + int memory_peak_fd; } env; static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) @@ -219,6 +238,22 @@ static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va return vfprintf(stderr, format, args); } +#define log_errno(fmt, ...) log_errno_aux(__FILE__, __LINE__, fmt, ##__VA_ARGS__) + +__attribute__((format(printf, 3, 4))) +static int log_errno_aux(const char *file, int line, const char *fmt, ...) +{ + int err = -errno; + va_list ap; + + va_start(ap, fmt); + fprintf(stderr, "%s:%d: ", file, line); + vfprintf(stderr, fmt, ap); + fprintf(stderr, " failed with error '%s'.\n", strerror(errno)); + va_end(ap); + return err; +} + #ifndef VERISTAT_VERSION #define VERISTAT_VERSION "<kernel>" #endif @@ -344,6 +379,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) fprintf(stderr, "invalid top N specifier: %s\n", arg); argp_usage(state); } + break; case 'C': env.comparison_mode = true; break; @@ -734,13 +770,13 @@ cleanup: } static const struct stat_specs default_csv_output_spec = { - .spec_cnt = 14, + .spec_cnt = 15, .ids = { FILE_NAME, PROG_NAME, VERDICT, DURATION, TOTAL_INSNS, TOTAL_STATES, PEAK_STATES, MAX_STATES_PER_INSN, MARK_READ_MAX_LEN, SIZE, JITED_SIZE, PROG_TYPE, ATTACH_TYPE, - STACK, + STACK, MEMORY_PEAK, }, }; @@ -781,6 +817,7 @@ static struct stat_def { [STACK] = {"Stack depth", {"stack_depth", "stack"}, }, [PROG_TYPE] = { "Program type", {"prog_type"}, }, [ATTACH_TYPE] = { "Attach type", {"attach_type", }, }, + [MEMORY_PEAK] = { "Peak memory (MiB)", {"mem_peak", }, }, }; static bool parse_stat_id_var(const char *name, size_t len, int *id, @@ -854,6 +891,18 @@ static bool is_desc_sym(char c) return c == 'v' || c == 'V' || c == '.' || c == '!' 
@@ -734,13 +770,13 @@ cleanup:
 }
 
 static const struct stat_specs default_csv_output_spec = {
-	.spec_cnt = 14,
+	.spec_cnt = 15,
 	.ids = {
 		FILE_NAME, PROG_NAME, VERDICT, DURATION,
 		TOTAL_INSNS, TOTAL_STATES, PEAK_STATES,
 		MAX_STATES_PER_INSN, MARK_READ_MAX_LEN,
 		SIZE, JITED_SIZE, PROG_TYPE, ATTACH_TYPE,
-		STACK,
+		STACK, MEMORY_PEAK,
 	},
 };
@@ -781,6 +817,7 @@ static struct stat_def {
 	[STACK] = {"Stack depth", {"stack_depth", "stack"}, },
 	[PROG_TYPE] = { "Program type", {"prog_type"}, },
 	[ATTACH_TYPE] = { "Attach type", {"attach_type", }, },
+	[MEMORY_PEAK] = { "Peak memory (MiB)", {"mem_peak", }, },
 };
 
 static bool parse_stat_id_var(const char *name, size_t len, int *id,
@@ -854,6 +891,18 @@ static bool is_desc_sym(char c)
 	return c == 'v' || c == 'V' || c == '.' || c == '!' || c == '_';
 }
 
+static char *rtrim(char *str)
+{
+	int i;
+
+	for (i = strlen(str) - 1; i > 0; --i) {
+		if (!isspace(str[i]))
+			break;
+		str[i] = '\0';
+	}
+	return str;
+}
+
 static int parse_stat(const char *stat_name, struct stat_specs *specs)
 {
 	int id;
@@ -1182,6 +1231,7 @@ static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const ch
 	case BPF_MAP_TYPE_TASK_STORAGE:
 	case BPF_MAP_TYPE_INODE_STORAGE:
 	case BPF_MAP_TYPE_CGROUP_STORAGE:
+	case BPF_MAP_TYPE_CGRP_STORAGE:
 		break;
 	case BPF_MAP_TYPE_STRUCT_OPS:
 		mask_unrelated_struct_ops_progs(obj, map, prog);
@@ -1278,16 +1328,243 @@ static int max_verifier_log_size(void)
 	return log_size;
 }
 
+static bool output_stat_enabled(int id)
+{
+	int i;
+
+	for (i = 0; i < env.output_spec.spec_cnt; i++)
+		if (env.output_spec.ids[i] == id)
+			return true;
+	return false;
+}
+
+__attribute__((format(printf, 2, 3)))
+static int write_one_line(const char *file, const char *fmt, ...)
+{
+	int err, saved_errno;
+	va_list ap;
+	FILE *f;
+
+	f = fopen(file, "w");
+	if (!f)
+		return -1;
+
+	va_start(ap, fmt);
+	errno = 0;
+	err = vfprintf(f, fmt, ap);
+	saved_errno = errno;
+	va_end(ap);
+	fclose(f);
+	errno = saved_errno;
+	return err < 0 ? -1 : 0;
+}
+
+__attribute__((format(scanf, 3, 4)))
+static int scanf_one_line(const char *file, int fields_expected, const char *fmt, ...)
+{
+	int res = 0, saved_errno = 0;
+	char *line = NULL;
+	size_t line_len;
+	va_list ap;
+	FILE *f;
+
+	f = fopen(file, "r");
+	if (!f)
+		return -1;
+
+	va_start(ap, fmt);
+	while (getline(&line, &line_len, f) > 0) {
+		res = vsscanf(line, fmt, ap);
+		if (res == fields_expected)
+			goto out;
+	}
+	if (ferror(f)) {
+		saved_errno = errno;
+		res = -1;
+	}
+
+out:
+	va_end(ap);
+	free(line);
+	fclose(f);
+	errno = saved_errno;
+	return res;
+}
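write_one_line() and scanf_one_line() wrap the two recurring patterns in the cgroup bookkeeping below: format one line into a file, and scan a file line by line until a format matches all expected fields. A hedged usage sketch along the lines of the later call sites (hypothetical cgroup path, error handling elided):

	char mount[4096], opts[4096];

	/* move the current process into a cgroup */
	if (write_one_line("/sys/fs/cgroup/demo/cgroup.procs", "%d\n", getpid()) < 0)
		/* handle error */;

	/* locate the cgroup2 mount point via /proc/self/mounts */
	if (scanf_one_line("/proc/self/mounts", 2, "%*s %4095s cgroup2 %s",
			   mount, opts) != 2)
		/* handle error */;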
+
+static void destroy_stat_cgroup(void)
+{
+	char buf[PATH_MAX];
+	int err;
+
+	close(env.memory_peak_fd);
+
+	if (env.orig_cgroup[0]) {
+		snprintf(buf, sizeof(buf), "%s/cgroup.procs", env.orig_cgroup);
+		err = write_one_line(buf, "%d\n", getpid());
+		if (err < 0)
+			log_errno("moving self to original cgroup %s", env.orig_cgroup);
+	}
+
+	if (env.stat_cgroup[0]) {
+		err = rmdir(env.stat_cgroup);
+		if (err < 0)
+			log_errno("deletion of cgroup %s", env.stat_cgroup);
+	}
+
+	env.memory_peak_fd = -1;
+	env.orig_cgroup[0] = 0;
+	env.stat_cgroup[0] = 0;
+}
+
+/*
+ * Creates a cgroup at /sys/fs/cgroup/veristat-accounting-<pid> and
+ * moves the current process into it.
+ */
+static void create_stat_cgroup(void)
+{
+	char cgroup_fs_mount[4096];
+	char buf[4096];
+	int err;
+
+	env.memory_peak_fd = -1;
+
+	if (!output_stat_enabled(MEMORY_PEAK))
+		return;
+
+	err = scanf_one_line("/proc/self/mounts", 2, "%*s %4095s cgroup2 %s",
+			     cgroup_fs_mount, buf);
+	if (err != 2) {
+		if (err < 0)
+			log_errno("reading /proc/self/mounts");
+		else if (!env.quiet)
+			fprintf(stderr, "Can't find cgroupfs v2 mount point.\n");
+		goto err_out;
+	}
+
+	/* cgroup-v2.rst promises the line "0::<group>" for cgroups v2 */
+	err = scanf_one_line("/proc/self/cgroup", 1, "0::%4095s", buf);
+	if (err != 1) {
+		if (err < 0)
+			log_errno("reading /proc/self/cgroup");
+		else if (!env.quiet)
+			fprintf(stderr, "Can't infer veristat process cgroup.\n");
+		goto err_out;
+	}
+
+	snprintf(env.orig_cgroup, sizeof(env.orig_cgroup), "%s/%s", cgroup_fs_mount, buf);
+
+	snprintf(buf, sizeof(buf), "%s/veristat-accounting-%d", cgroup_fs_mount, getpid());
+	err = mkdir(buf, 0777);
+	if (err < 0) {
+		log_errno("creation of cgroup %s", buf);
+		goto err_out;
+	}
+	strcpy(env.stat_cgroup, buf);
+
+	snprintf(buf, sizeof(buf), "%s/cgroup.procs", env.stat_cgroup);
+	err = write_one_line(buf, "%d\n", getpid());
+	if (err < 0) {
+		log_errno("entering cgroup %s", buf);
+		goto err_out;
+	}
+
+	snprintf(buf, sizeof(buf), "%s/memory.peak", env.stat_cgroup);
+	env.memory_peak_fd = open(buf, O_RDWR | O_APPEND);
+	if (env.memory_peak_fd < 0) {
+		log_errno("opening %s", buf);
+		goto err_out;
+	}
+
+	return;
+
+err_out:
+	if (!env.quiet)
+		fprintf(stderr, "Memory usage metric unavailable.\n");
+	destroy_stat_cgroup();
+}
+
+/* Current value of /sys/fs/cgroup/veristat-accounting-<pid>/memory.peak */
+static long cgroup_memory_peak(void)
+{
+	long err, memory_peak;
+	char buf[32];
+
+	if (env.memory_peak_fd < 0)
+		return -1;
+
+	err = pread(env.memory_peak_fd, buf, sizeof(buf) - 1, 0);
+	if (err <= 0) {
+		log_errno("pread(%s/memory.peak)", env.stat_cgroup);
+		return -1;
+	}
+
+	buf[err] = 0;
+	errno = 0;
+	memory_peak = strtoll(buf, NULL, 10);
+	if (errno) {
+		log_errno("%s/memory.peak:strtoll(%s)", env.stat_cgroup, buf);
+		return -1;
+	}
+
+	return memory_peak;
+}
+
+static int reset_stat_cgroup(void)
+{
+	char buf[] = "r\n";
+	int err;
+
+	if (env.memory_peak_fd < 0)
+		return -1;
+
+	err = pwrite(env.memory_peak_fd, buf, sizeof(buf), 0);
+	if (err <= 0) {
+		log_errno("pwrite(%s/memory.peak)", env.stat_cgroup);
+		return -1;
+	}
+	return 0;
+}
+
+static int parse_rvalue(const char *val, struct rvalue *rvalue)
+{
+	long long value;
+	char *val_end;
+
+	if (val[0] == '-' || isdigit(val[0])) {
+		/* must be a number */
+		errno = 0;
+		value = strtoll(val, &val_end, 0);
+		if (errno == ERANGE) {
+			errno = 0;
+			value = strtoull(val, &val_end, 0);
+		}
+		if (errno || *val_end != '\0') {
+			fprintf(stderr, "Failed to parse value '%s'\n", val);
+			return -EINVAL;
+		}
+		rvalue->ivalue = value;
+		rvalue->type = INTEGRAL;
+	} else {
+		/* if not a number, consider it enum value */
+		rvalue->svalue = strdup(val);
+		if (!rvalue->svalue)
+			return -ENOMEM;
+		rvalue->type = ENUMERATOR;
+	}
+	return 0;
+}
+
 static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog)
 {
 	const char *base_filename = basename(strdupa(filename));
 	const char *prog_name = bpf_program__name(prog);
+	long mem_peak_a, mem_peak_b, mem_peak = -1;
 	char *buf;
 	int buf_sz, log_level;
 	struct verif_stats *stats;
 	struct bpf_prog_info info;
 	__u32 info_len = sizeof(info);
-	int err = 0;
+	int err = 0, cgroup_err;
 	void *tmp;
 	int fd;
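Taken together, the helpers above implement the measurement protocol behind the new mem_peak stat: create a dedicated cgroup v2 directory, move veristat into it, and around each program load reset the memory.peak watermark and read the delta. A self-contained sketch of the same protocol (hypothetical cgroup path, minimal error handling, and it assumes a kernel that supports resetting memory.peak by writing to it):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	static long read_peak(int fd)
	{
		char buf[32];
		ssize_t n = pread(fd, buf, sizeof(buf) - 1, 0);

		if (n <= 0)
			return -1;
		buf[n] = '\0';
		return strtol(buf, NULL, 10);
	}

	int main(void)
	{
		/* assumes the calling process already runs inside this cgroup */
		int fd = open("/sys/fs/cgroup/demo/memory.peak", O_RDWR | O_APPEND);
		long before, after;

		if (fd < 0)
			return 1;
		pwrite(fd, "r\n", 2, 0);	/* reset the peak watermark */
		before = read_peak(fd);
		/* ... run the workload to be measured ... */
		after = read_peak(fd);
		printf("peak delta: %ld bytes\n", after - before);
		close(fd);
		return 0;
	}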
@@ -1332,7 +1609,15 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
 	if (env.force_reg_invariants)
 		bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_REG_INVARIANTS);
 
-	err = bpf_object__load(obj);
+	err = bpf_object__prepare(obj);
+	if (!err) {
+		cgroup_err = reset_stat_cgroup();
+		mem_peak_a = cgroup_memory_peak();
+		err = bpf_object__load(obj);
+		mem_peak_b = cgroup_memory_peak();
+		if (!cgroup_err && mem_peak_a >= 0 && mem_peak_b >= 0)
+			mem_peak = mem_peak_b - mem_peak_a;
+	}
 	env.progs_processed++;
 
 	stats->file_name = strdup(base_filename);
@@ -1341,6 +1626,7 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
 	stats->stats[SIZE] = bpf_program__insn_cnt(prog);
 	stats->stats[PROG_TYPE] = bpf_program__type(prog);
 	stats->stats[ATTACH_TYPE] = bpf_program__expected_attach_type(prog);
+	stats->stats[MEMORY_PEAK] = mem_peak < 0 ? -1 : mem_peak / (1024 * 1024);
 
 	memset(&info, 0, info_len);
 	fd = bpf_program__fd(prog);
@@ -1361,15 +1647,74 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
 	free(buf);
 
 	return 0;
-};
+}
+
+static int append_preset_atom(struct var_preset *preset, char *value, bool is_index)
+{
+	struct field_access *tmp;
+	int i = preset->atom_count;
+	int err;
+
+	tmp = reallocarray(preset->atoms, i + 1, sizeof(*preset->atoms));
+	if (!tmp)
+		return -ENOMEM;
+
+	preset->atoms = tmp;
+	preset->atom_count++;
+
+	if (is_index) {
+		preset->atoms[i].type = ARRAY_INDEX;
+		err = parse_rvalue(value, &preset->atoms[i].index);
+		if (err)
+			return err;
+	} else {
+		preset->atoms[i].type = FIELD_NAME;
+		preset->atoms[i].name = strdup(value);
+		if (!preset->atoms[i].name)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static int parse_var_atoms(const char *full_var, struct var_preset *preset)
+{
+	char expr[256], var[256], *name, *saveptr;
+	int n, len, off, err;
+
+	snprintf(expr, sizeof(expr), "%s", full_var);
+	preset->atom_count = 0;
+	while ((name = strtok_r(preset->atom_count ? NULL : expr, ".", &saveptr))) {
+		len = strlen(name);
+		/* parse variable name */
+		if (sscanf(name, "%[a-zA-Z0-9_] %n", var, &off) != 1) {
+			fprintf(stderr, "Can't parse %s\n", name);
+			return -EINVAL;
+		}
+		err = append_preset_atom(preset, var, false);
+		if (err)
+			return err;
+
+		/* parse optional array indexes */
+		while (off < len) {
+			if (sscanf(name + off, " [ %[a-zA-Z0-9_] ] %n", var, &n) != 1) {
+				fprintf(stderr, "Can't parse %s as index\n", name + off);
+				return -EINVAL;
+			}
+			err = append_preset_atom(preset, var, true);
+			if (err)
+				return err;
+			off += n;
+		}
+	}
+	return 0;
+}
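parse_var_atoms() splits a preset path into dot-separated field atoms, each optionally followed by [index] atoms; an index may itself be an integer or an enumerator name, courtesy of parse_rvalue(). For a hypothetical preset expression "cfg.levels[2].enabled = 1", the variable part decomposes as follows (sketch, assuming the structs introduced above):

	struct var_preset preset = {};

	if (!parse_var_atoms("cfg.levels[2].enabled", &preset)) {
		/* preset.atoms[0]: FIELD_NAME "cfg"
		 * preset.atoms[1]: FIELD_NAME "levels"
		 * preset.atoms[2]: ARRAY_INDEX 2 (INTEGRAL)
		 * preset.atoms[3]: FIELD_NAME "enabled"
		 */
	}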
] = %s %n", var, val, &n) != 2 || n != strlen(expr)) { fprintf(stderr, "Failed to parse expression '%s'\n", expr); return -EINVAL; } + /* Remove trailing spaces from var, as scanf may add those */ + rtrim(var); - if (val[0] == '-' || isdigit(val[0])) { - /* must be a number */ - errno = 0; - value = strtoll(val, &val_end, 0); - if (errno == ERANGE) { - errno = 0; - value = strtoull(val, &val_end, 0); - } - if (errno || *val_end != '\0') { - fprintf(stderr, "Failed to parse value '%s'\n", val); - return -EINVAL; - } - cur->ivalue = value; - cur->type = INTEGRAL; - } else { - /* if not a number, consider it enum value */ - cur->svalue = strdup(val); - if (!cur->svalue) - return -ENOMEM; - cur->type = ENUMERATOR; - } + err = parse_rvalue(val, &cur->value); + if (err) + return err; - cur->name = strdup(var); - if (!cur->name) + cur->full_name = strdup(var); + if (!cur->full_name) return -ENOMEM; + err = parse_var_atoms(var, cur); + if (err) + return err; + return 0; } @@ -1486,22 +1819,96 @@ static bool is_preset_supported(const struct btf_type *t) return btf_is_int(t) || btf_is_enum(t) || btf_is_enum64(t); } -const int btf_find_member(const struct btf *btf, - const struct btf_type *parent_type, - __u32 parent_offset, - const char *member_name, - int *member_tid, - __u32 *member_offset) +static int find_enum_value(const struct btf *btf, const char *name, long long *value) +{ + const struct btf_type *t; + int cnt, i; + long long lvalue; + + cnt = btf__type_cnt(btf); + for (i = 1; i != cnt; ++i) { + t = btf__type_by_id(btf, i); + + if (!btf_is_any_enum(t)) + continue; + + if (enum_value_from_name(btf, t, name, &lvalue) == 0) { + *value = lvalue; + return 0; + } + } + return -ESRCH; +} + +static int resolve_rvalue(struct btf *btf, const struct rvalue *rvalue, long long *result) +{ + int err = 0; + + switch (rvalue->type) { + case INTEGRAL: + *result = rvalue->ivalue; + return 0; + case ENUMERATOR: + err = find_enum_value(btf, rvalue->svalue, result); + if (err) { + fprintf(stderr, "Can't resolve enum value %s\n", rvalue->svalue); + return err; + } + return 0; + default: + fprintf(stderr, "Unknown rvalue type\n"); + return -EOPNOTSUPP; + } + return 0; +} + +static int adjust_var_secinfo_array(struct btf *btf, int tid, struct field_access *atom, + const char *array_name, struct btf_var_secinfo *sinfo) +{ + const struct btf_type *t; + struct btf_array *barr; + long long idx; + int err; + + tid = btf__resolve_type(btf, tid); + t = btf__type_by_id(btf, tid); + if (!btf_is_array(t)) { + fprintf(stderr, "Array index is not expected for %s\n", + array_name); + return -EINVAL; + } + barr = btf_array(t); + err = resolve_rvalue(btf, &atom->index, &idx); + if (err) + return err; + if (idx < 0 || idx >= barr->nelems) { + fprintf(stderr, "Array index %lld is out of bounds [0, %u): %s\n", + idx, barr->nelems, array_name); + return -EINVAL; + } + sinfo->size = btf__resolve_size(btf, barr->type); + sinfo->offset += sinfo->size * idx; + sinfo->type = btf__resolve_type(btf, barr->type); + return 0; +} + +static int adjust_var_secinfo_member(const struct btf *btf, + const struct btf_type *parent_type, + __u32 parent_offset, + const char *member_name, + struct btf_var_secinfo *sinfo) { int i; - if (!btf_is_composite(parent_type)) + if (!btf_is_composite(parent_type)) { + fprintf(stderr, "Can't resolve field %s for non-composite type\n", member_name); return -EINVAL; + } for (i = 0; i < btf_vlen(parent_type); ++i) { const struct btf_member *member; const struct btf_type *member_type; - int tid; + int tid, off; member 
+
+static int adjust_var_secinfo_member(const struct btf *btf,
+				     const struct btf_type *parent_type,
+				     __u32 parent_offset,
+				     const char *member_name,
+				     struct btf_var_secinfo *sinfo)
 {
 	int i;
 
-	if (!btf_is_composite(parent_type))
+	if (!btf_is_composite(parent_type)) {
+		fprintf(stderr, "Can't resolve field %s for non-composite type\n", member_name);
 		return -EINVAL;
+	}
 
 	for (i = 0; i < btf_vlen(parent_type); ++i) {
 		const struct btf_member *member;
 		const struct btf_type *member_type;
-		int tid;
+		int tid, off;
 
 		member = btf_members(parent_type) + i;
 		tid = btf__resolve_type(btf, member->type);
@@ -1509,6 +1916,7 @@ const int btf_find_member(const struct btf *btf,
 			return -EINVAL;
 
 		member_type = btf__type_by_id(btf, tid);
+		off = parent_offset + member->offset;
 		if (member->name_off) {
 			const char *name = btf__name_by_offset(btf, member->name_off);
 
@@ -1518,48 +1926,62 @@ const int btf_find_member(const struct btf *btf,
 					name);
 				return -EINVAL;
 			}
-			*member_offset = parent_offset + member->offset;
-			*member_tid = tid;
+			sinfo->offset += off / 8;
+			sinfo->type = tid;
+			sinfo->size = member_type->size;
 			return 0;
 		}
 		} else if (btf_is_composite(member_type)) {
 			int err;
 
-			err = btf_find_member(btf, member_type, parent_offset + member->offset,
-					      member_name, member_tid, member_offset);
+			err = adjust_var_secinfo_member(btf, member_type, off,
+							member_name, sinfo);
 			if (!err)
 				return 0;
 		}
 	}
-	return -EINVAL;
+	return -ESRCH;
 }
 
 static int adjust_var_secinfo(struct btf *btf, const struct btf_type *t,
-			      struct btf_var_secinfo *sinfo, const char *var)
+			      struct btf_var_secinfo *sinfo, struct var_preset *preset)
 {
-	char expr[256], *saveptr;
-	const struct btf_type *base_type, *member_type;
-	int err, member_tid;
-	char *name;
-	__u32 member_offset = 0;
+	const struct btf_type *base_type;
+	const char *prev_name;
+	int err, i;
+	int tid;
 
-	base_type = btf__type_by_id(btf, btf__resolve_type(btf, t->type));
-	snprintf(expr, sizeof(expr), "%s", var);
-	strtok_r(expr, ".", &saveptr);
+	assert(preset->atom_count > 0);
+	assert(preset->atoms[0].type == FIELD_NAME);
 
-	while ((name = strtok_r(NULL, ".", &saveptr))) {
-		err = btf_find_member(btf, base_type, 0, name, &member_tid, &member_offset);
-		if (err) {
-			fprintf(stderr, "Could not find member %s for variable %s\n", name, var);
-			return err;
+	tid = btf__resolve_type(btf, t->type);
+	base_type = btf__type_by_id(btf, tid);
+	prev_name = preset->atoms[0].name;
+
+	for (i = 1; i < preset->atom_count; ++i) {
+		struct field_access *atom = preset->atoms + i;
+
+		switch (atom->type) {
+		case ARRAY_INDEX:
+			err = adjust_var_secinfo_array(btf, tid, atom, prev_name, sinfo);
+			break;
+		case FIELD_NAME:
+			err = adjust_var_secinfo_member(btf, base_type, 0, atom->name, sinfo);
+			if (err == -ESRCH)
+				fprintf(stderr, "Can't find '%s'\n", atom->name);
+			prev_name = atom->name;
+			break;
+		default:
+			fprintf(stderr, "Unknown field_access type\n");
+			return -EOPNOTSUPP;
 		}
-		member_type = btf__type_by_id(btf, member_tid);
-		sinfo->offset += member_offset / 8;
-		sinfo->size = member_type->size;
-		sinfo->type = member_tid;
-		base_type = member_type;
+		if (err)
+			return err;
+		base_type = btf__type_by_id(btf, sinfo->type);
+		tid = sinfo->type;
 	}
+
 	return 0;
 }
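adjust_var_secinfo() then walks the remaining atoms, alternating member lookups (BTF member offsets are in bits, hence the off / 8 above) with array-index adjustments, until the secinfo points at the exact scalar to patch. A runnable cross-check of the byte arithmetic for a hypothetical nested layout, with plain offsetof standing in for the BTF walk:

	#include <stdio.h>
	#include <stddef.h>

	struct inner { int a; int b; };			/* b at bit offset 32 */
	struct outer { long pad; struct inner in; };	/* in at bit offset 64 on LP64 */

	int main(void)
	{
		/* a path "g.in.b" accumulates 64/8 + 32/8 = 12 bytes */
		printf("%zu\n", offsetof(struct outer, in) + offsetof(struct inner, b));
		return 0;
	}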
Only ints and enums are supported\n",
+			preset->full_name);
 		return -EINVAL;
 	}
 
-	if (preset->type == ENUMERATOR) {
+	if (preset->value.type == ENUMERATOR) {
 		if (btf_is_any_enum(base_type)) {
-			if (enum_value_from_name(btf, base_type, preset->svalue, &value)) {
+			if (enum_value_from_name(btf, base_type, preset->value.svalue, &value)) {
 				fprintf(stderr, "Failed to find integer value for enum element %s\n",
-					preset->svalue);
+					preset->value.svalue);
 				return -EINVAL;
 			}
 		} else {
 			fprintf(stderr, "Value %s is not supported for type %s\n",
-				preset->svalue, btf__name_by_offset(btf, base_type->name_off));
+				preset->value.svalue,
+				btf__name_by_offset(btf, base_type->name_off));
 			return -EINVAL;
 		}
 	}
@@ -1660,20 +2083,16 @@ static int set_global_vars(struct bpf_object *obj, struct var_preset *presets, i
 		for (j = 0; j < n; ++j, ++sinfo) {
 			const struct btf_type *var_type = btf__type_by_id(btf, sinfo->type);
 			const char *var_name;
-			int var_len;
 
 			if (!btf_is_var(var_type))
 				continue;
 
 			var_name = btf__name_by_offset(btf, var_type->name_off);
-			var_len = strlen(var_name);
 
 			for (k = 0; k < npresets; ++k) {
 				struct btf_var_secinfo tmp_sinfo;
 
-				if (strncmp(var_name, presets[k].name, var_len) != 0 ||
-				    (presets[k].name[var_len] != '\0' &&
-				     presets[k].name[var_len] != '.'))
+				if (strcmp(var_name, presets[k].atoms[0].name) != 0)
 					continue;
 
 				if (presets[k].applied) {
@@ -1683,7 +2102,7 @@ static int set_global_vars(struct bpf_object *obj, struct var_preset *presets, i
 				}
 				tmp_sinfo = *sinfo;
 				err = adjust_var_secinfo(btf, var_type,
-							 &tmp_sinfo, presets[k].name);
+							 &tmp_sinfo, presets + k);
 				if (err)
 					return err;
 
@@ -1698,7 +2117,8 @@ static int set_global_vars(struct bpf_object *obj, struct var_preset *presets, i
 	for (i = 0; i < npresets; ++i) {
 		if (!presets[i].applied) {
 			fprintf(stderr, "Global variable preset %s has not been applied\n",
-				presets[i].name);
+				presets[i].full_name);
+			err = -EINVAL;
 		}
 		presets[i].applied = false;
 	}
@@ -1824,6 +2244,7 @@ static int cmp_stat(const struct verif_stats *s1, const struct verif_stats *s2,
 	case TOTAL_STATES:
 	case PEAK_STATES:
 	case MAX_STATES_PER_INSN:
+	case MEMORY_PEAK:
 	case MARK_READ_MAX_LEN: {
 		long v1 = s1->stats[id];
 		long v2 = s2->stats[id];
@@ -2053,6 +2474,7 @@ static void prepare_value(const struct verif_stats *s, enum stat_id id,
 	case STACK:
 	case SIZE:
 	case JITED_SIZE:
+	case MEMORY_PEAK:
 		*val = s ? s->stats[id] : 0;
 		break;
 	default:
@@ -2139,6 +2561,7 @@ static int parse_stat_value(const char *str, enum stat_id id, struct verif_stats
 	case MARK_READ_MAX_LEN:
 	case SIZE:
 	case JITED_SIZE:
+	case MEMORY_PEAK:
 	case STACK: {
 		long val;
 		int err, n;
@@ -2776,7 +3199,7 @@ static void output_prog_stats(void)
 
 static int handle_verif_mode(void)
 {
-	int i, err;
+	int i, err = 0;
 
 	if (env.filename_cnt == 0) {
 		fprintf(stderr, "Please provide path to BPF object file!\n\n");
@@ -2784,11 +3207,12 @@ static int handle_verif_mode(void)
 		return -EINVAL;
 	}
 
+	create_stat_cgroup();
 	for (i = 0; i < env.filename_cnt; i++) {
 		err = process_obj(env.filenames[i]);
 		if (err) {
 			fprintf(stderr, "Failed to process '%s': %d\n", env.filenames[i], err);
-			return err;
+			goto out;
 		}
 	}
 
@@ -2796,7 +3220,9 @@ static int handle_verif_mode(void)
 
 	output_prog_stats();
 
-	return 0;
+out:
+	destroy_stat_cgroup();
+	return err;
 }
 
 static int handle_replay_mode(void)
@@ -2826,7 +3252,7 @@ static int handle_replay_mode(void)
 
 int main(int argc, char **argv)
 {
-	int err = 0, i;
+	int err = 0, i, j;
 
 	if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
 		return 1;
@@ -2885,9 +3311,19 @@ int main(int argc, char **argv)
 	}
 	free(env.deny_filters);
 	for (i = 0; i < env.npresets; ++i) {
-		free(env.presets[i].name);
-		if (env.presets[i].type == ENUMERATOR)
-			free(env.presets[i].svalue);
+		free(env.presets[i].full_name);
+		for (j = 0; j < env.presets[i].atom_count; ++j) {
+			switch (env.presets[i].atoms[j].type) {
+			case FIELD_NAME:
+				free(env.presets[i].atoms[j].name);
+				break;
+			case ARRAY_INDEX:
+				if (env.presets[i].atoms[j].index.type == ENUMERATOR)
+					free(env.presets[i].atoms[j].index.svalue);
+				break;
+			}
+		}
+		free(env.presets[i].atoms);
 	}
 	free(env.presets);
 	return -err;
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index 79505d294c44..2f869daf8a06 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -43,6 +43,15 @@ riscv64)
 	BZIMAGE="arch/riscv/boot/Image"
 	ARCH="riscv"
 	;;
+ppc64el)
+	QEMU_BINARY=qemu-system-ppc64
+	QEMU_CONSOLE="hvc0"
+	# KVM has not been tested on powerpc, so it is not enabled for now.
+	HOST_FLAGS=(-machine pseries -cpu POWER9)
+	CROSS_FLAGS=(-machine pseries -cpu POWER9)
+	BZIMAGE="vmlinux"
+	ARCH="powerpc"
+	;;
 *)
 	echo "Unsupported architecture"
 	exit 1
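Back in veristat.c, handle_verif_mode() now brackets the whole run with create_stat_cgroup()/destroy_stat_cgroup() and turns the early return into goto out, so the accounting cgroup is always torn down and the process moved back even when an object fails. The shape of that change, reduced to a runnable sketch with hypothetical helpers:

	#include <stdio.h>

	static void setup_accounting(void)	{ /* create_stat_cgroup() */ }
	static void teardown_accounting(void)	{ /* destroy_stat_cgroup() */ }
	static int process_one(int i)		{ return i == 2 ? -1 : 0; }

	static int run_all(int n)
	{
		int i, err = 0;

		setup_accounting();
		for (i = 0; i < n; i++) {
			err = process_one(i);
			if (err)
				goto out;	/* was "return err": would skip teardown */
		}
	out:
		teardown_accounting();
		return err;
	}

	int main(void)
	{
		printf("%d\n", run_all(5));	/* -1, but teardown still ran */
		return 0;
	}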
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 0ced4026ee44..a29de0713f19 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -109,6 +109,8 @@
 
 #include <network_helpers.h>
 
+#define MAX_TX_BUDGET_DEFAULT 32
+
 static bool opt_verbose;
 static bool opt_print_tests;
 static enum test_mode opt_mode = TEST_MODE_ALL;
@@ -1091,11 +1093,45 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
 	return true;
 }
 
+static u32 load_value(u32 *counter)
+{
+	return __atomic_load_n(counter, __ATOMIC_ACQUIRE);
+}
+
+static bool kick_tx_with_check(struct xsk_socket_info *xsk, int *ret)
+{
+	u32 max_budget = MAX_TX_BUDGET_DEFAULT;
+	u32 cons, ready_to_send;
+	int delta;
+
+	cons = load_value(xsk->tx.consumer);
+	ready_to_send = load_value(xsk->tx.producer) - cons;
+	*ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+
+	delta = load_value(xsk->tx.consumer) - cons;
+	/* By default, xsk should consume exactly @max_budget descriptors in
+	 * one send when hitting the max-budget limit in the while loop of
+	 * __xsk_generic_xmit(). Make sure the number of descriptors to be
+	 * sent is larger than @max_budget; otherwise tx.consumer is updated
+	 * in time by xskq_cons_peek_desc(), which hides the issue this test
+	 * tries to verify.
+	 */
+	if (ready_to_send > max_budget && delta != max_budget)
+		return false;
+
+	return true;
+}
+
 static int kick_tx(struct xsk_socket_info *xsk)
 {
 	int ret;
 
-	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+	if (xsk->check_consumer) {
+		if (!kick_tx_with_check(xsk, &ret))
+			return TEST_FAILURE;
+	} else {
+		ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+	}
 	if (ret >= 0)
 		return TEST_PASS;
 	if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
@@ -2613,6 +2649,23 @@ static int testapp_adjust_tail_grow_mb(struct test_spec *test)
 				       XSK_UMEM__LARGE_FRAME_SIZE * 2);
 }
 
+static int testapp_tx_queue_consumer(struct test_spec *test)
+{
+	int nr_packets;
+
+	if (test->mode == TEST_MODE_ZC) {
+		ksft_test_result_skip("Cannot run TX_QUEUE_CONSUMER test for ZC mode\n");
+		return TEST_SKIP;
+	}
+
+	nr_packets = MAX_TX_BUDGET_DEFAULT + 1;
+	pkt_stream_replace(test, nr_packets, MIN_PKT_SIZE);
+	test->ifobj_tx->xsk->batch_size = nr_packets;
+	test->ifobj_tx->xsk->check_consumer = true;
+
+	return testapp_validate_traffic(test);
+}
+
 static void run_pkt_test(struct test_spec *test)
 {
 	int ret;
@@ -2723,6 +2776,7 @@ static const struct test_spec tests[] = {
 	{.name = "XDP_ADJUST_TAIL_SHRINK_MULTI_BUFF", .test_func = testapp_adjust_tail_shrink_mb},
 	{.name = "XDP_ADJUST_TAIL_GROW", .test_func = testapp_adjust_tail_grow},
 	{.name = "XDP_ADJUST_TAIL_GROW_MULTI_BUFF", .test_func = testapp_adjust_tail_grow_mb},
+	{.name = "TX_QUEUE_CONSUMER", .test_func = testapp_tx_queue_consumer},
 };
 
 static void print_tests(void)
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 67fc44b2813b..4df3a5d329ac 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -95,6 +95,7 @@ struct xsk_socket_info {
 	u32 batch_size;
 	u8 dst_mac[ETH_ALEN];
 	u8 src_mac[ETH_ALEN];
+	bool check_consumer;
 };
 
 struct pkt {
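For reference, the consumer check above boils down to ring-pointer arithmetic: snapshot tx.consumer, compute how many descriptors are outstanding, issue one sendto() kick, and require that exactly MAX_TX_BUDGET_DEFAULT descriptors were consumed whenever more than one full budget was queued. A reduced sketch of the invariant, with hypothetical counter values standing in for the live ring state:

	#include <stdio.h>

	#define MAX_TX_BUDGET_DEFAULT 32

	int main(void)
	{
		/* stand-ins for __atomic_load_n(..., __ATOMIC_ACQUIRE) snapshots */
		unsigned int cons_before = 100, prod = 133, cons_after = 132;
		unsigned int ready_to_send = prod - cons_before;	/* 33 > budget */
		int delta = (int)(cons_after - cons_before);		/* 32 consumed */

		if (ready_to_send > MAX_TX_BUDGET_DEFAULT && delta != MAX_TX_BUDGET_DEFAULT)
			printf("FAIL: one send must consume exactly the budget\n");
		else
			printf("PASS: budget honored (delta=%d)\n", delta);
		return 0;
	}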