Diffstat (limited to 'tools/testing/selftests/bpf/test_kmods')
9 files changed, 2140 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/test_kmods/.gitignore b/tools/testing/selftests/bpf/test_kmods/.gitignore new file mode 100644 index 000000000000..ded513777281 --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/.gitignore @@ -0,0 +1,6 @@ +*.mod +*.mod.c +*.o +.ko +/Module.symvers +/modules.order diff --git a/tools/testing/selftests/bpf/test_kmods/Makefile b/tools/testing/selftests/bpf/test_kmods/Makefile new file mode 100644 index 000000000000..d4e50c4509c9 --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/Makefile @@ -0,0 +1,21 @@ +TEST_KMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) +KDIR ?= $(abspath $(TEST_KMOD_DIR)/../../../../..) + +ifeq ($(V),1) +Q = +else +Q = @ +endif + +MODULES = bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \ + bpf_test_modorder_y.ko + +$(foreach m,$(MODULES),$(eval obj-m += $(m:.ko=.o))) + +CFLAGS_bpf_testmod.o = -I$(src) + +all: + $(Q)$(MAKE) -C $(KDIR) M=$(TEST_KMOD_DIR) modules + +clean: + $(Q)$(MAKE) -C $(KDIR) M=$(TEST_KMOD_DIR) clean diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c new file mode 100644 index 000000000000..0cc747fa912f --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/module.h> +#include <linux/init.h> + +__bpf_kfunc_start_defs(); + +__bpf_kfunc int bpf_test_modorder_retx(void) +{ + return 'x'; +} + +__bpf_kfunc_end_defs(); + +BTF_KFUNCS_START(bpf_test_modorder_kfunc_x_ids) +BTF_ID_FLAGS(func, bpf_test_modorder_retx); +BTF_KFUNCS_END(bpf_test_modorder_kfunc_x_ids) + +static const struct btf_kfunc_id_set bpf_test_modorder_x_set = { + .owner = THIS_MODULE, + .set = &bpf_test_modorder_kfunc_x_ids, +}; + +static int __init bpf_test_modorder_x_init(void) +{ + return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, + &bpf_test_modorder_x_set); +} + +static void __exit bpf_test_modorder_x_exit(void) +{ +} + +module_init(bpf_test_modorder_x_init); +module_exit(bpf_test_modorder_x_exit); + +MODULE_DESCRIPTION("BPF selftest ordertest module X"); +MODULE_LICENSE("GPL"); diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c new file mode 100644 index 000000000000..c627ee085d13 --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/module.h> +#include <linux/init.h> + +__bpf_kfunc_start_defs(); + +__bpf_kfunc int bpf_test_modorder_rety(void) +{ + return 'y'; +} + +__bpf_kfunc_end_defs(); + +BTF_KFUNCS_START(bpf_test_modorder_kfunc_y_ids) +BTF_ID_FLAGS(func, bpf_test_modorder_rety); +BTF_KFUNCS_END(bpf_test_modorder_kfunc_y_ids) + +static const struct btf_kfunc_id_set bpf_test_modorder_y_set = { + .owner = THIS_MODULE, + .set = &bpf_test_modorder_kfunc_y_ids, +}; + +static int __init bpf_test_modorder_y_init(void) +{ + return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, + &bpf_test_modorder_y_set); +} + +static void __exit bpf_test_modorder_y_exit(void) +{ +} + +module_init(bpf_test_modorder_y_init); +module_exit(bpf_test_modorder_y_exit); + +MODULE_DESCRIPTION("BPF selftest ordertest module Y"); +MODULE_LICENSE("GPL"); diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c 
b/tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c new file mode 100644 index 000000000000..948eb3962732 --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/init.h> +#include <linux/module.h> + +struct bpf_test_no_cfi_ops { + void (*fn_1)(void); + void (*fn_2)(void); +}; + +static int dummy_init(struct btf *btf) +{ + return 0; +} + +static int dummy_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + return 0; +} + +static int dummy_reg(void *kdata, struct bpf_link *link) +{ + return 0; +} + +static void dummy_unreg(void *kdata, struct bpf_link *link) +{ +} + +static const struct bpf_verifier_ops dummy_verifier_ops; + +static void bpf_test_no_cfi_ops__fn_1(void) +{ +} + +static void bpf_test_no_cfi_ops__fn_2(void) +{ +} + +static struct bpf_test_no_cfi_ops __test_no_cif_ops = { + .fn_1 = bpf_test_no_cfi_ops__fn_1, + .fn_2 = bpf_test_no_cfi_ops__fn_2, +}; + +static struct bpf_struct_ops test_no_cif_ops = { + .verifier_ops = &dummy_verifier_ops, + .init = dummy_init, + .init_member = dummy_init_member, + .reg = dummy_reg, + .unreg = dummy_unreg, + .name = "bpf_test_no_cfi_ops", + .owner = THIS_MODULE, +}; + +static int bpf_test_no_cfi_init(void) +{ + int ret; + + ret = register_bpf_struct_ops(&test_no_cif_ops, + bpf_test_no_cfi_ops); + if (!ret) + return -EINVAL; + + test_no_cif_ops.cfi_stubs = &__test_no_cif_ops; + ret = register_bpf_struct_ops(&test_no_cif_ops, + bpf_test_no_cfi_ops); + return ret; +} + +static void bpf_test_no_cfi_exit(void) +{ +} + +module_init(bpf_test_no_cfi_init); +module_exit(bpf_test_no_cfi_exit); + +MODULE_AUTHOR("Kuifeng Lee"); +MODULE_DESCRIPTION("BPF no cfi_stubs test module"); +MODULE_LICENSE("Dual BSD/GPL"); + diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h new file mode 100644 index 000000000000..aeef86b3da74 --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Facebook */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM bpf_testmod + +#if !defined(_BPF_TESTMOD_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _BPF_TESTMOD_EVENTS_H + +#include <linux/tracepoint.h> +#include "bpf_testmod.h" + +TRACE_EVENT(bpf_testmod_test_read, + TP_PROTO(struct task_struct *task, struct bpf_testmod_test_read_ctx *ctx), + TP_ARGS(task, ctx), + TP_STRUCT__entry( + __field(pid_t, pid) + __array(char, comm, TASK_COMM_LEN) + __field(loff_t, off) + __field(size_t, len) + ), + TP_fast_assign( + __entry->pid = task->pid; + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->off = ctx->off; + __entry->len = ctx->len; + ), + TP_printk("pid=%d comm=%s off=%llu len=%zu", + __entry->pid, __entry->comm, __entry->off, __entry->len) +); + +/* A bare tracepoint with no event associated with it */ +DECLARE_TRACE(bpf_testmod_test_write_bare, + TP_PROTO(struct task_struct *task, struct bpf_testmod_test_write_ctx *ctx), + TP_ARGS(task, ctx) +); + +/* Used in bpf_testmod_test_read() to test __nullable suffix */ +DECLARE_TRACE(bpf_testmod_test_nullable_bare, + TP_PROTO(struct bpf_testmod_test_read_ctx *ctx__nullable), + TP_ARGS(ctx__nullable) +); + +struct sk_buff; + +DECLARE_TRACE(bpf_testmod_test_raw_tp_null, + TP_PROTO(struct 
sk_buff *skb), + TP_ARGS(skb) +); + + +#undef BPF_TESTMOD_DECLARE_TRACE +#ifdef DECLARE_TRACE_WRITABLE +#define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \ + DECLARE_TRACE_WRITABLE(call, PARAMS(proto), PARAMS(args), size) +#else +#define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \ + DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) +#endif + +BPF_TESTMOD_DECLARE_TRACE(bpf_testmod_test_writable_bare, + TP_PROTO(struct bpf_testmod_test_writable_ctx *ctx), + TP_ARGS(ctx), + sizeof(struct bpf_testmod_test_writable_ctx) +); + +#endif /* _BPF_TESTMOD_EVENTS_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE bpf_testmod-events +#include <trace/define_trace.h> diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c new file mode 100644 index 000000000000..e6c248e3ae54 --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c @@ -0,0 +1,1599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/btf_ids.h> +#include <linux/delay.h> +#include <linux/error-injection.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/percpu-defs.h> +#include <linux/sysfs.h> +#include <linux/tracepoint.h> +#include <linux/net.h> +#include <linux/socket.h> +#include <linux/nsproxy.h> +#include <linux/inet.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/un.h> +#include <linux/filter.h> +#include <net/sock.h> +#include <linux/namei.h> +#include "bpf_testmod.h" +#include "bpf_testmod_kfunc.h" + +#define CREATE_TRACE_POINTS +#include "bpf_testmod-events.h" + +#define CONNECT_TIMEOUT_SEC 1 + +typedef int (*func_proto_typedef)(long); +typedef int (*func_proto_typedef_nested1)(func_proto_typedef); +typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1); + +DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123; +long bpf_testmod_test_struct_arg_result; +static DEFINE_MUTEX(sock_lock); +static struct socket *sock; + +struct bpf_testmod_struct_arg_1 { + int a; +}; +struct bpf_testmod_struct_arg_2 { + long a; + long b; +}; + +struct bpf_testmod_struct_arg_3 { + int a; + int b[]; +}; + +struct bpf_testmod_struct_arg_4 { + u64 a; + int b; +}; + +struct bpf_testmod_struct_arg_5 { + char a; + short b; + int c; + long d; +}; + +__bpf_hook_start(); + +noinline int +bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) { + bpf_testmod_test_struct_arg_result = a.a + a.b + b + c; + return bpf_testmod_test_struct_arg_result; +} + +noinline int +bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) { + bpf_testmod_test_struct_arg_result = a + b.a + b.b + c; + return bpf_testmod_test_struct_arg_result; +} + +noinline int +bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) { + bpf_testmod_test_struct_arg_result = a + b + c.a + c.b; + return bpf_testmod_test_struct_arg_result; +} + +noinline int +bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b, + int c, int d, struct bpf_testmod_struct_arg_2 e) { + bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b; + return bpf_testmod_test_struct_arg_result; +} + +noinline int +bpf_testmod_test_struct_arg_5(void) { + bpf_testmod_test_struct_arg_result = 1; + return bpf_testmod_test_struct_arg_result; +} + +noinline int +bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) { + bpf_testmod_test_struct_arg_result = 
a->b[0]; + return bpf_testmod_test_struct_arg_result; +} + +noinline int +bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e, + struct bpf_testmod_struct_arg_4 f) +{ + bpf_testmod_test_struct_arg_result = a + (long)b + c + d + + (long)e + f.a + f.b; + return bpf_testmod_test_struct_arg_result; +} + +noinline int +bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e, + struct bpf_testmod_struct_arg_4 f, int g) +{ + bpf_testmod_test_struct_arg_result = a + (long)b + c + d + + (long)e + f.a + f.b + g; + return bpf_testmod_test_struct_arg_result; +} + +noinline int +bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f, + short g, struct bpf_testmod_struct_arg_5 h, long i) +{ + bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e + + f + g + h.a + h.b + h.c + h.d + i; + return bpf_testmod_test_struct_arg_result; +} + +noinline int +bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) { + bpf_testmod_test_struct_arg_result = a->a; + return bpf_testmod_test_struct_arg_result; +} + +__weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void) +{ +} + +__bpf_kfunc void +bpf_testmod_test_mod_kfunc(int i) +{ + *(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i; +} + +__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) +{ + it->cnt = cnt; + + if (cnt < 0) + return -EINVAL; + + it->value = value; + + return 0; +} + +__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it) +{ + if (it->cnt <= 0) + return NULL; + + it->cnt--; + + return &it->value; +} + +__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq* it__iter) +{ + if (it__iter->cnt < 0) + return 0; + + return val + it__iter->value; +} + +__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) +{ + it->cnt = 0; +} + +__bpf_kfunc void bpf_kfunc_common_test(void) +{ +} + +__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, + struct bpf_dynptr *ptr__nullable) +{ +} + +__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr) +{ + return NULL; +} + +__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr) +{ + return NULL; +} + +__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr) +{ +} + +__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr) +{ +} + +__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr) +{ +} + +__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr) +{ +} + +__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr) +{ +} + +__bpf_kfunc struct bpf_testmod_ctx * +bpf_testmod_ctx_create(int *err) +{ + struct bpf_testmod_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); + if (!ctx) { + *err = -ENOMEM; + return NULL; + } + refcount_set(&ctx->usage, 1); + + return ctx; +} + +static void testmod_free_cb(struct rcu_head *head) +{ + struct bpf_testmod_ctx *ctx; + + ctx = container_of(head, struct bpf_testmod_ctx, rcu); + kfree(ctx); +} + +__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) +{ + if (!ctx) + return; + if (refcount_dec_and_test(&ctx->usage)) + call_rcu(&ctx->rcu, testmod_free_cb); +} + +static struct bpf_testmod_ops3 *st_ops3; + +static int bpf_testmod_test_3(void) +{ + return 0; +} + +static int bpf_testmod_test_4(void) +{ + return 0; +} + +static struct bpf_testmod_ops3 __bpf_testmod_ops3 = { + .test_1 = bpf_testmod_test_3, + .test_2 = 
bpf_testmod_test_4, +}; + +static void bpf_testmod_test_struct_ops3(void) +{ + if (st_ops3) + st_ops3->test_1(); +} + +__bpf_kfunc void bpf_testmod_ops3_call_test_1(void) +{ + st_ops3->test_1(); +} + +__bpf_kfunc void bpf_testmod_ops3_call_test_2(void) +{ + st_ops3->test_2(); +} + +struct bpf_testmod_btf_type_tag_1 { + int a; +}; + +struct bpf_testmod_btf_type_tag_2 { + struct bpf_testmod_btf_type_tag_1 __user *p; +}; + +struct bpf_testmod_btf_type_tag_3 { + struct bpf_testmod_btf_type_tag_1 __percpu *p; +}; + +noinline int +bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) { + BTF_TYPE_EMIT(func_proto_typedef); + BTF_TYPE_EMIT(func_proto_typedef_nested1); + BTF_TYPE_EMIT(func_proto_typedef_nested2); + return arg->a; +} + +noinline int +bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) { + return arg->p->a; +} + +noinline int +bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) { + return arg->a; +} + +noinline int +bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) { + return arg->p->a; +} + +noinline int bpf_testmod_loop_test(int n) +{ + /* Make sum volatile, so smart compilers, such as clang, will not + * optimize the code by removing the loop. + */ + volatile int sum = 0; + int i; + + /* the primary goal of this test is to test LBR. Create a lot of + * branches in the function, so we can catch it easily. + */ + for (i = 0; i < n; i++) + sum += i; + return sum; +} + +__weak noinline struct file *bpf_testmod_return_ptr(int arg) +{ + static struct file f = {}; + + switch (arg) { + case 1: return (void *)EINVAL; /* user addr */ + case 2: return (void *)0xcafe4a11; /* user addr */ + case 3: return (void *)-EINVAL; /* canonical, but invalid */ + case 4: return (void *)(1ull << 60); /* non-canonical and invalid */ + case 5: return (void *)~(1ull << 30); /* trigger extable */ + case 6: return &f; /* valid addr */ + case 7: return (void *)((long)&f | 1); /* kernel tricks */ +#ifdef CONFIG_X86_64 + case 8: return (void *)VSYSCALL_ADDR; /* vsyscall page address */ +#endif + default: return NULL; + } +} + +noinline int bpf_testmod_fentry_test1(int a) +{ + return a + 1; +} + +noinline int bpf_testmod_fentry_test2(int a, u64 b) +{ + return a + b; +} + +noinline int bpf_testmod_fentry_test3(char a, int b, u64 c) +{ + return a + b + c; +} + +noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d, + void *e, char f, int g) +{ + return a + (long)b + c + d + (long)e + f + g; +} + +noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d, + void *e, char f, int g, + unsigned int h, long i, __u64 j, + unsigned long k) +{ + return a + (long)b + c + d + (long)e + f + g + h + i + j + k; +} + +int bpf_testmod_fentry_ok; + +noinline ssize_t +bpf_testmod_test_read(struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t len) +{ + struct bpf_testmod_test_read_ctx ctx = { + .buf = buf, + .off = off, + .len = len, + }; + struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1}; + struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3}; + struct bpf_testmod_struct_arg_3 *struct_arg3; + struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22}; + struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26}; + int i = 1; + + while (bpf_testmod_return_ptr(i)) + i++; + + (void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4); + (void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4); + 
(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2); + (void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2); + (void)bpf_testmod_test_struct_arg_5(); + (void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19, + (void *)20, struct_arg4); + (void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19, + (void *)20, struct_arg4, 23); + (void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20, + 21, 22, struct_arg5, 27); + + (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2); + + (void)trace_bpf_testmod_test_raw_tp_null_tp(NULL); + + bpf_testmod_test_struct_ops3(); + + struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) + + sizeof(int)), GFP_KERNEL); + if (struct_arg3 != NULL) { + struct_arg3->b[0] = 1; + (void)bpf_testmod_test_struct_arg_6(struct_arg3); + kfree(struct_arg3); + } + + /* This is always true. Use the check to make sure the compiler + * doesn't remove bpf_testmod_loop_test. + */ + if (bpf_testmod_loop_test(101) > 100) + trace_bpf_testmod_test_read(current, &ctx); + + trace_bpf_testmod_test_nullable_bare_tp(NULL); + + /* Magic number to enable writable tp */ + if (len == 64) { + struct bpf_testmod_test_writable_ctx writable = { + .val = 1024, + }; + trace_bpf_testmod_test_writable_bare_tp(&writable); + if (writable.early_ret) + return snprintf(buf, len, "%d\n", writable.val); + } + + if (bpf_testmod_fentry_test1(1) != 2 || + bpf_testmod_fentry_test2(2, 3) != 5 || + bpf_testmod_fentry_test3(4, 5, 6) != 15 || + bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20, + 21, 22) != 133 || + bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20, + 21, 22, 23, 24, 25, 26) != 231) + goto out; + + bpf_testmod_fentry_ok = 1; +out: + return -EIO; /* always fail */ +} +EXPORT_SYMBOL(bpf_testmod_test_read); +ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO); + +noinline ssize_t +bpf_testmod_test_write(struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t len) +{ + struct bpf_testmod_test_write_ctx ctx = { + .buf = buf, + .off = off, + .len = len, + }; + + trace_bpf_testmod_test_write_bare_tp(current, &ctx); + + return -EIO; /* always fail */ +} +EXPORT_SYMBOL(bpf_testmod_test_write); +ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO); + +noinline int bpf_fentry_shadow_test(int a) +{ + return a + 2; +} +EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test); + +__bpf_hook_end(); + +static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { + .attr = { .name = "bpf_testmod", .mode = 0666, }, + .read = bpf_testmod_test_read, + .write = bpf_testmod_test_write, +}; + +/* bpf_testmod_uprobe sysfs attribute is so far enabled for x86_64 only, + * please see test_uretprobe_regs_change test + */ +#ifdef __x86_64__ + +static int +uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func, + struct pt_regs *regs, __u64 *data) + +{ + regs->ax = 0x12345678deadbeef; + regs->cx = 0x87654321feebdaed; + regs->r11 = (u64) -1; + return true; +} + +struct testmod_uprobe { + struct path path; + struct uprobe *uprobe; + struct uprobe_consumer consumer; +}; + +static DEFINE_MUTEX(testmod_uprobe_mutex); + +static struct testmod_uprobe uprobe = { + .consumer.ret_handler = uprobe_ret_handler, +}; + +static int testmod_register_uprobe(loff_t offset) +{ + int err = -EBUSY; + + if (uprobe.uprobe) + return -EBUSY; + + mutex_lock(&testmod_uprobe_mutex); + + if (uprobe.uprobe) + goto out; + + err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path); + if (err) + goto out; + + 
uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry), + offset, 0, &uprobe.consumer); + if (IS_ERR(uprobe.uprobe)) { + err = PTR_ERR(uprobe.uprobe); + path_put(&uprobe.path); + uprobe.uprobe = NULL; + } +out: + mutex_unlock(&testmod_uprobe_mutex); + return err; +} + +static void testmod_unregister_uprobe(void) +{ + mutex_lock(&testmod_uprobe_mutex); + + if (uprobe.uprobe) { + uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer); + uprobe_unregister_sync(); + path_put(&uprobe.path); + uprobe.uprobe = NULL; + } + + mutex_unlock(&testmod_uprobe_mutex); +} + +static ssize_t +bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t len) +{ + unsigned long offset = 0; + int err = 0; + + if (kstrtoul(buf, 0, &offset)) + return -EINVAL; + + if (offset) + err = testmod_register_uprobe(offset); + else + testmod_unregister_uprobe(); + + return err ?: strlen(buf); +} + +static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = { + .attr = { .name = "bpf_testmod_uprobe", .mode = 0666, }, + .write = bpf_testmod_uprobe_write, +}; + +static int register_bpf_testmod_uprobe(void) +{ + return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file); +} + +static void unregister_bpf_testmod_uprobe(void) +{ + testmod_unregister_uprobe(); + sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file); +} + +#else +static int register_bpf_testmod_uprobe(void) +{ + return 0; +} + +static void unregister_bpf_testmod_uprobe(void) { } +#endif + +BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids) +BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW) +BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) +BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value) +BTF_ID_FLAGS(func, bpf_kfunc_common_test) +BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test) +BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE) +BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE) +BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU) +BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1) +BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2) +BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) + +BTF_ID_LIST(bpf_testmod_dtor_ids) +BTF_ID(struct, bpf_testmod_ctx) +BTF_ID(func, bpf_testmod_ctx_release) + +static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { + .owner = THIS_MODULE, + .set = &bpf_testmod_common_kfunc_ids, +}; + +__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d) +{ + return a + b + c + d; +} + +__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b) +{ + return a + b; +} + +__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk) +{ + return sk; +} + +__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d) +{ + /* Provoke the compiler to assume that the caller has sign-extended a, + * b and c on platforms where this is required (e.g. s390x). 
+ */ + return (long)a + (long)b + (long)c + d; +} + +static struct prog_test_ref_kfunc prog_test_struct = { + .a = 42, + .b = 108, + .next = &prog_test_struct, + .cnt = REFCOUNT_INIT(1), +}; + +__bpf_kfunc struct prog_test_ref_kfunc * +bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) +{ + refcount_inc(&prog_test_struct.cnt); + return &prog_test_struct; +} + +__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p) +{ + WARN_ON_ONCE(1); +} + +__bpf_kfunc struct prog_test_member * +bpf_kfunc_call_memb_acquire(void) +{ + WARN_ON_ONCE(1); + return NULL; +} + +__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p) +{ + WARN_ON_ONCE(1); +} + +static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size) +{ + if (size > 2 * sizeof(int)) + return NULL; + + return (int *)p; +} + +__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, + const int rdwr_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size); +} + +__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, + const int rdonly_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); +} + +/* the next 2 ones can't be really used for testing expect to ensure + * that the verifier rejects the call. + * Acquire functions must return struct pointers, so these ones are + * failing. + */ +__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, + const int rdonly_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); +} + +__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) +{ + /* p != NULL, but p->cnt could be 0 */ +} + +__bpf_kfunc void bpf_kfunc_call_test_destructive(void) +{ +} + +__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) +{ + return arg; +} + +__bpf_kfunc void bpf_kfunc_call_test_sleepable(void) +{ +} + +__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args) +{ + int proto; + int err; + + mutex_lock(&sock_lock); + + if (sock) { + pr_err("%s called without releasing old sock", __func__); + err = -EPERM; + goto out; + } + + switch (args->af) { + case AF_INET: + case AF_INET6: + proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP; + break; + case AF_UNIX: + proto = PF_UNIX; + break; + default: + pr_err("invalid address family %d\n", args->af); + err = -EINVAL; + goto out; + } + + err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type, + proto, &sock); + + if (!err) + /* Set timeout for call to kernel_connect() to prevent it from hanging, + * and consider the connection attempt failed if it returns + * -EINPROGRESS. 
+ */ + sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ; +out: + mutex_unlock(&sock_lock); + + return err; +} + +__bpf_kfunc void bpf_kfunc_close_sock(void) +{ + mutex_lock(&sock_lock); + + if (sock) { + sock_release(sock); + sock = NULL; + } + + mutex_unlock(&sock_lock); +} + +__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args) +{ + int err; + + if (args->addrlen > sizeof(args->addr)) + return -EINVAL; + + mutex_lock(&sock_lock); + + if (!sock) { + pr_err("%s called without initializing sock", __func__); + err = -EPERM; + goto out; + } + + err = kernel_connect(sock, (struct sockaddr *)&args->addr, + args->addrlen, 0); +out: + mutex_unlock(&sock_lock); + + return err; +} + +__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args) +{ + int err; + + if (args->addrlen > sizeof(args->addr)) + return -EINVAL; + + mutex_lock(&sock_lock); + + if (!sock) { + pr_err("%s called without initializing sock", __func__); + err = -EPERM; + goto out; + } + + err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen); +out: + mutex_unlock(&sock_lock); + + return err; +} + +__bpf_kfunc int bpf_kfunc_call_kernel_listen(void) +{ + int err; + + mutex_lock(&sock_lock); + + if (!sock) { + pr_err("%s called without initializing sock", __func__); + err = -EPERM; + goto out; + } + + err = kernel_listen(sock, 128); +out: + mutex_unlock(&sock_lock); + + return err; +} + +__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args) +{ + struct msghdr msg = { + .msg_name = &args->addr.addr, + .msg_namelen = args->addr.addrlen, + }; + struct kvec iov; + int err; + + if (args->addr.addrlen > sizeof(args->addr.addr) || + args->msglen > sizeof(args->msg)) + return -EINVAL; + + iov.iov_base = args->msg; + iov.iov_len = args->msglen; + + mutex_lock(&sock_lock); + + if (!sock) { + pr_err("%s called without initializing sock", __func__); + err = -EPERM; + goto out; + } + + err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen); + args->addr.addrlen = msg.msg_namelen; +out: + mutex_unlock(&sock_lock); + + return err; +} + +__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args) +{ + struct msghdr msg = { + .msg_name = &args->addr.addr, + .msg_namelen = args->addr.addrlen, + }; + struct kvec iov; + int err; + + if (args->addr.addrlen > sizeof(args->addr.addr) || + args->msglen > sizeof(args->msg)) + return -EINVAL; + + iov.iov_base = args->msg; + iov.iov_len = args->msglen; + + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen); + mutex_lock(&sock_lock); + + if (!sock) { + pr_err("%s called without initializing sock", __func__); + err = -EPERM; + goto out; + } + + err = sock_sendmsg(sock, &msg); + args->addr.addrlen = msg.msg_namelen; +out: + mutex_unlock(&sock_lock); + + return err; +} + +__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) +{ + int err; + + mutex_lock(&sock_lock); + + if (!sock) { + pr_err("%s called without initializing sock", __func__); + err = -EPERM; + goto out; + } + + err = kernel_getsockname(sock, (struct sockaddr *)&args->addr); + if (err < 0) + goto out; + + args->addrlen = err; + err = 0; +out: + mutex_unlock(&sock_lock); + + return err; +} + +__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) +{ + int err; + + mutex_lock(&sock_lock); + + if (!sock) { + pr_err("%s called without initializing sock", __func__); + err = -EPERM; + goto out; + } + + err = kernel_getpeername(sock, (struct sockaddr *)&args->addr); + if (err < 0) + goto out; + + args->addrlen = err; + err = 0; +out: + 
mutex_unlock(&sock_lock); + + return err; +} + +static DEFINE_MUTEX(st_ops_mutex); +static struct bpf_testmod_st_ops *st_ops; + +__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) +{ + int ret = -1; + + mutex_lock(&st_ops_mutex); + if (st_ops && st_ops->test_prologue) + ret = st_ops->test_prologue(args); + mutex_unlock(&st_ops_mutex); + + return ret; +} + +__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) +{ + int ret = -1; + + mutex_lock(&st_ops_mutex); + if (st_ops && st_ops->test_epilogue) + ret = st_ops->test_epilogue(args); + mutex_unlock(&st_ops_mutex); + + return ret; +} + +__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) +{ + int ret = -1; + + mutex_lock(&st_ops_mutex); + if (st_ops && st_ops->test_pro_epilogue) + ret = st_ops->test_pro_epilogue(args); + mutex_unlock(&st_ops_mutex); + + return ret; +} + +__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) +{ + args->a += 10; + return args->a; +} + +BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids) +BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) +BTF_ID_FLAGS(func, bpf_kfunc_call_test1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test3) +BTF_ID_FLAGS(func, bpf_kfunc_call_test4) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS) +BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) + +static int bpf_testmod_ops_init(struct btf *btf) +{ + return 0; +} + +static bool 
bpf_testmod_ops_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + return bpf_tracing_btf_ctx_access(off, size, type, prog, info); +} + +static int bpf_testmod_ops_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) { + /* For data fields, this function has to copy it and return + * 1 to indicate that the data has been handled by the + * struct_ops type, or the verifier will reject the map if + * the value of the data field is not zero. + */ + ((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data; + return 1; + } + return 0; +} + +static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { + .owner = THIS_MODULE, + .set = &bpf_testmod_check_kfunc_ids, +}; + +static const struct bpf_verifier_ops bpf_testmod_verifier_ops = { + .get_func_proto = bpf_base_func_proto, + .is_valid_access = bpf_testmod_ops_is_valid_access, +}; + +static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = { + .is_valid_access = bpf_testmod_ops_is_valid_access, +}; + +static int bpf_dummy_reg(void *kdata, struct bpf_link *link) +{ + struct bpf_testmod_ops *ops = kdata; + + if (ops->test_1) + ops->test_1(); + /* Some test cases (ex. struct_ops_maybe_null) may not have test_2 + * initialized, so we need to check for NULL. + */ + if (ops->test_2) + ops->test_2(4, ops->data); + + return 0; +} + +static void bpf_dummy_unreg(void *kdata, struct bpf_link *link) +{ +} + +static int bpf_testmod_test_1(void) +{ + return 0; +} + +static void bpf_testmod_test_2(int a, int b) +{ +} + +static int bpf_testmod_tramp(int value) +{ + return 0; +} + +static int bpf_testmod_ops__test_maybe_null(int dummy, + struct task_struct *task__nullable) +{ + return 0; +} + +static int bpf_testmod_ops__test_refcounted(int dummy, + struct task_struct *task__ref) +{ + return 0; +} + +static struct task_struct * +bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref, + struct cgroup *cgrp) +{ + return NULL; +} + +static struct bpf_testmod_ops __bpf_testmod_ops = { + .test_1 = bpf_testmod_test_1, + .test_2 = bpf_testmod_test_2, + .test_maybe_null = bpf_testmod_ops__test_maybe_null, + .test_refcounted = bpf_testmod_ops__test_refcounted, + .test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr, +}; + +struct bpf_struct_ops bpf_bpf_testmod_ops = { + .verifier_ops = &bpf_testmod_verifier_ops, + .init = bpf_testmod_ops_init, + .init_member = bpf_testmod_ops_init_member, + .reg = bpf_dummy_reg, + .unreg = bpf_dummy_unreg, + .cfi_stubs = &__bpf_testmod_ops, + .name = "bpf_testmod_ops", + .owner = THIS_MODULE, +}; + +static int bpf_dummy_reg2(void *kdata, struct bpf_link *link) +{ + struct bpf_testmod_ops2 *ops = kdata; + + ops->test_1(); + return 0; +} + +static struct bpf_testmod_ops2 __bpf_testmod_ops2 = { + .test_1 = bpf_testmod_test_1, +}; + +struct bpf_struct_ops bpf_testmod_ops2 = { + .verifier_ops = &bpf_testmod_verifier_ops, + .init = bpf_testmod_ops_init, + .init_member = bpf_testmod_ops_init_member, + .reg = bpf_dummy_reg2, + .unreg = bpf_dummy_unreg, + .cfi_stubs = &__bpf_testmod_ops2, + .name = "bpf_testmod_ops2", + .owner = THIS_MODULE, +}; + +static int st_ops3_reg(void *kdata, struct bpf_link *link) +{ + int err = 0; + + mutex_lock(&st_ops_mutex); + if (st_ops3) { + pr_err("st_ops has already been registered\n"); + err = -EEXIST; + goto unlock; + } + st_ops3 = kdata; 
+ +unlock: + mutex_unlock(&st_ops_mutex); + return err; +} + +static void st_ops3_unreg(void *kdata, struct bpf_link *link) +{ + mutex_lock(&st_ops_mutex); + st_ops3 = NULL; + mutex_unlock(&st_ops_mutex); +} + +static void test_1_recursion_detected(struct bpf_prog *prog) +{ + struct bpf_prog_stats *stats; + + stats = this_cpu_ptr(prog->stats); + printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu", + u64_stats_read(&stats->misses)); +} + +static int st_ops3_check_member(const struct btf_type *t, + const struct btf_member *member, + const struct bpf_prog *prog) +{ + u32 moff = __btf_member_bit_offset(t, member) / 8; + + switch (moff) { + case offsetof(struct bpf_testmod_ops3, test_1): + prog->aux->priv_stack_requested = true; + prog->aux->recursion_detected = test_1_recursion_detected; + fallthrough; + default: + break; + } + return 0; +} + +struct bpf_struct_ops bpf_testmod_ops3 = { + .verifier_ops = &bpf_testmod_verifier_ops3, + .init = bpf_testmod_ops_init, + .init_member = bpf_testmod_ops_init_member, + .reg = st_ops3_reg, + .unreg = st_ops3_unreg, + .check_member = st_ops3_check_member, + .cfi_stubs = &__bpf_testmod_ops3, + .name = "bpf_testmod_ops3", + .owner = THIS_MODULE, +}; + +static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args) +{ + return 0; +} + +static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args) +{ + return 0; +} + +static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args) +{ + return 0; +} + +static int bpf_cgroup_from_id_id; +static int bpf_cgroup_release_id; + +static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write, + const struct bpf_prog *prog) +{ + struct bpf_insn *insn = insn_buf; + + /* r8 = r1; // r8 will be "u64 *ctx". + * r1 = 0; + * r0 = bpf_cgroup_from_id(r1); + * if r0 != 0 goto pc+5; + * r6 = r8[0]; // r6 will be "struct st_ops *args". 
+ * r7 = r6->a; + * r7 += 1000; + * r6->a = r7; + * goto pc+2; + * r1 = r0; + * bpf_cgroup_release(r1); + * r1 = r8; + */ + *insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1); + *insn++ = BPF_MOV64_IMM(BPF_REG_1, 0); + *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id); + *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a)); + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000); + *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a)); + *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2); + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0); + *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id); + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8); + *insn++ = prog->insnsi[0]; + + return insn - insn_buf; +} + +static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog, + s16 ctx_stack_off) +{ + struct bpf_insn *insn = insn_buf; + + /* r1 = 0; + * r6 = 0; + * r0 = bpf_cgroup_from_id(r1); + * if r0 != 0 goto pc+6; + * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx" + * r1 = r1[0]; // r1 will be "struct st_ops *args" + * r6 = r1->a; + * r6 += 10000; + * r1->a = r6; + * goto pc+2 + * r1 = r0; + * bpf_cgroup_release(r1); + * r0 = r6; + * r0 *= 2; + * BPF_EXIT; + */ + *insn++ = BPF_MOV64_IMM(BPF_REG_1, 0); + *insn++ = BPF_MOV64_IMM(BPF_REG_6, 0); + *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id); + *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a)); + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000); + *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a)); + *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2); + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0); + *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id); + *insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6); + *insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2); + *insn++ = BPF_EXIT_INSN(); + + return insn - insn_buf; +} + +#define KFUNC_PRO_EPI_PREFIX "test_kfunc_" +static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write, + const struct bpf_prog *prog) +{ + struct bpf_insn *insn = insn_buf; + + if (strcmp(prog->aux->attach_func_name, "test_prologue") && + strcmp(prog->aux->attach_func_name, "test_pro_epilogue")) + return 0; + + if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX))) + return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog); + + /* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx". 
+ * r7 = r6->a; + * r7 += 1000; + * r6->a = r7; + */ + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a)); + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000); + *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a)); + *insn++ = prog->insnsi[0]; + + return insn - insn_buf; +} + +static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog, + s16 ctx_stack_off) +{ + struct bpf_insn *insn = insn_buf; + + if (strcmp(prog->aux->attach_func_name, "test_epilogue") && + strcmp(prog->aux->attach_func_name, "test_pro_epilogue")) + return 0; + + if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX))) + return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off); + + /* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx" + * r1 = r1[0]; // r1 will be "struct st_ops *args" + * r6 = r1->a; + * r6 += 10000; + * r1->a = r6; + * r0 = r6; + * r0 *= 2; + * BPF_EXIT; + */ + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a)); + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000); + *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a)); + *insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6); + *insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2); + *insn++ = BPF_EXIT_INSN(); + + return insn - insn_buf; +} + +static int st_ops_btf_struct_access(struct bpf_verifier_log *log, + const struct bpf_reg_state *reg, + int off, int size) +{ + if (off < 0 || off + size > sizeof(struct st_ops_args)) + return -EACCES; + return 0; +} + +static const struct bpf_verifier_ops st_ops_verifier_ops = { + .is_valid_access = bpf_testmod_ops_is_valid_access, + .btf_struct_access = st_ops_btf_struct_access, + .gen_prologue = st_ops_gen_prologue, + .gen_epilogue = st_ops_gen_epilogue, + .get_func_proto = bpf_base_func_proto, +}; + +static struct bpf_testmod_st_ops st_ops_cfi_stubs = { + .test_prologue = bpf_test_mod_st_ops__test_prologue, + .test_epilogue = bpf_test_mod_st_ops__test_epilogue, + .test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue, +}; + +static int st_ops_reg(void *kdata, struct bpf_link *link) +{ + int err = 0; + + mutex_lock(&st_ops_mutex); + if (st_ops) { + pr_err("st_ops has already been registered\n"); + err = -EEXIST; + goto unlock; + } + st_ops = kdata; + +unlock: + mutex_unlock(&st_ops_mutex); + return err; +} + +static void st_ops_unreg(void *kdata, struct bpf_link *link) +{ + mutex_lock(&st_ops_mutex); + st_ops = NULL; + mutex_unlock(&st_ops_mutex); +} + +static int st_ops_init(struct btf *btf) +{ + struct btf *kfunc_btf; + + bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf); + bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf); + if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0) + return -EINVAL; + + return 0; +} + +static int st_ops_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + return 0; +} + +static struct bpf_struct_ops testmod_st_ops = { + .verifier_ops = &st_ops_verifier_ops, + .init = st_ops_init, + .init_member = st_ops_init_member, + .reg = st_ops_reg, + .unreg = st_ops_unreg, + .cfi_stubs = &st_ops_cfi_stubs, + .name = "bpf_testmod_st_ops", + .owner = THIS_MODULE, +}; + +extern int 
bpf_fentry_test1(int a); + +static int bpf_testmod_init(void) +{ + const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = { + { + .btf_id = bpf_testmod_dtor_ids[0], + .kfunc_btf_id = bpf_testmod_dtor_ids[1] + }, + }; + void **tramp; + int ret; + + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set); + ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); + ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2); + ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3); + ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops); + ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors, + ARRAY_SIZE(bpf_testmod_dtors), + THIS_MODULE); + if (ret < 0) + return ret; + if (bpf_fentry_test1(0) < 0) + return -EINVAL; + sock = NULL; + mutex_init(&sock_lock); + ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); + if (ret < 0) + return ret; + ret = register_bpf_testmod_uprobe(); + if (ret < 0) + return ret; + + /* Ensure nothing is between tramp_1..tramp_40 */ + BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) != + offsetofend(struct bpf_testmod_ops, tramp_40)); + tramp = (void **)&__bpf_testmod_ops.tramp_1; + while (tramp <= (void **)&__bpf_testmod_ops.tramp_40) + *tramp++ = bpf_testmod_tramp; + + return 0; +} + +static void bpf_testmod_exit(void) +{ + /* Need to wait for all references to be dropped because + * bpf_kfunc_call_test_release() which currently resides in kernel can + * be called after bpf_testmod is unloaded. Once release function is + * moved into the module this wait can be removed. + */ + while (refcount_read(&prog_test_struct.cnt) > 1) + msleep(20); + + bpf_kfunc_close_sock(); + sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); + unregister_bpf_testmod_uprobe(); +} + +module_init(bpf_testmod_init); +module_exit(bpf_testmod_exit); + +MODULE_AUTHOR("Andrii Nakryiko"); +MODULE_DESCRIPTION("BPF selftests module"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h new file mode 100644 index 000000000000..c9fab51f16e2 --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Facebook */ +#ifndef _BPF_TESTMOD_H +#define _BPF_TESTMOD_H + +#include <linux/types.h> + +struct task_struct; +struct cgroup; + +struct bpf_testmod_test_read_ctx { + char *buf; + loff_t off; + size_t len; +}; + +struct bpf_testmod_test_write_ctx { + char *buf; + loff_t off; + size_t len; +}; + +struct bpf_testmod_test_writable_ctx { + bool early_ret; + int val; +}; + +/* BPF iter that returns *value* *n* times in a row */ +struct bpf_iter_testmod_seq { + s64 value; + int cnt; +}; + +struct bpf_testmod_ops { + int (*test_1)(void); + void (*test_2)(int a, int b); + /* Used to test nullable arguments. */ + int (*test_maybe_null)(int dummy, struct task_struct *task); + int (*unsupported_ops)(void); + /* Used to test ref_acquired arguments. 
*/ + int (*test_refcounted)(int dummy, struct task_struct *task); + /* Used to test returning referenced kptr. */ + struct task_struct *(*test_return_ref_kptr)(int dummy, struct task_struct *task, + struct cgroup *cgrp); + + /* The following fields are used to test shadow copies. */ + char onebyte; + struct { + int a; + int b; + } unsupported; + int data; + + /* The following pointers are used to test the maps having multiple + * pages of trampolines. + */ + int (*tramp_1)(int value); + int (*tramp_2)(int value); + int (*tramp_3)(int value); + int (*tramp_4)(int value); + int (*tramp_5)(int value); + int (*tramp_6)(int value); + int (*tramp_7)(int value); + int (*tramp_8)(int value); + int (*tramp_9)(int value); + int (*tramp_10)(int value); + int (*tramp_11)(int value); + int (*tramp_12)(int value); + int (*tramp_13)(int value); + int (*tramp_14)(int value); + int (*tramp_15)(int value); + int (*tramp_16)(int value); + int (*tramp_17)(int value); + int (*tramp_18)(int value); + int (*tramp_19)(int value); + int (*tramp_20)(int value); + int (*tramp_21)(int value); + int (*tramp_22)(int value); + int (*tramp_23)(int value); + int (*tramp_24)(int value); + int (*tramp_25)(int value); + int (*tramp_26)(int value); + int (*tramp_27)(int value); + int (*tramp_28)(int value); + int (*tramp_29)(int value); + int (*tramp_30)(int value); + int (*tramp_31)(int value); + int (*tramp_32)(int value); + int (*tramp_33)(int value); + int (*tramp_34)(int value); + int (*tramp_35)(int value); + int (*tramp_36)(int value); + int (*tramp_37)(int value); + int (*tramp_38)(int value); + int (*tramp_39)(int value); + int (*tramp_40)(int value); +}; + +struct bpf_testmod_ops2 { + int (*test_1)(void); +}; + +struct bpf_testmod_ops3 { + int (*test_1)(void); + int (*test_2)(void); +}; + +struct st_ops_args { + u64 a; +}; + +struct bpf_testmod_st_ops { + int (*test_prologue)(struct st_ops_args *args); + int (*test_epilogue)(struct st_ops_args *args); + int (*test_pro_epilogue)(struct st_ops_args *args); + struct module *owner; +}; + +#endif /* _BPF_TESTMOD_H */ diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h new file mode 100644 index 000000000000..b58817938deb --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _BPF_TESTMOD_KFUNC_H +#define _BPF_TESTMOD_KFUNC_H + +#ifndef __KERNEL__ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#else +#define __ksym +struct prog_test_member1 { + int a; +}; + +struct prog_test_member { + struct prog_test_member1 m; + int c; +}; + +struct prog_test_ref_kfunc { + int a; + int b; + struct prog_test_member memb; + struct prog_test_ref_kfunc *next; + refcount_t cnt; +}; +#endif + +struct prog_test_pass1 { + int x0; + struct { + int x1; + struct { + int x2; + struct { + int x3; + }; + }; + }; +}; + +struct prog_test_pass2 { + int len; + short arr1[4]; + struct { + char arr2[4]; + unsigned long arr3[8]; + } x; +}; + +struct prog_test_fail1 { + void *p; + int x; +}; + +struct prog_test_fail2 { + int x8; + struct prog_test_pass1 x; +}; + +struct prog_test_fail3 { + int len; + char arr1[2]; + char arr2[]; +}; + +struct init_sock_args { + int af; + int type; +}; + +struct addr_args { + char addr[sizeof(struct __kernel_sockaddr_storage)]; + int addrlen; +}; + +struct sendmsg_args { + struct addr_args addr; + char msg[10]; + int msglen; +}; + +struct bpf_testmod_ctx { + struct callback_head rcu; + refcount_t 
usage; +}; + +struct prog_test_ref_kfunc * +bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym; +void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; +void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym; + +void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; +int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; +int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; +int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; +void bpf_kfunc_call_int_mem_release(int *p) __ksym; + +/* The bpf_kfunc_call_test_static_unused_arg is defined as static, + * but bpf program compilation needs to see it as global symbol. + */ +#ifndef __KERNEL__ +u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym; +#endif + +void bpf_testmod_test_mod_kfunc(int i) __ksym; + +__u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b, + __u32 c, __u64 d) __ksym; +int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym; +struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym; +long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym; + +void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) __ksym; +void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym; +void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym; +void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym; + +void bpf_kfunc_call_test_destructive(void) __ksym; +void bpf_kfunc_call_test_sleepable(void) __ksym; + +void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p); +struct prog_test_member *bpf_kfunc_call_memb_acquire(void); +void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p); +void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p); +void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p); +void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p); +void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len); + +void bpf_kfunc_common_test(void) __ksym; + +int bpf_kfunc_init_sock(struct init_sock_args *args) __ksym; +void bpf_kfunc_close_sock(void) __ksym; +int bpf_kfunc_call_kernel_connect(struct addr_args *args) __ksym; +int bpf_kfunc_call_kernel_bind(struct addr_args *args) __ksym; +int bpf_kfunc_call_kernel_listen(void) __ksym; +int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args) __ksym; +int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args) __ksym; +int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) __ksym; +int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) __ksym; + +void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, struct bpf_dynptr *ptr__nullable) __ksym; + +struct bpf_testmod_ctx *bpf_testmod_ctx_create(int *err) __ksym; +void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) __ksym; + +struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr) __ksym; +struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr) __ksym; +void bpf_kfunc_nested_release_test(struct sk_buff *ptr) __ksym; + +struct st_ops_args; +int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) __ksym; +int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) __ksym; +int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) __ksym; +int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) __ksym; + +void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr) __ksym; +void 
bpf_kfunc_trusted_task_test(struct task_struct *ptr) __ksym; +void bpf_kfunc_trusted_num_test(int *ptr) __ksym; +void bpf_kfunc_rcu_task_test(struct task_struct *ptr) __ksym; + +#endif /* _BPF_TESTMOD_KFUNC_H */
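
For reference, a minimal BPF-side sketch (not part of this patch) of how a selftest program can call one of the kfuncs these modules export. bpf_kfunc_common_test() is registered for BPF_PROG_TYPE_UNSPEC in bpf_testmod.c, so it is callable from any program type; the include path, section name and program name below are illustrative assumptions about the usual selftests progs/ layout.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical selftest program, not part of this patch: calls a kfunc
 * exported by bpf_testmod. When built as a BPF object (__KERNEL__ undefined),
 * the header below pulls in vmlinux.h and bpf_helpers.h itself.
 */
#include "../test_kmods/bpf_testmod_kfunc.h"

SEC("syscall")
int call_testmod_kfunc(void *ctx)
{
	/* Declared __ksym in bpf_testmod_kfunc.h; resolved against the
	 * module's BTF once bpf_testmod.ko is loaded.
	 */
	bpf_kfunc_common_test();
	return 0;
}

char _license[] SEC("license") = "GPL";

The modules themselves are built out of tree by the Makefile above (make -C the kernel tree with M= pointing at this directory) and must be loaded, for example with insmod, before a program like this can resolve its __ksym reference.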