Diffstat (limited to 'tools/testing/selftests/bpf/progs')
35 files changed, 2407 insertions, 88 deletions
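
A recurring hunk below adds the same guard to every cgroup sockopt test program: a page_size global that userspace fills in, plus an optlen check before returning. A minimal sketch of that pattern, with a hypothetical program name, assuming the test loader sets page_size (e.g. from sysconf(_SC_PAGESIZE)) before attaching:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u32 page_size = 0;    /* assumption: filled in by the loader before attach */

SEC("cgroup/getsockopt")
int getsockopt_passthrough(struct bpf_sockopt *ctx)
{
        /* optval buffers larger than PAGE_SIZE are only partially exposed
         * to the program; setting optlen to 0 tells the kernel to keep
         * using its own buffer rather than treating the untouched tail
         * as an error.
         */
        if (ctx->optlen > page_size)
                ctx->optlen = 0;

        return 1;       /* pass the option through unmodified */
}

char _license[] SEC("license") = "GPL";
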
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index d3c1217ba79a..38a57a2e70db 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -86,6 +86,10 @@ #define POINTER_VALUE 0xcafe4all #define TEST_DATA_LEN 64 +#ifndef __used +#define __used __attribute__((used)) +#endif + #if defined(__TARGET_ARCH_x86) #define SYSCALL_WRAPPER 1 #define SYS_PREFIX "__x64_" diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c index 50f95ec61165..76d661b20e87 100644 --- a/tools/testing/selftests/bpf/progs/cb_refs.c +++ b/tools/testing/selftests/bpf/progs/cb_refs.c @@ -2,6 +2,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "../bpf_testmod/bpf_testmod_kfunc.h" struct map_value { struct prog_test_ref_kfunc __kptr *ptr; @@ -14,9 +15,6 @@ struct { __uint(max_entries, 16); } array_map SEC(".maps"); -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; - static __noinline int cb1(void *map, void *key, void *value, void *ctx) { void *p = *(void **)ctx; diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c index b2a409e6382a..932b8ecd4ae3 100644 --- a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c +++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c @@ -12,6 +12,7 @@ __u32 invocations = 0; __u32 assertion_error = 0; __u32 retval_value = 0; __u32 ctx_retval_value = 0; +__u32 page_size = 0; SEC("cgroup/getsockopt") int get_retval(struct bpf_sockopt *ctx) @@ -20,6 +21,10 @@ int get_retval(struct bpf_sockopt *ctx) ctx_retval_value = ctx->retval; __sync_fetch_and_add(&invocations, 1); + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } @@ -31,6 +36,10 @@ int set_eisconn(struct bpf_sockopt *ctx) if (bpf_set_retval(-EISCONN)) assertion_error = 1; + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } @@ -41,5 +50,9 @@ int clear_retval(struct bpf_sockopt *ctx) ctx->retval = 0; + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c index d6e5903e06ba..b7fa8804e19d 100644 --- a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c +++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c @@ -11,6 +11,7 @@ __u32 invocations = 0; __u32 assertion_error = 0; __u32 retval_value = 0; +__u32 page_size = 0; SEC("cgroup/setsockopt") int get_retval(struct bpf_sockopt *ctx) @@ -18,6 +19,10 @@ int get_retval(struct bpf_sockopt *ctx) retval_value = bpf_get_retval(); __sync_fetch_and_add(&invocations, 1); + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } @@ -29,6 +34,10 @@ int set_eunatch(struct bpf_sockopt *ctx) if (bpf_set_retval(-EUNATCH)) assertion_error = 1; + /* optval larger than PAGE_SIZE use kernel's buffer. 
*/ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 0; } @@ -40,6 +49,10 @@ int set_eisconn(struct bpf_sockopt *ctx) if (bpf_set_retval(-EISCONN)) assertion_error = 1; + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 0; } @@ -48,5 +61,9 @@ int legacy_eperm(struct bpf_sockopt *ctx) { __sync_fetch_and_add(&invocations, 1); + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 0; } diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h index 0c5b785a93e4..b15c588ace15 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_common.h +++ b/tools/testing/selftests/bpf/progs/cpumask_common.h @@ -28,6 +28,8 @@ void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym; struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym; u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym; u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; +u32 bpf_cpumask_first_and(const struct cpumask *src1, + const struct cpumask *src2) __ksym; void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym; @@ -50,8 +52,8 @@ bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym; bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym; void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym; -u32 bpf_cpumask_any(const struct cpumask *src) __ksym; -u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) __ksym; +u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym; +u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym; void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c index 2fcdd7f68ac7..674a63424dee 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_success.c +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c @@ -5,6 +5,7 @@ #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "bpf_misc.h" #include "cpumask_common.h" char _license[] SEC("license") = "GPL"; @@ -175,6 +176,38 @@ release_exit: } SEC("tp_btf/task_newtask") +int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *mask1, *mask2; + u32 first; + + if (!is_test_task()) + return 0; + + mask1 = create_cpumask(); + if (!mask1) + return 0; + + mask2 = create_cpumask(); + if (!mask2) + goto release_exit; + + bpf_cpumask_set_cpu(0, mask1); + bpf_cpumask_set_cpu(1, mask2); + + first = bpf_cpumask_first_and(cast(mask1), cast(mask2)); + if (first <= 1) + err = 3; + +release_exit: + if (mask1) + bpf_cpumask_release(mask1); + if (mask2) + bpf_cpumask_release(mask2); + return 0; +} + +SEC("tp_btf/task_newtask") int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags) { struct bpf_cpumask *cpumask; @@ -311,13 +344,13 @@ int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags) bpf_cpumask_set_cpu(1, mask2); bpf_cpumask_or(dst1, cast(mask1), cast(mask2)); - cpu = bpf_cpumask_any(cast(mask1)); + cpu = 
bpf_cpumask_any_distribute(cast(mask1)); if (cpu != 0) { err = 6; goto release_exit; } - cpu = bpf_cpumask_any(cast(dst2)); + cpu = bpf_cpumask_any_distribute(cast(dst2)); if (cpu < nr_cpus) { err = 7; goto release_exit; @@ -329,13 +362,13 @@ int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags) goto release_exit; } - cpu = bpf_cpumask_any(cast(dst2)); + cpu = bpf_cpumask_any_distribute(cast(dst2)); if (cpu > 1) { err = 9; goto release_exit; } - cpu = bpf_cpumask_any_and(cast(mask1), cast(mask2)); + cpu = bpf_cpumask_any_and_distribute(cast(mask1), cast(mask2)); if (cpu < nr_cpus) { err = 10; goto release_exit; @@ -426,3 +459,26 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags) return 0; } + +SEC("tp_btf/task_newtask") +__success +int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *mask1, *mask2; + + mask1 = bpf_cpumask_create(); + mask2 = bpf_cpumask_create(); + + if (!mask1 || !mask2) + goto free_masks_return; + + bpf_cpumask_test_cpu(0, (const struct cpumask *)mask1); + bpf_cpumask_test_cpu(0, (const struct cpumask *)mask2); + +free_masks_return: + if (mask1) + bpf_cpumask_release(mask1); + if (mask2) + bpf_cpumask_release(mask2); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 759eb5c245cd..7ce7e827d5f0 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -3,6 +3,7 @@ #include <errno.h> #include <string.h> +#include <stdbool.h> #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include <linux/if_ether.h> @@ -1378,3 +1379,310 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb) return 0; } + +/* bpf_dynptr_adjust can only be called on initialized dynptrs */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") +int dynptr_adjust_invalid(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_adjust(&ptr, 1, 2); + + return 0; +} + +/* bpf_dynptr_is_null can only be called on initialized dynptrs */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") +int dynptr_is_null_invalid(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_is_null(&ptr); + + return 0; +} + +/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") +int dynptr_is_rdonly_invalid(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_is_rdonly(&ptr); + + return 0; +} + +/* bpf_dynptr_size can only be called on initialized dynptrs */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") +int dynptr_size_invalid(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_size(&ptr); + + return 0; +} + +/* Only initialized dynptrs can be cloned */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") +int clone_invalid1(void *ctx) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + + /* this should fail */ + bpf_dynptr_clone(&ptr1, &ptr2); + + return 0; +} + +/* Can't overwrite an existing dynptr when cloning */ +SEC("?xdp") +__failure __msg("cannot overwrite referenced dynptr") +int clone_invalid2(struct xdp_md *xdp) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr clone; + + bpf_dynptr_from_xdp(xdp, 0, &ptr1); + + bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone); + + /* this should 
fail */ + bpf_dynptr_clone(&ptr1, &clone); + + bpf_ringbuf_submit_dynptr(&clone, 0); + + return 0; +} + +/* Invalidating a dynptr should invalidate its clones */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #3") +int clone_invalidate1(void *ctx) +{ + struct bpf_dynptr clone; + struct bpf_dynptr ptr; + char read_data[64]; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone); + + bpf_ringbuf_submit_dynptr(&ptr, 0); + + /* this should fail */ + bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0); + + return 0; +} + +/* Invalidating a dynptr should invalidate its parent */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #3") +int clone_invalidate2(void *ctx) +{ + struct bpf_dynptr ptr; + struct bpf_dynptr clone; + char read_data[64]; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone); + + bpf_ringbuf_submit_dynptr(&clone, 0); + + /* this should fail */ + bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0); + + return 0; +} + +/* Invalidating a dynptr should invalidate its siblings */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #3") +int clone_invalidate3(void *ctx) +{ + struct bpf_dynptr ptr; + struct bpf_dynptr clone1; + struct bpf_dynptr clone2; + char read_data[64]; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone1); + + bpf_dynptr_clone(&ptr, &clone2); + + bpf_ringbuf_submit_dynptr(&clone2, 0); + + /* this should fail */ + bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0); + + return 0; +} + +/* Invalidating a dynptr should invalidate any data slices + * of its clones + */ +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") +int clone_invalidate4(void *ctx) +{ + struct bpf_dynptr ptr; + struct bpf_dynptr clone; + int *data; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone); + data = bpf_dynptr_data(&clone, 0, sizeof(val)); + if (!data) + return 0; + + bpf_ringbuf_submit_dynptr(&ptr, 0); + + /* this should fail */ + *data = 123; + + return 0; +} + +/* Invalidating a dynptr should invalidate any data slices + * of its parent + */ +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") +int clone_invalidate5(void *ctx) +{ + struct bpf_dynptr ptr; + struct bpf_dynptr clone; + int *data; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + data = bpf_dynptr_data(&ptr, 0, sizeof(val)); + if (!data) + return 0; + + bpf_dynptr_clone(&ptr, &clone); + + bpf_ringbuf_submit_dynptr(&clone, 0); + + /* this should fail */ + *data = 123; + + return 0; +} + +/* Invalidating a dynptr should invalidate any data slices + * of its sibling + */ +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") +int clone_invalidate6(void *ctx) +{ + struct bpf_dynptr ptr; + struct bpf_dynptr clone1; + struct bpf_dynptr clone2; + int *data; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone1); + + bpf_dynptr_clone(&ptr, &clone2); + + data = bpf_dynptr_data(&clone1, 0, sizeof(val)); + if (!data) + return 0; + + bpf_ringbuf_submit_dynptr(&clone2, 0); + + /* this should fail */ + *data = 123; + + return 0; +} + +/* A skb clone's data slices should be invalid anytime packet data changes */ +SEC("?tc") +__failure __msg("invalid mem access 'scalar'") +int clone_skb_packet_data(struct __sk_buff *skb) +{ + char buffer[sizeof(__u32)] = {}; + struct bpf_dynptr clone; + struct bpf_dynptr ptr; + __u32 
*data; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone); + data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer)); + if (!data) + return XDP_DROP; + + if (bpf_skb_pull_data(skb, skb->len)) + return SK_DROP; + + /* this should fail */ + *data = 123; + + return 0; +} + +/* A xdp clone's data slices should be invalid anytime packet data changes */ +SEC("?xdp") +__failure __msg("invalid mem access 'scalar'") +int clone_xdp_packet_data(struct xdp_md *xdp) +{ + char buffer[sizeof(__u32)] = {}; + struct bpf_dynptr clone; + struct bpf_dynptr ptr; + struct ethhdr *hdr; + __u32 *data; + + bpf_dynptr_from_xdp(xdp, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone); + data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer)); + if (!data) + return XDP_DROP; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr))) + return XDP_DROP; + + /* this should fail */ + *data = 123; + + return 0; +} + +/* Buffers that are provided must be sufficiently long */ +SEC("?cgroup_skb/egress") +__failure __msg("memory, len pair leads to invalid memory access") +int test_dynptr_skb_small_buff(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + char buffer[8] = {}; + __u64 *data; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* This may return NULL. SKB may require a buffer */ + data = bpf_dynptr_slice(&ptr, 0, buffer, 9); + + return !!data; +} diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index b2fa6c47ecc0..5985920d162e 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -2,6 +2,7 @@ /* Copyright (c) 2022 Facebook */ #include <string.h> +#include <stdbool.h> #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" @@ -207,3 +208,339 @@ int test_dynptr_skb_data(struct __sk_buff *skb) return 1; } + +SEC("tp/syscalls/sys_enter_nanosleep") +int test_adjust(void *ctx) +{ + struct bpf_dynptr ptr; + __u32 bytes = 64; + __u32 off = 10; + __u32 trim = 15; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr); + if (err) { + err = 1; + goto done; + } + + if (bpf_dynptr_size(&ptr) != bytes) { + err = 2; + goto done; + } + + /* Advance the dynptr by off */ + err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr)); + if (err) { + err = 3; + goto done; + } + + if (bpf_dynptr_size(&ptr) != bytes - off) { + err = 4; + goto done; + } + + /* Trim the dynptr */ + err = bpf_dynptr_adjust(&ptr, off, 15); + if (err) { + err = 5; + goto done; + } + + /* Check that the size was adjusted correctly */ + if (bpf_dynptr_size(&ptr) != trim - off) { + err = 6; + goto done; + } + +done: + bpf_ringbuf_discard_dynptr(&ptr, 0); + return 0; +} + +SEC("tp/syscalls/sys_enter_nanosleep") +int test_adjust_err(void *ctx) +{ + char write_data[45] = "hello there, world!!"; + struct bpf_dynptr ptr; + __u32 size = 64; + __u32 off = 20; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) { + err = 1; + goto done; + } + + /* Check that start can't be greater than end */ + if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) { + err = 2; + goto done; + } + + /* Check that start can't be greater than size */ + if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) { + err = 3; + goto done; + } + + /* Check that end can't be greater than size */ + if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) { + err = 
4; + goto done; + } + + if (bpf_dynptr_adjust(&ptr, off, size)) { + err = 5; + goto done; + } + + /* Check that you can't write more bytes than available into the dynptr + * after you've adjusted it + */ + if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) { + err = 6; + goto done; + } + + /* Check that even after adjusting, submitting/discarding + * a ringbuf dynptr works + */ + bpf_ringbuf_submit_dynptr(&ptr, 0); + return 0; + +done: + bpf_ringbuf_discard_dynptr(&ptr, 0); + return 0; +} + +SEC("tp/syscalls/sys_enter_nanosleep") +int test_zero_size_dynptr(void *ctx) +{ + char write_data = 'x', read_data; + struct bpf_dynptr ptr; + __u32 size = 64; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) { + err = 1; + goto done; + } + + /* After this, the dynptr has a size of 0 */ + if (bpf_dynptr_adjust(&ptr, size, size)) { + err = 2; + goto done; + } + + /* Test that reading + writing non-zero bytes is not ok */ + if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) { + err = 3; + goto done; + } + + if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) { + err = 4; + goto done; + } + + /* Test that reading + writing 0 bytes from a 0-size dynptr is ok */ + if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) { + err = 5; + goto done; + } + + if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) { + err = 6; + goto done; + } + + err = 0; + +done: + bpf_ringbuf_discard_dynptr(&ptr, 0); + return 0; +} + +SEC("tp/syscalls/sys_enter_nanosleep") +int test_dynptr_is_null(void *ctx) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + __u64 size = 4; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + /* Pass in invalid flags, get back an invalid dynptr */ + if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) { + err = 1; + goto exit_early; + } + + /* Test that the invalid dynptr is null */ + if (!bpf_dynptr_is_null(&ptr1)) { + err = 2; + goto exit_early; + } + + /* Get a valid dynptr */ + if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) { + err = 3; + goto exit; + } + + /* Test that the valid dynptr is not null */ + if (bpf_dynptr_is_null(&ptr2)) { + err = 4; + goto exit; + } + +exit: + bpf_ringbuf_discard_dynptr(&ptr2, 0); +exit_early: + bpf_ringbuf_discard_dynptr(&ptr1, 0); + return 0; +} + +SEC("cgroup_skb/egress") +int test_dynptr_is_rdonly(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + struct bpf_dynptr ptr3; + + /* Pass in invalid flags, get back an invalid dynptr */ + if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) { + err = 1; + return 0; + } + + /* Test that an invalid dynptr is_rdonly returns false */ + if (bpf_dynptr_is_rdonly(&ptr1)) { + err = 2; + return 0; + } + + /* Get a read-only dynptr */ + if (bpf_dynptr_from_skb(skb, 0, &ptr2)) { + err = 3; + return 0; + } + + /* Test that the dynptr is read-only */ + if (!bpf_dynptr_is_rdonly(&ptr2)) { + err = 4; + return 0; + } + + /* Get a read-writeable dynptr */ + if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) { + err = 5; + goto done; + } + + /* Test that the dynptr is read-only */ + if (bpf_dynptr_is_rdonly(&ptr3)) { + err = 6; + goto done; + } + +done: + bpf_ringbuf_discard_dynptr(&ptr3, 0); + return 0; +} + +SEC("cgroup_skb/egress") +int test_dynptr_clone(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + __u32 off = 2, size; + + /* Get a dynptr */ + if (bpf_dynptr_from_skb(skb, 0, &ptr1)) 
{ + err = 1; + return 0; + } + + if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) { + err = 2; + return 0; + } + + /* Clone the dynptr */ + if (bpf_dynptr_clone(&ptr1, &ptr2)) { + err = 3; + return 0; + } + + size = bpf_dynptr_size(&ptr1); + + /* Check that the clone has the same size and rd-only */ + if (bpf_dynptr_size(&ptr2) != size) { + err = 4; + return 0; + } + + if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) { + err = 5; + return 0; + } + + /* Advance and trim the original dynptr */ + bpf_dynptr_adjust(&ptr1, 5, 5); + + /* Check that only original dynptr was affected, and the clone wasn't */ + if (bpf_dynptr_size(&ptr2) != size) { + err = 6; + return 0; + } + + return 0; +} + +SEC("?cgroup_skb/egress") +int test_dynptr_skb_no_buff(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + __u64 *data; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* This may return NULL. SKB may require a buffer */ + data = bpf_dynptr_slice(&ptr, 0, NULL, 1); + + return !!data; +} + +SEC("?cgroup_skb/egress") +int test_dynptr_skb_strcmp(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + char *data; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* This may return NULL. SKB may require a buffer */ + data = bpf_dynptr_slice(&ptr, 0, NULL, 10); + if (data) { + bpf_strncmp(data, 10, "foo"); + return 1; + } + + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index be16143ae292..6b9b3c56f009 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -651,29 +651,25 @@ int iter_stack_array_loop(const void *ctx) return sum; } -#define ARR_SZ 16 - -static __noinline void fill(struct bpf_iter_num *it, int *arr, int mul) +static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul) { - int *t; - __u64 i; + int *t, i; while ((t = bpf_iter_num_next(it))) { i = *t; - if (i >= ARR_SZ) + if (i >= n) break; arr[i] = i * mul; } } -static __noinline int sum(struct bpf_iter_num *it, int *arr) +static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n) { - int *t, sum = 0;; - __u64 i; + int *t, i, sum = 0;; while ((t = bpf_iter_num_next(it))) { i = *t; - if (i >= ARR_SZ) + if (i >= n) break; sum += arr[i]; } @@ -685,7 +681,7 @@ SEC("raw_tp") __success int iter_pass_iter_ptr_to_subprog(const void *ctx) { - int arr1[ARR_SZ], arr2[ARR_SZ]; + int arr1[16], arr2[32]; struct bpf_iter_num it; int n, sum1, sum2; @@ -694,25 +690,25 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx) /* fill arr1 */ n = ARRAY_SIZE(arr1); bpf_iter_num_new(&it, 0, n); - fill(&it, arr1, 2); + fill(&it, arr1, n, 2); bpf_iter_num_destroy(&it); /* fill arr2 */ n = ARRAY_SIZE(arr2); bpf_iter_num_new(&it, 0, n); - fill(&it, arr2, 10); + fill(&it, arr2, n, 10); bpf_iter_num_destroy(&it); /* sum arr1 */ n = ARRAY_SIZE(arr1); bpf_iter_num_new(&it, 0, n); - sum1 = sum(&it, arr1); + sum1 = sum(&it, arr1, n); bpf_iter_num_destroy(&it); /* sum arr2 */ n = ARRAY_SIZE(arr2); bpf_iter_num_new(&it, 0, n); - sum2 = sum(&it, arr2); + sum2 = sum(&it, arr2, n); bpf_iter_num_destroy(&it); bpf_printk("sum1=%d, sum2=%d", sum1, sum2); diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c index 13f00ca2ed0a..f9789e668297 100644 --- a/tools/testing/selftests/bpf/progs/jit_probe_mem.c +++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c @@ -3,13 +3,11 @@ #include <vmlinux.h> #include 
<bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "../bpf_testmod/bpf_testmod_kfunc.h" static struct prog_test_ref_kfunc __kptr *v; long total_sum = -1; -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; - SEC("tc") int test_jit_probe_mem(struct __sk_buff *ctx) { diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c index 767472bc5a97..7632d9ecb253 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c @@ -1,8 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <vmlinux.h> #include <bpf/bpf_helpers.h> - -extern void bpf_kfunc_call_test_destructive(void) __ksym; +#include "../bpf_testmod/bpf_testmod_kfunc.h" SEC("tc") int kfunc_destructive_test(void) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c index b98313d391c6..4b0b7b79cdfb 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -2,14 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> - -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; -extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; -extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; -extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; -extern int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; -extern void bpf_kfunc_call_int_mem_release(int *p) __ksym; +#include "../bpf_testmod/bpf_testmod_kfunc.h" struct syscall_test_args { __u8 data[16]; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_race.c b/tools/testing/selftests/bpf/progs/kfunc_call_race.c index 4e8fed75a4e0..d532af07decf 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_race.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_race.c @@ -1,8 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <vmlinux.h> #include <bpf/bpf_helpers.h> - -extern void bpf_testmod_test_mod_kfunc(int i) __ksym; +#include "../bpf_testmod/bpf_testmod_kfunc.h" SEC("tc") int kfunc_call_fail(struct __sk_buff *ctx) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index 7daa8f5720b9..cf68d1e48a0f 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -2,22 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> - -extern long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym; -extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym; -extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b, - __u32 c, __u64 d) __ksym; - -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; -extern void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) __ksym; -extern void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym; -extern 
void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym; -extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; -extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym; -extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; -extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; -extern u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym; +#include "../bpf_testmod/bpf_testmod_kfunc.h" SEC("tc") int kfunc_call_test4(struct __sk_buff *skb) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c index c1fdecabeabf..2380c75e74ce 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c @@ -1,13 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021 Facebook */ -#include <linux/bpf.h> -#include <bpf/bpf_helpers.h> -#include "bpf_tcp_helpers.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" extern const int bpf_prog_active __ksym; -extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b, - __u32 c, __u64 d) __ksym; -extern struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym; int active_res = -1; int sk_state_res = -1; @@ -28,7 +23,7 @@ int __noinline f1(struct __sk_buff *skb) if (active) active_res = *active; - sk_state_res = bpf_kfunc_call_test3((struct sock *)sk)->sk_state; + sk_state_res = bpf_kfunc_call_test3((struct sock *)sk)->__sk_common.skc_state; return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4); } diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c index 0ef286da092b..06838083079c 100644 --- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c @@ -5,7 +5,8 @@ #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_core_read.h> -#include "bpf_experimental.h" +#include "../bpf_experimental.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" struct node_data { long key; @@ -32,8 +33,6 @@ struct map_value { */ struct node_data *just_here_because_btf_bug; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; - struct { __uint(type, BPF_MAP_TYPE_ARRAY); __type(key, int); diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index d7150041e5d1..da30f0d59364 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -2,6 +2,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "../bpf_testmod/bpf_testmod_kfunc.h" struct map_value { struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr; @@ -114,10 +115,6 @@ DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps); DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps); DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps); -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; -void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym; - #define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val)) static void test_kptr_unref(struct map_value *v) 
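
Several hunks above (cb_refs.c, jit_probe_mem.c, the kfunc_call_* programs, map_kptr.c) make the same mechanical change: per-file extern __ksym prototypes for the bpf_testmod kfuncs are dropped in favour of including the shared ../bpf_testmod/bpf_testmod_kfunc.h. The exact header contents are an assumption here; roughly, it collects the prototypes deleted from the individual programs, e.g.:

#ifndef _BPF_TESTMOD_KFUNC_H
#define _BPF_TESTMOD_KFUNC_H

/* Types come from vmlinux.h, __ksym from bpf_helpers.h; the
 * declarations below are copied from the hunks above.
 */
struct prog_test_ref_kfunc;

extern struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
extern void bpf_kfunc_call_test_destructive(void) __ksym;
extern void bpf_testmod_test_mod_kfunc(int i) __ksym;
extern u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym;

#endif /* _BPF_TESTMOD_KFUNC_H */
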
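The dynptr_fail.c and dynptr_success.c hunks above exercise the new dynptr kfuncs: bpf_dynptr_adjust() narrows an initialized dynptr to the [start, end) window, bpf_dynptr_clone() creates a second view of the same underlying buffer (so releasing either one invalidates the other and any of its data slices), and bpf_dynptr_size()/is_null()/is_rdonly() query its state. A minimal sketch tying these together, assuming the extern signatures the selftests use:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_dynptr_adjust(struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym;
extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym;
extern int bpf_dynptr_clone(struct bpf_dynptr *ptr, struct bpf_dynptr *clone) __ksym;

struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
        __uint(max_entries, 4096);
} ringbuf SEC(".maps");

SEC("tp/syscalls/sys_enter_nanosleep")
int dynptr_adjust_clone_sketch(void *ctx)
{
        struct bpf_dynptr ptr, clone;

        if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr))
                goto out;       /* discarding a failed reservation is fine */

        /* Narrow the 64-byte reservation to bytes [8, 24): 16 bytes left. */
        if (bpf_dynptr_adjust(&ptr, 8, 24) || bpf_dynptr_size(&ptr) != 16)
                goto out;

        /* clone shares ptr's buffer; adjusting one does not resize the
         * other, but submitting/discarding either invalidates both.
         */
        bpf_dynptr_clone(&ptr, &clone);
out:
        bpf_ringbuf_discard_dynptr(&ptr, 0);
        return 0;
}

char _license[] SEC("license") = "GPL";
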
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index da8c724f839b..450bb373b179 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -4,6 +4,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_core_read.h> #include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" struct map_value { char buf[8]; @@ -19,9 +20,6 @@ struct array_map { __uint(max_entries, 1); } array_map SEC(".maps"); -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; - SEC("?tc") __failure __msg("kptr access size must be BPF_DW") int size_not_bpf_dw(struct __sk_buff *ctx) diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c index 1d348a225140..a3da610b1e6b 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c @@ -375,6 +375,8 @@ long rbtree_refcounted_node_ref_escapes(void *ctx) bpf_rbtree_add(&aroot, &n->node, less_a); m = bpf_refcount_acquire(n); bpf_spin_unlock(&alock); + if (!m) + return 2; m->key = 2; bpf_obj_drop(m); diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c index efcb308f80ad..0b09e5c915b1 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c @@ -29,7 +29,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) } SEC("?tc") -__failure __msg("Unreleased reference id=3 alloc_insn=21") +__failure __msg("Unreleased reference id=4 alloc_insn=21") long rbtree_refcounted_node_ref_escapes(void *ctx) { struct node_acquire *n, *m; @@ -43,6 +43,8 @@ long rbtree_refcounted_node_ref_escapes(void *ctx) /* m becomes an owning ref but is never drop'd or added to a tree */ m = bpf_refcount_acquire(n); bpf_spin_unlock(&glock); + if (!m) + return 2; m->key = 2; return 0; diff --git a/tools/testing/selftests/bpf/progs/sock_destroy_prog.c b/tools/testing/selftests/bpf/progs/sock_destroy_prog.c new file mode 100644 index 000000000000..9e0bf7a54cec --- /dev/null +++ b/tools/testing/selftests/bpf/progs/sock_destroy_prog.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +#include "bpf_tracing_net.h" + +__be16 serv_port = 0; + +int bpf_sock_destroy(struct sock_common *sk) __ksym; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} tcp_conn_sockets SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} udp_conn_sockets SEC(".maps"); + +SEC("cgroup/connect6") +int sock_connect(struct bpf_sock_addr *ctx) +{ + __u64 sock_cookie = 0; + int key = 0; + __u32 keyc = 0; + + if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6) + return 1; + + sock_cookie = bpf_get_socket_cookie(ctx); + if (ctx->protocol == IPPROTO_TCP) + bpf_map_update_elem(&tcp_conn_sockets, &key, &sock_cookie, 0); + else if (ctx->protocol == IPPROTO_UDP) + bpf_map_update_elem(&udp_conn_sockets, &keyc, &sock_cookie, 0); + else + return 1; + + return 1; +} + +SEC("iter/tcp") +int iter_tcp6_client(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = 
ctx->sk_common; + __u64 sock_cookie = 0; + __u64 *val; + int key = 0; + + if (!sk_common) + return 0; + + if (sk_common->skc_family != AF_INET6) + return 0; + + sock_cookie = bpf_get_socket_cookie(sk_common); + val = bpf_map_lookup_elem(&tcp_conn_sockets, &key); + if (!val) + return 0; + /* Destroy connected client sockets. */ + if (sock_cookie == *val) + bpf_sock_destroy(sk_common); + + return 0; +} + +SEC("iter/tcp") +int iter_tcp6_server(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = ctx->sk_common; + const struct inet_connection_sock *icsk; + const struct inet_sock *inet; + struct tcp6_sock *tcp_sk; + __be16 srcp; + + if (!sk_common) + return 0; + + if (sk_common->skc_family != AF_INET6) + return 0; + + tcp_sk = bpf_skc_to_tcp6_sock(sk_common); + if (!tcp_sk) + return 0; + + icsk = &tcp_sk->tcp.inet_conn; + inet = &icsk->icsk_inet; + srcp = inet->inet_sport; + + /* Destroy server sockets. */ + if (srcp == serv_port) + bpf_sock_destroy(sk_common); + + return 0; +} + + +SEC("iter/udp") +int iter_udp6_client(struct bpf_iter__udp *ctx) +{ + struct udp_sock *udp_sk = ctx->udp_sk; + struct sock *sk = (struct sock *) udp_sk; + __u64 sock_cookie = 0, *val; + int key = 0; + + if (!sk) + return 0; + + sock_cookie = bpf_get_socket_cookie(sk); + val = bpf_map_lookup_elem(&udp_conn_sockets, &key); + if (!val) + return 0; + /* Destroy connected client sockets. */ + if (sock_cookie == *val) + bpf_sock_destroy((struct sock_common *)sk); + + return 0; +} + +SEC("iter/udp") +int iter_udp6_server(struct bpf_iter__udp *ctx) +{ + struct udp_sock *udp_sk = ctx->udp_sk; + struct sock *sk = (struct sock *) udp_sk; + struct inet_sock *inet; + __be16 srcp; + + if (!sk) + return 0; + + inet = &udp_sk->inet; + srcp = inet->inet_sport; + if (srcp == serv_port) + bpf_sock_destroy((struct sock_common *)sk); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c b/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c new file mode 100644 index 000000000000..dd6850b58e25 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +int bpf_sock_destroy(struct sock_common *sk) __ksym; + +SEC("tp_btf/tcp_destroy_sock") +__failure __msg("calling kernel function bpf_sock_destroy is not allowed") +int BPF_PROG(trace_tcp_destroy_sock, struct sock *sk) +{ + /* should not load */ + bpf_sock_destroy((struct sock_common *)sk); + + return 0; +} + diff --git a/tools/testing/selftests/bpf/progs/sockopt_inherit.c b/tools/testing/selftests/bpf/progs/sockopt_inherit.c index 9fb241b97291..c8f59caa4639 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/progs/sockopt_inherit.c @@ -9,6 +9,8 @@ char _license[] SEC("license") = "GPL"; #define CUSTOM_INHERIT2 1 #define CUSTOM_LISTENER 2 +__u32 page_size = 0; + struct sockopt_inherit { __u8 val; }; @@ -55,7 +57,7 @@ int _getsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_CUSTOM) - return 1; /* only interested in SOL_CUSTOM */ + goto out; /* only interested in SOL_CUSTOM */ if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -70,6 +72,12 @@ int _getsockopt(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. 
*/ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/setsockopt") @@ -80,7 +88,7 @@ int _setsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_CUSTOM) - return 1; /* only interested in SOL_CUSTOM */ + goto out; /* only interested in SOL_CUSTOM */ if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -93,4 +101,10 @@ int _setsockopt(struct bpf_sockopt *ctx) ctx->optlen = -1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/sockopt_multi.c b/tools/testing/selftests/bpf/progs/sockopt_multi.c index 177a59069dae..96f29fce050b 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_multi.c +++ b/tools/testing/selftests/bpf/progs/sockopt_multi.c @@ -5,6 +5,8 @@ char _license[] SEC("license") = "GPL"; +__u32 page_size = 0; + SEC("cgroup/getsockopt") int _getsockopt_child(struct bpf_sockopt *ctx) { @@ -12,7 +14,7 @@ int _getsockopt_child(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_IP || ctx->optname != IP_TOS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -26,6 +28,12 @@ int _getsockopt_child(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/getsockopt") @@ -35,7 +43,7 @@ int _getsockopt_parent(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_IP || ctx->optname != IP_TOS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -49,6 +57,12 @@ int _getsockopt_parent(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/setsockopt") @@ -58,7 +72,7 @@ int _setsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_IP || ctx->optname != IP_TOS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -67,4 +81,10 @@ int _setsockopt(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c index 1bce83b6e3a7..dbe235ede7f3 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c +++ b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c @@ -9,6 +9,8 @@ char _license[] SEC("license") = "GPL"; +__u32 page_size = 0; + SEC("cgroup/setsockopt") int sockopt_qos_to_cc(struct bpf_sockopt *ctx) { @@ -19,7 +21,7 @@ int sockopt_qos_to_cc(struct bpf_sockopt *ctx) char cc_cubic[TCP_CA_NAME_MAX] = "cubic"; if (ctx->level != SOL_IPV6 || ctx->optname != IPV6_TCLASS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -36,4 +38,10 @@ int sockopt_qos_to_cc(struct bpf_sockopt *ctx) return 0; } return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. 
*/ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c index fe1df4cd206e..cb990a7d3d45 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_sk.c +++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c @@ -37,7 +37,7 @@ int _getsockopt(struct bpf_sockopt *ctx) /* Bypass AF_NETLINK. */ sk = ctx->sk; if (sk && sk->family == AF_NETLINK) - return 1; + goto out; /* Make sure bpf_get_netns_cookie is callable. */ @@ -52,8 +52,7 @@ int _getsockopt(struct bpf_sockopt *ctx) * let next BPF program in the cgroup chain or kernel * handle it. */ - ctx->optlen = 0; /* bypass optval>PAGE_SIZE */ - return 1; + goto out; } if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) { @@ -61,7 +60,7 @@ int _getsockopt(struct bpf_sockopt *ctx) * let next BPF program in the cgroup chain or kernel * handle it. */ - return 1; + goto out; } if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION) { @@ -69,7 +68,7 @@ int _getsockopt(struct bpf_sockopt *ctx) * let next BPF program in the cgroup chain or kernel * handle it. */ - return 1; + goto out; } if (ctx->level == SOL_TCP && ctx->optname == TCP_ZEROCOPY_RECEIVE) { @@ -85,7 +84,7 @@ int _getsockopt(struct bpf_sockopt *ctx) if (((struct tcp_zerocopy_receive *)optval)->address != 0) return 0; /* unexpected data */ - return 1; + goto out; } if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) { @@ -129,6 +128,12 @@ int _getsockopt(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/setsockopt") @@ -142,7 +147,7 @@ int _setsockopt(struct bpf_sockopt *ctx) /* Bypass AF_NETLINK. */ sk = ctx->sk; if (sk && sk->family == AF_NETLINK) - return 1; + goto out; /* Make sure bpf_get_netns_cookie is callable. */ @@ -224,4 +229,10 @@ int _setsockopt(struct bpf_sockopt *ctx) */ return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c index b85fc8c423ba..17a9f59bf5f3 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func1.c +++ b/tools/testing/selftests/bpf/progs/test_global_func1.c @@ -10,6 +10,8 @@ static __attribute__ ((noinline)) int f0(int var, struct __sk_buff *skb) { + asm volatile (""); + return skb->len; } diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c new file mode 100644 index 000000000000..2588f2384246 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +/* rodata section */ +const volatile pid_t pid; +const volatile size_t bss_array_len; +const volatile size_t data_array_len; + +/* bss section */ +int sum = 0; +int array[1]; + +/* custom data secton */ +int my_array[1] SEC(".data.custom"); + +/* custom data section which should NOT be resizable, + * since it contains a single var which is not an array + */ +int my_int SEC(".data.non_array"); + +/* custom data section which should NOT be resizable, + * since its last var is not an array + */ +int my_array_first[1] SEC(".data.array_not_last"); +int my_int_last SEC(".data.array_not_last"); + +SEC("tp/syscalls/sys_enter_getpid") +int bss_array_sum(void *ctx) +{ + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + sum = 0; + + for (size_t i = 0; i < bss_array_len; ++i) + sum += array[i]; + + return 0; +} + +SEC("tp/syscalls/sys_enter_getuid") +int data_array_sum(void *ctx) +{ + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + sum = 0; + + for (size_t i = 0; i < data_array_len; ++i) + sum += my_array[i]; + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c index bbad3c2d9aa5..f75e531bf36f 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -265,7 +265,10 @@ static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk) static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk) { - __u16 *half = (__u16 *)&sk->dst_port; + __u16 *half; + + asm volatile (""); + half = (__u16 *)&sk->dst_port; return half[0] == bpf_htons(0xcafe); } diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c new file mode 100644 index 000000000000..56cdc0a553f0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Bytedance */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "bpf_misc.h" + +struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym; +void bpf_cgroup_release(struct cgroup *p) __ksym; +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; +void bpf_task_release(struct task_struct *p) __ksym; + +const volatile int local_pid; +const volatile __u64 cgid; +int remote_pid; + +SEC("tp_btf/task_newtask") +int BPF_PROG(handle__task_newtask, struct task_struct *task, u64 clone_flags) +{ + struct cgroup *cgrp = NULL; + struct task_struct *acquired; + + if (local_pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + acquired = bpf_task_acquire(task); + if (!acquired) + return 0; + + if (local_pid == acquired->tgid) + goto out; + + cgrp = bpf_cgroup_from_id(cgid); + if (!cgrp) + goto out; + + if (bpf_task_under_cgroup(acquired, cgrp)) + remote_pid = acquired->tgid; + +out: + if (cgrp) + bpf_cgroup_release(cgrp); + bpf_task_release(acquired); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c index 25ee4a22e48d..78c368e71797 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c @@ -2,6 +2,7 @@ /* Copyright (c) 
2022 Meta */ #include <stddef.h> #include <string.h> +#include <stdbool.h> #include <linux/bpf.h> #include <linux/if_ether.h> #include <linux/if_packet.h> diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c new file mode 100644 index 000000000000..13b29a7faa71 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c @@ -0,0 +1,659 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +/* Check that precision marks propagate through scalar IDs. + * Registers r{0,1,2} have the same scalar ID at the moment when r0 is + * marked to be precise, this mark is immediately propagated to r{1,2}. + */ +SEC("socket") +__success __log_level(2) +__msg("frame0: regs=r0,r1,r2 stack= before 4: (bf) r3 = r10") +__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") +__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_same_state(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == r2.id */ + "r1 = r0;" + "r2 = r0;" + /* force r0 to be precise, this immediately marks r1 and r2 as + * precise as well because of shared IDs + */ + "r3 = r10;" + "r3 += r0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Same as precision_same_state, but mark propagates through state / + * parent state boundary. + */ +SEC("socket") +__success __log_level(2) +__msg("frame0: last_idx 6 first_idx 5 subseq_idx -1") +__msg("frame0: regs=r0,r1,r2 stack= before 5: (bf) r3 = r10") +__msg("frame0: parent state regs=r0,r1,r2 stack=:") +__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0") +__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") +__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__msg("frame0: parent state regs=r0 stack=:") +__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_cross_state(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == r2.id */ + "r1 = r0;" + "r2 = r0;" + /* force checkpoint */ + "goto +0;" + /* force r0 to be precise, this immediately marks r1 and r2 as + * precise as well because of shared IDs + */ + "r3 = r10;" + "r3 += r0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Same as precision_same_state, but break one of the + * links, note that r1 is absent from regs=... in __msg below. 
+ */ +SEC("socket") +__success __log_level(2) +__msg("frame0: regs=r0,r2 stack= before 5: (bf) r3 = r10") +__msg("frame0: regs=r0,r2 stack= before 4: (b7) r1 = 0") +__msg("frame0: regs=r0,r2 stack= before 3: (bf) r2 = r0") +__msg("frame0: regs=r0 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_same_state_broken_link(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == r2.id */ + "r1 = r0;" + "r2 = r0;" + /* break link for r1, this is the only line that differs + * compared to the previous test + */ + "r1 = 0;" + /* force r0 to be precise, this immediately marks r1 and r2 as + * precise as well because of shared IDs + */ + "r3 = r10;" + "r3 += r0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Same as precision_same_state_broken_link, but with state / + * parent state boundary. + */ +SEC("socket") +__success __log_level(2) +__msg("frame0: regs=r0,r2 stack= before 6: (bf) r3 = r10") +__msg("frame0: regs=r0,r2 stack= before 5: (b7) r1 = 0") +__msg("frame0: parent state regs=r0,r2 stack=:") +__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0") +__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") +__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__msg("frame0: parent state regs=r0 stack=:") +__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_cross_state_broken_link(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == r2.id */ + "r1 = r0;" + "r2 = r0;" + /* force checkpoint, although link between r1 and r{0,2} is + * broken by the next statement current precision tracking + * algorithm can't react to it and propagates mark for r1 to + * the parent state. + */ + "goto +0;" + /* break link for r1, this is the only line that differs + * compared to precision_cross_state() + */ + "r1 = 0;" + /* force r0 to be precise, this immediately marks r1 and r2 as + * precise as well because of shared IDs + */ + "r3 = r10;" + "r3 += r0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Check that precision marks propagate through scalar IDs. + * Use the same scalar ID in multiple stack frames, check that + * precision information is propagated up the call stack. 
+ */ +SEC("socket") +__success __log_level(2) +__msg("11: (0f) r2 += r1") +/* Current state */ +__msg("frame2: last_idx 11 first_idx 10 subseq_idx -1") +__msg("frame2: regs=r1 stack= before 10: (bf) r2 = r10") +__msg("frame2: parent state regs=r1 stack=") +/* frame1.r{6,7} are marked because mark_precise_scalar_ids() + * looks for all registers with frame2.r1.id in the current state + */ +__msg("frame1: parent state regs=r6,r7 stack=") +__msg("frame0: parent state regs=r6 stack=") +/* Parent state */ +__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10") +__msg("frame2: regs=r1 stack= before 8: (85) call pc+1") +/* frame1.r1 is marked because of backtracking of call instruction */ +__msg("frame1: parent state regs=r1,r6,r7 stack=") +__msg("frame0: parent state regs=r6 stack=") +/* Parent state */ +__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8") +__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1") +__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1") +__msg("frame1: parent state regs=r1 stack=") +__msg("frame0: parent state regs=r6 stack=") +/* Parent state */ +__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6") +__msg("frame1: regs=r1 stack= before 4: (85) call pc+1") +__msg("frame0: parent state regs=r1,r6 stack=") +/* Parent state */ +__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4") +__msg("frame0: regs=r0,r1,r6 stack= before 3: (bf) r6 = r0") +__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_many_frames(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == r6.id */ + "r1 = r0;" + "r6 = r0;" + "call precision_many_frames__foo;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +static __naked __noinline __used +void precision_many_frames__foo(void) +{ + asm volatile ( + /* conflate one of the register numbers (r6) with outer frame, + * to verify that those are tracked independently + */ + "r6 = r1;" + "r7 = r1;" + "call precision_many_frames__bar;" + "exit" + ::: __clobber_all); +} + +static __naked __noinline __used +void precision_many_frames__bar(void) +{ + asm volatile ( + /* force r1 to be precise, this immediately marks: + * - bar frame r1 + * - foo frame r{1,6,7} + * - main frame r{1,6} + */ + "r2 = r10;" + "r2 += r1;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +/* Check that scalars with the same IDs are marked precise on stack as + * well as in registers. 
+ */ +SEC("socket") +__success __log_level(2) +/* foo frame */ +__msg("frame1: regs=r1 stack=-8,-16 before 9: (bf) r2 = r10") +__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1") +__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1") +__msg("frame1: regs=r1 stack= before 4: (85) call pc+2") +/* main frame */ +__msg("frame0: regs=r0,r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1") +__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_stack(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == fp[-8].id */ + "r1 = r0;" + "*(u64*)(r10 - 8) = r1;" + "call precision_stack__foo;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +static __naked __noinline __used +void precision_stack__foo(void) +{ + asm volatile ( + /* conflate one of the register numbers (r6) with outer frame, + * to verify that those are tracked independently + */ + "*(u64*)(r10 - 8) = r1;" + "*(u64*)(r10 - 16) = r1;" + /* force r1 to be precise, this immediately marks: + * - foo frame r1,fp{-8,-16} + * - main frame r1,fp{-8} + */ + "r2 = r10;" + "r2 += r1;" + "exit" + ::: __clobber_all); +} + +/* Use two separate scalar IDs to check that these are propagated + * independently. + */ +SEC("socket") +__success __log_level(2) +/* r{6,7} */ +__msg("11: (0f) r3 += r7") +__msg("frame0: regs=r6,r7 stack= before 10: (bf) r3 = r10") +/* ... skip some insns ... */ +__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0") +__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0") +/* r{8,9} */ +__msg("12: (0f) r3 += r9") +__msg("frame0: regs=r8,r9 stack= before 11: (0f) r3 += r7") +/* ... skip some insns ... */ +__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0") +__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_two_ids(void) +{ + asm volatile ( + /* r6 = random number up to 0xff + * r6.id == r7.id + */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + "r6 = r0;" + "r7 = r0;" + /* same, but for r{8,9} */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + "r8 = r0;" + "r9 = r0;" + /* clear r0 id */ + "r0 = 0;" + /* force checkpoint */ + "goto +0;" + "r3 = r10;" + /* force r7 to be precise, this also marks r6 */ + "r3 += r7;" + /* force r9 to be precise, this also marks r8 */ + "r3 += r9;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Verify that check_ids() is used by regsafe() for scalars. + * + * r9 = ... some pointer with range X ... + * r6 = ... unbound scalar ID=a ... + * r7 = ... unbound scalar ID=b ... + * if (r6 > r7) goto +1 + * r7 = r6 + * if (r7 > X) goto exit + * r9 += r6 + * ... access memory using r9 ... + * + * The memory access is safe only if r7 is bounded, + * which is true for one branch and not true for another. 
+ */ +SEC("socket") +__failure __msg("register with unbounded min value") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void check_ids_in_regsafe(void) +{ + asm volatile ( + /* Bump allocated stack */ + "r1 = 0;" + "*(u64*)(r10 - 8) = r1;" + /* r9 = pointer to stack */ + "r9 = r10;" + "r9 += -8;" + /* r7 = ktime_get_ns() */ + "call %[bpf_ktime_get_ns];" + "r7 = r0;" + /* r6 = ktime_get_ns() */ + "call %[bpf_ktime_get_ns];" + "r6 = r0;" + /* if r6 > r7 is an unpredictable jump */ + "if r6 > r7 goto l1_%=;" + "r7 = r6;" +"l1_%=:" + /* if r7 > 4 ...; transfers range to r6 on one execution path + * but does not transfer on another + */ + "if r7 > 4 goto l2_%=;" + /* Access memory at r9[r6], r6 is not always bounded */ + "r9 += r6;" + "r0 = *(u8*)(r9 + 0);" +"l2_%=:" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Similar to check_ids_in_regsafe. + * The l0 could be reached in two states: + * + * (1) r6{.id=A}, r7{.id=A}, r8{.id=B} + * (2) r6{.id=B}, r7{.id=A}, r8{.id=B} + * + * Where (2) is not safe, as "r7 > 4" check won't propagate range for it. + * This example would be considered safe without changes to + * mark_chain_precision() to track scalar values with equal IDs. + */ +SEC("socket") +__failure __msg("register with unbounded min value") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void check_ids_in_regsafe_2(void) +{ + asm volatile ( + /* Bump allocated stack */ + "r1 = 0;" + "*(u64*)(r10 - 8) = r1;" + /* r9 = pointer to stack */ + "r9 = r10;" + "r9 += -8;" + /* r8 = ktime_get_ns() */ + "call %[bpf_ktime_get_ns];" + "r8 = r0;" + /* r7 = ktime_get_ns() */ + "call %[bpf_ktime_get_ns];" + "r7 = r0;" + /* r6 = ktime_get_ns() */ + "call %[bpf_ktime_get_ns];" + "r6 = r0;" + /* scratch .id from r0 */ + "r0 = 0;" + /* if r6 > r7 is an unpredictable jump */ + "if r6 > r7 goto l1_%=;" + /* tie r6 and r7 .id */ + "r6 = r7;" +"l0_%=:" + /* if r7 > 4 exit(0) */ + "if r7 > 4 goto l2_%=;" + /* Access memory at r9[r6] */ + "r9 += r6;" + "r0 = *(u8*)(r9 + 0);" +"l2_%=:" + "r0 = 0;" + "exit;" +"l1_%=:" + /* tie r6 and r8 .id */ + "r6 = r8;" + "goto l0_%=;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Check that scalar IDs *are not* generated on register to register + * assignments if source register is a constant. + * + * If such IDs *are* generated the 'l1' below would be reached in + * two states: + * + * (1) r1{.id=A}, r2{.id=A} + * (2) r1{.id=C}, r2{.id=C} + * + * Thus forcing 'if r1 == r2' verification twice. 
+ */ +SEC("socket") +__success __log_level(2) +__msg("11: (1d) if r3 == r4 goto pc+0") +__msg("frame 0: propagating r3,r4") +__msg("11: safe") +__msg("processed 15 insns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void no_scalar_id_for_const(void) +{ + asm volatile ( + "call %[bpf_ktime_get_ns];" + /* unpredictable jump */ + "if r0 > 7 goto l0_%=;" + /* possibly generate same scalar ids for r3 and r4 */ + "r1 = 0;" + "r1 = r1;" + "r3 = r1;" + "r4 = r1;" + "goto l1_%=;" +"l0_%=:" + /* possibly generate different scalar ids for r3 and r4 */ + "r1 = 0;" + "r2 = 0;" + "r3 = r1;" + "r4 = r2;" +"l1_%=:" + /* predictable jump, marks r3 and r4 precise */ + "if r3 == r4 goto +0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Same as no_scalar_id_for_const() but for 32-bit values */ +SEC("socket") +__success __log_level(2) +__msg("11: (1e) if w3 == w4 goto pc+0") +__msg("frame 0: propagating r3,r4") +__msg("11: safe") +__msg("processed 15 insns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void no_scalar_id_for_const32(void) +{ + asm volatile ( + "call %[bpf_ktime_get_ns];" + /* unpredictable jump */ + "if r0 > 7 goto l0_%=;" + /* possibly generate same scalar ids for r3 and r4 */ + "w1 = 0;" + "w1 = w1;" + "w3 = w1;" + "w4 = w1;" + "goto l1_%=;" +"l0_%=:" + /* possibly generate different scalar ids for r3 and r4 */ + "w1 = 0;" + "w2 = 0;" + "w3 = w1;" + "w4 = w2;" +"l1_%=:" + /* predictable jump, marks r1 and r2 precise */ + "if w3 == w4 goto +0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Check that unique scalar IDs are ignored when new verifier state is + * compared to cached verifier state. For this test: + * - cached state has no id on r1 + * - new state has a unique id on r1 + */ +SEC("socket") +__success __log_level(2) +__msg("6: (25) if r6 > 0x7 goto pc+1") +__msg("7: (57) r1 &= 255") +__msg("8: (bf) r2 = r10") +__msg("from 6 to 8: safe") +__msg("processed 12 insns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void ignore_unique_scalar_ids_cur(void) +{ + asm volatile ( + "call %[bpf_ktime_get_ns];" + "r6 = r0;" + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* r1.id == r0.id */ + "r1 = r0;" + /* make r1.id unique */ + "r0 = 0;" + "if r6 > 7 goto l0_%=;" + /* clear r1 id, but keep the range compatible */ + "r1 &= 0xff;" +"l0_%=:" + /* get here in two states: + * - first: r1 has no id (cached state) + * - second: r1 has a unique id (should be considered equivalent) + */ + "r2 = r10;" + "r2 += r1;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Check that unique scalar IDs are ignored when new verifier state is + * compared to cached verifier state. 
For this test: + * - cached state has a unique id on r1 + * - new state has no id on r1 + */ +SEC("socket") +__success __log_level(2) +__msg("6: (25) if r6 > 0x7 goto pc+1") +__msg("7: (05) goto pc+1") +__msg("9: (bf) r2 = r10") +__msg("9: safe") +__msg("processed 13 insns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void ignore_unique_scalar_ids_old(void) +{ + asm volatile ( + "call %[bpf_ktime_get_ns];" + "r6 = r0;" + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* r1.id == r0.id */ + "r1 = r0;" + /* make r1.id unique */ + "r0 = 0;" + "if r6 > 7 goto l1_%=;" + "goto l0_%=;" +"l1_%=:" + /* clear r1 id, but keep the range compatible */ + "r1 &= 0xff;" +"l0_%=:" + /* get here in two states: + * - first: r1 has a unique id (cached state) + * - second: r1 has no id (should be considered equivalent) + */ + "r2 = r10;" + "r2 += r1;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Check that two different scalar IDs in a verified state can't be + * mapped to the same scalar ID in current state. + */ +SEC("socket") +__success __log_level(2) +/* The exit instruction should be reachable from two states, + * use two matches and "processed .. insns" to ensure this. + */ +__msg("13: (95) exit") +__msg("13: (95) exit") +__msg("processed 18 insns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void two_old_ids_one_cur_id(void) +{ + asm volatile ( + /* Give unique scalar IDs to r{6,7} */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + "r6 = r0;" + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + "r7 = r0;" + "r0 = 0;" + /* Maybe make r{6,7} IDs identical */ + "if r6 > r7 goto l0_%=;" + "goto l1_%=;" +"l0_%=:" + "r6 = r7;" +"l1_%=:" + /* Mark r{6,7} precise. + * Get here in two states: + * - first: r6{.id=A}, r7{.id=B} (cached state) + * - second: r6{.id=A}, r7{.id=A} + * Currently we don't want to consider such states equivalent. + * Thus "exit;" would be verified twice. + */ + "r2 = r10;" + "r2 += r6;" + "r2 += r7;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c new file mode 100644 index 000000000000..db6b3143338b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include <errno.h> +#include <string.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) + +int vals[] SEC(".data.vals") = {1, 2, 3, 4}; + +__naked __noinline __used +static unsigned long identity_subprog() +{ + /* the simplest *static* 64-bit identity function */ + asm volatile ( + "r0 = r1;" + "exit;" + ); +} + +__noinline __used +unsigned long global_identity_subprog(__u64 x) +{ + /* the simplest *global* 64-bit identity function */ + return x; +} + +__naked __noinline __used +static unsigned long callback_subprog() +{ + /* the simplest callback function */ + asm volatile ( + "r0 = 0;" + "exit;" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("7: (0f) r1 += r0") +__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4") +__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit") +__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1") +__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5") +__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int subprog_result_precise(void) +{ + asm volatile ( + "r6 = 3;" + /* pass r6 through r1 into subprog to get it back as r0; + * this whole chain will have to be marked as precise later + */ + "r1 = r6;" + "call identity_subprog;" + /* now use subprog's returned value (which is a + * r6 -> r1 -> r0 chain), as index into vals array, forcing + * all of that to be known precisely + */ + "r0 *= 4;" + "r1 = %[vals];" + /* here r0->r1->r6 chain is forced to be precise and has to be + * propagated back to the beginning, including through the + * subprog call + */ + "r1 += r0;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("9: (0f) r1 += r0") +__msg("mark_precise: frame0: last_idx 9 first_idx 0") +__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4") +__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1") +__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7") +__naked int global_subprog_result_precise(void) +{ + asm volatile ( + "r6 = 3;" + /* pass r6 through r1 into subprog to get it back as r0; + * given global_identity_subprog is global, precision won't + * propagate all the way back to r6 + */ + "r1 = r6;" + "call global_identity_subprog;" + /* now use subprog's returned value (which is unknown now, so + * we need to clamp it), as index into vals array, forcing r0 + * to be marked precise (with no effect on r6, though) + */ + "if r0 < %[vals_arr_sz] goto 1f;" + "r0 = %[vals_arr_sz] - 1;" + "1:" + "r0 *= 4;" + "r1 = %[vals];" + /* here r0 is forced to be precise and has to be + * propagated back to the global subprog call, but it + * shouldn't go all the way to mark r6 as precise + */ + "r1 += r0;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals), + __imm_const(vals_arr_sz, ARRAY_SIZE(vals)) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("14: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 14 first_idx 10") +__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") 
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4") +__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0") +__msg("mark_precise: frame0: parent state regs=r0 stack=:") +__msg("mark_precise: frame0: last_idx 18 first_idx 0") +__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit") +__naked int callback_result_precise(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and use result; r0 shouldn't propagate back to + * callback_subprog + */ + "r1 = r6;" /* nr_loops */ + "r2 = %[callback_subprog];" /* callback_fn */ + "r3 = 0;" /* callback_ctx */ + "r4 = 0;" /* flags */ + "call %[bpf_loop];" + + "r6 = r0;" + "if r6 > 3 goto 1f;" + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the bpf_loop() call, but not beyond + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "1:" + "exit;" + : + : __imm_ptr(vals), + __imm_ptr(callback_subprog), + __imm(bpf_loop) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("7: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 7 first_idx 0") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1") +__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5") +__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int parent_callee_saved_reg_precise(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call identity_subprog;" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("7: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 7 first_idx 0") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5") +__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int parent_callee_saved_reg_precise_global(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call global_identity_subprog;" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("12: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 12 first_idx 10") +__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4") +__msg("mark_precise: frame0: parent state regs=r6 stack=:") +__msg("mark_precise: frame0: last_idx 16 first_idx 0") +__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) 
exit") +__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0") +__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181") +__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8") +__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1") +__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3") +__naked int parent_callee_saved_reg_precise_with_callback(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 1;" /* nr_loops */ + "r2 = %[callback_subprog];" /* callback_fn */ + "r3 = 0;" /* callback_ctx */ + "r4 = 0;" /* flags */ + "call %[bpf_loop];" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) callback call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals), + __imm_ptr(callback_subprog), + __imm(bpf_loop) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("9: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 9 first_idx 6") +__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: parent state regs= stack=-8:") +__msg("mark_precise: frame0: last_idx 13 first_idx 0") +__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1") +__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6") +__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int parent_stack_slot_precise(void) +{ + asm volatile ( + /* spill reg */ + "r6 = 3;" + "*(u64 *)(r10 - 8) = r6;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call identity_subprog;" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r6 = *(u64 *)(r10 - 8);" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("9: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 9 first_idx 6") +__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: parent state regs= stack=-8:") +__msg("mark_precise: frame0: last_idx 5 first_idx 0") +__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6") +__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int 
parent_stack_slot_precise_global(void) +{ + asm volatile ( + /* spill reg */ + "r6 = 3;" + "*(u64 *)(r10 - 8) = r6;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call global_identity_subprog;" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r6 = *(u64 *)(r10 - 8);" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("14: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 14 first_idx 11") +__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: parent state regs= stack=-8:") +__msg("mark_precise: frame0: last_idx 18 first_idx 0") +__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0") +__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181") +__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8") +__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6") +__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3") +__naked int parent_stack_slot_precise_with_callback(void) +{ + asm volatile ( + /* spill reg */ + "r6 = 3;" + "*(u64 *)(r10 - 8) = r6;" + + /* ensure we have callback frame in jump history */ + "r1 = r6;" /* nr_loops */ + "r2 = %[callback_subprog];" /* callback_fn */ + "r3 = 0;" /* callback_ctx */ + "r4 = 0;" /* flags */ + "call %[bpf_loop];" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r6 = *(u64 *)(r10 - 8);" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals), + __imm_ptr(callback_subprog), + __imm(bpf_loop) + : __clobber_common, "r6" + ); +} + +__noinline __used +static __u64 subprog_with_precise_arg(__u64 x) +{ + return vals[x]; /* x is forced to be precise */ +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("8: (0f) r2 += r1") +__msg("mark_precise: frame1: last_idx 8 first_idx 0") +__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ") +__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2") +__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2") +__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3") +__naked int subprog_arg_precise(void) +{ + asm volatile ( + "r6 = 3;" + "r1 = r6;" + /* subprog_with_precise_arg expects its argument to be + * precise, so r1->r6 will be marked precise from inside the + * subprog + */ + "call subprog_with_precise_arg;" + "r0 += r6;" + "exit;" + : + : + : 
__clobber_common, "r6" + ); +} + +/* r1 is pointer to stack slot; + * r2 is a register to spill into that slot + * subprog also spills r2 into its own stack slot + */ +__naked __noinline __used +static __u64 subprog_spill_reg_precise(void) +{ + asm volatile ( + /* spill to parent stack */ + "*(u64 *)(r1 + 0) = r2;" + /* spill to subprog stack (we use -16 offset to avoid + * accidental confusion with parent's -8 stack slot in + * verifier log output) + */ + "*(u64 *)(r10 - 16) = r2;" + /* use both spills as return result to propagete precision everywhere */ + "r0 = *(u64 *)(r10 - 16);" + "r2 = *(u64 *)(r1 + 0);" + "r0 += r2;" + "exit;" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +/* precision backtracking can't currently handle stack access not through r10, + * so we won't be able to mark stack slot fp-8 as precise, and so will + * fallback to forcing all as precise + */ +__msg("mark_precise: frame0: falling back to forcing all scalars precise") +__naked int subprog_spill_into_parent_stack_slot_precise(void) +{ + asm volatile ( + "r6 = 1;" + + /* pass pointer to stack slot and r6 to subprog; + * r6 will be marked precise and spilled into fp-8 slot, which + * also should be marked precise + */ + "r1 = r10;" + "r1 += -8;" + "r2 = r6;" + "call subprog_spill_reg_precise;" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r7 = *(u64 *)(r10 - 8);" + + "r7 *= 4;" + "r1 = %[vals];" + /* here r7 is forced to be precise and has to be propagated + * back to the beginning, handling subprog call and logic + */ + "r1 += r7;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6", "r7" + ); +} + +__naked __noinline __used +static __u64 subprog_with_checkpoint(void) +{ + asm volatile ( + "r0 = 0;" + /* guaranteed checkpoint if BPF_F_TEST_STATE_FREQ is used */ + "goto +0;" + "exit;" + ); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c b/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c new file mode 100644 index 000000000000..bcfb6feb38c0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/if_ether.h> +#include <linux/pkt_cls.h> +#include <stdbool.h> + +int lookup_status; +bool test_xdp; +bool tcp_skc; + +#define CUR_NS BPF_F_CURRENT_NETNS + +static void socket_lookup(void *ctx, void *data_end, void *data) +{ + struct ethhdr *eth = data; + struct bpf_sock_tuple *tp; + struct bpf_sock *sk; + struct iphdr *iph; + int tplen; + + if (eth + 1 > data_end) + return; + + if (eth->h_proto != bpf_htons(ETH_P_IP)) + return; + + iph = (struct iphdr *)(eth + 1); + if (iph + 1 > data_end) + return; + + tp = (struct bpf_sock_tuple *)&iph->saddr; + tplen = sizeof(tp->ipv4); + if ((void *)tp + tplen > data_end) + return; + + switch (iph->protocol) { + case IPPROTO_TCP: + if (tcp_skc) + sk = bpf_skc_lookup_tcp(ctx, tp, tplen, CUR_NS, 0); + else + sk = bpf_sk_lookup_tcp(ctx, tp, tplen, CUR_NS, 0); + break; + case IPPROTO_UDP: + sk = bpf_sk_lookup_udp(ctx, tp, tplen, CUR_NS, 0); + break; + default: + return; + } + + lookup_status = 0; + + if (sk) { + bpf_sk_release(sk); + lookup_status = 1; + } +} + +SEC("tc") +int tc_socket_lookup(struct __sk_buff *skb) +{ + void *data_end = (void *)(long)skb->data_end; + void 
+SEC("tc")
+int tc_socket_lookup(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+
+ if (test_xdp)
+ return TC_ACT_UNSPEC;
+
+ socket_lookup(skb, data_end, data);
+ return TC_ACT_UNSPEC;
+}
+
+SEC("xdp")
+int xdp_socket_lookup(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+
+ if (!test_xdp)
+ return XDP_PASS;
+
+ socket_lookup(xdp, data_end, data);
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c index e1c787815e44..b2dfd7066c6e 100644 --- a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c @@ -77,7 +77,9 @@ int rx(struct xdp_md *ctx) }
 err = bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp);
- if (err)
+ if (!err)
+ meta->xdp_timestamp = bpf_ktime_get_tai_ns();
+ else
 meta->rx_timestamp = 0; /* Used by AF_XDP as not avail signal */
 
 err = bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
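[Editorial note: the xdp_hw_metadata change above samples CLOCK_TAI in the XDP program right after the device RX timestamp is read, so userspace can sanity-check the two clocks against each other. A hedged sketch of such a check; the struct below is an assumed mirror of struct xdp_meta from the selftest's xdp_metadata.h, not part of this diff:

#include <stdio.h>
#include <inttypes.h>
#include <linux/types.h>

struct xdp_meta_view {		/* assumed mirror of struct xdp_meta */
	__u64 rx_timestamp;	/* device clock; 0 means "not available" */
	__u64 xdp_timestamp;	/* bpf_ktime_get_tai_ns() from the XDP prog */
};

static void check_rx_timestamp(const struct xdp_meta_view *m)
{
	if (!m->rx_timestamp) {
		puts("no HW RX timestamp for this frame");
		return;
	}
	/* the TAI sample is taken after the HW stamp, so the delta
	 * approximates device-to-host latency plus clock offset
	 */
	printf("hw->xdp delta: %" PRId64 " ns\n",
	       (int64_t)(m->xdp_timestamp - m->rx_timestamp));
}
]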