Diffstat (limited to 'tools')
-rw-r--r--  tools/include/uapi/linux/bpf.h                            |   4
-rw-r--r--  tools/testing/selftests/bpf/bpf_experimental.h            |  60
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/linked_list.c      |  96
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/rbtree.c           |  25
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c  |  18
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_list.c           |  34
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_list.h           |   4
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_list_fail.c      |  96
-rw-r--r--  tools/testing/selftests/bpf/progs/rbtree.c                |  74
-rw-r--r--  tools/testing/selftests/bpf/progs/rbtree_fail.c           |  77
-rw-r--r--  tools/testing/selftests/bpf/progs/refcounted_kptr.c       | 406
-rw-r--r--  tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c  |  72
12 files changed, 806 insertions(+), 160 deletions(-)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 3823100b7934..4b20a7269bee 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -6985,6 +6985,10 @@ struct bpf_rb_node {
__u64 :64;
} __attribute__((aligned(8)));
+struct bpf_refcount {
+ __u32 :32;
+} __attribute__((aligned(4)));
+
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
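
The new struct bpf_refcount is an opaque 4-byte field that a BPF program
embeds in its own local kptr types to make them refcounted, so one object
can sit in several collections at once. A minimal sketch of such a type
(field names here are illustrative, not part of the patch):

struct my_node {
	long key;
	struct bpf_rb_node rb;    /* membership in an rbtree */
	struct bpf_list_node ll;  /* membership in a linked list */
	struct bpf_refcount ref;  /* opaque, managed by the kernel */
};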
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index dbd2c729781a..209811b1993a 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -14,7 +14,8 @@
* type ID of a struct in program BTF.
*
* The 'local_type_id' parameter must be a known constant.
- * The 'meta' parameter is a hidden argument that is ignored.
+ * The 'meta' parameter is rewritten by the verifier; there is no need
+ * for the BPF program to set it.
* Returns
* A pointer to an object of the type corresponding to the passed in
* 'local_type_id', or NULL on failure.
@@ -28,7 +29,8 @@ extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
* Free an allocated object. All fields of the object that require
* destruction will be destructed before the storage is freed.
*
- * The 'meta' parameter is a hidden argument that is ignored.
+ * The 'meta' parameter is rewritten by the verifier; there is no need
+ * for the BPF program to set it.
* Returns
* Void.
*/
@@ -38,18 +40,50 @@ extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
/* Description
+ * Increment the refcount on a refcounted local kptr, turning the
+ * non-owning reference input into an owning reference in the process.
+ *
+ * The 'meta' parameter is rewritten by the verifier; there is no need
+ * for the BPF program to set it.
+ * Returns
+ * An owning reference to the object pointed to by 'kptr'
+ */
+extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_refcount_acquire_impl */
+#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)
+
+/* Description
* Add a new entry to the beginning of the BPF linked list.
+ *
+ * The 'meta' and 'off' parameters are rewritten by the verifier; there
+ * is no need for BPF programs to set them.
* Returns
- * Void.
+ * 0 if the node was successfully added
+ * -EINVAL if the node wasn't added because it's already in a list
*/
-extern void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
+extern int bpf_list_push_front_impl(struct bpf_list_head *head,
+ struct bpf_list_node *node,
+ void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_list_push_front_impl */
+#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)
/* Description
* Add a new entry to the end of the BPF linked list.
+ *
+ * The 'meta' and 'off' parameters are rewritten by the verifier; there
+ * is no need for BPF programs to set them.
* Returns
- * Void.
+ * 0 if the node was successfully added
+ * -EINVAL if the node wasn't added because it's already in a list
*/
-extern void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
+extern int bpf_list_push_back_impl(struct bpf_list_head *head,
+ struct bpf_list_node *node,
+ void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_list_push_back_impl */
+#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)
/* Description
* Remove the entry at the beginning of the BPF linked list.
@@ -75,11 +109,19 @@ extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
/* Description
* Add 'node' to rbtree with root 'root' using comparator 'less'
+ *
+ * The 'meta' and 'off' parameters are rewritten by the verifier; there
+ * is no need for BPF programs to set them.
* Returns
- * Nothing
+ * 0 if the node was successfully added
+ * -EINVAL if the node wasn't added because it's already in a tree
*/
-extern void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
- bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) __ksym;
+extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
+ bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
+ void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_rbtree_add_impl */
+#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)
/* Description
* Return the first (leftmost) node in input tree
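
With push/add now returning int, callers are expected to check for -EINVAL
and remember that the pushed owning reference is consumed whether or not
the insert succeeds. A sketch of the resulting pattern, mirroring
__insert_in_tree_and_list() in progs/refcounted_kptr.c below ('lock',
'root', 'head', 'less' and struct my_node are assumed declarations):

static long insert_both(void)
{
	struct my_node *n, *m;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return -1;

	m = bpf_refcount_acquire(n);  /* second owning ref to the object */

	bpf_spin_lock(&lock);
	if (bpf_rbtree_add(&root, &n->rb, less)) {
		/* The failed add consumed n's owning ref; only m is
		 * left to drop.
		 */
		bpf_spin_unlock(&lock);
		bpf_obj_drop(m);
		return -2;
	}
	bpf_spin_unlock(&lock);

	bpf_spin_lock(&lock);
	if (bpf_list_push_back(&head, &m->ll)) {
		bpf_spin_unlock(&lock);
		return -3;
	}
	bpf_spin_unlock(&lock);
	return 0;
}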
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 0ed8132ce1c3..f63309fd0e28 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -84,11 +84,11 @@ static struct {
{ "double_push_back", "arg#1 expected pointer to allocated object" },
{ "no_node_value_type", "bpf_list_node not found at offset=0" },
{ "incorrect_value_type",
- "operation on bpf_list_head expects arg#1 bpf_list_node at offset=0 in struct foo, "
+ "operation on bpf_list_head expects arg#1 bpf_list_node at offset=40 in struct foo, "
"but arg is at offset=0 in struct bar" },
{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
- { "incorrect_node_off1", "bpf_list_node not found at offset=1" },
- { "incorrect_node_off2", "arg#1 offset=40, but expected bpf_list_node at offset=0 in struct foo" },
+ { "incorrect_node_off1", "bpf_list_node not found at offset=41" },
+ { "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=40 in struct foo" },
{ "no_head_type", "bpf_list_head not found at offset=0" },
{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
@@ -266,6 +266,59 @@ end:
return NULL;
}
+static void list_and_rb_node_same_struct(bool refcount_field)
+{
+ int bpf_rb_node_btf_id, bpf_refcount_btf_id, foo_btf_id;
+ struct btf *btf;
+ int id, err;
+
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ return;
+
+ bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 24);
+ if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))
+ return;
+
+ if (refcount_field) {
+ bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4);
+ if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount"))
+ return;
+ }
+
+ id = btf__add_struct(btf, "bar", refcount_field ? 44 : 40);
+ if (!ASSERT_GT(id, 0, "btf__add_struct bar"))
+ return;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ return;
+ err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ return;
+ if (refcount_field) {
+ err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 320, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::ref"))
+ return;
+ }
+
+ foo_btf_id = btf__add_struct(btf, "foo", 20);
+ if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo"))
+ return;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ return;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ return;
+ id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0);
+ if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a"))
+ return;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf");
+ btf__free(btf);
+}
+
static void test_btf(void)
{
struct btf *btf = NULL;
@@ -717,39 +770,12 @@ static void test_btf(void)
}
while (test__start_subtest("btf: list_node and rb_node in same struct")) {
- btf = init_btf();
- if (!ASSERT_OK_PTR(btf, "init_btf"))
- break;
-
- id = btf__add_struct(btf, "bpf_rb_node", 24);
- if (!ASSERT_EQ(id, 5, "btf__add_struct bpf_rb_node"))
- break;
- id = btf__add_struct(btf, "bar", 40);
- if (!ASSERT_EQ(id, 6, "btf__add_struct bar"))
- break;
- err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
- if (!ASSERT_OK(err, "btf__add_field bar::a"))
- break;
- err = btf__add_field(btf, "c", 5, 128, 0);
- if (!ASSERT_OK(err, "btf__add_field bar::c"))
- break;
-
- id = btf__add_struct(btf, "foo", 20);
- if (!ASSERT_EQ(id, 7, "btf__add_struct foo"))
- break;
- err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
- if (!ASSERT_OK(err, "btf__add_field foo::a"))
- break;
- err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
- if (!ASSERT_OK(err, "btf__add_field foo::b"))
- break;
- id = btf__add_decl_tag(btf, "contains:bar:a", 7, 0);
- if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:bar:a"))
- break;
+ list_and_rb_node_same_struct(true);
+ break;
+ }
- err = btf__load_into_kernel(btf);
- ASSERT_EQ(err, -EINVAL, "check btf");
- btf__free(btf);
+ while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) {
+ list_and_rb_node_same_struct(false);
break;
}
}
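
Note that btf__add_field() takes offsets in bits, which is why bar::c is
placed at 128 and bar::ref at 320 above. The layout this encodes, as a
quick sanity check (sizes match the values passed to btf__add_struct()):

/* struct bar layout, byte offset = bit offset / 8:
 *   a:   struct bpf_list_node at  0 (bit   0), 16 bytes
 *   c:   struct bpf_rb_node   at 16 (bit 128), 24 bytes
 *   ref: struct bpf_refcount  at 40 (bit 320),  4 bytes
 * => sizeof(struct bar) == 44 with the refcount field, 40 without,
 *    matching the 'refcount_field ? 44 : 40' size argument.
 */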
diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c
index 156fa95c42f6..e9300c96607d 100644
--- a/tools/testing/selftests/bpf/prog_tests/rbtree.c
+++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c
@@ -77,6 +77,29 @@ static void test_rbtree_first_and_remove(void)
rbtree__destroy(skel);
}
+static void test_rbtree_api_release_aliasing(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_api_release_aliasing), &opts);
+ ASSERT_OK(ret, "rbtree_api_release_aliasing");
+ ASSERT_OK(opts.retval, "rbtree_api_release_aliasing retval");
+ ASSERT_EQ(skel->data->first_data[0], 42, "rbtree_api_release_aliasing first rbtree_remove()");
+ ASSERT_EQ(skel->data->first_data[1], -1, "rbtree_api_release_aliasing second rbtree_remove()");
+
+ rbtree__destroy(skel);
+}
+
void test_rbtree_success(void)
{
if (test__start_subtest("rbtree_add_nodes"))
@@ -85,6 +108,8 @@ void test_rbtree_success(void)
test_rbtree_add_and_remove();
if (test__start_subtest("rbtree_first_and_remove"))
test_rbtree_first_and_remove();
+ if (test__start_subtest("rbtree_api_release_aliasing"))
+ test_rbtree_api_release_aliasing();
}
#define BTF_FAIL_TEST(suffix) \
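
The assertions above read globals back from the program's .data section;
for first_data[1] to still be -1 after a run, progs/rbtree.c presumably
initializes the array along these lines (a sketch, implied by the
assertions rather than shown in this hunk):

long first_data[2] = {-1, -1};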
diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
new file mode 100644
index 000000000000..2ab23832062d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "refcounted_kptr.skel.h"
+#include "refcounted_kptr_fail.skel.h"
+
+void test_refcounted_kptr(void)
+{
+ RUN_TESTS(refcounted_kptr);
+}
+
+void test_refcounted_kptr_fail(void)
+{
+ RUN_TESTS(refcounted_kptr_fail);
+}
diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c
index 53ded51a3abb..57440a554304 100644
--- a/tools/testing/selftests/bpf/progs/linked_list.c
+++ b/tools/testing/selftests/bpf/progs/linked_list.c
@@ -25,7 +25,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l
n = bpf_list_pop_front(head);
bpf_spin_unlock(lock);
if (n) {
- bpf_obj_drop(container_of(n, struct foo, node));
+ bpf_obj_drop(container_of(n, struct foo, node2));
bpf_obj_drop(f);
return 3;
}
@@ -34,7 +34,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l
n = bpf_list_pop_back(head);
bpf_spin_unlock(lock);
if (n) {
- bpf_obj_drop(container_of(n, struct foo, node));
+ bpf_obj_drop(container_of(n, struct foo, node2));
bpf_obj_drop(f);
return 4;
}
@@ -42,7 +42,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l
bpf_spin_lock(lock);
f->data = 42;
- bpf_list_push_front(head, &f->node);
+ bpf_list_push_front(head, &f->node2);
bpf_spin_unlock(lock);
if (leave_in_map)
return 0;
@@ -51,7 +51,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l
bpf_spin_unlock(lock);
if (!n)
return 5;
- f = container_of(n, struct foo, node);
+ f = container_of(n, struct foo, node2);
if (f->data != 42) {
bpf_obj_drop(f);
return 6;
@@ -59,14 +59,14 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l
bpf_spin_lock(lock);
f->data = 13;
- bpf_list_push_front(head, &f->node);
+ bpf_list_push_front(head, &f->node2);
bpf_spin_unlock(lock);
bpf_spin_lock(lock);
n = bpf_list_pop_front(head);
bpf_spin_unlock(lock);
if (!n)
return 7;
- f = container_of(n, struct foo, node);
+ f = container_of(n, struct foo, node2);
if (f->data != 13) {
bpf_obj_drop(f);
return 8;
@@ -77,7 +77,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l
n = bpf_list_pop_front(head);
bpf_spin_unlock(lock);
if (n) {
- bpf_obj_drop(container_of(n, struct foo, node));
+ bpf_obj_drop(container_of(n, struct foo, node2));
return 9;
}
@@ -85,7 +85,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l
n = bpf_list_pop_back(head);
bpf_spin_unlock(lock);
if (n) {
- bpf_obj_drop(container_of(n, struct foo, node));
+ bpf_obj_drop(container_of(n, struct foo, node2));
return 10;
}
return 0;
@@ -119,8 +119,8 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea
f[i + 1]->data = i + 1;
bpf_spin_lock(lock);
- bpf_list_push_front(head, &f[i]->node);
- bpf_list_push_front(head, &f[i + 1]->node);
+ bpf_list_push_front(head, &f[i]->node2);
+ bpf_list_push_front(head, &f[i + 1]->node2);
bpf_spin_unlock(lock);
}
@@ -130,13 +130,13 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea
bpf_spin_unlock(lock);
if (!n)
return 3;
- pf = container_of(n, struct foo, node);
+ pf = container_of(n, struct foo, node2);
if (pf->data != (ARRAY_SIZE(f) - i - 1)) {
bpf_obj_drop(pf);
return 4;
}
bpf_spin_lock(lock);
- bpf_list_push_back(head, &pf->node);
+ bpf_list_push_back(head, &pf->node2);
bpf_spin_unlock(lock);
}
@@ -149,7 +149,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea
bpf_spin_unlock(lock);
if (!n)
return 5;
- pf = container_of(n, struct foo, node);
+ pf = container_of(n, struct foo, node2);
if (pf->data != i) {
bpf_obj_drop(pf);
return 6;
@@ -160,7 +160,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea
n = bpf_list_pop_back(head);
bpf_spin_unlock(lock);
if (n) {
- bpf_obj_drop(container_of(n, struct foo, node));
+ bpf_obj_drop(container_of(n, struct foo, node2));
return 7;
}
@@ -168,7 +168,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea
n = bpf_list_pop_front(head);
bpf_spin_unlock(lock);
if (n) {
- bpf_obj_drop(container_of(n, struct foo, node));
+ bpf_obj_drop(container_of(n, struct foo, node2));
return 8;
}
return 0;
@@ -199,7 +199,7 @@ int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool le
bpf_spin_lock(lock);
f->data = 42;
- bpf_list_push_front(head, &f->node);
+ bpf_list_push_front(head, &f->node2);
bpf_spin_unlock(lock);
if (leave_in_map)
@@ -210,7 +210,7 @@ int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool le
bpf_spin_unlock(lock);
if (!n)
return 4;
- f = container_of(n, struct foo, node);
+ f = container_of(n, struct foo, node2);
if (f->data != 42) {
bpf_obj_drop(f);
return 5;
diff --git a/tools/testing/selftests/bpf/progs/linked_list.h b/tools/testing/selftests/bpf/progs/linked_list.h
index 3fb2412552fc..c0f3609a7ffa 100644
--- a/tools/testing/selftests/bpf/progs/linked_list.h
+++ b/tools/testing/selftests/bpf/progs/linked_list.h
@@ -22,7 +22,7 @@ struct foo {
struct map_value {
struct bpf_spin_lock lock;
int data;
- struct bpf_list_head head __contains(foo, node);
+ struct bpf_list_head head __contains(foo, node2);
};
struct array_map {
@@ -50,7 +50,7 @@ struct {
#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
-private(A) struct bpf_list_head ghead __contains(foo, node);
+private(A) struct bpf_list_head ghead __contains(foo, node2);
private(B) struct bpf_spin_lock glock2;
#endif
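
The switch from 'node' to 'node2' makes the tests exercise a list node at
a non-zero offset. struct foo (defined earlier in this header, outside
the hunk) has roughly the following shape, reconstructed from the
verifier messages in prog_tests/linked_list.c, which expect 'node' at
offset 0 and 'node2' at offset 40:

struct foo {
	struct bpf_list_node node;   /* offset 0 */
	struct bpf_list_head head __contains(bar, node);
	struct bpf_spin_lock lock;
	int data;
	struct bpf_list_node node2;  /* offset 40 */
};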
diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
index 41978b46f58e..f4c63daba229 100644
--- a/tools/testing/selftests/bpf/progs/linked_list_fail.c
+++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
@@ -73,22 +73,21 @@ CHECK(inner_map, pop_back, &iv->head);
int test##_missing_lock_##op(void *ctx) \
{ \
INIT; \
- void (*p)(void *, void *) = (void *)&bpf_list_##op; \
- p(hexpr, nexpr); \
+ bpf_list_##op(hexpr, nexpr); \
return 0; \
}
-CHECK(kptr, push_front, &f->head, b);
-CHECK(kptr, push_back, &f->head, b);
+CHECK(kptr, push_front, &f->head, &b->node);
+CHECK(kptr, push_back, &f->head, &b->node);
-CHECK(global, push_front, &ghead, f);
-CHECK(global, push_back, &ghead, f);
+CHECK(global, push_front, &ghead, &f->node2);
+CHECK(global, push_back, &ghead, &f->node2);
-CHECK(map, push_front, &v->head, f);
-CHECK(map, push_back, &v->head, f);
+CHECK(map, push_front, &v->head, &f->node2);
+CHECK(map, push_back, &v->head, &f->node2);
-CHECK(inner_map, push_front, &iv->head, f);
-CHECK(inner_map, push_back, &iv->head, f);
+CHECK(inner_map, push_front, &iv->head, &f->node2);
+CHECK(inner_map, push_back, &iv->head, &f->node2);
#undef CHECK
@@ -135,32 +134,31 @@ CHECK_OP(pop_back);
int test##_incorrect_lock_##op(void *ctx) \
{ \
INIT; \
- void (*p)(void *, void*) = (void *)&bpf_list_##op; \
bpf_spin_lock(lexpr); \
- p(hexpr, nexpr); \
+ bpf_list_##op(hexpr, nexpr); \
return 0; \
}
#define CHECK_OP(op) \
- CHECK(kptr_kptr, op, &f1->lock, &f2->head, b); \
- CHECK(kptr_global, op, &f1->lock, &ghead, f); \
- CHECK(kptr_map, op, &f1->lock, &v->head, f); \
- CHECK(kptr_inner_map, op, &f1->lock, &iv->head, f); \
+ CHECK(kptr_kptr, op, &f1->lock, &f2->head, &b->node); \
+ CHECK(kptr_global, op, &f1->lock, &ghead, &f->node2); \
+ CHECK(kptr_map, op, &f1->lock, &v->head, &f->node2); \
+ CHECK(kptr_inner_map, op, &f1->lock, &iv->head, &f->node2); \
\
- CHECK(global_global, op, &glock2, &ghead, f); \
- CHECK(global_kptr, op, &glock, &f1->head, b); \
- CHECK(global_map, op, &glock, &v->head, f); \
- CHECK(global_inner_map, op, &glock, &iv->head, f); \
+ CHECK(global_global, op, &glock2, &ghead, &f->node2); \
+ CHECK(global_kptr, op, &glock, &f1->head, &b->node); \
+ CHECK(global_map, op, &glock, &v->head, &f->node2); \
+ CHECK(global_inner_map, op, &glock, &iv->head, &f->node2); \
\
- CHECK(map_map, op, &v->lock, &v2->head, f); \
- CHECK(map_kptr, op, &v->lock, &f2->head, b); \
- CHECK(map_global, op, &v->lock, &ghead, f); \
- CHECK(map_inner_map, op, &v->lock, &iv->head, f); \
+ CHECK(map_map, op, &v->lock, &v2->head, &f->node2); \
+ CHECK(map_kptr, op, &v->lock, &f2->head, &b->node); \
+ CHECK(map_global, op, &v->lock, &ghead, &f->node2); \
+ CHECK(map_inner_map, op, &v->lock, &iv->head, &f->node2); \
\
- CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, f); \
- CHECK(inner_map_kptr, op, &iv->lock, &f2->head, b); \
- CHECK(inner_map_global, op, &iv->lock, &ghead, f); \
- CHECK(inner_map_map, op, &iv->lock, &v->head, f);
+ CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, &f->node2);\
+ CHECK(inner_map_kptr, op, &iv->lock, &f2->head, &b->node); \
+ CHECK(inner_map_global, op, &iv->lock, &ghead, &f->node2); \
+ CHECK(inner_map_map, op, &iv->lock, &v->head, &f->node2);
CHECK_OP(push_front);
CHECK_OP(push_back);
@@ -340,7 +338,7 @@ int direct_read_node(void *ctx)
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
- return *(int *)&f->node;
+ return *(int *)&f->node2;
}
SEC("?tc")
@@ -351,12 +349,12 @@ int direct_write_node(void *ctx)
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
- *(int *)&f->node = 0;
+ *(int *)&f->node2 = 0;
return 0;
}
static __always_inline
-int use_after_unlock(void (*op)(void *head, void *node))
+int use_after_unlock(bool push_front)
{
struct foo *f;
@@ -365,7 +363,10 @@ int use_after_unlock(void (*op)(void *head, void *node))
return 0;
bpf_spin_lock(&glock);
f->data = 42;
- op(&ghead, &f->node);
+ if (push_front)
+ bpf_list_push_front(&ghead, &f->node2);
+ else
+ bpf_list_push_back(&ghead, &f->node2);
bpf_spin_unlock(&glock);
return f->data;
@@ -374,17 +375,17 @@ int use_after_unlock(void (*op)(void *head, void *node))
SEC("?tc")
int use_after_unlock_push_front(void *ctx)
{
- return use_after_unlock((void *)bpf_list_push_front);
+ return use_after_unlock(true);
}
SEC("?tc")
int use_after_unlock_push_back(void *ctx)
{
- return use_after_unlock((void *)bpf_list_push_back);
+ return use_after_unlock(false);
}
static __always_inline
-int list_double_add(void (*op)(void *head, void *node))
+int list_double_add(bool push_front)
{
struct foo *f;
@@ -392,8 +393,13 @@ int list_double_add(void (*op)(void *head, void *node))
if (!f)
return 0;
bpf_spin_lock(&glock);
- op(&ghead, &f->node);
- op(&ghead, &f->node);
+ if (push_front) {
+ bpf_list_push_front(&ghead, &f->node2);
+ bpf_list_push_front(&ghead, &f->node2);
+ } else {
+ bpf_list_push_back(&ghead, &f->node2);
+ bpf_list_push_back(&ghead, &f->node2);
+ }
bpf_spin_unlock(&glock);
return 0;
@@ -402,13 +408,13 @@ int list_double_add(void (*op)(void *head, void *node))
SEC("?tc")
int double_push_front(void *ctx)
{
- return list_double_add((void *)bpf_list_push_front);
+ return list_double_add(true);
}
SEC("?tc")
int double_push_back(void *ctx)
{
- return list_double_add((void *)bpf_list_push_back);
+ return list_double_add(false);
}
SEC("?tc")
@@ -450,7 +456,7 @@ int incorrect_node_var_off(struct __sk_buff *ctx)
if (!f)
return 0;
bpf_spin_lock(&glock);
- bpf_list_push_front(&ghead, (void *)&f->node + ctx->protocol);
+ bpf_list_push_front(&ghead, (void *)&f->node2 + ctx->protocol);
bpf_spin_unlock(&glock);
return 0;
@@ -465,7 +471,7 @@ int incorrect_node_off1(void *ctx)
if (!f)
return 0;
bpf_spin_lock(&glock);
- bpf_list_push_front(&ghead, (void *)&f->node + 1);
+ bpf_list_push_front(&ghead, (void *)&f->node2 + 1);
bpf_spin_unlock(&glock);
return 0;
@@ -480,7 +486,7 @@ int incorrect_node_off2(void *ctx)
if (!f)
return 0;
bpf_spin_lock(&glock);
- bpf_list_push_front(&ghead, &f->node2);
+ bpf_list_push_front(&ghead, &f->node);
bpf_spin_unlock(&glock);
return 0;
@@ -510,7 +516,7 @@ int incorrect_head_var_off1(struct __sk_buff *ctx)
if (!f)
return 0;
bpf_spin_lock(&glock);
- bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node);
+ bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node2);
bpf_spin_unlock(&glock);
return 0;
@@ -525,7 +531,7 @@ int incorrect_head_var_off2(struct __sk_buff *ctx)
if (!f)
return 0;
bpf_spin_lock(&glock);
- bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node);
+ bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node2);
bpf_spin_unlock(&glock);
return 0;
@@ -563,7 +569,7 @@ int incorrect_head_off2(void *ctx)
return 0;
bpf_spin_lock(&glock);
- bpf_list_push_front((void *)&ghead + 1, &f->node);
+ bpf_list_push_front((void *)&ghead + 1, &f->node2);
bpf_spin_unlock(&glock);
return 0;
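
The void (*p)(void *, void *) indirection had to go: bpf_list_push_front
and bpf_list_push_back are now macros over the *_impl kfuncs, so their
address can no longer be taken, and the helpers take a bool and branch
instead. Anywhere a push operation still needs to be selected at the call
site, the equivalent sketch is:

static __always_inline int push(bool front, struct bpf_list_head *head,
				struct bpf_list_node *node)
{
	return front ? bpf_list_push_front(head, node)
		     : bpf_list_push_back(head, node);
}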
diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c
index 4c90aa6abddd..b09f4fffe57c 100644
--- a/tools/testing/selftests/bpf/progs/rbtree.c
+++ b/tools/testing/selftests/bpf/progs/rbtree.c
@@ -93,9 +93,11 @@ long rbtree_add_and_remove(void *ctx)
res = bpf_rbtree_remove(&groot, &n->node);
bpf_spin_unlock(&glock);
+ if (!res)
+ return 1;
+
n = container_of(res, struct node_data, node);
removed_key = n->key;
-
bpf_obj_drop(n);
return 0;
@@ -148,9 +150,11 @@ long rbtree_first_and_remove(void *ctx)
res = bpf_rbtree_remove(&groot, &o->node);
bpf_spin_unlock(&glock);
+ if (!res)
+ return 5;
+
o = container_of(res, struct node_data, node);
removed_key = o->key;
-
bpf_obj_drop(o);
bpf_spin_lock(&glock);
@@ -173,4 +177,70 @@ err_out:
return 1;
}
+SEC("tc")
+long rbtree_api_release_aliasing(void *ctx)
+{
+ struct node_data *n, *m, *o;
+ struct bpf_rb_node *res, *res2;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ n->key = 41;
+ n->data = 42;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+
+ /* m and o point to the same node,
+ * but the verifier doesn't know this
+ */
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto err_out;
+ o = container_of(res, struct node_data, node);
+
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto err_out;
+ m = container_of(res, struct node_data, node);
+
+ res = bpf_rbtree_remove(&groot, &m->node);
+ /* The previous remove returned an owning reference to m, which is
+ * the same node that non-owning ref o points at. We can safely try
+ * to remove o as well: the second rbtree_remove will return NULL,
+ * since the node is no longer in a tree.
+ *
+ * Previously we relied on the verifier's type system plus
+ * rbtree_remove invalidating non-owning refs to ensure that
+ * rbtree_remove couldn't fail. Now rbtree_remove does runtime
+ * checking, so non-owning refs are no longer invalidated after a
+ * remove.
+ */
+ res2 = bpf_rbtree_remove(&groot, &o->node);
+
+ bpf_spin_unlock(&glock);
+
+ if (res) {
+ o = container_of(res, struct node_data, node);
+ first_data[0] = o->data;
+ bpf_obj_drop(o);
+ }
+ if (res2) {
+ /* The second remove fails, so res2 is null and this doesn't
+ * execute
+ */
+ m = container_of(res2, struct node_data, node);
+ first_data[1] = m->data;
+ bpf_obj_drop(m);
+ }
+ return 0;
+
+err_out:
+ bpf_spin_unlock(&glock);
+ return 1;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c
index 46d7d18a218f..3fecf1c6dfe5 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_fail.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c
@@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx)
}
SEC("?tc")
-__failure __msg("Unreleased reference id=2 alloc_insn=10")
+__failure __msg("Unreleased reference id=3 alloc_insn=10")
long rbtree_api_remove_no_drop(void *ctx)
{
struct bpf_rb_node *res;
@@ -118,11 +118,13 @@ long rbtree_api_remove_no_drop(void *ctx)
res = bpf_rbtree_remove(&groot, res);
- n = container_of(res, struct node_data, node);
- __sink(n);
+ if (res) {
+ n = container_of(res, struct node_data, node);
+ __sink(n);
+ }
bpf_spin_unlock(&glock);
- /* bpf_obj_drop(n) is missing here */
+ /* if (res) { bpf_obj_drop(n); } is missing here */
return 0;
unlock_err:
@@ -150,35 +152,36 @@ long rbtree_api_add_to_multiple_trees(void *ctx)
}
SEC("?tc")
-__failure __msg("rbtree_remove node input must be non-owning ref")
-long rbtree_api_add_release_unlock_escape(void *ctx)
+__failure __msg("dereference of modified ptr_or_null_ ptr R2 off=16 disallowed")
+long rbtree_api_use_unchecked_remove_retval(void *ctx)
{
- struct node_data *n;
-
- n = bpf_obj_new(typeof(*n));
- if (!n)
- return 1;
+ struct bpf_rb_node *res;
bpf_spin_lock(&glock);
- bpf_rbtree_add(&groot, &n->node, less);
+
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto err_out;
+ res = bpf_rbtree_remove(&groot, res);
+
bpf_spin_unlock(&glock);
bpf_spin_lock(&glock);
- /* After add() in previous critical section, n should be
- * release_on_unlock and released after previous spin_unlock,
- * so should not be possible to use it here
- */
- bpf_rbtree_remove(&groot, &n->node);
+ /* Must check res for NULL before using in rbtree_add below */
+ bpf_rbtree_add(&groot, res, less);
bpf_spin_unlock(&glock);
return 0;
+
+err_out:
+ bpf_spin_unlock(&glock);
+ return 1;
}
SEC("?tc")
__failure __msg("rbtree_remove node input must be non-owning ref")
-long rbtree_api_release_aliasing(void *ctx)
+long rbtree_api_add_release_unlock_escape(void *ctx)
{
- struct node_data *n, *m, *o;
- struct bpf_rb_node *res;
+ struct node_data *n;
n = bpf_obj_new(typeof(*n));
if (!n)
@@ -189,37 +192,11 @@ long rbtree_api_release_aliasing(void *ctx)
bpf_spin_unlock(&glock);
bpf_spin_lock(&glock);
-
- /* m and o point to the same node,
- * but verifier doesn't know this
- */
- res = bpf_rbtree_first(&groot);
- if (!res)
- return 1;
- o = container_of(res, struct node_data, node);
-
- res = bpf_rbtree_first(&groot);
- if (!res)
- return 1;
- m = container_of(res, struct node_data, node);
-
- bpf_rbtree_remove(&groot, &m->node);
- /* This second remove shouldn't be possible. Retval of previous
- * remove returns owning reference to m, which is the same
- * node o's non-owning ref is pointing at
- *
- * In order to preserve property
- * * owning ref must not be in rbtree
- * * non-owning ref must be in rbtree
- *
- * o's ref must be invalidated after previous remove. Otherwise
- * we'd have non-owning ref to node that isn't in rbtree, and
- * verifier wouldn't be able to use type system to prevent remove
- * of ref that already isn't in any tree. Would have to do runtime
- * checks in that case.
+ /* After add() in previous critical section, n should be
+ * release_on_unlock and released after previous spin_unlock,
+ * so should not be possible to use it here
*/
- bpf_rbtree_remove(&groot, &o->node);
-
+ bpf_rbtree_remove(&groot, &n->node);
bpf_spin_unlock(&glock);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
new file mode 100644
index 000000000000..1d348a225140
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+ long key;
+ long list_data;
+ struct bpf_rb_node r;
+ struct bpf_list_node l;
+ struct bpf_refcount ref;
+};
+
+struct map_value {
+ struct node_data __kptr *node;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} stashed_nodes SEC(".maps");
+
+struct node_acquire {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+ struct bpf_refcount refcount;
+};
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock lock;
+private(A) struct bpf_rb_root root __contains(node_data, r);
+private(A) struct bpf_list_head head __contains(node_data, l);
+
+private(B) struct bpf_spin_lock alock;
+private(B) struct bpf_rb_root aroot __contains(node_acquire, node);
+
+static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b)
+{
+ struct node_data *a;
+ struct node_data *b;
+
+ a = container_of(node_a, struct node_data, r);
+ b = container_of(node_b, struct node_data, r);
+
+ return a->key < b->key;
+}
+
+static bool less_a(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_acquire *node_a;
+ struct node_acquire *node_b;
+
+ node_a = container_of(a, struct node_acquire, node);
+ node_b = container_of(b, struct node_acquire, node);
+
+ return node_a->key < node_b->key;
+}
+
+static long __insert_in_tree_and_list(struct bpf_list_head *head,
+ struct bpf_rb_root *root,
+ struct bpf_spin_lock *lock)
+{
+ struct node_data *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return -1;
+
+ m = bpf_refcount_acquire(n);
+ m->key = 123;
+ m->list_data = 456;
+
+ bpf_spin_lock(lock);
+ if (bpf_rbtree_add(root, &n->r, less)) {
+ /* Failure to insert - unexpected */
+ bpf_spin_unlock(lock);
+ bpf_obj_drop(m);
+ return -2;
+ }
+ bpf_spin_unlock(lock);
+
+ bpf_spin_lock(lock);
+ if (bpf_list_push_front(head, &m->l)) {
+ /* Failure to insert - unexpected */
+ bpf_spin_unlock(lock);
+ return -3;
+ }
+ bpf_spin_unlock(lock);
+ return 0;
+}
+
+static long __stash_map_insert_tree(int idx, int val, struct bpf_rb_root *root,
+ struct bpf_spin_lock *lock)
+{
+ struct map_value *mapval;
+ struct node_data *n, *m;
+
+ mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+ if (!mapval)
+ return -1;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return -2;
+
+ n->key = val;
+ m = bpf_refcount_acquire(n);
+
+ n = bpf_kptr_xchg(&mapval->node, n);
+ if (n) {
+ bpf_obj_drop(n);
+ bpf_obj_drop(m);
+ return -3;
+ }
+
+ bpf_spin_lock(lock);
+ if (bpf_rbtree_add(root, &m->r, less)) {
+ /* Failure to insert - unexpected */
+ bpf_spin_unlock(lock);
+ return -4;
+ }
+ bpf_spin_unlock(lock);
+ return 0;
+}
+
+static long __read_from_tree(struct bpf_rb_root *root,
+ struct bpf_spin_lock *lock,
+ bool remove_from_tree)
+{
+ struct bpf_rb_node *rb;
+ struct node_data *n;
+ long res = -99;
+
+ bpf_spin_lock(lock);
+
+ rb = bpf_rbtree_first(root);
+ if (!rb) {
+ bpf_spin_unlock(lock);
+ return -1;
+ }
+
+ n = container_of(rb, struct node_data, r);
+ res = n->key;
+
+ if (!remove_from_tree) {
+ bpf_spin_unlock(lock);
+ return res;
+ }
+
+ rb = bpf_rbtree_remove(root, rb);
+ bpf_spin_unlock(lock);
+ if (!rb)
+ return -2;
+ n = container_of(rb, struct node_data, r);
+ bpf_obj_drop(n);
+ return res;
+}
+
+static long __read_from_list(struct bpf_list_head *head,
+ struct bpf_spin_lock *lock,
+ bool remove_from_list)
+{
+ struct bpf_list_node *l;
+ struct node_data *n;
+ long res = -99;
+
+ bpf_spin_lock(lock);
+
+ l = bpf_list_pop_front(head);
+ if (!l) {
+ bpf_spin_unlock(lock);
+ return -1;
+ }
+
+ n = container_of(l, struct node_data, l);
+ res = n->list_data;
+
+ if (!remove_from_list) {
+ if (bpf_list_push_back(head, &n->l)) {
+ bpf_spin_unlock(lock);
+ return -2;
+ }
+ }
+
+ bpf_spin_unlock(lock);
+
+ if (remove_from_list)
+ bpf_obj_drop(n);
+ return res;
+}
+
+static long __read_from_unstash(int idx)
+{
+ struct node_data *n = NULL;
+ struct map_value *mapval;
+ long val = -99;
+
+ mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+ if (!mapval)
+ return -1;
+
+ n = bpf_kptr_xchg(&mapval->node, n);
+ if (!n)
+ return -2;
+
+ val = n->key;
+ bpf_obj_drop(n);
+ return val;
+}
+
+#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(579) \
+long insert_and_remove_tree_##rem_tree##_list_##rem_list(void *ctx) \
+{ \
+ long err, tree_data, list_data; \
+ \
+ err = __insert_in_tree_and_list(&head, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = __read_from_tree(&root, &lock, rem_tree); \
+ if (err < 0) \
+ return err; \
+ else \
+ tree_data = err; \
+ \
+ err = __read_from_list(&head, &lock, rem_list); \
+ if (err < 0) \
+ return err; \
+ else \
+ list_data = err; \
+ \
+ return tree_data + list_data; \
+}
+
+/* After successful insert of struct node_data into both collections:
+ * - it should have refcount = 2
+ * - removing / not removing the node_data from a collection after
+ * reading should have no effect on ability to read / remove from
+ * the other collection
+ */
+INSERT_READ_BOTH(true, true, "insert_read_both: remove from tree + list");
+INSERT_READ_BOTH(false, false, "insert_read_both: remove from neither");
+INSERT_READ_BOTH(true, false, "insert_read_both: remove from tree");
+INSERT_READ_BOTH(false, true, "insert_read_both: remove from list");
+
+#undef INSERT_READ_BOTH
+#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(579) \
+long insert_and_remove_lf_tree_##rem_tree##_list_##rem_list(void *ctx) \
+{ \
+ long err, tree_data, list_data; \
+ \
+ err = __insert_in_tree_and_list(&head, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = __read_from_list(&head, &lock, rem_list); \
+ if (err < 0) \
+ return err; \
+ else \
+ list_data = err; \
+ \
+ err = __read_from_tree(&root, &lock, rem_tree); \
+ if (err < 0) \
+ return err; \
+ else \
+ tree_data = err; \
+ \
+ return tree_data + list_data; \
+}
+
+/* Similar to insert_read_both, but list data is read and possibly removed
+ * first
+ *
+ * Results should be no different than reading and possibly removing rbtree
+ * node first
+ */
+INSERT_READ_BOTH(true, true, "insert_read_both_list_first: remove from tree + list");
+INSERT_READ_BOTH(false, false, "insert_read_both_list_first: remove from neither");
+INSERT_READ_BOTH(true, false, "insert_read_both_list_first: remove from tree");
+INSERT_READ_BOTH(false, true, "insert_read_both_list_first: remove from list");
+
+#define INSERT_DOUBLE_READ_AND_DEL(read_fn, read_root, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(-1) \
+long insert_double_##read_fn##_and_del_##read_root(void *ctx) \
+{ \
+ long err, list_data; \
+ \
+ err = __insert_in_tree_and_list(&head, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = read_fn(&read_root, &lock, true); \
+ if (err < 0) \
+ return err; \
+ else \
+ list_data = err; \
+ \
+ err = read_fn(&read_root, &lock, true); \
+ if (err < 0) \
+ return err; \
+ \
+ return err + list_data; \
+}
+
+/* Insert into both tree and list, then try reading-and-removing from either twice
+ *
+ * The second read-and-remove should fail on read step since the node has
+ * already been removed
+ */
+INSERT_DOUBLE_READ_AND_DEL(__read_from_tree, root, "insert_double_del: 2x read-and-del from tree");
+INSERT_DOUBLE_READ_AND_DEL(__read_from_list, head, "insert_double_del: 2x read-and-del from list");
+
+#define INSERT_STASH_READ(rem_tree, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(84) \
+long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \
+{ \
+ long err, tree_data, map_data; \
+ \
+ err = __stash_map_insert_tree(0, 42, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = __read_from_tree(&root, &lock, rem_tree); \
+ if (err < 0) \
+ return err; \
+ else \
+ tree_data = err; \
+ \
+ err = __read_from_unstash(0); \
+ if (err < 0) \
+ return err; \
+ else \
+ map_data = err; \
+ \
+ return tree_data + map_data; \
+}
+
+/* Stash a refcounted node in map_val, insert same node into tree, then try
+ * reading data from tree then unstashed map_val, possibly removing from tree
+ *
+ * Removing from tree should have no effect on map_val kptr validity
+ */
+INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
+INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
+
+SEC("tc")
+__success
+long rbtree_refcounted_node_ref_escapes(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&alock);
+ bpf_rbtree_add(&aroot, &n->node, less_a);
+ m = bpf_refcount_acquire(n);
+ bpf_spin_unlock(&alock);
+
+ m->key = 2;
+ bpf_obj_drop(m);
+ return 0;
+}
+
+SEC("tc")
+__success
+long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ m = bpf_refcount_acquire(n);
+ m->key = 2;
+
+ bpf_spin_lock(&alock);
+ bpf_rbtree_add(&aroot, &n->node, less_a);
+ bpf_spin_unlock(&alock);
+
+ bpf_obj_drop(m);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
new file mode 100644
index 000000000000..efcb308f80ad
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct node_acquire {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+ struct bpf_refcount refcount;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_acquire, node);
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_acquire *node_a;
+ struct node_acquire *node_b;
+
+ node_a = container_of(a, struct node_acquire, node);
+ node_b = container_of(b, struct node_acquire, node);
+
+ return node_a->key < node_b->key;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=3 alloc_insn=21")
+long rbtree_refcounted_node_ref_escapes(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ /* m becomes an owning ref but is never drop'd or added to a tree */
+ m = bpf_refcount_acquire(n);
+ bpf_spin_unlock(&glock);
+
+ m->key = 2;
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=3 alloc_insn=9")
+long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ /* m becomes an owning ref but is never drop'd or added to a tree */
+ m = bpf_refcount_acquire(n);
+ m->key = 2;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";