-rw-r--r--  include/linux/bpf.h                                        |  3
-rw-r--r--  include/linux/bpf_verifier.h                               |  2
-rw-r--r--  kernel/bpf/helpers.c                                       |  8
-rw-r--r--  kernel/bpf/verifier.c                                      | 41
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c   | 26
-rw-r--r--  tools/testing/selftests/bpf/progs/refcounted_kptr.c        | 71
-rw-r--r--  tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c   | 28
7 files changed, 165 insertions, 14 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index eced6400f778..12596af59c00 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -653,7 +653,8 @@ enum bpf_type_flag {
 	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),
 
 	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
-	 * Currently only valid for linked-list and rbtree nodes.
+	 * Currently only valid for linked-list and rbtree nodes. If the nodes
+	 * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
 	 */
 	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),
 
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index f70f9ac884d2..b6e58dab8e27 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -745,7 +745,7 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
 	}
 }
 
-#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED)
+#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)
 
 static inline bool bpf_type_has_unsafe_modifiers(u32 type)
 {
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index eb91cae0612a..8bd3812fb8df 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -286,6 +286,7 @@ static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
 	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
 	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
 	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
+	preempt_disable();
 	arch_spin_lock(l);
 }
 
@@ -294,6 +295,7 @@ static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
 	arch_spinlock_t *l = (void *)lock;
 
 	arch_spin_unlock(l);
+	preempt_enable();
 }
 
 #else
@@ -1913,7 +1915,11 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec)
 {
 	if (rec)
 		bpf_obj_free_fields(rec, p);
-	bpf_mem_free(&bpf_global_ma, p);
+
+	if (rec && rec->refcount_off >= 0)
+		bpf_mem_free_rcu(&bpf_global_ma, p);
+	else
+		bpf_mem_free(&bpf_global_ma, p);
 }
 
 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0680569f9bd0..bb78212fa5b2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5064,7 +5064,9 @@ bad_type:
  */
 static bool in_rcu_cs(struct bpf_verifier_env *env)
 {
-	return env->cur_state->active_rcu_lock || !env->prog->aux->sleepable;
+	return env->cur_state->active_rcu_lock ||
+	       env->cur_state->active_lock.ptr ||
+	       !env->prog->aux->sleepable;
 }
 
 /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
@@ -8007,6 +8009,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
 	case PTR_TO_BTF_ID | PTR_TRUSTED:
 	case PTR_TO_BTF_ID | MEM_RCU:
 	case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
+	case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF | MEM_RCU:
 		/* When referenced PTR_TO_BTF_ID is passed to release function,
 		 * its fixed offset must be 0. In the other cases, fixed offset
 		 * can be non-zero. This was already checked above. So pass
@@ -10473,6 +10476,7 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 {
 	struct bpf_verifier_state *state = env->cur_state;
+	struct btf_record *rec = reg_btf_record(reg);
 
 	if (!state->active_lock.ptr) {
 		verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
@@ -10485,6 +10489,9 @@ static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 	}
 
 	reg->type |= NON_OWN_REF;
+	if (rec->refcount_off >= 0)
+		reg->type |= MEM_RCU;
+
 	return 0;
 }
 
@@ -11217,10 +11224,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
 				verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i);
 				return -EINVAL;
 			}
-			if (rec->refcount_off >= 0) {
-				verbose(env, "bpf_refcount_acquire calls are disabled for now\n");
-				return -EINVAL;
-			}
+
 			meta->arg_btf = reg->btf;
 			meta->arg_btf_id = reg->btf_id;
 			break;
@@ -11325,6 +11329,11 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		struct bpf_func_state *state;
 		struct bpf_reg_state *reg;
 
+		if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) {
+			verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n");
+			return -EACCES;
+		}
+
 		if (rcu_lock) {
 			verbose(env, "nested rcu read lock (kernel function %s)\n", func_name);
 			return -EINVAL;
@@ -16687,7 +16696,8 @@ static int do_check(struct bpf_verifier_env *env)
 					return -EINVAL;
 				}
 
-				if (env->cur_state->active_rcu_lock) {
+				if (env->cur_state->active_rcu_lock &&
+				    !in_rbtree_lock_required_cb(env)) {
 					verbose(env, "bpf_rcu_read_unlock is missing\n");
 					return -EINVAL;
 				}
@@ -16967,11 +16977,6 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
 			return -EINVAL;
 		}
-
-		if (prog->aux->sleepable) {
-			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
-			return -EINVAL;
-		}
 	}
 
 	if (btf_record_has_field(map->record, BPF_TIMER)) {
@@ -18276,6 +18281,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
 
+		if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
+		    !kptr_struct_meta) {
+			verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",
+				insn_idx);
+			return -EFAULT;
+		}
+
 		insn_buf[0] = addr[0];
 		insn_buf[1] = addr[1];
 		insn_buf[2] = *insn;
@@ -18283,6 +18295,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	} else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
 		   desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
 		   desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
 		int struct_meta_reg = BPF_REG_3;
 		int node_offset_reg = BPF_REG_4;
 
@@ -18292,6 +18305,12 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			node_offset_reg = BPF_REG_5;
 		}
 
+		if (!kptr_struct_meta) {
+			verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",
+				insn_idx);
+			return -EFAULT;
+		}
+
 		__fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg,
 						node_offset_reg, insn, insn_buf, cnt);
 	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
index 7423983472c7..d6bd5e16e637 100644
--- a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
@@ -9,12 +9,38 @@
 
 void test_refcounted_kptr(void)
 {
+	RUN_TESTS(refcounted_kptr);
 }
 
 void test_refcounted_kptr_fail(void)
 {
+	RUN_TESTS(refcounted_kptr_fail);
 }
 
 void test_refcounted_kptr_wrong_owner(void)
 {
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .repeat = 1,
+	);
+	struct refcounted_kptr *skel;
+	int ret;
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+		return;
+
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a1), &opts);
+	ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a1");
+	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a1 retval");
+
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_b), &opts);
+	ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_b");
+	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_b retval");
+
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a2), &opts);
+	ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a2");
+	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
+	refcounted_kptr__destroy(skel);
 }
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index c55652fdc63a..893a4fdb4b6e 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -8,6 +8,9 @@
 #include "bpf_misc.h"
 #include "bpf_experimental.h"
 
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
 struct node_data {
 	long key;
 	long list_data;
@@ -497,4 +500,72 @@ long rbtree_wrong_owner_remove_fail_a2(void *ctx)
 	return 0;
 }
 
+SEC("?fentry.s/bpf_testmod_test_read")
+__success
+int BPF_PROG(rbtree_sleepable_rcu,
+	     struct file *file, struct kobject *kobj,
+	     struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+	struct bpf_rb_node *rb;
+	struct node_data *n, *m = NULL;
+
+	n = bpf_obj_new(typeof(*n));
+	if (!n)
+		return 0;
+
+	bpf_rcu_read_lock();
+	bpf_spin_lock(&lock);
+	bpf_rbtree_add(&root, &n->r, less);
+	rb = bpf_rbtree_first(&root);
+	if (!rb)
+		goto err_out;
+
+	rb = bpf_rbtree_remove(&root, rb);
+	if (!rb)
+		goto err_out;
+
+	m = container_of(rb, struct node_data, r);
+
+err_out:
+	bpf_spin_unlock(&lock);
+	bpf_rcu_read_unlock();
+	if (m)
+		bpf_obj_drop(m);
+	return 0;
+}
+
+SEC("?fentry.s/bpf_testmod_test_read")
+__success
+int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock,
+	     struct file *file, struct kobject *kobj,
+	     struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+	struct bpf_rb_node *rb;
+	struct node_data *n, *m = NULL;
+
+	n = bpf_obj_new(typeof(*n));
+	if (!n)
+		return 0;
+
+	/* No explicit bpf_rcu_read_lock */
+	bpf_spin_lock(&lock);
+	bpf_rbtree_add(&root, &n->r, less);
+	rb = bpf_rbtree_first(&root);
+	if (!rb)
+		goto err_out;
+
+	rb = bpf_rbtree_remove(&root, rb);
+	if (!rb)
+		goto err_out;
+
+	m = container_of(rb, struct node_data, r);
+
+err_out:
+	bpf_spin_unlock(&lock);
+	/* No explicit bpf_rcu_read_unlock */
+	if (m)
+		bpf_obj_drop(m);
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
index 0b09e5c915b1..1ef07f6ee580 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -13,6 +13,9 @@ struct node_acquire {
 	struct bpf_refcount refcount;
 };
 
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
 #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
 private(A) struct bpf_spin_lock glock;
 private(A) struct bpf_rb_root groot __contains(node_acquire, node);
@@ -71,4 +74,29 @@ long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
 	return 0;
 }
 
+SEC("?fentry.s/bpf_testmod_test_read")
+__failure __msg("function calls are not allowed while holding a lock")
+int BPF_PROG(rbtree_fail_sleepable_lock_across_rcu,
+	     struct file *file, struct kobject *kobj,
+	     struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+	struct node_acquire *n;
+
+	n = bpf_obj_new(typeof(*n));
+	if (!n)
+		return 0;
+
+	/* spin_{lock,unlock} are in different RCU CS */
+	bpf_rcu_read_lock();
+	bpf_spin_lock(&glock);
+	bpf_rbtree_add(&groot, &n->node, less);
+	bpf_rcu_read_unlock();
+
+	bpf_rcu_read_lock();
+	bpf_spin_unlock(&glock);
+	bpf_rcu_read_unlock();
+
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";