 include/linux/bpf.h                               |  17
 include/uapi/linux/bpf.h                          |  10
 kernel/bpf/btf.c                                  |  11
 kernel/bpf/syscall.c                              |   2
 kernel/bpf/verifier.c                             | 333
 tools/include/uapi/linux/bpf.h                    |  10
 tools/lib/bpf/bpf.c                               |   3
 tools/lib/bpf/bpf.h                               |   5
 tools/testing/selftests/bpf/prog_tests/fd_array.c | 441
 tools/testing/selftests/bpf/progs/syscall.c       |   6
 10 files changed, 701 insertions(+), 137 deletions(-)
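
Before the diff itself, a minimal userspace sketch of the API being added may help. This is not part of the patch: the XDP instruction buffer and the two map fds are assumed to exist, and load_with_extra_maps is a hypothetical helper built on the bpf_prog_load_opts extension added in tools/lib/bpf/bpf.h below.

/* Sketch only, not part of the patch below. */
#include <bpf/bpf.h>

static int load_with_extra_maps(const struct bpf_insn *insns, size_t insn_cnt,
				int map_fd_a, int map_fd_b)
{
	int fd_array[] = { map_fd_a, map_fd_b };
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.fd_array = fd_array,
		.fd_array_cnt = 2,	/* fd_array[0..1] must be valid map/BTF fds */
	);

	/* Both maps are bound to the resulting program at load time, even
	 * if the instructions never reference them, similar to what
	 * BPF_PROG_BIND_MAP would do after the load.
	 */
	return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL",
			     insns, insn_cnt, &opts);
}

Passing fd_array without setting fd_array_cnt keeps the old behaviour, where the array is consulted lazily as instructions reference it; with fd_array_cnt set, the whole window is scanned at load time and every fd in it is bound to the program.
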
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index eaee2a819f4c..ac44b857b2f9 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2301,6 +2301,14 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); +/* + * The __bpf_map_get() and __btf_get_by_fd() functions parse a file + * descriptor and return a corresponding map or btf object. + * Their names are double underscored to emphasize the fact that they + * do not increase refcnt. To also increase refcnt use corresponding + * bpf_map_get() and btf_get_by_fd() functions. + */ + static inline struct bpf_map *__bpf_map_get(struct fd f) { if (fd_empty(f)) @@ -2310,6 +2318,15 @@ static inline struct bpf_map *__bpf_map_get(struct fd f) return fd_file(f)->private_data; } +static inline struct btf *__btf_get_by_fd(struct fd f) +{ + if (fd_empty(f)) + return ERR_PTR(-EBADF); + if (unlikely(fd_file(f)->f_op != &btf_fops)) + return ERR_PTR(-EINVAL); + return fd_file(f)->private_data; +} + void bpf_map_inc(struct bpf_map *map); void bpf_map_inc_with_uref(struct bpf_map *map); struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4162afc6b5d0..2acf9b336371 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1573,6 +1573,16 @@ union bpf_attr { * If provided, prog_flags should have BPF_F_TOKEN_FD flag set. */ __s32 prog_token_fd; + /* The fd_array_cnt can be used to pass the length of the + * fd_array array. In this case all the [map] file descriptors + * passed in this array will be bound to the program, even if + * the maps are not referenced directly. The functionality is + * similar to the BPF_PROG_BIND_MAP syscall, but maps can be + * used by the verifier during the program load. If provided, + * then the fd_array[0,...,fd_array_cnt-1] is expected to be + * continuous. + */ + __u32 fd_array_cnt; }; struct { /* anonymous struct used by BPF_OBJ_* commands */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index e7a59e6462a9..2ef4fef71910 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -7746,14 +7746,9 @@ struct btf *btf_get_by_fd(int fd) struct btf *btf; CLASS(fd, f)(fd); - if (fd_empty(f)) - return ERR_PTR(-EBADF); - - if (fd_file(f)->f_op != &btf_fops) - return ERR_PTR(-EINVAL); - - btf = fd_file(f)->private_data; - refcount_inc(&btf->refcnt); + btf = __btf_get_by_fd(f); + if (!IS_ERR(btf)) + refcount_inc(&btf->refcnt); return btf; } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 5684e8ce132d..4e88797fdbeb 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2730,7 +2730,7 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) } /* last field in 'union bpf_attr' used by this command */ -#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd +#define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c855e7905c35..c0e772996c52 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -19218,50 +19218,68 @@ static int find_btf_percpu_datasec(struct btf *btf) return -ENOENT; } +/* + * Add btf to the used_btfs array and return the index. (If the btf was + * already added, then just return the index.) Upon successful insertion + * increase btf refcnt, and, if present, also refcount the corresponding + * kernel module. 
+ */ +static int __add_used_btf(struct bpf_verifier_env *env, struct btf *btf) +{ + struct btf_mod_pair *btf_mod; + int i; + + /* check whether we recorded this BTF (and maybe module) already */ + for (i = 0; i < env->used_btf_cnt; i++) + if (env->used_btfs[i].btf == btf) + return i; + + if (env->used_btf_cnt >= MAX_USED_BTFS) + return -E2BIG; + + btf_get(btf); + + btf_mod = &env->used_btfs[env->used_btf_cnt]; + btf_mod->btf = btf; + btf_mod->module = NULL; + + /* if we reference variables from kernel module, bump its refcount */ + if (btf_is_module(btf)) { + btf_mod->module = btf_try_get_module(btf); + if (!btf_mod->module) { + btf_put(btf); + return -ENXIO; + } + } + + return env->used_btf_cnt++; +} + /* replace pseudo btf_id with kernel symbol address */ -static int check_pseudo_btf_id(struct bpf_verifier_env *env, - struct bpf_insn *insn, - struct bpf_insn_aux_data *aux) +static int __check_pseudo_btf_id(struct bpf_verifier_env *env, + struct bpf_insn *insn, + struct bpf_insn_aux_data *aux, + struct btf *btf) { const struct btf_var_secinfo *vsi; const struct btf_type *datasec; - struct btf_mod_pair *btf_mod; const struct btf_type *t; const char *sym_name; bool percpu = false; u32 type, id = insn->imm; - struct btf *btf; s32 datasec_id; u64 addr; - int i, btf_fd, err; - - btf_fd = insn[1].imm; - if (btf_fd) { - btf = btf_get_by_fd(btf_fd); - if (IS_ERR(btf)) { - verbose(env, "invalid module BTF object FD specified.\n"); - return -EINVAL; - } - } else { - if (!btf_vmlinux) { - verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n"); - return -EINVAL; - } - btf = btf_vmlinux; - btf_get(btf); - } + int i; t = btf_type_by_id(btf, id); if (!t) { verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id); - err = -ENOENT; - goto err_put; + return -ENOENT; } if (!btf_type_is_var(t) && !btf_type_is_func(t)) { verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id); - err = -EINVAL; - goto err_put; + return -EINVAL; } sym_name = btf_name_by_offset(btf, t->name_off); @@ -19269,8 +19287,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env, if (!addr) { verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n", sym_name); - err = -ENOENT; - goto err_put; + return -ENOENT; } insn[0].imm = (u32)addr; insn[1].imm = addr >> 32; @@ -19278,7 +19295,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env, if (btf_type_is_func(t)) { aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; aux->btf_var.mem_size = 0; - goto check_btf; + return 0; } datasec_id = find_btf_percpu_datasec(btf); @@ -19309,8 +19326,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env, tname = btf_name_by_offset(btf, t->name_off); verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n", tname, PTR_ERR(ret)); - err = -EINVAL; - goto err_put; + return -EINVAL; } aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; aux->btf_var.mem_size = tsize; @@ -19319,39 +19335,43 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env, aux->btf_var.btf = btf; aux->btf_var.btf_id = type; } -check_btf: - /* check whether we recorded this BTF (and maybe module) already */ - for (i = 0; i < env->used_btf_cnt; i++) { - if (env->used_btfs[i].btf == btf) { - btf_put(btf); - return 0; - } - } - if (env->used_btf_cnt >= MAX_USED_BTFS) { - err = -E2BIG; - goto err_put; - } + return 0; +} - btf_mod = &env->used_btfs[env->used_btf_cnt]; - btf_mod->btf = btf; - btf_mod->module = NULL; +static int check_pseudo_btf_id(struct 
bpf_verifier_env *env, + struct bpf_insn *insn, + struct bpf_insn_aux_data *aux) +{ + struct btf *btf; + int btf_fd; + int err; - /* if we reference variables from kernel module, bump its refcount */ - if (btf_is_module(btf)) { - btf_mod->module = btf_try_get_module(btf); - if (!btf_mod->module) { - err = -ENXIO; - goto err_put; + btf_fd = insn[1].imm; + if (btf_fd) { + CLASS(fd, f)(btf_fd); + + btf = __btf_get_by_fd(f); + if (IS_ERR(btf)) { + verbose(env, "invalid module BTF object FD specified.\n"); + return -EINVAL; + } + } else { + if (!btf_vmlinux) { + verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n"); + return -EINVAL; } + btf = btf_vmlinux; } - env->used_btf_cnt++; + err = __check_pseudo_btf_id(env, insn, aux, btf); + if (err) + return err; + err = __add_used_btf(env, btf); + if (err < 0) + return err; return 0; -err_put: - btf_put(btf); - return err; } static bool is_tracing_prog_type(enum bpf_prog_type type) @@ -19368,6 +19388,12 @@ static bool is_tracing_prog_type(enum bpf_prog_type type) } } +static bool bpf_map_is_cgroup_storage(struct bpf_map *map) +{ + return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || + map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); +} + static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog) @@ -19446,39 +19472,47 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, return -EINVAL; } - return 0; -} + if (bpf_map_is_cgroup_storage(map) && + bpf_cgroup_storage_assign(env->prog->aux, map)) { + verbose(env, "only one cgroup storage of each type is allowed\n"); + return -EBUSY; + } -static bool bpf_map_is_cgroup_storage(struct bpf_map *map) -{ - return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || - map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); + if (map->map_type == BPF_MAP_TYPE_ARENA) { + if (env->prog->aux->arena) { + verbose(env, "Only one arena per program\n"); + return -EBUSY; + } + if (!env->allow_ptr_leaks || !env->bpf_capable) { + verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n"); + return -EPERM; + } + if (!env->prog->jit_requested) { + verbose(env, "JIT is required to use arena\n"); + return -EOPNOTSUPP; + } + if (!bpf_jit_supports_arena()) { + verbose(env, "JIT doesn't support arena\n"); + return -EOPNOTSUPP; + } + env->prog->aux->arena = (void *)map; + if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) { + verbose(env, "arena's user address must be set via map_extra or mmap()\n"); + return -EINVAL; + } + } + + return 0; } -/* Add map behind fd to used maps list, if it's not already there, and return - * its index. Also set *reused to true if this map was already in the list of - * used maps. - * Returns <0 on error, or >= 0 index, on success. 
- */ -static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reused) +static int __add_used_map(struct bpf_verifier_env *env, struct bpf_map *map) { - CLASS(fd, f)(fd); - struct bpf_map *map; - int i; - - map = __bpf_map_get(f); - if (IS_ERR(map)) { - verbose(env, "fd %d is not pointing to valid bpf_map\n", fd); - return PTR_ERR(map); - } + int i, err; /* check whether we recorded this map already */ - for (i = 0; i < env->used_map_cnt; i++) { - if (env->used_maps[i] == map) { - *reused = true; + for (i = 0; i < env->used_map_cnt; i++) + if (env->used_maps[i] == map) return i; - } - } if (env->used_map_cnt >= MAX_USED_MAPS) { verbose(env, "The total number of maps per program has reached the limit of %u\n", @@ -19486,6 +19520,10 @@ static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reus return -E2BIG; } + err = check_map_prog_compatibility(env, map, env->prog); + if (err) + return err; + if (env->prog->sleepable) atomic64_inc(&map->sleepable_refcnt); @@ -19496,12 +19534,29 @@ static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reus */ bpf_map_inc(map); - *reused = false; env->used_maps[env->used_map_cnt++] = map; return env->used_map_cnt - 1; } +/* Add map behind fd to used maps list, if it's not already there, and return + * its index. + * Returns <0 on error, or >= 0 index, on success. + */ +static int add_used_map(struct bpf_verifier_env *env, int fd) +{ + struct bpf_map *map; + CLASS(fd, f)(fd); + + map = __bpf_map_get(f); + if (IS_ERR(map)) { + verbose(env, "fd %d is not pointing to valid bpf_map\n", fd); + return PTR_ERR(map); + } + + return __add_used_map(env, map); +} + /* find and rewrite pseudo imm in ld_imm64 instructions: * * 1. if it accesses map FD, replace it with actual map pointer. 
@@ -19533,7 +19588,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) int map_idx; u64 addr; u32 fd; - bool reused; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || @@ -19594,7 +19648,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) break; } - map_idx = add_used_map_from_fd(env, fd, &reused); + map_idx = add_used_map(env, fd); if (map_idx < 0) return map_idx; map = env->used_maps[map_idx]; @@ -19602,10 +19656,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) aux = &env->insn_aux_data[i]; aux->map_index = map_idx; - err = check_map_prog_compatibility(env, map, env->prog); - if (err) - return err; - if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { addr = (unsigned long)map; @@ -19636,39 +19686,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) insn[0].imm = (u32)addr; insn[1].imm = addr >> 32; - /* proceed with extra checks only if its newly added used map */ - if (reused) - goto next_insn; - - if (bpf_map_is_cgroup_storage(map) && - bpf_cgroup_storage_assign(env->prog->aux, map)) { - verbose(env, "only one cgroup storage of each type is allowed\n"); - return -EBUSY; - } - if (map->map_type == BPF_MAP_TYPE_ARENA) { - if (env->prog->aux->arena) { - verbose(env, "Only one arena per program\n"); - return -EBUSY; - } - if (!env->allow_ptr_leaks || !env->bpf_capable) { - verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n"); - return -EPERM; - } - if (!env->prog->jit_requested) { - verbose(env, "JIT is required to use arena\n"); - return -EOPNOTSUPP; - } - if (!bpf_jit_supports_arena()) { - verbose(env, "JIT doesn't support arena\n"); - return -EOPNOTSUPP; - } - env->prog->aux->arena = (void *)map; - if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) { - verbose(env, "arena's user address must be set via map_extra or mmap()\n"); - return -EINVAL; - } - } - next_insn: insn++; i++; @@ -22839,6 +22856,73 @@ struct btf *bpf_get_btf_vmlinux(void) return btf_vmlinux; } +/* + * The add_fd_from_fd_array() is executed only if fd_array_cnt is non-zero. In + * this case expect that every file descriptor in the array is either a map or + * a BTF. Everything else is considered to be trash. 
+ */ +static int add_fd_from_fd_array(struct bpf_verifier_env *env, int fd) +{ + struct bpf_map *map; + struct btf *btf; + CLASS(fd, f)(fd); + int err; + + map = __bpf_map_get(f); + if (!IS_ERR(map)) { + err = __add_used_map(env, map); + if (err < 0) + return err; + return 0; + } + + btf = __btf_get_by_fd(f); + if (!IS_ERR(btf)) { + err = __add_used_btf(env, btf); + if (err < 0) + return err; + return 0; + } + + verbose(env, "fd %d is not pointing to valid bpf_map or btf\n", fd); + return PTR_ERR(map); +} + +static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr, bpfptr_t uattr) +{ + size_t size = sizeof(int); + int ret; + int fd; + u32 i; + + env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); + + /* + * The only difference between old (no fd_array_cnt is given) and new + * APIs is that in the latter case the fd_array is expected to be + * continuous and is scanned for map fds right away + */ + if (!attr->fd_array_cnt) + return 0; + + /* Check for integer overflow */ + if (attr->fd_array_cnt >= (U32_MAX / size)) { + verbose(env, "fd_array_cnt is too big (%u)\n", attr->fd_array_cnt); + return -EINVAL; + } + + for (i = 0; i < attr->fd_array_cnt; i++) { + if (copy_from_bpfptr_offset(&fd, env->fd_array, i * size, size)) + return -EFAULT; + + ret = add_fd_from_fd_array(env, fd); + if (ret) + return ret; + } + + return 0; +} + int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) { u64 start_time = ktime_get_ns(); @@ -22870,7 +22954,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 env->insn_aux_data[i].orig_idx = i; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; - env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token); env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token); @@ -22893,6 +22976,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 if (ret) goto err_unlock; + ret = process_fd_array(env, attr, uattr); + if (ret) + goto skip_full_check; + mark_verifier_state_clean(env); if (IS_ERR(btf_vmlinux)) { diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 4162afc6b5d0..2acf9b336371 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1573,6 +1573,16 @@ union bpf_attr { * If provided, prog_flags should have BPF_F_TOKEN_FD flag set. */ __s32 prog_token_fd; + /* The fd_array_cnt can be used to pass the length of the + * fd_array array. In this case all the [map] file descriptors + * passed in this array will be bound to the program, even if + * the maps are not referenced directly. The functionality is + * similar to the BPF_PROG_BIND_MAP syscall, but maps can be + * used by the verifier during the program load. If provided, + * then the fd_array[0,...,fd_array_cnt-1] is expected to be + * continuous. 
+ */ + __u32 fd_array_cnt; }; struct { /* anonymous struct used by BPF_OBJ_* commands */ diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index becdfa701c75..359f73ead613 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -238,7 +238,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, size_t insn_cnt, struct bpf_prog_load_opts *opts) { - const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd); + const size_t attr_sz = offsetofend(union bpf_attr, fd_array_cnt); void *finfo = NULL, *linfo = NULL; const char *func_info, *line_info; __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd; @@ -311,6 +311,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type, attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0); attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL)); + attr.fd_array_cnt = OPTS_GET(opts, fd_array_cnt, 0); if (log_level) { attr.log_buf = ptr_to_u64(log_buf); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index a4a7b1ad1b63..435da95d2058 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -107,9 +107,12 @@ struct bpf_prog_load_opts { */ __u32 log_true_size; __u32 token_fd; + + /* if set, provides the length of fd_array */ + __u32 fd_array_cnt; size_t :0; }; -#define bpf_prog_load_opts__last_field token_fd +#define bpf_prog_load_opts__last_field fd_array_cnt LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type, const char *prog_name, const char *license, diff --git a/tools/testing/selftests/bpf/prog_tests/fd_array.c b/tools/testing/selftests/bpf/prog_tests/fd_array.c new file mode 100644 index 000000000000..a1d52e73fb16 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fd_array.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include <linux/btf.h> +#include <bpf/bpf.h> + +#include "../test_btf.h" + +static inline int new_map(void) +{ + const char *name = NULL; + __u32 max_entries = 1; + __u32 value_size = 8; + __u32 key_size = 4; + + return bpf_map_create(BPF_MAP_TYPE_ARRAY, name, + key_size, value_size, + max_entries, NULL); +} + +static int new_btf(void) +{ + struct btf_blob { + struct btf_header btf_hdr; + __u32 types[8]; + __u32 str; + } raw_btf = { + .btf_hdr = { + .magic = BTF_MAGIC, + .version = BTF_VERSION, + .hdr_len = sizeof(struct btf_header), + .type_len = sizeof(raw_btf.types), + .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types), + .str_len = sizeof(raw_btf.str), + }, + .types = { + /* long */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */ + /* unsigned long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + }, + }; + + return bpf_btf_load(&raw_btf, sizeof(raw_btf), NULL); +} + +#define Close(FD) do { \ + if ((FD) >= 0) { \ + close(FD); \ + FD = -1; \ + } \ +} while(0) + +static bool map_exists(__u32 id) +{ + int fd; + + fd = bpf_map_get_fd_by_id(id); + if (fd >= 0) { + close(fd); + return true; + } + return false; +} + +static bool btf_exists(__u32 id) +{ + int fd; + + fd = bpf_btf_get_fd_by_id(id); + if (fd >= 0) { + close(fd); + return true; + } + return false; +} + +static inline int bpf_prog_get_map_ids(int prog_fd, __u32 *nr_map_ids, __u32 *map_ids) +{ + __u32 len = sizeof(struct bpf_prog_info); + struct bpf_prog_info info; + int err; + + memset(&info, 0, len); + info.nr_map_ids = *nr_map_ids, + info.map_ids = ptr_to_u64(map_ids), + + err = bpf_prog_get_info_by_fd(prog_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd")) + return -1; + + *nr_map_ids = info.nr_map_ids; + + 
return 0; +} + +static int __load_test_prog(int map_fd, const int *fd_array, int fd_array_cnt) +{ + /* A trivial program which uses one map */ + struct bpf_insn insns[] = { + BPF_LD_MAP_FD(BPF_REG_1, map_fd), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + LIBBPF_OPTS(bpf_prog_load_opts, opts); + + opts.fd_array = fd_array; + opts.fd_array_cnt = fd_array_cnt; + + return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, ARRAY_SIZE(insns), &opts); +} + +static int load_test_prog(const int *fd_array, int fd_array_cnt) +{ + int map_fd; + int ret; + + map_fd = new_map(); + if (!ASSERT_GE(map_fd, 0, "new_map")) + return map_fd; + + ret = __load_test_prog(map_fd, fd_array, fd_array_cnt); + close(map_fd); + return ret; +} + +static bool check_expected_map_ids(int prog_fd, int expected, __u32 *map_ids, __u32 *nr_map_ids) +{ + int err; + + err = bpf_prog_get_map_ids(prog_fd, nr_map_ids, map_ids); + if (!ASSERT_OK(err, "bpf_prog_get_map_ids")) + return false; + if (!ASSERT_EQ(*nr_map_ids, expected, "unexpected nr_map_ids")) + return false; + + return true; +} + +/* + * Load a program, which uses one map. No fd_array maps are present. + * On return only one map is expected to be bound to prog. + */ +static void check_fd_array_cnt__no_fd_array(void) +{ + __u32 map_ids[16]; + __u32 nr_map_ids; + int prog_fd = -1; + + prog_fd = load_test_prog(NULL, 0); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + return; + nr_map_ids = ARRAY_SIZE(map_ids); + check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids); + close(prog_fd); +} + +/* + * Load a program, which uses one map, and pass two extra, non-equal, maps in + * fd_array with fd_array_cnt=2. On return three maps are expected to be bound + * to the program. + */ +static void check_fd_array_cnt__fd_array_ok(void) +{ + int extra_fds[2] = { -1, -1 }; + __u32 map_ids[16]; + __u32 nr_map_ids; + int prog_fd = -1; + + extra_fds[0] = new_map(); + if (!ASSERT_GE(extra_fds[0], 0, "new_map")) + goto cleanup; + extra_fds[1] = new_map(); + if (!ASSERT_GE(extra_fds[1], 0, "new_map")) + goto cleanup; + prog_fd = load_test_prog(extra_fds, 2); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + goto cleanup; + nr_map_ids = ARRAY_SIZE(map_ids); + if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids)) + goto cleanup; + + /* maps should still exist when original file descriptors are closed */ + Close(extra_fds[0]); + Close(extra_fds[1]); + if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map_ids[0] should exist")) + goto cleanup; + if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map_ids[1] should exist")) + goto cleanup; + + /* some fds might be invalid, so ignore return codes */ +cleanup: + Close(extra_fds[1]); + Close(extra_fds[0]); + Close(prog_fd); +} + +/* + * Load a program with a few extra maps duplicated in the fd_array. + * After the load maps should only be referenced once. 
+ */
+static void check_fd_array_cnt__duplicated_maps(void)
+{
+ int extra_fds[4] = { -1, -1, -1, -1 };
+ __u32 map_ids[16];
+ __u32 nr_map_ids;
+ int prog_fd = -1;
+
+ extra_fds[0] = extra_fds[2] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ extra_fds[1] = extra_fds[3] = new_map();
+ if (!ASSERT_GE(extra_fds[1], 0, "new_map"))
+ goto cleanup;
+ prog_fd = load_test_prog(extra_fds, 4);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+ nr_map_ids = ARRAY_SIZE(map_ids);
+ if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids))
+ goto cleanup;
+
+ /* maps should still exist when original file descriptors are closed */
+ Close(extra_fds[0]);
+ Close(extra_fds[1]);
+ if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist"))
+ goto cleanup;
+ if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map should exist"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[1]);
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+/*
+ * Check that if maps which are referenced by a program are
+ * passed in fd_array, then they will be referenced only once
+ */
+static void check_fd_array_cnt__referenced_maps_in_fd_array(void)
+{
+ int extra_fds[1] = { -1 };
+ __u32 map_ids[16];
+ __u32 nr_map_ids;
+ int prog_fd = -1;
+
+ extra_fds[0] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ prog_fd = __load_test_prog(extra_fds[0], extra_fds, 1);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+ nr_map_ids = ARRAY_SIZE(map_ids);
+ if (!check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids))
+ goto cleanup;
+
+ /* map should still exist when original file descriptor is closed */
+ Close(extra_fds[0]);
+ if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+static int get_btf_id_by_fd(int btf_fd, __u32 *id)
+{
+ struct bpf_btf_info info;
+ __u32 info_len = sizeof(info);
+ int err;
+
+ memset(&info, 0, info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len);
+ if (err)
+ return err;
+ if (id)
+ *id = info.id;
+ return 0;
+}
+
+/*
+ * Check that fd_array operates properly for btfs. Namely, to check that
+ * passing a btf fd in fd_array increases its reference count, do the
+ * following:
+ * 1) Create a new btf, it's referenced only by a file descriptor, so refcnt=1
+ * 2) Load a BPF prog with fd_array[0] = btf_fd; now btf's refcnt=2
+ * 3) Close the btf_fd, now refcnt=1
+ * Wait and check that BTF still exists.
+ */
+static void check_fd_array_cnt__referenced_btfs(void)
+{
+ int extra_fds[1] = { -1 };
+ int prog_fd = -1;
+ __u32 btf_id;
+ int tries;
+ int err;
+
+ extra_fds[0] = new_btf();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_btf"))
+ goto cleanup;
+ prog_fd = load_test_prog(extra_fds, 1);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+
+ /* btf should still exist when original file descriptor is closed */
+ err = get_btf_id_by_fd(extra_fds[0], &btf_id);
+ if (!ASSERT_GE(err, 0, "get_btf_id_by_fd"))
+ goto cleanup;
+
+ Close(extra_fds[0]);
+
+ if (!ASSERT_GE(kern_sync_rcu(), 0, "kern_sync_rcu 1"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(btf_exists(btf_id), true, "btf should exist"))
+ goto cleanup;
+
+ Close(prog_fd);
+
+ /* The program is freed by a workqueue, so no reliable
+ * way to sync, so just wait a bit (max ~1 second). */
+ for (tries = 100; tries >= 0; tries--) {
+ usleep(1000);
+
+ if (!btf_exists(btf_id))
+ break;
+
+ if (tries)
+ continue;
+
+ PRINT_FAIL("btf should have been freed");
+ }
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+/*
+ * Test that a program with trash in fd_array can't be loaded:
+ * only map and BTF file descriptors should be accepted.
+ */
+static void check_fd_array_cnt__fd_array_with_trash(void)
+{
+ int extra_fds[3] = { -1, -1, -1 };
+ int prog_fd = -1;
+
+ extra_fds[0] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ extra_fds[1] = new_btf();
+ if (!ASSERT_GE(extra_fds[1], 0, "new_btf"))
+ goto cleanup;
+
+ /* trash 1: not a file descriptor */
+ extra_fds[2] = 0xbeef;
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_EQ(prog_fd, -EBADF, "prog should have been rejected with -EBADF"))
+ goto cleanup;
+
+ /* trash 2: not a map or btf */
+ extra_fds[2] = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(extra_fds[2], 0, "socket"))
+ goto cleanup;
+
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "prog should have been rejected with -EINVAL"))
+ goto cleanup;
+
+ /* Validate that the prog is ok if trash is removed */
+ Close(extra_fds[2]);
+ extra_fds[2] = new_btf();
+ if (!ASSERT_GE(extra_fds[2], 0, "new_btf"))
+ goto cleanup;
+
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_GE(prog_fd, 0, "prog should have been loaded"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[2]);
+ Close(extra_fds[1]);
+ Close(extra_fds[0]);
+}
+
+/*
+ * Test that a program with a too-large fd_array can't be loaded.
+ */
+static void check_fd_array_cnt__fd_array_too_big(void)
+{
+ int extra_fds[65];
+ int prog_fd = -1;
+ int i;
+
+ for (i = 0; i < 65; i++) {
+ extra_fds[i] = new_map();
+ if (!ASSERT_GE(extra_fds[i], 0, "new_map"))
+ goto cleanup_fds;
+ }
+
+ prog_fd = load_test_prog(extra_fds, 65);
+ ASSERT_EQ(prog_fd, -E2BIG, "prog should have been rejected with -E2BIG");
+
+cleanup_fds:
+ while (i > 0)
+ Close(extra_fds[--i]);
+}
+
+void test_fd_array_cnt(void)
+{
+ if (test__start_subtest("no-fd-array"))
+ check_fd_array_cnt__no_fd_array();
+
+ if (test__start_subtest("fd-array-ok"))
+ check_fd_array_cnt__fd_array_ok();
+
+ if (test__start_subtest("fd-array-dup-input"))
+ check_fd_array_cnt__duplicated_maps();
+
+ if (test__start_subtest("fd-array-ref-maps-in-array"))
+ check_fd_array_cnt__referenced_maps_in_fd_array();
+
+ if (test__start_subtest("fd-array-ref-btfs"))
+ check_fd_array_cnt__referenced_btfs();
+
+ if (test__start_subtest("fd-array-trash-input"))
+ check_fd_array_cnt__fd_array_with_trash();
+
+ if (test__start_subtest("fd-array-2big"))
+ check_fd_array_cnt__fd_array_too_big();
+}
diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c
index 0f4dfb770c32..b698cc62a371 100644
--- a/tools/testing/selftests/bpf/progs/syscall.c
+++ b/tools/testing/selftests/bpf/progs/syscall.c
@@ -76,9 +76,9 @@ static int btf_load(void)
 .magic = BTF_MAGIC,
 .version = BTF_VERSION,
 .hdr_len = sizeof(struct btf_header),
- .type_len = sizeof(__u32) * 8,
- .str_off = sizeof(__u32) * 8,
- .str_len = sizeof(__u32),
+ .type_len = sizeof(raw_btf.types),
+ .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types),
+ .str_len = sizeof(raw_btf.str),
 },
 .types = {
 /* long */
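
The same load expressed at the raw syscall level, as a sketch against the updated union bpf_attr above (prog_load_fd_array is a hypothetical helper; error handling and program construction are omitted):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_load_fd_array(const struct bpf_insn *insns, __u32 insn_cnt,
			      const int *fds, __u32 fds_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_XDP;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	attr.fd_array = (__u64)(unsigned long)fds;
	/* New field: the verifier scans fds[0..fds_cnt-1] during the load
	 * and binds every map/BTF fd it finds to the program.
	 */
	attr.fd_array_cnt = fds_cnt;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}

A load fails if anything in the scanned window is not a map or BTF object: the selftests above expect -EBADF for a number that is not a file descriptor at all and -EINVAL for an fd of the wrong type.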