author    Jakub Kicinski <kuba@kernel.org>  2022-02-09 18:17:54 -0800
committer Jakub Kicinski <kuba@kernel.org>  2022-02-09 18:40:56 -0800
commit    1127170d457eb9bcc839ef7f2064634f92fe83e2 (patch)
tree      228996f3ae0b734cadc7118a4d10efc1635acf23 /tools/bpf
parent    5cad527d5ffa9a1c4731bb9c97d2ee93f8960d50 (diff)
parent    e5313968c41ba890a91344773a0474d0246d20a3 (diff)
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2022-02-09

We've added 126 non-merge commits during the last 16 day(s) which contain
a total of 201 files changed, 4049 insertions(+), 2215 deletions(-).

The main changes are:

1) Add custom BPF allocator for JITs that pack multiple programs into a
   huge page to reduce iTLB pressure, from Song Liu.

2) Add __user tagging support in vmlinux BTF and utilize it from BPF
   verifier when generating loads, from Yonghong Song.

3) Add per-socket fast path check guarding from cgroup/BPF overhead when
   used by only some sockets, from Pavel Begunkov.

4) Continued libbpf deprecation work of APIs/features and removal of their
   usage from samples, selftests, libbpf & bpftool, from Andrii Nakryiko
   and various others.

5) Improve BPF instruction set documentation by adding byte swap
   instructions and cleaning up load/store section, from Christoph Hellwig.

6) Switch BPF preload infra to light skeleton and remove libbpf dependency
   from it, from Alexei Starovoitov.

7) Fix architecture-agnostic macros in libbpf for accessing syscall
   arguments from BPF progs for non-x86 architectures, from Ilya Leoshkevich.

8) Rework port members in struct bpf_sk_lookup and struct bpf_sock to be
   16-bit fields with anonymous zero padding, from Jakub Sitnicki.

9) Add new bpf_copy_from_user_task() helper to read memory from a
   different task than current. Add ability to create sleepable BPF
   iterator progs, from Kenny Yu.

10) Implement XSK batching for ice's zero-copy driver used by AF_XDP and
    utilize TX batching API from XSK buffer pool, from Maciej Fijalkowski.

11) Generate temporary netns names for BPF selftests to avoid naming
    collisions, from Hangbin Liu.

12) Implement bpf_core_types_are_compat() with limited recursion for
    in-kernel usage, from Matteo Croce.

13) Simplify pahole version detection and finally enable
    CONFIG_DEBUG_INFO_DWARF5 to be selected with CONFIG_DEBUG_INFO_BTF,
    from Nathan Chancellor.

14) Misc minor fixes to libbpf and selftests from various folks.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (126 commits)
  selftests/bpf: Cover 4-byte load from remote_port in bpf_sk_lookup
  bpf: Make remote_port field in struct bpf_sk_lookup 16-bit wide
  libbpf: Fix compilation warning due to mismatched printf format
  selftests/bpf: Test BPF_KPROBE_SYSCALL macro
  libbpf: Add BPF_KPROBE_SYSCALL macro
  libbpf: Fix accessing the first syscall argument on s390
  libbpf: Fix accessing the first syscall argument on arm64
  libbpf: Allow overriding PT_REGS_PARM1{_CORE}_SYSCALL
  selftests/bpf: Skip test_bpf_syscall_macro's syscall_arg1 on arm64 and s390
  libbpf: Fix accessing syscall arguments on riscv
  libbpf: Fix riscv register names
  libbpf: Fix accessing syscall arguments on powerpc
  selftests/bpf: Use PT_REGS_SYSCALL_REGS in bpf_syscall_macro
  libbpf: Add PT_REGS_SYSCALL_REGS macro
  selftests/bpf: Fix an endianness issue in bpf_syscall_macro test
  bpf: Fix bpf_prog_pack build HPAGE_PMD_SIZE
  bpf: Fix leftover header->pages in sparc and powerpc code.
  libbpf: Fix signedness bug in btf_dump_array_data()
  selftests/bpf: Do not export subtest as standalone test
  bpf, x86_64: Fail gracefully on bpf_jit_binary_pack_finalize failures
  ...
====================

Link: https://lore.kernel.org/r/20220209210050.8425-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
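For a sense of what point 9 enables, here is a minimal sketch (not taken from this merge's diff) of a sleepable task iterator that uses the new bpf_copy_from_user_task() helper; the program name, section, and 16-byte buffer are illustrative assumptions.

/* Sketch only: a sleepable task iterator reading user memory of the
 * task being iterated, rather than of current. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter.s/task")			/* the ".s" suffix marks the prog sleepable */
int dump_argv0(struct bpf_iter__task *ctx)
{
	struct task_struct *task = ctx->task;
	char buf[16] = {};

	if (!task || !task->mm)
		return 0;

	/* Read from the *iterated* task's address space; this helper may
	 * sleep, so it is only legal in sleepable programs. */
	bpf_copy_from_user_task(buf, sizeof(buf),
				(const void *)task->mm->arg_start, task, 0);
	BPF_SEQ_PRINTF(ctx->meta->seq, "%d: %s\n", task->pid, buf);
	return 0;
}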
Diffstat (limited to 'tools/bpf')
-rw-r--r--  tools/bpf/bpftool/common.c   |  2
-rw-r--r--  tools/bpf/bpftool/feature.c  | 29
-rw-r--r--  tools/bpf/bpftool/gen.c      |  9
-rw-r--r--  tools/bpf/bpftool/main.c     |  5
-rw-r--r--  tools/bpf/bpftool/prog.c     | 13
5 files changed, 31 insertions(+), 27 deletions(-)
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 111dff809c7b..606743c6db41 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -310,7 +310,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
{
const char *prog_name = prog_info->name;
const struct btf_type *func_type;
- const struct bpf_func_info finfo;
+ const struct bpf_func_info finfo = {};
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
struct btf *prog_btf = NULL;
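The common.c hunk fixes an uninitialized read: finfo is handed to the kernel to fill in, so it must be zeroed up front or later reads can hit indeterminate stack memory. A simplified sketch of the pattern (BTF lookup and bpftool's surrounding logic omitted):

#include <bpf/bpf.h>

static void query_func_info(int prog_fd)
{
	struct bpf_func_info finfo = {};	/* the fix: zero-init */
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);

	info.nr_func_info = 1;
	info.func_info_rec_size = sizeof(finfo);
	info.func_info = (__u64)(unsigned long)&finfo;
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
		return;			/* finfo is still well-defined */
	/* ... look up finfo.type_id in the program's BTF ... */
}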
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index e999159fa28d..9c894b1447de 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -487,17 +487,12 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
size_t maxlen;
bool res;
- if (ifindex)
- /* Only test offload-able program types */
- switch (prog_type) {
- case BPF_PROG_TYPE_SCHED_CLS:
- case BPF_PROG_TYPE_XDP:
- break;
- default:
- return;
- }
+ if (ifindex) {
+ p_info("BPF offload feature probing is not supported");
+ return;
+ }
- res = bpf_probe_prog_type(prog_type, ifindex);
+ res = libbpf_probe_bpf_prog_type(prog_type, NULL);
#ifdef USE_LIBCAP
/* Probe may succeed even if program load fails, for unprivileged users
* check that we did not fail because of insufficient permissions
@@ -535,7 +530,12 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
size_t maxlen;
bool res;
- res = bpf_probe_map_type(map_type, ifindex);
+ if (ifindex) {
+ p_info("BPF offload feature probing is not supported");
+ return;
+ }
+
+ res = libbpf_probe_bpf_map_type(map_type, NULL);
/* Probe result depends on the success of map creation, no additional
* check required for unprivileged users
@@ -567,7 +567,12 @@ probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
bool res = false;
if (supported_type) {
- res = bpf_probe_helper(id, prog_type, ifindex);
+ if (ifindex) {
+ p_info("BPF offload feature probing is not supported");
+ return;
+ }
+
+ res = libbpf_probe_bpf_helper(prog_type, id, NULL);
#ifdef USE_LIBCAP
/* Probe may succeed even if program load fails, for
* unprivileged users check that we did not fail because of
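These feature.c hunks swap bpftool's deprecated probing wrappers for libbpf's public probe API (libbpf_probe_bpf_prog_type() and friends), which takes an opts pointer instead of an ifindex; hence the explicit bail-out for offload probing. A minimal standalone use of the new API (a sketch, assuming libbpf headers new enough to declare these functions):

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* 1 = supported, 0 = not supported, <0 = the probe itself failed */
	int ret = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL);

	if (ret < 0)
		fprintf(stderr, "probe failed: %d\n", ret);
	else
		printf("XDP programs %ssupported\n", ret ? "" : "not ");

	ret = libbpf_probe_bpf_helper(BPF_PROG_TYPE_XDP,
				      BPF_FUNC_map_lookup_elem, NULL);
	if (ret > 0)
		printf("bpf_map_lookup_elem available to XDP progs\n");
	return 0;
}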
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 43e3f8700ecc..eacfc6a2060d 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -378,13 +378,16 @@ static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
int prog_fd = skel->progs.%2$s.prog_fd; \n\
", obj_name, bpf_program__name(prog));
- switch (bpf_program__get_type(prog)) {
+ switch (bpf_program__type(prog)) {
case BPF_PROG_TYPE_RAW_TRACEPOINT:
tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
- printf("\tint fd = bpf_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
+ printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
break;
case BPF_PROG_TYPE_TRACING:
- printf("\tint fd = bpf_raw_tracepoint_open(NULL, prog_fd);\n");
+ if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
+ printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
+ else
+ printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
break;
default:
printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 9d01fa9de033..490f7bd54e4c 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -478,14 +478,11 @@ int main(int argc, char **argv)
}
if (!legacy_libbpf) {
- enum libbpf_strict_mode mode;
-
/* Allow legacy map definitions for skeleton generation.
* It will still be rejected if users use LIBBPF_STRICT_ALL
* mode for loading generated skeleton.
*/
- mode = (__LIBBPF_STRICT_LAST - 1) & ~LIBBPF_STRICT_MAP_DEFINITIONS;
- ret = libbpf_set_strict_mode(mode);
+ ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
if (ret)
p_err("failed to enable libbpf strict mode: %d", ret);
}
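main.c drops the intermediate mode variable and builds the flag mask inline from LIBBPF_STRICT_ALL. Standalone, the call looks like this (a sketch, assuming libbpf headers that define the LIBBPF_STRICT_* flags):

#include <stdio.h>
#include <bpf/libbpf_legacy.h>	/* libbpf_set_strict_mode(), LIBBPF_STRICT_* */

int main(void)
{
	/* all strict behaviors except the map-definition check, so legacy
	 * map definitions can still be parsed for skeleton generation */
	int err = libbpf_set_strict_mode(LIBBPF_STRICT_ALL &
					 ~LIBBPF_STRICT_MAP_DEFINITIONS);
	if (err)
		fprintf(stderr, "failed to enable libbpf strict mode: %d\n", err);
	return err;
}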
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index cf935c63e6f5..92a6f679ef7d 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1272,12 +1272,12 @@ static int do_run(int argc, char **argv)
{
char *data_fname_in = NULL, *data_fname_out = NULL;
char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
- struct bpf_prog_test_run_attr test_attr = {0};
const unsigned int default_size = SZ_32K;
void *data_in = NULL, *data_out = NULL;
void *ctx_in = NULL, *ctx_out = NULL;
unsigned int repeat = 1;
int fd, err;
+ LIBBPF_OPTS(bpf_test_run_opts, test_attr);
if (!REQ_ARGS(4))
return -1;
@@ -1395,14 +1395,13 @@ static int do_run(int argc, char **argv)
goto free_ctx_in;
}
- test_attr.prog_fd = fd;
test_attr.repeat = repeat;
test_attr.data_in = data_in;
test_attr.data_out = data_out;
test_attr.ctx_in = ctx_in;
test_attr.ctx_out = ctx_out;
- err = bpf_prog_test_run_xattr(&test_attr);
+ err = bpf_prog_test_run_opts(fd, &test_attr);
if (err) {
p_err("failed to run program: %s", strerror(errno));
goto free_ctx_out;
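do_run() migrates from the deprecated bpf_prog_test_run_xattr() to the opts-based bpf_prog_test_run_opts(): the program fd becomes a direct argument and the remaining attributes move into a LIBBPF_OPTS-declared struct. A minimal standalone sketch of the new call (prog_fd and the buffer sizes are placeholders, not values from this diff):

#include <stdio.h>
#include <bpf/bpf.h>

static int run_once(int prog_fd)
{
	char pkt[64] = {}, out[256];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
		.data_out = out,
		.data_size_out = sizeof(out),
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);

	if (err)
		return err;
	printf("retval %u, %u ns avg\n", topts.retval, topts.duration);
	return 0;
}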
@@ -2283,10 +2282,10 @@ static int do_profile(int argc, char **argv)
profile_obj->rodata->num_metric = num_metric;
/* adjust map sizes */
- bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
- bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
- bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
- bpf_map__resize(profile_obj->maps.counts, 1);
+ bpf_map__set_max_entries(profile_obj->maps.events, num_metric * num_cpu);
+ bpf_map__set_max_entries(profile_obj->maps.fentry_readings, num_metric);
+ bpf_map__set_max_entries(profile_obj->maps.accum_readings, num_metric);
+ bpf_map__set_max_entries(profile_obj->maps.counts, 1);
/* change target name */
profile_tgt_name = profile_target_name(profile_tgt_fd);
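The deprecated bpf_map__resize() gives way to the more descriptive bpf_map__set_max_entries(); semantics are unchanged, and the call must still land between skeleton open and load, since max_entries is fixed at load time. A sketch of the ordering (the "profiler" skeleton and its map are illustrative names, not from this diff):

#include "profiler.skel.h"	/* hypothetical skeleton header, for illustration */

static int run(int num_metric)
{
	struct profiler_bpf *obj;
	int err;

	obj = profiler_bpf__open();
	if (!obj)
		return -1;

	/* resize between open and load: max_entries is fixed at load time */
	err = bpf_map__set_max_entries(obj->maps.accum_readings, num_metric);
	if (!err)
		err = profiler_bpf__load(obj);

	/* ... attach and poll (omitted) ... */

	profiler_bpf__destroy(obj);
	return err;
}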