path: root/tools/lib/bpf/libbpf.c
author    Jackie Liu <liuyun01@kylinos.cn>      2023-07-05 17:12:09 +0800
committer Andrii Nakryiko <andrii@kernel.org>  2023-07-06 16:05:08 -0700
commit    56baeeba0a355ecf4d0c71bfbb1d5ba09501dbfc (patch)
tree      547d48b0aad61f19e5ebc5d8b83f4aa81c9cf114 /tools/lib/bpf/libbpf.c
parent    8a3fe76f8718bad4056d665bee1a0c43d128af72 (diff)
libbpf: Use available_filter_functions_addrs with multi-kprobes
Now that the kernel provides the new available_filter_functions_addrs file, which lets us avoid cross-validating available_filter_functions against kallsyms, we can improve the efficiency of multi-attach kprobes. For example, on my device, the start time of the sample program [1]:

$ sudo ./funccount "tcp_*"

  before    after
  1.2s      1.0s

[1]: https://github.com/JackieLiu1/ketones/tree/master/src/funccount

Signed-off-by: Jackie Liu <liuyun01@kylinos.cn>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20230705091209.3803873-2-liu.yun@linux.dev
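For context, the pattern-based path this patch speeds up is the one exercised by bpf_program__attach_kprobe_multi_opts(). A minimal userspace sketch follows (not part of this patch; the object file name "funccount.bpf.o" and the program name "funccount" are illustrative placeholders):

/* Hypothetical caller sketch: attach one kprobe.multi program to every
 * kernel function matching "tcp_*". With this patch, libbpf resolves the
 * pattern via available_filter_functions_addrs when the kernel exposes it,
 * instead of cross-checking available_filter_functions against kallsyms.
 */
#include <errno.h>
#include <bpf/libbpf.h>

static int attach_tcp_probes(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;

	obj = bpf_object__open_file("funccount.bpf.o", NULL); /* illustrative name */
	if (!obj)
		return -errno;
	if (bpf_object__load(obj))
		return -errno;

	prog = bpf_object__find_program_by_name(obj, "funccount"); /* illustrative name */
	if (!prog)
		return -ESRCH;

	/* NULL opts: no explicit syms/addrs, so the glob pattern drives resolution */
	link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", NULL);
	if (!link)
		return -errno;

	return 0;
}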
Diffstat (limited to 'tools/lib/bpf/libbpf.c')
-rw-r--r--  tools/lib/bpf/libbpf.c  62
1 file changed, 61 insertions(+), 1 deletion(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 924bf52f06e4..fd4f1875df65 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10234,6 +10234,12 @@ static const char *tracefs_available_filter_functions(void)
			     : TRACEFS"/available_filter_functions";
}

+static const char *tracefs_available_filter_functions_addrs(void)
+{
+	return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs"
+			     : TRACEFS"/available_filter_functions_addrs";
+}
+
static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
					 const char *kfunc_name, size_t offset)
{
@@ -10650,6 +10656,57 @@ cleanup:
	return err;
}

+static bool has_available_filter_functions_addrs(void)
+{
+	return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
+}
+
+static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
+{
+	const char *available_path = tracefs_available_filter_functions_addrs();
+	char sym_name[500];
+	FILE *f;
+	int ret, err = 0;
+	unsigned long long sym_addr;
+
+	f = fopen(available_path, "re");
+	if (!f) {
+		err = -errno;
+		pr_warn("failed to open %s: %d\n", available_path, err);
+		return err;
+	}
+
+	while (true) {
+		ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name);
+		if (ret == EOF && feof(f))
+			break;
+
+		if (ret != 2) {
+			pr_warn("failed to parse available_filter_functions_addrs entry: %d\n",
+				ret);
+			err = -EINVAL;
+			goto cleanup;
+		}
+
+		if (!glob_match(sym_name, res->pattern))
+			continue;
+
+		err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
+					sizeof(*res->addrs), res->cnt + 1);
+		if (err)
+			goto cleanup;
+
+		res->addrs[res->cnt++] = (unsigned long)sym_addr;
+	}
+
+	if (res->cnt == 0)
+		err = -ENOENT;
+
+cleanup:
+	fclose(f);
+	return err;
+}
+
struct bpf_link *
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
				      const char *pattern,
@@ -10686,7 +10743,10 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
		return libbpf_err_ptr(-EINVAL);

	if (pattern) {
-		err = libbpf_available_kallsyms_parse(&res);
+		if (has_available_filter_functions_addrs())
+			err = libbpf_available_kprobes_parse(&res);
+		else
+			err = libbpf_available_kallsyms_parse(&res);
		if (err)
			goto error;
		addrs = res.addrs;
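For reference, libbpf_available_kprobes_parse() above consumes one entry per line from available_filter_functions_addrs: a hex address followed by the symbol name, with the fscanf() format "%llx %499s%*[^\n]\n" discarding anything after the symbol (such as a module annotation). An illustrative sample of the file contents (addresses and the module entry are made up):

ffffffff81a2b3c0 tcp_v4_connect
ffffffff81a2c510 tcp_sendmsg
ffffffffc0a01230 nf_conntrack_tcp_packet [nf_conntrack]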