author     Jakub Kicinski <kuba@kernel.org>   2023-03-06 20:36:39 -0800
committer  Jakub Kicinski <kuba@kernel.org>   2023-03-06 20:36:39 -0800
commit     36e5e391a25af28dc1f4586f95d577b38ff4ed72 (patch)
tree       3b58175e4a148f54338d22c926d7dd2a6283d317 /tools/lib/bpf/libbpf.c
parent     5ca26d6039a6b42341f7f5cc8d10d30ca1561a7b (diff)
parent     8f4c92f0024ff2a30f002e85f87e531d49dc023c (diff)
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2023-03-06

We've added 85 non-merge commits during the last 13 day(s) which contain a total of 131 files changed, 7102 insertions(+), 1792 deletions(-).

The main changes are:

1) Add skb and XDP typed dynptrs which allow BPF programs more ergonomic and less brittle iteration through data and variable-sized accesses, from Joanne Koong.

2) Bigger batch of BPF verifier improvements to prepare for upcoming BPF open-coded iterators allowing for less restrictive looping capabilities, from Andrii Nakryiko.

3) Rework RCU enforcement in the verifier, add kptr_rcu and enforce BPF programs to NULL-check before passing such pointers into kfuncs, from Alexei Starovoitov.

4) Add support for kptrs in percpu hashmaps, percpu LRU hashmaps and in local storage maps, from Kumar Kartikeya Dwivedi.

5) Add BPF verifier support for ST instructions in convert_ctx_access() which will help the new -mcpu=v4 clang flag to start emitting them, from Eduard Zingerman.

6) Make uprobe attachment Android APK aware by supporting attachment to functions inside ELF objects contained in APKs via function names, from Daniel Müller (a caller-side sketch follows the diffstat below).

7) Add a new BPF_F_TIMER_ABS flag for the bpf_timer_start() helper to start the timer with an absolute expiration value instead of a relative one, from Tero Kristo.

8) Add a new kfunc bpf_cgroup_from_id() to look up cgroups via id, from Tejun Heo.

9) Extend libbpf to support users manually attaching kprobes/uprobes in the legacy/perf/link mode, from Menglong Dong (a usage sketch follows the commit list below).

10) Implement workarounds in the mips BPF JIT for DADDI/R4000, from Jiaxun Yang.

11) Enable mixing bpf2bpf and tailcalls for the loongarch BPF JIT, from Hengqi Chen.

12) Extend the BPF instruction set doc by describing the encoding of BPF instructions in terms of how bytes are stored under big/little endian, from Jose E. Marchesi.

13) Follow-up to enable kfunc support for the riscv BPF JIT, from Pu Lehui.

14) Fix bpf_xdp_query() backwards compatibility on old kernels, from Yonghong Song.

15) Fix BPF selftest cross compilation with CLANG_CROSS_FLAGS, from Florent Revest.

16) Improve bpf_cpumask_ma to only allocate one bpf_mem_cache, from Hou Tao.

17) Fix BPF verifier's check_subprogs to not unnecessarily mark a subprogram with has_tail_call, from Ilya Leoshkevich.

18) Fix arm syscall regs spec in libbpf's bpf_tracing.h, from Puranjay Mohan.
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (85 commits)
  selftests/bpf: Add test for legacy/perf kprobe/uprobe attach mode
  selftests/bpf: Split test_attach_probe into multi subtests
  libbpf: Add support to set kprobe/uprobe attach mode
  tools/resolve_btfids: Add /libsubcmd to .gitignore
  bpf: add support for fixed-size memory pointer returns for kfuncs
  bpf: generalize dynptr_get_spi to be usable for iters
  bpf: mark PTR_TO_MEM as non-null register type
  bpf: move kfunc_call_arg_meta higher in the file
  bpf: ensure that r0 is marked scratched after any function call
  bpf: fix visit_insn()'s detection of BPF_FUNC_timer_set_callback helper
  bpf: clean up visit_insn()'s instruction processing
  selftests/bpf: adjust log_fixup's buffer size for proper truncation
  bpf: honor env->test_state_freq flag in is_state_visited()
  selftests/bpf: enhance align selftest's expected log matching
  bpf: improve regsafe() checks for PTR_TO_{MEM,BUF,TP_BUFFER}
  bpf: improve stack slot state printing
  selftests/bpf: Disassembler tests for verifier.c:convert_ctx_access()
  selftests/bpf: test if pointer type is tracked for BPF_ST_MEM
  bpf: allow ctx writes using BPF_ST_MEM instruction
  bpf: Use separate RCU callbacks for freeing selem
  ...
====================

Link: https://lore.kernel.org/r/20230307004346.27578-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
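The libbpf.c part of item 9 is what this diff carries: bpf_kprobe_opts and bpf_uprobe_opts gain an attach_mode field (PROBE_ATTACH_MODE_DEFAULT/LEGACY/PERF/LINK) which is mapped onto the legacy, perf-event (ioctl) or bpf_link attach paths in the hunks below. A minimal caller-side sketch, assuming a loaded struct bpf_program; the target function "do_unlinkat" is only an illustrative placeholder:

    #include <bpf/libbpf.h>

    /* Request the perf-event (ioctl) attach path rather than the default
     * bpf_link-based attach. PROBE_ATTACH_MODE_LEGACY and
     * PROBE_ATTACH_MODE_LINK are the other explicit modes; per the switch
     * added below, the attach fails with -ENOTSUP if the kernel cannot
     * honor the requested mode.
     */
    static struct bpf_link *attach_kprobe_perf_mode(struct bpf_program *prog)
    {
            LIBBPF_OPTS(bpf_kprobe_opts, opts,
                    .attach_mode = PROBE_ATTACH_MODE_PERF,
            );

            return bpf_program__attach_kprobe_opts(prog, "do_unlinkat", &opts);
    }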
Diffstat (limited to 'tools/lib/bpf/libbpf.c')
-rw-r--r--  tools/lib/bpf/libbpf.c | 197
1 file changed, 167 insertions(+), 30 deletions(-)
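The other libbpf.c change below implements item 6: a uprobe binary_path of the form "archive!/member" is split at "!/", the member ELF is located inside the (uncompressed) zip/APK archive via the new zip.h helpers, and the resolved symbol offset is rebased by the member's data offset in the archive. A hedged caller-side sketch; the APK path, library name and symbol name are made-up placeholders:

    #include <sys/types.h>
    #include <bpf/libbpf.h>

    /* Attach a uprobe to a function inside a shared object that is stored
     * uncompressed in an APK, using the "archive!/member" path syntax
     * handled in bpf_program__attach_uprobe_opts() below.
     */
    static struct bpf_link *attach_uprobe_in_apk(struct bpf_program *prog, pid_t pid)
    {
            LIBBPF_OPTS(bpf_uprobe_opts, opts,
                    .func_name = "Java_com_example_Foo_bar",
            );

            return bpf_program__attach_uprobe_opts(prog, pid,
                            "/data/app/com.example/base.apk!/lib/arm64-v8a/libfoo.so",
                            0 /* extra offset on top of the resolved symbol */,
                            &opts);
    }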
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 05c4db355f28..a557718401e4 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -53,6 +53,7 @@
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
+#include "zip.h"
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
@@ -798,7 +799,6 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
progs = obj->programs;
nr_progs = obj->nr_programs;
nr_syms = symbols->d_size / sizeof(Elf64_Sym);
- sec_off = 0;
for (i = 0; i < nr_syms; i++) {
sym = elf_sym_by_idx(obj, i);
@@ -2615,7 +2615,7 @@ static int bpf_object__init_maps(struct bpf_object *obj,
strict = !OPTS_GET(opts, relaxed_maps, false);
pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
- err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
+ err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
err = err ?: bpf_object__init_global_data_maps(obj);
err = err ?: bpf_object__init_kconfig_map(obj);
err = err ?: bpf_object__init_struct_ops_maps(obj);
@@ -9724,6 +9724,7 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
char errmsg[STRERR_BUFSIZE];
struct bpf_link_perf *link;
int prog_fd, link_fd = -1, err;
+ bool force_ioctl_attach;
if (!OPTS_VALID(opts, bpf_perf_event_opts))
return libbpf_err_ptr(-EINVAL);
@@ -9747,7 +9748,8 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
link->link.dealloc = &bpf_link_perf_dealloc;
link->perf_event_fd = pfd;
- if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
+ force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
+ if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
.perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
@@ -10106,6 +10108,7 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
const struct bpf_kprobe_opts *opts)
{
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+ enum probe_attach_mode attach_mode;
char errmsg[STRERR_BUFSIZE];
char *legacy_probe = NULL;
struct bpf_link *link;
@@ -10116,11 +10119,32 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
if (!OPTS_VALID(opts, bpf_kprobe_opts))
return libbpf_err_ptr(-EINVAL);
+ attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
retprobe = OPTS_GET(opts, retprobe, false);
offset = OPTS_GET(opts, offset, 0);
pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
legacy = determine_kprobe_perf_type() < 0;
+ switch (attach_mode) {
+ case PROBE_ATTACH_MODE_LEGACY:
+ legacy = true;
+ pe_opts.force_ioctl_attach = true;
+ break;
+ case PROBE_ATTACH_MODE_PERF:
+ if (legacy)
+ return libbpf_err_ptr(-ENOTSUP);
+ pe_opts.force_ioctl_attach = true;
+ break;
+ case PROBE_ATTACH_MODE_LINK:
+ if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
+ return libbpf_err_ptr(-ENOTSUP);
+ break;
+ case PROBE_ATTACH_MODE_DEFAULT:
+ break;
+ default:
+ return libbpf_err_ptr(-EINVAL);
+ }
+
if (!legacy) {
pfd = perf_event_open_probe(false /* uprobe */, retprobe,
func_name, offset,
@@ -10531,32 +10555,19 @@ static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
return NULL;
}
-/* Find offset of function name in object specified by path. "name" matches
- * symbol name or name@@LIB for library functions.
+/* Find offset of function name in the provided ELF object. "binary_path" is
+ * the path to the ELF binary represented by "elf", and only used for error
+ * reporting matters. "name" matches symbol name or name@@LIB for library
+ * functions.
*/
-static long elf_find_func_offset(const char *binary_path, const char *name)
+static long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name)
{
- int fd, i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
+ int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
bool is_shared_lib, is_name_qualified;
- char errmsg[STRERR_BUFSIZE];
long ret = -ENOENT;
size_t name_len;
GElf_Ehdr ehdr;
- Elf *elf;
- fd = open(binary_path, O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- ret = -errno;
- pr_warn("failed to open %s: %s\n", binary_path,
- libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
- return ret;
- }
- elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
- if (!elf) {
- pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
- close(fd);
- return -LIBBPF_ERRNO__FORMAT;
- }
if (!gelf_getehdr(elf, &ehdr)) {
pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
ret = -LIBBPF_ERRNO__FORMAT;
@@ -10569,7 +10580,7 @@ static long elf_find_func_offset(const char *binary_path, const char *name)
/* Does name specify "@@LIB"? */
is_name_qualified = strstr(name, "@@") != NULL;
- /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
+ /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
* a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically
* linked binary may not have SHT_DYMSYM, so absence of a section should not be
* reported as a warning/error.
@@ -10682,11 +10693,101 @@ static long elf_find_func_offset(const char *binary_path, const char *name)
}
}
out:
+ return ret;
+}
+
+/* Find offset of function name in ELF object specified by path. "name" matches
+ * symbol name or name@@LIB for library functions.
+ */
+static long elf_find_func_offset_from_file(const char *binary_path, const char *name)
+{
+ char errmsg[STRERR_BUFSIZE];
+ long ret = -ENOENT;
+ Elf *elf;
+ int fd;
+
+ fd = open(binary_path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = -errno;
+ pr_warn("failed to open %s: %s\n", binary_path,
+ libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
+ return ret;
+ }
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf) {
+ pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
+ close(fd);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+
+ ret = elf_find_func_offset(elf, binary_path, name);
elf_end(elf);
close(fd);
return ret;
}
+/* Find offset of function name in archive specified by path. Currently
+ * supported are .zip files that do not compress their contents, as used on
+ * Android in the form of APKs, for example. "file_name" is the name of the ELF
+ * file inside the archive. "func_name" matches symbol name or name@@LIB for
+ * library functions.
+ *
+ * An overview of the APK format specifically provided here:
+ * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
+ */
+static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
+ const char *func_name)
+{
+ struct zip_archive *archive;
+ struct zip_entry entry;
+ long ret;
+ Elf *elf;
+
+ archive = zip_archive_open(archive_path);
+ if (IS_ERR(archive)) {
+ ret = PTR_ERR(archive);
+ pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
+ return ret;
+ }
+
+ ret = zip_archive_find_entry(archive, file_name, &entry);
+ if (ret) {
+ pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
+ archive_path, ret);
+ goto out;
+ }
+ pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
+ (unsigned long)entry.data_offset);
+
+ if (entry.compression) {
+ pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
+ archive_path);
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ }
+
+ elf = elf_memory((void *)entry.data, entry.data_length);
+ if (!elf) {
+ pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
+ elf_errmsg(-1));
+ ret = -LIBBPF_ERRNO__LIBELF;
+ goto out;
+ }
+
+ ret = elf_find_func_offset(elf, file_name, func_name);
+ if (ret > 0) {
+ pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
+ func_name, file_name, archive_path, entry.data_offset, ret,
+ ret + entry.data_offset);
+ ret += entry.data_offset;
+ }
+ elf_end(elf);
+
+out:
+ zip_archive_close(archive);
+ return ret;
+}
+
static const char *arch_specific_lib_paths(void)
{
/*
@@ -10772,9 +10873,11 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
const char *binary_path, size_t func_offset,
const struct bpf_uprobe_opts *opts)
{
- DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+ const char *archive_path = NULL, *archive_sep = NULL;
char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
- char full_binary_path[PATH_MAX];
+ DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+ enum probe_attach_mode attach_mode;
+ char full_path[PATH_MAX];
struct bpf_link *link;
size_t ref_ctr_off;
int pfd, err;
@@ -10784,6 +10887,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
if (!OPTS_VALID(opts, bpf_uprobe_opts))
return libbpf_err_ptr(-EINVAL);
+ attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
retprobe = OPTS_GET(opts, retprobe, false);
ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
@@ -10791,27 +10895,60 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
if (!binary_path)
return libbpf_err_ptr(-EINVAL);
- if (!strchr(binary_path, '/')) {
- err = resolve_full_path(binary_path, full_binary_path,
- sizeof(full_binary_path));
+ /* Check if "binary_path" refers to an archive. */
+ archive_sep = strstr(binary_path, "!/");
+ if (archive_sep) {
+ full_path[0] = '\0';
+ libbpf_strlcpy(full_path, binary_path,
+ min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
+ archive_path = full_path;
+ binary_path = archive_sep + 2;
+ } else if (!strchr(binary_path, '/')) {
+ err = resolve_full_path(binary_path, full_path, sizeof(full_path));
if (err) {
pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
prog->name, binary_path, err);
return libbpf_err_ptr(err);
}
- binary_path = full_binary_path;
+ binary_path = full_path;
}
func_name = OPTS_GET(opts, func_name, NULL);
if (func_name) {
long sym_off;
- sym_off = elf_find_func_offset(binary_path, func_name);
+ if (archive_path) {
+ sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
+ func_name);
+ binary_path = archive_path;
+ } else {
+ sym_off = elf_find_func_offset_from_file(binary_path, func_name);
+ }
if (sym_off < 0)
return libbpf_err_ptr(sym_off);
func_offset += sym_off;
}
legacy = determine_uprobe_perf_type() < 0;
+ switch (attach_mode) {
+ case PROBE_ATTACH_MODE_LEGACY:
+ legacy = true;
+ pe_opts.force_ioctl_attach = true;
+ break;
+ case PROBE_ATTACH_MODE_PERF:
+ if (legacy)
+ return libbpf_err_ptr(-ENOTSUP);
+ pe_opts.force_ioctl_attach = true;
+ break;
+ case PROBE_ATTACH_MODE_LINK:
+ if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
+ return libbpf_err_ptr(-ENOTSUP);
+ break;
+ case PROBE_ATTACH_MODE_DEFAULT:
+ break;
+ default:
+ return libbpf_err_ptr(-EINVAL);
+ }
+
if (!legacy) {
pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
func_offset, pid, ref_ctr_off);