Diffstat (limited to 'tools/lib/bpf')
-rw-r--r--  tools/lib/bpf/.gitignore         |    1
-rw-r--r--  tools/lib/bpf/Build              |    2
-rw-r--r--  tools/lib/bpf/Makefile           |   29
-rw-r--r--  tools/lib/bpf/bpf.c              |   77
-rw-r--r--  tools/lib/bpf/bpf.h              |   43
-rw-r--r--  tools/lib/bpf/bpf_core_read.h    |    6
-rw-r--r--  tools/lib/bpf/bpf_gen_internal.h |    3
-rw-r--r--  tools/lib/bpf/bpf_helpers.h      |   28
-rw-r--r--  tools/lib/bpf/bpf_tracing.h      |   27
-rw-r--r--  tools/lib/bpf/btf.c              |  724
-rw-r--r--  tools/lib/bpf/btf.h              |   19
-rw-r--r--  tools/lib/bpf/btf_dump.c         |   79
-rw-r--r--  tools/lib/bpf/btf_relocate.c     |    6
-rw-r--r--  tools/lib/bpf/elf.c              |    8
-rw-r--r--  tools/lib/bpf/features.c         |   16
-rw-r--r--  tools/lib/bpf/gen_loader.c       |  238
-rw-r--r--  tools/lib/bpf/hashmap.h          |   20
-rw-r--r--  tools/lib/bpf/libbpf.c           | 1477
-rw-r--r--  tools/lib/bpf/libbpf.h           |  151
-rw-r--r--  tools/lib/bpf/libbpf.map         |   25
-rw-r--r--  tools/lib/bpf/libbpf_errno.c     |   75
-rw-r--r--  tools/lib/bpf/libbpf_internal.h  |   74
-rw-r--r--  tools/lib/bpf/libbpf_legacy.h    |    4
-rw-r--r--  tools/lib/bpf/libbpf_probes.c    |    4
-rw-r--r--  tools/lib/bpf/libbpf_utils.c     |  256
-rw-r--r--  tools/lib/bpf/libbpf_version.h   |    2
-rw-r--r--  tools/lib/bpf/linker.c           |  351
-rw-r--r--  tools/lib/bpf/netlink.c          |   20
-rw-r--r--  tools/lib/bpf/nlattr.c           |   15
-rw-r--r--  tools/lib/bpf/relo_core.c        |   27
-rw-r--r--  tools/lib/bpf/ringbuf.c          |   33
-rw-r--r--  tools/lib/bpf/skel_internal.h    |   81
-rw-r--r--  tools/lib/bpf/str_error.c        |   33
-rw-r--r--  tools/lib/bpf/str_error.h        |    9
-rw-r--r--  tools/lib/bpf/usdt.bpf.h         |   78
-rw-r--r--  tools/lib/bpf/usdt.c             |  117
-rw-r--r--  tools/lib/bpf/zip.c              |    2
37 files changed, 3048 insertions, 1112 deletions
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index 0da84cb9e66d..f02725b123b3 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -5,3 +5,4 @@ TAGS
tags
cscope.*
/bpf_helper_defs.h
+fixdep
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index e2cd558ca0b4..c80204bb72a2 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,4 +1,4 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_utils.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 2cf892774346..168140f8e646 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -53,15 +53,9 @@ include $(srctree)/tools/scripts/Makefile.include
# copy a bit from Linux kbuild
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
INCLUDES = -I$(or $(OUTPUT),.) \
- -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
+ -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi \
+ -I$(srctree)/tools/arch/$(SRCARCH)/include
export prefix libdir src obj
@@ -95,12 +89,6 @@ override CFLAGS += $(CLANG_CROSS_FLAGS)
# flags specific for shared library
SHLIB_FLAGS := -DSHARED -fPIC
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
# Disable command line variables (CFLAGS) override from top
# level Makefile (perf), otherwise build Makefile will get
# the same command line setup.
@@ -108,6 +96,8 @@ MAKEOVERRIDES=
all:
+OUTPUT ?= ./
+OUTPUT := $(abspath $(OUTPUT))/
export srctree OUTPUT CC LD CFLAGS V
include $(srctree)/tools/build/Makefile.include
@@ -141,7 +131,10 @@ all: fixdep
all_cmd: $(CMD_TARGETS) check
-$(BPF_IN_SHARED): force $(BPF_GENERATED)
+$(SHARED_OBJDIR) $(STATIC_OBJDIR):
+ $(Q)mkdir -p $@
+
+$(BPF_IN_SHARED): force $(BPF_GENERATED) | $(SHARED_OBJDIR)
@(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
(diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@@ -151,9 +144,11 @@ $(BPF_IN_SHARED): force $(BPF_GENERATED)
@(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
(diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
+ $(SILENT_MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= OUTPUT=$(SHARED_OBJDIR) $(SHARED_OBJDIR)fixdep
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
-$(BPF_IN_STATIC): force $(BPF_GENERATED)
+$(BPF_IN_STATIC): force $(BPF_GENERATED) | $(STATIC_OBJDIR)
+ $(SILENT_MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= OUTPUT=$(STATIC_OBJDIR) $(STATIC_OBJDIR)fixdep
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
@@ -263,7 +258,7 @@ install_pkgconfig: $(PC_FILE)
install: install_lib install_pkgconfig install_headers
-clean:
+clean: fixdep-clean
$(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
*~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_GENERATED) \
$(SHARED_OBJDIR) $(STATIC_OBJDIR) \
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 2a4c71501a17..b66f5fbfbbb2 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -154,7 +154,7 @@ int bump_rlimit_memlock(void)
memlock_bumped = true;
- /* zero memlock_rlim_max disables auto-bumping RLIMIT_MEMLOCK */
+ /* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
if (memlock_rlim == 0)
return 0;
@@ -172,7 +172,7 @@ int bpf_map_create(enum bpf_map_type map_type,
__u32 max_entries,
const struct bpf_map_create_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
+ const size_t attr_sz = offsetofend(union bpf_attr, excl_prog_hash_size);
union bpf_attr attr;
int fd;
@@ -203,6 +203,8 @@ int bpf_map_create(enum bpf_map_type map_type,
attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
attr.map_token_fd = OPTS_GET(opts, token_fd, 0);
+ attr.excl_prog_hash = ptr_to_u64(OPTS_GET(opts, excl_prog_hash, NULL));
+ attr.excl_prog_hash_size = OPTS_GET(opts, excl_prog_hash_size, 0);
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
return libbpf_err_errno(fd);
@@ -238,7 +240,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
+ const size_t attr_sz = offsetofend(union bpf_attr, keyring_id);
void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info;
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -311,6 +313,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);
attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
+ attr.fd_array_cnt = OPTS_GET(opts, fd_array_cnt, 0);
if (log_level) {
attr.log_buf = ptr_to_u64(log_buf);
@@ -776,6 +779,7 @@ int bpf_link_create(int prog_fd, int target_fd,
return libbpf_err(-EINVAL);
break;
case BPF_TRACE_UPROBE_MULTI:
+ case BPF_TRACE_UPROBE_SESSION:
attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
@@ -835,6 +839,50 @@ int bpf_link_create(int prog_fd, int target_fd,
if (!OPTS_ZEROED(opts, netkit))
return libbpf_err(-EINVAL);
break;
+ case BPF_CGROUP_INET_INGRESS:
+ case BPF_CGROUP_INET_EGRESS:
+ case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET_SOCK_RELEASE:
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
+ case BPF_CGROUP_UNIX_CONNECT:
+ case BPF_CGROUP_INET4_GETPEERNAME:
+ case BPF_CGROUP_INET6_GETPEERNAME:
+ case BPF_CGROUP_UNIX_GETPEERNAME:
+ case BPF_CGROUP_INET4_GETSOCKNAME:
+ case BPF_CGROUP_INET6_GETSOCKNAME:
+ case BPF_CGROUP_UNIX_GETSOCKNAME:
+ case BPF_CGROUP_UDP4_SENDMSG:
+ case BPF_CGROUP_UDP6_SENDMSG:
+ case BPF_CGROUP_UNIX_SENDMSG:
+ case BPF_CGROUP_UDP4_RECVMSG:
+ case BPF_CGROUP_UDP6_RECVMSG:
+ case BPF_CGROUP_UNIX_RECVMSG:
+ case BPF_CGROUP_SOCK_OPS:
+ case BPF_CGROUP_DEVICE:
+ case BPF_CGROUP_SYSCTL:
+ case BPF_CGROUP_GETSOCKOPT:
+ case BPF_CGROUP_SETSOCKOPT:
+ case BPF_LSM_CGROUP:
+ relative_fd = OPTS_GET(opts, cgroup.relative_fd, 0);
+ relative_id = OPTS_GET(opts, cgroup.relative_id, 0);
+ if (relative_fd && relative_id)
+ return libbpf_err(-EINVAL);
+ if (relative_id) {
+ attr.link_create.cgroup.relative_id = relative_id;
+ attr.link_create.flags |= BPF_F_ID;
+ } else {
+ attr.link_create.cgroup.relative_fd = relative_fd;
+ }
+ attr.link_create.cgroup.expected_revision =
+ OPTS_GET(opts, cgroup.expected_revision, 0);
+ if (!OPTS_ZEROED(opts, cgroup))
+ return libbpf_err(-EINVAL);
+ break;
default:
if (!OPTS_ZEROED(opts, flags))
return libbpf_err(-EINVAL);
@@ -1095,7 +1143,7 @@ int bpf_map_get_fd_by_id(__u32 id)
int bpf_btf_get_fd_by_id_opts(__u32 id,
const struct bpf_get_fd_by_id_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
+ const size_t attr_sz = offsetofend(union bpf_attr, fd_by_id_token_fd);
union bpf_attr attr;
int fd;
@@ -1105,6 +1153,7 @@ int bpf_btf_get_fd_by_id_opts(__u32 id,
memset(&attr, 0, attr_sz);
attr.btf_id = id;
attr.open_flags = OPTS_GET(opts, open_flags, 0);
+ attr.fd_by_id_token_fd = OPTS_GET(opts, token_fd, 0);
fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
return libbpf_err_errno(fd);
@@ -1328,3 +1377,23 @@ int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
return libbpf_err_errno(fd);
}
+
+int bpf_prog_stream_read(int prog_fd, __u32 stream_id, void *buf, __u32 buf_len,
+ struct bpf_prog_stream_read_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_stream_read);
+ union bpf_attr attr;
+ int err;
+
+ if (!OPTS_VALID(opts, bpf_prog_stream_read_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+ attr.prog_stream_read.stream_buf = ptr_to_u64(buf);
+ attr.prog_stream_read.stream_buf_len = buf_len;
+ attr.prog_stream_read.stream_id = stream_id;
+ attr.prog_stream_read.prog_fd = prog_fd;
+
+ err = sys_bpf(BPF_PROG_STREAM_READ_BY_FD, &attr, attr_sz);
+ return libbpf_err_errno(err);
+}
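
For context, a minimal user-space sketch of draining a program's stream via the
new bpf_prog_stream_read() API; the stream id value of 2 and the prog_fd
plumbing are illustrative assumptions, not part of this patch:

    /* Sketch: drain a BPF program's stream; assumes stream id 2 is the
     * program's stderr-like stream (see enum bpf_stream_id in the kernel uapi).
     */
    #include <stdio.h>
    #include <bpf/bpf.h>

    static void dump_prog_stream(int prog_fd)
    {
            char buf[4096];
            int n;

            /* NULL opts is valid; the opts struct currently only carries sz */
            while ((n = bpf_prog_stream_read(prog_fd, 2, buf, sizeof(buf), NULL)) > 0)
                    fwrite(buf, 1, n, stderr);
    }
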
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 972e17ec0c09..e983a3e40d61 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -54,9 +54,12 @@ struct bpf_map_create_opts {
__s32 value_type_btf_obj_fd;
__u32 token_fd;
+
+ const void *excl_prog_hash;
+ __u32 excl_prog_hash_size;
size_t :0;
};
-#define bpf_map_create_opts__last_field token_fd
+#define bpf_map_create_opts__last_field excl_prog_hash_size
LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
const char *map_name,
@@ -100,16 +103,19 @@ struct bpf_prog_load_opts {
__u32 log_level;
__u32 log_size;
char *log_buf;
- /* output: actual total log contents size (including termintaing zero).
+ /* output: actual total log contents size (including terminating zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
* If kernel doesn't support this feature, log_size is left unchanged.
*/
__u32 log_true_size;
__u32 token_fd;
+
+ /* if set, provides the length of fd_array */
+ __u32 fd_array_cnt;
size_t :0;
};
-#define bpf_prog_load_opts__last_field token_fd
+#define bpf_prog_load_opts__last_field fd_array_cnt
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
@@ -129,7 +135,7 @@ struct bpf_btf_load_opts {
char *log_buf;
__u32 log_level;
__u32 log_size;
- /* output: actual total log contents size (including termintaing zero).
+ /* output: actual total log contents size (including terminating zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
* If kernel doesn't support this feature, log_size is left unchanged.
@@ -435,6 +441,11 @@ struct bpf_link_create_opts {
__u32 relative_id;
__u64 expected_revision;
} netkit;
+ struct {
+ __u32 relative_fd;
+ __u32 relative_id;
+ __u64 expected_revision;
+ } cgroup;
};
size_t :0;
};
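
The new cgroup bucket mirrors the tcx/netkit mprog options above. A hedged
sketch of an ordered attach; cg_fd, prog_fd and anchor_fd are assumed to exist,
and BPF_F_BEFORE is the standard mprog anchor flag:

    /* Sketch: ordered cgroup attachment via the new cgroup link opts. */
    #include <bpf/bpf.h>

    static int attach_before(int prog_fd, int cg_fd, int anchor_fd)
    {
            LIBBPF_OPTS(bpf_link_create_opts, opts,
                    .flags = BPF_F_BEFORE,           /* insert before the anchor */
                    .cgroup.relative_fd = anchor_fd, /* must not be combined with relative_id */
                    .cgroup.expected_revision = 3,   /* attach fails if the list changed */
            );

            return bpf_link_create(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS, &opts);
    }
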
@@ -484,9 +495,10 @@ LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id);
struct bpf_get_fd_by_id_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
__u32 open_flags; /* permissions requested for the operation on fd */
+ __u32 token_fd;
size_t :0;
};
-#define bpf_get_fd_by_id_opts__last_field open_flags
+#define bpf_get_fd_by_id_opts__last_field token_fd
LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id,
@@ -700,6 +712,27 @@ struct bpf_token_create_opts {
LIBBPF_API int bpf_token_create(int bpffs_fd,
struct bpf_token_create_opts *opts);
+struct bpf_prog_stream_read_opts {
+ size_t sz;
+ size_t :0;
+};
+#define bpf_prog_stream_read_opts__last_field sz
+/**
+ * @brief **bpf_prog_stream_read** reads data from the BPF stream of a given BPF
+ * program.
+ *
+ * @param prog_fd FD for the BPF program whose BPF stream is to be read.
+ * @param stream_id ID of the BPF stream to be read.
+ * @param buf Buffer to read data into from the BPF stream.
+ * @param buf_len Maximum number of bytes to read from the BPF stream.
+ * @param opts optional options, can be NULL
+ *
+ * @return The number of bytes read, on success; negative error code, otherwise
+ * (errno is also set to the error code)
+ */
+LIBBPF_API int bpf_prog_stream_read(int prog_fd, __u32 stream_id, void *buf, __u32 buf_len,
+ struct bpf_prog_stream_read_opts *opts);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
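
As a usage sketch for the new exclusive-map fields above, assuming the
program's SHA256 digest is already available from the signing/loader flow:

    /* Sketch: create a map usable only by the program with this SHA256. */
    #include <bpf/bpf.h>

    static int create_excl_map(const __u8 sha256[32])
    {
            LIBBPF_OPTS(bpf_map_create_opts, opts,
                    .excl_prog_hash = sha256,
                    .excl_prog_hash_size = 32,
            );

            return bpf_map_create(BPF_MAP_TYPE_ARRAY, "excl_map",
                                  sizeof(__u32), sizeof(__u64), 1, &opts);
    }
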
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index c0e13cdf9660..b997c68bd945 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -388,7 +388,13 @@ extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
+#if defined(__clang__) && (__clang_major__ >= 19)
+#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
+#elif defined(__GNUC__) && (__GNUC__ >= 14)
+#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
+#else
#define ___type(...) typeof(___arrow(__VA_ARGS__))
+#endif
#define ___read(read_fn, dst, src_type, src, accessor) \
read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
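
The switch to __typeof_unqual__ matters because plain typeof() preserves
qualifiers, so destinations derived from const/volatile sources were not
assignable. A minimal host-C illustration of the difference:

    /* Illustration: typeof() keeps qualifiers, __typeof_unqual__() drops
     * them; supported by Clang >= 19 and GCC >= 14.
     */
    const volatile int x = 1;

    typeof(x) a;            /* const volatile int: cannot be assigned later */
    __typeof_unqual__(x) b; /* plain int: a writable destination */
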
diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h
index fdf44403ff36..49af4260b8e6 100644
--- a/tools/lib/bpf/bpf_gen_internal.h
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -4,6 +4,7 @@
#define __BPF_GEN_INTERNAL_H
#include "bpf.h"
+#include "libbpf_internal.h"
struct ksym_relo_desc {
const char *name;
@@ -34,6 +35,7 @@ struct bpf_gen {
void *data_cur;
void *insn_start;
void *insn_cur;
+ bool swapped_endian;
ssize_t cleanup_label;
__u32 nr_progs;
__u32 nr_maps;
@@ -49,6 +51,7 @@ struct bpf_gen {
__u32 nr_ksyms;
int fd_array;
int nr_fd_array;
+ int hash_insn_offset[SHA256_DWORD_SIZE];
};
void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps);
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 305c62817dd3..d4e4e388e625 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -15,6 +15,14 @@
#define __array(name, val) typeof(val) *name[]
#define __ulong(name, val) enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name
+#ifndef likely
+#define likely(x) (__builtin_expect(!!(x), 1))
+#endif
+
+#ifndef unlikely
+#define unlikely(x) (__builtin_expect(!!(x), 0))
+#endif
+
/*
* Helper macro to place programs, maps, license in
* different sections in elf_bpf file. Section names
@@ -185,6 +193,7 @@ enum libbpf_tristate {
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
#define __kptr __attribute__((btf_type_tag("kptr")))
#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))
+#define __uptr __attribute__((btf_type_tag("uptr")))
#if defined (__clang__)
#define bpf_ksym_exists(sym) ({ \
@@ -206,6 +215,7 @@ enum libbpf_tristate {
#define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull")))
#define __arg_nullable __attribute((btf_decl_tag("arg:nullable")))
#define __arg_trusted __attribute((btf_decl_tag("arg:trusted")))
+#define __arg_untrusted __attribute((btf_decl_tag("arg:untrusted")))
#define __arg_arena __attribute((btf_decl_tag("arg:arena")))
#ifndef ___bpf_concat
@@ -305,6 +315,22 @@ enum libbpf_tristate {
___param, sizeof(___param)); \
})
+extern int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args,
+ __u32 len__sz, void *aux__prog) __weak __ksym;
+
+#define bpf_stream_printk(stream_id, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_stream_vprintk_impl(stream_id, ___fmt, ___param, sizeof(___param), NULL); \
+})
+
/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
* Otherwise use __bpf_vprintk
*/
@@ -341,7 +367,7 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
* I.e., it looks almost like high-level for each loop in other languages,
* supports continue/break, and is verifiable by BPF verifier.
*
- * For iterating integers, the difference betwen bpf_for_each(num, i, N, M)
+ * For iterating integers, the difference between bpf_for_each(num, i, N, M)
* and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
* verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
* *`, not just `int`. So for integers bpf_for() is more convenient.
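
A hedged sketch of the new bpf_stream_printk() macro from BPF program context;
the BPF_STDERR constant is an assumption about the kernel's enum bpf_stream_id,
and the usual vmlinux.h/bpf_helpers.h/bpf_tracing.h includes are implied:

    /* Sketch (BPF side): write a formatted message to the program's stream. */
    SEC("fentry/do_unlinkat")
    int BPF_PROG(report_unlink)
    {
            bpf_stream_printk(BPF_STDERR, "unlink by pid %d\n",
                              (int)(bpf_get_current_pid_tgid() >> 32));
            return 0;
    }
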
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index 9314fa95f04e..dbe32a5d02cd 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -163,7 +163,7 @@
struct pt_regs___s390 {
unsigned long orig_gpr2;
-};
+} __attribute__((preserve_access_index));
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
@@ -179,7 +179,7 @@ struct pt_regs___s390 {
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG gprs[7]
-#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
+#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___s390 *)(x))->__PT_PARM1_SYSCALL_REG)
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG)
@@ -222,7 +222,7 @@ struct pt_regs___s390 {
struct pt_regs___arm64 {
unsigned long orig_x0;
-};
+} __attribute__((preserve_access_index));
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
@@ -241,7 +241,7 @@ struct pt_regs___arm64 {
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
-#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
+#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___arm64 *)(x))->__PT_PARM1_SYSCALL_REG)
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG)
@@ -311,7 +311,7 @@ struct pt_regs___arm64 {
#define __PT_RET_REG regs[31]
#define __PT_FP_REG __unsupported__
#define __PT_RC_REG gpr[3]
-#define __PT_SP_REG sp
+#define __PT_SP_REG gpr[1]
#define __PT_IP_REG nip
#elif defined(bpf_target_sparc)
@@ -351,6 +351,10 @@ struct pt_regs___arm64 {
* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
*/
+struct pt_regs___riscv {
+ unsigned long orig_a0;
+} __attribute__((preserve_access_index));
+
/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG a0
@@ -362,12 +366,15 @@ struct pt_regs___arm64 {
#define __PT_PARM7_REG a6
#define __PT_PARM8_REG a7
-#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM1_SYSCALL_REG orig_a0
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___riscv *)(x))->__PT_PARM1_SYSCALL_REG)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) \
+ BPF_CORE_READ((const struct pt_regs___riscv *)(x), __PT_PARM1_SYSCALL_REG)
#define __PT_RET_REG ra
#define __PT_FP_REG s0
@@ -473,7 +480,7 @@ struct pt_regs;
#endif
/*
* Similarly, syscall-specific conventions might differ between function call
- * conventions within each architecutre. All supported architectures pass
+ * conventions within each architecture. All supported architectures pass
* either 6 or 7 syscall arguments in registers.
*
* See syscall(2) manpage for succinct table with information on each arch.
@@ -515,7 +522,7 @@ struct pt_regs;
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
-#elif defined(bpf_target_sparc)
+#elif defined(bpf_target_sparc) || defined(bpf_target_arm64)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
@@ -651,7 +658,7 @@ struct pt_regs;
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
* similar kinds of BPF programs, that accept input arguments as a single
* pointer to untyped u64 array, where each u64 can actually be a typed
- * pointer or integer of different size. Instead of requring user to write
+ * pointer or integer of different size. Instead of requiring user to write
* manual casts and work with array elements by index, BPF_PROG macro
* allows user to declare a list of named and typed input arguments in the
* same syntax as for normal C function. All the casting is hidden and
@@ -801,7 +808,7 @@ struct pt_regs;
* tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
* low-level way of getting kprobe input arguments from struct pt_regs, and
* provides a familiar typed and named function arguments syntax and
- * semantics of accessing kprobe input paremeters.
+ * semantics of accessing kprobe input parameters.
*
* Original struct pt_regs* context is preserved as 'ctx' argument. This might
* be necessary when using BPF helpers like bpf_perf_event_output().
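
The PT_REGS_PARM1_SYSCALL() overrides above feed the existing BPF_KSYSCALL()
wrapper; a sketch of the consumer side (section name and argument list follow
the usual libbpf convention):

    /* Sketch: BPF_KSYSCALL expands into PT_REGS_PARM*_SYSCALL() reads, so the
     * orig_x0/orig_gpr2/orig_a0 overrides fix the first argument on
     * arm64, s390 and riscv.
     */
    SEC("ksyscall/unlinkat")
    int BPF_KSYSCALL(handle_unlinkat, int dfd, const char *pathname)
    {
            bpf_printk("unlinkat dfd=%d", dfd);
            return 0;
    }
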
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 32c00db3b91b..84a4b0abc8be 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -12,6 +12,7 @@
#include <sys/utsname.h>
#include <sys/param.h>
#include <sys/stat.h>
+#include <sys/mman.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/btf.h>
@@ -119,6 +120,9 @@ struct btf {
/* whether base_btf should be freed in btf_free for this instance */
bool owns_base;
+ /* whether raw_data is a (read-only) mmap */
+ bool raw_data_is_mmap;
+
/* BTF object FD, if loaded into kernel */
int fd;
@@ -282,7 +286,7 @@ static int btf_parse_str_sec(struct btf *btf)
return -EINVAL;
}
if (!btf->base_btf && start[0]) {
- pr_debug("Invalid BTF string section\n");
+ pr_debug("Malformed BTF string section, did you forget to provide base BTF?\n");
return -EINVAL;
}
return 0;
@@ -950,6 +954,17 @@ static bool btf_is_modifiable(const struct btf *btf)
return (void *)btf->hdr != btf->raw_data;
}
+static void btf_free_raw_data(struct btf *btf)
+{
+ if (btf->raw_data_is_mmap) {
+ munmap(btf->raw_data, btf->raw_size);
+ btf->raw_data_is_mmap = false;
+ } else {
+ free(btf->raw_data);
+ }
+ btf->raw_data = NULL;
+}
+
void btf__free(struct btf *btf)
{
if (IS_ERR_OR_NULL(btf))
@@ -969,7 +984,7 @@ void btf__free(struct btf *btf)
free(btf->types_data);
strset__free(btf->strs_set);
}
- free(btf->raw_data);
+ btf_free_raw_data(btf);
free(btf->raw_data_swapped);
free(btf->type_offs);
if (btf->owns_base)
@@ -995,7 +1010,8 @@ static struct btf *btf_new_empty(struct btf *base_btf)
if (base_btf) {
btf->base_btf = base_btf;
btf->start_id = btf__type_cnt(base_btf);
- btf->start_str_off = base_btf->hdr->str_len;
+ btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
+ btf->swapped_endian = base_btf->swapped_endian;
}
/* +1 for empty string at offset 0 */
@@ -1028,7 +1044,7 @@ struct btf *btf__new_empty_split(struct btf *base_btf)
return libbpf_ptr(btf_new_empty(base_btf));
}
-static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
+static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, bool is_mmap)
{
struct btf *btf;
int err;
@@ -1045,15 +1061,21 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
if (base_btf) {
btf->base_btf = base_btf;
btf->start_id = btf__type_cnt(base_btf);
- btf->start_str_off = base_btf->hdr->str_len;
+ btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
}
- btf->raw_data = malloc(size);
- if (!btf->raw_data) {
- err = -ENOMEM;
- goto done;
+ if (is_mmap) {
+ btf->raw_data = (void *)data;
+ btf->raw_data_is_mmap = true;
+ } else {
+ btf->raw_data = malloc(size);
+ if (!btf->raw_data) {
+ err = -ENOMEM;
+ goto done;
+ }
+ memcpy(btf->raw_data, data, size);
}
- memcpy(btf->raw_data, data, size);
+
btf->raw_size = size;
btf->hdr = btf->raw_data;
@@ -1081,12 +1103,12 @@ done:
struct btf *btf__new(const void *data, __u32 size)
{
- return libbpf_ptr(btf_new(data, size, NULL));
+ return libbpf_ptr(btf_new(data, size, NULL, false));
}
struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
{
- return libbpf_ptr(btf_new(data, size, base_btf));
+ return libbpf_ptr(btf_new(data, size, base_btf, false));
}
struct btf_elf_secs {
@@ -1146,6 +1168,12 @@ static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs
else
continue;
+ if (sh.sh_type != SHT_PROGBITS) {
+ pr_warn("unexpected section type (%d) of section(%d, %s) from %s\n",
+ sh.sh_type, idx, name, path);
+ goto err;
+ }
+
data = elf_getdata(scn, 0);
if (!data) {
pr_warn("failed to get section(%d, %s) data from %s\n",
@@ -1178,12 +1206,13 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
fd = open(path, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
- pr_warn("failed to open %s: %s\n", path, strerror(errno));
+ pr_warn("failed to open %s: %s\n", path, errstr(err));
return ERR_PTR(err);
}
elf = elf_begin(fd, ELF_C_READ, NULL);
if (!elf) {
+ err = -LIBBPF_ERRNO__FORMAT;
pr_warn("failed to open %s as ELF file\n", path);
goto done;
}
@@ -1200,7 +1229,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
if (secs.btf_base_data) {
dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
- NULL);
+ NULL, false);
if (IS_ERR(dist_base_btf)) {
err = PTR_ERR(dist_base_btf);
dist_base_btf = NULL;
@@ -1209,7 +1238,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
}
btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
- dist_base_btf ?: base_btf);
+ dist_base_btf ?: base_btf, false);
if (IS_ERR(btf)) {
err = PTR_ERR(btf);
goto done;
@@ -1326,7 +1355,7 @@ static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
}
/* finally parse BTF data */
- btf = btf_new(data, sz, base_btf);
+ btf = btf_new(data, sz, base_btf, false);
err_out:
free(data);
@@ -1345,6 +1374,37 @@ struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
return libbpf_ptr(btf_parse_raw(path, base_btf));
}
+static struct btf *btf_parse_raw_mmap(const char *path, struct btf *base_btf)
+{
+ struct stat st;
+ void *data;
+ struct btf *btf;
+ int fd, err;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return ERR_PTR(-errno);
+
+ if (fstat(fd, &st) < 0) {
+ err = -errno;
+ close(fd);
+ return ERR_PTR(err);
+ }
+
+ data = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ err = -errno;
+ close(fd);
+
+ if (data == MAP_FAILED)
+ return ERR_PTR(err);
+
+ btf = btf_new(data, st.st_size, base_btf, true);
+ if (IS_ERR(btf))
+ munmap(data, st.st_size);
+
+ return btf;
+}
+
static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
{
struct btf *btf;
@@ -1444,7 +1504,7 @@ retry_load:
goto retry_load;
err = -errno;
- pr_warn("BTF loading error: %d\n", err);
+ pr_warn("BTF loading error: %s\n", errstr(err));
/* don't print out contents of custom log_buf */
if (!log_buf && buf[0])
pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
@@ -1609,19 +1669,25 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
goto exit_free;
}
- btf = btf_new(ptr, btf_info.btf_size, base_btf);
+ btf = btf_new(ptr, btf_info.btf_size, base_btf, false);
exit_free:
free(ptr);
return btf;
}
-struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
+struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd)
{
struct btf *btf;
int btf_fd;
+ LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts);
+
+ if (token_fd) {
+ opts.open_flags |= BPF_F_TOKEN_FD;
+ opts.token_fd = token_fd;
+ }
- btf_fd = bpf_btf_get_fd_by_id(id);
+ btf_fd = bpf_btf_get_fd_by_id_opts(id, &opts);
if (btf_fd < 0)
return libbpf_err_ptr(-errno);
@@ -1631,6 +1697,11 @@ struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
return libbpf_ptr(btf);
}
+struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
+{
+ return btf_load_from_kernel(id, base_btf, 0);
+}
+
struct btf *btf__load_from_kernel_by_id(__u32 id)
{
return btf__load_from_kernel_by_id_split(id, NULL);
@@ -1638,10 +1709,8 @@ struct btf *btf__load_from_kernel_by_id(__u32 id)
static void btf_invalidate_raw_data(struct btf *btf)
{
- if (btf->raw_data) {
- free(btf->raw_data);
- btf->raw_data = NULL;
- }
+ if (btf->raw_data)
+ btf_free_raw_data(btf);
if (btf->raw_data_swapped) {
free(btf->raw_data_swapped);
btf->raw_data_swapped = NULL;
@@ -2087,7 +2156,7 @@ static int validate_type_id(int id)
}
/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
-static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
+static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id, int kflag)
{
struct btf_type *t;
int sz, name_off = 0;
@@ -2110,7 +2179,7 @@ static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref
}
t->name_off = name_off;
- t->info = btf_type_info(kind, 0, 0);
+ t->info = btf_type_info(kind, 0, kflag);
t->type = ref_type_id;
return btf_commit_type(btf, sz);
@@ -2125,7 +2194,7 @@ static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref
*/
int btf__add_ptr(struct btf *btf, int ref_type_id)
{
- return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
+ return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id, 0);
}
/*
@@ -2503,7 +2572,7 @@ int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
struct btf_type *t;
int id;
- id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
+ id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0, 0);
if (id <= 0)
return id;
t = btf_type_by_id(btf, id);
@@ -2533,7 +2602,7 @@ int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
if (!name || !name[0])
return libbpf_err(-EINVAL);
- return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
+ return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id, 0);
}
/*
@@ -2545,7 +2614,7 @@ int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
*/
int btf__add_volatile(struct btf *btf, int ref_type_id)
{
- return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
+ return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id, 0);
}
/*
@@ -2557,7 +2626,7 @@ int btf__add_volatile(struct btf *btf, int ref_type_id)
*/
int btf__add_const(struct btf *btf, int ref_type_id)
{
- return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
+ return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id, 0);
}
/*
@@ -2569,7 +2638,7 @@ int btf__add_const(struct btf *btf, int ref_type_id)
*/
int btf__add_restrict(struct btf *btf, int ref_type_id)
{
- return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
+ return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id, 0);
}
/*
@@ -2585,7 +2654,24 @@ int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
if (!value || !value[0])
return libbpf_err(-EINVAL);
- return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
+ return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 0);
+}
+
+/*
+ * Append new BTF_KIND_TYPE_TAG type with:
+ * - *value*, non-empty/non-NULL tag value;
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Set info->kflag to 1, indicating this tag is an __attribute__
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id)
+{
+ if (!value || !value[0])
+ return libbpf_err(-EINVAL);
+
+ return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 1);
}
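
btf__add_type_attr() differs from btf__add_type_tag() only by setting kflag,
and btf__add_decl_attr() further below mirrors btf__add_decl_tag() the same
way. A sketch of emitting an attribute-style tag (btf and the tag value are
illustrative):

    /* Sketch: emit `int __attribute__((btf_type_tag("rcu"))) *` as BTF;
     * kflag=1 distinguishes attribute syntax from a plain type tag.
     */
    int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
    int tag_id = btf__add_type_attr(btf, "rcu", int_id);
    int ptr_id = btf__add_ptr(btf, tag_id);
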
/*
@@ -2607,7 +2693,7 @@ int btf__add_func(struct btf *btf, const char *name,
linkage != BTF_FUNC_EXTERN)
return libbpf_err(-EINVAL);
- id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
+ id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id, 0);
if (id > 0) {
struct btf_type *t = btf_type_by_id(btf, id);
@@ -2842,18 +2928,8 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
return 0;
}
-/*
- * Append new BTF_KIND_DECL_TAG type with:
- * - *value* - non-empty/non-NULL string;
- * - *ref_type_id* - referenced type ID, it might not exist yet;
- * - *component_idx* - -1 for tagging reference type, otherwise struct/union
- * member or function argument index;
- * Returns:
- * - >0, type ID of newly added BTF type;
- * - <0, on error.
- */
-int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
- int component_idx)
+static int btf_add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
+ int component_idx, int kflag)
{
struct btf_type *t;
int sz, value_off;
@@ -2877,14 +2953,47 @@ int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
return value_off;
t->name_off = value_off;
- t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
+ t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, kflag);
t->type = ref_type_id;
btf_decl_tag(t)->component_idx = component_idx;
return btf_commit_type(btf, sz);
}
-struct btf_ext_sec_setup_param {
+/*
+ * Append new BTF_KIND_DECL_TAG type with:
+ * - *value* - non-empty/non-NULL string;
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * - *component_idx* - -1 for tagging reference type, otherwise struct/union
+ * member or function argument index;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
+ int component_idx)
+{
+ return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 0);
+}
+
+/*
+ * Append new BTF_KIND_DECL_TAG type with:
+ * - *value* - non-empty/non-NULL string;
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * - *component_idx* - -1 for tagging reference type, otherwise struct/union
+ * member or function argument index;
+ * Set info->kflag to 1, indicating this tag is an __attribute__
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id,
+ int component_idx)
+{
+ return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 1);
+}
+
+struct btf_ext_sec_info_param {
__u32 off;
__u32 len;
__u32 min_rec_size;
@@ -2892,14 +3001,20 @@ struct btf_ext_sec_setup_param {
const char *desc;
};
-static int btf_ext_setup_info(struct btf_ext *btf_ext,
- struct btf_ext_sec_setup_param *ext_sec)
+/*
+ * Parse a single info subsection of the BTF.ext info data:
+ * - validate subsection structure and elements
+ * - save info subsection start and sizing details in struct btf_ext
+ * - endian-independent operation, for calling before byte-swapping
+ */
+static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
+ struct btf_ext_sec_info_param *ext_sec,
+ bool is_native)
{
const struct btf_ext_info_sec *sinfo;
struct btf_ext_info *ext_info;
__u32 info_left, record_size;
size_t sec_cnt = 0;
- /* The start of the info sec (including the __u32 record_size). */
void *info;
if (ext_sec->len == 0)
@@ -2911,6 +3026,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
return -EINVAL;
}
+ /* The start of the info sec (including the __u32 record_size). */
info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
info_left = ext_sec->len;
@@ -2926,9 +3042,13 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
return -EINVAL;
}
- /* The record size needs to meet the minimum standard */
- record_size = *(__u32 *)info;
+ /* The record size needs to meet either the minimum standard or, when
+ * handling non-native endianness data, the exact standard so as
+ * to allow safe byte-swapping.
+ */
+ record_size = is_native ? *(__u32 *)info : bswap_32(*(__u32 *)info);
if (record_size < ext_sec->min_rec_size ||
+ (!is_native && record_size != ext_sec->min_rec_size) ||
record_size & 0x03) {
pr_debug("%s section in .BTF.ext has invalid record size %u\n",
ext_sec->desc, record_size);
@@ -2940,7 +3060,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
/* If no records, return failure now so .BTF.ext won't be used. */
if (!info_left) {
- pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
+ pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
return -EINVAL;
}
@@ -2955,7 +3075,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
return -EINVAL;
}
- num_records = sinfo->num_info;
+ num_records = is_native ? sinfo->num_info : bswap_32(sinfo->num_info);
if (num_records == 0) {
pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
@@ -2983,64 +3103,157 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
return 0;
}
-static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
+/* Parse all info secs in the BTF.ext info data */
+static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native)
{
- struct btf_ext_sec_setup_param param = {
+ struct btf_ext_sec_info_param func_info = {
.off = btf_ext->hdr->func_info_off,
.len = btf_ext->hdr->func_info_len,
.min_rec_size = sizeof(struct bpf_func_info_min),
.ext_info = &btf_ext->func_info,
.desc = "func_info"
};
-
- return btf_ext_setup_info(btf_ext, &param);
-}
-
-static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
-{
- struct btf_ext_sec_setup_param param = {
+ struct btf_ext_sec_info_param line_info = {
.off = btf_ext->hdr->line_info_off,
.len = btf_ext->hdr->line_info_len,
.min_rec_size = sizeof(struct bpf_line_info_min),
.ext_info = &btf_ext->line_info,
.desc = "line_info",
};
-
- return btf_ext_setup_info(btf_ext, &param);
-}
-
-static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
-{
- struct btf_ext_sec_setup_param param = {
- .off = btf_ext->hdr->core_relo_off,
- .len = btf_ext->hdr->core_relo_len,
+ struct btf_ext_sec_info_param core_relo = {
.min_rec_size = sizeof(struct bpf_core_relo),
.ext_info = &btf_ext->core_relo_info,
.desc = "core_relo",
};
+ int err;
- return btf_ext_setup_info(btf_ext, &param);
+ err = btf_ext_parse_sec_info(btf_ext, &func_info, is_native);
+ if (err)
+ return err;
+
+ err = btf_ext_parse_sec_info(btf_ext, &line_info, is_native);
+ if (err)
+ return err;
+
+ if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
+ return 0; /* skip core relos parsing */
+
+ core_relo.off = btf_ext->hdr->core_relo_off;
+ core_relo.len = btf_ext->hdr->core_relo_len;
+ err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native);
+ if (err)
+ return err;
+
+ return 0;
}
-static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
+/* Swap byte-order of BTF.ext header with any endianness */
+static void btf_ext_bswap_hdr(struct btf_ext_header *h)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
+ bool is_native = h->magic == BTF_MAGIC;
+ __u32 hdr_len;
+
+ hdr_len = is_native ? h->hdr_len : bswap_32(h->hdr_len);
+
+ h->magic = bswap_16(h->magic);
+ h->hdr_len = bswap_32(h->hdr_len);
+ h->func_info_off = bswap_32(h->func_info_off);
+ h->func_info_len = bswap_32(h->func_info_len);
+ h->line_info_off = bswap_32(h->line_info_off);
+ h->line_info_len = bswap_32(h->line_info_len);
+
+ if (hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
+ return;
+
+ h->core_relo_off = bswap_32(h->core_relo_off);
+ h->core_relo_len = bswap_32(h->core_relo_len);
+}
+
+/* Swap byte-order of generic info subsection */
+static void btf_ext_bswap_info_sec(void *info, __u32 len, bool is_native,
+ info_rec_bswap_fn bswap_fn)
+{
+ struct btf_ext_info_sec *sec;
+ __u32 info_left, rec_size, *rs;
+
+ if (len == 0)
+ return;
+
+ rs = info; /* info record size */
+ rec_size = is_native ? *rs : bswap_32(*rs);
+ *rs = bswap_32(*rs);
+
+ sec = info + sizeof(__u32); /* info sec #1 */
+ info_left = len - sizeof(__u32);
+ while (info_left) {
+ unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
+ __u32 i, num_recs;
+ void *p;
+
+ num_recs = is_native ? sec->num_info : bswap_32(sec->num_info);
+ sec->sec_name_off = bswap_32(sec->sec_name_off);
+ sec->num_info = bswap_32(sec->num_info);
+ p = sec->data; /* info rec #1 */
+ for (i = 0; i < num_recs; i++, p += rec_size)
+ bswap_fn(p);
+ sec = p;
+ info_left -= sec_hdrlen + (__u64)rec_size * num_recs;
+ }
+}
+
+/*
+ * Swap byte-order of all info data in a BTF.ext section
+ * - requires BTF.ext hdr in native endianness
+ */
+static void btf_ext_bswap_info(struct btf_ext *btf_ext, void *data)
+{
+ const bool is_native = btf_ext->swapped_endian;
+ const struct btf_ext_header *h = data;
+ void *info;
- if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
- data_size < hdr->hdr_len) {
- pr_debug("BTF.ext header not found");
+ /* Swap func_info subsection byte-order */
+ info = data + h->hdr_len + h->func_info_off;
+ btf_ext_bswap_info_sec(info, h->func_info_len, is_native,
+ (info_rec_bswap_fn)bpf_func_info_bswap);
+
+ /* Swap line_info subsection byte-order */
+ info = data + h->hdr_len + h->line_info_off;
+ btf_ext_bswap_info_sec(info, h->line_info_len, is_native,
+ (info_rec_bswap_fn)bpf_line_info_bswap);
+
+ /* Swap core_relo subsection byte-order (if present) */
+ if (h->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
+ return;
+
+ info = data + h->hdr_len + h->core_relo_off;
+ btf_ext_bswap_info_sec(info, h->core_relo_len, is_native,
+ (info_rec_bswap_fn)bpf_core_relo_bswap);
+}
+
+/* Parse hdr data and info sections: check and convert to native endianness */
+static int btf_ext_parse(struct btf_ext *btf_ext)
+{
+ __u32 hdr_len, data_size = btf_ext->data_size;
+ struct btf_ext_header *hdr = btf_ext->hdr;
+ bool swapped_endian = false;
+ int err;
+
+ if (data_size < offsetofend(struct btf_ext_header, hdr_len)) {
+ pr_debug("BTF.ext header too short\n");
return -EINVAL;
}
+ hdr_len = hdr->hdr_len;
if (hdr->magic == bswap_16(BTF_MAGIC)) {
- pr_warn("BTF.ext in non-native endianness is not supported\n");
- return -ENOTSUP;
+ swapped_endian = true;
+ hdr_len = bswap_32(hdr_len);
} else if (hdr->magic != BTF_MAGIC) {
pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
return -EINVAL;
}
- if (hdr->version != BTF_VERSION) {
+ /* Ensure known version of structs, current BTF_VERSION == 1 */
+ if (hdr->version != 1) {
pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
return -ENOTSUP;
}
@@ -3050,11 +3263,39 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
return -ENOTSUP;
}
- if (data_size == hdr->hdr_len) {
+ if (data_size < hdr_len) {
+ pr_debug("BTF.ext header not found\n");
+ return -EINVAL;
+ } else if (data_size == hdr_len) {
pr_debug("BTF.ext has no data\n");
return -EINVAL;
}
+ /* Verify mandatory hdr info details present */
+ if (hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
+ pr_warn("BTF.ext header missing func_info, line_info\n");
+ return -EINVAL;
+ }
+
+ /* Keep hdr native byte-order in memory for introspection */
+ if (swapped_endian)
+ btf_ext_bswap_hdr(btf_ext->hdr);
+
+ /* Validate info subsections and cache key metadata */
+ err = btf_ext_parse_info(btf_ext, !swapped_endian);
+ if (err)
+ return err;
+
+ /* Keep infos native byte-order in memory for introspection */
+ if (swapped_endian)
+ btf_ext_bswap_info(btf_ext, btf_ext->data);
+
+ /*
+ * Set btf_ext->swapped_endian only after all header and info data has
+ * been swapped, helping bswap functions determine if their data are
+ * in native byte-order when called.
+ */
+ btf_ext->swapped_endian = swapped_endian;
return 0;
}
@@ -3066,6 +3307,7 @@ void btf_ext__free(struct btf_ext *btf_ext)
free(btf_ext->line_info.sec_idxs);
free(btf_ext->core_relo_info.sec_idxs);
free(btf_ext->data);
+ free(btf_ext->data_swapped);
free(btf_ext);
}
@@ -3086,29 +3328,7 @@ struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
}
memcpy(btf_ext->data, data, size);
- err = btf_ext_parse_hdr(btf_ext->data, size);
- if (err)
- goto done;
-
- if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
- err = -EINVAL;
- goto done;
- }
-
- err = btf_ext_setup_func_info(btf_ext);
- if (err)
- goto done;
-
- err = btf_ext_setup_line_info(btf_ext);
- if (err)
- goto done;
-
- if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
- goto done; /* skip core relos parsing */
-
- err = btf_ext_setup_core_relos(btf_ext);
- if (err)
- goto done;
+ err = btf_ext_parse(btf_ext);
done:
if (err) {
@@ -3119,15 +3339,66 @@ done:
return btf_ext;
}
+static void *btf_ext_raw_data(const struct btf_ext *btf_ext_ro, bool swap_endian)
+{
+ struct btf_ext *btf_ext = (struct btf_ext *)btf_ext_ro;
+ const __u32 data_sz = btf_ext->data_size;
+ void *data;
+
+ /* Return native data (always present) or swapped data if present */
+ if (!swap_endian)
+ return btf_ext->data;
+ else if (btf_ext->data_swapped)
+ return btf_ext->data_swapped;
+
+ /* Recreate missing swapped data, then cache and return */
+ data = calloc(1, data_sz);
+ if (!data)
+ return NULL;
+ memcpy(data, btf_ext->data, data_sz);
+
+ btf_ext_bswap_info(btf_ext, data);
+ btf_ext_bswap_hdr(data);
+ btf_ext->data_swapped = data;
+ return data;
+}
+
const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
{
+ void *data;
+
+ data = btf_ext_raw_data(btf_ext, btf_ext->swapped_endian);
+ if (!data)
+ return errno = ENOMEM, NULL;
+
*size = btf_ext->data_size;
- return btf_ext->data;
+ return data;
}
__attribute__((alias("btf_ext__raw_data")))
const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
+enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext)
+{
+ if (is_host_big_endian())
+ return btf_ext->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
+ else
+ return btf_ext->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
+}
+
+int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian)
+{
+ if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
+ return libbpf_err(-EINVAL);
+
+ btf_ext->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
+
+ if (!btf_ext->swapped_endian) {
+ free(btf_ext->data_swapped);
+ btf_ext->data_swapped = NULL;
+ }
+ return 0;
+}
struct btf_dedup;
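
The new BTF.ext endianness API mirrors btf__set_endianness(); a sketch of
forcing big-endian raw output regardless of host byte order:

    /* Sketch: request big-endian raw BTF.ext data; btf_ext__raw_data() then
     * lazily builds and caches the byte-swapped copy.
     */
    __u32 size;
    const void *raw;

    if (btf_ext__set_endianness(btf_ext, BTF_BIG_ENDIAN))
            return -1;

    raw = btf_ext__raw_data(btf_ext, &size);
    if (!raw)
            return -ENOMEM; /* errno is set to ENOMEM on alloc failure */
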
@@ -3290,7 +3561,7 @@ int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
d = btf_dedup_new(btf, opts);
if (IS_ERR(d)) {
- pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
+ pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
return libbpf_err(-EINVAL);
}
@@ -3301,42 +3572,42 @@ int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
err = btf_dedup_prep(d);
if (err) {
- pr_debug("btf_dedup_prep failed:%d\n", err);
+ pr_debug("btf_dedup_prep failed: %s\n", errstr(err));
goto done;
}
err = btf_dedup_strings(d);
if (err < 0) {
- pr_debug("btf_dedup_strings failed:%d\n", err);
+ pr_debug("btf_dedup_strings failed: %s\n", errstr(err));
goto done;
}
err = btf_dedup_prim_types(d);
if (err < 0) {
- pr_debug("btf_dedup_prim_types failed:%d\n", err);
+ pr_debug("btf_dedup_prim_types failed: %s\n", errstr(err));
goto done;
}
err = btf_dedup_struct_types(d);
if (err < 0) {
- pr_debug("btf_dedup_struct_types failed:%d\n", err);
+ pr_debug("btf_dedup_struct_types failed: %s\n", errstr(err));
goto done;
}
err = btf_dedup_resolve_fwds(d);
if (err < 0) {
- pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
+ pr_debug("btf_dedup_resolve_fwds failed: %s\n", errstr(err));
goto done;
}
err = btf_dedup_ref_types(d);
if (err < 0) {
- pr_debug("btf_dedup_ref_types failed:%d\n", err);
+ pr_debug("btf_dedup_ref_types failed: %s\n", errstr(err));
goto done;
}
err = btf_dedup_compact_types(d);
if (err < 0) {
- pr_debug("btf_dedup_compact_types failed:%d\n", err);
+ pr_debug("btf_dedup_compact_types failed: %s\n", errstr(err));
goto done;
}
err = btf_dedup_remap_types(d);
if (err < 0) {
- pr_debug("btf_dedup_remap_types failed:%d\n", err);
+ pr_debug("btf_dedup_remap_types failed: %s\n", errstr(err));
goto done;
}
@@ -3384,7 +3655,7 @@ struct btf_dedup {
struct strset *strs_set;
};
-static long hash_combine(long h, long value)
+static unsigned long hash_combine(unsigned long h, unsigned long value)
{
return h * 31 + value;
}
@@ -3630,6 +3901,20 @@ err_out:
return err;
}
+/*
+ * Calculate type signature hash of TYPEDEF, ignoring referenced type IDs,
+ * as equivalence of referenced type IDs is established separately by the
+ * type graph equivalence check algorithm.
+ */
+static long btf_hash_typedef(struct btf_type *t)
+{
+ long h;
+
+ h = hash_combine(0, t->name_off);
+ h = hash_combine(h, t->info);
+ return h;
+}
+
static long btf_hash_common(struct btf_type *t)
{
long h;
@@ -3647,6 +3932,13 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
t1->size == t2->size;
}
+/* Check structural compatibility of two TYPEDEFs. */
+static bool btf_equal_typedef(struct btf_type *t1, struct btf_type *t2)
+{
+ return t1->name_off == t2->name_off &&
+ t1->info == t2->info;
+}
+
/* Calculate type signature hash of INT or TAG. */
static long btf_hash_int_decl_tag(struct btf_type *t)
{
@@ -4134,46 +4426,109 @@ static inline __u16 btf_fwd_kind(struct btf_type *t)
return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
}
-/* Check if given two types are identical ARRAY definitions */
-static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
+static bool btf_dedup_identical_types(struct btf_dedup *d, __u32 id1, __u32 id2, int depth)
{
struct btf_type *t1, *t2;
+ int k1, k2;
+recur:
+ if (depth <= 0)
+ return false;
t1 = btf_type_by_id(d->btf, id1);
t2 = btf_type_by_id(d->btf, id2);
- if (!btf_is_array(t1) || !btf_is_array(t2))
+
+ k1 = btf_kind(t1);
+ k2 = btf_kind(t2);
+ if (k1 != k2)
return false;
- return btf_equal_array(t1, t2);
-}
+ switch (k1) {
+ case BTF_KIND_UNKN: /* VOID */
+ return true;
+ case BTF_KIND_INT:
+ return btf_equal_int_tag(t1, t2);
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ return btf_compat_enum(t1, t2);
+ case BTF_KIND_FWD:
+ case BTF_KIND_FLOAT:
+ return btf_equal_common(t1, t2);
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_TYPE_TAG:
+ if (t1->info != t2->info || t1->name_off != t2->name_off)
+ return false;
+ id1 = t1->type;
+ id2 = t2->type;
+ goto recur;
+ case BTF_KIND_ARRAY: {
+ struct btf_array *a1, *a2;
-/* Check if given two types are identical STRUCT/UNION definitions */
-static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
-{
- const struct btf_member *m1, *m2;
- struct btf_type *t1, *t2;
- int n, i;
+ if (!btf_compat_array(t1, t2))
+ return false;
- t1 = btf_type_by_id(d->btf, id1);
- t2 = btf_type_by_id(d->btf, id2);
+ a1 = btf_array(t1);
+ a2 = btf_array(t2);
- if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
- return false;
+ if (a1->index_type != a2->index_type &&
+ !btf_dedup_identical_types(d, a1->index_type, a2->index_type, depth - 1))
+ return false;
- if (!btf_shallow_equal_struct(t1, t2))
- return false;
+ if (a1->type != a2->type &&
+ !btf_dedup_identical_types(d, a1->type, a2->type, depth - 1))
+ return false;
- m1 = btf_members(t1);
- m2 = btf_members(t2);
- for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
- if (m1->type != m2->type &&
- !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
- !btf_dedup_identical_structs(d, m1->type, m2->type))
+ return true;
+ }
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m1, *m2;
+ int i, n;
+
+ if (!btf_shallow_equal_struct(t1, t2))
return false;
+
+ m1 = btf_members(t1);
+ m2 = btf_members(t2);
+ for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
+ if (m1->type == m2->type)
+ continue;
+ if (!btf_dedup_identical_types(d, m1->type, m2->type, depth - 1))
+ return false;
+ }
+ return true;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *p1, *p2;
+ int i, n;
+
+ if (!btf_compat_fnproto(t1, t2))
+ return false;
+
+ if (t1->type != t2->type &&
+ !btf_dedup_identical_types(d, t1->type, t2->type, depth - 1))
+ return false;
+
+ p1 = btf_params(t1);
+ p2 = btf_params(t2);
+ for (i = 0, n = btf_vlen(t1); i < n; i++, p1++, p2++) {
+ if (p1->type == p2->type)
+ continue;
+ if (!btf_dedup_identical_types(d, p1->type, p2->type, depth - 1))
+ return false;
+ }
+ return true;
+ }
+ default:
+ return false;
}
- return true;
}
+
/*
* Check equivalence of BTF type graph formed by candidate struct/union (we'll
* call it "candidate graph" in this description for brevity) to a type graph
@@ -4191,7 +4546,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
* and canonical graphs are not compatible structurally, whole graphs are
* incompatible. If types are structurally equivalent (i.e., all information
* except referenced type IDs is exactly the same), a mapping from `canon_id` to
- * a `cand_id` is recored in hypothetical mapping (`btf_dedup->hypot_map`).
+ * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
* If a type references other types, then those referenced types are checked
* for equivalence recursively.
*
@@ -4229,7 +4584,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
* consists of portions of the graph that come from multiple compilation units.
* This is due to the fact that types within single compilation unit are always
* deduplicated and FWDs are already resolved, if referenced struct/union
- * definiton is available. So, if we had unresolved FWD and found corresponding
+ * definition is available. So, if we had unresolved FWD and found corresponding
* STRUCT/UNION, they will be from different compilation units. This
* consequently means that when we "link" FWD to corresponding STRUCT/UNION,
* type graph will likely have at least two different BTF types that describe
@@ -4292,19 +4647,13 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
* different fields within the *same* struct. This breaks type
* equivalence check, which makes an assumption that candidate
* types sub-graph has a consistent and deduped-by-compiler
- * types within a single CU. So work around that by explicitly
- * allowing identical array types here.
+ * types within a single CU. And similar situation can happen
+ * with struct/union sometimes, and even with pointers.
+ * So accommodate cases like this by doing a structural
+ * comparison recursively, but avoiding being stuck in endless
+ * loops by limiting the depth up to which we check.
*/
- if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
- return 1;
- /* It turns out that similar situation can happen with
- * struct/union sometimes, sigh... Handle the case where
- * structs/unions are exactly the same, down to the referenced
- * type IDs. Anything more complicated (e.g., if referenced
- * types are different, but equivalent) is *way more*
- * complicated and requires a many-to-many equivalence mapping.
- */
- if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
+ if (btf_dedup_identical_types(d, hypot_type_id, cand_id, 16))
return 1;
return 0;
}
@@ -4516,13 +4865,30 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
}
}
+static inline long btf_hash_by_kind(struct btf_type *t, __u16 kind)
+{
+ if (kind == BTF_KIND_TYPEDEF)
+ return btf_hash_typedef(t);
+ else
+ return btf_hash_struct(t);
+}
+
+static inline bool btf_equal_by_kind(struct btf_type *t1, struct btf_type *t2, __u16 kind)
+{
+ if (kind == BTF_KIND_TYPEDEF)
+ return btf_equal_typedef(t1, t2);
+ else
+ return btf_shallow_equal_struct(t1, t2);
+}
+
/*
- * Deduplicate struct/union types.
+ * Deduplicate struct/union and typedef types.
*
* For each struct/union type its type signature hash is calculated, taking
* into account type's name, size, number, order and names of fields, but
* ignoring type ID's referenced from fields, because they might not be deduped
- * completely until after reference types deduplication phase. This type hash
+ * completely until after reference types deduplication phase. For each typedef
+ * type, the hash is computed based on the type's name and size. This type hash
* is used to iterate over all potential canonical types, sharing same hash.
* For each canonical candidate we check whether type graphs that they form
* (through referenced types in fields and so on) are equivalent using algorithm
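
The kind dispatch above pairs each shallow hash with a matching shallow equality check. For typedefs the hash keys primarily on the name; a generic string hash such as FNV-1a conveys the idea (illustrative only — btf_hash_typedef() itself is defined elsewhere in btf.c):

#include <stdint.h>

/* FNV-1a over the type name: cheap, and deliberately independent of
 * the referenced type ID, which may not be deduped yet at this stage.
 */
static uint64_t toy_typedef_hash(const char *name)
{
	uint64_t h = 0xcbf29ce484222325ULL;

	while (*name) {
		h ^= (unsigned char)*name++;
		h *= 0x100000001b3ULL;
	}
	return h;
}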
@@ -4554,18 +4920,20 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
t = btf_type_by_id(d->btf, type_id);
kind = btf_kind(t);
- if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
+ if (kind != BTF_KIND_STRUCT &&
+ kind != BTF_KIND_UNION &&
+ kind != BTF_KIND_TYPEDEF)
return 0;
- h = btf_hash_struct(t);
+ h = btf_hash_by_kind(t, kind);
for_each_dedup_cand(d, hash_entry, h) {
__u32 cand_id = hash_entry->value;
int eq;
/*
* Even though btf_dedup_is_equiv() checks for
- * btf_shallow_equal_struct() internally when checking two
- * structs (unions) for equivalence, we need to guard here
+ * btf_equal_by_kind() internally when checking two
+ * structs (unions) or typedefs for equivalence, we need to guard here
* from picking matching FWD type as a dedup candidate.
* This can happen due to hash collision. In such case just
* relying on btf_dedup_is_equiv() would lead to potentially
@@ -4573,7 +4941,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
* FWD and compatible STRUCT/UNION are considered equivalent.
*/
cand_type = btf_type_by_id(d->btf, cand_id);
- if (!btf_shallow_equal_struct(t, cand_type))
+ if (!btf_equal_by_kind(t, cand_type, kind))
continue;
btf_dedup_clear_hypot_map(d);
@@ -4611,18 +4979,18 @@ static int btf_dedup_struct_types(struct btf_dedup *d)
/*
* Deduplicate reference type.
*
- * Once all primitive and struct/union types got deduplicated, we can easily
+ * Once all primitive, struct/union and typedef types got deduplicated, we can easily
* deduplicate all other (reference) BTF types. This is done in two steps:
*
* 1. Resolve all referenced type IDs into their canonical type IDs. This
- * resolution can be done either immediately for primitive or struct/union types
- * (because they were deduped in previous two phases) or recursively for
+ * resolution can be done either immediately for primitive, struct/union, and typedef
+ * types (because they were deduped in previous two phases) or recursively for
* reference types. Recursion will always terminate at either primitive or
- * struct/union type, at which point we can "unwind" chain of reference types
- * one by one. There is no danger of encountering cycles because in C type
- * system the only way to form type cycle is through struct/union, so any chain
- * of reference types, even those taking part in a type cycle, will inevitably
- * reach struct/union at some point.
+ * struct/union or typedef type, at which point we can "unwind" the chain of reference
+ * types one by one. There is no danger of encountering cycles in C, as the only way to
+ * form a type cycle is through struct or union types. Go, however, can form such cycles
+ * through typedefs. Thus, any chain of reference types, even those taking part in a
+ * type cycle, will inevitably reach a struct/union or typedef type at some point.
*
* 2. Once all referenced type IDs are resolved into canonical ones, BTF type
* becomes "stable", in the sense that no further deduplication will cause
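
The Go caveat above is the reason typedefs now count as terminals. A toy resolver makes the termination argument concrete (hypothetical names, not libbpf internals):

enum toy_kind { TK_INT, TK_PTR, TK_STRUCT, TK_TYPEDEF };

struct toy_node {
	enum toy_kind kind;
	int id;
	const struct toy_node *ref;
};

/* Step 1 above, sketched: follow pure reference kinds until a
 * terminal is reached. Treating TK_TYPEDEF as a terminal is what
 * breaks Go-style cycles like 'type List *List', which encode as
 * TYPEDEF -> PTR -> the same TYPEDEF.
 */
static int toy_resolve(const struct toy_node *t)
{
	while (t->kind == TK_PTR)
		t = t->ref;
	return t->id; /* TK_INT, TK_STRUCT, or TK_TYPEDEF */
}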
@@ -4654,7 +5022,6 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
- case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_TYPE_TAG:
ref_type_id = btf_dedup_ref_type(d, t->type);
@@ -5052,10 +5419,14 @@ struct btf *btf__load_vmlinux_btf(void)
pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
sysfs_btf_path);
} else {
- btf = btf__parse(sysfs_btf_path, NULL);
+ btf = btf_parse_raw_mmap(sysfs_btf_path, NULL);
+ if (IS_ERR(btf))
+ btf = btf__parse(sysfs_btf_path, NULL);
+
if (!btf) {
err = -errno;
- pr_warn("failed to read kernel BTF from '%s': %d\n", sysfs_btf_path, err);
+ pr_warn("failed to read kernel BTF from '%s': %s\n",
+ sysfs_btf_path, errstr(err));
return libbpf_err_ptr(err);
}
pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
@@ -5072,7 +5443,7 @@ struct btf *btf__load_vmlinux_btf(void)
btf = btf__parse(path, NULL);
err = libbpf_get_error(btf);
- pr_debug("loading kernel BTF '%s': %d\n", path, err);
+ pr_debug("loading kernel BTF '%s': %s\n", path, errstr(err));
if (err)
continue;
@@ -5394,6 +5765,9 @@ int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
new_base = btf__new_empty();
if (!new_base)
return libbpf_err(-ENOMEM);
+
+ btf__set_endianness(new_base, btf__endianness(src_btf));
+
dist.id_map = calloc(n, sizeof(*dist.id_map));
if (!dist.id_map) {
err = -ENOMEM;
@@ -5483,7 +5857,7 @@ void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
{
btf->base_btf = (struct btf *)base_btf;
btf->start_id = btf__type_cnt(base_btf);
- btf->start_str_off = base_btf->hdr->str_len;
+ btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
}
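
The one-line change above matters for chains of split BTF (base <- split <- split): a grandchild's string offsets must skip the string sections of all ancestors, not just the immediate parent's. A self-contained sketch of the accumulated arithmetic, with made-up sizes:

#include <assert.h>

struct toy_btf {
	unsigned int own_str_len;   /* this level's string section size */
	unsigned int start_str_off; /* where its strings start globally */
};

static void toy_set_base(struct toy_btf *btf, const struct toy_btf *base)
{
	/* mirrors the fixed line: accumulate across the whole chain */
	btf->start_str_off = base->own_str_len + base->start_str_off;
}

int main(void)
{
	struct toy_btf base = { .own_str_len = 100 };
	struct toy_btf mid  = { .own_str_len = 40 };
	struct toy_btf top  = { 0 };

	toy_set_base(&mid, &base);
	toy_set_base(&top, &mid);
	assert(top.start_str_off == 140); /* 100 + 40, not just 40 */
	return 0;
}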
int btf__relocate(struct btf *btf, const struct btf *base_btf)
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b68d216837a9..cc01494d6210 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -94,6 +94,7 @@ LIBBPF_API struct btf *btf__new_empty(void);
* @brief **btf__new_empty_split()** creates an unpopulated BTF object from an
* ELF BTF section except with a base BTF on top of which split BTF should be
* based
+ * @param base_btf base BTF object
* @return new BTF object instance which has to be eventually freed with
* **btf__free()**
*
@@ -115,6 +116,10 @@ LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
* When that split BTF is loaded against a (possibly changed) base, this
* distilled base BTF will help update references to that (possibly changed)
* base BTF.
+ * @param src_btf source split BTF object
+ * @param new_base_btf pointer to where the new base BTF object pointer will be stored
+ * @param new_split_btf pointer to where the new split BTF object pointer will be stored
+ * @return 0 on success; negative error code, otherwise
*
* Both the new split and its associated new base BTF must be freed by
* the caller.
@@ -167,6 +172,9 @@ LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
+LIBBPF_API enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext);
+LIBBPF_API int btf_ext__set_endianness(struct btf_ext *btf_ext,
+ enum btf_endianness endian);
LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
@@ -224,6 +232,7 @@ LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id);
+LIBBPF_API int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id);
/* func and func_proto construction APIs */
LIBBPF_API int btf__add_func(struct btf *btf, const char *name,
@@ -240,6 +249,8 @@ LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id,
/* tag construction API */
LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
int component_idx);
+LIBBPF_API int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id,
+ int component_idx);
struct btf_dedup_opts {
size_t sz;
@@ -258,6 +269,9 @@ LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
* to base BTF kinds, and verify those references are compatible with
* *base_btf*; if they are, *btf* is adjusted such that is re-parented to
* *base_btf* and type ids and strings are adjusted to accommodate this.
+ * @param btf split BTF object to relocate
+ * @param base_btf base BTF object
+ * @return 0 on success; negative error code, otherwise
*
* If successful, 0 is returned and **btf** now has **base_btf** as its
* base.
@@ -286,7 +300,7 @@ LIBBPF_API void btf_dump__free(struct btf_dump *d);
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
struct btf_dump_emit_type_decl_opts {
- /* size of this struct, for forward/backward compatiblity */
+ /* size of this struct, for forward/backward compatibility */
size_t sz;
/* optional field name for type declaration, e.g.:
* - struct my_struct <FNAME>
@@ -320,9 +334,10 @@ struct btf_dump_type_data_opts {
bool compact; /* no newlines/indentation */
bool skip_names; /* skip member/type names */
bool emit_zeroes; /* show 0-valued fields */
+ bool emit_strings; /* print char arrays as strings */
size_t :0;
};
-#define btf_dump_type_data_opts__last_field emit_zeroes
+#define btf_dump_type_data_opts__last_field emit_strings
LIBBPF_API int
btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 5dbca76b953f..6388392f49a0 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -67,6 +67,7 @@ struct btf_dump_data {
bool compact;
bool skip_names;
bool emit_zeroes;
+ bool emit_strings;
__u8 indent_lvl; /* base indent level */
char indent_str[BTF_DATA_INDENT_STR_LEN];
/* below are used during iteration */
@@ -225,6 +226,9 @@ static void btf_dump_free_names(struct hashmap *map)
size_t bkt;
struct hashmap_entry *cur;
+ if (!map)
+ return;
+
hashmap__for_each_entry(map, cur, bkt)
free((void *)cur->pkey);
@@ -304,7 +308,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
* definition, in which case they have to be declared inline as part of field
* type declaration; or as a top-level anonymous enum, typically used for
* declaring global constants. It's impossible to distinguish between two
- * without knowning whether given enum type was referenced from other type:
+ * without knowing whether given enum type was referenced from other type:
* top-level anonymous enum won't be referenced by anything, while embedded
* one will.
*/
@@ -867,8 +871,8 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
} pads[] = {
{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
};
- int new_off, pad_bits, bits, i;
- const char *pad_type;
+ int new_off = 0, pad_bits = 0, bits, i;
+ const char *pad_type = NULL;
if (cur_off >= next_off)
return; /* no gap */
@@ -1304,7 +1308,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
* chain, restore stack, emit warning, and try to
* proceed nevertheless
*/
- pr_warn("not enough memory for decl stack:%d", err);
+ pr_warn("not enough memory for decl stack: %s\n", errstr(err));
d->decl_stack_cnt = stack_start;
return;
}
@@ -1493,7 +1497,10 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
case BTF_KIND_TYPE_TAG:
btf_dump_emit_mods(d, decls);
name = btf_name_of(d, t->name_off);
- btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
+ if (btf_kflag(t))
+ btf_dump_printf(d, " __attribute__((%s))", name);
+ else
+ btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
break;
case BTF_KIND_ARRAY: {
const struct btf_array *a = btf_array(t);
@@ -1559,10 +1566,12 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
* Clang for BPF target generates func_proto with no
* args as a func_proto with a single void arg (e.g.,
* `int (*f)(void)` vs just `int (*f)()`). We are
- * going to pretend there are no args for such case.
+ * going to emit valid empty-args (void) syntax for
+ * such cases. Similarly and conveniently, the valid
+ * no-args case can be special-cased here as well.
*/
- if (vlen == 1 && p->type == 0) {
- btf_dump_printf(d, ")");
+ if (vlen == 0 || (vlen == 1 && p->type == 0)) {
+ btf_dump_printf(d, "void)");
return;
}
@@ -2022,6 +2031,52 @@ static int btf_dump_var_data(struct btf_dump *d,
return btf_dump_dump_type_data(d, NULL, t, type_id, data, 0, 0);
}
+static int btf_dump_string_data(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data)
+{
+ const struct btf_array *array = btf_array(t);
+ const char *chars = data;
+ __u32 i;
+
+ /* Make sure it is a NUL-terminated string. */
+ for (i = 0; i < array->nelems; i++) {
+ if ((void *)(chars + i) >= d->typed_dump->data_end)
+ return -E2BIG;
+ if (chars[i] == '\0')
+ break;
+ }
+ if (i == array->nelems) {
+ /* The caller will print this as a regular array. */
+ return -EINVAL;
+ }
+
+ btf_dump_data_pfx(d);
+ btf_dump_printf(d, "\"");
+
+ for (i = 0; i < array->nelems; i++) {
+ char c = chars[i];
+
+ if (c == '\0') {
+ /*
+ * When printing character arrays as strings, NUL bytes
+ * are always treated as string terminators; they are
+ * never printed.
+ */
+ break;
+ }
+ if (isprint(c))
+ btf_dump_printf(d, "%c", c);
+ else
+ btf_dump_printf(d, "\\x%02x", (__u8)c);
+ }
+
+ btf_dump_printf(d, "\"");
+
+ return 0;
+}
+
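
For callers, the new behavior threads through btf_dump_type_data_opts (extended in btf.h earlier in this patch). A minimal usage sketch; d, id, data and data_sz are assumed to be set up elsewhere by the caller:

#include <stdio.h>
#include <bpf/btf.h>

static void dump_char_array_as_string(struct btf_dump *d, __u32 id,
				      const void *data, size_t data_sz)
{
	LIBBPF_OPTS(btf_dump_type_data_opts, opts,
		.compact = true,
		.emit_strings = true, /* the option added by this patch */
	);

	if (btf_dump__dump_type_data(d, id, data, data_sz, &opts) < 0)
		fprintf(stderr, "btf_dump__dump_type_data failed\n");
}

Per the logic above, char arrays that are not NUL-terminated within their bounds fall back to regular per-element array output.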
static int btf_dump_array_data(struct btf_dump *d,
const struct btf_type *t,
__u32 id,
@@ -2049,8 +2104,13 @@ static int btf_dump_array_data(struct btf_dump *d,
* char arrays, so if size is 1 and element is
* printable as a char, we'll do that.
*/
- if (elem_size == 1)
+ if (elem_size == 1) {
+ if (d->typed_dump->emit_strings &&
+ btf_dump_string_data(d, t, id, data) == 0) {
+ return 0;
+ }
d->typed_dump->is_array_char = true;
+ }
}
/* note that we increment depth before calling btf_dump_print() below;
@@ -2538,6 +2598,7 @@ int btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
d->typed_dump->compact = OPTS_GET(opts, compact, false);
d->typed_dump->skip_names = OPTS_GET(opts, skip_names, false);
d->typed_dump->emit_zeroes = OPTS_GET(opts, emit_zeroes, false);
+ d->typed_dump->emit_strings = OPTS_GET(opts, emit_strings, false);
ret = btf_dump_dump_type_data(d, NULL, t, id, data, 0, 0);
diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c
index 17f8b32f94a0..53d1f3541bce 100644
--- a/tools/lib/bpf/btf_relocate.c
+++ b/tools/lib/bpf/btf_relocate.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2024, Oracle and/or its affiliates. */
#ifndef _GNU_SOURCE
@@ -212,7 +212,7 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
* need to match both name and size, otherwise embedding the base
* struct/union in the split type is invalid.
*/
- for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) {
+ for (id = r->nr_dist_base_types; id < r->nr_dist_base_types + r->nr_split_types; id++) {
err = btf_mark_embedded_composite_type_ids(r, id);
if (err)
goto done;
@@ -428,7 +428,7 @@ static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i)
} else {
off = r->str_map[*str_off];
if (!off) {
- pr_warn("string '%s' [offset %u] is not mapped to base BTF",
+ pr_warn("string '%s' [offset %u] is not mapped to base BTF\n",
btf__str_by_offset(r->btf, off), *str_off);
return -ENOENT;
}
diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
index c92e02394159..295dbda24580 100644
--- a/tools/lib/bpf/elf.c
+++ b/tools/lib/bpf/elf.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include "libbpf_internal.h"
-#include "str_error.h"
/* A SHT_GNU_versym section holds 16-bit words. This bit is set if
* the symbol is hidden and can only be seen when referenced using an
@@ -24,10 +23,12 @@
int elf_open(const char *binary_path, struct elf_fd *elf_fd)
{
- char errmsg[STRERR_BUFSIZE];
int fd, ret;
Elf *elf;
+ elf_fd->elf = NULL;
+ elf_fd->fd = -1;
+
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("elf: failed to init libelf for %s\n", binary_path);
return -LIBBPF_ERRNO__LIBELF;
@@ -35,8 +36,7 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd)
fd = open(binary_path, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = -errno;
- pr_warn("elf: failed to open %s: %s\n", binary_path,
- libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
+ pr_warn("elf: failed to open %s: %s\n", binary_path, errstr(ret));
return ret;
}
elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
index 50befe125ddc..b842b83e2480 100644
--- a/tools/lib/bpf/features.c
+++ b/tools/lib/bpf/features.c
@@ -6,7 +6,6 @@
#include "libbpf.h"
#include "libbpf_common.h"
#include "libbpf_internal.h"
-#include "str_error.h"
static inline __u64 ptr_to_u64(const void *ptr)
{
@@ -47,7 +46,6 @@ static int probe_kern_prog_name(int token_fd)
static int probe_kern_global_data(int token_fd)
{
- char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
@@ -67,9 +65,8 @@ static int probe_kern_global_data(int token_fd)
map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
if (map < 0) {
ret = -errno;
- cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
- pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, -ret);
+ pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
+ __func__, errstr(ret));
return ret;
}
@@ -267,7 +264,6 @@ static int probe_kern_probe_read_kernel(int token_fd)
static int probe_prog_bind_map(int token_fd)
{
- char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -285,9 +281,8 @@ static int probe_prog_bind_map(int token_fd)
map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
if (map < 0) {
ret = -errno;
- cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
- pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, -ret);
+ pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
+ __func__, errstr(ret));
return ret;
}
@@ -604,7 +599,8 @@ bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_
} else if (ret == 0) {
WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
} else {
- pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
+ pr_warn("Detection of kernel %s support failed: %s\n",
+ feat->desc, errstr(ret));
WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
}
}
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index cf3323fd47b8..cd5c2543f54d 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -4,6 +4,7 @@
#include <stdlib.h>
#include <string.h>
#include <errno.h>
+#include <asm/byteorder.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
@@ -13,7 +14,6 @@
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
-#include <asm/byteorder.h>
#define MAX_USED_MAPS 64
#define MAX_USED_PROGS 32
@@ -109,6 +109,7 @@ static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn in
static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
+static void emit_signature_match(struct bpf_gen *gen);
void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
@@ -151,6 +152,8 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps
/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
emit(gen, BPF_EXIT_INSN());
+ if (OPTS_GET(gen->opts, gen_hash, false))
+ emit_signature_match(gen);
}
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
@@ -367,6 +370,8 @@ static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
__emit_sys_close(gen);
}
+static void compute_sha_update_offsets(struct bpf_gen *gen);
+
int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
int i;
@@ -393,7 +398,10 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
blob_fd_array_off(gen, i));
emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
emit(gen, BPF_EXIT_INSN());
- pr_debug("gen: finish %d\n", gen->error);
+ if (OPTS_GET(gen->opts, gen_hash, false))
+ compute_sha_update_offsets(gen);
+
+ pr_debug("gen: finish %s\n", errstr(gen->error));
if (!gen->error) {
struct gen_loader_opts *opts = gen->opts;
@@ -401,6 +409,15 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
opts->insns_sz = gen->insn_cur - gen->insn_start;
opts->data = gen->data_start;
opts->data_sz = gen->data_cur - gen->data_start;
+
+ /* use target endianness for embedded loader */
+ if (gen->swapped_endian) {
+ struct bpf_insn *insn = (struct bpf_insn *)opts->insns;
+ int insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
+
+ for (i = 0; i < insn_cnt; i++)
+ bpf_insn_bswap(insn++);
+ }
}
return gen->error;
}
@@ -414,6 +431,44 @@ void bpf_gen__free(struct bpf_gen *gen)
free(gen);
}
+/*
+ * Fields of bpf_attr are set to values in native byte-order before being
+ * written to the target-bound data blob, and may need endian conversion.
+ * This macro allows providing the correct value in situ more simply than
+ * writing a separate converter for *all fields* of *all records* included
+ * in union bpf_attr. Note that sizeof(rval) should match the assignment
+ * target to avoid runtime problems.
+ */
+#define tgt_endian(rval) ({ \
+ typeof(rval) _val = (rval); \
+ if (gen->swapped_endian) { \
+ switch (sizeof(_val)) { \
+ case 1: break; \
+ case 2: _val = bswap_16(_val); break; \
+ case 4: _val = bswap_32(_val); break; \
+ case 8: _val = bswap_64(_val); break; \
+ default: pr_warn("unsupported bswap size!\n"); \
+ } \
+ } \
+ _val; \
+})
+
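
Because tgt_endian() is a GNU statement expression sized by its operand, one macro covers 2-, 4- and 8-byte bpf_attr fields alike; compute_sha_update_offsets() below is an in-tree user. A standalone demo of the behavior (gcc/clang; demo names are not part of libbpf):

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

static int swapped_endian = 1; /* stands in for gen->swapped_endian */

#define demo_tgt_endian(rval) ({			\
	typeof(rval) _val = (rval);			\
	if (swapped_endian) {				\
		switch (sizeof(_val)) {			\
		case 2: _val = bswap_16(_val); break;	\
		case 4: _val = bswap_32(_val); break;	\
		case 8: _val = bswap_64(_val); break;	\
		}					\
	}						\
	_val;						\
})

int main(void)
{
	uint32_t v = demo_tgt_endian((uint32_t)0x11223344);

	printf("0x%08x\n", v); /* prints 0x44332211 */
	return 0;
}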
+static void compute_sha_update_offsets(struct bpf_gen *gen)
+{
+ __u64 sha[SHA256_DWORD_SIZE];
+ __u64 sha_dw;
+ int i;
+
+ libbpf_sha256(gen->data_start, gen->data_cur - gen->data_start, (__u8 *)sha);
+ for (i = 0; i < SHA256_DWORD_SIZE; i++) {
+ struct bpf_insn *insn =
+ (struct bpf_insn *)(gen->insn_start + gen->hash_insn_offset[i]);
+ sha_dw = tgt_endian(sha[i]);
+ insn[0].imm = (__u32)sha_dw;
+ insn[1].imm = sha_dw >> 32;
+ }
+}
+
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
__u32 btf_raw_size)
{
@@ -422,11 +477,12 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
union bpf_attr attr;
memset(&attr, 0, attr_size);
- pr_debug("gen: load_btf: size %d\n", btf_raw_size);
btf_data = add_data(gen, btf_raw_data, btf_raw_size);
- attr.btf_size = btf_raw_size;
+ attr.btf_size = tgt_endian(btf_raw_size);
btf_load_attr = add_data(gen, &attr, attr_size);
+ pr_debug("gen: load_btf: off %d size %d, attr: off %d size %d\n",
+ btf_data, btf_raw_size, btf_load_attr, attr_size);
/* populate union bpf_attr with user provided log details */
move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
@@ -457,28 +513,29 @@ void bpf_gen__map_create(struct bpf_gen *gen,
union bpf_attr attr;
memset(&attr, 0, attr_size);
- attr.map_type = map_type;
- attr.key_size = key_size;
- attr.value_size = value_size;
- attr.map_flags = map_attr->map_flags;
- attr.map_extra = map_attr->map_extra;
+ attr.map_type = tgt_endian(map_type);
+ attr.key_size = tgt_endian(key_size);
+ attr.value_size = tgt_endian(value_size);
+ attr.map_flags = tgt_endian(map_attr->map_flags);
+ attr.map_extra = tgt_endian(map_attr->map_extra);
if (map_name)
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
- attr.numa_node = map_attr->numa_node;
- attr.map_ifindex = map_attr->map_ifindex;
- attr.max_entries = max_entries;
- attr.btf_key_type_id = map_attr->btf_key_type_id;
- attr.btf_value_type_id = map_attr->btf_value_type_id;
-
- pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
- attr.map_name, map_idx, map_type, attr.btf_value_type_id);
+ attr.numa_node = tgt_endian(map_attr->numa_node);
+ attr.map_ifindex = tgt_endian(map_attr->map_ifindex);
+ attr.max_entries = tgt_endian(max_entries);
+ attr.btf_key_type_id = tgt_endian(map_attr->btf_key_type_id);
+ attr.btf_value_type_id = tgt_endian(map_attr->btf_value_type_id);
map_create_attr = add_data(gen, &attr, attr_size);
- if (attr.btf_value_type_id)
+ pr_debug("gen: map_create: %s idx %d type %d value_type_id %d, attr: off %d size %d\n",
+ map_name, map_idx, map_type, map_attr->btf_value_type_id,
+ map_create_attr, attr_size);
+
+ if (map_attr->btf_value_type_id)
/* populate union bpf_attr with btf_fd saved in the stack earlier */
move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
stack_off(btf_fd));
- switch (attr.map_type) {
+ switch (map_type) {
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
@@ -498,8 +555,8 @@ void bpf_gen__map_create(struct bpf_gen *gen,
/* emit MAP_CREATE command */
emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
- attr.map_name, map_idx, map_type, value_size,
- attr.btf_value_type_id);
+ map_name, map_idx, map_type, value_size,
+ map_attr->btf_value_type_id);
emit_check_err(gen);
/* remember map_fd in the stack, if successful */
if (map_idx < 0) {
@@ -523,6 +580,29 @@ void bpf_gen__map_create(struct bpf_gen *gen,
emit_sys_close_stack(gen, stack_off(inner_map_fd));
}
+static void emit_signature_match(struct bpf_gen *gen)
+{
+ __s64 off;
+ int i;
+
+ for (i = 0; i < SHA256_DWORD_SIZE; i++) {
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX,
+ 0, 0, 0, 0));
+ emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, i * sizeof(__u64)));
+ gen->hash_insn_offset[i] = gen->insn_cur - gen->insn_start;
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_3, 0, 0, 0, 0, 0));
+
+ off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;
+ if (is_simm16(off)) {
+ emit(gen, BPF_MOV64_IMM(BPF_REG_7, -EINVAL));
+ emit(gen, BPF_JMP_REG(BPF_JNE, BPF_REG_2, BPF_REG_3, off));
+ } else {
+ gen->error = -ERANGE;
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
+ }
+ }
+}
+
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
enum bpf_attach_type type)
{
@@ -784,12 +864,12 @@ log:
emit_ksym_relo_log(gen, relo, kdesc->ref);
}
-static __u32 src_reg_mask(void)
+static __u32 src_reg_mask(struct bpf_gen *gen)
{
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- return 0x0f; /* src_reg,dst_reg,... */
-#elif defined(__BIG_ENDIAN_BITFIELD)
- return 0xf0; /* dst_reg,src_reg,... */
+#if defined(__LITTLE_ENDIAN_BITFIELD) /* src_reg,dst_reg,... */
+ return gen->swapped_endian ? 0xf0 : 0x0f;
+#elif defined(__BIG_ENDIAN_BITFIELD) /* dst_reg,src_reg,... */
+ return gen->swapped_endian ? 0x0f : 0xf0;
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
@@ -840,7 +920,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
clear_src_reg:
/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
- reg_mask = src_reg_mask();
+ reg_mask = src_reg_mask(gen);
emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));
@@ -931,48 +1011,94 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
cleanup_core_relo(gen);
}
+/* Convert func, line, and core relo info blobs to target endianness */
+static void info_blob_bswap(struct bpf_gen *gen, int func_info, int line_info,
+ int core_relos, struct bpf_prog_load_opts *load_attr)
+{
+ struct bpf_func_info *fi = gen->data_start + func_info;
+ struct bpf_line_info *li = gen->data_start + line_info;
+ struct bpf_core_relo *cr = gen->data_start + core_relos;
+ int i;
+
+ for (i = 0; i < load_attr->func_info_cnt; i++)
+ bpf_func_info_bswap(fi++);
+
+ for (i = 0; i < load_attr->line_info_cnt; i++)
+ bpf_line_info_bswap(li++);
+
+ for (i = 0; i < gen->core_relo_cnt; i++)
+ bpf_core_relo_bswap(cr++);
+}
+
void bpf_gen__prog_load(struct bpf_gen *gen,
enum bpf_prog_type prog_type, const char *prog_name,
const char *license, struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *load_attr, int prog_idx)
{
+ int func_info_tot_sz = load_attr->func_info_cnt *
+ load_attr->func_info_rec_size;
+ int line_info_tot_sz = load_attr->line_info_cnt *
+ load_attr->line_info_rec_size;
+ int core_relo_tot_sz = gen->core_relo_cnt *
+ sizeof(struct bpf_core_relo);
int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
union bpf_attr attr;
memset(&attr, 0, attr_size);
- pr_debug("gen: prog_load: type %d insns_cnt %zd progi_idx %d\n",
- prog_type, insn_cnt, prog_idx);
/* add license string to blob of bytes */
license_off = add_data(gen, license, strlen(license) + 1);
/* add insns to blob of bytes */
insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
+ pr_debug("gen: prog_load: prog_idx %d type %d insn off %d insns_cnt %zd license off %d\n",
+ prog_idx, prog_type, insns_off, insn_cnt, license_off);
- attr.prog_type = prog_type;
- attr.expected_attach_type = load_attr->expected_attach_type;
- attr.attach_btf_id = load_attr->attach_btf_id;
- attr.prog_ifindex = load_attr->prog_ifindex;
- attr.kern_version = 0;
- attr.insn_cnt = (__u32)insn_cnt;
- attr.prog_flags = load_attr->prog_flags;
-
- attr.func_info_rec_size = load_attr->func_info_rec_size;
- attr.func_info_cnt = load_attr->func_info_cnt;
- func_info = add_data(gen, load_attr->func_info,
- attr.func_info_cnt * attr.func_info_rec_size);
+ /* convert blob insns to target endianness */
+ if (gen->swapped_endian) {
+ struct bpf_insn *insn = gen->data_start + insns_off;
+ int i;
- attr.line_info_rec_size = load_attr->line_info_rec_size;
- attr.line_info_cnt = load_attr->line_info_cnt;
- line_info = add_data(gen, load_attr->line_info,
- attr.line_info_cnt * attr.line_info_rec_size);
+ for (i = 0; i < insn_cnt; i++, insn++)
+ bpf_insn_bswap(insn);
+ }
- attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
- attr.core_relo_cnt = gen->core_relo_cnt;
- core_relos = add_data(gen, gen->core_relos,
- attr.core_relo_cnt * attr.core_relo_rec_size);
+ attr.prog_type = tgt_endian(prog_type);
+ attr.expected_attach_type = tgt_endian(load_attr->expected_attach_type);
+ attr.attach_btf_id = tgt_endian(load_attr->attach_btf_id);
+ attr.prog_ifindex = tgt_endian(load_attr->prog_ifindex);
+ attr.kern_version = 0;
+ attr.insn_cnt = tgt_endian((__u32)insn_cnt);
+ attr.prog_flags = tgt_endian(load_attr->prog_flags);
+
+ attr.func_info_rec_size = tgt_endian(load_attr->func_info_rec_size);
+ attr.func_info_cnt = tgt_endian(load_attr->func_info_cnt);
+ func_info = add_data(gen, load_attr->func_info, func_info_tot_sz);
+ pr_debug("gen: prog_load: func_info: off %d cnt %d rec size %d\n",
+ func_info, load_attr->func_info_cnt,
+ load_attr->func_info_rec_size);
+
+ attr.line_info_rec_size = tgt_endian(load_attr->line_info_rec_size);
+ attr.line_info_cnt = tgt_endian(load_attr->line_info_cnt);
+ line_info = add_data(gen, load_attr->line_info, line_info_tot_sz);
+ pr_debug("gen: prog_load: line_info: off %d cnt %d rec size %d\n",
+ line_info, load_attr->line_info_cnt,
+ load_attr->line_info_rec_size);
+
+ attr.core_relo_rec_size = tgt_endian((__u32)sizeof(struct bpf_core_relo));
+ attr.core_relo_cnt = tgt_endian(gen->core_relo_cnt);
+ core_relos = add_data(gen, gen->core_relos, core_relo_tot_sz);
+ pr_debug("gen: prog_load: core_relos: off %d cnt %d rec size %zd\n",
+ core_relos, gen->core_relo_cnt,
+ sizeof(struct bpf_core_relo));
+
+ /* convert all info blobs to target endianness */
+ if (gen->swapped_endian)
+ info_blob_bswap(gen, func_info, line_info, core_relos, load_attr);
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
prog_load_attr = add_data(gen, &attr, attr_size);
+ pr_debug("gen: prog_load: attr: off %d size %d\n",
+ prog_load_attr, attr_size);
/* populate union bpf_attr with a pointer to license */
emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);
@@ -1040,7 +1166,6 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
int zero = 0;
memset(&attr, 0, attr_size);
- pr_debug("gen: map_update_elem: idx %d\n", map_idx);
value = add_data(gen, pvalue, value_size);
key = add_data(gen, &zero, sizeof(zero));
@@ -1068,6 +1193,8 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
map_update_attr = add_data(gen, &attr, attr_size);
+ pr_debug("gen: map_update_elem: idx %d, value: off %d size %d, attr: off %d size %d\n",
+ map_idx, value, value_size, map_update_attr, attr_size);
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
blob_fd_array_off(gen, map_idx));
emit_rel_store(gen, attr_field(map_update_attr, key), key);
@@ -1084,14 +1211,16 @@ void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slo
int attr_size = offsetofend(union bpf_attr, flags);
int map_update_attr, key;
union bpf_attr attr;
+ int tgt_slot;
memset(&attr, 0, attr_size);
- pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
- outer_map_idx, slot, inner_map_idx);
- key = add_data(gen, &slot, sizeof(slot));
+ tgt_slot = tgt_endian(slot);
+ key = add_data(gen, &tgt_slot, sizeof(tgt_slot));
map_update_attr = add_data(gen, &attr, attr_size);
+ pr_debug("gen: populate_outer_map: outer %d key %d inner %d, attr: off %d size %d\n",
+ outer_map_idx, slot, inner_map_idx, map_update_attr, attr_size);
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
blob_fd_array_off(gen, outer_map_idx));
emit_rel_store(gen, attr_field(map_update_attr, key), key);
@@ -1112,8 +1241,9 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
union bpf_attr attr;
memset(&attr, 0, attr_size);
- pr_debug("gen: map_freeze: idx %d\n", map_idx);
map_freeze_attr = add_data(gen, &attr, attr_size);
+ pr_debug("gen: map_freeze: idx %d, attr: off %d size %d\n",
+ map_idx, map_freeze_attr, attr_size);
move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
blob_fd_array_off(gen, map_idx));
/* emit MAP_FREEZE command */
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
index c12f8320e668..0c4f155e8eb7 100644
--- a/tools/lib/bpf/hashmap.h
+++ b/tools/lib/bpf/hashmap.h
@@ -166,8 +166,8 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
* @bkt: integer used as a bucket loop cursor
*/
#define hashmap__for_each_entry(map, cur, bkt) \
- for (bkt = 0; bkt < map->cap; bkt++) \
- for (cur = map->buckets[bkt]; cur; cur = cur->next)
+ for (bkt = 0; bkt < (map)->cap; bkt++) \
+ for (cur = (map)->buckets[bkt]; cur; cur = cur->next)
/*
* hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe
@@ -178,8 +178,8 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
* @bkt: integer used as a bucket loop cursor
*/
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
- for (bkt = 0; bkt < map->cap; bkt++) \
- for (cur = map->buckets[bkt]; \
+ for (bkt = 0; bkt < (map)->cap; bkt++) \
+ for (cur = (map)->buckets[bkt]; \
cur && ({tmp = cur->next; true; }); \
cur = tmp)
@@ -190,19 +190,19 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
* @key: key to iterate entries for
*/
#define hashmap__for_each_key_entry(map, cur, _key) \
- for (cur = map->buckets \
- ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
+ for (cur = (map)->buckets \
+ ? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
: NULL; \
cur; \
cur = cur->next) \
- if (map->equal_fn(cur->key, (_key), map->ctx))
+ if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
- for (cur = map->buckets \
- ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
+ for (cur = (map)->buckets \
+ ? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
: NULL; \
cur && ({ tmp = cur->next; true; }); \
cur = tmp) \
- if (map->equal_fn(cur->key, (_key), map->ctx))
+ if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
#endif /* __LIBBPF_HASHMAP_H */
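
The added parentheses around map are classic macro hygiene: once a caller passes anything but a plain identifier, precedence can silently change the expansion. A contrived but compilable illustration (hypothetical macros, not the hashmap API):

#include <stdio.h>

#define CAP_BAD(m)  m->cap	/* expands wrong for ternary args */
#define CAP_GOOD(m) (m)->cap	/* always dereferences the argument */

struct box { int cap; };

int main(void)
{
	struct box b = { .cap = 8 };
	struct box *p = &b, *q = NULL;

	/* CAP_BAD(q ? q : p) would expand to 'q ? q : p->cap', a type
	 * error mixing a pointer with an int; CAP_GOOD(q ? q : p) is
	 * '(q ? q : p)->cap' and behaves as intended.
	 */
	printf("%d\n", CAP_GOOD(q ? q : p)); /* prints 8 */
	return 0;
}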
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index a3be6f8fac09..3dc8a8078815 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -50,7 +50,6 @@
#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
-#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
@@ -60,6 +59,8 @@
#define BPF_FS_MAGIC 0xcafe4a11
#endif
+#define MAX_EVENT_NAME_LEN 64
+
#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
#define BPF_INSN_SZ (sizeof(struct bpf_insn))
@@ -133,6 +134,7 @@ static const char * const attach_type_name[] = {
[BPF_NETKIT_PRIMARY] = "netkit_primary",
[BPF_NETKIT_PEER] = "netkit_peer",
[BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session",
+ [BPF_TRACE_UPROBE_SESSION] = "trace_uprobe_session",
};
static const char * const link_type_name[] = {
@@ -188,6 +190,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
[BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
[BPF_MAP_TYPE_ARENA] = "arena",
+ [BPF_MAP_TYPE_INSN_ARRAY] = "insn_array",
};
static const char * const prog_type_name[] = {
@@ -283,7 +286,7 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
old_errno = errno;
va_start(args, format);
- __libbpf_pr(level, format, args);
+ print_fn(level, format, args);
va_end(args);
errno = old_errno;
@@ -315,8 +318,6 @@ static void pr_perm_msg(int err)
buf);
}
-#define STRERR_BUFSIZE 128
-
/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
@@ -369,6 +370,7 @@ enum reloc_type {
RELO_EXTERN_CALL,
RELO_SUBPROG_ADDR,
RELO_CORE,
+ RELO_INSN_ARRAY,
};
struct reloc_desc {
@@ -379,7 +381,16 @@ struct reloc_desc {
struct {
int map_idx;
int sym_off;
- int ext_idx;
+ /*
+ * The following two fields can be unionized, as the
+ * ext_idx field is used only for extern symbols, while
+ * sym_size is used only for jump tables, which are
+ * never extern.
+ */
+ union {
+ int ext_idx;
+ int sym_size;
+ };
};
};
};
@@ -421,6 +432,11 @@ struct bpf_sec_def {
libbpf_prog_attach_fn_t prog_attach_fn;
};
+struct bpf_light_subprog {
+ __u32 sec_insn_off;
+ __u32 sub_insn_off;
+};
+
/*
* bpf_prog should be a better name but it has been used in
* linux/filter.h.
@@ -493,11 +509,13 @@ struct bpf_program {
__u32 line_info_rec_size;
__u32 line_info_cnt;
__u32 prog_flags;
+ __u8 hash[SHA256_DIGEST_LENGTH];
+
+ struct bpf_light_subprog *subprogs;
+ __u32 subprog_cnt;
};
struct bpf_struct_ops {
- const char *tname;
- const struct btf_type *type;
struct bpf_program **progs;
__u32 *kern_func_off;
/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
@@ -574,6 +592,7 @@ struct bpf_map {
bool autocreate;
bool autoattach;
__u64 map_extra;
+ struct bpf_program *excl_prog;
};
enum extern_type {
@@ -596,7 +615,7 @@ struct extern_desc {
int sym_idx;
int btf_id;
int sec_btf_id;
- const char *name;
+ char *name;
char *essent_name;
bool is_set;
bool is_weak;
@@ -667,15 +686,23 @@ struct elf_state {
int symbols_shndx;
bool has_st_ops;
int arena_data_shndx;
+ int jumptables_data_shndx;
};
struct usdt_manager;
+enum bpf_object_state {
+ OBJ_OPEN,
+ OBJ_PREPARED,
+ OBJ_LOADED,
+};
+
struct bpf_object {
char name[BPF_OBJ_NAME_LEN];
char license[64];
__u32 kern_version;
+ enum bpf_object_state state;
struct bpf_program *programs;
size_t nr_programs;
struct bpf_map *maps;
@@ -687,7 +714,6 @@ struct bpf_object {
int nr_extern;
int kconfig_map_idx;
- bool loaded;
bool has_subcalls;
bool has_rodata;
@@ -696,6 +722,8 @@ struct bpf_object {
/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
struct elf_state efile;
+ unsigned char byteorder;
+
struct btf *btf;
struct btf_ext *btf_ext;
@@ -726,10 +754,20 @@ struct bpf_object {
struct usdt_manager *usdt_man;
- struct bpf_map *arena_map;
+ int arena_map_idx;
void *arena_data;
size_t arena_data_sz;
+ void *jumptables_data;
+ size_t jumptables_data_sz;
+
+ struct {
+ struct bpf_program *prog;
+ int sym_off;
+ int fd;
+ } *jumptable_maps;
+ size_t jumptable_map_cnt;
+
struct kern_feature_cache *feat_cache;
char *token_path;
int token_fd;
@@ -756,6 +794,7 @@ void bpf_program__unload(struct bpf_program *prog)
zfree(&prog->func_info);
zfree(&prog->line_info);
+ zfree(&prog->subprogs);
}
static void bpf_program__exit(struct bpf_program *prog)
@@ -889,7 +928,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return -LIBBPF_ERRNO__FORMAT;
}
- if (sec_off + prog_sz > sec_sz) {
+ if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) {
pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
sec_name, sec_off);
return -LIBBPF_ERRNO__FORMAT;
@@ -942,6 +981,20 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return 0;
}
+static void bpf_object_bswap_progs(struct bpf_object *obj)
+{
+ struct bpf_program *prog = obj->programs;
+ struct bpf_insn *insn;
+ int p, i;
+
+ for (p = 0; p < obj->nr_programs; p++, prog++) {
+ insn = prog->insns;
+ for (i = 0; i < prog->insns_cnt; i++, insn++)
+ bpf_insn_bswap(insn);
+ }
+ pr_debug("converted %zu BPF programs to native byte order\n", obj->nr_programs);
+}
+
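
Converting programs in place relies on swapping each instruction's multi-byte fields; conceptually it looks like the sketch below (the real helper is bpf_insn_bswap() in libbpf_internal.h, which may differ in detail):

#include <byteswap.h>
#include <linux/bpf.h>

static void insn_bswap_sketch(struct bpf_insn *insn)
{
	__u8 tmp = insn->dst_reg;

	/* dst/src are 4-bit fields sharing one byte, so a cross-endian
	 * object needs their nibbles exchanged rather than byteswapped
	 */
	insn->dst_reg = insn->src_reg;
	insn->src_reg = tmp;
	insn->off = bswap_16(insn->off);
	insn->imm = bswap_32(insn->imm);
}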
static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
@@ -988,37 +1041,35 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
{
const struct btf_type *kern_type, *kern_vtype;
const struct btf_member *kern_data_member;
- struct btf *btf;
+ struct btf *btf = NULL;
__s32 kern_vtype_id, kern_type_id;
- char tname[256];
+ char tname[192], stname[256];
__u32 i;
snprintf(tname, sizeof(tname), "%.*s",
(int)bpf_core_essential_name_len(tname_raw), tname_raw);
- kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
- &btf, mod_btf);
- if (kern_type_id < 0) {
- pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
- tname);
- return kern_type_id;
- }
- kern_type = btf__type_by_id(btf, kern_type_id);
+ snprintf(stname, sizeof(stname), "%s%s", STRUCT_OPS_VALUE_PREFIX, tname);
- /* Find the corresponding "map_value" type that will be used
- * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
- * find "struct bpf_struct_ops_tcp_congestion_ops" from the
- * btf_vmlinux.
+ /* Look for the corresponding "map_value" type that will be used
+ * in map_update(BPF_MAP_TYPE_STRUCT_OPS) first, and figure out
+ * the btf and the mod_btf along the way.
+ * For example, find "struct bpf_struct_ops_tcp_congestion_ops".
*/
- kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
- tname, BTF_KIND_STRUCT);
+ kern_vtype_id = find_ksym_btf_id(obj, stname, BTF_KIND_STRUCT, &btf, mod_btf);
if (kern_vtype_id < 0) {
- pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
- STRUCT_OPS_VALUE_PREFIX, tname);
+ pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", stname);
return kern_vtype_id;
}
kern_vtype = btf__type_by_id(btf, kern_vtype_id);
+ kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+ if (kern_type_id < 0) {
+ pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname);
+ return kern_type_id;
+ }
+ kern_type = btf__type_by_id(btf, kern_type_id);
+
/* Find "struct tcp_congestion_ops" from
* struct bpf_struct_ops_tcp_congestion_ops {
* [ ... ]
@@ -1031,8 +1082,8 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
break;
}
if (i == btf_vlen(kern_vtype)) {
- pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
- tname, STRUCT_OPS_VALUE_PREFIX, tname);
+ pr_warn("struct_ops init_kern: struct %s data is not found in struct %s\n",
+ tname, stname);
return -EINVAL;
}
@@ -1083,11 +1134,14 @@ static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
continue;
for (j = 0; j < obj->nr_maps; ++j) {
+ const struct btf_type *type;
+
map = &obj->maps[j];
if (!bpf_map__is_struct_ops(map))
continue;
- vlen = btf_vlen(map->st_ops->type);
+ type = btf__type_by_id(obj->btf, map->st_ops->type_id);
+ vlen = btf_vlen(type);
for (k = 0; k < vlen; ++k) {
slot_prog = map->st_ops->progs[k];
if (prog != slot_prog)
@@ -1115,14 +1169,14 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
const struct btf *btf = obj->btf;
struct bpf_struct_ops *st_ops;
const struct btf *kern_btf;
- struct module_btf *mod_btf;
+ struct module_btf *mod_btf = NULL;
void *data, *kern_data;
const char *tname;
int err;
st_ops = map->st_ops;
- type = st_ops->type;
- tname = st_ops->tname;
+ type = btf__type_by_id(btf, st_ops->type_id);
+ tname = btf__name_by_offset(btf, type->name_off);
err = find_struct_ops_kern_types(obj, tname, &mod_btf,
&kern_type, &kern_type_id,
&kern_vtype, &kern_vtype_id,
@@ -1423,8 +1477,6 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
memcpy(st_ops->data,
data->d_buf + vsi->offset,
type->size);
- st_ops->tname = tname;
- st_ops->type = type;
st_ops->type_id = type_id;
pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
@@ -1493,9 +1545,10 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.btf_maps_shndx = -1;
obj->kconfig_map_idx = -1;
+ obj->arena_map_idx = -1;
obj->kern_version = get_kernel_version();
- obj->loaded = false;
+ obj->state = OBJ_OPEN;
return obj;
}
@@ -1507,6 +1560,7 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
elf_end(obj->efile.elf);
obj->efile.elf = NULL;
+ obj->efile.ehdr = NULL;
obj->efile.symbols = NULL;
obj->efile.arena_data = NULL;
@@ -1534,11 +1588,8 @@ static int bpf_object__elf_init(struct bpf_object *obj)
} else {
obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
if (obj->efile.fd < 0) {
- char errmsg[STRERR_BUFSIZE], *cp;
-
err = -errno;
- cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
+ pr_warn("elf: failed to open %s: %s\n", obj->path, errstr(err));
return err;
}
@@ -1572,6 +1623,16 @@ static int bpf_object__elf_init(struct bpf_object *obj)
goto errout;
}
+ /* Validate ELF object endianness... */
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB &&
+ ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
+ err = -LIBBPF_ERRNO__ENDIAN;
+ pr_warn("elf: '%s' has unknown byte order\n", obj->path);
+ goto errout;
+ }
+ /* and save after bpf_object_open() frees ELF data */
+ obj->byteorder = ehdr->e_ident[EI_DATA];
+
if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
pr_warn("elf: failed to get section names section index for %s: %s\n",
obj->path, elf_errmsg(-1));
@@ -1600,19 +1661,15 @@ errout:
return err;
}
-static int bpf_object__check_endianness(struct bpf_object *obj)
+static bool is_native_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
- return 0;
+ return obj->byteorder == ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
- return 0;
+ return obj->byteorder == ELFDATA2MSB;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
- pr_warn("elf: endianness mismatch in %s.\n", obj->path);
- return -LIBBPF_ERRNO__ENDIAN;
}
static int
@@ -1699,24 +1756,27 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam
return ERR_PTR(-ENOENT);
}
-/* Some versions of Android don't provide memfd_create() in their libc
- * implementation, so avoid complications and just go straight to Linux
- * syscall.
- */
-static int sys_memfd_create(const char *name, unsigned flags)
-{
- return syscall(__NR_memfd_create, name, flags);
-}
-
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif
+#ifndef MFD_NOEXEC_SEAL
+#define MFD_NOEXEC_SEAL 0x0008U
+#endif
static int create_placeholder_fd(void)
{
+ unsigned int flags = MFD_CLOEXEC | MFD_NOEXEC_SEAL;
+ const char *name = "libbpf-placeholder-fd";
int fd;
- fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
+ fd = ensure_good_fd(sys_memfd_create(name, flags));
+ if (fd >= 0)
+ return fd;
+ else if (errno != EINVAL)
+ return -errno;
+
+ /* Possibly running on kernel without MFD_NOEXEC_SEAL */
+ fd = ensure_good_fd(sys_memfd_create(name, flags & ~MFD_NOEXEC_SEAL));
if (fd < 0)
return -errno;
return fd;
@@ -1849,7 +1909,7 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
sfx_len, real_name);
- /* sanitise map name to characters allowed by kernel */
+ /* sanities map name to characters allowed by kernel */
for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
if (!isalnum(*p) && *p != '_' && *p != '.')
*p = '_';
@@ -1938,8 +1998,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
if (map->mmaped == MAP_FAILED) {
err = -errno;
map->mmaped = NULL;
- pr_warn("failed to alloc map '%s' content buffer: %d\n",
- map->name, err);
+ pr_warn("failed to alloc map '%s' content buffer: %s\n", map->name, errstr(err));
zfree(&map->real_name);
zfree(&map->name);
return err;
@@ -2075,7 +2134,7 @@ static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
}
len = strlen(value);
- if (value[len - 1] != '"') {
+ if (len < 2 || value[len - 1] != '"') {
pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
ext->name, value);
return -EINVAL;
@@ -2103,7 +2162,7 @@ static int parse_u64(const char *value, __u64 *res)
*res = strtoull(value, &value_end, 0);
if (errno) {
err = -errno;
- pr_warn("failed to parse '%s' as integer: %d\n", value, err);
+ pr_warn("failed to parse '%s': %s\n", value, errstr(err));
return err;
}
if (*value_end) {
@@ -2269,8 +2328,8 @@ static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
while (gzgets(file, buf, sizeof(buf))) {
err = bpf_object__process_kconfig_line(obj, buf, data);
if (err) {
- pr_warn("error parsing system Kconfig line '%s': %d\n",
- buf, err);
+ pr_warn("error parsing system Kconfig line '%s': %s\n",
+ buf, errstr(err));
goto out;
}
}
@@ -2290,15 +2349,15 @@ static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
file = fmemopen((void *)config, strlen(config), "r");
if (!file) {
err = -errno;
- pr_warn("failed to open in-memory Kconfig: %d\n", err);
+ pr_warn("failed to open in-memory Kconfig: %s\n", errstr(err));
return err;
}
while (fgets(buf, sizeof(buf), file)) {
err = bpf_object__process_kconfig_line(obj, buf, data);
if (err) {
- pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
- buf, err);
+ pr_warn("error parsing in-memory Kconfig line '%s': %s\n",
+ buf, errstr(err));
break;
}
}
@@ -2934,7 +2993,7 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
const long page_sz = sysconf(_SC_PAGE_SIZE);
size_t mmap_sz;
- mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+ mmap_sz = bpf_map_mmap_sz(map);
if (roundup(data_sz, page_sz) > mmap_sz) {
pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
sec_name, mmap_sz, data_sz);
@@ -2968,7 +3027,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
data = elf_sec_data(obj, scn);
- if (!scn || !data) {
+ if (!data) {
pr_warn("elf: failed to get %s map definitions for %s\n",
MAPS_ELF_SEC, obj->path);
return -EINVAL;
@@ -3008,12 +3067,12 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
if (map->def.type != BPF_MAP_TYPE_ARENA)
continue;
- if (obj->arena_map) {
+ if (obj->arena_map_idx >= 0) {
pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
- map->name, obj->arena_map->name);
+ map->name, obj->maps[obj->arena_map_idx].name);
return -EINVAL;
}
- obj->arena_map = map;
+ obj->arena_map_idx = i;
if (obj->efile.arena_data) {
err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
@@ -3023,7 +3082,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
return err;
}
}
- if (obj->efile.arena_data && !obj->arena_map) {
+ if (obj->efile.arena_data && obj->arena_map_idx < 0) {
pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
ARENA_SEC);
return -ENOENT;
@@ -3213,7 +3272,7 @@ static int bpf_object__init_btf(struct bpf_object *obj,
err = libbpf_get_error(obj->btf);
if (err) {
obj->btf = NULL;
- pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
+ pr_warn("Error loading ELF section %s: %s.\n", BTF_ELF_SEC, errstr(err));
goto out;
}
/* enforce 8-byte pointers for BPF-targeted BTFs */
@@ -3231,8 +3290,8 @@ static int bpf_object__init_btf(struct bpf_object *obj,
obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
err = libbpf_get_error(obj->btf_ext);
if (err) {
- pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
- BTF_EXT_ELF_SEC, err);
+ pr_warn("Error loading ELF section %s: %s. Ignored and continue.\n",
+ BTF_EXT_ELF_SEC, errstr(err));
obj->btf_ext = NULL;
goto out;
}
@@ -3324,8 +3383,8 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
if (t->size == 0) {
err = find_elf_sec_sz(obj, sec_name, &size);
if (err || !size) {
- pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
- sec_name, size, err);
+ pr_debug("sec '%s': failed to determine size from ELF: size %u, err %s\n",
+ sec_name, size, errstr(err));
return -ENOENT;
}
@@ -3479,7 +3538,7 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
obj->btf_vmlinux = btf__load_vmlinux_btf();
err = libbpf_get_error(obj->btf_vmlinux);
if (err) {
- pr_warn("Error loading vmlinux BTF: %d\n", err);
+ pr_warn("Error loading vmlinux BTF: %s\n", errstr(err));
obj->btf_vmlinux = NULL;
return err;
}
@@ -3582,11 +3641,14 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
report:
if (err) {
btf_mandatory = kernel_needs_btf(obj);
- pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
- btf_mandatory ? "BTF is mandatory, can't proceed."
- : "BTF is optional, ignoring.");
- if (!btf_mandatory)
+ if (btf_mandatory) {
+ pr_warn("Error loading .BTF into kernel: %s. BTF is mandatory, can't proceed.\n",
+ errstr(err));
+ } else {
+ pr_info("Error loading .BTF into kernel: %s. BTF is optional, ignoring.\n",
+ errstr(err));
err = 0;
+ }
}
return err;
}
@@ -3911,6 +3973,13 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (strcmp(name, ARENA_SEC) == 0) {
obj->efile.arena_data = data;
obj->efile.arena_data_shndx = idx;
+ } else if (strcmp(name, JUMPTABLES_SEC) == 0) {
+ obj->jumptables_data = malloc(data->d_size);
+ if (!obj->jumptables_data)
+ return -ENOMEM;
+ memcpy(obj->jumptables_data, data->d_buf, data->d_size);
+ obj->jumptables_data_sz = data->d_size;
+ obj->efile.jumptables_data_shndx = idx;
} else {
pr_info("elf: skipping unrecognized data section(%d) %s\n",
idx, name);
@@ -3954,6 +4023,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
return -LIBBPF_ERRNO__FORMAT;
}
+ /* change BPF program insns to native endianness for introspection */
+ if (!is_native_endianness(obj))
+ bpf_object_bswap_progs(obj);
+
/* sort BPF programs by section name and in-section instruction offset
* for faster search
*/
@@ -3986,7 +4059,7 @@ static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
return true;
/* global function */
- return bind == STB_GLOBAL && type == STT_FUNC;
+ return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC;
}
static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
@@ -4222,7 +4295,9 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return ext->btf_id;
}
t = btf__type_by_id(obj->btf, ext->btf_id);
- ext->name = btf__name_by_offset(obj->btf, t->name_off);
+ ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off));
+ if (!ext->name)
+ return -ENOMEM;
ext->sym_idx = i;
ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
@@ -4390,7 +4465,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
{
- return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
+ return prog->sec_idx == obj->efile.text_shndx;
}
struct bpf_program *
@@ -4445,6 +4520,44 @@ bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
}
}
+static int bpf_prog_compute_hash(struct bpf_program *prog)
+{
+ struct bpf_insn *purged;
+ int i, err = 0;
+
+ purged = calloc(prog->insns_cnt, BPF_INSN_SZ);
+ if (!purged)
+ return -ENOMEM;
+
+ /* If relocations have been done, the map_fd needs to be
+ * discarded for the digest calculation.
+ */
+ for (i = 0; i < prog->insns_cnt; i++) {
+ purged[i] = prog->insns[i];
+ if (purged[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
+ (purged[i].src_reg == BPF_PSEUDO_MAP_FD ||
+ purged[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
+ purged[i].imm = 0;
+ i++;
+ if (i >= prog->insns_cnt ||
+ prog->insns[i].code != 0 ||
+ prog->insns[i].dst_reg != 0 ||
+ prog->insns[i].src_reg != 0 ||
+ prog->insns[i].off != 0) {
+ err = -EINVAL;
+ goto out;
+ }
+ purged[i] = prog->insns[i];
+ purged[i].imm = 0;
+ }
+ }
+ libbpf_sha256(purged, prog->insns_cnt * sizeof(struct bpf_insn),
+ prog->hash);
+out:
+ free(purged);
+ return err;
+}
+
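The extra skip inside the purge loop above is needed because ld_imm64 is the only 16-byte BPF instruction: the 64-bit immediate (a map fd or map value pointer here) spans the imm fields of two consecutive struct bpf_insn slots, so both halves must be zeroed before hashing. A minimal sketch of that encoding; the register and fd values are made up for illustration:

    #include <linux/bpf.h>

    /* ld_imm64 takes two insn slots; the second slot carries the upper
     * 32 bits of the immediate and must have code/dst_reg/src_reg/off
     * all zero, which is exactly what the loop above validates. */
    struct bpf_insn ld_map_fd[2] = {
        { .code = BPF_LD | BPF_IMM | BPF_DW,
          .dst_reg = BPF_REG_1,
          .src_reg = BPF_PSEUDO_MAP_FD,
          .imm = 42 },  /* lower 32 bits: the map fd */
        { .imm = 0 },   /* upper 32 bits of the immediate */
    };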
static int bpf_program__record_reloc(struct bpf_program *prog,
struct reloc_desc *reloc_desc,
__u32 insn_idx, const char *sym_name,
@@ -4542,10 +4655,30 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
/* arena data relocation */
if (shdr_idx == obj->efile.arena_data_shndx) {
+ if (obj->arena_map_idx < 0) {
+ pr_warn("prog '%s': bad arena data relocation at insn %u, no arena maps defined\n",
+ prog->name, insn_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
reloc_desc->type = RELO_DATA;
reloc_desc->insn_idx = insn_idx;
- reloc_desc->map_idx = obj->arena_map - obj->maps;
+ reloc_desc->map_idx = obj->arena_map_idx;
+ reloc_desc->sym_off = sym->st_value;
+
+ map = &obj->maps[obj->arena_map_idx];
+ pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
+ prog->name, obj->arena_map_idx, map->name, map->sec_idx,
+ map->sec_offset, insn_idx);
+ return 0;
+ }
+
+ /* jump table data relocation */
+ if (shdr_idx == obj->efile.jumptables_data_shndx) {
+ reloc_desc->type = RELO_INSN_ARRAY;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->map_idx = -1;
reloc_desc->sym_off = sym->st_value;
+ reloc_desc->sym_size = sym->st_size;
return 0;
}
@@ -4784,8 +4917,8 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
fp = fopen(file, "re");
if (!fp) {
err = -errno;
- pr_warn("failed to open %s: %d. No procfs support?\n", file,
- err);
+ pr_warn("failed to open %s: %s. No procfs support?\n", file,
+ errstr(err));
return err;
}
@@ -4807,6 +4940,11 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
return 0;
}
+static bool map_is_created(const struct bpf_map *map)
+{
+ return map->obj->state >= OBJ_PREPARED || map->reused;
+}
+
bool bpf_map__autocreate(const struct bpf_map *map)
{
return map->autocreate;
@@ -4814,7 +4952,7 @@ bool bpf_map__autocreate(const struct bpf_map *map)
int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
{
- if (map->obj->loaded)
+ if (map_is_created(map))
return libbpf_err(-EBUSY);
map->autocreate = autocreate;
@@ -4908,7 +5046,7 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
{
- if (map->obj->loaded)
+ if (map_is_created(map))
return libbpf_err(-EBUSY);
map->def.max_entries = max_entries;
@@ -4940,8 +5078,8 @@ static int bpf_object_prepare_token(struct bpf_object *obj)
bpffs_fd = open(bpffs_path, O_DIRECTORY, O_RDWR);
if (bpffs_fd < 0) {
err = -errno;
- __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
- obj->name, err, bpffs_path,
+ __pr(level, "object '%s': failed (%s) to open BPF FS mount at '%s'%s\n",
+ obj->name, errstr(err), bpffs_path,
mandatory ? "" : ", skipping optional step...");
return mandatory ? err : 0;
}
@@ -4975,7 +5113,6 @@ static int bpf_object_prepare_token(struct bpf_object *obj)
static int
bpf_object__probe_loading(struct bpf_object *obj)
{
- char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -4991,7 +5128,8 @@ bpf_object__probe_loading(struct bpf_object *obj)
ret = bump_rlimit_memlock();
if (ret)
- pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
+ pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %s), you might need to do it explicitly!\n",
+ errstr(ret));
/* make sure basic loading works */
ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
@@ -4999,11 +5137,8 @@ bpf_object__probe_loading(struct bpf_object *obj)
ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
if (ret < 0) {
ret = errno;
- cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
- pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
- "program. Make sure your kernel supports BPF "
- "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
- "set to big enough value.\n", __func__, cp, ret);
+ pr_warn("Error in %s(): %s. Couldn't load trivial BPF program. Make sure your kernel supports BPF (CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is set to big enough value.\n",
+ __func__, errstr(ret));
return -ret;
}
close(ret);
@@ -5028,7 +5163,6 @@ bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
{
struct bpf_map_info map_info;
- char msg[STRERR_BUFSIZE];
__u32 map_info_len = sizeof(map_info);
int err;
@@ -5038,10 +5172,20 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
if (err) {
pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
- libbpf_strerror_r(errno, msg, sizeof(msg)));
+ errstr(err));
return false;
}
+ /*
+ * bpf_get_map_info_by_fd() for DEVMAP will always return flags with
+ * BPF_F_RDONLY_PROG set, but it generally is not set at map creation time.
+ * Thus, ignore the BPF_F_RDONLY_PROG flag in the flags returned from
+ * bpf_get_map_info_by_fd() when checking for compatibility with an
+ * existing DEVMAP.
+ */
+ if (map->def.type == BPF_MAP_TYPE_DEVMAP || map->def.type == BPF_MAP_TYPE_DEVMAP_HASH)
+ map_info.map_flags &= ~BPF_F_RDONLY_PROG;
+
return (map_info.type == map->def.type &&
map_info.key_size == map->def.key_size &&
map_info.value_size == map->def.value_size &&
@@ -5053,7 +5197,6 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
static int
bpf_object__reuse_map(struct bpf_map *map)
{
- char *cp, errmsg[STRERR_BUFSIZE];
int err, pin_fd;
pin_fd = bpf_obj_get(map->pin_path);
@@ -5065,9 +5208,8 @@ bpf_object__reuse_map(struct bpf_map *map)
return 0;
}
- cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
pr_warn("couldn't retrieve pinned map '%s': %s\n",
- map->pin_path, cp);
+ map->pin_path, errstr(err));
return err;
}
@@ -5093,8 +5235,8 @@ static int
bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
{
enum libbpf_map_type map_type = map->libbpf_type;
- char *cp, errmsg[STRERR_BUFSIZE];
int err, zero = 0;
+ size_t mmap_sz;
if (obj->gen_loader) {
bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
@@ -5107,9 +5249,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
if (err) {
err = -errno;
- cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("Error setting initial map(%s) contents: %s\n",
- map->name, cp);
+ pr_warn("map '%s': failed to set initial contents: %s\n",
+ bpf_map__name(map), errstr(err));
return err;
}
@@ -5118,22 +5259,48 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
err = bpf_map_freeze(map->fd);
if (err) {
err = -errno;
- cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("Error freezing map(%s) as read-only: %s\n",
- map->name, cp);
+ pr_warn("map '%s': failed to freeze as read-only: %s\n",
+ bpf_map__name(map), errstr(err));
return err;
}
}
+
+ /* Remap the anonymous mmap()-ed "map initialization image" as
+ * BPF map-backed mmap()-ed memory, preserving the same memory
+ * address. This causes the kernel to change the process's page
+ * table to point to a different piece of kernel memory, but from
+ * the userspace point of view the address (and its contents,
+ * identical at this point) stays the same. The mapping is
+ * released by bpf_object__close() as part of the normal clean-up
+ * procedure.
+ */
+ mmap_sz = bpf_map_mmap_sz(map);
+ if (map->def.map_flags & BPF_F_MMAPABLE) {
+ void *mmaped;
+ int prot;
+
+ if (map->def.map_flags & BPF_F_RDONLY_PROG)
+ prot = PROT_READ;
+ else
+ prot = PROT_READ | PROT_WRITE;
+ mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
+ if (mmaped == MAP_FAILED) {
+ err = -errno;
+ pr_warn("map '%s': failed to re-mmap() contents: %s\n",
+ bpf_map__name(map), errstr(err));
+ return err;
+ }
+ map->mmaped = mmaped;
+ } else if (map->mmaped) {
+ munmap(map->mmaped, mmap_sz);
+ map->mmaped = NULL;
+ }
+
return 0;
}
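Since the re-mmap() above uses MAP_FIXED at the original address, a pointer into an internal map's memory obtained before load stays valid after load and then aliases the kernel-backed map. A sketch under the assumption that obj has a .bss map:

    size_t sz;
    struct bpf_map *bss = bpf_object__find_map_by_name(obj, ".bss");
    void *data = bpf_map__initial_value(bss, &sz); /* anonymous image */

    if (bpf_object__load(obj) == 0) {
        /* 'data' still points at the same address, now backed by the
         * BPF map itself; writes here are visible to the program */
    }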
static void bpf_map__destroy(struct bpf_map *map);
-static bool map_is_created(const struct bpf_map *map)
-{
- return map->obj->loaded || map->reused;
-}
-
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
LIBBPF_OPTS(bpf_map_create_opts, create_attr);
@@ -5150,6 +5317,14 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.token_fd = obj->token_fd;
if (obj->token_fd)
create_attr.map_flags |= BPF_F_TOKEN_FD;
+ if (map->excl_prog) {
+ err = bpf_prog_compute_hash(map->excl_prog);
+ if (err)
+ return err;
+
+ create_attr.excl_prog_hash = map->excl_prog->hash;
+ create_attr.excl_prog_hash_size = SHA256_DIGEST_LENGTH;
+ }
if (bpf_map__is_struct_ops(map)) {
create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
@@ -5172,8 +5347,8 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
return err;
err = bpf_object__create_map(obj, map->inner_map, true);
if (err) {
- pr_warn("map '%s': failed to create inner map: %d\n",
- map->name, err);
+ pr_warn("map '%s': failed to create inner map: %s\n",
+ map->name, errstr(err));
return err;
}
map->inner_map_fd = map->inner_map->fd;
@@ -5227,12 +5402,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
def->max_entries, &create_attr);
}
if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
- char *cp, errmsg[STRERR_BUFSIZE];
-
err = -errno;
- cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
- map->name, cp, err);
+ pr_warn("Error in bpf_create_map_xattr(%s): %s. Retrying without BTF.\n",
+ map->name, errstr(err));
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
@@ -5287,8 +5459,8 @@ static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
}
if (err) {
err = -errno;
- pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
- map->name, i, targ_map->name, fd, err);
+ pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %s\n",
+ map->name, i, targ_map->name, fd, errstr(err));
return err;
}
pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
@@ -5320,8 +5492,8 @@ static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
err = bpf_map_update_elem(map->fd, &i, &fd, 0);
if (err) {
err = -errno;
- pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
- map->name, i, targ_prog->name, fd, err);
+ pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %s\n",
+ map->name, i, targ_prog->name, fd, errstr(err));
return err;
}
pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
@@ -5374,7 +5546,6 @@ static int
bpf_object__create_maps(struct bpf_object *obj)
{
struct bpf_map *map;
- char *cp, errmsg[STRERR_BUFSIZE];
unsigned int i, j;
int err;
bool retried;
@@ -5440,8 +5611,7 @@ retry:
err = bpf_object__populate_internal_map(obj, map);
if (err < 0)
goto err_out;
- }
- if (map->def.type == BPF_MAP_TYPE_ARENA) {
+ } else if (map->def.type == BPF_MAP_TYPE_ARENA) {
map->mmaped = mmap((void *)(long)map->map_extra,
bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
@@ -5449,8 +5619,8 @@ retry:
if (map->mmaped == MAP_FAILED) {
err = -errno;
map->mmaped = NULL;
- pr_warn("map '%s': failed to mmap arena: %d\n",
- map->name, err);
+ pr_warn("map '%s': failed to mmap arena: %s\n",
+ map->name, errstr(err));
return err;
}
if (obj->arena_data) {
@@ -5472,8 +5642,8 @@ retry:
retried = true;
goto retry;
}
- pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
- map->name, map->pin_path, err);
+ pr_warn("map '%s': failed to auto-pin at '%s': %s\n",
+ map->name, map->pin_path, errstr(err));
goto err_out;
}
}
@@ -5482,8 +5652,7 @@ retry:
return 0;
err_out:
- cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
+ pr_warn("map '%s': failed to create: %s\n", map->name, errstr(err));
pr_perm_msg(err);
for (j = 0; j < i; j++)
zclose(obj->maps[j].fd);
@@ -5607,7 +5776,7 @@ static int load_module_btfs(struct bpf_object *obj)
}
if (err) {
err = -errno;
- pr_warn("failed to iterate BTF objects: %d\n", err);
+ pr_warn("failed to iterate BTF objects: %s\n", errstr(err));
return err;
}
@@ -5616,7 +5785,7 @@ static int load_module_btfs(struct bpf_object *obj)
if (errno == ENOENT)
continue; /* expected race: BTF was unloaded */
err = -errno;
- pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
+ pr_warn("failed to get BTF object #%d FD: %s\n", id, errstr(err));
return err;
}
@@ -5628,7 +5797,7 @@ static int load_module_btfs(struct bpf_object *obj)
err = bpf_btf_get_info_by_fd(fd, &info, &len);
if (err) {
err = -errno;
- pr_warn("failed to get BTF object #%d info: %d\n", id, err);
+ pr_warn("failed to get BTF object #%d info: %s\n", id, errstr(err));
goto err_out;
}
@@ -5641,8 +5810,8 @@ static int load_module_btfs(struct bpf_object *obj)
btf = btf_get_from_fd(fd, obj->btf_vmlinux);
err = libbpf_get_error(btf);
if (err) {
- pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
- name, id, err);
+ pr_warn("failed to load module [%s]'s BTF object #%d: %s\n",
+ name, id, errstr(err));
goto err_out;
}
@@ -5871,7 +6040,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
err = libbpf_get_error(obj->btf_vmlinux_override);
if (err) {
- pr_warn("failed to parse target BTF: %d\n", err);
+ pr_warn("failed to parse target BTF: %s\n", errstr(err));
return err;
}
}
@@ -5931,8 +6100,8 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
err = record_relo_core(prog, rec, insn_idx);
if (err) {
- pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
- prog->name, i, err);
+ pr_warn("prog '%s': relo #%d: failed to record relocation: %s\n",
+ prog->name, i, errstr(err));
goto out;
}
@@ -5941,15 +6110,15 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
if (err) {
- pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
- prog->name, i, err);
+ pr_warn("prog '%s': relo #%d: failed to relocate: %s\n",
+ prog->name, i, errstr(err));
goto out;
}
err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
if (err) {
- pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
- prog->name, i, insn_idx, err);
+ pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %s\n",
+ prog->name, i, insn_idx, errstr(err));
goto out;
}
}
@@ -6023,6 +6192,157 @@ static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
}
+static int find_jt_map(struct bpf_object *obj, struct bpf_program *prog, int sym_off)
+{
+ size_t i;
+
+ for (i = 0; i < obj->jumptable_map_cnt; i++) {
+ /*
+ * It can happen that the same offset is used by two different
+ * programs (as jump tables can be identical). However, a
+ * separate map must be created for each program.
+ */
+ if (obj->jumptable_maps[i].sym_off == sym_off &&
+ obj->jumptable_maps[i].prog == prog)
+ return obj->jumptable_maps[i].fd;
+ }
+
+ return -ENOENT;
+}
+
+static int add_jt_map(struct bpf_object *obj, struct bpf_program *prog, int sym_off, int map_fd)
+{
+ size_t cnt = obj->jumptable_map_cnt;
+ size_t size = sizeof(obj->jumptable_maps[0]);
+ void *tmp;
+
+ tmp = libbpf_reallocarray(obj->jumptable_maps, cnt + 1, size);
+ if (!tmp)
+ return -ENOMEM;
+
+ obj->jumptable_maps = tmp;
+ obj->jumptable_maps[cnt].prog = prog;
+ obj->jumptable_maps[cnt].sym_off = sym_off;
+ obj->jumptable_maps[cnt].fd = map_fd;
+ obj->jumptable_map_cnt++;
+
+ return 0;
+}
+
+static int find_subprog_idx(struct bpf_program *prog, int insn_idx)
+{
+ int i;
+
+ for (i = prog->subprog_cnt - 1; i >= 0; i--) {
+ if (insn_idx >= prog->subprogs[i].sub_insn_off)
+ return i;
+ }
+
+ return -1;
+}
+
+static int create_jt_map(struct bpf_object *obj, struct bpf_program *prog, struct reloc_desc *relo)
+{
+ const __u32 jt_entry_size = 8;
+ int sym_off = relo->sym_off;
+ int jt_size = relo->sym_size;
+ __u32 max_entries = jt_size / jt_entry_size;
+ __u32 value_size = sizeof(struct bpf_insn_array_value);
+ struct bpf_insn_array_value val = {};
+ int subprog_idx;
+ int map_fd, err;
+ __u64 insn_off;
+ __u64 *jt;
+ __u32 i;
+
+ map_fd = find_jt_map(obj, prog, sym_off);
+ if (map_fd >= 0)
+ return map_fd;
+
+ if (sym_off % jt_entry_size) {
+ pr_warn("map '.jumptables': jumptable start %d should be multiple of %u\n",
+ sym_off, jt_entry_size);
+ return -EINVAL;
+ }
+
+ if (jt_size % jt_entry_size) {
+ pr_warn("map '.jumptables': jumptable size %d should be multiple of %u\n",
+ jt_size, jt_entry_size);
+ return -EINVAL;
+ }
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_INSN_ARRAY, ".jumptables",
+ 4, value_size, max_entries, NULL);
+ if (map_fd < 0)
+ return map_fd;
+
+ if (!obj->jumptables_data) {
+ pr_warn("map '.jumptables': ELF file is missing jump table data\n");
+ err = -EINVAL;
+ goto err_close;
+ }
+ if (sym_off + jt_size > obj->jumptables_data_sz) {
+ pr_warn("map '.jumptables': jumptables_data size is %zd, trying to access %d\n",
+ obj->jumptables_data_sz, sym_off + jt_size);
+ err = -EINVAL;
+ goto err_close;
+ }
+
+ subprog_idx = -1; /* main program */
+ if (relo->insn_idx < 0 || relo->insn_idx >= prog->insns_cnt) {
+ pr_warn("map '.jumptables': invalid instruction index %d\n", relo->insn_idx);
+ err = -EINVAL;
+ goto err_close;
+ }
+ if (prog->subprogs)
+ subprog_idx = find_subprog_idx(prog, relo->insn_idx);
+
+ jt = (__u64 *)(obj->jumptables_data + sym_off);
+ for (i = 0; i < max_entries; i++) {
+ /*
+ * The offset should be made to be relative to the beginning of
+ * the main function, not the subfunction.
+ */
+ insn_off = jt[i]/sizeof(struct bpf_insn);
+ if (subprog_idx >= 0) {
+ insn_off -= prog->subprogs[subprog_idx].sec_insn_off;
+ insn_off += prog->subprogs[subprog_idx].sub_insn_off;
+ } else {
+ insn_off -= prog->sec_insn_off;
+ }
+
+ /*
+ * LLVM-generated jump tables contain u64 records; however, the
+ * values must fit in u32.
+ */
+ if (insn_off > UINT32_MAX) {
+ pr_warn("map '.jumptables': invalid jump table value 0x%llx at offset %d\n",
+ (long long)jt[i], sym_off + i * jt_entry_size);
+ err = -EINVAL;
+ goto err_close;
+ }
+
+ val.orig_off = insn_off;
+ err = bpf_map_update_elem(map_fd, &i, &val, 0);
+ if (err)
+ goto err_close;
+ }
+
+ err = bpf_map_freeze(map_fd);
+ if (err)
+ goto err_close;
+
+ err = add_jt_map(obj, prog, sym_off, map_fd);
+ if (err)
+ goto err_close;
+
+ return map_fd;
+
+err_close:
+ close(map_fd);
+ return err;
+}
+
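For orientation, the .jumptables data consumed by create_jt_map() is what LLVM may emit when it lowers a dense switch in BPF C code into an indirect jump; whether a jump table is actually generated depends on compiler version and flags, so this hypothetical program is only a sketch:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int classify(struct __sk_buff *skb)
    {
        /* a dense switch like this may become a .jumptables entry plus
         * an indirect jump, which libbpf converts into a frozen
         * BPF_MAP_TYPE_INSN_ARRAY map as shown above */
        switch (skb->protocol) {
        case 1: return 1;
        case 2: return 2;
        case 3: return 3;
        case 4: return 4;
        case 5: return 5;
        default: return 0;
        }
    }

    char _license[] SEC("license") = "GPL";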
/* Relocate data references within program code:
* - map references;
* - global variable references;
@@ -6114,6 +6434,20 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
case RELO_CORE:
/* will be handled by bpf_program_record_relos() */
break;
+ case RELO_INSN_ARRAY: {
+ int map_fd;
+
+ map_fd = create_jt_map(obj, prog, relo);
+ if (map_fd < 0) {
+ pr_warn("prog '%s': relo #%d: can't create jump table: sym_off %u\n",
+ prog->name, i, relo->sym_off);
+ return map_fd;
+ }
+ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insn->imm = map_fd;
+ insn->off = 0;
+ }
+ break;
default:
pr_warn("prog '%s': relo #%d: bad relo type %d\n",
prog->name, i, relo->type);
@@ -6217,8 +6551,8 @@ reloc_prog_func_and_line_info(const struct bpf_object *obj,
&main_prog->func_info_rec_size);
if (err) {
if (err != -ENOENT) {
- pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
- prog->name, err);
+ pr_warn("prog '%s': error relocating .BTF.ext function info: %s\n",
+ prog->name, errstr(err));
return err;
}
if (main_prog->func_info) {
@@ -6245,8 +6579,8 @@ line_info:
&main_prog->line_info_rec_size);
if (err) {
if (err != -ENOENT) {
- pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
- prog->name, err);
+ pr_warn("prog '%s': error relocating .BTF.ext line info: %s\n",
+ prog->name, errstr(err));
return err;
}
if (main_prog->line_info) {
@@ -6311,36 +6645,62 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra
return 0;
}
+static int save_subprog_offsets(struct bpf_program *main_prog, struct bpf_program *subprog)
+{
+ size_t size = sizeof(main_prog->subprogs[0]);
+ int cnt = main_prog->subprog_cnt;
+ void *tmp;
+
+ tmp = libbpf_reallocarray(main_prog->subprogs, cnt + 1, size);
+ if (!tmp)
+ return -ENOMEM;
+
+ main_prog->subprogs = tmp;
+ main_prog->subprogs[cnt].sec_insn_off = subprog->sec_insn_off;
+ main_prog->subprogs[cnt].sub_insn_off = subprog->sub_insn_off;
+ main_prog->subprog_cnt++;
+
+ return 0;
+}
+
static int
bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
struct bpf_program *subprog)
{
struct bpf_insn *insns;
size_t new_cnt;
int err;
subprog->sub_insn_off = main_prog->insns_cnt;
new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
if (!insns) {
pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
return -ENOMEM;
}
main_prog->insns = insns;
main_prog->insns_cnt = new_cnt;
memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
subprog->insns_cnt * sizeof(*insns));
pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
main_prog->name, subprog->insns_cnt, subprog->name);
/* The subprog insns are now appended. Append its relos too. */
err = append_subprog_relos(main_prog, subprog);
if (err)
return err;
- return 0;
+ err = save_subprog_offsets(main_prog, subprog);
+ if (err) {
+ pr_warn("prog '%s': failed to add subprog offsets: %s\n",
+ main_prog->name, errstr(err));
+ return err;
+ }
+
+ return 0;
}
static int
@@ -7010,8 +7370,8 @@ static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_pat
if (obj->btf_ext) {
err = bpf_object__relocate_core(obj, targ_btf_path);
if (err) {
- pr_warn("failed to perform CO-RE relocations: %d\n",
- err);
+ pr_warn("failed to perform CO-RE relocations: %s\n",
+ errstr(err));
return err;
}
bpf_object__sort_relos(obj);
@@ -7055,8 +7415,8 @@ static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_pat
err = bpf_object__relocate_calls(obj, prog);
if (err) {
- pr_warn("prog '%s': failed to relocate calls: %d\n",
- prog->name, err);
+ pr_warn("prog '%s': failed to relocate calls: %s\n",
+ prog->name, errstr(err));
return err;
}
@@ -7092,16 +7452,16 @@ static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_pat
/* Process data relos for main programs */
err = bpf_object__relocate_data(obj, prog);
if (err) {
- pr_warn("prog '%s': failed to relocate data references: %d\n",
- prog->name, err);
+ pr_warn("prog '%s': failed to relocate data references: %s\n",
+ prog->name, errstr(err));
return err;
}
/* Fix up .BTF.ext information, if necessary */
err = bpf_program_fixup_func_info(obj, prog);
if (err) {
- pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n",
- prog->name, err);
+ pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %s\n",
+ prog->name, errstr(err));
return err;
}
}
@@ -7353,8 +7713,14 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog,
opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
/* special check for usdt to use uprobe_multi link */
- if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
+ if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) {
+ /* For BPF_TRACE_UPROBE_MULTI, the user might query expected_attach_type
+ * from prog, while the kernel receives it from opts, so keep both
+ * in sync.
+ */
prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
+ opts->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
+ }
if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
int btf_obj_fd = 0, btf_type_id = 0, err;
@@ -7404,7 +7770,6 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
{
LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
const char *prog_name = NULL;
- char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
char *log_buf = NULL, *tmp;
bool own_log_buf = true;
@@ -7444,6 +7809,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
load_attr.attach_btf_id = prog->attach_btf_id;
load_attr.kern_version = kern_version;
load_attr.prog_ifindex = prog->prog_ifindex;
+ load_attr.expected_attach_type = prog->expected_attach_type;
/* specify func_info/line_info only if kernel supports them */
if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
@@ -7467,17 +7833,14 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
if (err < 0) {
- pr_warn("prog '%s': failed to prepare load attributes: %d\n",
- prog->name, err);
+ pr_warn("prog '%s': failed to prepare load attributes: %s\n",
+ prog->name, errstr(err));
return err;
}
insns = prog->insns;
insns_cnt = prog->insns_cnt;
}
- /* allow prog_prepare_load_fn to change expected_attach_type */
- load_attr.expected_attach_type = prog->expected_attach_type;
-
if (obj->gen_loader) {
bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
license, insns, insns_cnt, &load_attr,
@@ -7535,9 +7898,8 @@ retry_load:
continue;
if (bpf_prog_bind_map(ret, map->fd, NULL)) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warn("prog '%s': failed to bind map '%s': %s\n",
- prog->name, map->real_name, cp);
+ prog->name, map->real_name, errstr(errno));
/* Don't fail hard if can't bind rodata. */
}
}
@@ -7567,8 +7929,7 @@ retry_load:
/* post-process verifier log to improve error descriptions */
fixup_verifier_log(prog, log_buf, log_buf_size);
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
+ pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, errstr(errno));
pr_perm_msg(ret);
if (own_log_buf && log_buf && log_buf[0] != '\0') {
@@ -7840,13 +8201,6 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- err = bpf_object__sanitize_prog(obj, prog);
- if (err)
- return err;
- }
-
- for (i = 0; i < obj->nr_programs; i++) {
- prog = &obj->programs[i];
if (prog_is_subprog(obj, prog))
continue;
if (!prog->autoload) {
@@ -7861,7 +8215,7 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
obj->license, obj->kern_version, &prog->fd);
if (err) {
- pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
+ pr_warn("prog '%s': failed to load: %s\n", prog->name, errstr(err));
return err;
}
}
@@ -7870,6 +8224,21 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
return 0;
}
+static int bpf_object_prepare_progs(struct bpf_object *obj)
+{
+ struct bpf_program *prog;
+ size_t i;
+ int err;
+
+ for (i = 0; i < obj->nr_programs; i++) {
+ prog = &obj->programs[i];
+ err = bpf_object__sanitize_prog(obj, prog);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static const struct bpf_sec_def *find_sec_def(const char *sec_name);
static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
@@ -7895,8 +8264,8 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
if (prog->sec_def->prog_setup_fn) {
err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
if (err < 0) {
- pr_warn("prog '%s': failed to initialize: %d\n",
- prog->name, err);
+ pr_warn("prog '%s': failed to initialize: %s\n",
+ prog->name, errstr(err));
return err;
}
}
@@ -7906,16 +8275,19 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
}
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ const char *obj_name,
const struct bpf_object_open_opts *opts)
{
- const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
+ const char *kconfig, *btf_tmp_path, *token_path;
struct bpf_object *obj;
- char tmp_name[64];
int err;
char *log_buf;
size_t log_size;
__u32 log_level;
+ if (obj_buf && !obj_name)
+ return ERR_PTR(-EINVAL);
+
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("failed to init libelf for %s\n",
path ? : "(mem buf)");
@@ -7925,16 +8297,12 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
if (!OPTS_VALID(opts, bpf_object_open_opts))
return ERR_PTR(-EINVAL);
- obj_name = OPTS_GET(opts, object_name, NULL);
+ obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name;
if (obj_buf) {
- if (!obj_name) {
- snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
- (unsigned long)obj_buf,
- (unsigned long)obj_buf_sz);
- obj_name = tmp_name;
- }
path = obj_name;
pr_debug("loading object '%s' from buffer\n", obj_name);
+ } else {
+ pr_debug("loading object from %s\n", path);
}
log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
@@ -7994,7 +8362,6 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
}
err = bpf_object__elf_init(obj);
- err = err ? : bpf_object__check_endianness(obj);
err = err ? : bpf_object__elf_collect(obj);
err = err ? : bpf_object__collect_externs(obj);
err = err ? : bpf_object_fixup_btf(obj);
@@ -8018,9 +8385,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
if (!path)
return libbpf_err_ptr(-EINVAL);
- pr_debug("loading %s\n", path);
-
- return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
+ return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts));
}
struct bpf_object *bpf_object__open(const char *path)
@@ -8032,10 +8397,15 @@ struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts)
{
+ char tmp_name[64];
+
if (!obj_buf || obj_buf_sz == 0)
return libbpf_err_ptr(-EINVAL);
- return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
+ /* create a (quite useless) default "name" for this memory buffer object */
+ snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
+
+ return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts));
}
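Callers that want a meaningful name instead of the pointer-derived default can still supply object_name via the open options. A usage sketch, assuming buf and buf_sz hold a valid BPF ELF image:

    LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = "my_obj");
    struct bpf_object *obj = bpf_object__open_mem(buf, buf_sz, &opts);

    if (!obj) /* NULL on failure, errno carries the error */
        return -errno;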
static int bpf_object_unload(struct bpf_object *obj)
@@ -8084,7 +8454,7 @@ static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
f = fopen("/proc/kallsyms", "re");
if (!f) {
err = -errno;
- pr_warn("failed to open /proc/kallsyms: %d\n", err);
+ pr_warn("failed to open /proc/kallsyms: %s\n", errstr(err));
return err;
}
@@ -8445,11 +8815,13 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
static void bpf_map_prepare_vdata(const struct bpf_map *map)
{
+ const struct btf_type *type;
struct bpf_struct_ops *st_ops;
__u32 i;
st_ops = map->st_ops;
- for (i = 0; i < btf_vlen(st_ops->type); i++) {
+ type = btf__type_by_id(map->obj->btf, st_ops->type_id);
+ for (i = 0; i < btf_vlen(type); i++) {
struct bpf_program *prog = st_ops->progs[i];
void *kern_data;
int prog_fd;
@@ -8483,20 +8855,45 @@ static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
return 0;
}
-static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
+static void bpf_object_unpin(struct bpf_object *obj)
{
- int err, i;
+ int i;
- if (!obj)
- return libbpf_err(-EINVAL);
+ /* unpin any maps that were auto-pinned during load */
+ for (i = 0; i < obj->nr_maps; i++)
+ if (obj->maps[i].pinned && !obj->maps[i].reused)
+ bpf_map__unpin(&obj->maps[i], NULL);
+}
- if (obj->loaded) {
- pr_warn("object '%s': load can't be attempted twice\n", obj->name);
- return libbpf_err(-EINVAL);
+static void bpf_object_post_load_cleanup(struct bpf_object *obj)
+{
+ int i;
+
+ /* clean up fd_array */
+ zfree(&obj->fd_array);
+
+ /* clean up module BTFs */
+ for (i = 0; i < obj->btf_module_cnt; i++) {
+ close(obj->btf_modules[i].fd);
+ btf__free(obj->btf_modules[i].btf);
+ free(obj->btf_modules[i].name);
}
+ obj->btf_module_cnt = 0;
+ zfree(&obj->btf_modules);
- if (obj->gen_loader)
- bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
+ /* clean up vmlinux BTF */
+ btf__free(obj->btf_vmlinux);
+ obj->btf_vmlinux = NULL;
+}
+
+static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path)
+{
+ int err;
+
+ if (obj->state >= OBJ_PREPARED) {
+ pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name);
+ return -EINVAL;
+ }
err = bpf_object_prepare_token(obj);
err = err ? : bpf_object__probe_loading(obj);
@@ -8508,7 +8905,47 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__create_maps(obj);
- err = err ? : bpf_object__load_progs(obj, extra_log_level);
+ err = err ? : bpf_object_prepare_progs(obj);
+
+ if (err) {
+ bpf_object_unpin(obj);
+ bpf_object_unload(obj);
+ obj->state = OBJ_LOADED;
+ return err;
+ }
+
+ obj->state = OBJ_PREPARED;
+ return 0;
+}
+
+static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
+{
+ int err;
+
+ if (!obj)
+ return libbpf_err(-EINVAL);
+
+ if (obj->state >= OBJ_LOADED) {
+ pr_warn("object '%s': load can't be attempted twice\n", obj->name);
+ return libbpf_err(-EINVAL);
+ }
+
+ /* Disallow kernel loading programs of non-native endianness but
+ * permit cross-endian creation of "light skeleton".
+ */
+ if (obj->gen_loader) {
+ bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
+ } else if (!is_native_endianness(obj)) {
+ pr_warn("object '%s': loading non-native endianness is unsupported\n", obj->name);
+ return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
+ }
+
+ if (obj->state < OBJ_PREPARED) {
+ err = bpf_object_prepare(obj, target_btf_path);
+ if (err)
+ return libbpf_err(err);
+ }
+ err = bpf_object__load_progs(obj, extra_log_level);
err = err ? : bpf_object_init_prog_arrays(obj);
err = err ? : bpf_object_prepare_struct_ops(obj);
@@ -8520,36 +8957,22 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
}
- /* clean up fd_array */
- zfree(&obj->fd_array);
+ bpf_object_post_load_cleanup(obj);
+ obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */
- /* clean up module BTFs */
- for (i = 0; i < obj->btf_module_cnt; i++) {
- close(obj->btf_modules[i].fd);
- btf__free(obj->btf_modules[i].btf);
- free(obj->btf_modules[i].name);
+ if (err) {
+ bpf_object_unpin(obj);
+ bpf_object_unload(obj);
+ pr_warn("failed to load object '%s'\n", obj->path);
+ return libbpf_err(err);
}
- free(obj->btf_modules);
-
- /* clean up vmlinux BTF */
- btf__free(obj->btf_vmlinux);
- obj->btf_vmlinux = NULL;
-
- obj->loaded = true; /* doesn't matter if successfully or not */
-
- if (err)
- goto out;
return 0;
-out:
- /* unpin any maps that were auto-pinned during load */
- for (i = 0; i < obj->nr_maps; i++)
- if (obj->maps[i].pinned && !obj->maps[i].reused)
- bpf_map__unpin(&obj->maps[i], NULL);
+}
- bpf_object_unload(obj);
- pr_warn("failed to load object '%s'\n", obj->path);
- return libbpf_err(err);
+int bpf_object__prepare(struct bpf_object *obj)
+{
+ return libbpf_err(bpf_object_prepare(obj, NULL));
}
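A sketch of the resulting two-phase flow (error handling abbreviated): prepare creates maps and performs relocations, load runs verification and program loading, and program instructions may still be inspected or replaced in between:

    struct bpf_object *obj = bpf_object__open("prog.bpf.o");

    if (!obj || bpf_object__prepare(obj))
        goto err;
    /* maps exist and relocations are done; bpf_program__insns() now
     * reflects final code and bpf_program__set_insns() still works */
    if (bpf_object__load(obj))
        goto err;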
int bpf_object__load(struct bpf_object *obj)
@@ -8559,7 +8982,6 @@ int bpf_object__load(struct bpf_object *obj)
static int make_parent_dir(const char *path)
{
- char *cp, errmsg[STRERR_BUFSIZE];
char *dname, *dir;
int err = 0;
@@ -8573,15 +8995,13 @@ static int make_parent_dir(const char *path)
free(dname);
if (err) {
- cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
- pr_warn("failed to mkdir %s: %s\n", path, cp);
+ pr_warn("failed to mkdir %s: %s\n", path, errstr(err));
}
return err;
}
static int check_path(const char *path)
{
- char *cp, errmsg[STRERR_BUFSIZE];
struct statfs st_fs;
char *dname, *dir;
int err = 0;
@@ -8595,8 +9015,7 @@ static int check_path(const char *path)
dir = dirname(dname);
if (statfs(dir, &st_fs)) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warn("failed to statfs %s: %s\n", dir, cp);
+ pr_warn("failed to statfs %s: %s\n", dir, errstr(errno));
err = -errno;
}
free(dname);
@@ -8611,7 +9030,6 @@ static int check_path(const char *path)
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
- char *cp, errmsg[STRERR_BUFSIZE];
int err;
if (prog->fd < 0) {
@@ -8629,8 +9047,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
if (bpf_obj_pin(prog->fd, path)) {
err = -errno;
- cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
+ pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, errstr(err));
return libbpf_err(err);
}
@@ -8661,7 +9078,6 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
int bpf_map__pin(struct bpf_map *map, const char *path)
{
- char *cp, errmsg[STRERR_BUFSIZE];
int err;
if (map == NULL) {
@@ -8720,8 +9136,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
return 0;
out_err:
- cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
- pr_warn("failed to pin map: %s\n", cp);
+ pr_warn("failed to pin map: %s\n", errstr(err));
return libbpf_err(err);
}
@@ -8807,7 +9222,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
if (!obj)
return libbpf_err(-ENOENT);
- if (!obj->loaded) {
+ if (obj->state < OBJ_PREPARED) {
pr_warn("object not yet loaded; load it first\n");
return libbpf_err(-ENOENT);
}
@@ -8886,7 +9301,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
if (!obj)
return libbpf_err(-ENOENT);
- if (!obj->loaded) {
+ if (obj->state < OBJ_LOADED) {
pr_warn("object not yet loaded; load it first\n");
return libbpf_err(-ENOENT);
}
@@ -9005,6 +9420,13 @@ void bpf_object__close(struct bpf_object *obj)
if (IS_ERR_OR_NULL(obj))
return;
+ /*
+ * If the user called bpf_object__prepare() without ever getting to
+ * bpf_object__load(), we need to clean up the state that is normally
+ * cleaned up at the end of the loading step.
+ */
+ bpf_object_post_load_cleanup(obj);
+
usdt_manager_free(obj->usdt_man);
obj->usdt_man = NULL;
@@ -9021,8 +9443,10 @@ void bpf_object__close(struct bpf_object *obj)
zfree(&obj->btf_custom_path);
zfree(&obj->kconfig);
- for (i = 0; i < obj->nr_extern; i++)
+ for (i = 0; i < obj->nr_extern; i++) {
+ zfree(&obj->externs[i].name);
zfree(&obj->externs[i].essent_name);
+ }
zfree(&obj->externs);
obj->nr_extern = 0;
@@ -9043,6 +9467,13 @@ void bpf_object__close(struct bpf_object *obj)
zfree(&obj->arena_data);
+ zfree(&obj->jumptables_data);
+ obj->jumptables_data_sz = 0;
+
+ for (i = 0; i < obj->jumptable_map_cnt; i++)
+ close(obj->jumptable_maps[i].fd);
+ zfree(&obj->jumptable_maps);
+
free(obj);
}
@@ -9056,6 +9487,11 @@ unsigned int bpf_object__kversion(const struct bpf_object *obj)
return obj ? obj->kern_version : 0;
}
+int bpf_object__token_fd(const struct bpf_object *obj)
+{
+ return obj->token_fd ?: -1;
+}
+
struct btf *bpf_object__btf(const struct bpf_object *obj)
{
return obj ? obj->btf : NULL;
@@ -9068,7 +9504,7 @@ int bpf_object__btf_fd(const struct bpf_object *obj)
int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
{
- if (obj->loaded)
+ if (obj->state >= OBJ_LOADED)
return libbpf_err(-EINVAL);
obj->kern_version = kern_version;
@@ -9081,13 +9517,14 @@ int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
struct bpf_gen *gen;
if (!opts)
- return -EFAULT;
+ return libbpf_err(-EFAULT);
if (!OPTS_VALID(opts, gen_loader_opts))
- return -EINVAL;
- gen = calloc(sizeof(*gen), 1);
+ return libbpf_err(-EINVAL);
+ gen = calloc(1, sizeof(*gen));
if (!gen)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
gen->opts = opts;
+ gen->swapped_endian = !is_native_endianness(obj);
obj->gen_loader = gen;
return 0;
}
@@ -9164,7 +9601,7 @@ bool bpf_program__autoload(const struct bpf_program *prog)
int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
{
- if (prog->obj->loaded)
+ if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EINVAL);
prog->autoload = autoload;
@@ -9196,14 +9633,14 @@ int bpf_program__set_insns(struct bpf_program *prog,
{
struct bpf_insn *insns;
- if (prog->obj->loaded)
- return -EBUSY;
+ if (prog->obj->state >= OBJ_LOADED)
+ return libbpf_err(-EBUSY);
insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
/* NULL is a valid return from reallocarray if the new count is zero */
if (!insns && new_insn_cnt) {
pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
}
memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
@@ -9239,7 +9676,7 @@ static int last_custom_sec_def_handler_id;
int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
- if (prog->obj->loaded)
+ if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EBUSY);
/* if type is not changed, do nothing */
@@ -9270,7 +9707,7 @@ enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program
int bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type)
{
- if (prog->obj->loaded)
+ if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EBUSY);
prog->expected_attach_type = type;
@@ -9284,7 +9721,7 @@ __u32 bpf_program__flags(const struct bpf_program *prog)
int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
{
- if (prog->obj->loaded)
+ if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EBUSY);
prog->prog_flags = flags;
@@ -9298,7 +9735,7 @@ __u32 bpf_program__log_level(const struct bpf_program *prog)
int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
{
- if (prog->obj->loaded)
+ if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EBUSY);
prog->log_level = log_level;
@@ -9314,17 +9751,41 @@ const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_siz
int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
{
if (log_size && !log_buf)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (prog->log_size > UINT_MAX)
- return -EINVAL;
- if (prog->obj->loaded)
- return -EBUSY;
+ return libbpf_err(-EINVAL);
+ if (prog->obj->state >= OBJ_LOADED)
+ return libbpf_err(-EBUSY);
prog->log_buf = log_buf;
prog->log_size = log_size;
return 0;
}
+struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog)
+{
+ if (prog->func_info_rec_size != sizeof(struct bpf_func_info))
+ return libbpf_err_ptr(-EOPNOTSUPP);
+ return prog->func_info;
+}
+
+__u32 bpf_program__func_info_cnt(const struct bpf_program *prog)
+{
+ return prog->func_info_cnt;
+}
+
+struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog)
+{
+ if (prog->line_info_rec_size != sizeof(struct bpf_line_info))
+ return libbpf_err_ptr(-EOPNOTSUPP);
+ return prog->line_info;
+}
+
+__u32 bpf_program__line_info_cnt(const struct bpf_program *prog)
+{
+ return prog->line_info_cnt;
+}
+
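A sketch of how the new introspection getters might be used, assuming prog points at a program from a loaded object:

    __u32 i, cnt = bpf_program__func_info_cnt(prog);
    struct bpf_func_info *fi = bpf_program__func_info(prog);

    for (i = 0; fi && i < cnt; i++)
        printf("func #%u: insn_off=%u btf_type_id=%u\n",
               i, fi[i].insn_off, fi[i].type_id);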
#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
.sec = (char *)sec_pfx, \
.prog_type = BPF_PROG_TYPE_##ptype, \
@@ -9362,8 +9823,10 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session),
SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
+ SEC_DEF("uprobe.session+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_NONE, attach_uprobe_multi),
SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
+ SEC_DEF("uprobe.session.s+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_SLEEPABLE, attach_uprobe_multi),
SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt),
@@ -9712,6 +10175,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
Elf64_Shdr *shdr, Elf_Data *data)
{
+ const struct btf_type *type;
const struct btf_member *member;
struct bpf_struct_ops *st_ops;
struct bpf_program *prog;
@@ -9771,13 +10235,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
}
insn_idx = sym->st_value / BPF_INSN_SZ;
- member = find_member_by_offset(st_ops->type, moff * 8);
+ type = btf__type_by_id(btf, st_ops->type_id);
+ member = find_member_by_offset(type, moff * 8);
if (!member) {
pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
map->name, moff);
return -EINVAL;
}
- member_idx = member - btf_members(st_ops->type);
+ member_idx = member - btf_members(type);
name = btf__name_by_offset(btf, member->name_off);
if (!resolve_func_ptr(btf, member->type, NULL)) {
@@ -9890,7 +10355,7 @@ int libbpf_find_vmlinux_btf_id(const char *name,
return libbpf_err(err);
}
-static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
+static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd, int token_fd)
{
struct bpf_prog_info info;
__u32 info_len = sizeof(info);
@@ -9900,8 +10365,8 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
memset(&info, 0, info_len);
err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
if (err) {
- pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
- attach_prog_fd, err);
+ pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %s\n",
+ attach_prog_fd, errstr(err));
return err;
}
@@ -9910,10 +10375,10 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
pr_warn("The target program doesn't have BTF\n");
goto out;
}
- btf = btf__load_from_kernel_by_id(info.btf_id);
+ btf = btf_load_from_kernel(info.btf_id, NULL, token_fd);
err = libbpf_get_error(btf);
if (err) {
- pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
+ pr_warn("Failed to get BTF %d of the program: %s\n", info.btf_id, errstr(err));
goto out;
}
err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
@@ -9930,7 +10395,7 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
enum bpf_attach_type attach_type,
int *btf_obj_fd, int *btf_type_id)
{
- int ret, i, mod_len;
+ int ret, i, mod_len = 0;
const char *fn_name, *mod_name = NULL;
fn_name = strchr(attach_name, ':');
@@ -9993,10 +10458,10 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
pr_warn("prog '%s': attach program FD is not set\n", prog->name);
return -EINVAL;
}
- err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
+ err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd, prog->obj->token_fd);
if (err < 0) {
- pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
- prog->name, attach_prog_fd, attach_name, err);
+ pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %s\n",
+ prog->name, attach_prog_fd, attach_name, errstr(err));
return err;
}
*btf_obj_fd = 0;
@@ -10015,8 +10480,8 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
btf_type_id);
}
if (err) {
- pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
- prog->name, attach_name, err);
+ pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %s\n",
+ prog->name, attach_name, errstr(err));
return err;
}
return 0;
@@ -10230,7 +10695,7 @@ static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
{
- if (map->obj->loaded || map->reused)
+ if (map_is_created(map))
return libbpf_err(-EBUSY);
if (map->mmaped) {
@@ -10238,20 +10703,20 @@ int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
int err;
if (map->def.type != BPF_MAP_TYPE_ARRAY)
- return -EOPNOTSUPP;
+ return libbpf_err(-EOPNOTSUPP);
mmap_old_sz = bpf_map_mmap_sz(map);
mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
if (err) {
- pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
- bpf_map__name(map), err);
- return err;
+ pr_warn("map '%s': failed to resize memory-mapped region: %s\n",
+ bpf_map__name(map), errstr(err));
+ return libbpf_err(err);
}
err = map_btf_datasec_resize(map, size);
if (err && err != -ENOENT) {
- pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n",
- bpf_map__name(map), err);
+ pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %s\n",
+ bpf_map__name(map), errstr(err));
map->btf_value_type_id = 0;
map->btf_key_type_id = 0;
}
@@ -10276,7 +10741,7 @@ int bpf_map__set_initial_value(struct bpf_map *map,
{
size_t actual_sz;
- if (map->obj->loaded || map->reused)
+ if (map_is_created(map))
return libbpf_err(-EBUSY);
if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
@@ -10348,6 +10813,27 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
return 0;
}
+int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_program *prog)
+{
+ if (map_is_created(map)) {
+ pr_warn("exclusive programs must be set before map creation\n");
+ return libbpf_err(-EINVAL);
+ }
+
+ if (map->obj != prog->obj) {
+ pr_warn("excl_prog and map must be from the same bpf object\n");
+ return libbpf_err(-EINVAL);
+ }
+
+ map->excl_prog = prog;
+ return 0;
+}
+
+struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map)
+{
+ return map->excl_prog;
+}
+
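A usage sketch for the exclusivity API; the skeleton and its map/program names are hypothetical. Since the program hash is passed to the kernel at map creation time, the pairing must be established before load:

    err = bpf_map__set_exclusive_program(skel->maps.private_map,
                                         skel->progs.owner_prog);
    if (!err)
        err = my_skel__load(skel); /* map created with excl_prog_hash */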
static struct bpf_map *
__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
{
@@ -10742,7 +11228,6 @@ static void bpf_link_perf_dealloc(struct bpf_link *link)
struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
const struct bpf_perf_event_opts *opts)
{
- char errmsg[STRERR_BUFSIZE];
struct bpf_link_perf *link;
int prog_fd, link_fd = -1, err;
bool force_ioctl_attach;
@@ -10777,9 +11262,8 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
if (link_fd < 0) {
err = -errno;
- pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
- prog->name, pfd,
- err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %s\n",
+ prog->name, pfd, errstr(err));
goto err_out;
}
link->link.fd = link_fd;
@@ -10793,7 +11277,7 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
err = -errno;
pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
- prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ prog->name, pfd, errstr(err));
if (err == -EPROTO)
pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
prog->name, pfd);
@@ -10801,11 +11285,14 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
}
link->link.fd = pfd;
}
- if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
- err = -errno;
- pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
- prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- goto err_out;
+
+ if (!OPTS_GET(opts, dont_enable, false)) {
+ if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
+ err = -errno;
+ pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
+ prog->name, pfd, errstr(err));
+ goto err_out;
+ }
}
return &link->link;
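With the new dont_enable option the program is attached while the perf event stays disabled, letting the caller choose when to arm it. A sketch assuming prog and a perf event fd pfd already exist:

    LIBBPF_OPTS(bpf_perf_event_opts, pe_opts, .dont_enable = true);
    struct bpf_link *link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);

    if (link)
        ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0); /* arm when ready */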
@@ -10828,22 +11315,19 @@ struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog,
*/
static int parse_uint_from_file(const char *file, const char *fmt)
{
- char buf[STRERR_BUFSIZE];
int err, ret;
FILE *f;
f = fopen(file, "re");
if (!f) {
err = -errno;
- pr_debug("failed to open '%s': %s\n", file,
- libbpf_strerror_r(err, buf, sizeof(buf)));
+ pr_debug("failed to open '%s': %s\n", file, errstr(err));
return err;
}
err = fscanf(f, fmt, &ret);
if (err != 1) {
err = err == EOF ? -EIO : -errno;
- pr_debug("failed to parse '%s': %s\n", file,
- libbpf_strerror_r(err, buf, sizeof(buf)));
+ pr_debug("failed to parse '%s': %s\n", file, errstr(err));
fclose(f);
return err;
}
@@ -10887,7 +11371,6 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
{
const size_t attr_sz = sizeof(struct perf_event_attr);
struct perf_event_attr attr;
- char errmsg[STRERR_BUFSIZE];
int type, pfd;
if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
@@ -10900,7 +11383,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
if (type < 0) {
pr_warn("failed to determine %s perf type: %s\n",
uprobe ? "uprobe" : "kprobe",
- libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
+ errstr(type));
return type;
}
if (retprobe) {
@@ -10910,7 +11393,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
if (bit < 0) {
pr_warn("failed to determine %s retprobe bit: %s\n",
uprobe ? "uprobe" : "kprobe",
- libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
+ errstr(bit));
return bit;
}
attr.config |= 1 << bit;
@@ -10993,16 +11476,16 @@ static const char *tracefs_available_filter_functions_addrs(void)
: TRACEFS"/available_filter_functions_addrs";
}
-static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
- const char *kfunc_name, size_t offset)
+static void gen_probe_legacy_event_name(char *buf, size_t buf_sz,
+ const char *name, size_t offset)
{
static int index = 0;
int i;
- snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
- __sync_fetch_and_add(&index, 1));
+ snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(),
+ __sync_fetch_and_add(&index, 1), name, offset);
- /* sanitize binary_path in the probe name */
+ /* sanitize name in the probe name */
for (i = 0; buf[i]; i++) {
if (!isalnum(buf[i]))
buf[i] = '_';
@@ -11039,14 +11522,13 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
{
const size_t attr_sz = sizeof(struct perf_event_attr);
struct perf_event_attr attr;
- char errmsg[STRERR_BUFSIZE];
int type, pfd, err;
err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
if (err < 0) {
pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
kfunc_name, offset,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ errstr(err));
return err;
}
type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
@@ -11054,7 +11536,7 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
err = type;
pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
kfunc_name, offset,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ errstr(err));
goto err_clean_legacy;
}
@@ -11070,7 +11552,7 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
if (pfd < 0) {
err = -errno;
pr_warn("legacy kprobe perf_event_open() failed: %s\n",
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ errstr(err));
goto err_clean_legacy;
}
return pfd;
@@ -11089,8 +11571,6 @@ static const char *arch_specific_syscall_pfx(void)
return "ia32";
#elif defined(__s390x__)
return "s390x";
-#elif defined(__s390__)
- return "s390";
#elif defined(__arm__)
return "arm";
#elif defined(__aarch64__)
@@ -11128,9 +11608,9 @@ int probe_kern_syscall_wrapper(int token_fd)
return pfd >= 0 ? 1 : 0;
} else { /* legacy mode */
- char probe_name[128];
+ char probe_name[MAX_EVENT_NAME_LEN];
- gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
+ gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
return 0;
@@ -11146,7 +11626,6 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
{
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
enum probe_attach_mode attach_mode;
- char errmsg[STRERR_BUFSIZE];
char *legacy_probe = NULL;
struct bpf_link *link;
size_t offset;
@@ -11187,10 +11666,10 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
func_name, offset,
-1 /* pid */, 0 /* ref_ctr_off */);
} else {
- char probe_name[256];
+ char probe_name[MAX_EVENT_NAME_LEN];
- gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
- func_name, offset);
+ gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
+ func_name, offset);
legacy_probe = strdup(probe_name);
if (!legacy_probe)
@@ -11204,7 +11683,7 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
prog->name, retprobe ? "kretprobe" : "kprobe",
func_name, offset,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ errstr(err));
goto err_out;
}
link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
@@ -11214,7 +11693,7 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
prog->name, retprobe ? "kretprobe" : "kprobe",
func_name, offset,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ errstr(err));
goto err_clean_legacy;
}
if (legacy) {
@@ -11326,9 +11805,33 @@ static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
struct kprobe_multi_resolve *res = data->res;
int err;
- if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
+ if (!glob_match(sym_name, res->pattern))
return 0;
+ if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) {
+		/* Some versions of the kernel strip out the .llvm.<hash> suffix from
+		 * function names reported in available_filter_functions, but
+		 * don't do so for kallsyms. While this is clearly a kernel
+		 * bug (fixed by [0]), we try to accommodate it in libbpf to
+		 * make multi-kprobe usability a bit better: if no match is
+		 * found, we strip the .llvm. suffix and try one more time.
+ *
+ * [0] fb6a421fb615 ("kallsyms: Match symbols exactly with CONFIG_LTO_CLANG")
+ */
+ char sym_trim[256], *psym_trim = sym_trim, *sym_sfx;
+
+ if (!(sym_sfx = strstr(sym_name, ".llvm.")))
+ return 0;
+
+		/* the psym_trim vs sym_trim dance is done to avoid pointer vs array
+		 * coercion differences and to get the proper `const char **` pointer
+		 * which avail_func_cmp() expects
+ */
+ snprintf(sym_trim, sizeof(sym_trim), "%.*s", (int)(sym_sfx - sym_name), sym_name);
+ if (!bsearch(&psym_trim, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
+ return 0;
+ }
+
err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
if (err)
return err;
@@ -11350,7 +11853,7 @@ static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res)
f = fopen(available_functions_file, "re");
if (!f) {
err = -errno;
- pr_warn("failed to open %s: %d\n", available_functions_file, err);
+ pr_warn("failed to open %s: %s\n", available_functions_file, errstr(err));
return err;
}
@@ -11425,7 +11928,7 @@ static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
f = fopen(available_path, "re");
if (!f) {
err = -errno;
- pr_warn("failed to open %s: %d\n", available_path, err);
+ pr_warn("failed to open %s: %s\n", available_path, errstr(err));
return err;
}
@@ -11471,10 +11974,9 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
};
enum bpf_attach_type attach_type;
struct bpf_link *link = NULL;
- char errmsg[STRERR_BUFSIZE];
const unsigned long *addrs;
int err, link_fd, prog_fd;
- bool retprobe, session;
+ bool retprobe, session, unique_match;
const __u64 *cookies;
const char **syms;
size_t cnt;
@@ -11493,6 +11995,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
addrs = OPTS_GET(opts, addrs, false);
cnt = OPTS_GET(opts, cnt, false);
cookies = OPTS_GET(opts, cookies, false);
+ unique_match = OPTS_GET(opts, unique_match, false);
if (!pattern && !addrs && !syms)
return libbpf_err_ptr(-EINVAL);
@@ -11500,6 +12003,8 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
return libbpf_err_ptr(-EINVAL);
if (!pattern && !cnt)
return libbpf_err_ptr(-EINVAL);
+ if (!pattern && unique_match)
+ return libbpf_err_ptr(-EINVAL);
if (addrs && syms)
return libbpf_err_ptr(-EINVAL);
@@ -11510,6 +12015,14 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
err = libbpf_available_kallsyms_parse(&res);
if (err)
goto error;
+
+ if (unique_match && res.cnt != 1) {
+ pr_warn("prog '%s': failed to find a unique match for '%s' (%zu matches)\n",
+ prog->name, pattern, res.cnt);
+ err = -EINVAL;
+ goto error;
+ }
+
addrs = res.addrs;
cnt = res.cnt;
}
@@ -11539,7 +12052,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
if (link_fd < 0) {
err = -errno;
pr_warn("prog '%s': failed to attach: %s\n",
- prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ prog->name, errstr(err));
goto error;
}
link->fd = link_fd;
@@ -11683,7 +12196,9 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
ret = 0;
break;
case 3:
- opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
+ opts.session = str_has_pfx(probe_type, "uprobe.session");
+ opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
+
*link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
ret = libbpf_get_error(*link);
break;
@@ -11698,20 +12213,6 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
return ret;
}
-static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
- const char *binary_path, uint64_t offset)
-{
- int i;
-
- snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
-
- /* sanitize binary_path in the probe name */
- for (i = 0; buf[i]; i++) {
- if (!isalnum(buf[i]))
- buf[i] = '_';
- }
-}
-
static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
const char *binary_path, size_t offset)
{
@@ -11746,15 +12247,15 @@ static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
if (err < 0) {
- pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
- binary_path, (size_t)offset, err);
+ pr_warn("failed to add legacy uprobe event for %s:0x%zx: %s\n",
+ binary_path, (size_t)offset, errstr(err));
return err;
}
type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
if (type < 0) {
err = type;
- pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
- binary_path, offset, err);
+ pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %s\n",
+ binary_path, offset, errstr(err));
goto err_clean_legacy;
}
@@ -11769,7 +12270,7 @@ static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
-1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
if (pfd < 0) {
err = -errno;
- pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
+ pr_warn("legacy uprobe perf_event_open() failed: %s\n", errstr(err));
goto err_clean_legacy;
}
return pfd;
@@ -11856,8 +12357,6 @@ static const char *arch_specific_lib_paths(void)
return "/lib/i386-linux-gnu";
#elif defined(__s390x__)
return "/lib/s390x-linux-gnu";
-#elif defined(__s390__)
- return "/lib/s390-linux-gnu";
#elif defined(__arm__) && defined(__SOFTFP__)
return "/lib/arm-linux-gnueabi";
#elif defined(__arm__) && !defined(__SOFTFP__)
@@ -11932,10 +12431,11 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
LIBBPF_OPTS(bpf_link_create_opts, lopts);
unsigned long *resolved_offsets = NULL;
+ enum bpf_attach_type attach_type;
int err = 0, link_fd, prog_fd;
struct bpf_link *link = NULL;
- char errmsg[STRERR_BUFSIZE];
char full_path[PATH_MAX];
+ bool retprobe, session;
const __u64 *cookies;
const char **syms;
size_t cnt;
@@ -11955,6 +12455,8 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
cookies = OPTS_GET(opts, cookies, NULL);
cnt = OPTS_GET(opts, cnt, 0);
+ retprobe = OPTS_GET(opts, retprobe, false);
+ session = OPTS_GET(opts, session, false);
/*
* User can specify 2 mutually exclusive set of inputs:
@@ -11983,12 +12485,15 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
return libbpf_err_ptr(-EINVAL);
}
+ if (retprobe && session)
+ return libbpf_err_ptr(-EINVAL);
+
if (func_pattern) {
if (!strchr(path, '/')) {
err = resolve_full_path(path, full_path, sizeof(full_path));
if (err) {
- pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
- prog->name, path, err);
+ pr_warn("prog '%s': failed to resolve full path for '%s': %s\n",
+ prog->name, path, errstr(err));
return libbpf_err_ptr(err);
}
path = full_path;
@@ -12006,12 +12511,14 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
offsets = resolved_offsets;
}
+ attach_type = session ? BPF_TRACE_UPROBE_SESSION : BPF_TRACE_UPROBE_MULTI;
+
lopts.uprobe_multi.path = path;
lopts.uprobe_multi.offsets = offsets;
lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
lopts.uprobe_multi.cookies = cookies;
lopts.uprobe_multi.cnt = cnt;
- lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0;
+ lopts.uprobe_multi.flags = retprobe ? BPF_F_UPROBE_MULTI_RETURN : 0;
if (pid == 0)
pid = getpid();
@@ -12025,11 +12532,11 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
}
link->detach = &bpf_link__detach_fd;
- link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
+ link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
if (link_fd < 0) {
err = -errno;
pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
- prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ prog->name, errstr(err));
goto error;
}
link->fd = link_fd;
@@ -12048,7 +12555,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
const struct bpf_uprobe_opts *opts)
{
const char *archive_path = NULL, *archive_sep = NULL;
- char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
+ char *legacy_probe = NULL;
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
enum probe_attach_mode attach_mode;
char full_path[PATH_MAX];
@@ -12080,8 +12587,8 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
} else if (!strchr(binary_path, '/')) {
err = resolve_full_path(binary_path, full_path, sizeof(full_path));
if (err) {
- pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
- prog->name, binary_path, err);
+ pr_warn("prog '%s': failed to resolve full path for '%s': %s\n",
+ prog->name, binary_path, errstr(err));
return libbpf_err_ptr(err);
}
binary_path = full_path;
@@ -12127,13 +12634,14 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
func_offset, pid, ref_ctr_off);
} else {
- char probe_name[PATH_MAX + 64];
+ char probe_name[MAX_EVENT_NAME_LEN];
if (ref_ctr_off)
return libbpf_err_ptr(-EINVAL);
- gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
- binary_path, func_offset);
+ gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
+ strrchr(binary_path, '/') ? : binary_path,
+ func_offset);
legacy_probe = strdup(probe_name);
if (!legacy_probe)
@@ -12147,7 +12655,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ errstr(err));
goto err_out;
}
@@ -12158,7 +12666,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ errstr(err));
goto err_clean_legacy;
}
if (legacy) {
@@ -12279,8 +12787,8 @@ struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
if (!strchr(binary_path, '/')) {
err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
if (err) {
- pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
- prog->name, binary_path, err);
+ pr_warn("prog '%s': failed to resolve full path for '%s': %s\n",
+ prog->name, binary_path, errstr(err));
return libbpf_err_ptr(err);
}
binary_path = resolved_path;
@@ -12358,14 +12866,13 @@ static int perf_event_open_tracepoint(const char *tp_category,
{
const size_t attr_sz = sizeof(struct perf_event_attr);
struct perf_event_attr attr;
- char errmsg[STRERR_BUFSIZE];
int tp_id, pfd, err;
tp_id = determine_tracepoint_id(tp_category, tp_name);
if (tp_id < 0) {
pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
tp_category, tp_name,
- libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
+ errstr(tp_id));
return tp_id;
}
@@ -12380,7 +12887,7 @@ static int perf_event_open_tracepoint(const char *tp_category,
err = -errno;
pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
tp_category, tp_name,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ errstr(err));
return err;
}
return pfd;
@@ -12392,7 +12899,6 @@ struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *p
const struct bpf_tracepoint_opts *opts)
{
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
- char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
int pfd, err;
@@ -12405,7 +12911,7 @@ struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *p
if (pfd < 0) {
pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
prog->name, tp_category, tp_name,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ errstr(pfd));
return libbpf_err_ptr(pfd);
}
link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
@@ -12414,7 +12920,7 @@ struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *p
close(pfd);
pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
prog->name, tp_category, tp_name,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ errstr(err));
return libbpf_err_ptr(err);
}
return link;
@@ -12465,7 +12971,6 @@ bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
struct bpf_raw_tracepoint_opts *opts)
{
LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts);
- char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
int prog_fd, pfd;
@@ -12490,7 +12995,7 @@ bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
pfd = -errno;
free(link);
pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
- prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ prog->name, tp_name, errstr(pfd));
return libbpf_err_ptr(pfd);
}
link->fd = pfd;
@@ -12549,7 +13054,6 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro
const struct bpf_trace_opts *opts)
{
LIBBPF_OPTS(bpf_link_create_opts, link_opts);
- char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
int prog_fd, pfd;
@@ -12574,7 +13078,7 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro
pfd = -errno;
free(link);
pr_warn("prog '%s': failed to attach: %s\n",
- prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ prog->name, errstr(pfd));
return libbpf_err_ptr(pfd);
}
link->fd = pfd;
@@ -12615,7 +13119,6 @@ bpf_program_attach_fd(const struct bpf_program *prog,
const struct bpf_link_create_opts *opts)
{
enum bpf_attach_type attach_type;
- char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
int prog_fd, link_fd;
@@ -12637,7 +13140,7 @@ bpf_program_attach_fd(const struct bpf_program *prog,
free(link);
pr_warn("prog '%s': failed to attach to %s: %s\n",
prog->name, target_name,
- libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
+ errstr(link_fd));
return libbpf_err_ptr(link_fd);
}
link->fd = link_fd;
@@ -12669,6 +13172,34 @@ struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifi
}
struct bpf_link *
+bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd,
+ const struct bpf_cgroup_opts *opts)
+{
+ LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
+ __u32 relative_id;
+ int relative_fd;
+
+ if (!OPTS_VALID(opts, bpf_cgroup_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ relative_id = OPTS_GET(opts, relative_id, 0);
+ relative_fd = OPTS_GET(opts, relative_fd, 0);
+
+ if (relative_fd && relative_id) {
+ pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+
+ link_create_opts.cgroup.expected_revision = OPTS_GET(opts, expected_revision, 0);
+ link_create_opts.cgroup.relative_fd = relative_fd;
+ link_create_opts.cgroup.relative_id = relative_id;
+ link_create_opts.flags = OPTS_GET(opts, flags, 0);
+
+ return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", &link_create_opts);
+}
+
+struct bpf_link *
bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
const struct bpf_tcx_opts *opts)
{
@@ -12750,7 +13281,7 @@ struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
}
if (prog->type != BPF_PROG_TYPE_EXT) {
- pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
+ pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
prog->name);
return libbpf_err_ptr(-EINVAL);
}
@@ -12758,7 +13289,7 @@ struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
if (target_fd) {
LIBBPF_OPTS(bpf_link_create_opts, target_opts);
- btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
+ btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd, prog->obj->token_fd);
if (btf_id < 0)
return libbpf_err_ptr(btf_id);
@@ -12779,7 +13310,6 @@ bpf_program__attach_iter(const struct bpf_program *prog,
const struct bpf_iter_attach_opts *opts)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
- char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
int prog_fd, link_fd;
__u32 target_fd = 0;
@@ -12807,7 +13337,7 @@ bpf_program__attach_iter(const struct bpf_program *prog,
link_fd = -errno;
free(link);
pr_warn("prog '%s': failed to attach to iterator: %s\n",
- prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
+ prog->name, errstr(link_fd));
return libbpf_err_ptr(link_fd);
}
link->fd = link_fd;
@@ -12849,12 +13379,10 @@ struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
if (link_fd < 0) {
- char errmsg[STRERR_BUFSIZE];
-
link_fd = -errno;
free(link);
pr_warn("prog '%s': failed to attach to netfilter: %s\n",
- prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
+ prog->name, errstr(link_fd));
return libbpf_err_ptr(link_fd);
}
link->fd = link_fd;
@@ -12973,17 +13501,17 @@ int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
int err;
if (!bpf_map__is_struct_ops(map))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (map->fd < 0) {
pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
/* Ensure the type of a link is correct */
if (st_ops_link->map_fd < 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
/* It can be EBUSY if the map has been used to create or
@@ -13139,7 +13667,6 @@ perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
int cpu, int map_key)
{
struct perf_cpu_buf *cpu_buf;
- char msg[STRERR_BUFSIZE];
int err;
cpu_buf = calloc(1, sizeof(*cpu_buf));
@@ -13155,7 +13682,7 @@ perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
if (cpu_buf->fd < 0) {
err = -errno;
pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
- cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
+ cpu, errstr(err));
goto error;
}
@@ -13166,14 +13693,14 @@ perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
cpu_buf->base = NULL;
err = -errno;
pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
- cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
+ cpu, errstr(err));
goto error;
}
if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
err = -errno;
pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
- cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
+ cpu, errstr(err));
goto error;
}
@@ -13210,7 +13737,6 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = sample_period;
attr.wakeup_events = sample_period;
p.attr = &attr;
@@ -13249,7 +13775,6 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
{
const char *online_cpus_file = "/sys/devices/system/cpu/online";
struct bpf_map_info map;
- char msg[STRERR_BUFSIZE];
struct perf_buffer *pb;
bool *online = NULL;
__u32 map_info_len;
@@ -13272,7 +13797,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
*/
if (err != -EINVAL) {
pr_warn("failed to get map info for map FD %d: %s\n",
- map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
+ map_fd, errstr(err));
return ERR_PTR(err);
}
pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
@@ -13302,7 +13827,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
if (pb->epoll_fd < 0) {
err = -errno;
pr_warn("failed to create epoll instance: %s\n",
- libbpf_strerror_r(err, msg, sizeof(msg)));
+ errstr(err));
goto error;
}
@@ -13333,7 +13858,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
err = parse_cpu_mask_file(online_cpus_file, &online, &n);
if (err) {
- pr_warn("failed to get online CPU mask: %d\n", err);
+ pr_warn("failed to get online CPU mask: %s\n", errstr(err));
goto error;
}
@@ -13364,7 +13889,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
err = -errno;
pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
cpu, map_key, cpu_buf->fd,
- libbpf_strerror_r(err, msg, sizeof(msg)));
+ errstr(err));
goto error;
}
@@ -13375,7 +13900,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
err = -errno;
pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
cpu, cpu_buf->fd,
- libbpf_strerror_r(err, msg, sizeof(msg)));
+ errstr(err));
goto error;
}
j++;
@@ -13470,7 +13995,7 @@ int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
err = perf_buffer__process_records(pb, cpu_buf);
if (err) {
- pr_warn("error while processing records: %d\n", err);
+ pr_warn("error while processing records: %s\n", errstr(err));
return libbpf_err(err);
}
}
@@ -13554,7 +14079,8 @@ int perf_buffer__consume(struct perf_buffer *pb)
err = perf_buffer__process_records(pb, cpu_buf);
if (err) {
- pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
+ pr_warn("perf_buffer: failed to process records in buffer #%d: %s\n",
+ i, errstr(err));
return libbpf_err(err);
}
}
@@ -13570,12 +14096,12 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
if (!prog || attach_prog_fd < 0)
return libbpf_err(-EINVAL);
- if (prog->obj->loaded)
+ if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EINVAL);
if (attach_prog_fd && !attach_func_name) {
- /* remember attach_prog_fd and let bpf_program__load() find
- * BTF ID during the program load
+ /* Store attach_prog_fd. The BTF ID will be resolved later during
+ * the normal object/program load phase.
*/
prog->attach_prog_fd = attach_prog_fd;
return 0;
@@ -13583,7 +14109,7 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
if (attach_prog_fd) {
btf_id = libbpf_find_prog_btf_id(attach_func_name,
- attach_prog_fd);
+ attach_prog_fd, prog->obj->token_fd);
if (btf_id < 0)
return libbpf_err(btf_id);
} else {
@@ -13665,14 +14191,14 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
fd = open(fcpu, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
- pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
+ pr_warn("Failed to open cpu mask file %s: %s\n", fcpu, errstr(err));
return err;
}
len = read(fd, buf, sizeof(buf));
close(fd);
if (len <= 0) {
err = len ? -errno : -EINVAL;
- pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
+ pr_warn("Failed to read cpu mask from %s: %s\n", fcpu, errstr(err));
return err;
}
if (len >= sizeof(buf)) {
@@ -13758,42 +14284,27 @@ static int populate_skeleton_progs(const struct bpf_object *obj,
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts)
{
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
- .object_name = s->name,
- );
struct bpf_object *obj;
int err;
- /* Attempt to preserve opts->object_name, unless overriden by user
- * explicitly. Overwriting object name for skeletons is discouraged,
- * as it breaks global data maps, because they contain object name
- * prefix as their own map name prefix. When skeleton is generated,
- * bpftool is making an assumption that this name will stay the same.
- */
- if (opts) {
- memcpy(&skel_opts, opts, sizeof(*opts));
- if (!opts->object_name)
- skel_opts.object_name = s->name;
- }
-
- obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
- err = libbpf_get_error(obj);
- if (err) {
- pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
- s->name, err);
+ obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_warn("failed to initialize skeleton BPF object '%s': %s\n",
+ s->name, errstr(err));
return libbpf_err(err);
}
*s->obj = obj;
err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
if (err) {
- pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
+ pr_warn("failed to populate skeleton maps for '%s': %s\n", s->name, errstr(err));
return libbpf_err(err);
}
err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
if (err) {
- pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
+ pr_warn("failed to populate skeleton progs for '%s': %s\n", s->name, errstr(err));
return libbpf_err(err);
}
@@ -13823,13 +14334,13 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
if (err) {
- pr_warn("failed to populate subskeleton maps: %d\n", err);
+ pr_warn("failed to populate subskeleton maps: %s\n", errstr(err));
return libbpf_err(err);
}
err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
if (err) {
- pr_warn("failed to populate subskeleton maps: %d\n", err);
+ pr_warn("failed to populate subskeleton maps: %s\n", errstr(err));
return libbpf_err(err);
}
@@ -13840,7 +14351,7 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
map_type = btf__type_by_id(btf, map_type_id);
if (!btf_is_datasec(map_type)) {
- pr_warn("type for map '%1$s' is not a datasec: %2$s",
+ pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
bpf_map__name(map),
__btf_kind_str(btf_kind(map_type)));
return libbpf_err(-EINVAL);
@@ -13876,53 +14387,18 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
err = bpf_object__load(*s->obj);
if (err) {
- pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
+ pr_warn("failed to load BPF skeleton '%s': %s\n", s->name, errstr(err));
return libbpf_err(err);
}
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
struct bpf_map *map = *map_skel->map;
- size_t mmap_sz = bpf_map_mmap_sz(map);
- int prot, map_fd = map->fd;
- void **mmaped = map_skel->mmaped;
-
- if (!mmaped)
- continue;
-
- if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
- *mmaped = NULL;
- continue;
- }
- if (map->def.type == BPF_MAP_TYPE_ARENA) {
- *mmaped = map->mmaped;
+ if (!map_skel->mmaped)
continue;
- }
-
- if (map->def.map_flags & BPF_F_RDONLY_PROG)
- prot = PROT_READ;
- else
- prot = PROT_READ | PROT_WRITE;
- /* Remap anonymous mmap()-ed "map initialization image" as
- * a BPF map-backed mmap()-ed memory, but preserving the same
- * memory address. This will cause kernel to change process'
- * page table to point to a different piece of kernel memory,
- * but from userspace point of view memory address (and its
- * contents, being identical at this point) will stay the
- * same. This mapping will be released by bpf_object__close()
- * as per normal clean up procedure, so we don't need to worry
- * about it from skeleton's clean up perspective.
- */
- *mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
- if (*mmaped == MAP_FAILED) {
- err = -errno;
- *mmaped = NULL;
- pr_warn("failed to re-mmap() map '%s': %d\n",
- bpf_map__name(map), err);
- return libbpf_err(err);
- }
+ *map_skel->mmaped = map->mmaped;
}
return 0;
@@ -13950,8 +14426,8 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
if (err) {
- pr_warn("prog '%s': failed to auto-attach: %d\n",
- bpf_program__name(prog), err);
+ pr_warn("prog '%s': failed to auto-attach: %s\n",
+ bpf_program__name(prog), errstr(err));
return libbpf_err(err);
}
@@ -13988,13 +14464,20 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
}
link = map_skel->link;
+ if (!link) {
+ pr_warn("map '%s': BPF map skeleton link is uninitialized\n",
+ bpf_map__name(map));
+ continue;
+ }
+
if (*link)
continue;
*link = bpf_map__attach_struct_ops(map);
if (!*link) {
err = -errno;
- pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err);
+ pr_warn("map '%s': failed to auto-attach: %s\n",
+ bpf_map__name(map), errstr(err));
return libbpf_err(err);
}
}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 64a6a3d323e3..65e68e964b89 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -24,8 +24,25 @@
extern "C" {
#endif
+/**
+ * @brief **libbpf_major_version()** provides the major version of libbpf.
+ * @return An integer, the major version number
+ */
LIBBPF_API __u32 libbpf_major_version(void);
+
+/**
+ * @brief **libbpf_minor_version()** provides the minor version of libbpf.
+ * @return An integer, the minor version number
+ */
LIBBPF_API __u32 libbpf_minor_version(void);
+
+/**
+ * @brief **libbpf_version_string()** provides the version of libbpf in a
+ * human-readable form, e.g., "v1.7".
+ * @return Pointer to a static string containing the version
+ *
+ * The format is *not* a part of a stable API and may change in the future.
+ */
LIBBPF_API const char *libbpf_version_string(void);
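/* Illustrative usage sketch (not part of this patch): reporting the
 * linked-in libbpf version at startup.
 */
	printf("using libbpf %u.%u (%s)\n", libbpf_major_version(),
	       libbpf_minor_version(), libbpf_version_string());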
enum libbpf_errno {
@@ -49,6 +66,14 @@ enum libbpf_errno {
__LIBBPF_ERRNO__END,
};
+/**
+ * @brief **libbpf_strerror()** converts the provided error code into a
+ * human-readable string.
+ * @param err The error code to convert
+ * @param buf Pointer to a buffer where the error message will be stored
+ * @param size The number of bytes in the buffer
+ * @return 0, on success; negative error code, otherwise
+ */
LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
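/* Illustrative usage sketch (not part of this patch); 'err' stands for a
 * negative error code returned by some libbpf API.
 */
	char msg[128];

	if (libbpf_strerror(err, msg, sizeof(msg)))
		snprintf(msg, sizeof(msg), "unknown error %d", err);
	fprintf(stderr, "libbpf: %s\n", msg);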
/**
@@ -152,7 +177,7 @@ struct bpf_object_open_opts {
* log_buf and log_level settings.
*
* If specified, this log buffer will be passed for:
- * - each BPF progral load (BPF_PROG_LOAD) attempt, unless overriden
+	 *   - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden
* with bpf_program__set_log() on per-program level, to get
* BPF verifier log output.
* - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
@@ -242,6 +267,19 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts);
/**
+ * @brief **bpf_object__prepare()** prepares BPF object for loading:
+ * performs ELF processing, relocations, prepares final state of BPF program
+ * instructions (accessible with bpf_program__insns()), creates and
+ * (potentially) pins maps. Leaves BPF object in the state ready for program
+ * loading.
+ * @param obj Pointer to a valid BPF object instance returned by
+ * **bpf_object__open*()** API
+ * @return 0, on success; negative error code otherwise (the error code is
+ * also stored in errno)
+ */
+LIBBPF_API int bpf_object__prepare(struct bpf_object *obj);
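/* Illustrative sketch (not part of this patch): split open/prepare/load to
 * inspect the final, post-relocation instructions before anything is loaded;
 * "prog.bpf.o" is a hypothetical object file.
 */
	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
	struct bpf_program *p;

	if (!obj || bpf_object__prepare(obj))
		goto err;
	bpf_object__for_each_program(p, obj)
		printf("%s: %zu insns\n", bpf_program__name(p),
		       bpf_program__insn_cnt(p));
	if (bpf_object__load(obj))
		goto err;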
+
+/**
* @brief **bpf_object__load()** loads BPF object into kernel.
* @param obj Pointer to a valid BPF object instance returned by
* **bpf_object__open*()** APIs
@@ -294,6 +332,14 @@ LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);
+/**
+ * @brief **bpf_object__token_fd** is an accessor for BPF token FD associated
+ * with BPF object.
+ * @param obj Pointer to a valid BPF object
+ * @return BPF token FD or -1, if it wasn't set
+ */
+LIBBPF_API int bpf_object__token_fd(const struct bpf_object *obj);
+
struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
@@ -402,7 +448,7 @@ LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);
/**
* @brief **bpf_program__unpin()** unpins the BPF program from a file
- * in the BPFFS specified by a path. This decrements the programs
+ * in the BPFFS specified by a path. This decrements the program's in-kernel
* reference count.
*
* The file pinning the BPF program can also be unlinked by a different
@@ -435,14 +481,12 @@ LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);
/**
* @brief **bpf_link__unpin()** unpins the BPF link from a file
- * in the BPFFS specified by a path. This decrements the links
- * reference count.
+ * in the BPFFS. This decrements the link's in-kernel reference count.
*
* The file pinning the BPF link can also be unlinked by a different
* process in which case this function will return an error.
*
- * @param prog BPF program to unpin
- * @param path file path to the pin in a BPF file system
+ * @param link BPF link to unpin
* @return 0, on success; negative error code, otherwise
*/
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
@@ -455,7 +499,7 @@ LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
/**
* @brief **bpf_program__attach()** is a generic function for attaching
* a BPF program based on auto-detection of program type, attach type,
- * and extra paremeters, where applicable.
+ * and extra parameters, where applicable.
*
* @param prog BPF program to attach
* @return Reference to the newly created BPF link; or NULL is returned on error,
@@ -478,9 +522,11 @@ struct bpf_perf_event_opts {
__u64 bpf_cookie;
/* don't use BPF link when attaching the BPF program */
bool force_ioctl_attach;
+ /* don't automatically enable the event */
+ bool dont_enable;
size_t :0;
};
-#define bpf_perf_event_opts__last_field force_ioctl_attach
+#define bpf_perf_event_opts__last_field dont_enable
LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
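/* Illustrative sketch (not part of this patch): attach without auto-enabling
 * the event, then enable it manually once setup is complete; 'pfd' is a
 * hypothetical perf_event_open() FD.
 */
	LIBBPF_OPTS(bpf_perf_event_opts, pe_opts, .dont_enable = true);
	struct bpf_link *link;

	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
	if (!link)
		return -errno;
	/* ... finish setup, then: */
	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0)
		return -errno;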
@@ -544,10 +590,12 @@ struct bpf_kprobe_multi_opts {
bool retprobe;
/* create session kprobes */
bool session;
+ /* enforce unique match */
+ bool unique_match;
size_t :0;
};
-#define bpf_kprobe_multi_opts__last_field session
+#define bpf_kprobe_multi_opts__last_field unique_match
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
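/* Illustrative sketch (not part of this patch): refuse to attach unless the
 * glob pattern resolves to exactly one kernel function; the pattern is
 * hypothetical.
 */
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .unique_match = true);
	struct bpf_link *link;

	link = bpf_program__attach_kprobe_multi_opts(prog, "hid_hw_req*", &opts);
	if (!link) /* zero or multiple matches fail with -EINVAL */
		return -errno;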
@@ -569,10 +617,12 @@ struct bpf_uprobe_multi_opts {
size_t cnt;
/* create return uprobes */
bool retprobe;
+	/* create session uprobes */
+ bool session;
size_t :0;
};
-#define bpf_uprobe_multi_opts__last_field retprobe
+#define bpf_uprobe_multi_opts__last_field session
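/* Illustrative sketch (not part of this patch): attach a SEC("uprobe.session")
 * program to every matching function; the binary path and pattern are
 * hypothetical.
 */
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, .session = true);
	struct bpf_link *link;

	link = bpf_program__attach_uprobe_multi(prog, -1 /* any pid */,
						"/usr/bin/app", "handle_*",
						&opts);
	if (!link)
		return -errno;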
/**
* @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program
@@ -679,7 +729,7 @@ struct bpf_uprobe_opts {
/**
* @brief **bpf_program__attach_uprobe()** attaches a BPF program
* to the userspace function which is found by binary path and
- * offset. You can optionally specify a particular proccess to attach
+ * offset. You can optionally specify a particular process to attach
* to. You can also optionally attach the program to the function
* exit instead of entry.
*
@@ -852,6 +902,21 @@ LIBBPF_API struct bpf_link *
bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
const struct bpf_netkit_opts *opts);
+struct bpf_cgroup_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ __u32 flags;
+ __u32 relative_fd;
+ __u32 relative_id;
+ __u64 expected_revision;
+ size_t :0;
+};
+#define bpf_cgroup_opts__last_field expected_revision
+
+LIBBPF_API struct bpf_link *
+bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd,
+ const struct bpf_cgroup_opts *opts);
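/* Illustrative sketch (not part of this patch): ordered (mprog-style) cgroup
 * attachment, placing this program before an already attached one;
 * 'cgroup_fd' and 'other_prog_fd' are hypothetical.
 */
	LIBBPF_OPTS(bpf_cgroup_opts, opts,
		    .flags = BPF_F_BEFORE,
		    .relative_fd = other_prog_fd);
	struct bpf_link *link;

	link = bpf_program__attach_cgroup_opts(prog, cgroup_fd, &opts);
	if (!link)
		return -errno;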
+
struct bpf_map;
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
@@ -915,6 +980,12 @@ LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_le
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);
+LIBBPF_API struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog);
+LIBBPF_API __u32 bpf_program__func_info_cnt(const struct bpf_program *prog);
+
+LIBBPF_API struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog);
+LIBBPF_API __u32 bpf_program__line_info_cnt(const struct bpf_program *prog);
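/* Illustrative sketch (not part of this patch): dump the func_info records
 * of a loaded program.
 */
	struct bpf_func_info *fi = bpf_program__func_info(prog);
	__u32 i, cnt = bpf_program__func_info_cnt(prog);

	for (i = 0; i < cnt; i++)
		printf("insn_off=%u type_id=%u\n", fi[i].insn_off, fi[i].type_id);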
+
/**
* @brief **bpf_program__set_attach_target()** sets BTF-based attach target
* for supported BPF program types:
@@ -922,8 +993,13 @@ LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf,
* - fentry/fexit/fmod_ret;
* - lsm;
* - freplace.
- * @param prog BPF program to set the attach type for
- * @param type attach type to set the BPF map to have
+ * @param prog BPF program to configure; must not be loaded yet.
+ * @param attach_prog_fd FD of the target BPF program (for freplace/extension).
+ * If >0 and attach_func_name is omitted, BTF ID resolution is deferred until
+ * program load.
+ * @param attach_func_name Target function name. Used either together with
+ * attach_prog_fd to find the destination BTF type ID in that BPF program, or
+ * alone (no attach_prog_fd) to resolve a kernel (vmlinux/module) BTF ID.
+ * Must be provided if attach_prog_fd is 0.
* @return error code; or 0 if no error occurred.
*/
LIBBPF_API int
@@ -1025,6 +1101,7 @@ LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);
/**
* @brief **bpf_map__set_value_size()** sets map value size.
* @param map the BPF map instance
+ * @param size the new value size
* @return 0, on success; negative error, otherwise
*
* There is a special case for maps with associated memory-mapped regions, like
@@ -1129,7 +1206,7 @@ LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
* per-CPU values value size has to be aligned up to closest 8 bytes for
* alignment reasons, so expected size is: `round_up(value_size, 8)
* * libbpf_num_possible_cpus()`.
- * @flags extra flags passed to kernel for this operation
+ * @param flags extra flags passed to kernel for this operation
* @return 0, on success; negative error, otherwise
*
* **bpf_map__lookup_elem()** is high-level equivalent of
@@ -1153,7 +1230,7 @@ LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
* per-CPU values value size has to be aligned up to closest 8 bytes for
* alignment reasons, so expected size is: `round_up(value_size, 8)
* * libbpf_num_possible_cpus()`.
- * @flags extra flags passed to kernel for this operation
+ * @param flags extra flags passed to kernel for this operation
* @return 0, on success; negative error, otherwise
*
* **bpf_map__update_elem()** is high-level equivalent of
@@ -1169,7 +1246,7 @@ LIBBPF_API int bpf_map__update_elem(const struct bpf_map *map,
* @param map BPF map to delete element from
* @param key pointer to memory containing bytes of the key
* @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
- * @flags extra flags passed to kernel for this operation
+ * @param flags extra flags passed to kernel for this operation
* @return 0, on success; negative error, otherwise
*
* **bpf_map__delete_elem()** is high-level equivalent of
@@ -1192,7 +1269,7 @@ LIBBPF_API int bpf_map__delete_elem(const struct bpf_map *map,
* per-CPU values value size has to be aligned up to closest 8 bytes for
* alignment reasons, so expected size is: `round_up(value_size, 8)
* * libbpf_num_possible_cpus()`.
- * @flags extra flags passed to kernel for this operation
+ * @param flags extra flags passed to kernel for this operation
* @return 0, on success; negative error, otherwise
*
* **bpf_map__lookup_and_delete_elem()** is high-level equivalent of
@@ -1218,6 +1295,28 @@ LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
*/
LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map,
const void *cur_key, void *next_key, size_t key_sz);
+/**
+ * @brief **bpf_map__set_exclusive_program()** sets a map to be exclusive to the
+ * specified program. This must be called *before* the map is created.
+ *
+ * @param map BPF map to make exclusive.
+ * @param prog BPF program to be the exclusive user of the map. Must belong
+ * to the same bpf_object as the map.
+ * @return 0 on success; a negative error code otherwise.
+ *
+ * This function must be called after the BPF object is opened but before
+ * it is loaded. Once the object is loaded, only the specified program
+ * will be able to access the map's contents.
+ */
+LIBBPF_API int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_program *prog);
+
+/**
+ * @brief **bpf_map__exclusive_program()** returns the exclusive program
+ * that is registered with the map (if any).
+ * @param map BPF map to which the exclusive program is registered.
+ * @return the registered exclusive program.
+ */
+LIBBPF_API struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map);
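/* Illustrative sketch (not part of this patch): reserve a map for a single
 * program between open and load; map and program names are hypothetical.
 */
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "stats");
	struct bpf_program *prog = bpf_object__find_program_by_name(obj, "ingress");

	if (bpf_map__set_exclusive_program(map, prog) || bpf_object__load(obj))
		return -1;
	/* from here on, only 'ingress' may access 'stats' */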
struct bpf_xdp_set_link_opts {
size_t sz;
@@ -1258,6 +1357,7 @@ enum bpf_tc_attach_point {
BPF_TC_INGRESS = 1 << 0,
BPF_TC_EGRESS = 1 << 1,
BPF_TC_CUSTOM = 1 << 2,
+ BPF_TC_QDISC = 1 << 3,
};
#define BPF_TC_PARENT(a, b) \
@@ -1272,9 +1372,11 @@ struct bpf_tc_hook {
int ifindex;
enum bpf_tc_attach_point attach_point;
__u32 parent;
+ __u32 handle;
+ const char *qdisc;
size_t :0;
};
-#define bpf_tc_hook__last_field parent
+#define bpf_tc_hook__last_field qdisc
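/* Illustrative sketch (not part of this patch): create a qdisc by name via
 * the extended hook; the handle/qdisc values are assumptions modeled on the
 * selftests.
 */
	LIBBPF_OPTS(bpf_tc_hook, hook,
		    .ifindex = ifindex,
		    .attach_point = BPF_TC_QDISC,
		    .parent = TC_H_ROOT,
		    .handle = 0x8000000,
		    .qdisc = "fq");

	err = bpf_tc_hook_create(&hook);
	if (err)
		return err;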
struct bpf_tc_opts {
size_t sz;
@@ -1539,6 +1641,7 @@ struct perf_buffer_opts {
* @param sample_cb function called on each received data record
* @param lost_cb function called when record loss has occurred
* @param ctx user-provided extra context passed into *sample_cb* and *lost_cb*
+ * @param opts optional parameters for the perf buffer, can be null
* @return a new instance of struct perf_buffer on success, NULL on error with
* *errno* containing an error code
*/
@@ -1593,11 +1696,11 @@ LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_i
* memory region of the ring buffer.
* This ring buffer can be used to implement a custom events consumer.
* The ring buffer starts with the *struct perf_event_mmap_page*, which
- * holds the ring buffer managment fields, when accessing the header
+ * holds the ring buffer management fields, when accessing the header
* structure it's important to be SMP aware.
* You can refer to *perf_event_read_simple* for a simple example.
* @param pb the perf buffer structure
- * @param buf_idx the buffer index to retreive
+ * @param buf_idx the buffer index to retrieve
* @param buf (out) gets the base pointer of the mmap()'ed memory
* @param buf_size (out) gets the size of the mmap()'ed region
* @return 0 on success, negative error code for failure
@@ -1759,9 +1862,10 @@ struct gen_loader_opts {
const char *insns;
__u32 data_sz;
__u32 insns_sz;
+ bool gen_hash;
};
-#define gen_loader_opts__last_field insns_sz
+#define gen_loader_opts__last_field gen_hash
LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj,
struct gen_loader_opts *opts);
@@ -1786,9 +1890,14 @@ struct bpf_linker_file_opts {
struct bpf_linker;
LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
+LIBBPF_API struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts);
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
const char *filename,
const struct bpf_linker_file_opts *opts);
+LIBBPF_API int bpf_linker__add_fd(struct bpf_linker *linker, int fd,
+ const struct bpf_linker_file_opts *opts);
+LIBBPF_API int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
+ const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
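/* Illustrative sketch (not part of this patch): FD- and buffer-based static
 * linking with the new APIs; 'out_fd', 'in_fd', 'buf' and 'buf_sz' are
 * hypothetical.
 */
	struct bpf_linker *linker = bpf_linker__new_fd(out_fd, NULL);

	if (!linker)
		return -errno;
	if (bpf_linker__add_fd(linker, in_fd, NULL) ||
	    bpf_linker__add_buf(linker, buf, buf_sz, NULL) ||
	    bpf_linker__finalize(linker)) {
		bpf_linker__free(linker);
		return -1;
	}
	bpf_linker__free(linker);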
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 8f0d9ea3b1b4..8ed8749907d4 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -421,9 +421,34 @@ LIBBPF_1.5.0 {
global:
btf__distill_base;
btf__relocate;
+ btf_ext__endianness;
+ btf_ext__set_endianness;
bpf_map__autoattach;
bpf_map__set_autoattach;
+ bpf_object__token_fd;
bpf_program__attach_sockmap;
ring__consume_n;
ring_buffer__consume_n;
} LIBBPF_1.4.0;
+
+LIBBPF_1.6.0 {
+ global:
+ bpf_linker__add_buf;
+ bpf_linker__add_fd;
+ bpf_linker__new_fd;
+ bpf_object__prepare;
+ bpf_prog_stream_read;
+ bpf_program__attach_cgroup_opts;
+ bpf_program__func_info;
+ bpf_program__func_info_cnt;
+ bpf_program__line_info;
+ bpf_program__line_info_cnt;
+ btf__add_decl_attr;
+ btf__add_type_attr;
+} LIBBPF_1.5.0;
+
+LIBBPF_1.7.0 {
+ global:
+ bpf_map__set_exclusive_program;
+ bpf_map__exclusive_program;
+} LIBBPF_1.6.0;
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
deleted file mode 100644
index 6b180172ec6b..000000000000
--- a/tools/lib/bpf/libbpf_errno.c
+++ /dev/null
@@ -1,75 +0,0 @@
-// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-
-/*
- * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
- * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
- * Copyright (C) 2015 Huawei Inc.
- * Copyright (C) 2017 Nicira, Inc.
- */
-
-#undef _GNU_SOURCE
-#include <stdio.h>
-#include <string.h>
-
-#include "libbpf.h"
-#include "libbpf_internal.h"
-
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
-#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
-#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
-#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
-
-static const char *libbpf_strerror_table[NR_ERRNO] = {
- [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
- [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
- [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
- [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
- [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
- [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
- [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
- [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
- [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
- [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
- [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
- [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
- [ERRCODE_OFFSET(NLPARSE)] = "Incorrect netlink message parsing",
-};
-
-int libbpf_strerror(int err, char *buf, size_t size)
-{
- int ret;
-
- if (!buf || !size)
- return libbpf_err(-EINVAL);
-
- err = err > 0 ? err : -err;
-
- if (err < __LIBBPF_ERRNO__START) {
- ret = strerror_r(err, buf, size);
- buf[size - 1] = '\0';
- return libbpf_err_errno(ret);
- }
-
- if (err < __LIBBPF_ERRNO__END) {
- const char *msg;
-
- msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
- ret = snprintf(buf, size, "%s", msg);
- buf[size - 1] = '\0';
- /* The length of the buf and msg is positive.
- * A negative number may be returned only when the
- * size exceeds INT_MAX. Not likely to appear.
- */
- if (ret >= size)
- return libbpf_err(-ERANGE);
- return 0;
- }
-
- ret = snprintf(buf, size, "Unknown libbpf error %d", err);
- buf[size - 1] = '\0';
- if (ret >= size)
- return libbpf_err(-ERANGE);
- return libbpf_err(-ENOENT);
-}
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 408df59e0771..fc59b21b51b5 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -10,6 +10,7 @@
#define __LIBBPF_LIBBPF_INTERNAL_H
#include <stdlib.h>
+#include <byteswap.h>
#include <limits.h>
#include <errno.h>
#include <linux/err.h>
@@ -73,6 +74,8 @@
#define ELF64_ST_VISIBILITY(o) ((o) & 0x03)
#endif
+#define JUMPTABLES_SEC ".jumptables"
+
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
@@ -171,6 +174,16 @@ do { \
#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+/**
+ * @brief **libbpf_errstr()** returns string corresponding to numeric errno
+ * @param err negative numeric errno
+ * @return pointer to a string representation of the errno; the string is
+ * invalidated upon the next call.
+ */
+const char *libbpf_errstr(int err);
+
+#define errstr(err) libbpf_errstr(err)
+
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
@@ -408,6 +421,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
int btf_load_into_kernel(struct btf *btf,
char *log_buf, size_t log_sz, __u32 log_level,
int token_fd);
+struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd);
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
@@ -448,11 +462,11 @@ struct btf_ext_info {
*
* The func_info subsection layout:
* record size for struct bpf_func_info in the func_info subsection
- * struct btf_sec_func_info for section #1
+ * struct btf_ext_info_sec for section #1
* a list of bpf_func_info records for section #1
* where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
* but may not be identical
- * struct btf_sec_func_info for section #2
+ * struct btf_ext_info_sec for section #2
* a list of bpf_func_info records for section #2
* ......
*
@@ -484,6 +498,8 @@ struct btf_ext {
struct btf_ext_header *hdr;
void *data;
};
+ void *data_swapped;
+ bool swapped_endian;
struct btf_ext_info func_info;
struct btf_ext_info line_info;
struct btf_ext_info core_relo_info;
@@ -511,6 +527,32 @@ struct bpf_line_info_min {
__u32 line_col;
};
+/* Functions to byte-swap info records */
+
+typedef void (*info_rec_bswap_fn)(void *);
+
+static inline void bpf_func_info_bswap(struct bpf_func_info *i)
+{
+ i->insn_off = bswap_32(i->insn_off);
+ i->type_id = bswap_32(i->type_id);
+}
+
+static inline void bpf_line_info_bswap(struct bpf_line_info *i)
+{
+ i->insn_off = bswap_32(i->insn_off);
+ i->file_name_off = bswap_32(i->file_name_off);
+ i->line_off = bswap_32(i->line_off);
+ i->line_col = bswap_32(i->line_col);
+}
+
+static inline void bpf_core_relo_bswap(struct bpf_core_relo *i)
+{
+ i->insn_off = bswap_32(i->insn_off);
+ i->type_id = bswap_32(i->type_id);
+ i->access_str_off = bswap_32(i->access_str_off);
+ i->kind = bswap_32(i->kind);
+}
+
enum btf_field_iter_kind {
BTF_FIELD_ITER_IDS,
BTF_FIELD_ITER_STRS,
@@ -588,6 +630,16 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}
+static inline void bpf_insn_bswap(struct bpf_insn *insn)
+{
+ __u8 tmp_reg = insn->dst_reg;
+
+ insn->dst_reg = insn->src_reg;
+ insn->src_reg = tmp_reg;
+ insn->off = bswap_16(insn->off);
+ insn->imm = bswap_32(insn->imm);
+}
+
/* Unconditionally dup FD, ensuring it doesn't use [0, 2] range.
* Original FD is not closed or altered in any other way.
* Preserves original FD value, if it's invalid (negative).
@@ -627,6 +679,15 @@ static inline int sys_dup3(int oldfd, int newfd, int flags)
return syscall(__NR_dup3, oldfd, newfd, flags);
}
+/* Some versions of Android don't provide memfd_create() in their libc
+ * implementation, so avoid complications and just go straight to Linux
+ * syscall.
+ */
+static inline int sys_memfd_create(const char *name, unsigned flags)
+{
+ return syscall(__NR_memfd_create, name, flags);
+}
+
/* Point *fixed_fd* to the same file that *tmp_fd* points to.
* Regardless of success, *tmp_fd* is closed.
* Whatever *fixed_fd* pointed to is closed silently.
@@ -663,6 +724,11 @@ static inline bool is_pow_of_2(size_t x)
return x && (x & (x - 1)) == 0;
}
+static inline __u32 ror32(__u32 v, int bits)
+{
+ return (v >> bits) | (v << (32 - bits));
+}
+
#define PROG_LOAD_ATTEMPTS 5
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);
@@ -687,4 +753,8 @@ int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
int probe_fd(int fd);
+#define SHA256_DIGEST_LENGTH 32
+#define SHA256_DWORD_SIZE (SHA256_DIGEST_LENGTH / sizeof(__u64))
+
+void libbpf_sha256(const void *data, size_t len, __u8 out[SHA256_DIGEST_LENGTH]);
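/* Illustrative sketch (not part of this patch): hashing a program's
 * instruction stream with the internal helper; 'insns' and 'insn_cnt' are
 * hypothetical.
 */
	__u8 digest[SHA256_DIGEST_LENGTH];

	libbpf_sha256(insns, insn_cnt * sizeof(struct bpf_insn), digest);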
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h
index 1e1be467bede..60b2600be88a 100644
--- a/tools/lib/bpf/libbpf_legacy.h
+++ b/tools/lib/bpf/libbpf_legacy.h
@@ -76,7 +76,7 @@ enum libbpf_strict_mode {
* first BPF program or map creation operation. This is done only if
* kernel is too old to support memcg-based memory accounting for BPF
* subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
- * but it can be overriden with libbpf_set_memlock_rlim() API.
+ * but it can be overridden with libbpf_set_memlock_rlim() API.
* Note that libbpf_set_memlock_rlim() needs to be called before
* the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
* operation.
@@ -97,7 +97,7 @@ LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
* @brief **libbpf_get_error()** extracts the error code from the passed
* pointer
* @param ptr pointer returned from libbpf API function
- * @return error code; or 0 if no error occured
+ * @return error code; or 0 if no error occurred
*
* Note, as of libbpf 1.0 this function is not necessary and not recommended
* to be used. Libbpf doesn't return error code embedded into the pointer
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 9dfbe7750f56..bccf4bb747e1 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -364,6 +364,10 @@ static int probe_map_create(enum bpf_map_type map_type)
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
break;
+ case BPF_MAP_TYPE_INSN_ARRAY:
+ key_size = sizeof(__u32);
+ value_size = sizeof(struct bpf_insn_array_value);
+ break;
case BPF_MAP_TYPE_UNSPEC:
default:
return -EOPNOTSUPP;
diff --git a/tools/lib/bpf/libbpf_utils.c b/tools/lib/bpf/libbpf_utils.c
new file mode 100644
index 000000000000..ac3beae54cf6
--- /dev/null
+++ b/tools/lib/bpf/libbpf_utils.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ * Copyright (C) 2017 Nicira, Inc.
+ */
+
+#undef _GNU_SOURCE
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/kernel.h>
+
+#include "libbpf.h"
+#include "libbpf_internal.h"
+
+#ifndef ENOTSUPP
+#define ENOTSUPP 524
+#endif
+
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
+#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
+#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
+
+static const char *libbpf_strerror_table[NR_ERRNO] = {
+ [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
+ [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
+ [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
+ [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
+ [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
+ [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
+ [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
+ [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
+ [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
+ [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
+ [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
+ [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
+ [ERRCODE_OFFSET(NLPARSE)] = "Incorrect netlink message parsing",
+};
+
+int libbpf_strerror(int err, char *buf, size_t size)
+{
+ int ret;
+
+ if (!buf || !size)
+ return libbpf_err(-EINVAL);
+
+ err = err > 0 ? err : -err;
+
+ if (err < __LIBBPF_ERRNO__START) {
+ ret = strerror_r(err, buf, size);
+ buf[size - 1] = '\0';
+ return libbpf_err_errno(ret);
+ }
+
+ if (err < __LIBBPF_ERRNO__END) {
+ const char *msg;
+
+ msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
+ ret = snprintf(buf, size, "%s", msg);
+ buf[size - 1] = '\0';
+ /* Both buf and msg have positive length, so snprintf() can
+ * return a negative value only if size exceeds INT_MAX,
+ * which will not happen in practice.
+ */
+ if (ret >= size)
+ return libbpf_err(-ERANGE);
+ return 0;
+ }
+
+ ret = snprintf(buf, size, "Unknown libbpf error %d", err);
+ buf[size - 1] = '\0';
+ if (ret >= size)
+ return libbpf_err(-ERANGE);
+ return libbpf_err(-ENOENT);
+}
+
+const char *libbpf_errstr(int err)
+{
+ static __thread char buf[12];
+
+ if (err > 0)
+ err = -err;
+
+ switch (err) {
+ case -E2BIG: return "-E2BIG";
+ case -EACCES: return "-EACCES";
+ case -EADDRINUSE: return "-EADDRINUSE";
+ case -EADDRNOTAVAIL: return "-EADDRNOTAVAIL";
+ case -EAGAIN: return "-EAGAIN";
+ case -EALREADY: return "-EALREADY";
+ case -EBADF: return "-EBADF";
+ case -EBADFD: return "-EBADFD";
+ case -EBUSY: return "-EBUSY";
+ case -ECANCELED: return "-ECANCELED";
+ case -ECHILD: return "-ECHILD";
+ case -EDEADLK: return "-EDEADLK";
+ case -EDOM: return "-EDOM";
+ case -EEXIST: return "-EEXIST";
+ case -EFAULT: return "-EFAULT";
+ case -EFBIG: return "-EFBIG";
+ case -EILSEQ: return "-EILSEQ";
+ case -EINPROGRESS: return "-EINPROGRESS";
+ case -EINTR: return "-EINTR";
+ case -EINVAL: return "-EINVAL";
+ case -EIO: return "-EIO";
+ case -EISDIR: return "-EISDIR";
+ case -ELOOP: return "-ELOOP";
+ case -EMFILE: return "-EMFILE";
+ case -EMLINK: return "-EMLINK";
+ case -EMSGSIZE: return "-EMSGSIZE";
+ case -ENAMETOOLONG: return "-ENAMETOOLONG";
+ case -ENFILE: return "-ENFILE";
+ case -ENODATA: return "-ENODATA";
+ case -ENODEV: return "-ENODEV";
+ case -ENOENT: return "-ENOENT";
+ case -ENOEXEC: return "-ENOEXEC";
+ case -ENOLINK: return "-ENOLINK";
+ case -ENOMEM: return "-ENOMEM";
+ case -ENOSPC: return "-ENOSPC";
+ case -ENOTBLK: return "-ENOTBLK";
+ case -ENOTDIR: return "-ENOTDIR";
+ case -ENOTSUPP: return "-ENOTSUPP";
+ case -ENOTTY: return "-ENOTTY";
+ case -ENXIO: return "-ENXIO";
+ case -EOPNOTSUPP: return "-EOPNOTSUPP";
+ case -EOVERFLOW: return "-EOVERFLOW";
+ case -EPERM: return "-EPERM";
+ case -EPIPE: return "-EPIPE";
+ case -EPROTO: return "-EPROTO";
+ case -EPROTONOSUPPORT: return "-EPROTONOSUPPORT";
+ case -ERANGE: return "-ERANGE";
+ case -EROFS: return "-EROFS";
+ case -ESPIPE: return "-ESPIPE";
+ case -ESRCH: return "-ESRCH";
+ case -ETXTBSY: return "-ETXTBSY";
+ case -EUCLEAN: return "-EUCLEAN";
+ case -EXDEV: return "-EXDEV";
+ default:
+ snprintf(buf, sizeof(buf), "%d", err);
+ return buf;
+ }
+}
+
+static inline __u32 get_unaligned_be32(const void *p)
+{
+ __be32 val;
+
+ memcpy(&val, p, sizeof(val));
+ return be32_to_cpu(val);
+}
+
+static inline void put_unaligned_be32(__u32 val, void *p)
+{
+ __be32 be_val = cpu_to_be32(val);
+
+ memcpy(p, &be_val, sizeof(be_val));
+}
+
+#define SHA256_BLOCK_LENGTH 64
+#define Ch(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
+#define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+#define Sigma_0(x) (ror32((x), 2) ^ ror32((x), 13) ^ ror32((x), 22))
+#define Sigma_1(x) (ror32((x), 6) ^ ror32((x), 11) ^ ror32((x), 25))
+#define sigma_0(x) (ror32((x), 7) ^ ror32((x), 18) ^ ((x) >> 3))
+#define sigma_1(x) (ror32((x), 17) ^ ror32((x), 19) ^ ((x) >> 10))
+
+static const __u32 sha256_K[64] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
+ 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
+ 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
+ 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
+ 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
+ 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
+};
+
+#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) \
+ { \
+ __u32 tmp = h + Sigma_1(e) + Ch(e, f, g) + sha256_K[i] + w[i]; \
+ d += tmp; \
+ h = tmp + Sigma_0(a) + Maj(a, b, c); \
+ }
+
+static void sha256_blocks(__u32 state[8], const __u8 *data, size_t nblocks)
+{
+ while (nblocks--) {
+ __u32 a = state[0];
+ __u32 b = state[1];
+ __u32 c = state[2];
+ __u32 d = state[3];
+ __u32 e = state[4];
+ __u32 f = state[5];
+ __u32 g = state[6];
+ __u32 h = state[7];
+ __u32 w[64];
+ int i;
+
+ for (i = 0; i < 16; i++)
+ w[i] = get_unaligned_be32(&data[4 * i]);
+ for (; i < ARRAY_SIZE(w); i++)
+ w[i] = sigma_1(w[i - 2]) + w[i - 7] +
+ sigma_0(w[i - 15]) + w[i - 16];
+ for (i = 0; i < ARRAY_SIZE(w); i += 8) {
+ SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
+ SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
+ SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
+ SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
+ SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
+ SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
+ SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
+ SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
+ }
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ state[5] += f;
+ state[6] += g;
+ state[7] += h;
+ data += SHA256_BLOCK_LENGTH;
+ }
+}
+
+void libbpf_sha256(const void *data, size_t len, __u8 out[SHA256_DIGEST_LENGTH])
+{
+ __u32 state[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
+ 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
+ const __be64 bitcount = cpu_to_be64((__u64)len * 8);
+ __u8 final_data[2 * SHA256_BLOCK_LENGTH] = { 0 };
+ size_t final_len = len % SHA256_BLOCK_LENGTH;
+ int i;
+
+ sha256_blocks(state, data, len / SHA256_BLOCK_LENGTH);
+
+ memcpy(final_data, data + len - final_len, final_len);
+ final_data[final_len] = 0x80;
+ final_len = roundup(final_len + 9, SHA256_BLOCK_LENGTH);
+ memcpy(&final_data[final_len - 8], &bitcount, 8);
+
+ sha256_blocks(state, final_data, final_len / SHA256_BLOCK_LENGTH);
+
+ for (i = 0; i < ARRAY_SIZE(state); i++)
+ put_unaligned_be32(state[i], &out[4 * i]);
+}
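
libbpf_sha256() above is an internal helper (declared in libbpf_internal.h), not part of the public API, so calling it requires building inside the libbpf tree. A sketch under that assumption, checked against the standard FIPS 180-2 "abc" test vector:

#include <stdio.h>

#include "libbpf_internal.h"

int main(void)
{
	__u8 out[SHA256_DIGEST_LENGTH];
	int i;

	libbpf_sha256("abc", 3, out);
	/* expected: ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad */
	for (i = 0; i < SHA256_DIGEST_LENGTH; i++)
		printf("%02x", out[i]);
	printf("\n");
	return 0;
}
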
diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
index d6e5eff967cb..99331e317dee 100644
--- a/tools/lib/bpf/libbpf_version.h
+++ b/tools/lib/bpf/libbpf_version.h
@@ -4,6 +4,6 @@
#define __LIBBPF_VERSION_H
#define LIBBPF_MAJOR_VERSION 1
-#define LIBBPF_MINOR_VERSION 5
+#define LIBBPF_MINOR_VERSION 7
#endif /* __LIBBPF_VERSION_H */
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 9cd3d4109788..f4403e3cf994 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -4,6 +4,10 @@
*
* Copyright (c) 2021 Facebook
*/
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
@@ -16,6 +20,7 @@
#include <elf.h>
#include <libelf.h>
#include <fcntl.h>
+#include <sys/mman.h>
#include "libbpf.h"
#include "btf.h"
#include "libbpf_internal.h"
@@ -135,6 +140,7 @@ struct bpf_linker {
int fd;
Elf *elf;
Elf64_Ehdr *elf_hdr;
+ bool swapped_endian;
/* Output sections metadata */
struct dst_sec *secs;
@@ -150,15 +156,19 @@ struct bpf_linker {
/* global (including extern) ELF symbols */
int glob_sym_cnt;
struct glob_sym *glob_syms;
+
+ bool fd_is_owned;
};
#define pr_warn_elf(fmt, ...) \
libbpf_print(LIBBPF_WARN, "libbpf: " fmt ": %s\n", ##__VA_ARGS__, elf_errmsg(-1))
-static int init_output_elf(struct bpf_linker *linker, const char *file);
+static int init_output_elf(struct bpf_linker *linker);
-static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
- const struct bpf_linker_file_opts *opts,
+static int bpf_linker_add_file(struct bpf_linker *linker, int fd,
+ const char *filename);
+
+static int linker_load_obj_file(struct bpf_linker *linker,
struct src_obj *obj);
static int linker_sanity_check_elf(struct src_obj *obj);
static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec);
@@ -189,7 +199,7 @@ void bpf_linker__free(struct bpf_linker *linker)
if (linker->elf)
elf_end(linker->elf);
- if (linker->fd >= 0)
+ if (linker->fd >= 0 && linker->fd_is_owned)
close(linker->fd);
strset__free(linker->strtab_strs);
@@ -231,9 +241,63 @@ struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts
if (!linker)
return errno = ENOMEM, NULL;
- linker->fd = -1;
+ linker->filename = strdup(filename);
+ if (!linker->filename) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ linker->fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
+ if (linker->fd < 0) {
+ err = -errno;
+ pr_warn("failed to create '%s': %d\n", filename, err);
+ goto err_out;
+ }
+ linker->fd_is_owned = true;
+
+ err = init_output_elf(linker);
+ if (err)
+ goto err_out;
+
+ return linker;
+
+err_out:
+ bpf_linker__free(linker);
+ return errno = -err, NULL;
+}
+
+struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts)
+{
+ struct bpf_linker *linker;
+ char filename[32];
+ int err;
+
+ if (fd < 0)
+ return errno = EINVAL, NULL;
+
+ if (!OPTS_VALID(opts, bpf_linker_opts))
+ return errno = EINVAL, NULL;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ pr_warn_elf("libelf initialization failed");
+ return errno = EINVAL, NULL;
+ }
+
+ linker = calloc(1, sizeof(*linker));
+ if (!linker)
+ return errno = ENOMEM, NULL;
+
+ snprintf(filename, sizeof(filename), "fd:%d", fd);
+ linker->filename = strdup(filename);
+ if (!linker->filename) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ linker->fd = fd;
+ linker->fd_is_owned = false;
- err = init_output_elf(linker, filename);
+ err = init_output_elf(linker);
if (err)
goto err_out;
@@ -292,23 +356,12 @@ static Elf64_Sym *add_new_sym(struct bpf_linker *linker, size_t *sym_idx)
return sym;
}
-static int init_output_elf(struct bpf_linker *linker, const char *file)
+static int init_output_elf(struct bpf_linker *linker)
{
int err, str_off;
Elf64_Sym *init_sym;
struct dst_sec *sec;
- linker->filename = strdup(file);
- if (!linker->filename)
- return -ENOMEM;
-
- linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
- if (linker->fd < 0) {
- err = -errno;
- pr_warn("failed to create '%s': %d\n", file, err);
- return err;
- }
-
linker->elf = elf_begin(linker->fd, ELF_C_WRITE, NULL);
if (!linker->elf) {
pr_warn_elf("failed to create ELF object");
@@ -324,13 +377,8 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
linker->elf_hdr->e_machine = EM_BPF;
linker->elf_hdr->e_type = ET_REL;
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
-#else
-#error "Unknown __BYTE_ORDER__"
-#endif
+ /* Set unknown ELF endianness, assign later from input files */
+ linker->elf_hdr->e_ident[EI_DATA] = ELFDATANONE;
/* STRTAB */
/* initialize strset with an empty string to conform to ELF */
@@ -396,6 +444,8 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
pr_warn_elf("failed to create SYMTAB data");
return -EINVAL;
}
+ /* Ensure libelf translates byte-order of symbol records */
+ sec->data->d_type = ELF_T_SYM;
str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
if (str_off < 0)
@@ -437,19 +487,16 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
return 0;
}
-int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
- const struct bpf_linker_file_opts *opts)
+static int bpf_linker_add_file(struct bpf_linker *linker, int fd,
+ const char *filename)
{
struct src_obj obj = {};
int err = 0;
- if (!OPTS_VALID(opts, bpf_linker_file_opts))
- return libbpf_err(-EINVAL);
+ obj.filename = filename;
+ obj.fd = fd;
- if (!linker->elf)
- return libbpf_err(-EINVAL);
-
- err = err ?: linker_load_obj_file(linker, filename, opts, &obj);
+ err = err ?: linker_load_obj_file(linker, &obj);
err = err ?: linker_append_sec_data(linker, &obj);
err = err ?: linker_append_elf_syms(linker, &obj);
err = err ?: linker_append_elf_relos(linker, &obj);
@@ -464,12 +511,91 @@ int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
free(obj.sym_map);
if (obj.elf)
elf_end(obj.elf);
- if (obj.fd >= 0)
- close(obj.fd);
+ return err;
+}
+
+int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts)
+{
+ int fd, err;
+
+ if (!OPTS_VALID(opts, bpf_linker_file_opts))
+ return libbpf_err(-EINVAL);
+
+ if (!linker->elf)
+ return libbpf_err(-EINVAL);
+
+ fd = open(filename, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ err = -errno;
+ pr_warn("failed to open file '%s': %s\n", filename, errstr(err));
+ return libbpf_err(err);
+ }
+
+ err = bpf_linker_add_file(linker, fd, filename);
+ close(fd);
+ return libbpf_err(err);
+}
+
+int bpf_linker__add_fd(struct bpf_linker *linker, int fd,
+ const struct bpf_linker_file_opts *opts)
+{
+ char filename[32];
+ int err;
+
+ if (!OPTS_VALID(opts, bpf_linker_file_opts))
+ return libbpf_err(-EINVAL);
+
+ if (!linker->elf)
+ return libbpf_err(-EINVAL);
+
+ if (fd < 0)
+ return libbpf_err(-EINVAL);
+
+ snprintf(filename, sizeof(filename), "fd:%d", fd);
+ err = bpf_linker_add_file(linker, fd, filename);
return libbpf_err(err);
}
+int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
+ const struct bpf_linker_file_opts *opts)
+{
+ char filename[32];
+ int fd, written, ret;
+
+ if (!OPTS_VALID(opts, bpf_linker_file_opts))
+ return libbpf_err(-EINVAL);
+
+ if (!linker->elf)
+ return libbpf_err(-EINVAL);
+
+ snprintf(filename, sizeof(filename), "mem:%p+%zu", buf, buf_sz);
+
+ fd = sys_memfd_create(filename, 0);
+ if (fd < 0) {
+ ret = -errno;
+ pr_warn("failed to create memfd '%s': %s\n", filename, errstr(ret));
+ return libbpf_err(ret);
+ }
+
+ written = 0;
+ while (written < buf_sz) {
+ ret = write(fd, buf + written, buf_sz - written);
+ if (ret < 0) {
+ ret = -errno;
+ pr_warn("failed to write '%s': %s\n", filename, errstr(ret));
+ goto err_out;
+ }
+ written += ret;
+ }
+
+ ret = bpf_linker_add_file(linker, fd, filename);
+err_out:
+ close(fd);
+ return libbpf_err(ret);
+}
+
static bool is_dwarf_sec_name(const char *name)
{
/* approximation, but the actual list is too long */
@@ -535,65 +661,69 @@ static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name)
return sec;
}
-static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
- const struct bpf_linker_file_opts *opts,
+static int linker_load_obj_file(struct bpf_linker *linker,
struct src_obj *obj)
{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- const int host_endianness = ELFDATA2LSB;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- const int host_endianness = ELFDATA2MSB;
-#else
-#error "Unknown __BYTE_ORDER__"
-#endif
int err = 0;
Elf_Scn *scn;
Elf_Data *data;
Elf64_Ehdr *ehdr;
Elf64_Shdr *shdr;
struct src_sec *sec;
+ unsigned char obj_byteorder;
+ unsigned char link_byteorder = linker->elf_hdr->e_ident[EI_DATA];
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ const unsigned char host_byteorder = ELFDATA2LSB;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ const unsigned char host_byteorder = ELFDATA2MSB;
+#else
+#error "Unknown __BYTE_ORDER__"
+#endif
- pr_debug("linker: adding object file '%s'...\n", filename);
-
- obj->filename = filename;
+ pr_debug("linker: adding object file '%s'...\n", obj->filename);
- obj->fd = open(filename, O_RDONLY | O_CLOEXEC);
- if (obj->fd < 0) {
- err = -errno;
- pr_warn("failed to open file '%s': %d\n", filename, err);
- return err;
- }
obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
if (!obj->elf) {
- err = -errno;
- pr_warn_elf("failed to parse ELF file '%s'", filename);
- return err;
+ pr_warn_elf("failed to parse ELF file '%s'", obj->filename);
+ return -EINVAL;
}
/* Sanity check ELF file high-level properties */
ehdr = elf64_getehdr(obj->elf);
if (!ehdr) {
- err = -errno;
- pr_warn_elf("failed to get ELF header for %s", filename);
+ pr_warn_elf("failed to get ELF header for %s", obj->filename);
+ return -EINVAL;
+ }
+
+ /* Linker output endianness set by first input object */
+ obj_byteorder = ehdr->e_ident[EI_DATA];
+ if (obj_byteorder != ELFDATA2LSB && obj_byteorder != ELFDATA2MSB) {
+ err = -EOPNOTSUPP;
+ pr_warn("unknown byte order of ELF file %s\n", obj->filename);
return err;
}
- if (ehdr->e_ident[EI_DATA] != host_endianness) {
+ if (link_byteorder == ELFDATANONE) {
+ linker->elf_hdr->e_ident[EI_DATA] = obj_byteorder;
+ linker->swapped_endian = obj_byteorder != host_byteorder;
+ pr_debug("linker: set %s-endian output byte order\n",
+ obj_byteorder == ELFDATA2MSB ? "big" : "little");
+ } else if (link_byteorder != obj_byteorder) {
err = -EOPNOTSUPP;
- pr_warn_elf("unsupported byte order of ELF file %s", filename);
+ pr_warn("byte order mismatch with ELF file %s\n", obj->filename);
return err;
}
+
if (ehdr->e_type != ET_REL
|| ehdr->e_machine != EM_BPF
|| ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
err = -EOPNOTSUPP;
- pr_warn_elf("unsupported kind of ELF file %s", filename);
+ pr_warn_elf("unsupported kind of ELF file %s", obj->filename);
return err;
}
if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
- err = -errno;
- pr_warn_elf("failed to get SHSTRTAB section index for %s", filename);
- return err;
+ pr_warn_elf("failed to get SHSTRTAB section index for %s", obj->filename);
+ return -EINVAL;
}
scn = NULL;
@@ -603,26 +733,23 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
shdr = elf64_getshdr(scn);
if (!shdr) {
- err = -errno;
pr_warn_elf("failed to get section #%zu header for %s",
- sec_idx, filename);
- return err;
+ sec_idx, obj->filename);
+ return -EINVAL;
}
sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
if (!sec_name) {
- err = -errno;
pr_warn_elf("failed to get section #%zu name for %s",
- sec_idx, filename);
- return err;
+ sec_idx, obj->filename);
+ return -EINVAL;
}
data = elf_getdata(scn, 0);
if (!data) {
- err = -errno;
pr_warn_elf("failed to get section #%zu (%s) data from %s",
- sec_idx, sec_name, filename);
- return err;
+ sec_idx, sec_name, obj->filename);
+ return -EINVAL;
}
sec = add_src_sec(obj, sec_name);
@@ -656,7 +783,8 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
obj->btf = btf__new(data->d_buf, shdr->sh_size);
err = libbpf_get_error(obj->btf);
if (err) {
- pr_warn("failed to parse .BTF from %s: %d\n", filename, err);
+ pr_warn("failed to parse .BTF from %s: %s\n",
+ obj->filename, errstr(err));
return err;
}
sec->skipped = true;
@@ -666,7 +794,8 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
obj->btf_ext = btf_ext__new(data->d_buf, shdr->sh_size);
err = libbpf_get_error(obj->btf_ext);
if (err) {
- pr_warn("failed to parse .BTF.ext from '%s': %d\n", filename, err);
+ pr_warn("failed to parse .BTF.ext from '%s': %s\n",
+ obj->filename, errstr(err));
return err;
}
sec->skipped = true;
@@ -683,7 +812,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
break;
default:
pr_warn("unrecognized section #%zu (%s) in %s\n",
- sec_idx, sec_name, filename);
+ sec_idx, sec_name, obj->filename);
err = -EINVAL;
return err;
}
@@ -1109,6 +1238,24 @@ static bool sec_content_is_same(struct dst_sec *dst_sec, struct src_sec *src_sec
return true;
}
+static bool is_exec_sec(struct dst_sec *sec)
+{
+ if (!sec || sec->ephemeral)
+ return false;
+ return (sec->shdr->sh_type == SHT_PROGBITS) &&
+ (sec->shdr->sh_flags & SHF_EXECINSTR);
+}
+
+static void exec_sec_bswap(void *raw_data, int size)
+{
+ const int insn_cnt = size / sizeof(struct bpf_insn);
+ struct bpf_insn *insn = raw_data;
+ int i;
+
+ for (i = 0; i < insn_cnt; i++, insn++)
+ bpf_insn_bswap(insn);
+}
+
static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src)
{
void *tmp;
@@ -1168,6 +1315,10 @@ static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src
memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz);
/* now copy src data at a properly aligned offset */
memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size);
+
+ /* convert added bpf insns to native byte-order */
+ if (linker->swapped_endian && is_exec_sec(dst))
+ exec_sec_bswap(dst->raw_data + dst_align_sz, src->shdr->sh_size);
}
dst->sec_sz = dst_final_sz;
@@ -1224,7 +1375,7 @@ static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj
} else {
if (!secs_match(dst_sec, src_sec)) {
pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name);
- return -1;
+ return -EINVAL;
}
/* "license" and "version" sections are deduped */
@@ -1413,7 +1564,7 @@ recur:
return true;
case BTF_KIND_PTR:
/* just validate overall shape of the referenced type, so no
- * contents comparison for struct/union, and allowd fwd vs
+ * contents comparison for struct/union, and allowed fwd vs
* struct/union
*/
exact = false;
@@ -1874,6 +2025,9 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
obj->sym_map[src_sym_idx] = dst_sec->sec_sym_idx;
return 0;
}
+
+ if (strcmp(src_sec->sec_name, JUMPTABLES_SEC) == 0)
+ goto add_sym;
}
if (sym_bind == STB_LOCAL)
@@ -1962,7 +2116,7 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
/* If existing symbol is a strong resolved symbol, bail out,
* because we lost the resolution battle and have nothing to
- * contribute. We already checked abover that there is no
+ * contribute. We already checked above that there is no
* strong-strong conflict. We also already tightened binding
* and visibility, so nothing else to contribute at that point.
*/
@@ -2011,7 +2165,7 @@ add_sym:
obj->sym_map[src_sym_idx] = dst_sym_idx;
- if (sym_type == STT_SECTION && dst_sym) {
+ if (sym_type == STT_SECTION && dst_sec) {
dst_sec->sec_sym_idx = dst_sym_idx;
dst_sym->st_value = 0;
}
@@ -2071,7 +2225,7 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob
}
} else if (!secs_match(dst_sec, src_sec)) {
pr_warn("sections %s are not compatible\n", src_sec->sec_name);
- return -1;
+ return -EINVAL;
}
/* shdr->sh_link points to SYMTAB */
@@ -2415,6 +2569,10 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
if (glob_sym && glob_sym->var_idx >= 0) {
__s64 sz;
+ /* FUNCs don't have size, nothing to update */
+ if (btf_is_func(t))
+ continue;
+
dst_var = &dst_sec->sec_vars[glob_sym->var_idx];
/* Because underlying BTF type might have
* changed, so might its size have changed, so
@@ -2628,27 +2786,32 @@ int bpf_linker__finalize(struct bpf_linker *linker)
if (!sec->scn)
continue;
+ /* restore sections with bpf insns to target byte-order */
+ if (linker->swapped_endian && is_exec_sec(sec))
+ exec_sec_bswap(sec->raw_data, sec->sec_sz);
+
sec->data->d_buf = sec->raw_data;
}
/* Finalize ELF layout */
if (elf_update(linker->elf, ELF_C_NULL) < 0) {
- err = -errno;
+ err = -EINVAL;
pr_warn_elf("failed to finalize ELF layout");
return libbpf_err(err);
}
/* Write out final ELF contents */
if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
- err = -errno;
+ err = -EINVAL;
pr_warn_elf("failed to write ELF contents");
return libbpf_err(err);
}
elf_end(linker->elf);
- close(linker->fd);
-
linker->elf = NULL;
+
+ if (linker->fd_is_owned)
+ close(linker->fd);
linker->fd = -1;
return 0;
@@ -2696,6 +2859,7 @@ static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name,
static int finalize_btf(struct bpf_linker *linker)
{
+ enum btf_endianness link_endianness;
LIBBPF_OPTS(btf_dedup_opts, opts);
struct btf *btf = linker->btf;
const void *raw_data;
@@ -2729,17 +2893,24 @@ static int finalize_btf(struct bpf_linker *linker)
err = finalize_btf_ext(linker);
if (err) {
- pr_warn(".BTF.ext generation failed: %d\n", err);
+ pr_warn(".BTF.ext generation failed: %s\n", errstr(err));
return err;
}
opts.btf_ext = linker->btf_ext;
err = btf__dedup(linker->btf, &opts);
if (err) {
- pr_warn("BTF dedup failed: %d\n", err);
+ pr_warn("BTF dedup failed: %s\n", errstr(err));
return err;
}
+ /* Set .BTF and .BTF.ext output byte order */
+ link_endianness = linker->elf_hdr->e_ident[EI_DATA] == ELFDATA2MSB ?
+ BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
+ btf__set_endianness(linker->btf, link_endianness);
+ if (linker->btf_ext)
+ btf_ext__set_endianness(linker->btf_ext, link_endianness);
+
/* Emit .BTF section */
raw_data = btf__raw_data(linker->btf, &raw_sz);
if (!raw_data)
@@ -2747,7 +2918,7 @@ static int finalize_btf(struct bpf_linker *linker)
err = emit_elf_data_sec(linker, BTF_ELF_SEC, 8, raw_data, raw_sz);
if (err) {
- pr_warn("failed to write out .BTF ELF section: %d\n", err);
+ pr_warn("failed to write out .BTF ELF section: %s\n", errstr(err));
return err;
}
@@ -2759,7 +2930,7 @@ static int finalize_btf(struct bpf_linker *linker)
err = emit_elf_data_sec(linker, BTF_EXT_ELF_SEC, 8, raw_data, raw_sz);
if (err) {
- pr_warn("failed to write out .BTF.ext ELF section: %d\n", err);
+ pr_warn("failed to write out .BTF.ext ELF section: %s\n", errstr(err));
return err;
}
}
@@ -2935,7 +3106,7 @@ static int finalize_btf_ext(struct bpf_linker *linker)
err = libbpf_get_error(linker->btf_ext);
if (err) {
linker->btf_ext = NULL;
- pr_warn("failed to parse final .BTF.ext data: %d\n", err);
+ pr_warn("failed to parse final .BTF.ext data: %s\n", errstr(err));
goto out;
}
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 68a2def17175..c997e69d507f 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -529,9 +529,9 @@ int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id)
}
-typedef int (*qdisc_config_t)(struct libbpf_nla_req *req);
+typedef int (*qdisc_config_t)(struct libbpf_nla_req *req, const struct bpf_tc_hook *hook);
-static int clsact_config(struct libbpf_nla_req *req)
+static int clsact_config(struct libbpf_nla_req *req, const struct bpf_tc_hook *hook)
{
req->tc.tcm_parent = TC_H_CLSACT;
req->tc.tcm_handle = TC_H_MAKE(TC_H_CLSACT, 0);
@@ -539,6 +539,16 @@ static int clsact_config(struct libbpf_nla_req *req)
return nlattr_add(req, TCA_KIND, "clsact", sizeof("clsact"));
}
+static int qdisc_config(struct libbpf_nla_req *req, const struct bpf_tc_hook *hook)
+{
+ const char *qdisc = OPTS_GET(hook, qdisc, NULL);
+
+ req->tc.tcm_parent = OPTS_GET(hook, parent, TC_H_ROOT);
+ req->tc.tcm_handle = OPTS_GET(hook, handle, 0);
+
+ return nlattr_add(req, TCA_KIND, qdisc, strlen(qdisc) + 1);
+}
+
static int attach_point_to_config(struct bpf_tc_hook *hook,
qdisc_config_t *config)
{
@@ -552,6 +562,9 @@ static int attach_point_to_config(struct bpf_tc_hook *hook,
return 0;
case BPF_TC_CUSTOM:
return -EOPNOTSUPP;
+ case BPF_TC_QDISC:
+ *config = &qdisc_config;
+ return 0;
default:
return -EINVAL;
}
@@ -596,7 +609,7 @@ static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
req.tc.tcm_family = AF_UNSPEC;
req.tc.tcm_ifindex = OPTS_GET(hook, ifindex, 0);
- ret = config(&req);
+ ret = config(&req, hook);
if (ret < 0)
return ret;
@@ -639,6 +652,7 @@ int bpf_tc_hook_destroy(struct bpf_tc_hook *hook)
case BPF_TC_INGRESS:
case BPF_TC_EGRESS:
return libbpf_err(__bpf_tc_detach(hook, NULL, true));
+ case BPF_TC_QDISC:
case BPF_TC_INGRESS | BPF_TC_EGRESS:
return libbpf_err(tc_qdisc_delete(hook));
case BPF_TC_CUSTOM:
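
The new BPF_TC_QDISC attach point lets bpf_tc_hook_create() create (and bpf_tc_hook_destroy() delete) an arbitrary qdisc named via the qdisc/parent/handle fields read through OPTS_GET() above. A minimal sketch, assuming those fields exist on struct bpf_tc_hook; ifindex 1 and the "fq" qdisc kind are placeholders only:

#include <bpf/libbpf.h>

static int create_fq_qdisc(void)
{
	LIBBPF_OPTS(bpf_tc_hook, hook,
		.ifindex = 1,                 /* placeholder interface */
		.attach_point = BPF_TC_QDISC,
		.qdisc = "fq");               /* qdisc kind to create */

	return bpf_tc_hook_create(&hook);     /* undo: bpf_tc_hook_destroy(&hook) */
}
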
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 975e265eab3b..06663f9ea581 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -63,16 +63,16 @@ static int validate_nla(struct nlattr *nla, int maxtype,
minlen = nla_attr_minlen[pt->type];
if (libbpf_nla_len(nla) < minlen)
- return -1;
+ return -EINVAL;
if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen)
- return -1;
+ return -EINVAL;
if (pt->type == LIBBPF_NLA_STRING) {
char *data = libbpf_nla_data(nla);
if (data[libbpf_nla_len(nla) - 1] != '\0')
- return -1;
+ return -EINVAL;
}
return 0;
@@ -118,19 +118,18 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
if (policy) {
err = validate_nla(nla, maxtype, policy);
if (err < 0)
- goto errout;
+ return err;
}
- if (tb[type])
+ if (tb[type]) {
pr_warn("Attribute of type %#x found multiple times in message, "
"previous attribute is being ignored.\n", type);
+ }
tb[type] = nla;
}
- err = 0;
-errout:
- return err;
+ return 0;
}
/**
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
index 63a4d5ad12d1..6eea5edba58a 100644
--- a/tools/lib/bpf/relo_core.c
+++ b/tools/lib/bpf/relo_core.c
@@ -64,7 +64,6 @@ enum libbpf_print_level {
#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
-#include "str_error.h"
#include "libbpf_internal.h"
#endif
@@ -683,7 +682,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
{
const struct bpf_core_accessor *acc;
const struct btf_type *t;
- __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
+ __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id, elem_id;
const struct btf_member *m;
const struct btf_type *mt;
bool bitfield;
@@ -706,8 +705,14 @@ static int bpf_core_calc_field_relo(const char *prog_name,
if (!acc->name) {
if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
*val = spec->bit_offset / 8;
- /* remember field size for load/store mem size */
- sz = btf__resolve_size(spec->btf, acc->type_id);
+ /* remember field size for load/store mem size;
+ * note, for arrays we care about individual element
+ * sizes, not the overall array size
+ */
+ t = skip_mods_and_typedefs(spec->btf, acc->type_id, &elem_id);
+ while (btf_is_array(t))
+ t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
+ sz = btf__resolve_size(spec->btf, elem_id);
if (sz < 0)
return -EINVAL;
*field_sz = sz;
@@ -767,7 +772,17 @@ static int bpf_core_calc_field_relo(const char *prog_name,
case BPF_CORE_FIELD_BYTE_OFFSET:
*val = byte_off;
if (!bitfield) {
- *field_sz = byte_sz;
+ /* remember field size for load/store mem size;
+ * note, for arrays we care about individual element
+ * sizes, not the overall array size
+ */
+ t = skip_mods_and_typedefs(spec->btf, field_type_id, &elem_id);
+ while (btf_is_array(t))
+ t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
+ sz = btf__resolve_size(spec->btf, elem_id);
+ if (sz < 0)
+ return -EINVAL;
+ *field_sz = sz;
*type_id = field_type_id;
}
break;
@@ -1339,7 +1354,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
cands->cands[i].id, cand_spec);
if (err < 0) {
bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
- pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
+ pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n",
prog_name, relo_idx, i, spec_buf, err);
return err;
}
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index bfd8dac4c0cc..00ec4837a06d 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -88,8 +88,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
err = bpf_map_get_info_by_fd(map_fd, &info, &len);
if (err) {
err = -errno;
- pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
- map_fd, err);
+ pr_warn("ringbuf: failed to get map info for fd=%d: %s\n",
+ map_fd, errstr(err));
return libbpf_err(err);
}
@@ -123,8 +123,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
if (tmp == MAP_FAILED) {
err = -errno;
- pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
- map_fd, err);
+ pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %s\n",
+ map_fd, errstr(err));
goto err_out;
}
r->consumer_pos = tmp;
@@ -142,8 +142,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
if (tmp == MAP_FAILED) {
err = -errno;
- pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n",
- map_fd, err);
+ pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %s\n",
+ map_fd, errstr(err));
goto err_out;
}
r->producer_pos = tmp;
@@ -156,8 +156,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
e->data.fd = rb->ring_cnt;
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
err = -errno;
- pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n",
- map_fd, err);
+ pr_warn("ringbuf: failed to epoll add map fd=%d: %s\n",
+ map_fd, errstr(err));
goto err_out;
}
@@ -205,7 +205,7 @@ ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (rb->epoll_fd < 0) {
err = -errno;
- pr_warn("ringbuf: failed to create epoll instance: %d\n", err);
+ pr_warn("ringbuf: failed to create epoll instance: %s\n", errstr(err));
goto err_out;
}
@@ -458,7 +458,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
err = bpf_map_get_info_by_fd(map_fd, &info, &len);
if (err) {
err = -errno;
- pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err);
+ pr_warn("user ringbuf: failed to get map info for fd=%d: %s\n",
+ map_fd, errstr(err));
return err;
}
@@ -474,8 +475,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0);
if (tmp == MAP_FAILED) {
err = -errno;
- pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
- map_fd, err);
+ pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %s\n",
+ map_fd, errstr(err));
return err;
}
rb->consumer_pos = tmp;
@@ -494,8 +495,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
map_fd, rb->page_size);
if (tmp == MAP_FAILED) {
err = -errno;
- pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n",
- map_fd, err);
+ pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %s\n",
+ map_fd, errstr(err));
return err;
}
@@ -506,7 +507,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
rb_epoll->events = EPOLLOUT;
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) {
err = -errno;
- pr_warn("user ringbuf: failed to epoll add map fd=%d: %d\n", map_fd, err);
+ pr_warn("user ringbuf: failed to epoll add map fd=%d: %s\n", map_fd, errstr(err));
return err;
}
@@ -531,7 +532,7 @@ user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts)
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (rb->epoll_fd < 0) {
err = -errno;
- pr_warn("user ringbuf: failed to create epoll instance: %d\n", err);
+ pr_warn("user ringbuf: failed to create epoll instance: %s\n", errstr(err));
goto err_out;
}
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
index 1e82ab06c3eb..6a8f5c7a02eb 100644
--- a/tools/lib/bpf/skel_internal.h
+++ b/tools/lib/bpf/skel_internal.h
@@ -13,10 +13,15 @@
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
+#include <linux/keyctl.h>
#include <stdlib.h>
#include "bpf.h"
#endif
+#ifndef SHA256_DIGEST_LENGTH
+#define SHA256_DIGEST_LENGTH 32
+#endif
+
#ifndef __NR_bpf
# if defined(__mips__) && defined(_ABIO32)
# define __NR_bpf 4355
@@ -64,6 +69,11 @@ struct bpf_load_and_run_opts {
__u32 data_sz;
__u32 insns_sz;
const char *errstr;
+ void *signature;
+ __u32 signature_sz;
+ __s32 keyring_id;
+ void *excl_prog_hash;
+ __u32 excl_prog_hash_sz;
};
long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
@@ -107,7 +117,7 @@ static inline void skel_free(const void *p)
* The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
* skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
* does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
- * is not nessary.
+ * is not necessary.
*
* For user space:
* skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
@@ -220,14 +230,19 @@ static inline int skel_map_create(enum bpf_map_type map_type,
const char *map_name,
__u32 key_size,
__u32 value_size,
- __u32 max_entries)
+ __u32 max_entries,
+ const void *excl_prog_hash,
+ __u32 excl_prog_hash_sz)
{
- const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
+ const size_t attr_sz = offsetofend(union bpf_attr, excl_prog_hash_size);
union bpf_attr attr;
memset(&attr, 0, attr_sz);
attr.map_type = map_type;
+ attr.excl_prog_hash = (unsigned long) excl_prog_hash;
+ attr.excl_prog_hash_size = excl_prog_hash_sz;
+
strncpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.key_size = key_size;
attr.value_size = value_size;
@@ -300,6 +315,35 @@ static inline int skel_link_create(int prog_fd, int target_fd,
return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
}
+static inline int skel_obj_get_info_by_fd(int fd)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, info);
+ __u8 sha[SHA256_DIGEST_LENGTH];
+ struct bpf_map_info info;
+ __u32 info_len = sizeof(info);
+ union bpf_attr attr;
+
+ memset(&info, 0, sizeof(info));
+ info.hash = (long) &sha;
+ info.hash_size = SHA256_DIGEST_LENGTH;
+
+ memset(&attr, 0, attr_sz);
+ attr.info.bpf_fd = fd;
+ attr.info.info = (long) &info;
+ attr.info.info_len = info_len;
+ return skel_sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
+}
+
+static inline int skel_map_freeze(int fd)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_sz);
+ attr.map_fd = fd;
+
+ return skel_sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
+}
#ifdef __KERNEL__
#define set_err
#else
@@ -308,12 +352,13 @@ static inline int skel_link_create(int prog_fd, int target_fd,
static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
{
- const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array);
+ const size_t prog_load_attr_sz = offsetofend(union bpf_attr, keyring_id);
const size_t test_run_attr_sz = offsetofend(union bpf_attr, test);
int map_fd = -1, prog_fd = -1, key = 0, err;
union bpf_attr attr;
- err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
+ err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1,
+ opts->excl_prog_hash, opts->excl_prog_hash_sz);
if (map_fd < 0) {
opts->errstr = "failed to create loader map";
set_err;
@@ -327,11 +372,34 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
goto out;
}
+#ifndef __KERNEL__
+ err = skel_map_freeze(map_fd);
+ if (err < 0) {
+ opts->errstr = "failed to freeze map";
+ set_err;
+ goto out;
+ }
+ err = skel_obj_get_info_by_fd(map_fd);
+ if (err < 0) {
+ opts->errstr = "failed to fetch obj info";
+ set_err;
+ goto out;
+ }
+#endif
+
memset(&attr, 0, prog_load_attr_sz);
attr.prog_type = BPF_PROG_TYPE_SYSCALL;
attr.insns = (long) opts->insns;
attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
attr.license = (long) "Dual BSD/GPL";
+#ifndef __KERNEL__
+ attr.signature = (long) opts->signature;
+ attr.signature_size = opts->signature_sz;
+#else
+ if (opts->signature || opts->signature_sz)
+ pr_warn("signatures are not supported from bpf_preload\n");
+#endif
+ attr.keyring_id = opts->keyring_id;
memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
attr.fd_array = (long) &map_fd;
attr.log_level = opts->ctx->log_level;
@@ -351,10 +419,11 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
attr.test.ctx_size_in = opts->ctx->sz;
err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
if (err < 0 || (int)attr.test.retval < 0) {
- opts->errstr = "failed to execute loader prog";
if (err < 0) {
+ opts->errstr = "failed to execute loader prog";
set_err;
} else {
+ opts->errstr = "error returned by loader prog";
err = (int)attr.test.retval;
#ifndef __KERNEL__
errno = -err;
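
The new bpf_load_and_run_opts fields above carry a loader-program signature, a keyring id for verification, and an exclusive-program hash pinned into the frozen loader map. A sketch of how a generated light skeleton might populate them; every buffer and the keyring id here are placeholders supplied by the skeleton generator, not real values:

static int run_signed_loader(struct bpf_loader_ctx *ctx,
			     void *data, __u32 data_sz,
			     void *insns, __u32 insns_sz,
			     void *sig, __u32 sig_sz, __s32 keyring_id,
			     void *prog_hash, __u32 prog_hash_sz)
{
	struct bpf_load_and_run_opts opts = {
		.ctx = ctx,
		.data = data, .data_sz = data_sz,
		.insns = insns, .insns_sz = insns_sz,
		.signature = sig, .signature_sz = sig_sz,
		.keyring_id = keyring_id,
		.excl_prog_hash = prog_hash,
		.excl_prog_hash_sz = prog_hash_sz,
	};

	return bpf_load_and_run(&opts);
}
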
diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c
deleted file mode 100644
index 5e6a1e27ddf9..000000000000
--- a/tools/lib/bpf/str_error.c
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-#undef _GNU_SOURCE
-#include <string.h>
-#include <stdio.h>
-#include <errno.h>
-#include "str_error.h"
-
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
-/*
- * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl
- * libc, while checking strerror_r() return to avoid having to check this in
- * all places calling it.
- */
-char *libbpf_strerror_r(int err, char *dst, int len)
-{
- int ret = strerror_r(err < 0 ? -err : err, dst, len);
- /* on glibc <2.13, ret == -1 and errno is set, if strerror_r() can't
- * handle the error, on glibc >=2.13 *positive* (errno-like) error
- * code is returned directly
- */
- if (ret == -1)
- ret = errno;
- if (ret) {
- if (ret == EINVAL)
- /* strerror_r() doesn't recognize this specific error */
- snprintf(dst, len, "unknown error (%d)", err < 0 ? err : -err);
- else
- snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
- }
- return dst;
-}
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h
deleted file mode 100644
index 626d7ffb03d6..000000000000
--- a/tools/lib/bpf/str_error.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-#ifndef __LIBBPF_STR_ERROR_H
-#define __LIBBPF_STR_ERROR_H
-
-#define STRERR_BUFSIZE 128
-
-char *libbpf_strerror_r(int err, char *dst, int len);
-
-#endif /* __LIBBPF_STR_ERROR_H */
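
With str_error.{c,h} removed, the libbpf_strerror_r()/STRERR_BUFSIZE pattern is replaced throughout this patch by errstr(), which remaining callers now pick up via libbpf_internal.h (relo_core.c above drops the str_error.h include but keeps libbpf_internal.h). The before/after shape, with the surrounding pr_warn() purely illustrative:

/* before: caller-provided buffer */
char buf[128];
pr_warn("failed: %s\n", libbpf_strerror_r(err, buf, sizeof(buf)));

/* after: errstr() needs no buffer and accepts positive or negative err */
pr_warn("failed: %s\n", errstr(err));
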
diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h
index 76359bcdc94a..43deb05a5197 100644
--- a/tools/lib/bpf/usdt.bpf.h
+++ b/tools/lib/bpf/usdt.bpf.h
@@ -34,13 +34,32 @@ enum __bpf_usdt_arg_type {
BPF_USDT_ARG_CONST,
BPF_USDT_ARG_REG,
BPF_USDT_ARG_REG_DEREF,
+ BPF_USDT_ARG_SIB,
};
+/*
+ * This struct layout is designed specifically to be backwards/forward
+ * compatible between libbpf versions for ARG_CONST, ARG_REG, and
+ * ARG_REG_DEREF modes. ARG_SIB requires libbpf v1.7+.
+ */
struct __bpf_usdt_arg_spec {
/* u64 scalar interpreted depending on arg_type, see below */
__u64 val_off;
- /* arg location case, see bpf_udst_arg() for details */
- enum __bpf_usdt_arg_type arg_type;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* arg location case, see bpf_usdt_arg() for details */
+ enum __bpf_usdt_arg_type arg_type: 8;
+ /* index register offset within struct pt_regs */
+ __u16 idx_reg_off: 12;
+ /* scale factor for index register (1, 2, 4, or 8) */
+ __u16 scale_bitshift: 4;
+ /* reserved for future use, keeps reg_off offset stable */
+ __u8 __reserved: 8;
+#else
+ __u8 __reserved: 8;
+ __u16 idx_reg_off: 12;
+ __u16 scale_bitshift: 4;
+ enum __bpf_usdt_arg_type arg_type: 8;
+#endif
/* offset of referenced register within struct pt_regs */
short reg_off;
/* whether arg should be interpreted as signed value */
@@ -108,6 +127,38 @@ int bpf_usdt_arg_cnt(struct pt_regs *ctx)
return spec->arg_cnt;
}
+/* Returns the size in bytes of the #*arg_num* (zero-indexed) USDT argument.
+ * Returns negative error if argument is not found or arg_num is invalid.
+ */
+static __always_inline
+int bpf_usdt_arg_size(struct pt_regs *ctx, __u64 arg_num)
+{
+ struct __bpf_usdt_arg_spec *arg_spec;
+ struct __bpf_usdt_spec *spec;
+ int spec_id;
+
+ spec_id = __bpf_usdt_spec_id(ctx);
+ if (spec_id < 0)
+ return -ESRCH;
+
+ spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
+ if (!spec)
+ return -ESRCH;
+
+ if (arg_num >= BPF_USDT_MAX_ARG_CNT)
+ return -ENOENT;
+ barrier_var(arg_num);
+ if (arg_num >= spec->arg_cnt)
+ return -ENOENT;
+
+ arg_spec = &spec->args[arg_num];
+
+ /* arg_spec->arg_bitshift = 64 - arg_sz * 8
+ * so: arg_sz = (64 - arg_spec->arg_bitshift) / 8
+ */
+ return (unsigned int)(64 - arg_spec->arg_bitshift) / 8;
+}
+
/* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res.
* Returns 0 on success; negative error, otherwise.
* On error *res is guaranteed to be set to zero.
@@ -117,7 +168,7 @@ int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
{
struct __bpf_usdt_spec *spec;
struct __bpf_usdt_arg_spec *arg_spec;
- unsigned long val;
+ unsigned long val, idx;
int err, spec_id;
*res = 0;
@@ -172,6 +223,27 @@ int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
val >>= arg_spec->arg_bitshift;
#endif
break;
+ case BPF_USDT_ARG_SIB:
+ /* Arg is in memory addressed by SIB (Scale-Index-Base) mode
+ * (e.g., "-1@-96(%rbp,%rax,8)" in USDT arg spec). We first
+ * fetch the base register contents and the index register
+ * contents from pt_regs. Then we calculate the final address
+ * as base + (index * scale) + offset, and do a user-space
+ * probe read to fetch the argument value.
+ */
+ err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off);
+ if (err)
+ return err;
+ err = bpf_probe_read_kernel(&idx, sizeof(idx), (void *)ctx + arg_spec->idx_reg_off);
+ if (err)
+ return err;
+ err = bpf_probe_read_user(&val, sizeof(val), (void *)(val + (idx << arg_spec->scale_bitshift) + arg_spec->val_off));
+ if (err)
+ return err;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ val >>= arg_spec->arg_bitshift;
+#endif
+ break;
default:
return -EINVAL;
}
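
The BPF_USDT_ARG_SIB case above computes the effective address as base + (index << scale_bitshift) + offset before the user-space probe read. A standalone arithmetic sketch of that computation for a spec like "-1@-96(%rbp,%rax,8)"; the register contents are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x7ffd0000;  /* %rbp contents, illustrative */
	unsigned long idx = 3;            /* %rax contents, illustrative */
	unsigned int scale_bitshift = 3;  /* scale 8 encoded as shift 3 */
	long val_off = -96;
	unsigned long addr;

	addr = base + (idx << scale_bitshift) + val_off;
	printf("effective addr: 0x%lx\n", addr); /* 0x7ffcffb8 */
	return 0;
}
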
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
index 93794f01bb67..d1524f6f54ae 100644
--- a/tools/lib/bpf/usdt.c
+++ b/tools/lib/bpf/usdt.c
@@ -58,7 +58,7 @@
*
* STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
*
- * USDT is identified by it's <provider-name>:<probe-name> pair of names. Each
+ * USDT is identified by its <provider-name>:<probe-name> pair of names. Each
* individual USDT has a fixed number of arguments (3 in the above example)
* and specifies values of each argument as if it was a function call.
*
@@ -80,7 +80,7 @@
* NOP instruction that kernel can replace with an interrupt instruction to
* trigger instrumentation code (BPF program for all that we care about).
*
- * Semaphore above is and optional feature. It records an address of a 2-byte
+ * Semaphore above is an optional feature. It records an address of a 2-byte
* refcount variable (normally in '.probes' ELF section) used for signaling if
* there is anything that is attached to USDT. This is useful for user
* applications if, for example, they need to prepare some arguments that are
@@ -120,7 +120,7 @@
* a uprobe BPF program (which for kernel, at least currently, is just a kprobe
* program, so BPF_PROG_TYPE_KPROBE program type). With the only difference
* that uprobe is usually attached at the function entry, while USDT will
- * normally will be somewhere inside the function. But it should always be
+ * normally be somewhere inside the function. But it should always be
* pointing to NOP instruction, which makes such uprobes the fastest uprobe
* kind.
*
@@ -150,7 +150,7 @@
* libbpf sets to spec ID during attach time, or, if kernel is too old to
* support BPF cookie, through IP-to-spec-ID map that libbpf maintains in such
* case. The latter means that some modes of operation can't be supported
- * without BPF cookie. Such mode is attaching to shared library "generically",
+ * without BPF cookie. Such a mode is attaching to shared library "generically",
* without specifying target process. In such case, it's impossible to
* calculate absolute IP addresses for IP-to-spec-ID map, and thus such mode
* is not supported without BPF cookie support.
@@ -184,7 +184,7 @@
* as even if USDT spec string is the same, USDT cookie value can be
* different. It was deemed excessive to try to deduplicate across independent
* USDT attachments by taking into account USDT spec string *and* USDT cookie
- * value, which would complicated spec ID accounting significantly for little
+ * value, which would complicate spec ID accounting significantly for little
* gain.
*/
@@ -199,12 +199,23 @@ enum usdt_arg_type {
USDT_ARG_CONST,
USDT_ARG_REG,
USDT_ARG_REG_DEREF,
+ USDT_ARG_SIB,
};
/* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
struct usdt_arg_spec {
__u64 val_off;
- enum usdt_arg_type arg_type;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ enum usdt_arg_type arg_type: 8;
+ __u16 idx_reg_off: 12;
+ __u16 scale_bitshift: 4;
+ __u8 __reserved: 8; /* keep reg_off offset stable */
+#else
+ __u8 __reserved: 8; /* keep reg_off offset stable */
+ __u16 idx_reg_off: 12;
+ __u16 scale_bitshift: 4;
+ enum usdt_arg_type arg_type: 8;
+#endif
short reg_off;
bool arg_signed;
char arg_bitshift;
@@ -465,8 +476,8 @@ static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs,
goto proceed;
if (!realpath(lib_path, path)) {
- pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n",
- lib_path, -errno);
+ pr_warn("usdt: failed to get absolute path of '%s' (err %s), using path as is...\n",
+ lib_path, errstr(-errno));
libbpf_strlcpy(path, lib_path, sizeof(path));
}
@@ -475,8 +486,8 @@ proceed:
f = fopen(line, "re");
if (!f) {
err = -errno;
- pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n",
- line, lib_path, err);
+ pr_warn("usdt: failed to open '%s' to get base addr of '%s': %s\n",
+ line, lib_path, errstr(err));
return err;
}
@@ -569,9 +580,8 @@ static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long o
return NULL;
}
-static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
- const char *data, size_t name_off, size_t desc_off,
- struct usdt_note *usdt_note);
+static int parse_usdt_note(GElf_Nhdr *nhdr, const char *data, size_t name_off,
+ size_t desc_off, struct usdt_note *usdt_note);
static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
@@ -606,7 +616,8 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
err = parse_elf_segs(elf, path, &segs, &seg_cnt);
if (err) {
- pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err);
+ pr_warn("usdt: failed to process ELF program segments for '%s': %s\n",
+ path, errstr(err));
goto err_out;
}
@@ -624,7 +635,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
struct elf_seg *seg = NULL;
void *tmp;
- err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, &note);
+ err = parse_usdt_note(&nhdr, data->d_buf, name_off, desc_off, &note);
if (err)
goto err_out;
@@ -659,7 +670,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
* [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
*/
usdt_abs_ip = note.loc_addr;
- if (base_addr)
+ if (base_addr && note.base_addr)
usdt_abs_ip += base_addr - note.base_addr;
/* When attaching uprobes (which is what USDTs basically are)
@@ -708,8 +719,8 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
if (vma_seg_cnt == 0) {
err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
if (err) {
- pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n",
- pid, path, err);
+ pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %s\n",
+ pid, path, errstr(err));
goto err_out;
}
}
@@ -1047,8 +1058,8 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
err = -errno;
- pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n",
- spec_id, usdt_provider, usdt_name, path, err);
+ pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %s\n",
+ spec_id, usdt_provider, usdt_name, path, errstr(err));
goto err_out;
}
if (!man->has_bpf_cookie &&
@@ -1058,9 +1069,9 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
spec_id, usdt_provider, usdt_name, path);
} else {
- pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n",
+ pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %s\n",
target->abs_ip, spec_id, usdt_provider, usdt_name,
- path, err);
+ path, errstr(err));
}
goto err_out;
}
@@ -1076,8 +1087,8 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
target->rel_ip, &opts);
err = libbpf_get_error(uprobe_link);
if (err) {
- pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n",
- i, usdt_provider, usdt_name, path, err);
+ pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %s\n",
+ i, usdt_provider, usdt_name, path, errstr(err));
goto err_out;
}
@@ -1099,8 +1110,8 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
NULL, &opts_multi);
if (!link->multi_link) {
err = -errno;
- pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %d\n",
- usdt_provider, usdt_name, path, err);
+ pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %s\n",
+ usdt_provider, usdt_name, path, errstr(err));
goto err_out;
}
@@ -1130,8 +1141,7 @@ err_out:
/* Parse out USDT ELF note from '.note.stapsdt' section.
* Logic inspired by perf's code.
*/
-static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
- const char *data, size_t name_off, size_t desc_off,
+static int parse_usdt_note(GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
struct usdt_note *note)
{
const char *provider, *name, *args;
@@ -1281,11 +1291,51 @@ static int calc_pt_regs_off(const char *reg_name)
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
- char reg_name[16];
- int len, reg_off;
- long off;
+ char reg_name[16] = {0}, idx_reg_name[16] = {0};
+ int len, reg_off, idx_reg_off, scale = 1;
+ long off = 0;
+
+ if (sscanf(arg_str, " %d @ %ld ( %%%15[^,] , %%%15[^,] , %d ) %n",
+ arg_sz, &off, reg_name, idx_reg_name, &scale, &len) == 5 ||
+ sscanf(arg_str, " %d @ ( %%%15[^,] , %%%15[^,] , %d ) %n",
+ arg_sz, reg_name, idx_reg_name, &scale, &len) == 4 ||
+ sscanf(arg_str, " %d @ %ld ( %%%15[^,] , %%%15[^)] ) %n",
+ arg_sz, &off, reg_name, idx_reg_name, &len) == 4 ||
+ sscanf(arg_str, " %d @ ( %%%15[^,] , %%%15[^)] ) %n",
+ arg_sz, reg_name, idx_reg_name, &len) == 3
+ ) {
+ /*
+ * Scale Index Base case:
+ * 1@-96(%rbp,%rax,8)
+ * 1@(%rbp,%rax,8)
+ * 1@-96(%rbp,%rax)
+ * 1@(%rbp,%rax)
+ */
+ arg->arg_type = USDT_ARG_SIB;
+ arg->val_off = off;
- if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", arg_sz, &off, reg_name, &len) == 3) {
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+
+ idx_reg_off = calc_pt_regs_off(idx_reg_name);
+ if (idx_reg_off < 0)
+ return idx_reg_off;
+ arg->idx_reg_off = idx_reg_off;
+
+ /* validate scale factor and set fields directly */
+ switch (scale) {
+ case 1: arg->scale_bitshift = 0; break;
+ case 2: arg->scale_bitshift = 1; break;
+ case 4: arg->scale_bitshift = 2; break;
+ case 8: arg->scale_bitshift = 3; break;
+ default:
+ pr_warn("usdt: invalid SIB scale %d, expected 1, 2, 4, 8\n", scale);
+ return -EINVAL;
+ }
+ } else if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n",
+ arg_sz, &off, reg_name, &len) == 3) {
/* Memory dereference case, e.g., -4@-20(%rbp) */
arg->arg_type = USDT_ARG_REG_DEREF;
arg->val_off = off;
@@ -1304,6 +1354,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
} else if (sscanf(arg_str, " %d @ %%%15s %n", arg_sz, reg_name, &len) == 2) {
/* Register read case, e.g., -4@%eax */
arg->arg_type = USDT_ARG_REG;
+ /* register read has no memory offset */
arg->val_off = 0;
reg_off = calc_pt_regs_off(reg_name);
@@ -1325,8 +1376,6 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
#elif defined(__s390x__)
-/* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
-
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
unsigned int reg;
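
The SIB parsing added to parse_usdt_arg() above leans entirely on sscanf() patterns. A standalone sketch running the first pattern against a typical x86-64 arg spec string, to show what lands in each field (note %n does not count toward the sscanf() return value):

#include <stdio.h>

int main(void)
{
	char reg[16] = {0}, idx_reg[16] = {0};
	int arg_sz = 0, scale = 1, len = 0;
	long off = 0;

	if (sscanf("-1@-96(%rbp,%rax,8)",
		   " %d @ %ld ( %%%15[^,] , %%%15[^,] , %d ) %n",
		   &arg_sz, &off, reg, idx_reg, &scale, &len) == 5)
		printf("sz=%d off=%ld base=%%%s idx=%%%s scale=%d\n",
		       arg_sz, off, reg, idx_reg, scale);
	/* prints: sz=-1 off=-96 base=%rbp idx=%rax scale=8 */
	return 0;
}
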
diff --git a/tools/lib/bpf/zip.c b/tools/lib/bpf/zip.c
index 3f26d629b2b4..88c376a8348d 100644
--- a/tools/lib/bpf/zip.c
+++ b/tools/lib/bpf/zip.c
@@ -223,7 +223,7 @@ struct zip_archive *zip_archive_open(const char *path)
if (!archive) {
munmap(data, size);
return ERR_PTR(-ENOMEM);
- };
+ }
archive->data = data;
archive->size = size;