Diffstat (limited to 'tools/lib/bpf/libbpf.c'):
 -rw-r--r--  tools/lib/bpf/libbpf.c | 1241
 1 file changed, 799 insertions(+), 442 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 8892f2f1bbcc..a1bea1953df6 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -195,8 +195,8 @@ enum kern_feature_id {
FEAT_BTF_FLOAT,
/* BPF perf link support */
FEAT_PERF_LINK,
- /* BTF_KIND_TAG support */
- FEAT_BTF_TAG,
+ /* BTF_KIND_DECL_TAG support */
+ FEAT_BTF_DECL_TAG,
__FEAT_CNT,
};
@@ -285,7 +285,7 @@ struct bpf_program {
size_t sub_insn_off;
char *name;
- /* sec_name with / replaced by _; makes recursive pinning
+ /* name with / replaced by _; makes recursive pinning
* in bpf_object__pin_programs easier
*/
char *pin_name;
@@ -370,15 +370,14 @@ enum libbpf_map_type {
LIBBPF_MAP_KCONFIG,
};
-static const char * const libbpf_type_to_btf_name[] = {
- [LIBBPF_MAP_DATA] = DATA_SEC,
- [LIBBPF_MAP_BSS] = BSS_SEC,
- [LIBBPF_MAP_RODATA] = RODATA_SEC,
- [LIBBPF_MAP_KCONFIG] = KCONFIG_SEC,
-};
-
struct bpf_map {
char *name;
+ /* real_name is defined for special internal maps (.rodata*,
+ * .data*, .bss, .kconfig) and preserves their original ELF section
+ * name. This is important to be able to find corresponding BTF
+ * DATASEC information.
+ */
+ char *real_name;
int fd;
int sec_idx;
size_t sec_offset;
@@ -401,6 +400,7 @@ struct bpf_map {
char *pin_path;
bool pinned;
bool reused;
+ __u64 map_extra;
};
enum extern_type {
@@ -443,6 +443,11 @@ struct extern_desc {
/* local btf_id of the ksym extern's type. */
__u32 type_id;
+ /* BTF fd index to be patched in for insn->off, this is
+ * 0 for vmlinux BTF, index in obj->fd_array for module
+ * BTF
+ */
+ __s16 btf_fd_idx;
} ksym;
};
};
@@ -454,6 +459,41 @@ struct module_btf {
char *name;
__u32 id;
int fd;
+ int fd_array_idx;
+};
+
+enum sec_type {
+ SEC_UNUSED = 0,
+ SEC_RELO,
+ SEC_BSS,
+ SEC_DATA,
+ SEC_RODATA,
+};
+
+struct elf_sec_desc {
+ enum sec_type sec_type;
+ Elf64_Shdr *shdr;
+ Elf_Data *data;
+};
+
+struct elf_state {
+ int fd;
+ const void *obj_buf;
+ size_t obj_buf_sz;
+ Elf *elf;
+ Elf64_Ehdr *ehdr;
+ Elf_Data *symbols;
+ Elf_Data *st_ops_data;
+ size_t shstrndx; /* section index for section name strings */
+ size_t strtabidx;
+ struct elf_sec_desc *secs;
+ int sec_cnt;
+ int maps_shndx;
+ int btf_maps_shndx;
+ __u32 btf_maps_sec_btf_id;
+ int text_shndx;
+ int symbols_shndx;
+ int st_ops_shndx;
};
struct bpf_object {
@@ -471,47 +511,17 @@ struct bpf_object {
struct extern_desc *externs;
int nr_extern;
int kconfig_map_idx;
- int rodata_map_idx;
bool loaded;
bool has_subcalls;
+ bool has_rodata;
struct bpf_gen *gen_loader;
+ /* Information when doing ELF related work. Only valid if efile.elf is not NULL */
+ struct elf_state efile;
/*
- * Information when doing elf related work. Only valid if fd
- * is valid.
- */
- struct {
- int fd;
- const void *obj_buf;
- size_t obj_buf_sz;
- Elf *elf;
- GElf_Ehdr ehdr;
- Elf_Data *symbols;
- Elf_Data *data;
- Elf_Data *rodata;
- Elf_Data *bss;
- Elf_Data *st_ops_data;
- size_t shstrndx; /* section index for section name strings */
- size_t strtabidx;
- struct {
- GElf_Shdr shdr;
- Elf_Data *data;
- } *reloc_sects;
- int nr_reloc_sects;
- int maps_shndx;
- int btf_maps_shndx;
- __u32 btf_maps_sec_btf_id;
- int text_shndx;
- int symbols_shndx;
- int data_shndx;
- int rodata_shndx;
- int bss_shndx;
- int st_ops_shndx;
- } efile;
- /*
- * All loaded bpf_object is linked in a list, which is
+ * All loaded bpf_object are linked in a list, which is
* hidden to caller. bpf_objects__<func> handlers deal with
* all objects.
*/
@@ -539,17 +549,22 @@ struct bpf_object {
void *priv;
bpf_object_clear_priv_t clear_priv;
+ int *fd_array;
+ size_t fd_array_cap;
+ size_t fd_array_cnt;
+
char path[];
};
-#define obj_elf_valid(o) ((o)->efile.elf)
static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
-static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
+static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
+static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
+static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
void bpf_program__unload(struct bpf_program *prog)
{
@@ -604,7 +619,16 @@ static char *__bpf_program__pin_name(struct bpf_program *prog)
{
char *name, *p;
- name = p = strdup(prog->sec_name);
+ if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
+ name = strdup(prog->name);
+ else
+ name = strdup(prog->sec_name);
+
+ if (!name)
+ return NULL;
+
+ p = name;
+
while ((p = strchr(p, '/')))
*p = '_';
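
Editor's note: with the new strict-mode branch, pin names are derived from the program name instead of the ELF section name. A hedged userspace sketch of opting in (object path, pin path and program names are assumptions, not taken from this diff):

    /* Enabling libbpf 1.0-style strict behaviors before open. With
     * LIBBPF_STRICT_SEC_NAME, recursive pinning uses program names
     * (e.g. "handle_tp") rather than sanitized section names
     * (e.g. "tp_syscalls_sys_enter_write").
     */
    #include <bpf/libbpf.h>
    #include <bpf/libbpf_legacy.h>

    int main(void)
    {
            struct bpf_object *obj;
            int err;

            libbpf_set_strict_mode(LIBBPF_STRICT_SEC_NAME | LIBBPF_STRICT_NO_OBJECT_LIST);

            obj = bpf_object__open_file("minimal.bpf.o", NULL);
            if (libbpf_get_error(obj))
                    return 1;

            err = bpf_object__load(obj);
            if (!err)
                    err = bpf_object__pin_programs(obj, "/sys/fs/bpf/minimal");

            bpf_object__close(obj);
            return err ? 1 : 0;
    }
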
@@ -691,25 +715,25 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
int nr_progs, err, i;
const char *name;
- GElf_Sym sym;
+ Elf64_Sym *sym;
progs = obj->programs;
nr_progs = obj->nr_programs;
- nr_syms = symbols->d_size / sizeof(GElf_Sym);
+ nr_syms = symbols->d_size / sizeof(Elf64_Sym);
sec_off = 0;
for (i = 0; i < nr_syms; i++) {
- if (!gelf_getsym(symbols, i, &sym))
- continue;
- if (sym.st_shndx != sec_idx)
+ sym = elf_sym_by_idx(obj, i);
+
+ if (sym->st_shndx != sec_idx)
continue;
- if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
+ if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
continue;
- prog_sz = sym.st_size;
- sec_off = sym.st_value;
+ prog_sz = sym->st_size;
+ sec_off = sym->st_value;
- name = elf_sym_str(obj, sym.st_name);
+ name = elf_sym_str(obj, sym->st_name);
if (!name) {
pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
sec_name, sec_off);
@@ -722,7 +746,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return -LIBBPF_ERRNO__FORMAT;
}
- if (sec_idx != obj->efile.text_shndx && GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+ if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
return -ENOTSUP;
}
@@ -755,9 +779,9 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
* as static to enable more permissive BPF verification mode
* with more outside context available to BPF verifier
*/
- if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
- && (GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN
- || GELF_ST_VISIBILITY(sym.st_other) == STV_INTERNAL))
+ if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
+ && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
+ || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
prog->mark_btf_static = true;
nr_progs++;
@@ -1125,6 +1149,7 @@ static struct bpf_object *bpf_object__new(const char *path,
size_t obj_buf_sz,
const char *obj_name)
{
+ bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
struct bpf_object *obj;
char *end;
@@ -1158,24 +1183,21 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.maps_shndx = -1;
obj->efile.btf_maps_shndx = -1;
- obj->efile.data_shndx = -1;
- obj->efile.rodata_shndx = -1;
- obj->efile.bss_shndx = -1;
obj->efile.st_ops_shndx = -1;
obj->kconfig_map_idx = -1;
- obj->rodata_map_idx = -1;
obj->kern_version = get_kernel_version();
obj->loaded = false;
INIT_LIST_HEAD(&obj->list);
- list_add(&obj->list, &bpf_objects_list);
+ if (!strict)
+ list_add(&obj->list, &bpf_objects_list);
return obj;
}
static void bpf_object__elf_finish(struct bpf_object *obj)
{
- if (!obj_elf_valid(obj))
+ if (!obj->efile.elf)
return;
if (obj->efile.elf) {
@@ -1183,13 +1205,10 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
obj->efile.elf = NULL;
}
obj->efile.symbols = NULL;
- obj->efile.data = NULL;
- obj->efile.rodata = NULL;
- obj->efile.bss = NULL;
obj->efile.st_ops_data = NULL;
- zfree(&obj->efile.reloc_sects);
- obj->efile.nr_reloc_sects = 0;
+ zfree(&obj->efile.secs);
+ obj->efile.sec_cnt = 0;
zclose(obj->efile.fd);
obj->efile.obj_buf = NULL;
obj->efile.obj_buf_sz = 0;
@@ -1197,10 +1216,11 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
static int bpf_object__elf_init(struct bpf_object *obj)
{
+ Elf64_Ehdr *ehdr;
int err = 0;
- GElf_Ehdr *ep;
+ Elf *elf;
- if (obj_elf_valid(obj)) {
+ if (obj->efile.elf) {
pr_warn("elf: init internal error\n");
return -LIBBPF_ERRNO__LIBELF;
}
@@ -1210,10 +1230,9 @@ static int bpf_object__elf_init(struct bpf_object *obj)
* obj_buf should have been validated by
* bpf_object__open_buffer().
*/
- obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
- obj->efile.obj_buf_sz);
+ elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
} else {
- obj->efile.fd = open(obj->path, O_RDONLY);
+ obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
if (obj->efile.fd < 0) {
char errmsg[STRERR_BUFSIZE], *cp;
@@ -1223,23 +1242,37 @@ static int bpf_object__elf_init(struct bpf_object *obj)
return err;
}
- obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
+ elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
}
- if (!obj->efile.elf) {
+ if (!elf) {
pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__LIBELF;
goto errout;
}
- if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
+ obj->efile.elf = elf;
+
+ if (elf_kind(elf) != ELF_K_ELF) {
+ err = -LIBBPF_ERRNO__FORMAT;
+ pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
+ goto errout;
+ }
+
+ if (gelf_getclass(elf) != ELFCLASS64) {
+ err = -LIBBPF_ERRNO__FORMAT;
+ pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
+ goto errout;
+ }
+
+ obj->efile.ehdr = ehdr = elf64_getehdr(elf);
+ if (!obj->efile.ehdr) {
pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
- ep = &obj->efile.ehdr;
- if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
+ if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
pr_warn("elf: failed to get section names section index for %s: %s\n",
obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT;
@@ -1247,7 +1280,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
}
/* Elf is corrupted/truncated, avoid calling elf_strptr. */
- if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
+ if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
pr_warn("elf: failed to get section names strings from %s: %s\n",
obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT;
@@ -1255,8 +1288,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
}
/* Old LLVM set e_machine to EM_NONE */
- if (ep->e_type != ET_REL ||
- (ep->e_machine && ep->e_machine != EM_BPF)) {
+ if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
@@ -1270,11 +1302,11 @@ errout:
static int bpf_object__check_endianness(struct bpf_object *obj)
{
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
return 0;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
@@ -1314,41 +1346,27 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
return false;
}
-int bpf_object__section_size(const struct bpf_object *obj, const char *name,
- __u32 *size)
+static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
int ret = -ENOENT;
+ Elf_Data *data;
+ Elf_Scn *scn;
*size = 0;
- if (!name) {
+ if (!name)
return -EINVAL;
- } else if (!strcmp(name, DATA_SEC)) {
- if (obj->efile.data)
- *size = obj->efile.data->d_size;
- } else if (!strcmp(name, BSS_SEC)) {
- if (obj->efile.bss)
- *size = obj->efile.bss->d_size;
- } else if (!strcmp(name, RODATA_SEC)) {
- if (obj->efile.rodata)
- *size = obj->efile.rodata->d_size;
- } else if (!strcmp(name, STRUCT_OPS_SEC)) {
- if (obj->efile.st_ops_data)
- *size = obj->efile.st_ops_data->d_size;
- } else {
- Elf_Scn *scn = elf_sec_by_name(obj, name);
- Elf_Data *data = elf_sec_data(obj, scn);
- if (data) {
- ret = 0; /* found it */
- *size = data->d_size;
- }
+ scn = elf_sec_by_name(obj, name);
+ data = elf_sec_data(obj, scn);
+ if (data) {
+ ret = 0; /* found it */
+ *size = data->d_size;
}
return *size ? 0 : ret;
}
-int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
- __u32 *off)
+static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
{
Elf_Data *symbols = obj->efile.symbols;
const char *sname;
@@ -1357,23 +1375,20 @@ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
if (!name || !off)
return -EINVAL;
- for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
- GElf_Sym sym;
+ for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
+ Elf64_Sym *sym = elf_sym_by_idx(obj, si);
- if (!gelf_getsym(symbols, si, &sym))
- continue;
- if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
- GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
+ if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL ||
+ ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
continue;
- sname = elf_sym_str(obj, sym.st_name);
+ sname = elf_sym_str(obj, sym->st_name);
if (!sname) {
- pr_warn("failed to get sym name string for var %s\n",
- name);
+ pr_warn("failed to get sym name string for var %s\n", name);
return -EIO;
}
if (strcmp(name, sname) == 0) {
- *off = sym.st_value;
+ *off = sym->st_value;
return 0;
}
}
@@ -1425,17 +1440,55 @@ static size_t bpf_map_mmap_sz(const struct bpf_map *map)
return map_sz;
}
-static char *internal_map_name(struct bpf_object *obj,
- enum libbpf_map_type type)
+static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
char map_name[BPF_OBJ_NAME_LEN], *p;
- const char *sfx = libbpf_type_to_btf_name[type];
- int sfx_len = max((size_t)7, strlen(sfx));
- int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
- strlen(obj->name));
+ int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
+
+ /* This is one of the more confusing parts of libbpf for various
+ * reasons, some of which are historical. The original idea for naming
+ * internal names was to include as much of BPF object name prefix as
+ * possible, so that it can be distinguished from similar internal
+ * maps of a different BPF object.
+ * As an example, let's say we have bpf_object named 'my_object_name'
+ * and internal map corresponding to '.rodata' ELF section. The final
+ * map name advertised to user and to the kernel will be
+ * 'my_objec.rodata', taking first 8 characters of object name and
+ * entire 7 characters of '.rodata'.
+ * Somewhat confusingly, if internal map ELF section name is shorter
+ * than 7 characters, e.g., '.bss', we still reserve 7 characters
+ * for the suffix, even though we only have 4 actual characters, and
+ * resulting map will be called 'my_objec.bss', not even using all 15
+ * characters allowed by the kernel. Oh well, at least the truncated
+ * object name is somewhat consistent in this case. But if the map
+ * name is '.kconfig', we'll still have entirety of '.kconfig' added
+ * (8 chars) and thus will be left with only first 7 characters of the
+ * object name ('my_obje'). Happy guessing, user, that the final map
+ * name will be "my_obje.kconfig".
+ * Now, with libbpf starting to support arbitrarily named .rodata.*
+ * and .data.* data sections, it's possible that ELF section name is
+ * longer than allowed 15 chars, so we now need to be careful to take
+ * only up to 15 first characters of ELF name, taking no BPF object
+ * name characters at all. So '.rodata.abracadabra' will result in
+ * '.rodata.abracad' kernel and user-visible name.
+ * We need to keep this convoluted logic intact for .data, .bss and
+ * .rodata maps, but for new custom .data.custom and .rodata.custom
+ * maps we use their ELF names as is, not prepending bpf_object name
+ * in front. We still need to truncate them to 15 characters for the
+ * kernel. Full name can be recovered for such maps by using DATASEC
+ * BTF type associated with such map's value type, though.
+ */
+ if (sfx_len >= BPF_OBJ_NAME_LEN)
+ sfx_len = BPF_OBJ_NAME_LEN - 1;
+
+ /* if there are two or more dots in map name, it's a custom dot map */
+ if (strchr(real_name + 1, '.') != NULL)
+ pfx_len = 0;
+ else
+ pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
- sfx_len, libbpf_type_to_btf_name[type]);
+ sfx_len, real_name);
/* sanitise map name to characters allowed by kernel */
for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
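
Editor's note: a standalone sketch of the naming rules spelled out in the comment above (character sanitization omitted); it is not libbpf's helper itself, and the object name is the one used as an example in that comment:

    /* Reproduces the described truncation rules, assuming
     * BPF_OBJ_NAME_LEN == 16 as in the UAPI:
     *   ("my_object_name", ".rodata")             -> "my_objec.rodata"
     *   ("my_object_name", ".kconfig")            -> "my_obje.kconfig"
     *   ("my_object_name", ".rodata.abracadabra") -> ".rodata.abracad"
     */
    #include <stdio.h>
    #include <string.h>

    #define BPF_OBJ_NAME_LEN 16

    static void make_name(const char *obj_name, const char *real_name, char *out)
    {
            size_t sfx_len = strlen(real_name) > 7 ? strlen(real_name) : 7;
            size_t pfx_len;

            if (sfx_len >= BPF_OBJ_NAME_LEN)
                    sfx_len = BPF_OBJ_NAME_LEN - 1;

            /* two or more dots: custom map, no object-name prefix */
            if (strchr(real_name + 1, '.'))
                    pfx_len = 0;
            else
                    pfx_len = strlen(obj_name) < BPF_OBJ_NAME_LEN - sfx_len - 1 ?
                              strlen(obj_name) : BPF_OBJ_NAME_LEN - sfx_len - 1;

            snprintf(out, BPF_OBJ_NAME_LEN, "%.*s%.*s",
                     (int)pfx_len, obj_name, (int)sfx_len, real_name);
    }

    int main(void)
    {
            char buf[BPF_OBJ_NAME_LEN];

            make_name("my_object_name", ".rodata.abracadabra", buf);
            printf("%s\n", buf); /* prints ".rodata.abracad" */
            return 0;
    }
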
@@ -1447,7 +1500,7 @@ static char *internal_map_name(struct bpf_object *obj,
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
- int sec_idx, void *data, size_t data_sz)
+ const char *real_name, int sec_idx, void *data, size_t data_sz)
{
struct bpf_map_def *def;
struct bpf_map *map;
@@ -1460,9 +1513,11 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
map->libbpf_type = type;
map->sec_idx = sec_idx;
map->sec_offset = 0;
- map->name = internal_map_name(obj, type);
- if (!map->name) {
- pr_warn("failed to alloc map name\n");
+ map->real_name = strdup(real_name);
+ map->name = internal_map_name(obj, real_name);
+ if (!map->real_name || !map->name) {
+ zfree(&map->real_name);
+ zfree(&map->name);
return -ENOMEM;
}
@@ -1485,6 +1540,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
map->mmaped = NULL;
pr_warn("failed to alloc map '%s' content buffer: %d\n",
map->name, err);
+ zfree(&map->real_name);
zfree(&map->name);
return err;
}
@@ -1498,34 +1554,43 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
- int err;
+ struct elf_sec_desc *sec_desc;
+ const char *sec_name;
+ int err = 0, sec_idx;
/*
* Populate obj->maps with libbpf internal maps.
*/
- if (obj->efile.data_shndx >= 0) {
- err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
- obj->efile.data_shndx,
- obj->efile.data->d_buf,
- obj->efile.data->d_size);
- if (err)
- return err;
- }
- if (obj->efile.rodata_shndx >= 0) {
- err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
- obj->efile.rodata_shndx,
- obj->efile.rodata->d_buf,
- obj->efile.rodata->d_size);
- if (err)
- return err;
-
- obj->rodata_map_idx = obj->nr_maps - 1;
- }
- if (obj->efile.bss_shndx >= 0) {
- err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
- obj->efile.bss_shndx,
- NULL,
- obj->efile.bss->d_size);
+ for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
+ sec_desc = &obj->efile.secs[sec_idx];
+
+ switch (sec_desc->sec_type) {
+ case SEC_DATA:
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
+ sec_name, sec_idx,
+ sec_desc->data->d_buf,
+ sec_desc->data->d_size);
+ break;
+ case SEC_RODATA:
+ obj->has_rodata = true;
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
+ sec_name, sec_idx,
+ sec_desc->data->d_buf,
+ sec_desc->data->d_size);
+ break;
+ case SEC_BSS:
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
+ sec_name, sec_idx,
+ NULL,
+ sec_desc->data->d_size);
+ break;
+ default:
+ /* skip */
+ break;
+ }
if (err)
return err;
}
@@ -1822,7 +1887,7 @@ static int bpf_object__init_kconfig_map(struct bpf_object *obj)
map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
- obj->efile.symbols_shndx,
+ ".kconfig", obj->efile.symbols_shndx,
NULL, map_sz);
if (err)
return err;
@@ -1860,15 +1925,13 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
*
* TODO: Detect array of map and report error.
*/
- nr_syms = symbols->d_size / sizeof(GElf_Sym);
+ nr_syms = symbols->d_size / sizeof(Elf64_Sym);
for (i = 0; i < nr_syms; i++) {
- GElf_Sym sym;
+ Elf64_Sym *sym = elf_sym_by_idx(obj, i);
- if (!gelf_getsym(symbols, i, &sym))
+ if (sym->st_shndx != obj->efile.maps_shndx)
continue;
- if (sym.st_shndx != obj->efile.maps_shndx)
- continue;
- if (GELF_ST_TYPE(sym.st_info) == STT_SECTION)
+ if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
continue;
nr_maps++;
}
@@ -1885,40 +1948,38 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
/* Fill obj->maps using data in "maps" section. */
for (i = 0; i < nr_syms; i++) {
- GElf_Sym sym;
+ Elf64_Sym *sym = elf_sym_by_idx(obj, i);
const char *map_name;
struct bpf_map_def *def;
struct bpf_map *map;
- if (!gelf_getsym(symbols, i, &sym))
- continue;
- if (sym.st_shndx != obj->efile.maps_shndx)
+ if (sym->st_shndx != obj->efile.maps_shndx)
continue;
- if (GELF_ST_TYPE(sym.st_info) == STT_SECTION)
+ if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
continue;
map = bpf_object__add_map(obj);
if (IS_ERR(map))
return PTR_ERR(map);
- map_name = elf_sym_str(obj, sym.st_name);
+ map_name = elf_sym_str(obj, sym->st_name);
if (!map_name) {
pr_warn("failed to get map #%d name sym string for obj %s\n",
i, obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
- if (GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+ if (ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
return -ENOTSUP;
}
map->libbpf_type = LIBBPF_MAP_UNSPEC;
- map->sec_idx = sym.st_shndx;
- map->sec_offset = sym.st_value;
+ map->sec_idx = sym->st_shndx;
+ map->sec_offset = sym->st_value;
pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
map_name, map->sec_idx, map->sec_offset);
- if (sym.st_value + map_def_sz > data->d_size) {
+ if (sym->st_value + map_def_sz > data->d_size) {
pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
obj->path, map_name);
return -EINVAL;
@@ -1926,11 +1987,11 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
map->name = strdup(map_name);
if (!map->name) {
- pr_warn("failed to alloc map name\n");
+ pr_warn("map '%s': failed to alloc map name\n", map_name);
return -ENOMEM;
}
pr_debug("map %d is \"%s\"\n", i, map->name);
- def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
+ def = (struct bpf_map_def *)(data->d_buf + sym->st_value);
/*
* If the definition of the map in the object file fits in
* bpf_map_def, copy it. Any extra fields in our version
@@ -2014,7 +2075,7 @@ static const char *__btf_kind_str(__u16 kind)
case BTF_KIND_VAR: return "var";
case BTF_KIND_DATASEC: return "datasec";
case BTF_KIND_FLOAT: return "float";
- case BTF_KIND_TAG: return "tag";
+ case BTF_KIND_DECL_TAG: return "decl_tag";
default: return "unknown";
}
}
@@ -2264,6 +2325,13 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
}
map_def->pinning = val;
map_def->parts |= MAP_DEF_PINNING;
+ } else if (strcmp(name, "map_extra") == 0) {
+ __u32 map_extra;
+
+ if (!get_map_field_int(map_name, btf, m, &map_extra))
+ return -EINVAL;
+ map_def->map_extra = map_extra;
+ map_def->parts |= MAP_DEF_MAP_EXTRA;
} else {
if (strict) {
pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
@@ -2288,6 +2356,7 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
map->def.value_size = def->value_size;
map->def.max_entries = def->max_entries;
map->def.map_flags = def->map_flags;
+ map->map_extra = def->map_extra;
map->numa_node = def->numa_node;
map->btf_key_type_id = def->key_type_id;
@@ -2311,7 +2380,10 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
if (def->parts & MAP_DEF_MAX_ENTRIES)
pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
if (def->parts & MAP_DEF_MAP_FLAGS)
- pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
+ pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
+ if (def->parts & MAP_DEF_MAP_EXTRA)
+ pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
+ (unsigned long long)def->map_extra);
if (def->parts & MAP_DEF_PINNING)
pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
if (def->parts & MAP_DEF_NUMA_NODE)
@@ -2448,8 +2520,8 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
return -EINVAL;
}
- nr_types = btf__get_nr_types(obj->btf);
- for (i = 1; i <= nr_types; i++) {
+ nr_types = btf__type_cnt(obj->btf);
+ for (i = 1; i < nr_types; i++) {
t = btf__type_by_id(obj->btf, i);
if (!btf_is_datasec(t))
continue;
@@ -2500,12 +2572,13 @@ static int bpf_object__init_maps(struct bpf_object *obj,
static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
- GElf_Shdr sh;
+ Elf64_Shdr *sh;
- if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
+ sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
+ if (!sh)
return false;
- return sh.sh_flags & SHF_EXECINSTR;
+ return sh->sh_flags & SHF_EXECINSTR;
}
static bool btf_needs_sanitization(struct bpf_object *obj)
@@ -2514,9 +2587,9 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
- bool has_tag = kernel_supports(obj, FEAT_BTF_TAG);
+ bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
- return !has_func || !has_datasec || !has_func_global || !has_float || !has_tag;
+ return !has_func || !has_datasec || !has_func_global || !has_float || !has_decl_tag;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
@@ -2525,15 +2598,15 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
- bool has_tag = kernel_supports(obj, FEAT_BTF_TAG);
+ bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
struct btf_type *t;
int i, j, vlen;
- for (i = 1; i <= btf__get_nr_types(btf); i++) {
+ for (i = 1; i < btf__type_cnt(btf); i++) {
t = (struct btf_type *)btf__type_by_id(btf, i);
- if ((!has_datasec && btf_is_var(t)) || (!has_tag && btf_is_tag(t))) {
- /* replace VAR/TAG with INT */
+ if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
+ /* replace VAR/DECL_TAG with INT */
t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
/*
* using size = 1 is the safest choice, 4 will be too
@@ -2640,6 +2713,104 @@ out:
return 0;
}
+static int compare_vsi_off(const void *_a, const void *_b)
+{
+ const struct btf_var_secinfo *a = _a;
+ const struct btf_var_secinfo *b = _b;
+
+ return a->offset - b->offset;
+}
+
+static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
+ struct btf_type *t)
+{
+ __u32 size = 0, off = 0, i, vars = btf_vlen(t);
+ const char *name = btf__name_by_offset(btf, t->name_off);
+ const struct btf_type *t_var;
+ struct btf_var_secinfo *vsi;
+ const struct btf_var *var;
+ int ret;
+
+ if (!name) {
+ pr_debug("No name found in string section for DATASEC kind.\n");
+ return -ENOENT;
+ }
+
+ /* .extern datasec size and var offsets were set correctly during
+ * extern collection step, so just skip straight to sorting variables
+ */
+ if (t->size)
+ goto sort_vars;
+
+ ret = find_elf_sec_sz(obj, name, &size);
+ if (ret || !size || (t->size && t->size != size)) {
+ pr_debug("Invalid size for section %s: %u bytes\n", name, size);
+ return -ENOENT;
+ }
+
+ t->size = size;
+
+ for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ var = btf_var(t_var);
+
+ if (!btf_is_var(t_var)) {
+ pr_debug("Non-VAR type seen in section %s\n", name);
+ return -EINVAL;
+ }
+
+ if (var->linkage == BTF_VAR_STATIC)
+ continue;
+
+ name = btf__name_by_offset(btf, t_var->name_off);
+ if (!name) {
+ pr_debug("No name found in string section for VAR kind\n");
+ return -ENOENT;
+ }
+
+ ret = find_elf_var_offset(obj, name, &off);
+ if (ret) {
+ pr_debug("No offset found in symbol table for VAR %s\n",
+ name);
+ return -ENOENT;
+ }
+
+ vsi->offset = off;
+ }
+
+sort_vars:
+ qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
+ return 0;
+}
+
+static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
+{
+ int err = 0;
+ __u32 i, n = btf__type_cnt(btf);
+
+ for (i = 1; i < n; i++) {
+ struct btf_type *t = btf_type_by_id(btf, i);
+
+ /* Loader needs to fix up some of the things compiler
+ * couldn't get its hands on while emitting BTF. This
+ * is section size and global variable offset. We use
+ * the info from the ELF itself for this purpose.
+ */
+ if (btf_is_datasec(t)) {
+ err = btf_fixup_datasec(obj, btf, t);
+ if (err)
+ break;
+ }
+ }
+
+ return libbpf_err(err);
+}
+
+int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
+{
+ return btf_finalize_data(obj, btf);
+}
+
static int bpf_object__finalize_btf(struct bpf_object *obj)
{
int err;
@@ -2647,7 +2818,7 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
if (!obj->btf)
return 0;
- err = btf__finalize_data(obj, obj->btf);
+ err = btf_finalize_data(obj, obj->btf);
if (err) {
pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
return err;
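
Editor's note: btf_fixup_datasec() above fills in what the compiler leaves zeroed, namely the DATASEC byte size (from the ELF section, via find_elf_sec_sz) and each variable's offset (from the symbol table, via find_elf_var_offset). A hedged sketch of that compiler-emitted shape, built with libbpf's BTF writer API (names and sizes are illustrative):

    #include <linux/btf.h>
    #include <bpf/btf.h>
    #include <stdio.h>

    int main(void)
    {
            struct btf *btf = btf__new_empty();
            int int_id, var_id, sec_id;

            int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
            var_id = btf__add_var(btf, "my_global", BTF_VAR_GLOBAL_ALLOCATED, int_id);

            /* compiler-style DATASEC: size 0 and offset 0, unknown until
             * the loader consults the ELF section and symbol table
             */
            sec_id = btf__add_datasec(btf, ".data.custom", 0);
            btf__add_datasec_var_info(btf, var_id, 0, 4);

            printf("datasec id=%d, var id=%d\n", sec_id, var_id);
            btf__free(btf);
            return 0;
    }
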
@@ -2757,8 +2928,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
continue;
- n = btf__get_nr_types(obj->btf);
- for (j = 1; j <= n; j++) {
+ n = btf__type_cnt(obj->btf);
+ for (j = 1; j < n; j++) {
t = btf_type_by_id(obj->btf, j);
if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
continue;
@@ -2778,7 +2949,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
__u32 sz;
/* clone BTF to sanitize a copy and leave the original intact */
- raw_data = btf__get_raw_data(obj->btf, &sz);
+ raw_data = btf__raw_data(obj->btf, &sz);
kern_btf = btf__new(raw_data, sz);
err = libbpf_get_error(kern_btf);
if (err)
@@ -2791,7 +2962,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (obj->gen_loader) {
__u32 raw_size = 0;
- const void *raw_data = btf__get_raw_data(kern_btf, &raw_size);
+ const void *raw_data = btf__raw_data(kern_btf, &raw_size);
if (!raw_data)
return -ENOMEM;
@@ -2883,32 +3054,36 @@ static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
return NULL;
}
-static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
+static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
{
+ Elf64_Shdr *shdr;
+
if (!scn)
- return -EINVAL;
+ return NULL;
- if (gelf_getshdr(scn, hdr) != hdr) {
+ shdr = elf64_getshdr(scn);
+ if (!shdr) {
pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
elf_ndxscn(scn), obj->path, elf_errmsg(-1));
- return -EINVAL;
+ return NULL;
}
- return 0;
+ return shdr;
}
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
{
const char *name;
- GElf_Shdr sh;
+ Elf64_Shdr *sh;
if (!scn)
return NULL;
- if (elf_sec_hdr(obj, scn, &sh))
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
return NULL;
- name = elf_sec_str(obj, sh.sh_name);
+ name = elf_sec_str(obj, sh->sh_name);
if (!name) {
pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
elf_ndxscn(scn), obj->path, elf_errmsg(-1));
@@ -2936,13 +3111,29 @@ static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
return data;
}
+static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
+{
+ if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
+ return NULL;
+
+ return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
+}
+
+static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
+{
+ if (idx >= data->d_size / sizeof(Elf64_Rel))
+ return NULL;
+
+ return (Elf64_Rel *)data->d_buf + idx;
+}
+
static bool is_sec_name_dwarf(const char *name)
{
/* approximation, but the actual list is too long */
return str_has_pfx(name, ".debug_");
}
-static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
+static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
{
/* no special handling of .strtab */
if (hdr->sh_type == SHT_STRTAB)
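
Editor's note: elf_sym_by_idx() and elf_rel_by_idx() replace the gelf_getsym()/gelf_getrel() copy-out calls with direct pointers into the section data, which is valid now that the object is verified to be ELFCLASS64. A hedged standalone libelf sketch of the same access pattern (file name is illustrative, error handling trimmed, link with -lelf):

    #include <fcntl.h>
    #include <gelf.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("prog.bpf.o", O_RDONLY | O_CLOEXEC);
            Elf_Scn *scn = NULL;
            Elf *elf;

            elf_version(EV_CURRENT);
            elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);

            while ((scn = elf_nextscn(elf, scn))) {
                    Elf64_Shdr *sh = elf64_getshdr(scn);

                    if (!sh || sh->sh_type != SHT_SYMTAB)
                            continue;

                    Elf_Data *data = elf_getdata(scn, NULL);
                    size_t i, n = data->d_size / sizeof(Elf64_Sym);

                    for (i = 0; i < n; i++) {
                            /* index directly, no gelf_getsym() copy */
                            Elf64_Sym *sym = (Elf64_Sym *)data->d_buf + i;

                            printf("sym %zu: shndx %u value %llu\n", i,
                                   (unsigned)sym->st_shndx,
                                   (unsigned long long)sym->st_value);
                    }
            }

            elf_end(elf);
            close(fd);
            return 0;
    }
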
@@ -2990,6 +3181,7 @@ static int cmp_progs(const void *_a, const void *_b)
static int bpf_object__elf_collect(struct bpf_object *obj)
{
+ struct elf_sec_desc *sec_desc;
Elf *elf = obj->efile.elf;
Elf_Data *btf_ext_data = NULL;
Elf_Data *btf_data = NULL;
@@ -2997,17 +3189,27 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
const char *name;
Elf_Data *data;
Elf_Scn *scn;
- GElf_Shdr sh;
+ Elf64_Shdr *sh;
+
+ /* ELF section indices are 1-based, so allocate +1 element to keep
+ * indexing simple. Also include 0th invalid section into sec_cnt for
+ * simpler and more traditional iteration logic.
+ */
+ obj->efile.sec_cnt = 1 + obj->efile.ehdr->e_shnum;
+ obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
+ if (!obj->efile.secs)
+ return -ENOMEM;
/* a bunch of ELF parsing functionality depends on processing symbols,
* so do the first pass and find the symbol table
*/
scn = NULL;
while ((scn = elf_nextscn(elf, scn)) != NULL) {
- if (elf_sec_hdr(obj, scn, &sh))
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
return -LIBBPF_ERRNO__FORMAT;
- if (sh.sh_type == SHT_SYMTAB) {
+ if (sh->sh_type == SHT_SYMTAB) {
if (obj->efile.symbols) {
pr_warn("elf: multiple symbol tables in %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
@@ -3017,9 +3219,11 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (!data)
return -LIBBPF_ERRNO__FORMAT;
+ idx = elf_ndxscn(scn);
+
obj->efile.symbols = data;
- obj->efile.symbols_shndx = elf_ndxscn(scn);
- obj->efile.strtabidx = sh.sh_link;
+ obj->efile.symbols_shndx = idx;
+ obj->efile.strtabidx = sh->sh_link;
}
}
@@ -3031,16 +3235,18 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
scn = NULL;
while ((scn = elf_nextscn(elf, scn)) != NULL) {
- idx++;
+ idx = elf_ndxscn(scn);
+ sec_desc = &obj->efile.secs[idx];
- if (elf_sec_hdr(obj, scn, &sh))
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
return -LIBBPF_ERRNO__FORMAT;
- name = elf_sec_str(obj, sh.sh_name);
+ name = elf_sec_str(obj, sh->sh_name);
if (!name)
return -LIBBPF_ERRNO__FORMAT;
- if (ignore_elf_section(&sh, name))
+ if (ignore_elf_section(sh, name))
continue;
data = elf_sec_data(obj, scn);
@@ -3049,8 +3255,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
idx, name, (unsigned long)data->d_size,
- (int)sh.sh_link, (unsigned long)sh.sh_flags,
- (int)sh.sh_type);
+ (int)sh->sh_link, (unsigned long)sh->sh_flags,
+ (int)sh->sh_type);
if (strcmp(name, "license") == 0) {
err = bpf_object__init_license(obj, data->d_buf, data->d_size);
@@ -3068,21 +3274,25 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
btf_data = data;
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
btf_ext_data = data;
- } else if (sh.sh_type == SHT_SYMTAB) {
+ } else if (sh->sh_type == SHT_SYMTAB) {
/* already processed during the first pass above */
- } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
- if (sh.sh_flags & SHF_EXECINSTR) {
+ } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
+ if (sh->sh_flags & SHF_EXECINSTR) {
if (strcmp(name, ".text") == 0)
obj->efile.text_shndx = idx;
err = bpf_object__add_programs(obj, data, name, idx);
if (err)
return err;
- } else if (strcmp(name, DATA_SEC) == 0) {
- obj->efile.data = data;
- obj->efile.data_shndx = idx;
- } else if (strcmp(name, RODATA_SEC) == 0) {
- obj->efile.rodata = data;
- obj->efile.rodata_shndx = idx;
+ } else if (strcmp(name, DATA_SEC) == 0 ||
+ str_has_pfx(name, DATA_SEC ".")) {
+ sec_desc->sec_type = SEC_DATA;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
+ } else if (strcmp(name, RODATA_SEC) == 0 ||
+ str_has_pfx(name, RODATA_SEC ".")) {
+ sec_desc->sec_type = SEC_RODATA;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
obj->efile.st_ops_data = data;
obj->efile.st_ops_shndx = idx;
@@ -3090,37 +3300,29 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
pr_info("elf: skipping unrecognized data section(%d) %s\n",
idx, name);
}
- } else if (sh.sh_type == SHT_REL) {
- int nr_sects = obj->efile.nr_reloc_sects;
- void *sects = obj->efile.reloc_sects;
- int sec = sh.sh_info; /* points to other section */
+ } else if (sh->sh_type == SHT_REL) {
+ int targ_sec_idx = sh->sh_info; /* points to other section */
/* Only do relo for section with exec instructions */
- if (!section_have_execinstr(obj, sec) &&
+ if (!section_have_execinstr(obj, targ_sec_idx) &&
strcmp(name, ".rel" STRUCT_OPS_SEC) &&
strcmp(name, ".rel" MAPS_ELF_SEC)) {
pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
- idx, name, sec,
- elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
+ idx, name, targ_sec_idx,
+ elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
continue;
}
- sects = libbpf_reallocarray(sects, nr_sects + 1,
- sizeof(*obj->efile.reloc_sects));
- if (!sects)
- return -ENOMEM;
-
- obj->efile.reloc_sects = sects;
- obj->efile.nr_reloc_sects++;
-
- obj->efile.reloc_sects[nr_sects].shdr = sh;
- obj->efile.reloc_sects[nr_sects].data = data;
- } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
- obj->efile.bss = data;
- obj->efile.bss_shndx = idx;
+ sec_desc->sec_type = SEC_RELO;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
+ } else if (sh->sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
+ sec_desc->sec_type = SEC_BSS;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
} else {
pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
- (size_t)sh.sh_size);
+ (size_t)sh->sh_size);
}
}
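
Editor's note: matching DATA_SEC and RODATA_SEC by prefix lets a single object carry several data sections, each becoming its own internal map. A hedged BPF C sketch (section and variable names are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* each custom section below becomes a separate internal map */
    int pkt_count SEC(".data.counters") = 0;
    const volatile __u32 max_pkts SEC(".rodata.limits") = 128;

    SEC("xdp")
    int count_pkts(struct xdp_md *ctx)
    {
            if (pkt_count < max_pkts)
                    __sync_fetch_and_add(&pkt_count, 1);
            return XDP_PASS;
    }

    char LICENSE[] SEC("license") = "GPL";
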
@@ -3136,19 +3338,19 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
return bpf_object__init_btf(obj, btf_data, btf_ext_data);
}
-static bool sym_is_extern(const GElf_Sym *sym)
+static bool sym_is_extern(const Elf64_Sym *sym)
{
- int bind = GELF_ST_BIND(sym->st_info);
+ int bind = ELF64_ST_BIND(sym->st_info);
/* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
return sym->st_shndx == SHN_UNDEF &&
(bind == STB_GLOBAL || bind == STB_WEAK) &&
- GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
+ ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
}
-static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx)
+static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
{
- int bind = GELF_ST_BIND(sym->st_info);
- int type = GELF_ST_TYPE(sym->st_info);
+ int bind = ELF64_ST_BIND(sym->st_info);
+ int type = ELF64_ST_TYPE(sym->st_info);
/* in .text section */
if (sym->st_shndx != text_shndx)
@@ -3171,8 +3373,8 @@ static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
if (!btf)
return -ESRCH;
- n = btf__get_nr_types(btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
t = btf__type_by_id(btf, i);
if (!btf_is_var(t) && !btf_is_func(t))
@@ -3203,8 +3405,8 @@ static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) {
if (!btf)
return -ESRCH;
- n = btf__get_nr_types(btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
t = btf__type_by_id(btf, i);
if (!btf_is_datasec(t))
@@ -3288,8 +3490,8 @@ static int find_int_btf_id(const struct btf *btf)
const struct btf_type *t;
int i, n;
- n = btf__get_nr_types(btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
t = btf__type_by_id(btf, i);
if (btf_is_int(t) && btf_int_bits(t) == 32)
@@ -3346,30 +3548,31 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
int i, n, off, dummy_var_btf_id;
const char *ext_name, *sec_name;
Elf_Scn *scn;
- GElf_Shdr sh;
+ Elf64_Shdr *sh;
if (!obj->efile.symbols)
return 0;
scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
- if (elf_sec_hdr(obj, scn, &sh))
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
return -LIBBPF_ERRNO__FORMAT;
dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
if (dummy_var_btf_id < 0)
return dummy_var_btf_id;
- n = sh.sh_size / sh.sh_entsize;
+ n = sh->sh_size / sh->sh_entsize;
pr_debug("looking for externs among %d symbols...\n", n);
for (i = 0; i < n; i++) {
- GElf_Sym sym;
+ Elf64_Sym *sym = elf_sym_by_idx(obj, i);
- if (!gelf_getsym(obj->efile.symbols, i, &sym))
+ if (!sym)
return -LIBBPF_ERRNO__FORMAT;
- if (!sym_is_extern(&sym))
+ if (!sym_is_extern(sym))
continue;
- ext_name = elf_sym_str(obj, sym.st_name);
+ ext_name = elf_sym_str(obj, sym->st_name);
if (!ext_name || !ext_name[0])
continue;
@@ -3391,7 +3594,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
t = btf__type_by_id(obj->btf, ext->btf_id);
ext->name = btf__name_by_offset(obj->btf, t->name_off);
ext->sym_idx = i;
- ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
+ ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
if (ext->sec_btf_id <= 0) {
@@ -3429,11 +3632,6 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return -ENOTSUP;
}
} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
- if (btf_is_func(t) && ext->is_weak) {
- pr_warn("extern weak function %s is unsupported\n",
- ext->name);
- return -ENOTSUP;
- }
ksym_sec = sec;
ext->type = EXT_KSYM;
skip_mods_and_typedefs(obj->btf, t->type,
@@ -3601,9 +3799,14 @@ bpf_object__find_program_by_name(const struct bpf_object *obj,
static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
int shndx)
{
- return shndx == obj->efile.data_shndx ||
- shndx == obj->efile.bss_shndx ||
- shndx == obj->efile.rodata_shndx;
+ switch (obj->efile.secs[shndx].sec_type) {
+ case SEC_BSS:
+ case SEC_DATA:
+ case SEC_RODATA:
+ return true;
+ default:
+ return false;
+ }
}
static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
@@ -3616,22 +3819,25 @@ static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
- if (shndx == obj->efile.data_shndx)
- return LIBBPF_MAP_DATA;
- else if (shndx == obj->efile.bss_shndx)
+ if (shndx == obj->efile.symbols_shndx)
+ return LIBBPF_MAP_KCONFIG;
+
+ switch (obj->efile.secs[shndx].sec_type) {
+ case SEC_BSS:
return LIBBPF_MAP_BSS;
- else if (shndx == obj->efile.rodata_shndx)
+ case SEC_DATA:
+ return LIBBPF_MAP_DATA;
+ case SEC_RODATA:
return LIBBPF_MAP_RODATA;
- else if (shndx == obj->efile.symbols_shndx)
- return LIBBPF_MAP_KCONFIG;
- else
+ default:
return LIBBPF_MAP_UNSPEC;
+ }
}
static int bpf_program__record_reloc(struct bpf_program *prog,
struct reloc_desc *reloc_desc,
__u32 insn_idx, const char *sym_name,
- const GElf_Sym *sym, const GElf_Rel *rel)
+ const Elf64_Sym *sym, const Elf64_Rel *rel)
{
struct bpf_insn *insn = &prog->insns[insn_idx];
size_t map_idx, nr_maps = prog->obj->nr_maps;
@@ -3648,7 +3854,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
}
if (sym_is_extern(sym)) {
- int sym_idx = GELF_R_SYM(rel->r_info);
+ int sym_idx = ELF64_R_SYM(rel->r_info);
int i, n = obj->nr_extern;
struct extern_desc *ext;
@@ -3761,7 +3967,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
}
for (map_idx = 0; map_idx < nr_maps; map_idx++) {
map = &obj->maps[map_idx];
- if (map->libbpf_type != type)
+ if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
continue;
pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
prog->name, map_idx, map->name, map->sec_idx,
@@ -3813,9 +4019,8 @@ static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
}
static int
-bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
+bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
{
- Elf_Data *symbols = obj->efile.symbols;
const char *relo_sec_name, *sec_name;
size_t sec_idx = shdr->sh_info;
struct bpf_program *prog;
@@ -3825,8 +4030,8 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data
__u32 insn_idx;
Elf_Scn *scn;
Elf_Data *scn_data;
- GElf_Sym sym;
- GElf_Rel rel;
+ Elf64_Sym *sym;
+ Elf64_Rel *rel;
scn = elf_sec_by_idx(obj, sec_idx);
scn_data = elf_sec_data(obj, scn);
@@ -3841,33 +4046,36 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data
nrels = shdr->sh_size / shdr->sh_entsize;
for (i = 0; i < nrels; i++) {
- if (!gelf_getrel(data, i, &rel)) {
+ rel = elf_rel_by_idx(data, i);
+ if (!rel) {
pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
+
+ sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
+ if (!sym) {
pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
- relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
+ relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (rel.r_offset % BPF_INSN_SZ || rel.r_offset >= scn_data->d_size) {
+ if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
- relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
+ relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i);
return -LIBBPF_ERRNO__FORMAT;
}
- insn_idx = rel.r_offset / BPF_INSN_SZ;
+ insn_idx = rel->r_offset / BPF_INSN_SZ;
/* relocations against static functions are recorded as
* relocations against the section that contains a function;
* in such case, symbol will be STT_SECTION and sym.st_name
* will point to empty string (0), so fetch section name
* instead
*/
- if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
- sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
+ if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
+ sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
else
- sym_name = elf_sym_str(obj, sym.st_name);
+ sym_name = elf_sym_str(obj, sym->st_name);
sym_name = sym_name ?: "<?";
pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
@@ -3889,7 +4097,7 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data
/* adjust insn_idx to local BPF program frame of reference */
insn_idx -= prog->sec_insn_off;
err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
- insn_idx, sym_name, &sym, &rel);
+ insn_idx, sym_name, sym, rel);
if (err)
return err;
@@ -3921,8 +4129,7 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
* LLVM annotates global data differently in BTF, that is,
* only as '.data', '.bss' or '.rodata'.
*/
- ret = btf__find_by_name(obj->btf,
- libbpf_type_to_btf_name[map->libbpf_type]);
+ ret = btf__find_by_name(obj->btf, map->real_name);
}
if (ret < 0)
return ret;
@@ -4015,6 +4222,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
map->btf_key_type_id = info.btf_key_type_id;
map->btf_value_type_id = info.btf_value_type_id;
map->reused = true;
+ map->map_extra = info.map_extra;
return 0;
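
Editor's note: bpf_map__reuse_fd() now also carries map_extra over from the existing map, and map_is_reuse_compat() further down rejects a pinned map whose map_extra differs. A hedged userspace sketch of manual fd reuse (map name and pin path are assumptions):

    #include <unistd.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    int reuse_pinned(struct bpf_object *obj)
    {
            struct bpf_map *map = bpf_object__find_map_by_name(obj, "filter");
            int pinned_fd = bpf_obj_get("/sys/fs/bpf/filter");
            int err;

            if (!map || pinned_fd < 0)
                    return -1;

            /* copies type/sizes/BTF ids and now map_extra from the fd */
            err = bpf_map__reuse_fd(map, pinned_fd);
            close(pinned_fd); /* reuse_fd dups the fd internally */
            return err;
    }
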
@@ -4243,7 +4451,7 @@ static int probe_kern_btf_float(void)
strs, sizeof(strs)));
}
-static int probe_kern_btf_tag(void)
+static int probe_kern_btf_decl_tag(void)
{
static const char strs[] = "\0tag";
__u32 types[] = {
@@ -4253,7 +4461,7 @@ static int probe_kern_btf_tag(void)
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
BTF_VAR_STATIC,
/* attr */
- BTF_TYPE_TAG_ENC(1, 2, -1),
+ BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
};
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
@@ -4476,8 +4684,8 @@ static struct kern_feature_desc {
[FEAT_PERF_LINK] = {
"BPF perf link support", probe_perf_link,
},
- [FEAT_BTF_TAG] = {
- "BTF_KIND_TAG support", probe_kern_btf_tag,
+ [FEAT_BTF_DECL_TAG] = {
+ "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
},
};
@@ -4529,7 +4737,8 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
map_info.key_size == map->def.key_size &&
map_info.value_size == map->def.value_size &&
map_info.max_entries == map->def.max_entries &&
- map_info.map_flags == map->def.map_flags);
+ map_info.map_flags == map->def.map_flags &&
+ map_info.map_extra == map->map_extra);
}
static int
@@ -4612,7 +4821,7 @@ static void bpf_map__destroy(struct bpf_map *map);
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
- struct bpf_create_map_attr create_attr;
+ struct bpf_create_map_params create_attr;
struct bpf_map_def *def = &map->def;
int err = 0;
@@ -4626,6 +4835,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.key_size = def->key_size;
create_attr.value_size = def->value_size;
create_attr.numa_node = map->numa_node;
+ create_attr.map_extra = map->map_extra;
if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
int nr_cpus;
@@ -4700,7 +4910,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
*/
map->fd = 0;
} else {
- map->fd = bpf_create_map_xattr(&create_attr);
+ map->fd = libbpf__bpf_create_map_xattr(&create_attr);
}
if (map->fd < 0 && (create_attr.btf_key_type_id ||
create_attr.btf_value_type_id)) {
@@ -4715,7 +4925,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.btf_value_type_id = 0;
map->btf_key_type_id = 0;
map->btf_value_type_id = 0;
- map->fd = bpf_create_map_xattr(&create_attr);
+ map->fd = libbpf__bpf_create_map_xattr(&create_attr);
}
err = map->fd < 0 ? -errno : 0;
@@ -4892,8 +5102,8 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand,
size_t targ_essent_len;
int n, i;
- n = btf__get_nr_types(targ_btf);
- for (i = targ_start_id; i <= n; i++) {
+ n = btf__type_cnt(targ_btf);
+ for (i = targ_start_id; i < n; i++) {
t = btf__type_by_id(targ_btf, i);
if (btf_kind(t) != btf_kind(local_cand->t))
continue;
@@ -5068,7 +5278,7 @@ bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 l
err = bpf_core_add_cands(&local_cand, local_essent_len,
obj->btf_modules[i].btf,
obj->btf_modules[i].name,
- btf__get_nr_types(obj->btf_vmlinux) + 1,
+ btf__type_cnt(obj->btf_vmlinux),
cands);
if (err)
goto err_out;
@@ -5212,7 +5422,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
* relocated, so it's enough to just subtract in-section offset
*/
insn_idx = insn_idx - prog->sec_insn_off;
- if (insn_idx > prog->insns_cnt)
+ if (insn_idx >= prog->insns_cnt)
return -EINVAL;
insn = &prog->insns[insn_idx];
@@ -5406,7 +5616,13 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
case RELO_EXTERN_FUNC:
ext = &obj->externs[relo->sym_off];
insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
- insn[0].imm = ext->ksym.kernel_btf_id;
+ if (ext->is_set) {
+ insn[0].imm = ext->ksym.kernel_btf_id;
+ insn[0].off = ext->ksym.btf_fd_idx;
+ } else { /* unresolved weak kfunc */
+ insn[0].imm = 0;
+ insn[0].off = 0;
+ }
break;
case RELO_SUBPROG_ADDR:
if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
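
Editor's note: for RELO_EXTERN_FUNC the patched call now carries the fd_array index of the resolved module BTF in insn->off (0 for vmlinux BTF), and an unresolved weak kfunc is left with imm and off of 0 instead of failing relocation. A hedged BPF C sketch of a module kfunc call; the kfunc name is the bpf selftests' test-module export, used purely as an example:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* kfunc exported by a kernel module; libbpf resolves it in that
     * module's BTF and records the module BTF fd index in insn->off
     */
    extern void bpf_testmod_test_mod_kfunc(int i) __ksym;

    SEC("tc")
    int call_mod_kfunc(struct __sk_buff *skb)
    {
            bpf_testmod_test_mod_kfunc(42);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
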
@@ -5931,10 +6147,10 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
}
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- GElf_Shdr *shdr, Elf_Data *data);
+ Elf64_Shdr *shdr, Elf_Data *data);
static int bpf_object__collect_map_relos(struct bpf_object *obj,
- GElf_Shdr *shdr, Elf_Data *data)
+ Elf64_Shdr *shdr, Elf_Data *data)
{
const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
int i, j, nrels, new_sz;
@@ -5943,10 +6159,9 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
struct bpf_map *map = NULL, *targ_map;
const struct btf_member *member;
const char *name, *mname;
- Elf_Data *symbols;
unsigned int moff;
- GElf_Sym sym;
- GElf_Rel rel;
+ Elf64_Sym *sym;
+ Elf64_Rel *rel;
void *tmp;
if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
@@ -5955,28 +6170,30 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
if (!sec)
return -EINVAL;
- symbols = obj->efile.symbols;
nrels = shdr->sh_size / shdr->sh_entsize;
for (i = 0; i < nrels; i++) {
- if (!gelf_getrel(data, i, &rel)) {
+ rel = elf_rel_by_idx(data, i);
+ if (!rel) {
pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
+
+ sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
+ if (!sym) {
pr_warn(".maps relo #%d: symbol %zx not found\n",
- i, (size_t)GELF_R_SYM(rel.r_info));
+ i, (size_t)ELF64_R_SYM(rel->r_info));
return -LIBBPF_ERRNO__FORMAT;
}
- name = elf_sym_str(obj, sym.st_name) ?: "<?>";
- if (sym.st_shndx != obj->efile.btf_maps_shndx) {
+ name = elf_sym_str(obj, sym->st_name) ?: "<?>";
+ if (sym->st_shndx != obj->efile.btf_maps_shndx) {
pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
i, name);
return -LIBBPF_ERRNO__RELOC;
}
- pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
- i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
- (size_t)rel.r_offset, sym.st_name, name);
+ pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
+ i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
+ (size_t)rel->r_offset, sym->st_name, name);
for (j = 0; j < obj->nr_maps; j++) {
map = &obj->maps[j];
@@ -5984,13 +6201,13 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
continue;
vi = btf_var_secinfos(sec) + map->btf_var_idx;
- if (vi->offset <= rel.r_offset &&
- rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
+ if (vi->offset <= rel->r_offset &&
+ rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
break;
}
if (j == obj->nr_maps) {
- pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
- i, name, (size_t)rel.r_offset);
+ pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
+ i, name, (size_t)rel->r_offset);
return -EINVAL;
}
@@ -6017,10 +6234,10 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
return -EINVAL;
moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
- if (rel.r_offset - vi->offset < moff)
+ if (rel->r_offset - vi->offset < moff)
return -EINVAL;
- moff = rel.r_offset - vi->offset - moff;
+ moff = rel->r_offset - vi->offset - moff;
/* here we use BPF pointer size, which is always 64 bit, as we
* are parsing ELF that was built for BPF target
*/
@@ -6065,10 +6282,18 @@ static int bpf_object__collect_relos(struct bpf_object *obj)
{
int i, err;
- for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
- GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
- Elf_Data *data = obj->efile.reloc_sects[i].data;
- int idx = shdr->sh_info;
+ for (i = 0; i < obj->efile.sec_cnt; i++) {
+ struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
+ Elf64_Shdr *shdr;
+ Elf_Data *data;
+ int idx;
+
+ if (sec_desc->sec_type != SEC_RELO)
+ continue;
+
+ shdr = sec_desc->shdr;
+ data = sec_desc->data;
+ idx = shdr->sh_info;
if (shdr->sh_type != SHT_REL) {
pr_warn("internal error at %d\n", __LINE__);
@@ -6191,6 +6416,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *license, __u32 kern_version, int *pfd)
{
struct bpf_prog_load_params load_attr = {};
+ struct bpf_object *obj = prog->obj;
char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
char *log_buf = NULL;
@@ -6211,7 +6437,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.prog_type = prog->type;
load_attr.expected_attach_type = prog->expected_attach_type;
- if (kernel_supports(prog->obj, FEAT_PROG_NAME))
+ if (kernel_supports(obj, FEAT_PROG_NAME))
load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insn_cnt = insns_cnt;
@@ -6224,8 +6450,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.prog_ifindex = prog->prog_ifindex;
/* specify func_info/line_info only if kernel supports them */
- btf_fd = bpf_object__btf_fd(prog->obj);
- if (btf_fd >= 0 && kernel_supports(prog->obj, FEAT_BTF_FUNC)) {
+ btf_fd = bpf_object__btf_fd(obj);
+ if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
load_attr.prog_btf_fd = btf_fd;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
@@ -6236,6 +6462,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
}
load_attr.log_level = prog->log_level;
load_attr.prog_flags = prog->prog_flags;
+ load_attr.fd_array = obj->fd_array;
/* adjust load_attr if sec_def provides custom preload callback */
if (prog->sec_def && prog->sec_def->preload_fn) {
@@ -6247,9 +6474,9 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
}
}
- if (prog->obj->gen_loader) {
- bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
- prog - prog->obj->programs);
+ if (obj->gen_loader) {
+ bpf_gen__prog_load(obj->gen_loader, &load_attr,
+ prog - obj->programs);
*pfd = -1;
return 0;
}
@@ -6270,16 +6497,21 @@ retry_load:
if (log_buf && load_attr.log_level)
pr_debug("verifier log:\n%s", log_buf);
- if (prog->obj->rodata_map_idx >= 0 &&
- kernel_supports(prog->obj, FEAT_PROG_BIND_MAP)) {
- struct bpf_map *rodata_map =
- &prog->obj->maps[prog->obj->rodata_map_idx];
+ if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
+ struct bpf_map *map;
+ int i;
+
+ for (i = 0; i < obj->nr_maps; i++) {
+ map = &prog->obj->maps[i];
+ if (map->libbpf_type != LIBBPF_MAP_RODATA)
+ continue;
- if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warn("prog '%s': failed to bind .rodata map: %s\n",
- prog->name, cp);
- /* Don't fail hard if can't bind rodata. */
+ if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warn("prog '%s': failed to bind .rodata map: %s\n",
+ prog->name, cp);
+ /* Don't fail hard if can't bind rodata. */
+ }
}
}
@@ -6343,16 +6575,13 @@ static int bpf_program__record_externs(struct bpf_program *prog)
case RELO_EXTERN_VAR:
if (ext->type != EXT_KSYM)
continue;
- if (!ext->ksym.type_id) {
- pr_warn("typeless ksym %s is not supported yet\n",
- ext->name);
- return -ENOTSUP;
- }
- bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR,
- relo->insn_idx);
+ bpf_gen__record_extern(obj->gen_loader, ext->name,
+ ext->is_weak, !ext->ksym.type_id,
+ BTF_KIND_VAR, relo->insn_idx);
break;
case RELO_EXTERN_FUNC:
- bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC,
+ bpf_gen__record_extern(obj->gen_loader, ext->name,
+ ext->is_weak, false, BTF_KIND_FUNC,
relo->insn_idx);
break;
default:
@@ -6438,8 +6667,6 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
out:
if (err)
pr_warn("failed to load program '%s'\n", prog->name);
- zfree(&prog->insns);
- prog->insns_cnt = 0;
return libbpf_err(err);
}
@@ -6660,7 +6887,7 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts));
}
-int bpf_object__unload(struct bpf_object *obj)
+static int bpf_object_unload(struct bpf_object *obj)
{
size_t i;
@@ -6679,6 +6906,8 @@ int bpf_object__unload(struct bpf_object *obj)
return 0;
}
+int bpf_object__unload(struct bpf_object *obj) __attribute__((alias("bpf_object_unload")));
+
static int bpf_object__sanitize_maps(struct bpf_object *obj)
{
struct bpf_map *m;
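
The exported bpf_object__unload() is kept ABI-compatible by aliasing it to the new internal bpf_object_unload(); the same pattern reappears below for the pin/unpin instance helpers. A minimal stand-alone sketch of the GCC/Clang alias technique, with hypothetical names:

        /* sketch of the alias technique used above (hypothetical names) */
        static int internal_unload(void)
        {
                return 0;       /* the single real implementation */
        }

        /* the old exported name is preserved and resolves to the same code */
        int public_unload(void) __attribute__((alias("internal_unload")));
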
@@ -6752,13 +6981,14 @@ out:
static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
__u16 kind, struct btf **res_btf,
- int *res_btf_fd)
+ struct module_btf **res_mod_btf)
{
- int i, id, btf_fd, err;
+ struct module_btf *mod_btf;
struct btf *btf;
+ int i, id, err;
btf = obj->btf_vmlinux;
- btf_fd = 0;
+ mod_btf = NULL;
id = btf__find_by_name_kind(btf, ksym_name, kind);
if (id == -ENOENT) {
@@ -6767,10 +6997,10 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
return err;
for (i = 0; i < obj->btf_module_cnt; i++) {
- btf = obj->btf_modules[i].btf;
- /* we assume module BTF FD is always >0 */
- btf_fd = obj->btf_modules[i].fd;
- id = btf__find_by_name_kind(btf, ksym_name, kind);
+ /* we assume module_btf's BTF FD is always >0 */
+ mod_btf = &obj->btf_modules[i];
+ btf = mod_btf->btf;
+ id = btf__find_by_name_kind_own(btf, ksym_name, kind);
if (id != -ENOENT)
break;
}
@@ -6779,7 +7009,7 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
return -ESRCH;
*res_btf = btf;
- *res_btf_fd = btf_fd;
+ *res_mod_btf = mod_btf;
return id;
}
@@ -6788,14 +7018,15 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
{
const struct btf_type *targ_var, *targ_type;
__u32 targ_type_id, local_type_id;
+ struct module_btf *mod_btf = NULL;
const char *targ_var_name;
- int id, btf_fd = 0, err;
struct btf *btf = NULL;
+ int id, err;
- id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &btf_fd);
- if (id == -ESRCH && ext->is_weak) {
- return 0;
- } else if (id < 0) {
+ id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
+ if (id < 0) {
+ if (id == -ESRCH && ext->is_weak)
+ return 0;
pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
ext->name);
return id;
@@ -6827,7 +7058,7 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
}
ext->is_set = true;
- ext->ksym.kernel_btf_obj_fd = btf_fd;
+ ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
ext->ksym.kernel_btf_id = id;
pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
ext->name, id, btf_kind_str(targ_var), targ_var_name);
@@ -6839,26 +7070,22 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
struct extern_desc *ext)
{
int local_func_proto_id, kfunc_proto_id, kfunc_id;
+ struct module_btf *mod_btf = NULL;
const struct btf_type *kern_func;
struct btf *kern_btf = NULL;
- int ret, kern_btf_fd = 0;
+ int ret;
local_func_proto_id = ext->ksym.type_id;
- kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC,
- &kern_btf, &kern_btf_fd);
+ kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
if (kfunc_id < 0) {
- pr_warn("extern (func ksym) '%s': not found in kernel BTF\n",
+ if (kfunc_id == -ESRCH && ext->is_weak)
+ return 0;
+ pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
ext->name);
return kfunc_id;
}
- if (kern_btf != obj->btf_vmlinux) {
- pr_warn("extern (func ksym) '%s': function in kernel module is not supported\n",
- ext->name);
- return -ENOTSUP;
- }
-
kern_func = btf__type_by_id(kern_btf, kfunc_id);
kfunc_proto_id = kern_func->type;
@@ -6870,9 +7097,30 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
return -EINVAL;
}
+ /* set index for module BTF fd in fd_array, if unset */
+ if (mod_btf && !mod_btf->fd_array_idx) {
+ /* insn->off is s16 */
+ if (obj->fd_array_cnt == INT16_MAX) {
+ pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
+ ext->name, mod_btf->fd_array_idx);
+ return -E2BIG;
+ }
+ /* Cannot use index 0 for module BTF fd */
+ if (!obj->fd_array_cnt)
+ obj->fd_array_cnt = 1;
+
+ ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
+ obj->fd_array_cnt + 1);
+ if (ret)
+ return ret;
+ mod_btf->fd_array_idx = obj->fd_array_cnt;
+ /* we assume module BTF FD is always >0 */
+ obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
+ }
+
ext->is_set = true;
- ext->ksym.kernel_btf_obj_fd = kern_btf_fd;
ext->ksym.kernel_btf_id = kfunc_id;
+ ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
ext->name, kfunc_id);
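
With -ESRCH on a weak extern no longer treated as fatal, and module BTF fds routed through obj->fd_array (referenced from insn->off for kfunc calls), BPF code can declare ksyms that may be absent on the running kernel. A BPF-side sketch, assuming <bpf/bpf_helpers.h>; the symbol name is hypothetical:

        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>

        /* hypothetical kernel symbol; an unresolved weak ksym gets address 0 */
        extern const int possibly_missing_var __ksym __weak;

        SEC("raw_tp/sys_enter")
        int probe_weak_ksym(void *ctx)
        {
                if (&possibly_missing_var)
                        bpf_printk("ksym resolved");
                return 0;
        }

        char LICENSE[] SEC("license") = "GPL";
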
@@ -7032,6 +7280,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
err = bpf_gen__finish(obj->gen_loader);
}
+ /* clean up fd_array */
+ zfree(&obj->fd_array);
+
/* clean up module BTFs */
for (i = 0; i < obj->btf_module_cnt; i++) {
close(obj->btf_modules[i].fd);
@@ -7056,7 +7307,7 @@ out:
if (obj->maps[i].pinned && !obj->maps[i].reused)
bpf_map__unpin(&obj->maps[i], NULL);
- bpf_object__unload(obj);
+ bpf_object_unload(obj);
pr_warn("failed to load object '%s'\n", obj->path);
return libbpf_err(err);
}
@@ -7122,8 +7373,7 @@ static int check_path(const char *path)
return err;
}
-int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
- int instance)
+static int bpf_program_pin_instance(struct bpf_program *prog, const char *path, int instance)
{
char *cp, errmsg[STRERR_BUFSIZE];
int err;
@@ -7158,8 +7408,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
return 0;
}
-int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
- int instance)
+static int bpf_program_unpin_instance(struct bpf_program *prog, const char *path, int instance)
{
int err;
@@ -7187,6 +7436,12 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
return 0;
}
+__attribute__((alias("bpf_program_pin_instance")))
+int bpf_program__pin_instance(struct bpf_program *prog, const char *path, int instance);
+
+__attribute__((alias("bpf_program_unpin_instance")))
+int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance);
+
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
int i, err;
@@ -7211,7 +7466,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
if (prog->instances.nr == 1) {
/* don't create subdirs when pinning single instance */
- return bpf_program__pin_instance(prog, path, 0);
+ return bpf_program_pin_instance(prog, path, 0);
}
for (i = 0; i < prog->instances.nr; i++) {
@@ -7227,7 +7482,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
goto err_unpin;
}
- err = bpf_program__pin_instance(prog, buf, i);
+ err = bpf_program_pin_instance(prog, buf, i);
if (err)
goto err_unpin;
}
@@ -7245,7 +7500,7 @@ err_unpin:
else if (len >= PATH_MAX)
continue;
- bpf_program__unpin_instance(prog, buf, i);
+ bpf_program_unpin_instance(prog, buf, i);
}
rmdir(path);
@@ -7273,7 +7528,7 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
if (prog->instances.nr == 1) {
/* don't create subdirs when pinning single instance */
- return bpf_program__unpin_instance(prog, path, 0);
+ return bpf_program_unpin_instance(prog, path, 0);
}
for (i = 0; i < prog->instances.nr; i++) {
@@ -7286,7 +7541,7 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
else if (len >= PATH_MAX)
return libbpf_err(-ENAMETOOLONG);
- err = bpf_program__unpin_instance(prog, buf, i);
+ err = bpf_program_unpin_instance(prog, buf, i);
if (err)
return err;
}
@@ -7647,6 +7902,7 @@ static void bpf_map__destroy(struct bpf_map *map)
}
zfree(&map->name);
+ zfree(&map->real_name);
zfree(&map->pin_path);
if (map->fd >= 0)
@@ -7665,7 +7921,7 @@ void bpf_object__close(struct bpf_object *obj)
bpf_gen__free(obj->gen_loader);
bpf_object__elf_finish(obj);
- bpf_object__unload(obj);
+ bpf_object_unload(obj);
btf__free(obj->btf);
btf_ext__free(obj->btf_ext);
@@ -7694,6 +7950,10 @@ struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
struct bpf_object *next;
+ bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
+
+ if (strict)
+ return NULL;
if (!prev)
next = list_first_entry(&bpf_objects_list,
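
bpf_object__next() now honours LIBBPF_STRICT_NO_OBJECT_LIST and returns NULL once the caller opts out of the global object list. A small user-space sketch of enabling that behaviour through the strict-mode API:

        #include <bpf/libbpf.h>
        #include <bpf/libbpf_legacy.h>

        int main(void)
        {
                /* opt in to 1.0 semantics; sets LIBBPF_STRICT_NO_OBJECT_LIST too */
                libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

                /* walking the global object list is now a no-op */
                return bpf_object__next(NULL) ? 1 : 0;
        }
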
@@ -7800,6 +8060,12 @@ __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
struct bpf_program *
bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
{
+ return bpf_object__next_program(obj, prev);
+}
+
+struct bpf_program *
+bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
+{
struct bpf_program *prog = prev;
do {
@@ -7812,6 +8078,12 @@ bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
struct bpf_program *
bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
{
+ return bpf_object__prev_program(obj, next);
+}
+
+struct bpf_program *
+bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
+{
struct bpf_program *prog = next;
do {
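
The legacy bpf_program__next()/__prev() iterators become thin wrappers around the new object-first API. A usage fragment, assuming <stdio.h>, <bpf/libbpf.h> and an already-opened struct bpf_object *obj:

        struct bpf_program *prog = NULL;

        while ((prog = bpf_object__next_program(obj, prog)) != NULL)
                printf("prog '%s' in section '%s'\n",
                       bpf_program__name(prog),
                       bpf_program__section_name(prog));
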
@@ -7892,6 +8164,16 @@ size_t bpf_program__size(const struct bpf_program *prog)
return prog->insns_cnt * BPF_INSN_SZ;
}
+const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
+{
+ return prog->insns;
+}
+
+size_t bpf_program__insn_cnt(const struct bpf_program *prog)
+{
+ return prog->insns_cnt;
+}
+
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
bpf_program_prep_t prep)
{
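
The new accessors expose a program's instruction array and count without reaching into struct bpf_program. A fragment that counts call instructions, assuming <linux/bpf.h> and an opened program prog:

        const struct bpf_insn *insn = bpf_program__insns(prog);
        size_t i, calls = 0, cnt = bpf_program__insn_cnt(prog);

        for (i = 0; i < cnt; i++)
                if (insn[i].code == (BPF_JMP | BPF_CALL))
                        calls++;
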
@@ -8030,6 +8312,8 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("tp/", TRACEPOINT, 0, SEC_NONE, attach_tp),
SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
SEC_DEF("raw_tp/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tracepoint.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tp.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
SEC_DEF("tp_btf/", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
SEC_DEF("fentry/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
SEC_DEF("fmod_ret/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
@@ -8219,7 +8503,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
/* Collect the reloc from ELF and populate the st_ops->progs[] */
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- GElf_Shdr *shdr, Elf_Data *data)
+ Elf64_Shdr *shdr, Elf_Data *data)
{
const struct btf_member *member;
struct bpf_struct_ops *st_ops;
@@ -8227,58 +8511,58 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
unsigned int shdr_idx;
const struct btf *btf;
struct bpf_map *map;
- Elf_Data *symbols;
unsigned int moff, insn_idx;
const char *name;
__u32 member_idx;
- GElf_Sym sym;
- GElf_Rel rel;
+ Elf64_Sym *sym;
+ Elf64_Rel *rel;
int i, nrels;
- symbols = obj->efile.symbols;
btf = obj->btf;
nrels = shdr->sh_size / shdr->sh_entsize;
for (i = 0; i < nrels; i++) {
- if (!gelf_getrel(data, i, &rel)) {
+ rel = elf_rel_by_idx(data, i);
+ if (!rel) {
pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
+ sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
+ if (!sym) {
pr_warn("struct_ops reloc: symbol %zx not found\n",
- (size_t)GELF_R_SYM(rel.r_info));
+ (size_t)ELF64_R_SYM(rel->r_info));
return -LIBBPF_ERRNO__FORMAT;
}
- name = elf_sym_str(obj, sym.st_name) ?: "<?>";
- map = find_struct_ops_map_by_offset(obj, rel.r_offset);
+ name = elf_sym_str(obj, sym->st_name) ?: "<?>";
+ map = find_struct_ops_map_by_offset(obj, rel->r_offset);
if (!map) {
- pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
- (size_t)rel.r_offset);
+ pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
+ (size_t)rel->r_offset);
return -EINVAL;
}
- moff = rel.r_offset - map->sec_offset;
- shdr_idx = sym.st_shndx;
+ moff = rel->r_offset - map->sec_offset;
+ shdr_idx = sym->st_shndx;
st_ops = map->st_ops;
- pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
+ pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
map->name,
- (long long)(rel.r_info >> 32),
- (long long)sym.st_value,
- shdr_idx, (size_t)rel.r_offset,
- map->sec_offset, sym.st_name, name);
+ (long long)(rel->r_info >> 32),
+ (long long)sym->st_value,
+ shdr_idx, (size_t)rel->r_offset,
+ map->sec_offset, sym->st_name, name);
if (shdr_idx >= SHN_LORESERVE) {
- pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
- map->name, (size_t)rel.r_offset, shdr_idx);
+ pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
+ map->name, (size_t)rel->r_offset, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
- if (sym.st_value % BPF_INSN_SZ) {
+ if (sym->st_value % BPF_INSN_SZ) {
pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
- map->name, (unsigned long long)sym.st_value);
+ map->name, (unsigned long long)sym->st_value);
return -LIBBPF_ERRNO__FORMAT;
}
- insn_idx = sym.st_value / BPF_INSN_SZ;
+ insn_idx = sym->st_value / BPF_INSN_SZ;
member = find_member_by_offset(st_ops->type, moff * 8);
if (!member) {
@@ -8412,28 +8696,27 @@ int libbpf_find_vmlinux_btf_id(const char *name,
static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
- struct bpf_prog_info_linear *info_linear;
- struct bpf_prog_info *info;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
struct btf *btf;
int err;
- info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
- err = libbpf_get_error(info_linear);
+ err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
if (err) {
- pr_warn("failed get_prog_info_linear for FD %d\n",
- attach_prog_fd);
+ pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n",
+ attach_prog_fd, err);
return err;
}
err = -EINVAL;
- info = &info_linear->info;
- if (!info->btf_id) {
+ if (!info.btf_id) {
pr_warn("The target program doesn't have BTF\n");
goto out;
}
- btf = btf__load_from_kernel_by_id(info->btf_id);
- if (libbpf_get_error(btf)) {
- pr_warn("Failed to get BTF of the program\n");
+ btf = btf__load_from_kernel_by_id(info.btf_id);
+ err = libbpf_get_error(btf);
+ if (err) {
+ pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
goto out;
}
err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
@@ -8443,7 +8726,6 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
goto out;
}
out:
- free(info_linear);
return err;
}
@@ -8559,9 +8841,30 @@ const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
return map ? &map->def : libbpf_err_ptr(-EINVAL);
}
+static bool map_uses_real_name(const struct bpf_map *map)
+{
+ /* Since libbpf started to support custom .data.* and .rodata.* maps,
+ * their user-visible name differs from kernel-visible name. Users see
+ * such map's corresponding ELF section name as a map name.
+ * This check distinguishes .data/.rodata from .data.* and .rodata.*
+ * maps to know which name has to be returned to the user.
+ */
+ if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
+ return true;
+ if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
+ return true;
+ return false;
+}
+
const char *bpf_map__name(const struct bpf_map *map)
{
- return map ? map->name : NULL;
+ if (!map)
+ return NULL;
+
+ if (map_uses_real_name(map))
+ return map->real_name;
+
+ return map->name;
}
enum bpf_map_type bpf_map__type(const struct bpf_map *map)
@@ -8590,6 +8893,19 @@ int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
return 0;
}
+__u64 bpf_map__map_extra(const struct bpf_map *map)
+{
+ return map->map_extra;
+}
+
+int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
+{
+ if (map->fd >= 0)
+ return libbpf_err(-EBUSY);
+ map->map_extra = map_extra;
+ return 0;
+}
+
__u32 bpf_map__numa_node(const struct bpf_map *map)
{
return map->numa_node;
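
map_extra carries opaque, map-type-specific data (for instance the number of hash functions of a bloom filter map) and, like the other setters, is only writable before the map is created. A user-space fragment with a hypothetical map name:

        struct bpf_map *bloom = bpf_object__find_map_by_name(obj, "bloom_map");
        int err;

        /* must run after bpf_object__open*() but before bpf_object__load() */
        err = bloom ? bpf_map__set_map_extra(bloom, 3) : -ENOENT;
        if (!err)
                err = bpf_object__load(obj);
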
@@ -8744,6 +9060,12 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *
bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
{
+ return bpf_object__next_map(obj, prev);
+}
+
+struct bpf_map *
+bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
+{
if (prev == NULL)
return obj->maps;
@@ -8753,6 +9075,12 @@ bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
struct bpf_map *
bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
{
+ return bpf_object__prev_map(obj, next);
+}
+
+struct bpf_map *
+bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
+{
if (next == NULL) {
if (!obj->nr_maps)
return NULL;
@@ -8768,7 +9096,22 @@ bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
struct bpf_map *pos;
bpf_object__for_each_map(pos, obj) {
- if (pos->name && !strcmp(pos->name, name))
+ /* if it's a special internal map name (which always starts
+ * with dot) then check if that special name matches the
+ * real map name (ELF section name)
+ */
+ if (name[0] == '.') {
+ if (pos->real_name && strcmp(pos->real_name, name) == 0)
+ return pos;
+ continue;
+ }
+ /* otherwise map name has to be an exact match */
+ if (map_uses_real_name(pos)) {
+ if (strcmp(pos->real_name, name) == 0)
+ return pos;
+ continue;
+ }
+ if (strcmp(pos->name, name) == 0)
return pos;
}
return errno = ENOENT, NULL;
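
With custom .data.* and .rodata.* maps, lookups accept the ELF section name for those maps, while ".data" and ".rodata" still resolve to the default internal maps. A user-space fragment; the custom section name is hypothetical:

        /* BPF side would place a variable in a custom datasec, e.g.:
         *     int counter SEC(".data.counters");
         */
        struct bpf_map *custom = bpf_object__find_map_by_name(obj, ".data.counters");
        struct bpf_map *dflt = bpf_object__find_map_by_name(obj, ".data");

        if (custom)
                printf("custom datasec map: '%s'\n", bpf_map__name(custom));
        if (dflt)
                printf("default .data map: '%s'\n", bpf_map__name(dflt));
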
@@ -9273,7 +9616,7 @@ static int append_to_file(const char *file, const char *fmt, ...)
int fd, n, err = 0;
va_list ap;
- fd = open(file, O_WRONLY | O_APPEND, 0);
+ fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
if (fd < 0)
return -errno;
@@ -9787,12 +10130,26 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *pr
static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie)
{
- const char *tp_name;
+ static const char *const prefixes[] = {
+ "raw_tp/",
+ "raw_tracepoint/",
+ "raw_tp.w/",
+ "raw_tracepoint.w/",
+ };
+ size_t i;
+ const char *tp_name = NULL;
- if (str_has_pfx(prog->sec_name, "raw_tp/"))
- tp_name = prog->sec_name + sizeof("raw_tp/") - 1;
- else
- tp_name = prog->sec_name + sizeof("raw_tracepoint/") - 1;
+ for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
+ if (str_has_pfx(prog->sec_name, prefixes[i])) {
+ tp_name = prog->sec_name + strlen(prefixes[i]);
+ break;
+ }
+ }
+ if (!tp_name) {
+ pr_warn("prog '%s': invalid section name '%s'\n",
+ prog->name, prog->sec_name);
+ return libbpf_err_ptr(-EINVAL);
+ }
return bpf_program__attach_raw_tracepoint(prog, tp_name);
}
@@ -10904,7 +11261,7 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
int fd, err = 0, len;
char buf[128];
- fd = open(fcpu, O_RDONLY);
+ fd = open(fcpu, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);