Diffstat (limited to 'tools/perf/util/symbol.c')
| -rw-r--r-- | tools/perf/util/symbol.c | 1362 |
1 file changed, 920 insertions, 442 deletions
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e7a98dbd2aed..814f960fa8f8 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1,9 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 #include <dirent.h> #include <errno.h> #include <stdlib.h> #include <stdio.h> #include <string.h> +#include <linux/capability.h> #include <linux/kernel.h> +#include <linux/mman.h> +#include <linux/string.h> +#include <linux/time64.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/param.h> @@ -12,15 +17,32 @@ #include <inttypes.h> #include "annotate.h" #include "build-id.h" -#include "util.h" +#include "cap.h" +#include "cpumap.h" #include "debug.h" +#include "demangle-cxx.h" +#include "demangle-java.h" +#include "demangle-ocaml.h" +#include "demangle-rust-v0.h" +#include "dso.h" +#include "util.h" // lsdir() +#include "debug.h" +#include "event.h" #include "machine.h" +#include "map.h" #include "symbol.h" +#include "map_symbol.h" +#include "mem-events.h" +#include "mem-info.h" +#include "symsrc.h" #include "strlist.h" #include "intlist.h" +#include "namespaces.h" #include "header.h" #include "path.h" -#include "sane_ctype.h" +#include <linux/ctype.h> +#include <linux/log2.h> +#include <linux/zalloc.h> #include <elf.h> #include <limits.h> @@ -35,49 +57,68 @@ int vmlinux_path__nr_entries; char **vmlinux_path; struct symbol_conf symbol_conf = { + .nanosecs = false, .use_modules = true, .try_vmlinux_path = true, - .annotate_src = true, .demangle = true, .demangle_kernel = false, .cumulate_callchain = true, + .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */ .show_hist_headers = true, .symfs = "", .event_group = true, + .inline_name = true, + .res_sample = 0, +}; + +struct map_list_node { + struct list_head node; + struct map *map; }; +static struct map_list_node *map_list_node__new(void) +{ + return malloc(sizeof(struct map_list_node)); +} + static enum dso_binary_type binary_type_symtab[] = { DSO_BINARY_TYPE__KALLSYMS, DSO_BINARY_TYPE__GUEST_KALLSYMS, DSO_BINARY_TYPE__JAVA_JIT, DSO_BINARY_TYPE__DEBUGLINK, DSO_BINARY_TYPE__BUILD_ID_CACHE, + DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO, DSO_BINARY_TYPE__FEDORA_DEBUGINFO, DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__GNU_DEBUGDATA, DSO_BINARY_TYPE__SYSTEM_PATH_DSO, DSO_BINARY_TYPE__GUEST_KMODULE, DSO_BINARY_TYPE__GUEST_KMODULE_COMP, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__NOT_FOUND, }; #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) -bool symbol_type__is_a(char symbol_type, enum map_type map_type) +static bool symbol_type__filter(char __symbol_type) { - symbol_type = toupper(symbol_type); - - switch (map_type) { - case MAP__FUNCTION: - return symbol_type == 'T' || symbol_type == 'W'; - case MAP__VARIABLE: - return symbol_type == 'D'; - default: - return false; - } + // Since 'U' == undefined and 'u' == unique global symbol, we can't use toupper there + // 'N' is for debugging symbols, 'n' is a non-data, non-code, non-debug read-only section. + // According to 'man nm'. 
+ // 'N' first seen in: + // ffffffff9b35d130 N __pfx__RNCINvNtNtNtCsbDUBuN8AbD4_4core4iter8adapters3map12map_try_foldjNtCs6vVzKs5jPr6_12drm_panic_qr7VersionuINtNtNtBa_3ops12control_flow11ControlFlowB10_ENcB10_0NCINvNvNtNtNtB8_6traits8iterator8Iterator4find5checkB10_NCNvMB12_B10_13from_segments0E0E0B12_ + // a seemingly Rust mangled name + // Ditto for '1': + // root@x1:~# grep ' 1 ' /proc/kallsyms + // ffffffffb098bc00 1 __pfx__RNCINvNtNtNtCsfwaGRd4cjqE_4core4iter8adapters3map12map_try_foldjNtCskFudTml27HW_12drm_panic_qr7VersionuINtNtNtBa_3ops12control_flow11ControlFlowB10_ENcB10_0NCINvNvNtNtNtB8_6traits8iterator8Iterator4find5checkB10_NCNvMB12_B10_13from_segments0E0E0B12_ + // ffffffffb098bc10 1 _RNCINvNtNtNtCsfwaGRd4cjqE_4core4iter8adapters3map12map_try_foldjNtCskFudTml27HW_12drm_panic_qr7VersionuINtNtNtBa_3ops12control_flow11ControlFlowB10_ENcB10_0NCINvNvNtNtNtB8_6traits8iterator8Iterator4find5checkB10_NCNvMB12_B10_13from_segments0E0E0B12_ + char symbol_type = toupper(__symbol_type); + return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B' || + __symbol_type == 'u' || __symbol_type == 'l' || __symbol_type == 'N' || __symbol_type == '1'; } static int prefix_underscores_count(const char *str) @@ -90,6 +131,11 @@ static int prefix_underscores_count(const char *str) return tail - str; } +const char * __weak arch__normalize_symbol_name(const char *name) +{ + return name; +} + int __weak arch__compare_symbol_names(const char *namea, const char *nameb) { return strcmp(namea, nameb); @@ -127,6 +173,13 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb) else if ((a == 0) && (b > 0)) return SYMBOL_B; + if (syma->type != symb->type) { + if (syma->type == STT_NOTYPE) + return SYMBOL_B; + if (symb->type == STT_NOTYPE) + return SYMBOL_A; + } + /* Prefer a non weak symbol over a weak one */ a = syma->binding == STB_WEAK; b = symb->binding == STB_WEAK; @@ -162,7 +215,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb) return arch__choose_best_symbol(syma, symb); } -void symbols__fixup_duplicate(struct rb_root *symbols) +void symbols__fixup_duplicate(struct rb_root_cached *symbols) { struct rb_node *nd; struct symbol *curr, *next; @@ -170,35 +223,39 @@ void symbols__fixup_duplicate(struct rb_root *symbols) if (symbol_conf.allow_aliases) return; - nd = rb_first(symbols); + nd = rb_first_cached(symbols); while (nd) { curr = rb_entry(nd, struct symbol, rb_node); again: nd = rb_next(&curr->rb_node); - next = rb_entry(nd, struct symbol, rb_node); - if (!nd) break; + next = rb_entry(nd, struct symbol, rb_node); if (curr->start != next->start) continue; if (choose_best_symbol(curr, next) == SYMBOL_A) { - rb_erase(&next->rb_node, symbols); + if (next->type == STT_GNU_IFUNC) + curr->ifunc_alias = true; + rb_erase_cached(&next->rb_node, symbols); symbol__delete(next); goto again; } else { + if (curr->type == STT_GNU_IFUNC) + next->ifunc_alias = true; nd = rb_next(&curr->rb_node); - rb_erase(&curr->rb_node, symbols); + rb_erase_cached(&curr->rb_node, symbols); symbol__delete(curr); } } } -void symbols__fixup_end(struct rb_root *symbols) +/* Update zero-sized symbols using the address of the next symbol */ +void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms) { - struct rb_node *nd, *prevnd = rb_first(symbols); + struct rb_node *nd, *prevnd = rb_first_cached(symbols); struct symbol *curr, *prev; if (prevnd == NULL) @@ -210,42 +267,54 @@ void symbols__fixup_end(struct rb_root *symbols) prev = curr; curr = 
rb_entry(nd, struct symbol, rb_node); - if (prev->end == prev->start && prev->end != curr->start) - prev->end = curr->start; - } - - /* Last entry */ - if (curr->end == curr->start) - curr->end = roundup(curr->start, 4096) + 4096; -} + /* + * On some architecture kernel text segment start is located at + * some low memory address, while modules are located at high + * memory addresses (or vice versa). The gap between end of + * kernel text segment and beginning of first module's text + * segment is very big. Therefore do not fill this gap and do + * not assign it to the kernel dso map (kallsyms). + * + * Also BPF code can be allocated separately from text segments + * and modules. So the last entry in a module should not fill + * the gap too. + * + * In kallsyms, it determines module symbols using '[' character + * like in: + * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi] + */ + if (prev->end == prev->start) { + const char *prev_mod; + const char *curr_mod; -void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) -{ - struct maps *maps = &mg->maps[type]; - struct map *next, *curr; + if (!is_kallsyms) { + prev->end = curr->start; + continue; + } - pthread_rwlock_wrlock(&maps->lock); + prev_mod = strchr(prev->name, '['); + curr_mod = strchr(curr->name, '['); - curr = maps__first(maps); - if (curr == NULL) - goto out_unlock; + /* Last kernel/module symbol mapped to end of page */ + if (!prev_mod != !curr_mod) + prev->end = roundup(prev->end + 4096, 4096); + /* Last symbol in the previous module */ + else if (prev_mod && strcmp(prev_mod, curr_mod)) + prev->end = roundup(prev->end + 4096, 4096); + else + prev->end = curr->start; - for (next = map__next(curr); next; next = map__next(curr)) { - curr->end = next->start; - curr = next; + pr_debug4("%s sym:%s end:%#" PRIx64 "\n", + __func__, prev->name, prev->end); + } } - /* - * We still haven't the actual symbols, so guess the - * last map final address. - */ - curr->end = ~0ULL; - -out_unlock: - pthread_rwlock_unlock(&maps->lock); + /* Last entry */ + if (curr->end == curr->start) + curr->end = roundup(curr->start, 4096) + 4096; } -struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) +struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name) { size_t namelen = strlen(name) + 1; struct symbol *sym = calloc(1, (symbol_conf.priv_size + @@ -256,13 +325,14 @@ struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) if (symbol_conf.priv_size) { if (symbol_conf.init_annotation) { struct annotation *notes = (void *)sym; - pthread_mutex_init(¬es->lock, NULL); + annotation__init(notes); } sym = ((void *)sym) + symbol_conf.priv_size; } sym->start = start; sym->end = len ? 
start + len : start; + sym->type = type; sym->binding = binding; sym->namelen = namelen - 1; @@ -275,28 +345,37 @@ struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) void symbol__delete(struct symbol *sym) { + if (symbol_conf.priv_size) { + if (symbol_conf.init_annotation) { + struct annotation *notes = symbol__annotation(sym); + + annotation__exit(notes); + } + } free(((void *)sym) - symbol_conf.priv_size); } -void symbols__delete(struct rb_root *symbols) +void symbols__delete(struct rb_root_cached *symbols) { struct symbol *pos; - struct rb_node *next = rb_first(symbols); + struct rb_node *next = rb_first_cached(symbols); while (next) { pos = rb_entry(next, struct symbol, rb_node); next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, symbols); + rb_erase_cached(&pos->rb_node, symbols); symbol__delete(pos); } } -void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel) +void __symbols__insert(struct rb_root_cached *symbols, + struct symbol *sym, bool kernel) { - struct rb_node **p = &symbols->rb_node; + struct rb_node **p = &symbols->rb_root.rb_node; struct rb_node *parent = NULL; const u64 ip = sym->start; struct symbol *s; + bool leftmost = true; if (kernel) { const char *name = sym->name; @@ -314,26 +393,28 @@ void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel) s = rb_entry(parent, struct symbol, rb_node); if (ip < s->start) p = &(*p)->rb_left; - else + else { p = &(*p)->rb_right; + leftmost = false; + } } rb_link_node(&sym->rb_node, parent, p); - rb_insert_color(&sym->rb_node, symbols); + rb_insert_color_cached(&sym->rb_node, symbols, leftmost); } -void symbols__insert(struct rb_root *symbols, struct symbol *sym) +void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym) { __symbols__insert(symbols, sym, false); } -static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) +static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip) { struct rb_node *n; if (symbols == NULL) return NULL; - n = symbols->rb_node; + n = symbols->rb_root.rb_node; while (n) { struct symbol *s = rb_entry(n, struct symbol, rb_node); @@ -349,9 +430,9 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) return NULL; } -static struct symbol *symbols__first(struct rb_root *symbols) +static struct symbol *symbols__first(struct rb_root_cached *symbols) { - struct rb_node *n = rb_first(symbols); + struct rb_node *n = rb_first_cached(symbols); if (n) return rb_entry(n, struct symbol, rb_node); @@ -359,9 +440,9 @@ static struct symbol *symbols__first(struct rb_root *symbols) return NULL; } -static struct symbol *symbols__last(struct rb_root *symbols) +static struct symbol *symbols__last(struct rb_root_cached *symbols) { - struct rb_node *n = rb_last(symbols); + struct rb_node *n = rb_last(&symbols->rb_root); if (n) return rb_entry(n, struct symbol, rb_node); @@ -379,35 +460,35 @@ static struct symbol *symbols__next(struct symbol *sym) return NULL; } -static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) +static int symbols__sort_name_cmp(const void *vlhs, const void *vrhs) { - struct rb_node **p = &symbols->rb_node; - struct rb_node *parent = NULL; - struct symbol_name_rb_node *symn, *s; - - symn = container_of(sym, struct symbol_name_rb_node, sym); + const struct symbol *lhs = *((const struct symbol **)vlhs); + const struct symbol *rhs = *((const struct symbol **)vrhs); - while (*p != NULL) { - parent = *p; - s = rb_entry(parent, struct 
symbol_name_rb_node, rb_node); - if (strcmp(sym->name, s->sym.name) < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&symn->rb_node, parent, p); - rb_insert_color(&symn->rb_node, symbols); + return strcmp(lhs->name, rhs->name); } -static void symbols__sort_by_name(struct rb_root *symbols, - struct rb_root *source) +static struct symbol **symbols__sort_by_name(struct rb_root_cached *source, size_t *len) { struct rb_node *nd; + struct symbol **result; + size_t i = 0, size = 0; - for (nd = rb_first(source); nd; nd = rb_next(nd)) { + for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) + size++; + + result = malloc(sizeof(*result) * size); + if (!result) + return NULL; + + for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); - symbols__insert_by_name(symbols, pos); + + result[i++] = pos; } + qsort(result, size, sizeof(*result), symbols__sort_name_cmp); + *len = size; + return result; } int symbol__match_symbol_name(const char *name, const char *str, @@ -427,91 +508,103 @@ int symbol__match_symbol_name(const char *name, const char *str, return arch__compare_symbol_names(name, str); } -static struct symbol *symbols__find_by_name(struct rb_root *symbols, +static struct symbol *symbols__find_by_name(struct symbol *symbols[], + size_t symbols_len, const char *name, - enum symbol_tag_include includes) + enum symbol_tag_include includes, + size_t *found_idx) { - struct rb_node *n; - struct symbol_name_rb_node *s = NULL; + size_t i, lower = 0, upper = symbols_len; + struct symbol *s = NULL; - if (symbols == NULL) - return NULL; + if (found_idx) + *found_idx = SIZE_MAX; - n = symbols->rb_node; + if (!symbols_len) + return NULL; - while (n) { + while (lower < upper) { int cmp; - s = rb_entry(n, struct symbol_name_rb_node, rb_node); - cmp = symbol__match_symbol_name(s->sym.name, name, includes); + i = (lower + upper) / 2; + cmp = symbol__match_symbol_name(symbols[i]->name, name, includes); if (cmp > 0) - n = n->rb_left; + upper = i; else if (cmp < 0) - n = n->rb_right; - else + lower = i + 1; + else { + if (found_idx) + *found_idx = i; + s = symbols[i]; break; + } } - - if (n == NULL) - return NULL; - - if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) + if (s && includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) { /* return first symbol that has same name (if any) */ - for (n = rb_prev(n); n; n = rb_prev(n)) { - struct symbol_name_rb_node *tmp; - - tmp = rb_entry(n, struct symbol_name_rb_node, rb_node); - if (arch__compare_symbol_names(tmp->sym.name, s->sym.name)) + for (; i > 0; i--) { + struct symbol *tmp = symbols[i - 1]; + + if (!arch__compare_symbol_names(tmp->name, s->name)) { + if (found_idx) + *found_idx = i - 1; + s = tmp; + } else break; - - s = tmp; } - - return &s->sym; + } + assert(!found_idx || !s || s == symbols[*found_idx]); + return s; } void dso__reset_find_symbol_cache(struct dso *dso) { - enum map_type type; - - for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) { - dso->last_find_result[type].addr = 0; - dso->last_find_result[type].symbol = NULL; - } + dso__set_last_find_result_addr(dso, 0); + dso__set_last_find_result_symbol(dso, NULL); } -void dso__insert_symbol(struct dso *dso, enum map_type type, struct symbol *sym) +void dso__insert_symbol(struct dso *dso, struct symbol *sym) { - __symbols__insert(&dso->symbols[type], sym, dso->kernel); + __symbols__insert(dso__symbols(dso), sym, dso__kernel(dso)); /* update the symbol cache if necessary */ - if (dso->last_find_result[type].addr >= 
sym->start && - (dso->last_find_result[type].addr < sym->end || + if (dso__last_find_result_addr(dso) >= sym->start && + (dso__last_find_result_addr(dso) < sym->end || sym->start == sym->end)) { - dso->last_find_result[type].symbol = sym; + dso__set_last_find_result_symbol(dso, sym); } } -struct symbol *dso__find_symbol(struct dso *dso, - enum map_type type, u64 addr) +void dso__delete_symbol(struct dso *dso, struct symbol *sym) { - if (dso->last_find_result[type].addr != addr || dso->last_find_result[type].symbol == NULL) { - dso->last_find_result[type].addr = addr; - dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr); + rb_erase_cached(&sym->rb_node, dso__symbols(dso)); + symbol__delete(sym); + dso__reset_find_symbol_cache(dso); +} + +struct symbol *dso__find_symbol(struct dso *dso, u64 addr) +{ + if (dso__last_find_result_addr(dso) != addr || dso__last_find_result_symbol(dso) == NULL) { + dso__set_last_find_result_addr(dso, addr); + dso__set_last_find_result_symbol(dso, symbols__find(dso__symbols(dso), addr)); } - return dso->last_find_result[type].symbol; + return dso__last_find_result_symbol(dso); +} + +struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr) +{ + return symbols__find(dso__symbols(dso), addr); } -struct symbol *dso__first_symbol(struct dso *dso, enum map_type type) +struct symbol *dso__first_symbol(struct dso *dso) { - return symbols__first(&dso->symbols[type]); + return symbols__first(dso__symbols(dso)); } -struct symbol *dso__last_symbol(struct dso *dso, enum map_type type) +struct symbol *dso__last_symbol(struct dso *dso) { - return symbols__last(&dso->symbols[type]); + return symbols__last(dso__symbols(dso)); } struct symbol *dso__next_symbol(struct symbol *sym) @@ -519,38 +612,62 @@ struct symbol *dso__next_symbol(struct symbol *sym) return symbols__next(sym); } -struct symbol *symbol__next_by_name(struct symbol *sym) +struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx) { - struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym); - struct rb_node *n = rb_next(&s->rb_node); + if (*idx + 1 >= dso__symbol_names_len(dso)) + return NULL; - return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL; + ++*idx; + return dso__symbol_names(dso)[*idx]; } /* - * Teturns first symbol that matched with @name. + * Returns first symbol that matched with @name. 
*/ -struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, - const char *name) -{ - struct symbol *s = symbols__find_by_name(&dso->symbol_names[type], name, - SYMBOL_TAG_INCLUDE__NONE); - if (!s) - s = symbols__find_by_name(&dso->symbol_names[type], name, - SYMBOL_TAG_INCLUDE__DEFAULT_ONLY); +struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx) +{ + struct symbol *s = symbols__find_by_name(dso__symbol_names(dso), + dso__symbol_names_len(dso), + name, SYMBOL_TAG_INCLUDE__NONE, idx); + if (!s) { + s = symbols__find_by_name(dso__symbol_names(dso), dso__symbol_names_len(dso), + name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx); + } return s; } -void dso__sort_by_name(struct dso *dso, enum map_type type) +void dso__sort_by_name(struct dso *dso) +{ + mutex_lock(dso__lock(dso)); + if (!dso__sorted_by_name(dso)) { + size_t len = 0; + + dso__set_symbol_names(dso, symbols__sort_by_name(dso__symbols(dso), &len)); + if (dso__symbol_names(dso)) { + dso__set_symbol_names_len(dso, len); + dso__set_sorted_by_name(dso); + } + } + mutex_unlock(dso__lock(dso)); +} + +/* + * While we find nice hex chars, build a long_val. + * Return number of chars processed. + */ +static int hex2u64(const char *ptr, u64 *long_val) { - dso__set_sorted_by_name(dso, type); - return symbols__sort_by_name(&dso->symbol_names[type], - &dso->symbols[type]); + char *p; + + *long_val = strtoull(ptr, &p, 16); + + return p - ptr; } + int modules__parse(const char *filename, void *arg, int (*process_module)(void *arg, const char *name, - u64 start)) + u64 start, u64 size)) { char *line = NULL; size_t n; @@ -563,8 +680,8 @@ int modules__parse(const char *filename, void *arg, while (1) { char name[PATH_MAX]; - u64 start; - char *sep; + u64 start, size; + char *sep, *endptr; ssize_t line_len; line_len = getline(&line, &n, file); @@ -596,7 +713,11 @@ int modules__parse(const char *filename, void *arg, scnprintf(name, sizeof(name), "[%s]", line); - err = process_module(arg, name, start); + size = strtoul(sep + 1, &endptr, 0); + if (*endptr != ' ' && *endptr != '\t') + continue; + + err = process_module(arg, name, start, size); if (err) break; } @@ -606,11 +727,6 @@ out: return err; } -struct process_kallsyms_args { - struct map *map; - struct dso *dso; -}; - /* * These are symbols in the kernel image, so make sure that * sym is from a kernel DSO. 
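The modules__parse() hunk above now extracts the module size as well as the start address from each /proc/modules line. A minimal standalone sketch of that per-line parsing, mirroring the strrchr()/strtoul() logic in the hunk (the helper name and main() harness are illustrative, not perf code):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <inttypes.h>

	/* Parse "name size refcount deps state 0xaddress" the way
	 * modules__parse() does: hex start address from the trailing
	 * "0x..." field, decimal size from the field after the name. */
	static int parse_module_line(char *line, char name[64],
				     uint64_t *start, uint64_t *size)
	{
		char *sep, *endptr;

		sep = strrchr(line, 'x');	/* trailing "0x<addr>" */
		if (sep == NULL)
			return -1;
		*start = strtoull(sep + 1, NULL, 16);

		sep = strchr(line, ' ');	/* terminate the module name */
		if (sep == NULL)
			return -1;
		*sep = '\0';
		snprintf(name, 64, "[%s]", line);	/* bracketed, kallsyms-style */

		*size = strtoul(sep + 1, &endptr, 0);
		if (*endptr != ' ' && *endptr != '\t')	/* same sanity check as the hunk */
			return -1;
		return 0;
	}

	int main(void)
	{
		char line[] = "snd_hda_codec_hdmi 94208 1 - Live 0xffffffffc1937000";
		char name[64];
		uint64_t start, size;

		if (!parse_module_line(line, name, &start, &size))
			printf("%s start=%#" PRIx64 " size=%" PRIu64 "\n",
			       name, start, size);
		return 0;
	}

The endptr check is what lets the parse loop skip malformed lines instead of passing a bogus size to process_module().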
@@ -618,38 +734,54 @@ struct process_kallsyms_args { static bool symbol__is_idle(const char *name) { const char * const idle_symbols[] = { + "acpi_idle_do_entry", + "acpi_processor_ffh_cstate_enter", + "arch_cpu_idle", "cpu_idle", "cpu_startup_entry", + "idle_cpu", "intel_idle", + "intel_idle_ibrs", "default_idle", "native_safe_halt", "enter_idle", "exit_idle", "mwait_idle", "mwait_idle_with_hints", + "mwait_idle_with_hints.constprop.0", "poll_idle", "ppc64_runlatch_off", "pseries_dedicated_idle_sleep", + "psw_idle", + "psw_idle_exit", NULL }; int i; + static struct strlist *idle_symbols_list; - for (i = 0; idle_symbols[i]; i++) { - if (!strcmp(idle_symbols[i], name)) - return true; - } + if (idle_symbols_list) + return strlist__has_entry(idle_symbols_list, name); - return false; + idle_symbols_list = strlist__new(NULL, NULL); + + for (i = 0; idle_symbols[i]; i++) + strlist__add(idle_symbols_list, idle_symbols[i]); + + return strlist__has_entry(idle_symbols_list, name); } static int map__process_kallsym_symbol(void *arg, const char *name, char type, u64 start) { struct symbol *sym; - struct process_kallsyms_args *a = arg; - struct rb_root *root = &a->dso->symbols[a->map->type]; + struct dso *dso = arg; + struct rb_root_cached *root = dso__symbols(dso); - if (!symbol_type__is_a(type, a->map->type)) + if (!symbol_type__filter(type)) + return 0; + + /* Ignore local symbols for ARM modules */ + if (name[0] == '$') return 0; /* @@ -657,7 +789,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name, * symbols, setting length to 0, and rely on * symbols__fixup_end() to fix it up. */ - sym = symbol__new(start, 0, kallsyms2elf_binding(type), name); + sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name); if (sym == NULL) return -ENOMEM; /* @@ -674,56 +806,57 @@ static int map__process_kallsym_symbol(void *arg, const char *name, * so that we can in the next step set the symbol ->end address and then * call kernel_maps__split_kallsyms. 
*/ -static int dso__load_all_kallsyms(struct dso *dso, const char *filename, - struct map *map) +static int dso__load_all_kallsyms(struct dso *dso, const char *filename) { - struct process_kallsyms_args args = { .map = map, .dso = dso, }; - return kallsyms__parse(filename, &args, map__process_kallsym_symbol); + return kallsyms__parse(filename, dso, map__process_kallsym_symbol); } -static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map) +static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) { - struct map_groups *kmaps = map__kmaps(map); - struct map *curr_map; struct symbol *pos; int count = 0; - struct rb_root old_root = dso->symbols[map->type]; - struct rb_root *root = &dso->symbols[map->type]; - struct rb_node *next = rb_first(root); + struct rb_root_cached *root = dso__symbols(dso); + struct rb_root_cached old_root = *root; + struct rb_node *next = rb_first_cached(root); if (!kmaps) return -1; - *root = RB_ROOT; + *root = RB_ROOT_CACHED; while (next) { + struct map *curr_map; + struct dso *curr_map_dso; char *module; pos = rb_entry(next, struct symbol, rb_node); next = rb_next(&pos->rb_node); - rb_erase_init(&pos->rb_node, &old_root); - + rb_erase_cached(&pos->rb_node, &old_root); + RB_CLEAR_NODE(&pos->rb_node); module = strchr(pos->name, '\t'); if (module) *module = '\0'; - curr_map = map_groups__find(kmaps, map->type, pos->start); + curr_map = maps__find(kmaps, pos->start); if (!curr_map) { symbol__delete(pos); continue; } - - pos->start -= curr_map->start - curr_map->pgoff; + curr_map_dso = map__dso(curr_map); + pos->start -= map__start(curr_map) - map__pgoff(curr_map); + if (pos->end > map__end(curr_map)) + pos->end = map__end(curr_map); if (pos->end) - pos->end -= curr_map->start - curr_map->pgoff; - symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); + pos->end -= map__start(curr_map) - map__pgoff(curr_map); + symbols__insert(dso__symbols(curr_map_dso), pos); ++count; + map__put(curr_map); } /* Symbols have been adjusted */ - dso->adjust_symbols = 1; + dso__set_adjust_symbols(dso, true); return count; } @@ -733,21 +866,24 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map) * kernel range is broken in several maps, named [kernel].N, as we don't have * the original ELF section names vmlinux have. 
*/ -static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) +static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, + struct map *initial_map) { - struct map_groups *kmaps = map__kmaps(map); struct machine *machine; - struct map *curr_map = map; + struct map *curr_map = map__get(initial_map); struct symbol *pos; int count = 0, moved = 0; - struct rb_root *root = &dso->symbols[map->type]; - struct rb_node *next = rb_first(root); + struct rb_root_cached *root = dso__symbols(dso); + struct rb_node *next = rb_first_cached(root); int kernel_range = 0; + bool x86_64; if (!kmaps) return -1; - machine = kmaps->machine; + machine = maps__machine(kmaps); + + x86_64 = machine__is(machine, "x86_64"); while (next) { char *module; @@ -757,14 +893,16 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) module = strchr(pos->name, '\t'); if (module) { + struct dso *curr_map_dso; + if (!symbol_conf.use_modules) goto discard_symbol; *module++ = '\0'; - - if (strcmp(curr_map->dso->short_name, module)) { - if (curr_map != map && - dso->kernel == DSO_TYPE_GUEST_KERNEL && + curr_map_dso = map__dso(curr_map); + if (strcmp(dso__short_name(curr_map_dso), module)) { + if (!RC_CHK_EQUAL(curr_map, initial_map) && + dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST && machine__is_default_guest(machine)) { /* * We assume all symbols of a module are @@ -773,32 +911,41 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) * symbols are in its kmap. Mark it as * loaded. */ - dso__set_loaded(curr_map->dso, - curr_map->type); + dso__set_loaded(curr_map_dso); } - curr_map = map_groups__find_by_name(kmaps, - map->type, module); + map__zput(curr_map); + curr_map = maps__find_by_name(kmaps, module); if (curr_map == NULL) { pr_debug("%s/proc/{kallsyms,modules} " "inconsistency while looking " "for \"%s\" module!\n", machine->root_dir, module); - curr_map = map; + curr_map = map__get(initial_map); goto discard_symbol; } - - if (curr_map->dso->loaded && + curr_map_dso = map__dso(curr_map); + if (dso__loaded(curr_map_dso) && !machine__is_default_guest(machine)) goto discard_symbol; } /* * So that we look just like we get from .ko files, - * i.e. not prelinked, relative to map->start. + * i.e. not prelinked, relative to initial_map->start. + */ + pos->start = map__map_ip(curr_map, pos->start); + pos->end = map__map_ip(curr_map, pos->end); + } else if (x86_64 && is_entry_trampoline(pos->name)) { + /* + * These symbols are not needed anymore since the + * trampoline maps refer to the text section and it's + * symbols instead. Avoid having to deal with + * relocations, and the assumption that the first symbol + * is the start of kernel text, by simply removing the + * symbols at this point. 
*/ - pos->start = curr_map->map_ip(curr_map, pos->start); - pos->end = curr_map->map_ip(curr_map, pos->end); - } else if (curr_map != map) { + goto discard_symbol; + } else if (!RC_CHK_EQUAL(curr_map, initial_map)) { char dso_name[PATH_MAX]; struct dso *ndso; @@ -808,34 +955,43 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) pos->end -= delta; } - if (count == 0) { - curr_map = map; + if (map__start(initial_map) <= (pos->start + delta) && + (pos->start + delta) < map__end(initial_map)) { + map__zput(curr_map); + curr_map = map__get(initial_map); goto add_symbol; } - if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) snprintf(dso_name, sizeof(dso_name), "[guest.kernel].%d", - kernel_range++); + kernel_range); else snprintf(dso_name, sizeof(dso_name), "[kernel].%d", - kernel_range++); + kernel_range); ndso = dso__new(dso_name); + map__zput(curr_map); if (ndso == NULL) return -1; - ndso->kernel = dso->kernel; + dso__set_kernel(ndso, dso__kernel(dso)); + dso__set_loaded(ndso); - curr_map = map__new2(pos->start, ndso, map->type); + curr_map = map__new2(pos->start, ndso); if (curr_map == NULL) { dso__put(ndso); return -1; } - curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; - map_groups__insert(kmaps, curr_map); + map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY); + if (maps__insert(kmaps, curr_map)) { + map__zput(curr_map); + dso__put(ndso); + return -1; + } + dso__put(ndso); ++kernel_range; } else if (delta) { /* Kernel was relocated at boot time */ @@ -843,25 +999,27 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) pos->end -= delta; } add_symbol: - if (curr_map != map) { - rb_erase(&pos->rb_node, root); - symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); + if (!RC_CHK_EQUAL(curr_map, initial_map)) { + struct dso *curr_map_dso = map__dso(curr_map); + + rb_erase_cached(&pos->rb_node, root); + symbols__insert(dso__symbols(curr_map_dso), pos); ++moved; } else ++count; continue; discard_symbol: - rb_erase(&pos->rb_node, root); + rb_erase_cached(&pos->rb_node, root); symbol__delete(pos); } - if (curr_map != map && - dso->kernel == DSO_TYPE_GUEST_KERNEL && - machine__is_default_guest(kmaps->machine)) { - dso__set_loaded(curr_map->dso, curr_map->type); + if (!RC_CHK_EQUAL(curr_map, initial_map) && + dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST && + machine__is_default_guest(maps__machine(kmaps))) { + dso__set_loaded(map__dso(curr_map)); } - + map__put(curr_map); return count + moved; } @@ -943,7 +1101,8 @@ static struct module_info *find_module(const char *name, return NULL; } -static int __read_proc_modules(void *arg, const char *name, u64 start) +static int __read_proc_modules(void *arg, const char *name, u64 start, + u64 size __maybe_unused) { struct rb_root *modules = arg; struct module_info *mi; @@ -1019,38 +1178,35 @@ out_delete_from: return ret; } -static int do_validate_kcore_modules(const char *filename, struct map *map, - struct map_groups *kmaps) +static int do_validate_kcore_modules_cb(struct map *old_map, void *data) +{ + struct rb_root *modules = data; + struct module_info *mi; + struct dso *dso; + + if (!__map__is_kmodule(old_map)) + return 0; + + dso = map__dso(old_map); + /* Module must be in memory at the same address */ + mi = find_module(dso__short_name(dso), modules); + if (!mi || mi->start != map__start(old_map)) + return -EINVAL; + + return 0; +} + +static int do_validate_kcore_modules(const char *filename, struct maps *kmaps) { struct rb_root 
modules = RB_ROOT; - struct map *old_map; int err; err = read_proc_modules(filename, &modules); if (err) return err; - old_map = map_groups__first(kmaps, map->type); - while (old_map) { - struct map *next = map_groups__next(old_map); - struct module_info *mi; + err = maps__for_each_map(kmaps, do_validate_kcore_modules_cb, &modules); - if (old_map == map || old_map->start == map->start) { - /* The kernel map */ - old_map = next; - continue; - } - - /* Module must be in memory at the same address */ - mi = find_module(old_map->dso->short_name, &modules); - if (!mi || mi->start != old_map->start) { - err = -EINVAL; - goto out; - } - - old_map = next; - } -out: delete_modules(&modules); return err; } @@ -1083,7 +1239,7 @@ static bool filename_from_kallsyms_filename(char *filename, static int validate_kcore_modules(const char *kallsyms_filename, struct map *map) { - struct map_groups *kmaps = map__kmaps(map); + struct maps *kmaps = map__kmaps(map); char modules_filename[PATH_MAX]; if (!kmaps) @@ -1093,7 +1249,7 @@ static int validate_kcore_modules(const char *kallsyms_filename, kallsyms_filename)) return -EINVAL; - if (do_validate_kcore_modules(modules_filename, map, kmaps)) + if (do_validate_kcore_modules(modules_filename, kmaps)) return -EINVAL; return 0; @@ -1122,46 +1278,61 @@ static int validate_kcore_addresses(const char *kallsyms_filename, struct kcore_mapfn_data { struct dso *dso; - enum map_type type; struct list_head maps; }; static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) { struct kcore_mapfn_data *md = data; - struct map *map; + struct map_list_node *list_node = map_list_node__new(); + + if (!list_node) + return -ENOMEM; - map = map__new2(start, md->dso, md->type); - if (map == NULL) + list_node->map = map__new2(start, md->dso); + if (!list_node->map) { + free(list_node); return -ENOMEM; + } - map->end = map->start + len; - map->pgoff = pgoff; + map__set_end(list_node->map, map__start(list_node->map) + len); + map__set_pgoff(list_node->map, pgoff); - list_add(&map->node, &md->maps); + list_add(&list_node->node, &md->maps); return 0; } +static bool remove_old_maps(struct map *map, void *data) +{ + const struct map *map_to_save = data; + + /* + * We need to preserve eBPF maps even if they are covered by kcore, + * because we need to access eBPF dso for source data. 
+ */ + return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map); +} + static int dso__load_kcore(struct dso *dso, struct map *map, const char *kallsyms_filename) { - struct map_groups *kmaps = map__kmaps(map); - struct machine *machine; + struct maps *kmaps = map__kmaps(map); struct kcore_mapfn_data md; - struct map *old_map, *new_map, *replacement_map = NULL; + struct map *map_ref, *replacement_map = NULL; + struct machine *machine; bool is_64_bit; int err, fd; char kcore_filename[PATH_MAX]; - struct symbol *sym; + u64 stext; if (!kmaps) return -EINVAL; - machine = kmaps->machine; + machine = maps__machine(kmaps); /* This function requires that the map is the kernel map */ - if (map != machine->vmlinux_maps[map->type]) + if (!__map__is_kernel(map)) return -EINVAL; if (!filename_from_kallsyms_filename(kcore_filename, "kcore", @@ -1173,7 +1344,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map, return -EINVAL; md.dso = dso; - md.type = map->type; INIT_LIST_HEAD(&md.maps); fd = open(kcore_filename, O_RDONLY); @@ -1184,11 +1354,11 @@ static int dso__load_kcore(struct dso *dso, struct map *map, } /* Read new maps into temporary lists */ - err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md, + err = file__read_maps(fd, map__prot(map) & PROT_EXEC, kcore_mapfn, &md, &is_64_bit); if (err) goto out_err; - dso->is_64_bit = is_64_bit; + dso__set_is_64_bit(dso, is_64_bit); if (list_empty(&md.maps)) { err = -EINVAL; @@ -1196,63 +1366,104 @@ static int dso__load_kcore(struct dso *dso, struct map *map, } /* Remove old maps */ - old_map = map_groups__first(kmaps, map->type); - while (old_map) { - struct map *next = map_groups__next(old_map); + maps__remove_maps(kmaps, remove_old_maps, map); + machine->trampolines_mapped = false; - if (old_map != map) - map_groups__remove(kmaps, old_map); - old_map = next; - } + /* Find the kernel map using the '_stext' symbol */ + if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) { + u64 replacement_size = 0; + struct map_list_node *new_node; - /* Find the kernel map using the first symbol */ - sym = dso__first_symbol(dso, map->type); - list_for_each_entry(new_map, &md.maps, node) { - if (sym && sym->start >= new_map->start && - sym->start < new_map->end) { - replacement_map = new_map; - break; + list_for_each_entry(new_node, &md.maps, node) { + struct map *new_map = new_node->map; + u64 new_size = map__size(new_map); + + if (!(stext >= map__start(new_map) && stext < map__end(new_map))) + continue; + + /* + * On some architectures, ARM64 for example, the kernel + * text can get allocated inside of the vmalloc segment. + * Select the smallest matching segment, in case stext + * falls within more than one in the list. + */ + if (!replacement_map || new_size < replacement_size) { + replacement_map = new_map; + replacement_size = new_size; + } } } if (!replacement_map) - replacement_map = list_entry(md.maps.next, struct map, node); + replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map; + + /* + * Update addresses of vmlinux map. Re-insert it to ensure maps are + * correctly ordered. Do this before using maps__merge_in() for the + * remaining maps so vmlinux gets split if necessary. 
+ */ + map_ref = map__get(map); + maps__remove(kmaps, map_ref); + + map__set_start(map_ref, map__start(replacement_map)); + map__set_end(map_ref, map__end(replacement_map)); + map__set_pgoff(map_ref, map__pgoff(replacement_map)); + map__set_mapping_type(map_ref, map__mapping_type(replacement_map)); + + err = maps__insert(kmaps, map_ref); + map__put(map_ref); + if (err) + goto out_err; /* Add new maps */ while (!list_empty(&md.maps)) { - new_map = list_entry(md.maps.next, struct map, node); - list_del_init(&new_map->node); - if (new_map == replacement_map) { - map->start = new_map->start; - map->end = new_map->end; - map->pgoff = new_map->pgoff; - map->map_ip = new_map->map_ip; - map->unmap_ip = new_map->unmap_ip; - /* Ensure maps are correctly ordered */ - map__get(map); - map_groups__remove(kmaps, map); - map_groups__insert(kmaps, map); - map__put(map); - } else { - map_groups__insert(kmaps, new_map); + struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node); + struct map *new_map = new_node->map; + + list_del_init(&new_node->node); + + /* skip if replacement_map, already inserted above */ + if (!RC_CHK_EQUAL(new_map, replacement_map)) { + /* + * Merge kcore map into existing maps, + * and ensure that current maps (eBPF) + * stay intact. + */ + if (maps__merge_in(kmaps, new_map)) { + err = -EINVAL; + goto out_err; + } } + map__zput(new_node->map); + free(new_node); + } - map__put(new_map); + if (machine__is(machine, "x86_64")) { + u64 addr; + + /* + * If one of the corresponding symbols is there, assume the + * entry trampoline maps are too. + */ + if (!kallsyms__get_function_start(kallsyms_filename, + ENTRY_TRAMPOLINE_NAME, + &addr)) + machine->trampolines_mapped = true; } /* * Set the data type and long name so that kcore can be read via * dso__data_read_addr(). */ - if (dso->kernel == DSO_TYPE_GUEST_KERNEL) - dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE; + if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) + dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KCORE); else - dso->binary_type = DSO_BINARY_TYPE__KCORE; + dso__set_binary_type(dso, DSO_BINARY_TYPE__KCORE); dso__set_long_name(dso, strdup(kcore_filename), true); close(fd); - if (map->type == MAP__FUNCTION) + if (map__prot(map) & PROT_EXEC) pr_debug("Using %s for kernel object code\n", kcore_filename); else pr_debug("Using %s for kernel data\n", kcore_filename); @@ -1261,26 +1472,25 @@ static int dso__load_kcore(struct dso *dso, struct map *map, out_err: while (!list_empty(&md.maps)) { - map = list_entry(md.maps.next, struct map, node); - list_del_init(&map->node); - map__put(map); + struct map_list_node *list_node; + + list_node = list_entry(md.maps.next, struct map_list_node, node); + list_del_init(&list_node->node); + map__zput(list_node->map); + free(list_node); } close(fd); - return -EINVAL; + return err; } /* * If the kernel is relocated at boot time, kallsyms won't match. Compute the * delta based on the relocation reference symbol. 
*/ -static int kallsyms__delta(struct map *map, const char *filename, u64 *delta) +static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta) { - struct kmap *kmap = map__kmap(map); u64 addr; - if (!kmap) - return -1; - if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) return 0; @@ -1294,29 +1504,33 @@ static int kallsyms__delta(struct map *map, const char *filename, u64 *delta) int __dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, bool no_kcore) { + struct kmap *kmap = map__kmap(map); u64 delta = 0; if (symbol__restricted_filename(filename, "/proc/kallsyms")) return -1; - if (dso__load_all_kallsyms(dso, filename, map) < 0) + if (!kmap || !kmap->kmaps) return -1; - if (kallsyms__delta(map, filename, &delta)) + if (dso__load_all_kallsyms(dso, filename) < 0) return -1; - symbols__fixup_end(&dso->symbols[map->type]); - symbols__fixup_duplicate(&dso->symbols[map->type]); + if (kallsyms__delta(kmap, filename, &delta)) + return -1; - if (dso->kernel == DSO_TYPE_GUEST_KERNEL) - dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; + symbols__fixup_end(dso__symbols(dso), true); + symbols__fixup_duplicate(dso__symbols(dso)); + + if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) + dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS); else - dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; + dso__set_symtab_type(dso, DSO_BINARY_TYPE__KALLSYMS); if (!no_kcore && !dso__load_kcore(dso, map, filename)) - return dso__split_kallsyms_for_kcore(dso, map); + return maps__split_kallsyms_for_kcore(kmap->kmaps, dso); else - return dso__split_kallsyms(dso, map, delta); + return maps__split_kallsyms(kmap->kmaps, dso, delta, map); } int dso__load_kallsyms(struct dso *dso, const char *filename, @@ -1325,14 +1539,14 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, return __dso__load_kallsyms(dso, filename, map, false); } -static int dso__load_perf_map(struct dso *dso, struct map *map) +static int dso__load_perf_map(const char *map_path, struct dso *dso) { char *line = NULL; size_t n; FILE *file; int nr_syms = 0; - file = fopen(dso->long_name, "r"); + file = fopen(map_path, "r"); if (file == NULL) goto out_failure; @@ -1362,12 +1576,12 @@ static int dso__load_perf_map(struct dso *dso, struct map *map) if (len + 2 >= line_len) continue; - sym = symbol__new(start, size, STB_GLOBAL, line + len); + sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len); if (sym == NULL) goto out_delete_line; - symbols__insert(&dso->symbols[map->type], sym); + symbols__insert(dso__symbols(dso), sym); nr_syms++; } @@ -1391,19 +1605,21 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: + case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: - return !kmod && dso->kernel == DSO_TYPE_USER; + case DSO_BINARY_TYPE__GNU_DEBUGDATA: + return !kmod && dso__kernel(dso) == DSO_SPACE__USER; case DSO_BINARY_TYPE__KALLSYMS: case DSO_BINARY_TYPE__VMLINUX: case DSO_BINARY_TYPE__KCORE: - return dso->kernel == DSO_TYPE_KERNEL; + return dso__kernel(dso) == DSO_SPACE__KERNEL; case DSO_BINARY_TYPE__GUEST_KALLSYMS: case DSO_BINARY_TYPE__GUEST_VMLINUX: case DSO_BINARY_TYPE__GUEST_KCORE: - return dso->kernel == DSO_TYPE_GUEST_KERNEL; + return dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST; case DSO_BINARY_TYPE__GUEST_KMODULE: case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: @@ 
-1411,71 +1627,117 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: /* * kernel modules know their symtab type - it's set when - * creating a module dso in machine__findnew_module_map(). + * creating a module dso in machine__addnew_module_map(). */ - return kmod && dso->symtab_type == type; + return kmod && dso__symtab_type(dso) == type; case DSO_BINARY_TYPE__BUILD_ID_CACHE: + case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: return true; + case DSO_BINARY_TYPE__BPF_PROG_INFO: + case DSO_BINARY_TYPE__BPF_IMAGE: + case DSO_BINARY_TYPE__OOL: case DSO_BINARY_TYPE__NOT_FOUND: default: return false; } } +/* Checks for the existence of the perf-<pid>.map file in two different + * locations. First, if the process is a separate mount namespace, check in + * that namespace using the pid of the innermost pid namespace. If's not in a + * namespace, or the file can't be found there, try in the mount namespace of + * the tracing process using our view of its pid. + */ +static int dso__find_perf_map(char *filebuf, size_t bufsz, + struct nsinfo **nsip) +{ + struct nscookie nsc; + struct nsinfo *nsi; + struct nsinfo *nnsi; + int rc = -1; + + nsi = *nsip; + + if (nsinfo__need_setns(nsi)) { + snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__nstgid(nsi)); + nsinfo__mountns_enter(nsi, &nsc); + rc = access(filebuf, R_OK); + nsinfo__mountns_exit(&nsc); + if (rc == 0) + return rc; + } + + nnsi = nsinfo__copy(nsi); + if (nnsi) { + nsinfo__put(nsi); + + nsinfo__clear_need_setns(nnsi); + snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__tgid(nnsi)); + *nsip = nnsi; + rc = 0; + } + + return rc; +} + int dso__load(struct dso *dso, struct map *map) { char *name; int ret = -1; u_int i; - struct machine *machine; + struct machine *machine = NULL; char *root_dir = (char *) ""; int ss_pos = 0; struct symsrc ss_[2]; struct symsrc *syms_ss = NULL, *runtime_ss = NULL; bool kmod; - unsigned char build_id[BUILD_ID_SIZE]; + bool perfmap; + struct nscookie nsc; + char newmapname[PATH_MAX]; + const char *map_path = dso__long_name(dso); + + mutex_lock(dso__lock(dso)); + perfmap = is_perf_pid_map_name(map_path); + + if (perfmap) { + if (dso__nsinfo(dso) && + (dso__find_perf_map(newmapname, sizeof(newmapname), + dso__nsinfo_ptr(dso)) == 0)) { + map_path = newmapname; + } + } - pthread_mutex_lock(&dso->lock); + nsinfo__mountns_enter(dso__nsinfo(dso), &nsc); /* check again under the dso->lock */ - if (dso__loaded(dso, map->type)) { + if (dso__loaded(dso)) { ret = 1; goto out; } - if (dso->kernel) { - if (dso->kernel == DSO_TYPE_KERNEL) + kmod = dso__is_kmod(dso); + + if (dso__kernel(dso) && !kmod) { + if (dso__kernel(dso) == DSO_SPACE__KERNEL) ret = dso__load_kernel_sym(dso, map); - else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + else if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) ret = dso__load_guest_kernel_sym(dso, map); + machine = maps__machine(map__kmaps(map)); + if (machine__is(machine, "x86_64")) + machine__map_x86_64_entry_trampolines(machine, dso); goto out; } - if (map->groups && map->groups->machine) - machine = map->groups->machine; - else - machine = NULL; - - dso->adjust_symbols = 0; - - if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { - struct stat st; - - if (lstat(dso->name, &st) < 0) - goto out; + dso__set_adjust_symbols(dso, false); - if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) { - pr_warning("File %s not owned by current user or root, " - "ignoring it (use -f to override).\n", dso->name); - goto out; - } - - ret = 
dso__load_perf_map(dso, map); - dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT : - DSO_BINARY_TYPE__NOT_FOUND; + if (perfmap) { + ret = dso__load_perf_map(map_path, dso); + dso__set_symtab_type(dso, ret > 0 + ? DSO_BINARY_TYPE__JAVA_JIT + : DSO_BINARY_TYPE__NOT_FOUND); goto out; } @@ -1486,21 +1748,16 @@ int dso__load(struct dso *dso, struct map *map) if (!name) goto out; - kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || - dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || - dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE || - dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; - - /* * Read the build id if possible. This is required for - * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work + * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work. */ - if (!dso->has_build_id && - is_regular_file(dso->long_name)) { - __symbol__join_symfs(name, PATH_MAX, dso->long_name); - if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0) - dso__set_build_id(dso, build_id); + if (!dso__has_build_id(dso)) { + struct build_id bid = { .size = 0, }; + + __symbol__join_symfs(name, PATH_MAX, dso__long_name(dso)); + if (filename__read_build_id(name, &bid) > 0) + dso__set_build_id(dso, &bid); } /* @@ -1511,9 +1768,16 @@ int dso__load(struct dso *dso, struct map *map) for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) { struct symsrc *ss = &ss_[ss_pos]; bool next_slot = false; + bool is_reg; + bool nsexit; + int bfdrc = -1; + int sirc = -1; enum dso_binary_type symtab_type = binary_type_symtab[i]; + nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE || + symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO); + if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type)) continue; @@ -1521,18 +1785,42 @@ int dso__load(struct dso *dso, struct map *map) root_dir, name, PATH_MAX)) continue; - if (!is_regular_file(name)) - continue; + if (nsexit) + nsinfo__mountns_exit(&nsc); + + is_reg = is_regular_file(name); + if (!is_reg && errno == ENOENT && dso__nsinfo(dso)) { + char *new_name = dso__filename_with_chroot(dso, name); + if (new_name) { + is_reg = is_regular_file(new_name); + strlcpy(name, new_name, PATH_MAX); + free(new_name); + } + } - /* Name is now the name of the next image to try */ - if (symsrc__init(ss, dso, name, symtab_type) < 0) +#ifdef HAVE_LIBBFD_SUPPORT + if (is_reg) + bfdrc = dso__load_bfd_symbols(dso, name); +#endif + if (is_reg && bfdrc < 0) + sirc = symsrc__init(ss, dso, name, symtab_type); + + if (nsexit) + nsinfo__mountns_enter(dso__nsinfo(dso), &nsc); + + if (bfdrc == 0) { + ret = 0; + break; + } + + if (!is_reg || sirc < 0) continue; if (!syms_ss && symsrc__has_symtab(ss)) { syms_ss = ss; next_slot = true; - if (!dso->symsrc_filename) - dso->symsrc_filename = strdup(name); + if (!dso__symsrc_filename(dso)) + dso__set_symsrc_filename(dso, strdup(name)); } if (!runtime_ss && symsrc__possibly_runtime(ss)) { @@ -1543,6 +1831,9 @@ int dso__load(struct dso *dso, struct map *map) if (next_slot) { ss_pos++; + if (dso__binary_type(dso) == DSO_BINARY_TYPE__NOT_FOUND) + dso__set_binary_type(dso, symtab_type); + if (syms_ss && runtime_ss) break; } else { @@ -1570,7 +1861,7 @@ int dso__load(struct dso *dso, struct map *map) if (ret > 0) { int nr_plt; - nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map); + nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss); if (nr_plt > 0) ret += nr_plt; } @@ -1579,35 +1870,20 @@ int dso__load(struct dso *dso, struct map *map) symsrc__destroy(&ss_[ss_pos - 1]); out_free: free(name); - if (ret < 0 && 
strstr(dso->name, " (deleted)") != NULL) + if (ret < 0 && strstr(dso__name(dso), " (deleted)") != NULL) ret = 0; out: - dso__set_loaded(dso, map->type); - pthread_mutex_unlock(&dso->lock); + dso__set_loaded(dso); + mutex_unlock(dso__lock(dso)); + nsinfo__mountns_exit(&nsc); return ret; } -struct map *map_groups__find_by_name(struct map_groups *mg, - enum map_type type, const char *name) -{ - struct maps *maps = &mg->maps[type]; - struct map *map; - - pthread_rwlock_rdlock(&maps->lock); - - for (map = maps__first(maps); map; map = map__next(map)) { - if (map->dso && strcmp(map->dso->short_name, name) == 0) - goto out_unlock; - } - - map = NULL; - -out_unlock: - pthread_rwlock_unlock(&maps->lock); - return map; -} - +/* + * Always takes ownership of vmlinux when vmlinux_allocated == true, even if + * it returns an error. + */ int dso__load_vmlinux(struct dso *dso, struct map *map, const char *vmlinux, bool vmlinux_allocated) { @@ -1621,24 +1897,32 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, else symbol__join_symfs(symfs_vmlinux, vmlinux); - if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; else symtab_type = DSO_BINARY_TYPE__VMLINUX; - if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) + if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) { + if (vmlinux_allocated) + free((char *) vmlinux); return -1; + } + + /* + * dso__load_sym() may copy 'dso' which will result in the copies having + * an incorrect long name unless we set it here first. + */ + dso__set_long_name(dso, vmlinux, vmlinux_allocated); + if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST) + dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_VMLINUX); + else + dso__set_binary_type(dso, DSO_BINARY_TYPE__VMLINUX); err = dso__load_sym(dso, map, &ss, &ss, 0); symsrc__destroy(&ss); if (err > 0) { - if (dso->kernel == DSO_TYPE_GUEST_KERNEL) - dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX; - else - dso->binary_type = DSO_BINARY_TYPE__VMLINUX; - dso__set_long_name(dso, vmlinux, vmlinux_allocated); - dso__set_loaded(dso, map->type); + dso__set_loaded(dso); pr_debug("Using %s for symbols\n", symfs_vmlinux); } @@ -1660,12 +1944,11 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map) } if (!symbol_conf.ignore_vmlinux_buildid) - filename = dso__build_id_filename(dso, NULL, 0); + filename = dso__build_id_filename(dso, NULL, 0, false); if (filename != NULL) { err = dso__load_vmlinux(dso, map, filename, true); if (err > 0) goto out; - free(filename); } out: return err; @@ -1720,12 +2003,13 @@ static bool filename__readable(const char *file) static char *dso__find_kallsyms(struct dso *dso, struct map *map) { - u8 host_build_id[BUILD_ID_SIZE]; + struct build_id bid = { .size = 0, }; char sbuild_id[SBUILD_ID_SIZE]; bool is_host = false; char path[PATH_MAX]; + struct maps *kmaps = map__kmaps(map); - if (!dso->has_build_id) { + if (!dso__has_build_id(dso)) { /* * Last resort, if we don't have a build-id and couldn't find * any vmlinux file, try the running kernel kallsyms table. 
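For the dso__load_perf_map() path above: /tmp/perf-<pid>.map files (written by JIT runtimes, for example) carry one "<start> <size> <name>" entry per line, with start and size in hex. A self-contained sketch of that line format (the example values are invented):

	#include <stdio.h>
	#include <stdlib.h>
	#include <inttypes.h>

	int main(void)
	{
		/* one line as a JIT would write it to /tmp/perf-<pid>.map */
		char line[] = "7f3a2c000000 1a0 jit_compiled_frame";
		char *p = line;
		uint64_t start, size;

		start = strtoull(p, &p, 16);	/* start address, hex */
		size  = strtoull(p, &p, 16);	/* code size, hex */
		while (*p == ' ')		/* remainder of the line is the name */
			p++;
		printf("sym %s: %#" PRIx64 "-%#" PRIx64 "\n", p, start, start + size);
		return 0;
	}

This is why the hunk creates each entry with symbol__new(start, size, STB_GLOBAL, STT_FUNC, ...): everything in a perf map file is treated as a global function symbol.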
@@ -1733,9 +2017,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map) goto proc_kallsyms; } - if (sysfs__read_build_id("/sys/kernel/notes", host_build_id, - sizeof(host_build_id)) == 0) - is_host = dso__build_id_equal(dso, host_build_id); + if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0) + is_host = dso__build_id_equal(dso, &bid); /* Try a fast path for /proc/kallsyms if possible */ if (is_host) { @@ -1751,7 +2034,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map) goto proc_kallsyms; } - build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); + build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id)); /* Find kallsyms in build-id cache with kcore */ scnprintf(path, sizeof(path), "%s/%s/%s", @@ -1761,8 +2044,13 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map) return strdup(path); /* Use current /proc/kallsyms if possible */ - if (is_host) { proc_kallsyms: + if (kmaps) { + struct machine *machine = maps__machine(kmaps); + + scnprintf(path, sizeof(path), "%s/proc/kallsyms", machine->root_dir); + return strdup(path); + } else if (is_host) { return strdup("/proc/kallsyms"); } @@ -1781,6 +2069,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map) int err; const char *kallsyms_filename = NULL; char *kallsyms_allocated_filename = NULL; + char *filename = NULL; + /* * Step 1: if the user specified a kallsyms or vmlinux filename, use * it and only it, reporting errors to the user if it cannot be used. @@ -1805,6 +2095,19 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map) return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false); } + /* + * Before checking on common vmlinux locations, check if it's + * stored as standard build id binary (not kallsyms) under + * .debug cache. + */ + if (!symbol_conf.ignore_vmlinux_buildid) + filename = __dso__build_id_filename(dso, NULL, 0, false, false); + if (filename != NULL) { + err = dso__load_vmlinux(dso, map, filename, true); + if (err > 0) + return err; + } + if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) { err = dso__load_vmlinux_path(dso, map); if (err > 0) @@ -1828,7 +2131,7 @@ do_kallsyms: free(kallsyms_allocated_filename); if (err > 0 && !dso__is_kcore(dso)) { - dso->binary_type = DSO_BINARY_TYPE__KALLSYMS; + dso__set_binary_type(dso, DSO_BINARY_TYPE__KALLSYMS); dso__set_long_name(dso, DSO__NAME_KALLSYMS, false); map__fixup_start(map); map__fixup_end(map); @@ -1840,17 +2143,13 @@ do_kallsyms: static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map) { int err; - const char *kallsyms_filename = NULL; - struct machine *machine; + const char *kallsyms_filename; + struct machine *machine = maps__machine(map__kmaps(map)); char path[PATH_MAX]; - if (!map->groups) { - pr_debug("Guest kernel map hasn't the point to groups\n"); - return -1; - } - machine = map->groups->machine; - - if (machine__is_default_guest(machine)) { + if (machine->kallsyms_filename) { + kallsyms_filename = machine->kallsyms_filename; + } else if (machine__is_default_guest(machine)) { /* * if the user specified a vmlinux filename, use it and only * it, reporting errors to the user if it cannot be used. 
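dso__load_kernel_sym() above probes symbol sources in a fixed order: a user-supplied kallsyms or vmlinux file first, then a vmlinux cached under .debug by build-id, then the vmlinux search path, and finally kallsyms. A schematic sketch of that first-hit-wins chain (the loader names are stand-ins, not the perf API):

	#include <stdio.h>

	/* Each loader returns the number of symbols loaded; <= 0 means
	 * "try the next source".  Stand-in stubs: */
	static int load_user_file(void)       { return 0; }
	static int load_buildid_vmlinux(void) { return 0; }
	static int load_vmlinux_path(void)    { return 123456; }
	static int load_kallsyms(void)        { return 0; }

	int main(void)
	{
		int (*chain[])(void) = {
			load_user_file,
			load_buildid_vmlinux,
			load_vmlinux_path,
			load_kallsyms,	/* last resort */
		};

		for (unsigned i = 0; i < sizeof(chain) / sizeof(chain[0]); i++) {
			int n = chain[i]();

			if (n > 0) {	/* first source yielding symbols wins */
				printf("loaded %d symbols from source %u\n", n, i);
				break;
			}
		}
		return 0;
	}

Note one difference from the sketch: when the user explicitly names a file, the real code uses it and only it, reporting an error rather than falling through; the chain applies to the automatic probing.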
@@ -1875,9 +2174,8 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
 	if (err > 0)
 		pr_debug("Using %s for symbols\n", kallsyms_filename);
 	if (err > 0 && !dso__is_kcore(dso)) {
-		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
-		machine__mmap_name(machine, path, sizeof(path));
-		dso__set_long_name(dso, strdup(path), true);
+		dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS);
+		dso__set_long_name(dso, machine->mmap_name, false);
 		map__fixup_start(map);
 		map__fixup_end(map);
 	}
@@ -1989,42 +2287,119 @@ int setup_intlist(struct intlist **list, const char *list_str,
 	return 0;
 }
 
+static int setup_addrlist(struct intlist **addr_list, struct strlist *sym_list)
+{
+	struct str_node *pos, *tmp;
+	unsigned long val;
+	char *sep;
+	const char *end;
+	int i = 0, err;
+
+	*addr_list = intlist__new(NULL);
+	if (!*addr_list)
+		return -1;
+
+	strlist__for_each_entry_safe(pos, tmp, sym_list) {
+		errno = 0;
+		val = strtoul(pos->s, &sep, 16);
+		if (errno || (sep == pos->s))
+			continue;
+
+		if (*sep != '\0') {
+			end = pos->s + strlen(pos->s) - 1;
+			while (end >= sep && isspace(*end))
+				end--;
+
+			if (end >= sep)
+				continue;
+		}
+
+		err = intlist__add(*addr_list, val);
+		if (err)
+			break;
+
+		strlist__remove(sym_list, pos);
+		i++;
+	}
+
+	if (i == 0) {
+		intlist__delete(*addr_list);
+		*addr_list = NULL;
+	}
+
+	return 0;
+}
+
 static bool symbol__read_kptr_restrict(void)
 {
 	bool value = false;
 	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
+	bool used_root;
+	bool cap_syslog = perf_cap__capable(CAP_SYSLOG, &used_root);
 
 	if (fp != NULL) {
 		char line[8];
 
 		if (fgets(line, sizeof(line), fp) != NULL)
-			value = ((geteuid() != 0) || (getuid() != 0)) ?
-					(atoi(line) != 0) :
-					(atoi(line) == 2);
+			value = cap_syslog ? (atoi(line) >= 2) : (atoi(line) != 0);
 
 		fclose(fp);
 	}
 
+	/* Per kernel/kallsyms.c:
+	 * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
+	 */
+	if (perf_event_paranoid() > 1 && !cap_syslog)
+		value = true;
+
 	return value;
 }
 
 int symbol__annotation_init(void)
 {
+	if (symbol_conf.init_annotation)
+		return 0;
+
 	if (symbol_conf.initialized) {
 		pr_err("Annotation needs to be init before symbol__init()\n");
 		return -1;
 	}
 
-	if (symbol_conf.init_annotation) {
-		pr_warning("Annotation being initialized multiple times\n");
-		return 0;
-	}
-
 	symbol_conf.priv_size += sizeof(struct annotation);
 	symbol_conf.init_annotation = true;
 	return 0;
 }
 
+static int setup_parallelism_bitmap(void)
+{
+	struct perf_cpu_map *map;
+	struct perf_cpu cpu;
+	int i, err = -1;
+
+	if (symbol_conf.parallelism_list_str == NULL)
+		return 0;
+
+	map = perf_cpu_map__new(symbol_conf.parallelism_list_str);
+	if (map == NULL) {
+		pr_err("failed to parse parallelism filter list\n");
+		return -1;
+	}
+
+	bitmap_fill(symbol_conf.parallelism_filter, MAX_NR_CPUS + 1);
+	perf_cpu_map__for_each_cpu(cpu, i, map) {
+		if (cpu.cpu <= 0 || cpu.cpu > MAX_NR_CPUS) {
+			pr_err("Requested parallelism level %d is invalid.\n", cpu.cpu);
+			goto out_delete_map;
+		}
+		__clear_bit(cpu.cpu, symbol_conf.parallelism_filter);
+	}
+
+	err = 0;
+out_delete_map:
+	perf_cpu_map__put(map);
+	return err;
+}
+
 int symbol__init(struct perf_env *env)
 {
 	const char *symfs;
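The new setup_addrlist() lets a --symbols list mix names with raw hex addresses: entries that parse fully as hex migrate into an intlist, everything else stays in the strlist. A usage sketch, not from the patch; the address is invented and the helper is file-local to symbol.c:

	struct strlist *syms = strlist__new("do_sys_open,ffffffff81234560", NULL);
	struct intlist *addrs = NULL;

	if (syms != NULL && setup_addrlist(&addrs, syms) == 0 && addrs != NULL)
		pr_debug("%u address filter(s) parsed\n", intlist__nr_entries(addrs));

	intlist__delete(addrs);		/* both deleters accept NULL */
	strlist__delete(syms);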
@@ -2036,10 +2411,6 @@ int symbol__init(struct perf_env *env)
 
 	symbol__elf_init();
 
-	if (symbol_conf.sort_by_name)
-		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
-					  sizeof(struct symbol));
-
 	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
 		return -1;
 
@@ -2048,6 +2419,9 @@ int symbol__init(struct perf_env *env)
 		return -1;
 	}
 
+	if (setup_parallelism_bitmap())
+		return -1;
+
 	if (setup_list(&symbol_conf.dso_list,
 		       symbol_conf.dso_list_str, "dso") < 0)
 		return -1;
@@ -2068,6 +2442,10 @@ int symbol__init(struct perf_env *env)
 		       symbol_conf.sym_list_str, "symbol") < 0)
 		goto out_free_tid_list;
 
+	if (symbol_conf.sym_list &&
+	    setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0)
+		goto out_free_sym_list;
+
 	if (setup_list(&symbol_conf.bt_stop_list,
 		       symbol_conf.bt_stop_list_str, "symbol") < 0)
 		goto out_free_sym_list;
@@ -2091,6 +2469,7 @@ int symbol__init(struct perf_env *env)
 
 out_free_sym_list:
 	strlist__delete(symbol_conf.sym_list);
+	intlist__delete(symbol_conf.addr_list);
out_free_tid_list:
 	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
@@ -2112,6 +2491,7 @@ void symbol__exit(void)
 	strlist__delete(symbol_conf.comm_list);
 	intlist__delete(symbol_conf.tid_list);
 	intlist__delete(symbol_conf.pid_list);
+	intlist__delete(symbol_conf.addr_list);
 	vmlinux_path__exit();
 	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
 	symbol_conf.bt_stop_list = NULL;
@@ -2140,3 +2520,101 @@ int symbol__config_symfs(const struct option *opt __maybe_unused,
 	free(bf);
 	return 0;
 }
+
+/*
+ * Checks that user supplied symbol kernel files are accessible because
+ * the default mechanism for accessing elf files fails silently. i.e. if
+ * debug syms for a build ID aren't found perf carries on normally. When
+ * they are user supplied we should assume that the user doesn't want to
+ * silently fail.
+ */
+int symbol__validate_sym_arguments(void)
+{
+	if (symbol_conf.vmlinux_name &&
+	    access(symbol_conf.vmlinux_name, R_OK)) {
+		pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
+		return -EINVAL;
+	}
+	if (symbol_conf.kallsyms_name &&
+	    access(symbol_conf.kallsyms_name, R_OK)) {
+		pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static bool want_demangle(bool is_kernel_sym)
+{
+	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
+}
+
+/*
+ * Demangle C++ function signature, typically replaced by demangle-cxx.cpp
+ * version.
+ */
+#ifndef HAVE_CXA_DEMANGLE_SUPPORT
+char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused,
+		       bool modifiers __maybe_unused)
+{
+#ifdef HAVE_LIBBFD_SUPPORT
+	int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
+
+	return bfd_demangle(NULL, str, flags);
+#elif defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
+	int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
+
+	return cplus_demangle(str, flags);
+#else
+	return NULL;
+#endif
+}
+#endif /* !HAVE_CXA_DEMANGLE_SUPPORT */
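symbol__validate_sym_arguments() is meant to run right after option parsing in a subcommand, so a mistyped --vmlinux or --kallsyms path fails loudly up front instead of being silently skipped during symbol loading. A sketch of such a call site (the helper name and the NULL env argument are illustrative, not from this diff):

static int cmd_symbols_init(void)
{
	int err = symbol__validate_sym_arguments();

	if (err)
		return err;	/* pr_err() already named the bad file */

	return symbol__init(NULL);
}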
+
+char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
+{
+	struct demangle rust_demangle = {
+		.style = DemangleStyleUnknown,
+	};
+	char *demangled = NULL;
+
+	/*
+	 * We need to figure out if the object was created from C++ sources
+	 * DWARF DW_compile_unit has this, but we don't always have access
+	 * to it...
+	 */
+	if (!want_demangle((dso && dso__kernel(dso)) || kmodule))
+		return demangled;
+
+	rust_demangle_demangle(elf_name, &rust_demangle);
+	if (rust_demangle_is_known(&rust_demangle)) {
+		/* A rust mangled name. */
+		if (rust_demangle.mangled_len == 0)
+			return demangled;
+
+		for (size_t buf_len = roundup_pow_of_two(rust_demangle.mangled_len * 2);
+		     buf_len < 1024 * 1024; buf_len += 32) {
+			char *tmp = realloc(demangled, buf_len);
+
+			if (!tmp) {
+				/* Failure to grow output buffer, return what is there. */
+				return demangled;
+			}
+			demangled = tmp;
+			if (rust_demangle_display_demangle(&rust_demangle, demangled, buf_len,
+							   /*alternate=*/true) == OverflowOk)
+				return demangled;
+		}
+		/* Buffer exceeded sensible bounds, return what is there. */
+		return demangled;
+	}
+
+	demangled = cxx_demangle_sym(elf_name, verbose > 0, verbose > 0);
+	if (demangled)
+		return demangled;
+
+	demangled = ocaml_demangle_sym(elf_name);
+	if (demangled)
+		return demangled;
+
+	return java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
+}
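dso__demangle_sym() tries the demanglers in a fixed order, Rust first, then C++, then OCaml, then Java, and returns the first hit as a heap string the caller must free. A caller-side sketch ('dso' is assumed to be a valid perf object; the mangled name is an ordinary Itanium C++ example):

static void print_demangled(struct dso *dso, const char *mangled)
{
	char *pretty = dso__demangle_sym(dso, /*kmodule=*/0, mangled);

	if (pretty != NULL) {
		pr_debug("demangled: %s\n", pretty);	/* e.g. "foo(int)" for "_Z3fooi" */
		free(pretty);
	}
}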
