Diffstat (limited to 'tools/perf/util/machine.c')
-rw-r--r-- | tools/perf/util/machine.c | 353
1 files changed, 171 insertions, 182 deletions
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 527517db3182..2d51badfbf2e 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -16,6 +16,7 @@
 #include "map_symbol.h"
 #include "branch.h"
 #include "mem-events.h"
+#include "mem-info.h"
 #include "path.h"
 #include "srcline.h"
 #include "symbol.h"
@@ -48,13 +49,6 @@ static struct dso *machine__kernel_dso(struct machine *machine)
 	return map__dso(machine->vmlinux_map);
 }
 
-static void dsos__init(struct dsos *dsos)
-{
-	INIT_LIST_HEAD(&dsos->head);
-	dsos->root = RB_ROOT;
-	init_rwsem(&dsos->lock);
-}
-
 static int machine__set_mmap_name(struct machine *machine)
 {
 	if (machine__is_host(machine))
@@ -140,6 +134,8 @@ struct machine *machine__new_host(void)
 
 		if (machine__create_kernel_maps(machine) < 0)
 			goto out_delete;
+
+		machine->env = &perf_env;
 	}
 
 	return machine;
@@ -165,28 +161,6 @@ struct machine *machine__new_kallsyms(void)
 	return machine;
 }
 
-static void dsos__purge(struct dsos *dsos)
-{
-	struct dso *pos, *n;
-
-	down_write(&dsos->lock);
-
-	list_for_each_entry_safe(pos, n, &dsos->head, node) {
-		RB_CLEAR_NODE(&pos->rb_node);
-		pos->root = NULL;
-		list_del_init(&pos->node);
-		dso__put(pos);
-	}
-
-	up_write(&dsos->lock);
-}
-
-static void dsos__exit(struct dsos *dsos)
-{
-	dsos__purge(dsos);
-	exit_rwsem(&dsos->lock);
-}
-
 void machine__delete_threads(struct machine *machine)
 {
 	threads__remove_all_threads(&machine->threads);
@@ -670,36 +644,12 @@ int machine__process_lost_event(struct machine *machine __maybe_unused,
 int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
 					union perf_event *event, struct perf_sample *sample)
 {
-	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
-		    sample->id, event->lost_samples.lost);
+	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "%s\n",
+		    sample->id, event->lost_samples.lost,
+		    event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF ? " (BPF)" : "");
 	return 0;
 }
 
-static struct dso *machine__findnew_module_dso(struct machine *machine,
-					       struct kmod_path *m,
-					       const char *filename)
-{
-	struct dso *dso;
-
-	down_write(&machine->dsos.lock);
-
-	dso = __dsos__find(&machine->dsos, m->name, true);
-	if (!dso) {
-		dso = __dsos__addnew(&machine->dsos, m->name);
-		if (dso == NULL)
-			goto out_unlock;
-
-		dso__set_module_info(dso, m, machine);
-		dso__set_long_name(dso, strdup(filename), true);
-		dso->kernel = DSO_SPACE__KERNEL;
-	}
-
-	dso__get(dso);
-out_unlock:
-	up_write(&machine->dsos.lock);
-	return dso;
-}
-
 int machine__process_aux_event(struct machine *machine __maybe_unused,
 			       union perf_event *event)
 {
@@ -737,7 +687,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
 					     struct perf_sample *sample __maybe_unused)
 {
 	struct symbol *sym;
-	struct dso *dso;
+	struct dso *dso = NULL;
 	struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
 	int err = 0;
 
@@ -748,16 +698,15 @@ static int machine__process_ksymbol_register(struct machine *machine,
 			err = -ENOMEM;
 			goto out;
 		}
-		dso->kernel = DSO_SPACE__KERNEL;
+		dso__set_kernel(dso, DSO_SPACE__KERNEL);
 		map = map__new2(0, dso);
-		dso__put(dso);
 		if (!map) {
 			err = -ENOMEM;
 			goto out;
 		}
 
 		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
-			dso->binary_type = DSO_BINARY_TYPE__OOL;
-			dso->data.file_size = event->ksymbol.len;
+			dso__set_binary_type(dso, DSO_BINARY_TYPE__OOL);
+			dso__data(dso)->file_size = event->ksymbol.len;
 			dso__set_loaded(dso);
 		}
@@ -772,11 +721,11 @@ static int machine__process_ksymbol_register(struct machine *machine,
 		dso__set_loaded(dso);
 
 		if (is_bpf_image(event->ksymbol.name)) {
-			dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
+			dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_IMAGE);
 			dso__set_long_name(dso, "", false);
 		}
 	} else {
-		dso = map__dso(map);
+		dso = dso__get(map__dso(map));
 	}
 
 	sym = symbol__new(map__map_ip(map, map__start(map)),
@@ -789,6 +738,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
 	dso__insert_symbol(dso, sym);
 out:
 	map__put(map);
+	dso__put(dso);
 	return err;
 }
 
@@ -883,7 +833,7 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start
 	if (kmod_path__parse_name(&m, filename))
 		return NULL;
 
-	dso = machine__findnew_module_dso(machine, &m, filename);
+	dso = dsos__findnew_module_dso(&machine->dsos, machine, &m, filename);
 	if (dso == NULL)
 		goto out;
 
@@ -907,11 +857,11 @@ out:
 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
 {
 	struct rb_node *nd;
-	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
+	size_t ret = dsos__fprintf(&machines->host.dsos, fp);
 
 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
 
-		ret += __dsos__fprintf(&pos->dsos.head, fp);
+		ret += dsos__fprintf(&pos->dsos, fp);
 	}
 
 	return ret;
@@ -920,7 +870,7 @@ size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
 				     bool (skip)(struct dso *dso, int parm), int parm)
 {
-	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
+	return dsos__fprintf_buildid(&m->dsos, fp, skip, parm);
 }
 
 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
@@ -942,17 +892,17 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
 	size_t printed = 0;
 	struct dso *kdso = machine__kernel_dso(machine);
 
-	if (kdso->has_build_id) {
+	if (dso__has_build_id(kdso)) {
 		char filename[PATH_MAX];
-		if (dso__build_id_filename(kdso, filename, sizeof(filename),
-					   false))
+
+		if (dso__build_id_filename(kdso, filename, sizeof(filename), false))
			printed += fprintf(fp, "[0] %s\n", filename);
 	}
 
-	for (i = 0; i < vmlinux_path__nr_entries; ++i)
-		printed += fprintf(fp, "[%d] %s\n",
-				   i + kdso->has_build_id, vmlinux_path[i]);
-
+	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
+		printed += fprintf(fp, "[%d] %s\n", i + dso__has_build_id(kdso),
+				   vmlinux_path[i]);
+	}
 	return printed;
 }
 
@@ -1002,7 +952,7 @@ static struct dso *machine__get_kernel(struct machine *machine)
 						 DSO_SPACE__KERNEL_GUEST);
 	}
 
-	if (kernel != NULL && (!kernel->has_build_id))
+	if (kernel != NULL && (!dso__has_build_id(kernel)))
 		dso__read_running_kernel_build_id(kernel, machine);
 
 	return kernel;
@@ -1053,7 +1003,7 @@ static int machine__get_running_kernel_start(struct machine *machine,
 
 	err = kallsyms__get_symbol_start(filename, "_edata", &addr);
 	if (err)
-		err = kallsyms__get_function_start(filename, "_etext", &addr);
+		err = kallsyms__get_symbol_start(filename, "_etext", &addr);
 	if (!err)
 		*end = addr;
 
@@ -1367,8 +1317,8 @@ static char *get_kernel_version(const char *root_dir)
 
 static bool is_kmod_dso(struct dso *dso)
 {
-	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
-	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
+	return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+	       dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE;
 }
 
 static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
@@ -1395,8 +1345,8 @@ static int maps__set_module_path(struct maps *maps, const char *path, struct kmo
 	 * we need to update the symtab_type if needed.
 	 */
 	if (m->comp && is_kmod_dso(dso)) {
-		dso->symtab_type++;
-		dso->comp = m->comp;
+		dso__set_symtab_type(dso, dso__symtab_type(dso)+1);
+		dso__set_comp(dso, m->comp);
 	}
 	map__put(map);
 	return 0;
@@ -1518,6 +1468,8 @@ static int machine__create_modules(struct machine *machine)
 	if (modules__parse(modules, machine, machine__create_module))
 		return -1;
 
+	maps__fixup_end(machine__kernel_maps(machine));
+
 	if (!machine__set_modules_path(machine))
 		return 0;
 
@@ -1549,8 +1501,8 @@ static int machine__update_kernel_mmap(struct machine *machine,
 
 	updated = map__get(orig);
 	machine->vmlinux_map = updated;
-	machine__set_kernel_mmap(machine, start, end);
 	maps__remove(machine__kernel_maps(machine), orig);
+	machine__set_kernel_mmap(machine, start, end);
 	err = maps__insert(machine__kernel_maps(machine), updated);
 	map__put(orig);
 
@@ -1616,16 +1568,14 @@ out_put:
 	return ret;
 }
 
-static bool machine__uses_kcore(struct machine *machine)
+static int machine__uses_kcore_cb(struct dso *dso, void *data __maybe_unused)
 {
-	struct dso *dso;
-
-	list_for_each_entry(dso, &machine->dsos.head, node) {
-		if (dso__is_kcore(dso))
-			return true;
-	}
+	return dso__is_kcore(dso) ? 1 : 0;
+}
 
-	return false;
+static bool machine__uses_kcore(struct machine *machine)
+{
+	return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0 ? true : false;
 }
 
 static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
@@ -1692,53 +1642,20 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
 		 * Should be there already, from the build-id table in
 		 * the header.
 		 */
-		struct dso *kernel = NULL;
-		struct dso *dso;
-
-		down_read(&machine->dsos.lock);
-
-		list_for_each_entry(dso, &machine->dsos.head, node) {
-
-			/*
-			 * The cpumode passed to is_kernel_module is not the
-			 * cpumode of *this* event. If we insist on passing
-			 * correct cpumode to is_kernel_module, we should
-			 * record the cpumode when we adding this dso to the
-			 * linked list.
-			 *
-			 * However we don't really need passing correct
-			 * cpumode. We know the correct cpumode must be kernel
-			 * mode (if not, we should not link it onto kernel_dsos
-			 * list).
-			 *
-			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
-			 * is_kernel_module() treats it as a kernel cpumode.
-			 */
-
-			if (!dso->kernel ||
-			    is_kernel_module(dso->long_name,
-					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
-				continue;
-
-
-			kernel = dso__get(dso);
-			break;
-		}
-
-		up_read(&machine->dsos.lock);
+		struct dso *kernel = dsos__find_kernel_dso(&machine->dsos);
 
 		if (kernel == NULL)
 			kernel = machine__findnew_dso(machine, machine->mmap_name);
 		if (kernel == NULL)
 			goto out_problem;
 
-		kernel->kernel = dso_space;
+		dso__set_kernel(kernel, dso_space);
 		if (__machine__create_kernel_maps(machine, kernel) < 0) {
 			dso__put(kernel);
 			goto out_problem;
 		}
 
-		if (strstr(kernel->long_name, "vmlinux"))
+		if (strstr(dso__long_name(kernel), "vmlinux"))
 			dso__set_short_name(kernel, "[kernel.vmlinux]", false);
 
 		if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
@@ -2101,11 +2018,11 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
 	if (!mi)
 		return NULL;
 
-	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
-	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
+	ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip);
+	ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi),
 			 sample->addr, sample->phys_addr,
 			 sample->data_page_size);
-	mi->data_src.val = sample->data_src;
+	mem_info__data_src(mi)->val = sample->data_src;
 
 	return mi;
 }
@@ -2120,14 +2037,14 @@ static char *callchain_srcline(struct map_symbol *ms, u64 ip)
 		return srcline;
 
 	dso = map__dso(map);
-	srcline = srcline__tree_find(&dso->srclines, ip);
+	srcline = srcline__tree_find(dso__srclines(dso), ip);
 	if (!srcline) {
 		bool show_sym = false;
 		bool show_addr = callchain_param.key == CCKEY_ADDRESS;
 
 		srcline = get_srcline(dso, map__rip_2objdump(map, ip),
 				      ms->sym, show_sym, show_addr, ip);
-		srcline__tree_insert(&dso->srclines, ip, srcline);
+		srcline__tree_insert(dso__srclines(dso), ip, srcline);
 	}
 
 	return srcline;
@@ -2147,7 +2064,8 @@ static int add_callchain_ip(struct thread *thread,
 			    bool branch,
 			    struct branch_flags *flags,
 			    struct iterations *iter,
-			    u64 branch_from)
+			    u64 branch_from,
+			    bool symbols)
 {
 	struct map_symbol ms = {};
 	struct addr_location al;
@@ -2186,7 +2104,8 @@ static int add_callchain_ip(struct thread *thread,
 			}
 			goto out;
 		}
-		thread__find_symbol(thread, *cpumode, ip, &al);
+		if (symbols)
+			thread__find_symbol(thread, *cpumode, ip, &al);
 	}
 
 	if (al.sym != NULL) {
@@ -2229,6 +2148,7 @@ struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
 	unsigned int i;
 	const struct branch_stack *bs = sample->branch_stack;
 	struct branch_entry *entries = perf_sample__branch_entries(sample);
+	u64 *branch_stack_cntr = sample->branch_stack_cntr;
 	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
 
 	if (!bi)
@@ -2238,6 +2158,8 @@ struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
 		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
 		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
 		bi[i].flags = entries[i].flags;
+		if (branch_stack_cntr)
+			bi[i].branch_stack_cntr = branch_stack_cntr[i];
 	}
 	return bi;
 }
@@ -2312,7 +2234,8 @@ static int lbr_callchain_add_kernel_ip(struct thread *thread,
 				       struct symbol **parent,
 				       struct addr_location *root_al,
 				       u64 branch_from,
-				       bool callee, int end)
+				       bool callee, int end,
+				       bool symbols)
 {
 	struct ip_callchain *chain = sample->callchain;
 	u8 cpumode = PERF_RECORD_MISC_USER;
@@ -2322,7 +2245,8 @@ static int lbr_callchain_add_kernel_ip(struct thread *thread,
 		for (i = 0; i < end + 1; i++) {
 			err = add_callchain_ip(thread, cursor, parent,
 					       root_al, &cpumode, chain->ips[i],
-					       false, NULL, NULL, branch_from);
+					       false, NULL, NULL, branch_from,
+					       symbols);
 			if (err)
 				return err;
 		}
@@ -2332,7 +2256,8 @@ static int lbr_callchain_add_kernel_ip(struct thread *thread,
 	for (i = end; i >= 0; i--) {
 		err = add_callchain_ip(thread, cursor, parent,
 				       root_al, &cpumode, chain->ips[i],
-				       false, NULL, NULL, branch_from);
+				       false, NULL, NULL, branch_from,
+				       symbols);
 		if (err)
 			return err;
 	}
@@ -2358,8 +2283,12 @@ static void save_lbr_cursor_node(struct thread *thread,
 		cursor->curr = cursor->first;
 	else
 		cursor->curr = cursor->curr->next;
+
+	map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
 	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
 	       sizeof(struct callchain_cursor_node));
+	lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
+	lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);
 
 	lbr_stitch->prev_lbr_cursor[idx].valid = true;
 	cursor->pos++;
@@ -2371,7 +2300,8 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
 				    struct symbol **parent,
 				    struct addr_location *root_al,
 				    u64 *branch_from,
-				    bool callee)
+				    bool callee,
+				    bool symbols)
 {
 	struct branch_stack *lbr_stack = sample->branch_stack;
 	struct branch_entry *entries = perf_sample__branch_entries(sample);
@@ -2404,7 +2334,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
 		err = add_callchain_ip(thread, cursor, parent,
 				       root_al, &cpumode, ip,
 				       true, flags, NULL,
-				       *branch_from);
+				       *branch_from, symbols);
 		if (err)
 			return err;
 
@@ -2429,7 +2359,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
 			err = add_callchain_ip(thread, cursor, parent,
 					       root_al, &cpumode, ip,
 					       true, flags, NULL,
-					       *branch_from);
+					       *branch_from, symbols);
 			if (err)
 				return err;
 			save_lbr_cursor_node(thread, cursor, i);
@@ -2444,7 +2374,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
 			err = add_callchain_ip(thread, cursor, parent,
 					       root_al, &cpumode, ip,
 					       true, flags, NULL,
-					       *branch_from);
+					       *branch_from, symbols);
 			if (err)
 				return err;
 			save_lbr_cursor_node(thread, cursor, i);
@@ -2458,7 +2388,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
 		err = add_callchain_ip(thread, cursor, parent,
 				       root_al, &cpumode, ip,
 				       true, flags, NULL,
-				       *branch_from);
+				       *branch_from, symbols);
 		if (err)
 			return err;
 	}
@@ -2570,6 +2500,9 @@ static bool has_stitched_lbr(struct thread *thread,
 		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
 		       sizeof(struct callchain_cursor_node));
 
+		stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
+		stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);
+
 		if (callee)
 			list_add(&stitch_node->node, &lbr_stitch->lists);
 		else
@@ -2593,6 +2526,8 @@ static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
 	if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
 		goto free_lbr_stitch;
 
+	thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;
+
 	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
 	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
 
@@ -2620,7 +2555,8 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 					struct symbol **parent,
 					struct addr_location *root_al,
 					int max_stack,
-					unsigned int max_lbr)
+					unsigned int max_lbr,
+					bool symbols)
 {
 	bool callee = (callchain_param.order == ORDER_CALLEE);
 	struct ip_callchain *chain = sample->callchain;
@@ -2648,8 +2584,12 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 					      max_lbr, callee);
 
 		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
-			list_replace_init(&lbr_stitch->lists,
-					  &lbr_stitch->free_lists);
+			struct stitch_list *stitch_node;
+
+			list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
+				map_symbol__exit(&stitch_node->cursor.ms);
+
+			list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
 		}
 		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
 	}
@@ -2658,12 +2598,12 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 		/* Add kernel ip */
 		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
 						  parent, root_al, branch_from,
-						  true, i);
+						  true, i, symbols);
 		if (err)
 			goto error;
 
 		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
-					       root_al, &branch_from, true);
+					       root_al, &branch_from, true, symbols);
 		if (err)
 			goto error;
 
@@ -2680,14 +2620,14 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 			goto error;
 		}
 		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
-					       root_al, &branch_from, false);
+					       root_al, &branch_from, false, symbols);
 		if (err)
 			goto error;
 
 		/* Add kernel ip */
 		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
 						  parent, root_al, branch_from,
-						  false, i);
+						  false, i, symbols);
 		if (err)
 			goto error;
 	}
@@ -2701,7 +2641,7 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
 			     struct callchain_cursor *cursor,
 			     struct symbol **parent,
 			     struct addr_location *root_al,
-			     u8 *cpumode, int ent)
+			     u8 *cpumode, int ent, bool symbols)
 {
 	int err = 0;
 
@@ -2711,7 +2651,7 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
 		if (ip >= PERF_CONTEXT_MAX) {
 			err = add_callchain_ip(thread, cursor, parent,
 					       root_al, cpumode, ip,
-					       false, NULL, NULL, 0);
+					       false, NULL, NULL, 0, symbols);
 			break;
 		}
 	}
@@ -2733,7 +2673,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 					    struct perf_sample *sample,
 					    struct symbol **parent,
 					    struct addr_location *root_al,
-					    int max_stack)
+					    int max_stack,
+					    bool symbols)
 {
 	struct branch_stack *branch = sample->branch_stack;
 	struct branch_entry *entries = perf_sample__branch_entries(sample);
@@ -2753,7 +2694,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 
 		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
 						   root_al, max_stack,
-						   !env ? 0 : env->max_branches);
+						   !env ? 0 : env->max_branches,
+						   symbols);
 		if (err)
 			return (err < 0) ? err : 0;
 	}
@@ -2818,13 +2760,14 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 					       root_al,
 					       NULL, be[i].to,
 					       true, &be[i].flags,
-					       NULL, be[i].from);
+					       NULL, be[i].from, symbols);
 
-			if (!err)
+			if (!err) {
 				err = add_callchain_ip(thread, cursor, parent, root_al,
 						       NULL, be[i].from,
 						       true, &be[i].flags,
-						       &iter[i], 0);
+						       &iter[i], 0, symbols);
+			}
 			if (err == -EINVAL)
 				break;
 			if (err)
@@ -2840,7 +2783,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 check_calls:
 	if (chain && callchain_param.order != ORDER_CALLEE) {
 		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
-					&cpumode, chain->nr - first_call);
+					&cpumode, chain->nr - first_call, symbols);
 		if (err)
 			return (err < 0) ? err : 0;
 	}
@@ -2862,7 +2805,7 @@ check_calls:
 			++nr_entries;
 		else if (callchain_param.order != ORDER_CALLEE) {
 			err = find_prev_cpumode(chain, thread, cursor, parent,
-						root_al, &cpumode, j);
+						root_al, &cpumode, j, symbols);
 			if (err)
 				return (err < 0) ? err : 0;
 			continue;
@@ -2889,8 +2832,8 @@ check_calls:
 			if (leaf_frame_caller && leaf_frame_caller != ip) {
 
 				err = add_callchain_ip(thread, cursor, parent,
-					       root_al, &cpumode, leaf_frame_caller,
-					       false, NULL, NULL, 0);
+						       root_al, &cpumode, leaf_frame_caller,
+						       false, NULL, NULL, 0, symbols);
 				if (err)
 					return (err < 0) ? err : 0;
 			}
@@ -2898,7 +2841,7 @@ check_calls:
 
 		err = add_callchain_ip(thread, cursor, parent,
 				       root_al, &cpumode, ip,
-				       false, NULL, NULL, 0);
+				       false, NULL, NULL, 0, symbols);
 
 		if (err)
 			return (err < 0) ? err : 0;
@@ -2925,12 +2868,12 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms
 	addr = map__rip_2objdump(map, addr);
 	dso = map__dso(map);
 
-	inline_node = inlines__tree_find(&dso->inlined_nodes, addr);
+	inline_node = inlines__tree_find(dso__inlined_nodes(dso), addr);
 	if (!inline_node) {
 		inline_node = dso__parse_addr_inlines(dso, addr, sym);
 		if (!inline_node)
 			return ret;
-		inlines__tree_insert(&dso->inlined_nodes, inline_node);
+		inlines__tree_insert(dso__inlined_nodes(dso), inline_node);
 	}
 
 	ilist_ms = (struct map_symbol) {
@@ -2978,7 +2921,7 @@ static int thread__resolve_callchain_unwind(struct thread *thread,
 					    struct callchain_cursor *cursor,
 					    struct evsel *evsel,
 					    struct perf_sample *sample,
-					    int max_stack)
+					    int max_stack, bool symbols)
 {
 	/* Can we do dwarf post unwind? */
 	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
@@ -2990,17 +2933,21 @@ static int thread__resolve_callchain_unwind(struct thread *thread,
 	    (!sample->user_stack.size))
 		return 0;
 
+	if (!symbols)
+		pr_debug("Not resolving symbols with an unwinder isn't currently supported\n");
+
 	return unwind__get_entries(unwind_entry, cursor,
 				   thread, sample, max_stack, false);
 }
 
-int thread__resolve_callchain(struct thread *thread,
-			      struct callchain_cursor *cursor,
-			      struct evsel *evsel,
-			      struct perf_sample *sample,
-			      struct symbol **parent,
-			      struct addr_location *root_al,
-			      int max_stack)
+int __thread__resolve_callchain(struct thread *thread,
+				struct callchain_cursor *cursor,
+				struct evsel *evsel,
+				struct perf_sample *sample,
+				struct symbol **parent,
+				struct addr_location *root_al,
+				int max_stack,
+				bool symbols)
 {
 	int ret = 0;
 
@@ -3013,22 +2960,22 @@ int thread__resolve_callchain(struct thread *thread,
 		ret = thread__resolve_callchain_sample(thread, cursor,
 						       evsel, sample,
 						       parent, root_al,
-						       max_stack);
+						       max_stack, symbols);
 		if (ret)
 			return ret;
 		ret = thread__resolve_callchain_unwind(thread, cursor,
 						       evsel, sample,
-						       max_stack);
+						       max_stack, symbols);
 	} else {
 		ret = thread__resolve_callchain_unwind(thread, cursor,
 						       evsel, sample,
-						       max_stack);
+						       max_stack, symbols);
 		if (ret)
			return ret;
 		ret = thread__resolve_callchain_sample(thread, cursor,
 						       evsel, sample,
 						       parent, root_al,
-						       max_stack);
+						       max_stack, symbols);
 	}
 
 	return ret;
@@ -3200,7 +3147,8 @@ out:
 	return addr_cpumode;
 }
 
-struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
+struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename,
+				    const struct dso_id *id)
 {
 	return dsos__findnew_id(&machine->dsos, filename, id);
 }
@@ -3219,21 +3167,33 @@ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, ch
 	if (sym == NULL)
 		return NULL;
 
-	*modp = __map__is_kmodule(map) ? (char *)map__dso(map)->short_name : NULL;
+	*modp = __map__is_kmodule(map) ? (char *)dso__short_name(map__dso(map)) : NULL;
 	*addrp = map__unmap_ip(map, sym->start);
 
 	return sym->name;
 }
 
+struct machine__for_each_dso_cb_args {
+	struct machine *machine;
+	machine__dso_t fn;
+	void *priv;
+};
+
+static int machine__for_each_dso_cb(struct dso *dso, void *data)
+{
+	struct machine__for_each_dso_cb_args *args = data;
+
+	return args->fn(dso, args->machine, args->priv);
+}
+
 int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
 {
-	struct dso *pos;
-	int err = 0;
+	struct machine__for_each_dso_cb_args args = {
+		.machine = machine,
+		.fn = fn,
+		.priv = priv,
+	};
 
-	list_for_each_entry(pos, &machine->dsos.head, node) {
-		if (fn(pos, machine, priv))
-			err = -1;
-	}
-	return err;
+	return dsos__for_each_dso(&machine->dsos, machine__for_each_dso_cb, &args);
 }
 
 int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
@@ -3266,6 +3226,17 @@ bool machine__is_lock_function(struct machine *machine, u64 addr)
 
 		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
 		machine->lock.text_end = map__unmap_ip(kmap, sym->start);
+
+		sym = machine__find_kernel_symbol_by_name(machine, "__traceiter_contention_begin", &kmap);
+		if (sym) {
+			machine->traceiter.text_start = map__unmap_ip(kmap, sym->start);
+			machine->traceiter.text_end = map__unmap_ip(kmap, sym->end);
+		}
+		sym = machine__find_kernel_symbol_by_name(machine, "trace_contention_begin", &kmap);
+		if (sym) {
+			machine->trace.text_start = map__unmap_ip(kmap, sym->start);
+			machine->trace.text_end = map__unmap_ip(kmap, sym->end);
+		}
 	}
 
 	/* failed to get kernel symbols */
@@ -3280,5 +3251,23 @@ bool machine__is_lock_function(struct machine *machine, u64 addr)
 	if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
 		return true;
 
+	/* traceiter functions currently don't have their own section
+	 * but we consider them lock functions
+	 */
+	if (machine->traceiter.text_start != 0) {
+		if (machine->traceiter.text_start <= addr && addr < machine->traceiter.text_end)
+			return true;
+	}
+
+	if (machine->trace.text_start != 0) {
+		if (machine->trace.text_start <= addr && addr < machine->trace.text_end)
+			return true;
+	}
+
 	return false;
 }
+
+int machine__hit_all_dsos(struct machine *machine)
+{
+	return dsos__hit_all(&machine->dsos);
+}