author    | Linus Torvalds <torvalds@linux-foundation.org> | 2019-11-26 15:04:47 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-11-26 15:04:47 -0800
commit    | 3f59dbcace56fae7e4ed303bab90f1bedadcfdf4 (patch)
tree      | c425529202b9dbe3e3b3dde072c1edf51b1b9e93 /tools/perf/util/hist.c
parent    | df28204bb0f29cc475c0a8893c99b46a11a4903f (diff)
parent    | ceb9e77324fa661b1001a0ae66f061b5fcb4e4e6 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"The main kernel side changes in this cycle were:
- Various Intel-PT updates and optimizations (Alexander Shishkin)
- Prohibit kprobes on Xen/KVM emulate prefixes (Masami Hiramatsu)
- Add support for LSM and SELinux checks to control access to the
perf syscall (Joel Fernandes)
- Misc other changes, optimizations, fixes and cleanups - see the
shortlog for details.
There were numerous tooling changes as well - 254 non-merge commits.
Here are the main changes - too many to list in detail:
- Enhancements to core tooling infrastructure, perf.data, libperf,
libtraceevent, event parsing, vendor events, Intel PT, callchains,
BPF support and instruction decoding.
- There were updates to the following tools:
perf annotate
perf diff
perf inject
perf kvm
perf list
perf maps
perf parse
perf probe
perf record
perf report
perf script
perf stat
perf test
perf trace
- And a lot of other changes: please see the shortlog and Git log for
more details"
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (279 commits)
perf parse: Fix potential memory leak when handling tracepoint errors
perf probe: Fix spelling mistake "addrees" -> "address"
libtraceevent: Fix memory leakage in copy_filter_type
libtraceevent: Fix header installation
perf intel-bts: Does not support AUX area sampling
perf intel-pt: Add support for decoding AUX area samples
perf intel-pt: Add support for recording AUX area samples
perf pmu: When using default config, record which bits of config were changed by the user
perf auxtrace: Add support for queuing AUX area samples
perf session: Add facility to peek at all events
perf auxtrace: Add support for dumping AUX area samples
perf inject: Cut AUX area samples
perf record: Add aux-sample-size config term
perf record: Add support for AUX area sampling
perf auxtrace: Add support for AUX area sample recording
perf auxtrace: Move perf_evsel__find_pmu()
perf record: Add a function to test for kernel support for AUX area sampling
perf tools: Add kernel AUX area sampling definitions
perf/core: Make the mlock accounting simple again
perf report: Jump to symbol source view from total cycles view
...
Diffstat (limited to 'tools/perf/util/hist.c')
-rw-r--r-- | tools/perf/util/hist.c | 71
1 file changed, 43 insertions, 28 deletions
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 7b6eaf5e0bda..0a8d72ae93ca 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -18,6 +18,7 @@
 #include "srcline.h"
 #include "symbol.h"
 #include "thread.h"
+#include "block-info.h"
 #include "ui/progress.h"
 #include <errno.h>
 #include <math.h>
@@ -80,6 +81,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 	int symlen;
 	u16 len;
 
+	if (h->block_info)
+		return;
 	/*
 	 * +4 accounts for '[x] ' priv level info
 	 * +2 accounts for 0x prefix on raw addresses
@@ -109,13 +112,13 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
 
 	if (h->branch_info) {
-		if (h->branch_info->from.sym) {
-			symlen = (int)h->branch_info->from.sym->namelen + 4;
+		if (h->branch_info->from.ms.sym) {
+			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
 			if (verbose > 0)
 				symlen += BITS_PER_LONG / 4 + 2 + 3;
 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
 
-			symlen = dso__name_len(h->branch_info->from.map->dso);
+			symlen = dso__name_len(h->branch_info->from.ms.map->dso);
 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
 		} else {
 			symlen = unresolved_col_width + 4 + 2;
@@ -123,13 +126,13 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
 		}
 
-		if (h->branch_info->to.sym) {
-			symlen = (int)h->branch_info->to.sym->namelen + 4;
+		if (h->branch_info->to.ms.sym) {
+			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
 			if (verbose > 0)
 				symlen += BITS_PER_LONG / 4 + 2 + 3;
 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
 
-			symlen = dso__name_len(h->branch_info->to.map->dso);
+			symlen = dso__name_len(h->branch_info->to.ms.map->dso);
 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
 		} else {
 			symlen = unresolved_col_width + 4 + 2;
@@ -146,8 +149,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 	}
 
 	if (h->mem_info) {
-		if (h->mem_info->daddr.sym) {
-			symlen = (int)h->mem_info->daddr.sym->namelen + 4
+		if (h->mem_info->daddr.ms.sym) {
+			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
 			       + unresolved_col_width + 2;
 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
 					   symlen);
@@ -161,8 +164,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 					   symlen);
 		}
 
-		if (h->mem_info->iaddr.sym) {
-			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
+		if (h->mem_info->iaddr.ms.sym) {
+			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
 			       + unresolved_col_width + 2;
 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
 					   symlen);
@@ -172,8 +175,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 					   symlen);
 		}
 
-		if (h->mem_info->daddr.map) {
-			symlen = dso__name_len(h->mem_info->daddr.map->dso);
+		if (h->mem_info->daddr.ms.map) {
+			symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
 					   symlen);
 		} else {
@@ -440,13 +443,13 @@ static int hist_entry__init(struct hist_entry *he,
 		memcpy(he->branch_info, template->branch_info,
 		       sizeof(*he->branch_info));
 
-		map__get(he->branch_info->from.map);
-		map__get(he->branch_info->to.map);
+		map__get(he->branch_info->from.ms.map);
+		map__get(he->branch_info->to.ms.map);
 	}
 
 	if (he->mem_info) {
-		map__get(he->mem_info->iaddr.map);
-		map__get(he->mem_info->daddr.map);
+		map__get(he->mem_info->iaddr.ms.map);
+		map__get(he->mem_info->daddr.ms.map);
 	}
 
 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
@@ -489,13 +492,13 @@ err_rawdata:
 
 err_infos:
 	if (he->branch_info) {
-		map__put(he->branch_info->from.map);
-		map__put(he->branch_info->to.map);
+		map__put(he->branch_info->from.ms.map);
+		map__put(he->branch_info->to.ms.map);
 		zfree(&he->branch_info);
 	}
 	if (he->mem_info) {
-		map__put(he->mem_info->iaddr.map);
-		map__put(he->mem_info->daddr.map);
+		map__put(he->mem_info->iaddr.ms.map);
+		map__put(he->mem_info->daddr.ms.map);
 	}
 err:
 	map__zput(he->ms.map);
@@ -689,6 +692,7 @@ __hists__add_entry(struct hists *hists,
 			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
 		},
 		.ms = {
+			.mg = al->mg,
 			.map = al->map,
 			.sym = al->sym,
 		},
@@ -755,6 +759,11 @@ struct hist_entry *hists__add_entry_block(struct hists *hists,
 	struct hist_entry entry = {
 		.block_info = block_info,
 		.hists = hists,
+		.ms = {
+			.mg = al->mg,
+			.map = al->map,
+			.sym = al->sym,
+		},
 	}, *he = hists__findnew_entry(hists, &entry, al, false);
 
 	return he;
@@ -886,8 +895,9 @@ iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
 	if (iter->curr >= iter->total)
 		return 0;
 
-	al->map = bi[i].to.map;
-	al->sym = bi[i].to.sym;
+	al->mg = bi[i].to.ms.mg;
+	al->map = bi[i].to.ms.map;
+	al->sym = bi[i].to.ms.sym;
 	al->addr = bi[i].to.addr;
 	return 1;
 }
@@ -905,7 +915,7 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a
 
 	bi = iter->priv;
 
-	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
+	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
 		goto out;
 
 	/*
@@ -1062,6 +1072,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
 		.comm = thread__comm(al->thread),
 		.ip = al->addr,
 		.ms = {
+			.mg = al->mg,
 			.map = al->map,
 			.sym = al->sym,
 		},
@@ -1244,16 +1255,16 @@ void hist_entry__delete(struct hist_entry *he)
 	map__zput(he->ms.map);
 
 	if (he->branch_info) {
-		map__zput(he->branch_info->from.map);
-		map__zput(he->branch_info->to.map);
+		map__zput(he->branch_info->from.ms.map);
+		map__zput(he->branch_info->to.ms.map);
 		free_srcline(he->branch_info->srcline_from);
 		free_srcline(he->branch_info->srcline_to);
 		zfree(&he->branch_info);
 	}
 
 	if (he->mem_info) {
-		map__zput(he->mem_info->iaddr.map);
-		map__zput(he->mem_info->daddr.map);
+		map__zput(he->mem_info->iaddr.ms.map);
+		map__zput(he->mem_info->daddr.ms.map);
 		mem_info__zput(he->mem_info);
 	}
 
@@ -2569,7 +2580,8 @@ int hists__unlink(struct hists *hists)
 }
 
 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
-			  struct perf_sample *sample, bool nonany_branch_mode)
+			  struct perf_sample *sample, bool nonany_branch_mode,
+			  u64 *total_cycles)
 {
 	struct branch_info *bi;
 
@@ -2596,6 +2608,9 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
 				nonany_branch_mode ? NULL : prev,
 				bi[i].flags.cycles);
 		prev = &bi[i].to;
+
+		if (total_cycles)
+			*total_cycles += bi[i].flags.cycles;
 	}
 	free(bi);
 }
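
The refactoring above is largely mechanical: the map/symbol pointers move under an embedded ms member (so bi->to.sym becomes bi->to.ms.sym, alongside a new mg map-groups pointer), and hist__account_cycles() gains an optional total_cycles out-parameter. The sketch below is a hedged illustration of how a caller might use that new parameter to accumulate cycles across samples, presumably feeding the "total cycles view" referenced in the shortlog above; the function and variable names are invented for illustration and are not part of this commit — only the hist__account_cycles() signature is taken from the diff.

/*
 * Hypothetical caller sketch, not from this commit: only the
 * hist__account_cycles() signature comes from the diff above.
 */
#include "hist.h"	/* perf-internal header declaring hist__account_cycles() */

static u64 report_total_cycles;	/* running sum across all processed samples */

static void account_sample_cycles(struct branch_stack *bs,
				  struct addr_location *al,
				  struct perf_sample *sample,
				  bool nonany_branch_mode)
{
	/*
	 * A non-NULL last argument makes hist__account_cycles() add each
	 * branch entry's cycle count to the pointed-to total; passing NULL
	 * keeps the previous behaviour (no accumulation).
	 */
	hist__account_cycles(bs, al, sample, nonany_branch_mode,
			     &report_total_cycles);
}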