Diffstat (limited to 'tools/perf/util/sort.c')
-rw-r--r--  tools/perf/util/sort.c | 959
1 file changed, 784 insertions(+), 175 deletions(-)
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 37662cdec5ee..f3a565b0e230 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -23,16 +23,19 @@
#include "strlist.h"
#include "strbuf.h"
#include "mem-events.h"
+#include "mem-info.h"
#include "annotate.h"
+#include "annotate-data.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
+#include "trace-event.h"
#include <linux/kernel.h>
#include <linux/string.h>
#ifdef HAVE_LIBTRACEEVENT
-#include <traceevent/event-parse.h>
+#include <event-parse.h>
#endif
regex_t parent_regex;
@@ -53,6 +56,13 @@ static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
/*
+ * Some architectures have Adjacent Cacheline Prefetch feature, which
+ * behaves like the cacheline size is doubled. Enable this flag to
+ * check things in double cacheline granularity.
+ */
+bool chk_double_cl;
+
+/*
* Replaces all occurrences of a char used with the:
*
* -t, --field-separator
@@ -100,7 +110,7 @@ static int64_t cmp_null(const void *l, const void *r)
static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return right->thread->tid - left->thread->tid;
+ return thread__tid(right->thread) - thread__tid(left->thread);
}
static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
@@ -109,7 +119,7 @@ static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
const char *comm = thread__comm_str(he->thread);
width = max(7U, width) - 8;
- return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
+ return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
width, width, comm ?: "");
}
@@ -120,7 +130,7 @@ static int hist_entry__thread_filter(struct hist_entry *he, int type, const void
if (type != HIST_FILTER__THREAD)
return -1;
- return th && he->thread != th;
+ return th && !RC_CHK_EQUAL(he->thread, th);
}
struct sort_entry sort_thread = {
@@ -131,6 +141,89 @@ struct sort_entry sort_thread = {
.se_width_idx = HISTC_THREAD,
};
+/* --sort tgid */
+
+static int64_t
+sort__tgid_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return thread__pid(right->thread) - thread__pid(left->thread);
+}
+
+static int hist_entry__tgid_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ int tgid = thread__pid(he->thread);
+ const char *comm = NULL;
+
+ /* display comm of the thread-group leader */
+ if (thread__pid(he->thread) == thread__tid(he->thread)) {
+ comm = thread__comm_str(he->thread);
+ } else {
+ struct maps *maps = thread__maps(he->thread);
+ struct thread *leader = machine__find_thread(maps__machine(maps),
+ tgid, tgid);
+ if (leader) {
+ comm = thread__comm_str(leader);
+ thread__put(leader);
+ }
+ }
+ width = max(7U, width) - 8;
+ return repsep_snprintf(bf, size, "%7d:%-*.*s", tgid, width, width, comm ?: "");
+}
+
+struct sort_entry sort_tgid = {
+ .se_header = " Tgid:Command",
+ .se_cmp = sort__tgid_cmp,
+ .se_snprintf = hist_entry__tgid_snprintf,
+ .se_width_idx = HISTC_TGID,
+};
+
+/* --sort simd */
+
+static int64_t
+sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ if (left->simd_flags.arch != right->simd_flags.arch)
+ return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
+
+ return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
+}
+
+static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
+{
+ u64 arch = simd_flags->arch;
+
+ if (arch & SIMD_OP_FLAGS_ARCH_SVE)
+ return "SVE";
+ else
+ return "n/a";
+}
+
+static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width __maybe_unused)
+{
+ const char *name;
+
+ if (!he->simd_flags.arch)
+ return repsep_snprintf(bf, size, "");
+
+ name = hist_entry__get_simd_name(&he->simd_flags);
+
+ if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
+ return repsep_snprintf(bf, size, "[e] %s", name);
+ else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
+ return repsep_snprintf(bf, size, "[p] %s", name);
+
+ return repsep_snprintf(bf, size, "[.] %s", name);
+}
+
+struct sort_entry sort_simd = {
+ .se_header = "Simd ",
+ .se_cmp = sort__simd_cmp,
+ .se_snprintf = hist_entry__simd_snprintf,
+ .se_width_idx = HISTC_SIMD,
+};
+
/* --sort comm */
/*
@@ -176,19 +269,19 @@ struct sort_entry sort_comm = {
static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
- struct dso *dso_l = map_l ? map_l->dso : NULL;
- struct dso *dso_r = map_r ? map_r->dso : NULL;
+ struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
+ struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
const char *dso_name_l, *dso_name_r;
if (!dso_l || !dso_r)
return cmp_null(dso_r, dso_l);
if (verbose > 0) {
- dso_name_l = dso_l->long_name;
- dso_name_r = dso_r->long_name;
+ dso_name_l = dso__long_name(dso_l);
+ dso_name_r = dso__long_name(dso_r);
} else {
- dso_name_l = dso_l->short_name;
- dso_name_r = dso_r->short_name;
+ dso_name_l = dso__short_name(dso_l);
+ dso_name_r = dso__short_name(dso_r);
}
return strcmp(dso_name_l, dso_name_r);
@@ -203,13 +296,13 @@ sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
static int _hist_entry__dso_snprintf(struct map *map, char *bf,
size_t size, unsigned int width)
{
- if (map && map->dso) {
- const char *dso_name = verbose > 0 ? map->dso->long_name :
- map->dso->short_name;
- return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
- }
+ const struct dso *dso = map ? map__dso(map) : NULL;
+ const char *dso_name = "[unknown]";
+
+ if (dso)
+ dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);
- return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
}
static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
@@ -225,7 +318,7 @@ static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *a
if (type != HIST_FILTER__DSO)
return -1;
- return dso && (!he->ms.map || he->ms.map->dso != dso);
+ return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
}
struct sort_entry sort_dso = {
@@ -278,7 +371,7 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
* comparing symbol address alone is not enough since it's a
* relative address within a dso.
*/
- if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
+ if (!hists__has(left->hists, dso)) {
ret = sort__dso_cmp(left, right);
if (ret != 0)
return ret;
@@ -305,12 +398,12 @@ static int _hist_entry__sym_snprintf(struct map_symbol *ms,
size_t ret = 0;
if (verbose > 0) {
- char o = map ? dso__symtab_origin(map->dso) : '!';
+ struct dso *dso = map ? map__dso(map) : NULL;
+ char o = dso ? dso__symtab_origin(dso) : '!';
u64 rip = ip;
- if (map && map->dso && map->dso->kernel
- && map->dso->adjust_symbols)
- rip = map->unmap_ip(map, ip);
+ if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
+ rip = map__unmap_ip(map, ip);
ret += repsep_snprintf(bf, size, "%-#*llx %c ",
BITS_PER_LONG / 4 + 2, rip, o);
@@ -321,7 +414,7 @@ static int _hist_entry__sym_snprintf(struct map_symbol *ms,
if (sym->type == STT_OBJECT) {
ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
- ip - map->unmap_ip(map, sym->start));
+ ip - map__unmap_ip(map, sym->start));
} else {
ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
width - ret,
@@ -364,6 +457,52 @@ struct sort_entry sort_sym = {
.se_width_idx = HISTC_SYMBOL,
};
+/* --sort symoff */
+
+static int64_t
+sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ int64_t ret;
+
+ ret = sort__sym_cmp(left, right);
+ if (ret)
+ return ret;
+
+ return left->ip - right->ip;
+}
+
+static int64_t
+sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ int64_t ret;
+
+ ret = sort__sym_sort(left, right);
+ if (ret)
+ return ret;
+
+ return left->ip - right->ip;
+}
+
+static int
+hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
+{
+ struct symbol *sym = he->ms.sym;
+
+ if (sym == NULL)
+ return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);
+
+ return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
+}
+
+struct sort_entry sort_sym_offset = {
+ .se_header = "Symbol Offset",
+ .se_cmp = sort__symoff_cmp,
+ .se_sort = sort__symoff_sort,
+ .se_snprintf = hist_entry__symoff_snprintf,
+ .se_filter = hist_entry__sym_filter,
+ .se_width_idx = HISTC_SYMBOL_OFFSET,
+};
+
/* --sort srcline */
char *hist_entry__srcline(struct hist_entry *he)
@@ -529,21 +668,21 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
{
struct symbol *sym = he->ms.sym;
- struct annotation *notes;
+ struct annotated_branch *branch;
double ipc = 0.0, coverage = 0.0;
char tmp[64];
if (!sym)
return repsep_snprintf(bf, size, "%-*s", width, "-");
- notes = symbol__annotation(sym);
+ branch = symbol__annotation(sym)->branch;
- if (notes->hit_cycles)
- ipc = notes->hit_insn / ((double)notes->hit_cycles);
+ if (branch && branch->hit_cycles)
+ ipc = branch->hit_insn / ((double)branch->hit_cycles);
- if (notes->total_insn) {
- coverage = notes->cover_insn * 100.0 /
- ((double)notes->total_insn);
+ if (branch && branch->total_insn) {
+ coverage = branch->cover_insn * 100.0 /
+ ((double)branch->total_insn);
}
snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
@@ -575,6 +714,102 @@ struct sort_entry sort_sym_ipc_null = {
.se_width_idx = HISTC_SYMBOL_IPC,
};
+/* --sort callchain_branch_predicted */
+
+static int64_t
+sort__callchain_branch_predicted_cmp(struct hist_entry *left __maybe_unused,
+ struct hist_entry *right __maybe_unused)
+{
+ return 0;
+}
+
+static int hist_entry__callchain_branch_predicted_snprintf(
+ struct hist_entry *he, char *bf, size_t size, unsigned int width)
+{
+ u64 branch_count, predicted_count;
+ double percent = 0.0;
+ char str[32];
+
+ callchain_branch_counts(he->callchain, &branch_count,
+ &predicted_count, NULL, NULL);
+
+ if (branch_count)
+ percent = predicted_count * 100.0 / branch_count;
+
+ snprintf(str, sizeof(str), "%.1f%%", percent);
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
+}
+
+struct sort_entry sort_callchain_branch_predicted = {
+ .se_header = "Predicted",
+ .se_cmp = sort__callchain_branch_predicted_cmp,
+ .se_snprintf = hist_entry__callchain_branch_predicted_snprintf,
+ .se_width_idx = HISTC_CALLCHAIN_BRANCH_PREDICTED,
+};
+
+/* --sort callchain_branch_abort */
+
+static int64_t
+sort__callchain_branch_abort_cmp(struct hist_entry *left __maybe_unused,
+ struct hist_entry *right __maybe_unused)
+{
+ return 0;
+}
+
+static int hist_entry__callchain_branch_abort_snprintf(struct hist_entry *he,
+ char *bf, size_t size,
+ unsigned int width)
+{
+ u64 branch_count, abort_count;
+ char str[32];
+
+ callchain_branch_counts(he->callchain, &branch_count,
+ NULL, &abort_count, NULL);
+
+ snprintf(str, sizeof(str), "%" PRId64, abort_count);
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
+}
+
+struct sort_entry sort_callchain_branch_abort = {
+ .se_header = "Abort",
+ .se_cmp = sort__callchain_branch_abort_cmp,
+ .se_snprintf = hist_entry__callchain_branch_abort_snprintf,
+ .se_width_idx = HISTC_CALLCHAIN_BRANCH_ABORT,
+};
+
+/* --sort callchain_branch_cycles */
+
+static int64_t
+sort__callchain_branch_cycles_cmp(struct hist_entry *left __maybe_unused,
+ struct hist_entry *right __maybe_unused)
+{
+ return 0;
+}
+
+static int hist_entry__callchain_branch_cycles_snprintf(struct hist_entry *he,
+ char *bf, size_t size,
+ unsigned int width)
+{
+ u64 branch_count, cycles_count, cycles = 0;
+ char str[32];
+
+ callchain_branch_counts(he->callchain, &branch_count,
+ NULL, NULL, &cycles_count);
+
+ if (branch_count)
+ cycles = cycles_count / branch_count;
+
+ snprintf(str, sizeof(str), "%" PRId64 "", cycles);
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
+}
+
+struct sort_entry sort_callchain_branch_cycles = {
+ .se_header = "Cycles",
+ .se_cmp = sort__callchain_branch_cycles_cmp,
+ .se_snprintf = hist_entry__callchain_branch_cycles_snprintf,
+ .se_width_idx = HISTC_CALLCHAIN_BRANCH_CYCLES,
+};
+
/* --sort srcfile */
static char no_srcfile[1];
@@ -587,9 +822,9 @@ static char *hist_entry__get_srcfile(struct hist_entry *e)
if (!map)
return no_srcfile;
- sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
+ sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
e->ms.sym, false, true, true, e->ip);
- if (!strcmp(sf, SRCLINE_UNKNOWN))
+ if (sf == SRCLINE_UNKNOWN)
return no_srcfile;
p = strchr(sf, ':');
if (p && *sf) {
@@ -603,12 +838,7 @@ static char *hist_entry__get_srcfile(struct hist_entry *e)
static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
- if (!left->srcfile)
- left->srcfile = hist_entry__get_srcfile(left);
- if (!right->srcfile)
- right->srcfile = hist_entry__get_srcfile(right);
-
- return strcmp(right->srcfile, left->srcfile);
+ return sort__srcline_cmp(left, right);
}
static int64_t
@@ -699,6 +929,38 @@ struct sort_entry sort_cpu = {
.se_width_idx = HISTC_CPU,
};
+/* --sort parallelism */
+
+static int64_t
+sort__parallelism_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->parallelism - left->parallelism;
+}
+
+static int hist_entry__parallelism_filter(struct hist_entry *he, int type, const void *arg)
+{
+ const unsigned long *parallelism_filter = arg;
+
+ if (type != HIST_FILTER__PARALLELISM)
+ return -1;
+
+ return test_bit(he->parallelism, parallelism_filter);
+}
+
+static int hist_entry__parallelism_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%*d", width, he->parallelism);
+}
+
+struct sort_entry sort_parallelism = {
+ .se_header = "Parallelism",
+ .se_cmp = sort__parallelism_cmp,
+ .se_filter = hist_entry__parallelism_filter,
+ .se_snprintf = hist_entry__parallelism_snprintf,
+ .se_width_idx = HISTC_PARALLELISM,
+};
+
/* --sort cgroup_id */
static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
@@ -754,7 +1016,7 @@ static int hist_entry__cgroup_snprintf(struct hist_entry *he,
const char *cgrp_name = "N/A";
if (he->cgroup) {
- struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env,
+ struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
he->cgroup);
if (cgrp != NULL)
cgrp_name = cgrp->name;
@@ -845,17 +1107,19 @@ static char *get_trace_output(struct hist_entry *he)
.data = he->raw_data,
.size = he->raw_size,
};
+ struct tep_event *tp_format;
evsel = hists_to_evsel(he->hists);
trace_seq_init(&seq);
- if (symbol_conf.raw_trace) {
- tep_print_fields(&seq, he->raw_data, he->raw_size,
- evsel->tp_format);
- } else {
- tep_print_event(evsel->tp_format->tep,
- &seq, &rec, "%s", TEP_PRINT_INFO);
+ tp_format = evsel__tp_format(evsel);
+ if (tp_format) {
+ if (symbol_conf.raw_trace)
+ tep_print_fields(&seq, he->raw_data, he->raw_size, tp_format);
+ else
+ tep_print_event(tp_format->tep, &seq, &rec, "%s", TEP_PRINT_INFO);
}
+
/*
* Trim the buffer, it starts at 4KB and we're not going to
* add anything more to this buffer.
@@ -933,7 +1197,7 @@ static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
return -1;
return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
- he->branch_info->from.ms.map->dso != dso);
+ map__dso(he->branch_info->from.ms.map) != dso);
}
static int64_t
@@ -965,14 +1229,13 @@ static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
return -1;
return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
- he->branch_info->to.ms.map->dso != dso);
+ map__dso(he->branch_info->to.ms.map) != dso);
}
static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
- struct addr_map_symbol *from_l = &left->branch_info->from;
- struct addr_map_symbol *from_r = &right->branch_info->from;
+ struct addr_map_symbol *from_l, *from_r;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
@@ -1098,7 +1361,7 @@ static int _hist_entry__addr_snprintf(struct map_symbol *ms,
if (sym->type == STT_OBJECT) {
ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
- ip - map->unmap_ip(map, sym->start));
+ ip - map__unmap_ip(map, sym->start));
} else {
ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
width - ret,
@@ -1269,9 +1532,9 @@ sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
uint64_t l = 0, r = 0;
if (left->mem_info)
- l = left->mem_info->daddr.addr;
+ l = mem_info__daddr(left->mem_info)->addr;
if (right->mem_info)
- r = right->mem_info->daddr.addr;
+ r = mem_info__daddr(right->mem_info)->addr;
return (int64_t)(r - l);
}
@@ -1283,8 +1546,8 @@ static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
struct map_symbol *ms = NULL;
if (he->mem_info) {
- addr = he->mem_info->daddr.addr;
- ms = &he->mem_info->daddr.ms;
+ addr = mem_info__daddr(he->mem_info)->addr;
+ ms = &mem_info__daddr(he->mem_info)->ms;
}
return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}
@@ -1295,9 +1558,9 @@ sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
uint64_t l = 0, r = 0;
if (left->mem_info)
- l = left->mem_info->iaddr.addr;
+ l = mem_info__iaddr(left->mem_info)->addr;
if (right->mem_info)
- r = right->mem_info->iaddr.addr;
+ r = mem_info__iaddr(right->mem_info)->addr;
return (int64_t)(r - l);
}
@@ -1309,8 +1572,8 @@ static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
struct map_symbol *ms = NULL;
if (he->mem_info) {
- addr = he->mem_info->iaddr.addr;
- ms = &he->mem_info->iaddr.ms;
+ addr = mem_info__iaddr(he->mem_info)->addr;
+ ms = &mem_info__iaddr(he->mem_info)->ms;
}
return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}
@@ -1322,9 +1585,9 @@ sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
struct map *map_r = NULL;
if (left->mem_info)
- map_l = left->mem_info->daddr.ms.map;
+ map_l = mem_info__daddr(left->mem_info)->ms.map;
if (right->mem_info)
- map_r = right->mem_info->daddr.ms.map;
+ map_r = mem_info__daddr(right->mem_info)->ms.map;
return _sort__dso_cmp(map_l, map_r);
}
@@ -1335,7 +1598,7 @@ static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
struct map *map = NULL;
if (he->mem_info)
- map = he->mem_info->daddr.ms.map;
+ map = mem_info__daddr(he->mem_info)->ms.map;
return _hist_entry__dso_snprintf(map, bf, size, width);
}
@@ -1347,12 +1610,12 @@ sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_r;
if (left->mem_info)
- data_src_l = left->mem_info->data_src;
+ data_src_l = *mem_info__data_src(left->mem_info);
else
data_src_l.mem_lock = PERF_MEM_LOCK_NA;
if (right->mem_info)
- data_src_r = right->mem_info->data_src;
+ data_src_r = *mem_info__data_src(right->mem_info);
else
data_src_r.mem_lock = PERF_MEM_LOCK_NA;
@@ -1375,12 +1638,12 @@ sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_r;
if (left->mem_info)
- data_src_l = left->mem_info->data_src;
+ data_src_l = *mem_info__data_src(left->mem_info);
else
data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
if (right->mem_info)
- data_src_r = right->mem_info->data_src;
+ data_src_r = *mem_info__data_src(right->mem_info);
else
data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
@@ -1403,12 +1666,12 @@ sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_r;
if (left->mem_info)
- data_src_l = left->mem_info->data_src;
+ data_src_l = *mem_info__data_src(left->mem_info);
else
data_src_l.mem_lvl = PERF_MEM_LVL_NA;
if (right->mem_info)
- data_src_r = right->mem_info->data_src;
+ data_src_r = *mem_info__data_src(right->mem_info);
else
data_src_r.mem_lvl = PERF_MEM_LVL_NA;
@@ -1431,12 +1694,12 @@ sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_r;
if (left->mem_info)
- data_src_l = left->mem_info->data_src;
+ data_src_l = *mem_info__data_src(left->mem_info);
else
data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
if (right->mem_info)
- data_src_r = right->mem_info->data_src;
+ data_src_r = *mem_info__data_src(right->mem_info);
else
data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
@@ -1457,6 +1720,7 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
u64 l, r;
struct map *l_map, *r_map;
+ struct dso *l_dso, *r_dso;
int rc;
if (!left->mem_info) return -1;
@@ -1466,8 +1730,8 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
if (left->cpumode > right->cpumode) return -1;
if (left->cpumode < right->cpumode) return 1;
- l_map = left->mem_info->daddr.ms.map;
- r_map = right->mem_info->daddr.ms.map;
+ l_map = mem_info__daddr(left->mem_info)->ms.map;
+ r_map = mem_info__daddr(right->mem_info)->ms.map;
/* if both are NULL, jump to sort on al_addr instead */
if (!l_map && !r_map)
@@ -1476,31 +1740,39 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
if (!l_map) return -1;
if (!r_map) return 1;
- rc = dso__cmp_id(l_map->dso, r_map->dso);
+ l_dso = map__dso(l_map);
+ r_dso = map__dso(r_map);
+ rc = dso__cmp_id(l_dso, r_dso);
if (rc)
return rc;
/*
- * Addresses with no major/minor numbers are assumed to be
+ * Addresses with no major/minor numbers or build ID are assumed to be
* anonymous in userspace. Sort those on pid then address.
*
* The kernel and non-zero major/minor mapped areas are
* assumed to be unity mapped. Sort those on address.
*/
+ if (left->cpumode != PERF_RECORD_MISC_KERNEL && (map__flags(l_map) & MAP_SHARED) == 0) {
+ const struct dso_id *dso_id = dso__id_const(l_dso);
+
+ if (!dso_id->mmap2_valid)
+ dso_id = dso__id_const(r_dso);
- if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
- (!(l_map->flags & MAP_SHARED)) &&
- !l_map->dso->id.maj && !l_map->dso->id.min &&
- !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
- /* userspace anonymous */
+ if (!build_id__is_defined(&dso_id->build_id) &&
+ (!dso_id->mmap2_valid || (dso_id->maj == 0 && dso_id->min == 0))) {
+ /* userspace anonymous */
- if (left->thread->pid_ > right->thread->pid_) return -1;
- if (left->thread->pid_ < right->thread->pid_) return 1;
+ if (thread__pid(left->thread) > thread__pid(right->thread))
+ return -1;
+ if (thread__pid(left->thread) < thread__pid(right->thread))
+ return 1;
+ }
}
addr:
/* al_addr does all the right addr - start + offset calculations */
- l = cl_address(left->mem_info->daddr.al_addr);
- r = cl_address(right->mem_info->daddr.al_addr);
+ l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
+ r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);
if (l > r) return -1;
if (l < r) return 1;
@@ -1517,17 +1789,18 @@ static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
char level = he->level;
if (he->mem_info) {
- struct map *map = he->mem_info->daddr.ms.map;
+ struct map *map = mem_info__daddr(he->mem_info)->ms.map;
+ struct dso *dso = map ? map__dso(map) : NULL;
+ const struct dso_id *dso_id = dso ? dso__id_const(dso) : &dso_id_empty;
- addr = cl_address(he->mem_info->daddr.al_addr);
- ms = &he->mem_info->daddr.ms;
+ addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
+ ms = &mem_info__daddr(he->mem_info)->ms;
/* print [s] for shared data mmaps */
if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
- map && !(map->prot & PROT_EXEC) &&
- (map->flags & MAP_SHARED) &&
- (map->dso->id.maj || map->dso->id.min ||
- map->dso->id.ino || map->dso->id.ino_generation))
+ map && !(map__prot(map) & PROT_EXEC) &&
+ (map__flags(map) & MAP_SHARED) &&
+ (!dso_id->mmap2_valid || (dso_id->maj == 0 && dso_id->min == 0)))
level = 's';
else if (!map)
level = 'X';
@@ -1611,21 +1884,20 @@ struct sort_entry sort_global_ins_lat = {
static int64_t
sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return left->p_stage_cyc - right->p_stage_cyc;
+ return left->weight3 - right->weight3;
}
static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*u", width,
- he->p_stage_cyc * he->stat.nr_events);
+ return repsep_snprintf(bf, size, "%-*u", width, he->weight3 * he->stat.nr_events);
}
static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
+ return repsep_snprintf(bf, size, "%-*u", width, he->weight3);
}
struct sort_entry sort_local_p_stage_cyc = {
@@ -1705,12 +1977,12 @@ sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_r;
if (left->mem_info)
- data_src_l = left->mem_info->data_src;
+ data_src_l = *mem_info__data_src(left->mem_info);
else
data_src_l.mem_blk = PERF_MEM_BLK_NA;
if (right->mem_info)
- data_src_r = right->mem_info->data_src;
+ data_src_r = *mem_info__data_src(right->mem_info);
else
data_src_r.mem_blk = PERF_MEM_BLK_NA;
@@ -1739,9 +2011,9 @@ sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
uint64_t l = 0, r = 0;
if (left->mem_info)
- l = left->mem_info->daddr.phys_addr;
+ l = mem_info__daddr(left->mem_info)->phys_addr;
if (right->mem_info)
- r = right->mem_info->daddr.phys_addr;
+ r = mem_info__daddr(right->mem_info)->phys_addr;
return (int64_t)(r - l);
}
@@ -1753,7 +2025,7 @@ static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
size_t ret = 0;
size_t len = BITS_PER_LONG / 4;
- addr = he->mem_info->daddr.phys_addr;
+ addr = mem_info__daddr(he->mem_info)->phys_addr;
ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
@@ -1780,9 +2052,9 @@ sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
uint64_t l = 0, r = 0;
if (left->mem_info)
- l = left->mem_info->daddr.data_page_size;
+ l = mem_info__daddr(left->mem_info)->data_page_size;
if (right->mem_info)
- r = right->mem_info->daddr.data_page_size;
+ r = mem_info__daddr(right->mem_info)->data_page_size;
return (int64_t)(r - l);
}
@@ -1793,7 +2065,7 @@ static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
char str[PAGE_SIZE_NAME_LEN];
return repsep_snprintf(bf, size, "%-*s", width,
- get_page_size_name(he->mem_info->daddr.data_page_size, str));
+ get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
}
struct sort_entry sort_mem_data_page_size = {
@@ -2023,9 +2295,8 @@ sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
size_t bf_size, unsigned int width)
{
- if (map && map->dso)
- return repsep_snprintf(bf, bf_size, "%*d", width,
- map__size(map));
+ if (map && map__dso(map))
+ return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}
@@ -2043,7 +2314,7 @@ struct sort_entry sort_dso_size = {
.se_width_idx = HISTC_DSO_SIZE,
};
-/* --sort dso_size */
+/* --sort addr */
static int64_t
sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
@@ -2054,9 +2325,9 @@ sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
struct map *right_map = right->ms.map;
if (left_map)
- left_ip = left_map->unmap_ip(left_map, left_ip);
+ left_ip = map__unmap_ip(left_map, left_ip);
if (right_map)
- right_ip = right_map->unmap_ip(right_map, right_ip);
+ right_ip = map__unmap_ip(right_map, right_ip);
return _sort__addr_cmp(left_ip, right_ip);
}
@@ -2068,7 +2339,7 @@ static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
struct map *map = he->ms.map;
if (map)
- ip = map->unmap_ip(map, ip);
+ ip = map__unmap_ip(map, ip);
return repsep_snprintf(bf, size, "%-#*llx", width, ip);
}
@@ -2080,6 +2351,178 @@ struct sort_entry sort_addr = {
.se_width_idx = HISTC_ADDR,
};
+/* --sort type */
+
+struct annotated_data_type unknown_type = {
+ .self = {
+ .type_name = (char *)"(unknown)",
+ .children = LIST_HEAD_INIT(unknown_type.self.children),
+ },
+};
+
+static int64_t
+sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return sort__addr_cmp(left, right);
+}
+
+static void sort__type_init(struct hist_entry *he)
+{
+ if (he->mem_type)
+ return;
+
+ he->mem_type = hist_entry__get_data_type(he);
+ if (he->mem_type == NULL) {
+ he->mem_type = &unknown_type;
+ he->mem_type_off = 0;
+ }
+}
+
+static int64_t
+sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ struct annotated_data_type *left_type = left->mem_type;
+ struct annotated_data_type *right_type = right->mem_type;
+
+ if (!left_type) {
+ sort__type_init(left);
+ left_type = left->mem_type;
+ }
+
+ if (!right_type) {
+ sort__type_init(right);
+ right_type = right->mem_type;
+ }
+
+ return strcmp(left_type->self.type_name, right_type->self.type_name);
+}
+
+static int64_t
+sort__type_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ return sort__type_collapse(left, right);
+}
+
+static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
+}
+
+struct sort_entry sort_type = {
+ .se_header = "Data Type",
+ .se_cmp = sort__type_cmp,
+ .se_collapse = sort__type_collapse,
+ .se_sort = sort__type_sort,
+ .se_init = sort__type_init,
+ .se_snprintf = hist_entry__type_snprintf,
+ .se_width_idx = HISTC_TYPE,
+};
+
+/* --sort typeoff */
+
+static int64_t
+sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ struct annotated_data_type *left_type = left->mem_type;
+ struct annotated_data_type *right_type = right->mem_type;
+ int64_t ret;
+
+ if (!left_type) {
+ sort__type_init(left);
+ left_type = left->mem_type;
+ }
+
+ if (!right_type) {
+ sort__type_init(right);
+ right_type = right->mem_type;
+ }
+
+ ret = strcmp(left_type->self.type_name, right_type->self.type_name);
+ if (ret)
+ return ret;
+ return left->mem_type_off - right->mem_type_off;
+}
+
+static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width __maybe_unused)
+{
+ struct annotated_data_type *he_type = he->mem_type;
+ char buf[4096];
+
+ if (he_type == &unknown_type || he_type == &stackop_type ||
+ he_type == &canary_type)
+ return repsep_snprintf(bf, size, "%s", he_type->self.type_name);
+
+ if (!annotated_data_type__get_member_name(he_type, buf, sizeof(buf),
+ he->mem_type_off))
+ scnprintf(buf, sizeof(buf), "no field");
+
+ return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
+ he->mem_type_off, buf);
+}
+
+struct sort_entry sort_type_offset = {
+ .se_header = "Data Type Offset",
+ .se_cmp = sort__type_cmp,
+ .se_collapse = sort__typeoff_sort,
+ .se_sort = sort__typeoff_sort,
+ .se_init = sort__type_init,
+ .se_snprintf = hist_entry__typeoff_snprintf,
+ .se_width_idx = HISTC_TYPE_OFFSET,
+};
+
+/* --sort typecln */
+
+/* TODO: use actual value in the system */
+#define TYPE_CACHELINE_SIZE 64
+
+static int64_t
+sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ struct annotated_data_type *left_type = left->mem_type;
+ struct annotated_data_type *right_type = right->mem_type;
+ int64_t left_cln, right_cln;
+ int64_t ret;
+
+ if (!left_type) {
+ sort__type_init(left);
+ left_type = left->mem_type;
+ }
+
+ if (!right_type) {
+ sort__type_init(right);
+ right_type = right->mem_type;
+ }
+
+ ret = strcmp(left_type->self.type_name, right_type->self.type_name);
+ if (ret)
+ return ret;
+
+ left_cln = left->mem_type_off / TYPE_CACHELINE_SIZE;
+ right_cln = right->mem_type_off / TYPE_CACHELINE_SIZE;
+ return left_cln - right_cln;
+}
+
+static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width __maybe_unused)
+{
+ struct annotated_data_type *he_type = he->mem_type;
+
+ return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
+ he->mem_type_off / TYPE_CACHELINE_SIZE);
+}
+
+struct sort_entry sort_type_cacheline = {
+ .se_header = "Data Type Cacheline",
+ .se_cmp = sort__type_cmp,
+ .se_collapse = sort__typecln_sort,
+ .se_sort = sort__typecln_sort,
+ .se_init = sort__type_init,
+ .se_snprintf = hist_entry__typecln_snprintf,
+ .se_width_idx = HISTC_TYPE_CACHELINE,
+};
+
struct sort_dimension {
const char *name;
@@ -2087,25 +2530,51 @@ struct sort_dimension {
int taken;
};
-int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
+static int arch_support_sort_key(const char *sort_key, struct perf_env *env)
{
+ const char *arch = perf_env__arch(env);
+
+ if (!strcmp("x86", arch) || !strcmp("powerpc", arch)) {
+ if (!strcmp(sort_key, "p_stage_cyc"))
+ return 1;
+ if (!strcmp(sort_key, "local_p_stage_cyc"))
+ return 1;
+ }
return 0;
}
-const char * __weak arch_perf_header_entry(const char *se_header)
-{
+static const char *arch_perf_header_entry(const char *se_header, struct perf_env *env)
+{
+ const char *arch = perf_env__arch(env);
+
+ if (!strcmp("x86", arch)) {
+ if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
+ return "Local Retire Latency";
+ else if (!strcmp(se_header, "Pipeline Stage Cycle"))
+ return "Retire Latency";
+ } else if (!strcmp("powerpc", arch)) {
+ if (!strcmp(se_header, "Local INSTR Latency"))
+ return "Finish Cyc";
+ else if (!strcmp(se_header, "INSTR Latency"))
+ return "Global Finish_cyc";
+ else if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
+ return "Dispatch Cyc";
+ else if (!strcmp(se_header, "Pipeline Stage Cycle"))
+ return "Global Dispatch_cyc";
+ }
return se_header;
}
-static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
+static void sort_dimension_add_dynamic_header(struct sort_dimension *sd, struct perf_env *env)
{
- sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
+ sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header, env);
}
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_PID, "pid", sort_thread),
+ DIM(SORT_TGID, "tgid", sort_tgid),
DIM(SORT_COMM, "comm", sort_comm),
DIM(SORT_DSO, "dso", sort_dso),
DIM(SORT_SYM, "symbol", sort_sym),
@@ -2132,6 +2601,14 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
DIM(SORT_ADDR, "addr", sort_addr),
+ DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
+ DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
+ DIM(SORT_SIMD, "simd", sort_simd),
+ DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
+ DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
+ DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
+ DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
+ DIM(SORT_PARALLELISM, "parallelism", sort_parallelism),
};
#undef DIM
@@ -2152,6 +2629,15 @@ static struct sort_dimension bstack_sort_dimensions[] = {
DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
+ DIM(SORT_CALLCHAIN_BRANCH_PREDICTED,
+ "callchain_branch_predicted",
+ sort_callchain_branch_predicted),
+ DIM(SORT_CALLCHAIN_BRANCH_ABORT,
+ "callchain_branch_abort",
+ sort_callchain_branch_abort),
+ DIM(SORT_CALLCHAIN_BRANCH_CYCLES,
+ "callchain_branch_cycles",
+ sort_callchain_branch_cycles)
};
#undef DIM
@@ -2178,21 +2664,40 @@ struct hpp_dimension {
const char *name;
struct perf_hpp_fmt *fmt;
int taken;
+ int was_taken;
+ int mem_mode;
};
#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
+#define DIM_MEM(d, n) { .name = n, .fmt = &perf_hpp__format[d], .mem_mode = 1, }
static struct hpp_dimension hpp_sort_dimensions[] = {
DIM(PERF_HPP__OVERHEAD, "overhead"),
+ DIM(PERF_HPP__LATENCY, "latency"),
DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
+ DIM(PERF_HPP__LATENCY_ACC, "latency_children"),
DIM(PERF_HPP__SAMPLES, "sample"),
DIM(PERF_HPP__PERIOD, "period"),
+ DIM(PERF_HPP__WEIGHT1, "weight1"),
+ DIM(PERF_HPP__WEIGHT2, "weight2"),
+ DIM(PERF_HPP__WEIGHT3, "weight3"),
+ /* aliases for weight_struct */
+ DIM(PERF_HPP__WEIGHT2, "ins_lat"),
+ DIM(PERF_HPP__WEIGHT3, "retire_lat"),
+ DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
+ /* used for output only when SORT_MODE__MEM */
+ DIM_MEM(PERF_HPP__MEM_STAT_OP, "op"),
+ DIM_MEM(PERF_HPP__MEM_STAT_CACHE, "cache"),
+ DIM_MEM(PERF_HPP__MEM_STAT_MEMORY, "memory"),
+ DIM_MEM(PERF_HPP__MEM_STAT_SNOOP, "snoop"),
+ DIM_MEM(PERF_HPP__MEM_STAT_DTLB, "dtlb"),
};
+#undef DIM_MEM
#undef DIM
struct hpp_sort_entry {
@@ -2212,18 +2717,22 @@ void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
}
static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
- struct hists *hists, int line __maybe_unused,
+ struct hists *hists, int line,
int *span __maybe_unused)
{
struct hpp_sort_entry *hse;
size_t len = fmt->user_len;
+ const char *hdr = "";
+
+ if (line == hists->hpp_list->nr_header_lines - 1)
+ hdr = fmt->name;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
if (!len)
len = hists__col_len(hists, hse->se->se_width_idx);
- return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
+ return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, hdr);
}
static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
@@ -2317,6 +2826,7 @@ MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)
+MK_SORT_ENTRY_CHK(parallelism)
static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
@@ -2454,9 +2964,10 @@ static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
}
static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
- struct perf_hpp_list *list)
+ struct perf_hpp_list *list,
+ int level)
{
- struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
+ struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
if (hse == NULL)
return -1;
@@ -2667,10 +3178,8 @@ static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
tep_read_number_field(field, a->raw_data, &dyn);
offset = dyn & 0xffff;
size = (dyn >> 16) & 0xffff;
-#ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
- if (field->flags & TEP_FIELD_IS_RELATIVE)
+ if (tep_field_is_relative(field->flags))
offset += field->offset + field->size;
-#endif
/* record max width for output */
if (size > hde->dynamic_len)
hde->dynamic_len = size;
@@ -2843,7 +3352,7 @@ static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
full_name = !!strchr(event_name, ':');
evlist__for_each_entry(evlist, pos) {
/* case 2 */
- if (full_name && !strcmp(pos->name, event_name))
+ if (full_name && evsel__name_is(pos, event_name))
return pos;
/* case 3 */
if (!full_name && strstr(pos->name, event_name)) {
@@ -2879,9 +3388,8 @@ static int __dynamic_dimension__add(struct evsel *evsel,
static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
int ret;
- struct tep_format_field *field;
-
- field = evsel->tp_format->format.fields;
+ struct tep_event *tp_format = evsel__tp_format(evsel);
+ struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
while (field) {
ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
if (ret < 0)
@@ -2914,13 +3422,19 @@ static int add_all_matching_fields(struct evlist *evlist,
{
int ret = -ESRCH;
struct evsel *evsel;
- struct tep_format_field *field;
evlist__for_each_entry(evlist, evsel) {
+ struct tep_event *tp_format;
+ struct tep_format_field *field;
+
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
continue;
- field = tep_find_any_field(evsel->tp_format, field_name);
+ tp_format = evsel__tp_format(evsel);
+ if (tp_format == NULL)
+ continue;
+
+ field = tep_find_any_field(tp_format, field_name);
if (field == NULL)
continue;
@@ -3002,7 +3516,9 @@ static int add_dynamic_entry(struct evlist *evlist, const char *tok,
if (!strcmp(field_name, "*")) {
ret = add_evsel_fields(evsel, raw_trace, level);
} else {
- struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
+ struct tep_event *tp_format = evsel__tp_format(evsel);
+ struct tep_format_field *field =
+ tp_format ? tep_find_any_field(tp_format, field_name) : NULL;
if (field == NULL) {
pr_debug("Cannot find event field for %s.%s\n",
@@ -3054,17 +3570,19 @@ static int __hpp_dimension__add(struct hpp_dimension *hd,
return -1;
hd->taken = 1;
+ hd->was_taken = 1;
perf_hpp_list__register_sort_field(list, fmt);
return 0;
}
static int __sort_dimension__add_output(struct perf_hpp_list *list,
- struct sort_dimension *sd)
+ struct sort_dimension *sd,
+ int level)
{
if (sd->taken)
return 0;
- if (__sort_dimension__add_hpp_output(sd, list) < 0)
+ if (__sort_dimension__add_hpp_output(sd, list, level) < 0)
return -1;
sd->taken = 1;
@@ -3072,14 +3590,15 @@ static int __sort_dimension__add_output(struct perf_hpp_list *list,
}
static int __hpp_dimension__add_output(struct perf_hpp_list *list,
- struct hpp_dimension *hd)
+ struct hpp_dimension *hd,
+ int level)
{
struct perf_hpp_fmt *fmt;
if (hd->taken)
return 0;
- fmt = __hpp_dimension__alloc_hpp(hd, 0);
+ fmt = __hpp_dimension__alloc_hpp(hd, level);
if (!fmt)
return -1;
@@ -3088,14 +3607,19 @@ static int __hpp_dimension__add_output(struct perf_hpp_list *list,
return 0;
}
-int hpp_dimension__add_output(unsigned col)
+int hpp_dimension__add_output(unsigned col, bool implicit)
{
+ struct hpp_dimension *hd;
+
BUG_ON(col >= PERF_HPP__MAX_INDEX);
- return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
+ hd = &hpp_sort_dimensions[col];
+ if (implicit && !hd->was_taken)
+ return 0;
+ return __hpp_dimension__add_output(&perf_hpp_list, hd, /*level=*/0);
}
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
- struct evlist *evlist,
+ struct evlist *evlist, struct perf_env *env,
int level)
{
unsigned int i, j;
@@ -3108,7 +3632,7 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
*/
for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
if (!strcmp(arch_specific_sort_keys[j], tok) &&
- !arch_support_sort_key(tok)) {
+ !arch_support_sort_key(tok, env)) {
return 0;
}
}
@@ -3121,10 +3645,10 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
if (sd->name && !strcmp(dynamic_headers[j], sd->name))
- sort_dimension_add_dynamic_header(sd);
+ sort_dimension_add_dynamic_header(sd, env);
}
- if (sd->entry == &sort_parent) {
+ if (sd->entry == &sort_parent && parent_pattern) {
int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
if (ret) {
char err[BUFSIZ];
@@ -3153,27 +3677,26 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
list->thread = 1;
} else if (sd->entry == &sort_comm) {
list->comm = 1;
+ } else if (sd->entry == &sort_type_offset) {
+ symbol_conf.annotate_data_member = true;
}
return __sort_dimension__add(sd, list, level);
}
- for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
- struct hpp_dimension *hd = &hpp_sort_dimensions[i];
-
- if (strncasecmp(tok, hd->name, strlen(tok)))
- continue;
-
- return __hpp_dimension__add(hd, list, level);
- }
-
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
struct sort_dimension *sd = &bstack_sort_dimensions[i];
if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
continue;
- if (sort__mode != SORT_MODE__BRANCH)
+ if ((sort__mode != SORT_MODE__BRANCH) &&
+ strncasecmp(tok, "callchain_branch_predicted",
+ strlen(tok)) &&
+ strncasecmp(tok, "callchain_branch_abort",
+ strlen(tok)) &&
+ strncasecmp(tok, "callchain_branch_cycles",
+ strlen(tok)))
return -EINVAL;
if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
@@ -3202,20 +3725,59 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
return 0;
}
+ for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
+ struct hpp_dimension *hd = &hpp_sort_dimensions[i];
+
+ if (strncasecmp(tok, hd->name, strlen(tok)))
+ continue;
+
+ return __hpp_dimension__add(hd, list, level);
+ }
+
if (!add_dynamic_entry(evlist, tok, level))
return 0;
return -ESRCH;
}
+/* This should match with sort_dimension__add() above */
+static bool is_hpp_sort_key(const char *key, struct perf_env *env)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(arch_specific_sort_keys); i++) {
+ if (!strcmp(arch_specific_sort_keys[i], key) &&
+ !arch_support_sort_key(key, env)) {
+ return false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
+ struct sort_dimension *sd = &common_sort_dimensions[i];
+
+ if (sd->name && !strncasecmp(key, sd->name, strlen(key)))
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
+ struct hpp_dimension *hd = &hpp_sort_dimensions[i];
+
+ if (!strncasecmp(key, hd->name, strlen(key)))
+ return true;
+ }
+ return false;
+}
+
static int setup_sort_list(struct perf_hpp_list *list, char *str,
- struct evlist *evlist)
+ struct evlist *evlist, struct perf_env *env)
{
char *tmp, *tok;
int ret = 0;
int level = 0;
int next_level = 1;
+ int prev_level = 0;
bool in_group = false;
+ bool prev_was_hpp = false;
do {
tok = str;
@@ -3236,7 +3798,20 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
}
if (*tok) {
- ret = sort_dimension__add(list, tok, evlist, level);
+ if (is_hpp_sort_key(tok, env)) {
+ /* keep output (hpp) sort keys in the same level */
+ if (prev_was_hpp) {
+ bool next_same = (level == next_level);
+
+ level = prev_level;
+ next_level = next_same ? level : level+1;
+ }
+ prev_was_hpp = true;
+ } else {
+ prev_was_hpp = false;
+ }
+
+ ret = sort_dimension__add(list, tok, evlist, env, level);
if (ret == -EINVAL) {
if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
@@ -3247,6 +3822,7 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
ui__error("Unknown --sort key: `%s'", tok);
break;
}
+ prev_level = level;
}
level = next_level;
@@ -3342,15 +3918,29 @@ static char *setup_overhead(char *keys)
if (sort__mode == SORT_MODE__DIFF)
return keys;
- keys = prefix_if_not_in("overhead", keys);
-
- if (symbol_conf.cumulate_callchain)
- keys = prefix_if_not_in("overhead_children", keys);
+ if (symbol_conf.prefer_latency) {
+ keys = prefix_if_not_in("overhead", keys);
+ keys = prefix_if_not_in("latency", keys);
+ if (symbol_conf.cumulate_callchain) {
+ keys = prefix_if_not_in("overhead_children", keys);
+ keys = prefix_if_not_in("latency_children", keys);
+ }
+ } else if (!keys || (!strstr(keys, "overhead") &&
+ !strstr(keys, "latency"))) {
+ if (symbol_conf.enable_latency)
+ keys = prefix_if_not_in("latency", keys);
+ keys = prefix_if_not_in("overhead", keys);
+ if (symbol_conf.cumulate_callchain) {
+ if (symbol_conf.enable_latency)
+ keys = prefix_if_not_in("latency_children", keys);
+ keys = prefix_if_not_in("overhead_children", keys);
+ }
+ }
return keys;
}
-static int __setup_sorting(struct evlist *evlist)
+static int __setup_sorting(struct evlist *evlist, struct perf_env *env)
{
char *str;
const char *sort_keys;
@@ -3390,7 +3980,7 @@ static int __setup_sorting(struct evlist *evlist)
}
}
- ret = setup_sort_list(&perf_hpp_list, str, evlist);
+ ret = setup_sort_list(&perf_hpp_list, str, evlist, env);
free(str);
return ret;
@@ -3493,26 +4083,38 @@ void sort__setup_elide(FILE *output)
}
}
-int output_field_add(struct perf_hpp_list *list, char *tok)
+int output_field_add(struct perf_hpp_list *list, const char *tok, int *level)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
- struct sort_dimension *sd = &common_sort_dimensions[i];
+ for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
+ struct hpp_dimension *hd = &hpp_sort_dimensions[i];
- if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
+ if (strncasecmp(tok, hd->name, strlen(tok)))
+ continue;
+
+ if (!strcasecmp(tok, "weight"))
+ ui__warning("--fields weight shows the average value unlike in the --sort key.\n");
+
+ if (hd->mem_mode && sort__mode != SORT_MODE__MEMORY)
continue;
- return __sort_dimension__add_output(list, sd);
+ return __hpp_dimension__add_output(list, hd, *level);
}
- for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
- struct hpp_dimension *hd = &hpp_sort_dimensions[i];
+ /*
+ * A non-output field will increase level so that it can be in a
+ * different hierarchy.
+ */
+ (*level)++;
- if (strncasecmp(tok, hd->name, strlen(tok)))
+ for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
+ struct sort_dimension *sd = &common_sort_dimensions[i];
+
+ if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
continue;
- return __hpp_dimension__add_output(list, hd);
+ return __sort_dimension__add_output(list, sd, *level);
}
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
@@ -3524,7 +4126,7 @@ int output_field_add(struct perf_hpp_list *list, char *tok)
if (sort__mode != SORT_MODE__BRANCH)
return -EINVAL;
- return __sort_dimension__add_output(list, sd);
+ return __sort_dimension__add_output(list, sd, *level);
}
for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
@@ -3536,7 +4138,7 @@ int output_field_add(struct perf_hpp_list *list, char *tok)
if (sort__mode != SORT_MODE__MEMORY)
return -EINVAL;
- return __sort_dimension__add_output(list, sd);
+ return __sort_dimension__add_output(list, sd, *level);
}
return -ESRCH;
@@ -3546,10 +4148,11 @@ static int setup_output_list(struct perf_hpp_list *list, char *str)
{
char *tmp, *tok;
int ret = 0;
+ int level = 0;
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
- ret = output_field_add(list, tok);
+ ret = output_field_add(list, tok, &level);
if (ret == -EINVAL) {
ui__error("Invalid --fields key: `%s'", tok);
break;
@@ -3613,16 +4216,16 @@ out:
return ret;
}
-int setup_sorting(struct evlist *evlist)
+int setup_sorting(struct evlist *evlist, struct perf_env *env)
{
int err;
- err = __setup_sorting(evlist);
+ err = __setup_sorting(evlist, env);
if (err < 0)
return err;
if (parent_pattern != default_parent_pattern) {
- err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
+ err = sort_dimension__add(&perf_hpp_list, "parent", evlist, env, -1);
if (err < 0)
return err;
}
@@ -3639,6 +4242,10 @@ int setup_sorting(struct evlist *evlist)
if (err < 0)
return err;
+ err = perf_hpp__alloc_mem_stats(&perf_hpp_list, evlist);
+ if (err < 0)
+ return err;
+
/* copy sort keys to output fields */
perf_hpp__setup_output_field(&perf_hpp_list);
/* and then copy output fields to sort keys */
@@ -3698,7 +4305,7 @@ static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int
add_key(sb, s[i].name, llen);
}
-char *sort_help(const char *prefix)
+char *sort_help(const char *prefix, enum sort_mode mode)
{
struct strbuf sb;
char *s;
@@ -3710,10 +4317,12 @@ char *sort_help(const char *prefix)
ARRAY_SIZE(hpp_sort_dimensions), &len);
add_sort_string(&sb, common_sort_dimensions,
ARRAY_SIZE(common_sort_dimensions), &len);
- add_sort_string(&sb, bstack_sort_dimensions,
- ARRAY_SIZE(bstack_sort_dimensions), &len);
- add_sort_string(&sb, memory_sort_dimensions,
- ARRAY_SIZE(memory_sort_dimensions), &len);
+ if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH)
+ add_sort_string(&sb, bstack_sort_dimensions,
+ ARRAY_SIZE(bstack_sort_dimensions), &len);
+ if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY)
+ add_sort_string(&sb, memory_sort_dimensions,
+ ARRAY_SIZE(memory_sort_dimensions), &len);
s = strbuf_detach(&sb, NULL);
strbuf_release(&sb);
return s;