Diffstat (limited to 'tools/perf/util/stat-shadow.c')
-rw-r--r--	tools/perf/util/stat-shadow.c	1411
1 file changed, 211 insertions(+), 1200 deletions(-)
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index cadb2df23c87..9c83f7d96caa 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
 #include <math.h>
 #include <stdio.h>
 #include "evsel.h"
@@ -15,867 +16,99 @@
 #include <linux/zalloc.h>
 #include "iostat.h"
 #include "util/hashmap.h"
+#include "tool_pmu.h"
-/*
- * AGGR_GLOBAL: Use CPU 0
- * AGGR_SOCKET: Use first CPU of socket
- * AGGR_DIE: Use first CPU of die
- * AGGR_CORE: Use first CPU of core
- * AGGR_NONE: Use matching CPU
- * AGGR_THREAD: Not supported?
- */
-
-struct runtime_stat rt_stat;
-struct stats walltime_nsecs_stats;
-struct rusage_stats ru_stats;
-
-struct saved_value {
-	struct rb_node rb_node;
-	struct evsel *evsel;
-	enum stat_type type;
-	int ctx;
-	int map_idx;	/* cpu or thread map index */
-	struct cgroup *cgrp;
-	struct stats stats;
-	u64 metric_total;
-	int metric_other;
-};
-
-static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
-{
-	struct saved_value *a = container_of(rb_node,
-					     struct saved_value,
-					     rb_node);
-	const struct saved_value *b = entry;
-
-	if (a->map_idx != b->map_idx)
-		return a->map_idx - b->map_idx;
-
-	/*
-	 * Previously the rbtree was used to link generic metrics.
-	 * The keys were evsel/cpu. Now the rbtree is extended to support
-	 * per-thread shadow stats. For shadow stats case, the keys
-	 * are cpu/type/ctx/stat (evsel is NULL). For generic metrics
-	 * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
-	 */
-	if (a->type != b->type)
-		return a->type - b->type;
-
-	if (a->ctx != b->ctx)
-		return a->ctx - b->ctx;
-
-	if (a->cgrp != b->cgrp)
-		return (char *)a->cgrp < (char *)b->cgrp ? -1 : +1;
-
-	if (a->evsel == b->evsel)
-		return 0;
-	if ((char *)a->evsel < (char *)b->evsel)
-		return -1;
-	return +1;
-}
-
-static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
-				       const void *entry)
-{
-	struct saved_value *nd = malloc(sizeof(struct saved_value));
-
-	if (!nd)
-		return NULL;
-	memcpy(nd, entry, sizeof(struct saved_value));
-	return &nd->rb_node;
-}
-
-static void saved_value_delete(struct rblist *rblist __maybe_unused,
-			       struct rb_node *rb_node)
-{
-	struct saved_value *v;
-
-	BUG_ON(!rb_node);
-	v = container_of(rb_node, struct saved_value, rb_node);
-	free(v);
-}
-
-static struct saved_value *saved_value_lookup(struct evsel *evsel,
-					      int map_idx,
-					      bool create,
-					      enum stat_type type,
-					      int ctx,
-					      struct runtime_stat *st,
-					      struct cgroup *cgrp)
-{
-	struct rblist *rblist;
-	struct rb_node *nd;
-	struct saved_value dm = {
-		.map_idx = map_idx,
-		.evsel = evsel,
-		.type = type,
-		.ctx = ctx,
-		.cgrp = cgrp,
-	};
-
-	rblist = &st->value_list;
-
-	/* don't use context info for clock events */
-	if (type == STAT_NSECS)
-		dm.ctx = 0;
-
-	nd = rblist__find(rblist, &dm);
-	if (nd)
-		return container_of(nd, struct saved_value, rb_node);
-	if (create) {
-		rblist__add_node(rblist, &dm);
-		nd = rblist__find(rblist, &dm);
-		if (nd)
-			return container_of(nd, struct saved_value, rb_node);
-	}
-	return NULL;
-}
-
-void runtime_stat__init(struct runtime_stat *st)
-{
-	struct rblist *rblist = &st->value_list;
-
-	rblist__init(rblist);
-	rblist->node_cmp = saved_value_cmp;
-	rblist->node_new = saved_value_new;
-	rblist->node_delete = saved_value_delete;
-}
-
-void runtime_stat__exit(struct runtime_stat *st)
-{
-	rblist__exit(&st->value_list);
-}
-
-void perf_stat__init_shadow_stats(void)
-{
-	runtime_stat__init(&rt_stat);
-}
-
-static int evsel_context(struct evsel *evsel)
-{
-	int ctx = 0;
-
-	if (evsel->core.attr.exclude_kernel)
-		ctx |= CTX_BIT_KERNEL;
-	if (evsel->core.attr.exclude_user)
-		ctx |= CTX_BIT_USER;
-	if (evsel->core.attr.exclude_hv)
-		ctx |= CTX_BIT_HV;
-	if (evsel->core.attr.exclude_host)
-		ctx |= CTX_BIT_HOST;
-	if (evsel->core.attr.exclude_idle)
-		ctx |= CTX_BIT_IDLE;
-
-	return ctx;
-}
-
-static void reset_stat(struct runtime_stat *st)
-{
-	struct rblist *rblist;
-	struct rb_node *pos, *next;
-
-	rblist = &st->value_list;
-	next = rb_first_cached(&rblist->entries);
-	while (next) {
-		pos = next;
-		next = rb_next(pos);
-		memset(&container_of(pos, struct saved_value, rb_node)->stats,
-		       0,
-		       sizeof(struct stats));
-	}
-}
-
-void perf_stat__reset_shadow_stats(void)
-{
-	reset_stat(&rt_stat);
-	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
-	memset(&ru_stats, 0, sizeof(ru_stats));
-}
-
-void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
-{
-	reset_stat(st);
-}
-
-struct runtime_stat_data {
-	int ctx;
-	struct cgroup *cgrp;
-};
-
-static void update_runtime_stat(struct runtime_stat *st,
-				enum stat_type type,
-				int map_idx, u64 count,
-				struct runtime_stat_data *rsd)
-{
-	struct saved_value *v = saved_value_lookup(NULL, map_idx, true, type,
-						   rsd->ctx, st, rsd->cgrp);
-
-	if (v)
-		update_stats(&v->stats, count);
-}
-
-/*
- * Update various tracking values we maintain to print
- * more semantic information such as miss/hit ratios,
- * instruction rates, etc:
- */
-void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
-				    int map_idx, struct runtime_stat *st)
-{
-	u64 count_ns = count;
-	struct saved_value *v;
-	struct runtime_stat_data rsd = {
-		.ctx = evsel_context(counter),
-		.cgrp = counter->cgrp,
-	};
-
-	count *= counter->scale;
-
-	if (evsel__is_clock(counter))
-		update_runtime_stat(st, STAT_NSECS, map_idx, count_ns, &rsd);
-	else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
-		update_runtime_stat(st, STAT_CYCLES, map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
-		update_runtime_stat(st, STAT_CYCLES_IN_TX, map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
-		update_runtime_stat(st, STAT_TRANSACTION, map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, ELISION_START))
-		update_runtime_stat(st, STAT_ELISION, map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
-		update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
-		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
-		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
-		update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
-		update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING))
-		update_runtime_stat(st, STAT_TOPDOWN_RETIRING,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC))
-		update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND))
-		update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND))
-		update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_HEAVY_OPS))
-		update_runtime_stat(st, STAT_TOPDOWN_HEAVY_OPS,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_BR_MISPREDICT))
-		update_runtime_stat(st, STAT_TOPDOWN_BR_MISPREDICT,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_LAT))
-		update_runtime_stat(st, STAT_TOPDOWN_FETCH_LAT,
-				    map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, TOPDOWN_MEM_BOUND))
-		update_runtime_stat(st, STAT_TOPDOWN_MEM_BOUND,
-				    map_idx, count, &rsd);
-	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
-		update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
-				    map_idx, count, &rsd);
-	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
-		update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
-				    map_idx, count, &rsd);
-	else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
-		update_runtime_stat(st, STAT_BRANCHES, map_idx, count, &rsd);
-	else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
-		update_runtime_stat(st, STAT_CACHEREFS, map_idx, count, &rsd);
-	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
-		update_runtime_stat(st, STAT_L1_DCACHE, map_idx, count, &rsd);
-	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
-		update_runtime_stat(st, STAT_L1_ICACHE, map_idx, count, &rsd);
-	else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
-		update_runtime_stat(st, STAT_LL_CACHE, map_idx, count, &rsd);
-	else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
-		update_runtime_stat(st, STAT_DTLB_CACHE, map_idx, count, &rsd);
-	else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
-		update_runtime_stat(st, STAT_ITLB_CACHE, map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, SMI_NUM))
-		update_runtime_stat(st, STAT_SMI_NUM, map_idx, count, &rsd);
-	else if (perf_stat_evsel__is(counter, APERF))
-		update_runtime_stat(st, STAT_APERF, map_idx, count, &rsd);
-
-	if (counter->collect_stat) {
-		v = saved_value_lookup(counter, map_idx, true, STAT_NONE, 0, st,
-				       rsd.cgrp);
-		update_stats(&v->stats, count);
-		if (counter->metric_leader)
-			v->metric_total += count;
-	} else if (counter->metric_leader) {
-		v = saved_value_lookup(counter->metric_leader,
-				       map_idx, true, STAT_NONE, 0, st, rsd.cgrp);
-		v->metric_total += count;
-		v->metric_other++;
-	}
-}
-
-/* used for get_ratio_color() */
-enum grc_type {
-	GRC_STALLED_CYCLES_FE,
-	GRC_STALLED_CYCLES_BE,
-	GRC_CACHE_MISSES,
-	GRC_MAX_NR
-};
-
-static const char *get_ratio_color(enum grc_type type, double ratio)
-{
-	static const double grc_table[GRC_MAX_NR][3] = {
-		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
-		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
-		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
-	};
-	const char *color = PERF_COLOR_NORMAL;
-
-	if (ratio > grc_table[type][0])
-		color = PERF_COLOR_RED;
-	else if (ratio > grc_table[type][1])
-		color = PERF_COLOR_MAGENTA;
-	else if (ratio > grc_table[type][2])
-		color = PERF_COLOR_YELLOW;
-
-	return color;
-}
-
-static struct evsel *perf_stat__find_event(struct evlist *evsel_list,
-					   const char *name)
-{
-	struct evsel *c2;
-
-	evlist__for_each_entry (evsel_list, c2) {
-		if (!strcasecmp(c2->name, name) && !c2->collect_stat)
-			return c2;
-	}
-	return NULL;
-}
-
-/* Mark MetricExpr target events and link events using them to them. */
-void perf_stat__collect_metric_expr(struct evlist *evsel_list)
+static bool tool_pmu__is_time_event(const struct perf_stat_config *config,
+				    const struct evsel *evsel, int *tool_aggr_idx)
 {
-	struct evsel *counter, *leader, **metric_events, *oc;
-	bool found;
-	struct expr_parse_ctx *ctx;
-	struct hashmap_entry *cur;
-	size_t bkt;
-	int i;
-
-	ctx = expr__ctx_new();
-	if (!ctx) {
-		pr_debug("expr__ctx_new failed");
-		return;
-	}
-	evlist__for_each_entry(evsel_list, counter) {
-		bool invalid = false;
-
-		leader = evsel__leader(counter);
-		if (!counter->metric_expr)
-			continue;
+	enum tool_pmu_event event = evsel__tool_event(evsel);
+	int aggr_idx;
 
-		expr__ctx_clear(ctx);
-		metric_events = counter->metric_events;
-		if (!metric_events) {
-			if (expr__find_ids(counter->metric_expr,
-					   counter->name,
-					   ctx) < 0)
-				continue;
+	if (event != TOOL_PMU__EVENT_DURATION_TIME &&
+	    event != TOOL_PMU__EVENT_USER_TIME &&
+	    event != TOOL_PMU__EVENT_SYSTEM_TIME)
+		return false;
 
-			metric_events = calloc(sizeof(struct evsel *),
-					       hashmap__size(ctx->ids) + 1);
-			if (!metric_events) {
-				expr__ctx_free(ctx);
-				return;
+	if (config) {
+		cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
+			if (config->aggr_map->map[aggr_idx].cpu.cpu == 0) {
+				*tool_aggr_idx = aggr_idx;
+				return true;
 			}
-			counter->metric_events = metric_events;
-		}
-
-		i = 0;
-		hashmap__for_each_entry(ctx->ids, cur, bkt) {
-			const char *metric_name = cur->pkey;
-
-			found = false;
-			if (leader) {
-				/* Search in group */
-				for_each_group_member (oc, leader) {
-					if (!strcasecmp(oc->name,
-							metric_name) &&
-						!oc->collect_stat) {
-						found = true;
-						break;
-					}
-				}
-			}
-			if (!found) {
-				/* Search ignoring groups */
-				oc = perf_stat__find_event(evsel_list,
-							   metric_name);
-			}
-			if (!oc) {
-				/* Deduping one is good enough to handle duplicated PMUs. */
-				static char *printed;
-
-				/*
-				 * Adding events automatically would be difficult, because
-				 * it would risk creating groups that are not schedulable.
-				 * perf stat doesn't understand all the scheduling constraints
-				 * of events. So we ask the user instead to add the missing
-				 * events.
-				 */
-				if (!printed ||
-				    strcasecmp(printed, metric_name)) {
-					fprintf(stderr,
-						"Add %s event to groups to get metric expression for %s\n",
-						metric_name,
-						counter->name);
-					free(printed);
-					printed = strdup(metric_name);
-				}
-				invalid = true;
-				continue;
-			}
-			metric_events[i++] = oc;
-			oc->collect_stat = true;
-		}
-		metric_events[i] = NULL;
-		if (invalid) {
-			free(metric_events);
-			counter->metric_events = NULL;
-			counter->metric_expr = NULL;
 		}
+		pr_debug("Unexpected CPU0 missing in aggregation for tool event.\n");
 	}
-	expr__ctx_free(ctx);
-}
-
-static double runtime_stat_avg(struct runtime_stat *st,
-			       enum stat_type type, int map_idx,
-			       struct runtime_stat_data *rsd)
-{
-	struct saved_value *v;
-
-	v = saved_value_lookup(NULL, map_idx, false, type, rsd->ctx, st, rsd->cgrp);
-	if (!v)
-		return 0.0;
-
-	return avg_stats(&v->stats);
-}
-
-static double runtime_stat_n(struct runtime_stat *st,
-			     enum stat_type type, int map_idx,
-			     struct runtime_stat_data *rsd)
-{
-	struct saved_value *v;
-
-	v = saved_value_lookup(NULL, map_idx, false, type, rsd->ctx, st, rsd->cgrp);
-	if (!v)
-		return 0.0;
-
-	return v->stats.n;
-}
-
-static void print_stalled_cycles_frontend(struct perf_stat_config *config,
-					  int map_idx, double avg,
-					  struct perf_stat_output_ctx *out,
-					  struct runtime_stat *st,
-					  struct runtime_stat_data *rsd)
-{
-	double total, ratio = 0.0;
-	const char *color;
-
-	total = runtime_stat_avg(st, STAT_CYCLES, map_idx, rsd);
-
-	if (total)
-		ratio = avg / total * 100.0;
-
-	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
-
-	if (ratio)
-		out->print_metric(config, out->ctx, color, "%7.2f%%", "frontend cycles idle",
-				  ratio);
-	else
-		out->print_metric(config, out->ctx, NULL, NULL, "frontend cycles idle", 0);
-}
-
-static void print_stalled_cycles_backend(struct perf_stat_config *config,
-					 int map_idx, double avg,
-					 struct perf_stat_output_ctx *out,
-					 struct runtime_stat *st,
-					 struct runtime_stat_data *rsd)
-{
-	double total, ratio = 0.0;
-	const char *color;
-
-	total = runtime_stat_avg(st, STAT_CYCLES, map_idx, rsd);
-
-	if (total)
-		ratio = avg / total * 100.0;
-
-	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
-
-	out->print_metric(config, out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
-}
-
-static void print_branch_misses(struct perf_stat_config *config,
-				int map_idx, double avg,
-				struct perf_stat_output_ctx *out,
-				struct runtime_stat *st,
-				struct runtime_stat_data *rsd)
-{
-	double total, ratio = 0.0;
-	const char *color;
-
-	total = runtime_stat_avg(st, STAT_BRANCHES, map_idx, rsd);
-
-	if (total)
-		ratio = avg / total * 100.0;
-
-	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
-	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all branches", ratio);
-}
-
-static void print_l1_dcache_misses(struct perf_stat_config *config,
-				   int map_idx, double avg,
-				   struct perf_stat_output_ctx *out,
-				   struct runtime_stat *st,
-				   struct runtime_stat_data *rsd)
-{
-	double total, ratio = 0.0;
-	const char *color;
-
-	total = runtime_stat_avg(st, STAT_L1_DCACHE, map_idx, rsd);
-
-	if (total)
-		ratio = avg / total * 100.0;
-
-	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
-	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache accesses", ratio);
-}
-
-static void print_l1_icache_misses(struct perf_stat_config *config,
-				   int map_idx, double avg,
-				   struct perf_stat_output_ctx *out,
-				   struct runtime_stat *st,
-				   struct runtime_stat_data *rsd)
-{
-	double total, ratio = 0.0;
-	const char *color;
-
-	total = runtime_stat_avg(st, STAT_L1_ICACHE, map_idx, rsd);
-
-	if (total)
-		ratio = avg / total * 100.0;
-
-	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache accesses", ratio);
-}
-
-static void print_dtlb_cache_misses(struct perf_stat_config *config,
-				    int map_idx, double avg,
-				    struct perf_stat_output_ctx *out,
-				    struct runtime_stat *st,
-				    struct runtime_stat_data *rsd)
-{
-	double total, ratio = 0.0;
-	const char *color;
-
-	total = runtime_stat_avg(st, STAT_DTLB_CACHE, map_idx, rsd);
-
-	if (total)
-		ratio = avg / total * 100.0;
-
-	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache accesses", ratio);
-}
-
-static void print_itlb_cache_misses(struct perf_stat_config *config,
-				    int map_idx, double avg,
-				    struct perf_stat_output_ctx *out,
-				    struct runtime_stat *st,
-				    struct runtime_stat_data *rsd)
-{
-	double total, ratio = 0.0;
-	const char *color;
-
-	total = runtime_stat_avg(st, STAT_ITLB_CACHE, map_idx, rsd);
-
-	if (total)
-		ratio = avg / total * 100.0;
-
-	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache accesses", ratio);
-}
-
-static void print_ll_cache_misses(struct perf_stat_config *config,
-				  int map_idx, double avg,
-				  struct perf_stat_output_ctx *out,
-				  struct runtime_stat *st,
-				  struct runtime_stat_data *rsd)
-{
-	double total, ratio = 0.0;
-	const char *color;
-
-	total = runtime_stat_avg(st, STAT_LL_CACHE, map_idx, rsd);
-
-	if (total)
-		ratio = avg / total * 100.0;
-
-	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache accesses", ratio);
-}
-
-/*
- * High level "TopDown" CPU core pipe line bottleneck break down.
- *
- * Basic concept following
- * Yasin, A Top Down Method for Performance analysis and Counter architecture
- * ISPASS14
- *
- * The CPU pipeline is divided into 4 areas that can be bottlenecks:
- *
- * Frontend -> Backend -> Retiring
- * BadSpeculation in addition means out of order execution that is thrown away
- * (for example branch mispredictions)
- * Frontend is instruction decoding.
- * Backend is execution, like computation and accessing data in memory
- * Retiring is good execution that is not directly bottlenecked
- *
- * The formulas are computed in slots.
- * A slot is an entry in the pipeline each for the pipeline width
- * (for example a 4-wide pipeline has 4 slots for each cycle)
- *
- * Formulas:
- * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
- *			TotalSlots
- * Retiring = SlotsRetired / TotalSlots
- * FrontendBound = FetchBubbles / TotalSlots
- * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
- *
- * The kernel provides the mapping to the low level CPU events and any scaling
- * needed for the CPU pipeline width, for example:
- *
- * TotalSlots = Cycles * 4
- *
- * The scaling factor is communicated in the sysfs unit.
- *
- * In some cases the CPU may not be able to measure all the formulas due to
- * missing events. In this case multiple formulas are combined, as possible.
- *
- * Full TopDown supports more levels to sub-divide each area: for example
- * BackendBound into computing bound and memory bound. For now we only
- * support Level 1 TopDown.
- */
-
-static double sanitize_val(double x)
-{
-	if (x < 0 && x >= -0.02)
-		return 0.0;
-	return x;
-}
-
-static double td_total_slots(int map_idx, struct runtime_stat *st,
-			     struct runtime_stat_data *rsd)
-{
-	return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, map_idx, rsd);
-}
-
-static double td_bad_spec(int map_idx, struct runtime_stat *st,
-			  struct runtime_stat_data *rsd)
-{
-	double bad_spec = 0;
-	double total_slots;
-	double total;
-
-	total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, map_idx, rsd) -
-		runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, map_idx, rsd) +
-		runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, map_idx, rsd);
-
-	total_slots = td_total_slots(map_idx, st, rsd);
-	if (total_slots)
-		bad_spec = total / total_slots;
-	return sanitize_val(bad_spec);
+	*tool_aggr_idx = 0;	/* Assume the first aggregation index works. */
+	return true;
 }
 
-static double td_retiring(int map_idx, struct runtime_stat *st,
-			  struct runtime_stat_data *rsd)
-{
-	double retiring = 0;
-	double total_slots = td_total_slots(map_idx, st, rsd);
-	double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
-					    map_idx, rsd);
-
-	if (total_slots)
-		retiring = ret_slots / total_slots;
-	return retiring;
-}
-
-static double td_fe_bound(int map_idx, struct runtime_stat *st,
-			  struct runtime_stat_data *rsd)
-{
-	double fe_bound = 0;
-	double total_slots = td_total_slots(map_idx, st, rsd);
-	double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
-					    map_idx, rsd);
-
-	if (total_slots)
-		fe_bound = fetch_bub / total_slots;
-	return fe_bound;
-}
-
-static double td_be_bound(int map_idx, struct runtime_stat *st,
-			  struct runtime_stat_data *rsd)
-{
-	double sum = (td_fe_bound(map_idx, st, rsd) +
-		      td_bad_spec(map_idx, st, rsd) +
-		      td_retiring(map_idx, st, rsd));
-	if (sum == 0)
-		return 0;
-	return sanitize_val(1.0 - sum);
-}
-
-/*
- * Kernel reports metrics multiplied with slots. To get back
- * the ratios we need to recreate the sum.
- */
-
-static double td_metric_ratio(int map_idx, enum stat_type type,
-			      struct runtime_stat *stat,
-			      struct runtime_stat_data *rsd)
-{
-	double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, map_idx, rsd) +
-		runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, map_idx, rsd) +
-		runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, map_idx, rsd) +
-		runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, map_idx, rsd);
-	double d = runtime_stat_avg(stat, type, map_idx, rsd);
-
-	if (sum)
-		return d / sum;
-	return 0;
-}
-
-/*
- * ... but only if most of the values are actually available.
- * We allow two missing.
- */
-
-static bool full_td(int map_idx, struct runtime_stat *stat,
-		    struct runtime_stat_data *rsd)
-{
-	int c = 0;
-
-	if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, map_idx, rsd) > 0)
-		c++;
-	if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, map_idx, rsd) > 0)
-		c++;
-	if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, map_idx, rsd) > 0)
-		c++;
-	if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, map_idx, rsd) > 0)
-		c++;
-	return c >= 2;
-}
-
-static void print_smi_cost(struct perf_stat_config *config, int map_idx,
-			   struct perf_stat_output_ctx *out,
-			   struct runtime_stat *st,
-			   struct runtime_stat_data *rsd)
-{
-	double smi_num, aperf, cycles, cost = 0.0;
-	const char *color = NULL;
-
-	smi_num = runtime_stat_avg(st, STAT_SMI_NUM, map_idx, rsd);
-	aperf = runtime_stat_avg(st, STAT_APERF, map_idx, rsd);
-	cycles = runtime_stat_avg(st, STAT_CYCLES, map_idx, rsd);
-
-	if ((cycles == 0) || (aperf == 0))
-		return;
-
-	if (smi_num)
-		cost = (aperf - cycles) / aperf * 100.00;
-
-	if (cost > 10)
-		color = PERF_COLOR_RED;
-	out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
-	out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
-}
-
-static int prepare_metric(struct evsel **metric_events,
-			  struct metric_ref *metric_refs,
+static int prepare_metric(struct perf_stat_config *config,
+			  const struct metric_expr *mexp,
+			  const struct evsel *evsel,
 			  struct expr_parse_ctx *pctx,
-			  int map_idx,
-			  struct runtime_stat *st)
+			  int aggr_idx)
 {
-	double scale;
-	char *n;
-	int i, j, ret;
+	struct evsel * const *metric_events = mexp->metric_events;
+	struct metric_ref *metric_refs = mexp->metric_refs;
+	int i;
 
 	for (i = 0; metric_events[i]; i++) {
-		struct saved_value *v;
-		struct stats *stats;
-		u64 metric_total = 0;
-		int source_count;
-
-		if (evsel__is_tool(metric_events[i])) {
-			source_count = 1;
-			switch (metric_events[i]->tool_event) {
-			case PERF_TOOL_DURATION_TIME:
-				stats = &walltime_nsecs_stats;
-				scale = 1e-9;
-				break;
-			case PERF_TOOL_USER_TIME:
-				stats = &ru_stats.ru_utime_usec_stat;
-				scale = 1e-6;
+		int source_count = 0, tool_aggr_idx;
+		bool is_tool_time =
+			tool_pmu__is_time_event(config, metric_events[i], &tool_aggr_idx);
+		struct perf_stat_evsel *ps = metric_events[i]->stats;
+		struct perf_stat_aggr *aggr;
+		char *n;
+		double val;
+
+		/*
+		 * If there are multiple uncore PMUs and we're not reading the
+		 * leader's stats, determine the stats for the appropriate
+		 * uncore PMU.
+		 */
+		if (evsel && evsel->metric_leader &&
+		    evsel->pmu != evsel->metric_leader->pmu &&
+		    mexp->metric_events[i]->pmu == evsel->metric_leader->pmu) {
+			struct evsel *pos;
+
+			evlist__for_each_entry(evsel->evlist, pos) {
+				if (pos->pmu != evsel->pmu)
+					continue;
+				if (pos->metric_leader != mexp->metric_events[i])
+					continue;
+				ps = pos->stats;
+				source_count = 1;
 				break;
-			case PERF_TOOL_SYSTEM_TIME:
-				stats = &ru_stats.ru_stime_usec_stat;
-				scale = 1e-6;
-				break;
-			case PERF_TOOL_NONE:
-				pr_err("Invalid tool event 'none'");
-				abort();
-			case PERF_TOOL_MAX:
-				pr_err("Invalid tool event 'max'");
-				abort();
-			default:
-				pr_err("Unknown tool event '%s'", evsel__name(metric_events[i]));
-				abort();
 			}
-		} else {
-			v = saved_value_lookup(metric_events[i], map_idx, false,
-					       STAT_NONE, 0, st,
-					       metric_events[i]->cgrp);
-			if (!v)
-				break;
-			stats = &v->stats;
+		}
+		/* Time events are always on CPU0, the first aggregation index. */
+		aggr = &ps->aggr[is_tool_time ? tool_aggr_idx : aggr_idx];
+		if (!aggr || !metric_events[i]->supported) {
 			/*
-			 * If an event was scaled during stat gathering, reverse
-			 * the scale before computing the metric.
+			 * Not supported events will have a count of 0, which
+			 * can be confusing in a metric. Explicitly set the
+			 * value to NAN. Not counted events (enable time of 0)
+			 * are read as 0.
 			 */
-			scale = 1.0 / metric_events[i]->scale;
-
-			source_count = evsel__source_count(metric_events[i]);
-
-			if (v->metric_other)
-				metric_total = v->metric_total * scale;
+			val = NAN;
+			source_count = 0;
+		} else {
+			val = aggr->counts.val;
+			if (is_tool_time)
+				val *= 1e-9; /* Convert time event nanoseconds to seconds. */
+			if (!source_count)
+				source_count = evsel__source_count(metric_events[i]);
 		}
 		n = strdup(evsel__metric_id(metric_events[i]));
 		if (!n)
 			return -ENOMEM;
-
-		expr__add_id_val_source_count(pctx, n,
-					      metric_total ? : avg_stats(stats) * scale,
-					      source_count);
+		expr__add_id_val_source_count(pctx, n, val, source_count);
 	}
 
-	for (j = 0; metric_refs && metric_refs[j].metric_name; j++) {
-		ret = expr__add_ref(pctx, &metric_refs[j]);
+	for (int j = 0; metric_refs && metric_refs[j].metric_name; j++) {
+		int ret = expr__add_ref(pctx, &metric_refs[j]);
+
 		if (ret)
 			return ret;
 	}
@@ -884,22 +117,23 @@ static int prepare_metric(struct evsel **metric_events,
 }
 
 static void generic_metric(struct perf_stat_config *config,
-			   const char *metric_expr,
-			   struct evsel **metric_events,
-			   struct metric_ref *metric_refs,
-			   char *name,
-			   const char *metric_name,
-			   const char *metric_unit,
-			   int runtime,
-			   int map_idx,
-			   struct perf_stat_output_ctx *out,
-			   struct runtime_stat *st)
+			   struct metric_expr *mexp,
+			   struct evsel *evsel,
+			   int aggr_idx,
+			   struct perf_stat_output_ctx *out)
 {
 	print_metric_t print_metric = out->print_metric;
+	const char *metric_name = mexp->metric_name;
+	const char *metric_expr = mexp->metric_expr;
+	const char *metric_threshold = mexp->metric_threshold;
+	const char *metric_unit = mexp->metric_unit;
+	struct evsel * const *metric_events = mexp->metric_events;
+	int runtime = mexp->runtime;
 	struct expr_parse_ctx *pctx;
-	double ratio, scale;
+	double ratio, scale, threshold;
 	int i;
 	void *ctxp = out->ctx;
+	enum metric_threshold_classify thresh = METRIC_THRESHOLD_UNKNOWN;
 
 	pctx = expr__ctx_new();
 	if (!pctx)
@@ -909,7 +143,7 @@ static void generic_metric(struct perf_stat_config *config,
 	pctx->sctx.user_requested_cpu_list = strdup(config->user_requested_cpu_list);
 	pctx->sctx.runtime = runtime;
 	pctx->sctx.system_wide = config->system_wide;
-	i = prepare_metric(metric_events, metric_refs, pctx, map_idx, st);
+	i = prepare_metric(config, mexp, evsel, pctx, aggr_idx);
 	if (i < 0) {
 		expr__ctx_free(pctx);
 		return;
@@ -917,7 +151,14 @@ static void generic_metric(struct perf_stat_config *config,
 	if (!metric_events[i]) {
 		if (expr__parse(&ratio, pctx, metric_expr) == 0) {
 			char *unit;
-			char metric_bf[64];
+			char metric_bf[128];
+
+			if (metric_threshold &&
+			    expr__parse(&threshold, pctx, metric_threshold) == 0 &&
+			    !isnan(threshold)) {
+				thresh = fpclassify(threshold) == FP_ZERO
+					? METRIC_THRESHOLD_GOOD : METRIC_THRESHOLD_BAD;
+			}
 
 			if (metric_unit && metric_name) {
 				if (perf_pmu__convert_scale(metric_unit,
@@ -931,30 +172,30 @@ static void generic_metric(struct perf_stat_config *config,
 				scnprintf(metric_bf, sizeof(metric_bf), "%s %s",
 					  unit, metric_name);
-				print_metric(config, ctxp, NULL, "%8.1f",
+				print_metric(config, ctxp, thresh, "%8.1f",
 					     metric_bf, ratio);
 			} else {
-				print_metric(config, ctxp, NULL, "%8.2f",
+				print_metric(config, ctxp, thresh, "%8.2f",
 					     metric_name ?
 					     metric_name :
-					     out->force_header ? name : "",
+					     out->force_header ? evsel->name : "",
 					     ratio);
 			}
 		} else {
-			print_metric(config, ctxp, NULL, NULL,
+			print_metric(config, ctxp, thresh, /*fmt=*/NULL,
 				     out->force_header ?
-				     (metric_name ? metric_name : name) : "", 0);
+				     (metric_name ?: evsel->name) : "", 0);
 		}
 	} else {
-		print_metric(config, ctxp, NULL, NULL,
+		print_metric(config, ctxp, thresh, /*fmt=*/NULL,
 			     out->force_header ?
-			     (metric_name ? metric_name : name) : "", 0);
+			     (metric_name ?: evsel->name) : "", 0);
 	}
 
 	expr__ctx_free(pctx);
 }
 
-double test_generic_metric(struct metric_expr *mexp, int map_idx, struct runtime_stat *st)
+double test_generic_metric(struct metric_expr *mexp, int aggr_idx)
 {
 	struct expr_parse_ctx *pctx;
 	double ratio = 0.0;
@@ -963,7 +204,7 @@ double test_generic_metric(struct metric_expr *mexp, int map_idx, struct runtime
 	if (!pctx)
 		return NAN;
 
-	if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, map_idx, st) < 0)
+	if (prepare_metric(/*config=*/NULL, mexp, /*evsel=*/NULL, pctx, aggr_idx) < 0)
 		goto out;
 
 	if (expr__parse(&ratio, pctx, mexp->metric_expr))
@@ -974,364 +215,134 @@ out:
 	return ratio;
 }
 
-void perf_stat__print_shadow_stats(struct perf_stat_config *config,
-				   struct evsel *evsel,
-				   double avg, int map_idx,
-				   struct perf_stat_output_ctx *out,
-				   struct rblist *metric_events,
-				   struct runtime_stat *st)
+static void perf_stat__print_metricgroup_header(struct perf_stat_config *config,
+						struct evsel *evsel,
+						void *ctxp,
+						const char *name,
+						struct perf_stat_output_ctx *out)
 {
-	void *ctxp = out->ctx;
-	print_metric_t print_metric = out->print_metric;
-	double total, ratio = 0.0, total2;
-	const char *color = NULL;
-	struct runtime_stat_data rsd = {
-		.ctx = evsel_context(evsel),
-		.cgrp = evsel->cgrp,
-	};
-	struct metric_event *me;
-	int num = 1;
-
-	if (config->iostat_run) {
-		iostat_print_metric(config, evsel, out);
-	} else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
-		total = runtime_stat_avg(st, STAT_CYCLES, map_idx, &rsd);
+	bool need_full_name = perf_pmus__num_core_pmus() > 1;
+	static const char *last_name;
+	static const struct perf_pmu *last_pmu;
+	char full_name[64];

-		if (total) {
-			ratio = avg / total;
-			print_metric(config, ctxp, NULL, "%7.2f ",
-				     "insn per cycle", ratio);
-		} else {
-			print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
-		}
-
-		total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, map_idx, &rsd);
-
-		total = max(total, runtime_stat_avg(st,
-						    STAT_STALLED_CYCLES_BACK,
-						    map_idx, &rsd));
-
-		if (total && avg) {
-			out->new_line(config, ctxp);
-			ratio = total / avg;
-			print_metric(config, ctxp, NULL, "%7.2f ",
-				     "stalled cycles per insn",
-				     ratio);
-		}
-	} else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
-		if (runtime_stat_n(st, STAT_BRANCHES, map_idx, &rsd) != 0)
-			print_branch_misses(config, map_idx, avg, out, st, &rsd);
-		else
-			print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
-	} else if (
-		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
-		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_L1D |
-					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
-					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
-
-		if (runtime_stat_n(st, STAT_L1_DCACHE, map_idx, &rsd) != 0)
-			print_l1_dcache_misses(config, map_idx, avg, out, st, &rsd);
-		else
-			print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
-	} else if (
-		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
-		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_L1I |
-					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
-					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
+	/*
+	 * A metricgroup may have several metric events,
+	 * e.g., TopdownL1 on e-core of ADL.
+	 * The name has been output by the first metric
+	 * event. Only align with other metrics from
+	 * different metric events.
+	 */
+	if (last_name && !strcmp(last_name, name) && last_pmu == evsel->pmu) {
+		out->print_metricgroup_header(config, ctxp, NULL);
+		return;
+	}

-		if (runtime_stat_n(st, STAT_L1_ICACHE, map_idx, &rsd) != 0)
-			print_l1_icache_misses(config, map_idx, avg, out, st, &rsd);
-		else
-			print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
-	} else if (
-		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
-		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
-					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
-					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
+	if (need_full_name && evsel->pmu)
+		scnprintf(full_name, sizeof(full_name), "%s (%s)", name, evsel->pmu->name);
+	else
+		scnprintf(full_name, sizeof(full_name), "%s", name);

-		if (runtime_stat_n(st, STAT_DTLB_CACHE, map_idx, &rsd) != 0)
-			print_dtlb_cache_misses(config, map_idx, avg, out, st, &rsd);
-		else
-			print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
-	} else if (
-		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
-		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
-					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
-					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
+	out->print_metricgroup_header(config, ctxp, full_name);

-		if (runtime_stat_n(st, STAT_ITLB_CACHE, map_idx, &rsd) != 0)
-			print_itlb_cache_misses(config, map_idx, avg, out, st, &rsd);
-		else
-			print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
-	} else if (
-		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
-		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_LL |
-					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
-					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
+	last_name = name;
+	last_pmu = evsel->pmu;
+}

-		if (runtime_stat_n(st, STAT_LL_CACHE, map_idx, &rsd) != 0)
-			print_ll_cache_misses(config, map_idx, avg, out, st, &rsd);
-		else
-			print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
-	} else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
-		total = runtime_stat_avg(st, STAT_CACHEREFS, map_idx, &rsd);
+/**
+ * perf_stat__print_shadow_stats_metricgroup - Print out metrics associated with the evsel
+ *					       For the non-default mode, all metrics associated
+ *					       with the evsel are printed.
+ *					       For the default mode, only the metrics from
+ *					       the same metricgroup and the name of the
+ *					       metricgroup are printed. To print the metrics
+ *					       from the next metricgroup (if available),
+ *					       invoke the function with the corresponding
+ *					       metric_expr.
+ */
+void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config,
+						struct evsel *evsel,
+						int aggr_idx,
+						int *num,
+						void *from,
+						struct perf_stat_output_ctx *out)
+{
+	struct metric_event *me;
+	struct metric_expr *mexp = from;
+	void *ctxp = out->ctx;
+	bool header_printed = false;
+	const char *name = NULL;
+	struct rblist *metric_events = &evsel->evlist->metric_events;

-		if (total)
-			ratio = avg * 100 / total;
+	me = metricgroup__lookup(metric_events, evsel, false);
+	if (me == NULL)
+		return NULL;

-		if (runtime_stat_n(st, STAT_CACHEREFS, map_idx, &rsd) != 0)
-			print_metric(config, ctxp, NULL, "%8.3f %%",
-				     "of all cache refs", ratio);
-		else
-			print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
-	} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
-		print_stalled_cycles_frontend(config, map_idx, avg, out, st, &rsd);
-	} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
-		print_stalled_cycles_backend(config, map_idx, avg, out, st, &rsd);
-	} else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
-		total = runtime_stat_avg(st, STAT_NSECS, map_idx, &rsd);
+	if (!mexp)
+		mexp = list_first_entry(&me->head, typeof(*mexp), nd);

-		if (total) {
-			ratio = avg / total;
-			print_metric(config, ctxp, NULL, "%8.3f", "GHz", ratio);
-		} else {
-			print_metric(config, ctxp, NULL, NULL, "Ghz", 0);
+	list_for_each_entry_from(mexp, &me->head, nd) {
+		/* Print the display name of the Default metricgroup */
+		if (!config->metric_only && me->is_default) {
+			if (!name)
+				name = mexp->default_metricgroup_name;
+			/*
+			 * Two or more metricgroups may share the same metric
+			 * event, e.g., TopdownL1 and TopdownL2 on SPR.
+			 * Return and print the prefix, e.g., noise, running
+			 * for the next metricgroup.
+			 */
+			if (strcmp(name, mexp->default_metricgroup_name))
+				return (void *)mexp;
+			/* Only print the name of the metricgroup once */
+			if (!header_printed && !evsel->default_show_events) {
+				header_printed = true;
+				perf_stat__print_metricgroup_header(config, evsel, ctxp,
+								    name, out);
+			}
 		}
-	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
-		total = runtime_stat_avg(st, STAT_CYCLES, map_idx, &rsd);
-
-		if (total)
-			print_metric(config, ctxp, NULL,
-				     "%7.2f%%", "transactional cycles",
-				     100.0 * (avg / total));
-		else
-			print_metric(config, ctxp, NULL, NULL, "transactional cycles",
-				     0);
-	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
-		total = runtime_stat_avg(st, STAT_CYCLES, map_idx, &rsd);
-		total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, map_idx, &rsd);
-
-		if (total2 < avg)
-			total2 = avg;
-		if (total)
-			print_metric(config, ctxp, NULL, "%7.2f%%", "aborted cycles",
-				     100.0 * ((total2-avg) / total));
-		else
-			print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
-	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
-		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, map_idx, &rsd);
-
-		if (avg)
-			ratio = total / avg;
-
-		if (runtime_stat_n(st, STAT_CYCLES_IN_TX, map_idx, &rsd) != 0)
-			print_metric(config, ctxp, NULL, "%8.0f",
-				     "cycles / transaction", ratio);
-		else
-			print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
-				     0);
-	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
-		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, map_idx, &rsd);
-
-		if (avg)
-			ratio = total / avg;
-
-		print_metric(config, ctxp, NULL, "%8.0f", "cycles / elision", ratio);
-	} else if (evsel__is_clock(evsel)) {
-		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
-			print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
-				     avg / (ratio * evsel->scale));
-		else
-			print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
-		double fe_bound = td_fe_bound(map_idx, st, &rsd);
-
-		if (fe_bound > 0.2)
-			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
-			     fe_bound * 100.);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
-		double retiring = td_retiring(map_idx, st, &rsd);
-
-		if (retiring > 0.7)
-			color = PERF_COLOR_GREEN;
-		print_metric(config, ctxp, color, "%8.1f%%", "retiring",
-			     retiring * 100.);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
-		double bad_spec = td_bad_spec(map_idx, st, &rsd);
-
-		if (bad_spec > 0.1)
-			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
-			     bad_spec * 100.);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
-		double be_bound = td_be_bound(map_idx, st, &rsd);
-		const char *name = "backend bound";
-		static int have_recovery_bubbles = -1;
-
-		/* In case the CPU does not support topdown-recovery-bubbles */
-		if (have_recovery_bubbles < 0)
-			have_recovery_bubbles = pmu_have_event("cpu",
-					"topdown-recovery-bubbles");
-		if (!have_recovery_bubbles)
-			name = "backend bound/bad spec";
-
-		if (be_bound > 0.2)
-			color = PERF_COLOR_RED;
-		if (td_total_slots(map_idx, st, &rsd) > 0)
-			print_metric(config, ctxp, color, "%8.1f%%", name,
-				     be_bound * 100.);
-		else
-			print_metric(config, ctxp, NULL, NULL, name, 0);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) &&
-		   full_td(map_idx, st, &rsd)) {
-		double retiring = td_metric_ratio(map_idx,
-						  STAT_TOPDOWN_RETIRING, st,
-						  &rsd);
-		if (retiring > 0.7)
-			color = PERF_COLOR_GREEN;
-		print_metric(config, ctxp, color, "%8.1f%%", "Retiring",
-			     retiring * 100.);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
-		   full_td(map_idx, st, &rsd)) {
-		double fe_bound = td_metric_ratio(map_idx,
-						  STAT_TOPDOWN_FE_BOUND, st,
-						  &rsd);
-		if (fe_bound > 0.2)
-			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "Frontend Bound",
-			     fe_bound * 100.);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
-		   full_td(map_idx, st, &rsd)) {
-		double be_bound = td_metric_ratio(map_idx,
-						  STAT_TOPDOWN_BE_BOUND, st,
-						  &rsd);
-		if (be_bound > 0.2)
-			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "Backend Bound",
-			     be_bound * 100.);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
-		   full_td(map_idx, st, &rsd)) {
-		double bad_spec = td_metric_ratio(map_idx,
-						  STAT_TOPDOWN_BAD_SPEC, st,
-						  &rsd);
-		if (bad_spec > 0.1)
-			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "Bad Speculation",
-			     bad_spec * 100.);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) &&
-		   full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) {
-		double retiring = td_metric_ratio(map_idx,
-						  STAT_TOPDOWN_RETIRING, st,
-						  &rsd);
-		double heavy_ops = td_metric_ratio(map_idx,
-						   STAT_TOPDOWN_HEAVY_OPS, st,
-						   &rsd);
-		double light_ops = retiring - heavy_ops;
-
-		if (retiring > 0.7 && heavy_ops > 0.1)
-			color = PERF_COLOR_GREEN;
-		print_metric(config, ctxp, color, "%8.1f%%", "Heavy Operations",
-			     heavy_ops * 100.);
-		if (retiring > 0.7 && light_ops > 0.6)
-			color = PERF_COLOR_GREEN;
-		else
-			color = NULL;
-		print_metric(config, ctxp, color, "%8.1f%%", "Light Operations",
-			     light_ops * 100.);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) &&
-		   full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) {
-		double bad_spec = td_metric_ratio(map_idx,
-						  STAT_TOPDOWN_BAD_SPEC, st,
-						  &rsd);
-		double br_mis = td_metric_ratio(map_idx,
-						STAT_TOPDOWN_BR_MISPREDICT, st,
-						&rsd);
-		double m_clears = bad_spec - br_mis;
+		if ((*num)++ > 0 && out->new_line)
+			out->new_line(config, ctxp);
+		generic_metric(config, mexp, evsel, aggr_idx, out);
+	}

-		if (bad_spec > 0.1 && br_mis > 0.05)
-			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "Branch Mispredict",
-			     br_mis * 100.);
-		if (bad_spec > 0.1 && m_clears > 0.05)
-			color = PERF_COLOR_RED;
-		else
-			color = NULL;
-		print_metric(config, ctxp, color, "%8.1f%%", "Machine Clears",
-			     m_clears * 100.);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) &&
-		   full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) {
-		double fe_bound = td_metric_ratio(map_idx,
-						  STAT_TOPDOWN_FE_BOUND, st,
-						  &rsd);
-		double fetch_lat = td_metric_ratio(map_idx,
-						   STAT_TOPDOWN_FETCH_LAT, st,
-						   &rsd);
-		double fetch_bw = fe_bound - fetch_lat;
+	return NULL;
+}

-		if (fe_bound > 0.2 && fetch_lat > 0.15)
-			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "Fetch Latency",
-			     fetch_lat * 100.);
-		if (fe_bound > 0.2 && fetch_bw > 0.1)
-			color = PERF_COLOR_RED;
-		else
-			color = NULL;
-		print_metric(config, ctxp, color, "%8.1f%%", "Fetch Bandwidth",
-			     fetch_bw * 100.);
-	} else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) &&
-		   full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) {
-		double be_bound = td_metric_ratio(map_idx,
-						  STAT_TOPDOWN_BE_BOUND, st,
-						  &rsd);
-		double mem_bound = td_metric_ratio(map_idx,
-						   STAT_TOPDOWN_MEM_BOUND, st,
-						   &rsd);
-		double core_bound = be_bound - mem_bound;
+void perf_stat__print_shadow_stats(struct perf_stat_config *config,
+				   struct evsel *evsel,
+				   int aggr_idx,
+				   struct perf_stat_output_ctx *out)
+{
+	print_metric_t print_metric = out->print_metric;
+	void *ctxp = out->ctx;
+	int num = 0;

-		if (be_bound > 0.2 && mem_bound > 0.2)
-			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "Memory Bound",
-			     mem_bound * 100.);
-		if (be_bound > 0.2 && core_bound > 0.1)
-			color = PERF_COLOR_RED;
-		else
-			color = NULL;
-		print_metric(config, ctxp, color, "%8.1f%%", "Core Bound",
-			     core_bound * 100.);
-	} else if (evsel->metric_expr) {
-		generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
-			       evsel->name, evsel->metric_name, NULL, 1,
-			       map_idx, out, st);
-	} else if (runtime_stat_n(st, STAT_NSECS, map_idx, &rsd) != 0) {
-		char unit = ' ';
-		char unit_buf[10] = "/sec";
+	if (config->iostat_run)
+		iostat_print_metric(config, evsel, out);

-		total = runtime_stat_avg(st, STAT_NSECS, map_idx, &rsd);
-		if (total)
-			ratio = convert_unit_double(1000000000.0 * avg / total, &unit);
+	perf_stat__print_shadow_stats_metricgroup(config, evsel, aggr_idx,
						  &num, NULL, out);

-		if (unit != ' ')
-			snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
-		print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
-	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
-		print_smi_cost(config, map_idx, out, st, &rsd);
-	} else {
-		num = 0;
+	if (num == 0) {
+		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN,
+			     /*fmt=*/NULL, /*unit=*/NULL, 0);
 	}
+}
+
+/**
+ * perf_stat__skip_metric_event - Skip the evsel in the Default metricgroup,
+ *				  if it's not running or not the metric event.
+ */
+bool perf_stat__skip_metric_event(struct evsel *evsel,
+				  u64 ena, u64 run)
+{
+	if (!evsel->default_metricgroup)
+		return false;

-	if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
-		struct metric_expr *mexp;
+	if (!ena || !run)
+		return true;

-		list_for_each_entry (mexp, &me->head, nd) {
-			if (num++ > 0)
-				out->new_line(config, ctxp);
-			generic_metric(config, mexp->metric_expr, mexp->metric_events,
-				       mexp->metric_refs, evsel->name, mexp->metric_name,
-				       mexp->metric_unit, mexp->runtime,
-				       map_idx, out, st);
-		}
-	}
-	if (num == 0)
-		print_metric(config, ctxp, NULL, NULL, NULL, 0);
+	return !metricgroup__lookup(&evsel->evlist->metric_events, evsel, false);
 }
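
The Level-1 TopDown formulas quoted in the comment block removed above are easy to sanity-check numerically. Here is a minimal standalone sketch of that arithmetic; the struct, the sample counts, and the assumed 4-wide pipeline are illustrative only, not the perf API:

	/* Standalone illustration of the removed Level-1 TopDown formulas. */
	#include <stdio.h>

	struct td_counts {
		double total_slots;	/* e.g. cycles * pipeline width */
		double slots_issued;
		double slots_retired;
		double recovery_bubbles;
		double fetch_bubbles;
	};

	/* Clamp small negative rounding noise to zero, like sanitize_val(). */
	static double sanitize(double x)
	{
		return (x < 0 && x >= -0.02) ? 0.0 : x;
	}

	int main(void)
	{
		/* Hypothetical counts for a 4-wide pipeline over 1e9 cycles. */
		struct td_counts c = {
			.total_slots      = 4e9,
			.slots_issued     = 3.0e9,
			.slots_retired    = 2.6e9,
			.recovery_bubbles = 0.1e9,
			.fetch_bubbles    = 0.5e9,
		};
		double bad_spec = sanitize((c.slots_issued - c.slots_retired +
					    c.recovery_bubbles) / c.total_slots);
		double retiring = c.slots_retired / c.total_slots;
		double fe_bound = c.fetch_bubbles / c.total_slots;
		double be_bound = sanitize(1.0 - bad_spec - retiring - fe_bound);

		printf("retiring %.1f%%  bad-spec %.1f%%  fe-bound %.1f%%  be-bound %.1f%%\n",
		       100 * retiring, 100 * bad_spec, 100 * fe_bound, 100 * be_bound);
		return 0;
	}

With these inputs the four fractions come out to 65% retiring, 12.5% bad speculation, 12.5% frontend bound and 10% backend bound, summing to 100% of slots as the formulas require.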

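The threshold handling added to generic_metric() reduces to a small classification rule: a metric's threshold expression evaluates to zero when the metric is healthy, to a non-zero value when it crossed its threshold, and to NaN when it could not be computed. A self-contained sketch of that rule, where the enum values mirror the patch but the classify() helper and main() driver are hypothetical:

	#include <math.h>
	#include <stdio.h>

	enum metric_threshold_classify {
		METRIC_THRESHOLD_UNKNOWN,
		METRIC_THRESHOLD_GOOD,
		METRIC_THRESHOLD_BAD,
	};

	/* Same decision generic_metric() makes on the parsed threshold value. */
	static enum metric_threshold_classify classify(double threshold)
	{
		if (isnan(threshold))
			return METRIC_THRESHOLD_UNKNOWN;
		return fpclassify(threshold) == FP_ZERO ? METRIC_THRESHOLD_GOOD
							: METRIC_THRESHOLD_BAD;
	}

	int main(void)
	{
		/* e.g. a threshold expression evaluating to 0, 1, or NaN */
		printf("%d %d %d\n", classify(0.0), classify(1.0), classify(NAN));
		return 0;
	}

This prints "1 2 0": healthy, threshold crossed, unknown; the classification then replaces the old ad hoc color argument in the print_metric() callbacks.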