author     Jin Yao <yao.jin@linux.intel.com>  2020-05-20 12:27:35 +0800
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2020-05-28 10:03:27 -0300
commit     297767ac0ce598dece202e5c9e3f22e034cb4821 (patch)
tree       cf2508b0b24fc09c8070ccefef3125f6275a0a38 /tools/perf/util/stat.c
parent     cf4d9bd67cb1531a775599d04b9258c5e16b3fc6 (diff)
perf stat: Copy counts from prev_raw_counts to evsel->counts
It would be useful to support overall statistics for perf-stat interval
mode, for example, to report a summary at the end of "perf-stat -I"
output.

But since perf-stat supports many aggregation modes, such as
--per-thread, --per-socket, -M, etc., we need a solution which doesn't
bring much complexity.

The idea is to use 'evsel->prev_raw_counts', which is updated in each
interval and saved with the latest counts. Before reporting the
summary, we copy the counts from evsel->prev_raw_counts to
evsel->counts, and then just follow the non-interval processing.

v5:
---
Don't save the previous aggr value to the member of [cpu0, thread0] in
perf_counts. Originally that was a trick, because
perf_stat_process_counter would create aggr values from per-cpu
values. But we don't need to do that all the time. It will be handled
in the next patch.

Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Reviewed-by: Jiri Olsa <jolsa@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jin Yao <yao.jin@intel.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20200520042737.24160-4-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
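[Editorial sketch, not part of this patch] To illustrate the idea described above, a follow-up caller in builtin-stat.c might restore the accumulated counts and then run the usual non-interval path. Only perf_evlist__copy_prev_raw_counts() (added here) and perf_stat_process_counter() come from the perf sources; print_interval_summary() and its surroundings are assumptions for illustration.

/* Assumed tools/perf includes: */
#include "util/evlist.h"
#include "util/stat.h"

/*
 * Hypothetical caller (a later patch wires this up in builtin-stat.c):
 * print an overall summary after the last "-I" interval.
 */
static void print_interval_summary(struct perf_stat_config *config,
				   struct evlist *evlist)
{
	struct evsel *counter;

	/*
	 * Restore the per-cpu/per-thread counts accumulated in
	 * prev_raw_counts into evsel->counts.
	 */
	perf_evlist__copy_prev_raw_counts(evlist);

	/* From here on, the normal non-interval processing applies. */
	evlist__for_each_entry(evlist, counter)
		perf_stat_process_counter(config, counter);
}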
Diffstat (limited to 'tools/perf/util/stat.c')
-rw-r--r--  tools/perf/util/stat.c  24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index e397815f0dfb..aadc723ce871 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -225,6 +225,30 @@ void perf_evlist__reset_prev_raw_counts(struct evlist *evlist)
 		evsel__reset_prev_raw_counts(evsel);
 }
 
+static void perf_evsel__copy_prev_raw_counts(struct evsel *evsel)
+{
+	int ncpus = evsel__nr_cpus(evsel);
+	int nthreads = perf_thread_map__nr(evsel->core.threads);
+
+	for (int thread = 0; thread < nthreads; thread++) {
+		for (int cpu = 0; cpu < ncpus; cpu++) {
+			*perf_counts(evsel->counts, cpu, thread) =
+				*perf_counts(evsel->prev_raw_counts, cpu,
+					     thread);
+		}
+	}
+
+	evsel->counts->aggr = evsel->prev_raw_counts->aggr;
+}
+
+void perf_evlist__copy_prev_raw_counts(struct evlist *evlist)
+{
+	struct evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel)
+		perf_evsel__copy_prev_raw_counts(evsel);
+}
+
 static void zero_per_pkg(struct evsel *counter)
 {
 	if (counter->per_pkg_mask)