From f2b91386ffe66dba0860c03c9dec1c6b45c2daba Mon Sep 17 00:00:00 2001
From: Adrian Hunter
Date: Wed, 27 Oct 2021 11:03:31 +0300
Subject: perf intel-pt: Support itrace A option to approximate IPC

Normally, for cycle-accurate mode, IPC values are an exact number of
instructions and cycles. Due to the granularity of timestamps, that
happens only when a CYC packet correlates to the event.

Support the itrace 'A' option to use, instead, the number of cycles
associated with the current timestamp. This provides IPC information for
every change of timestamp, but at the expense of accuracy. Due to the
granularity of timestamps, the actual number of cycles increases even
though the cycles reported does not. The number of instructions is known,
but if IPC is reported, cycles can be too low and so IPC is too high. Note
that inaccuracy decreases as the period of sampling increases, i.e. if the
number of cycles is too low by a small amount, that becomes less
significant if the number of cycles is large.

Furthermore, it can be used in conjunction with dlfilter-show-cycles.so to
provide higher granularity cycle information.

Reviewed-by: Andi Kleen
Signed-off-by: Adrian Hunter
Cc: Jiri Olsa
Link: https://lore.kernel.org/r/20211027080334.365596-4-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/util/intel-pt.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

(limited to 'tools/perf/util/intel-pt.c')

diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index c9542fada8fb..0ee5005e9837 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -172,6 +172,7 @@ struct intel_pt_queue {
 	bool step_through_buffers;
 	bool use_buffer_pid_tid;
 	bool sync_switch;
+	bool sample_ipc;
 	pid_t pid, tid;
 	int cpu;
 	int switch_state;
@@ -1581,7 +1582,7 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
 		sample.branch_stack = (struct branch_stack *)&dummy_bs;
 	}
 
-	if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
+	if (ptq->sample_ipc)
 		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
 	if (sample.cyc_cnt) {
 		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
@@ -1632,7 +1633,7 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
 	else
 		sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
 
-	if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
+	if (ptq->sample_ipc)
 		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
 	if (sample.cyc_cnt) {
 		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
@@ -2245,8 +2246,15 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
 
 	ptq->have_sample = false;
 
-	ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
-	ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
+	if (pt->synth_opts.approx_ipc) {
+		ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
+		ptq->ipc_cyc_cnt = ptq->state->cycles;
+		ptq->sample_ipc = true;
+	} else {
+		ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
+		ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
+		ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
+	}
 
 	/*
 	 * Do PEBS first to allow for the possibility that the PEBS timestamp
--
cgit
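
A rough usage sketch for the new option (the event specification, workload
name and output field selection below are illustrative, not taken from this
patch):

  # Record a cycle-accurate Intel PT trace of a workload
  perf record -e intel_pt/cyc/u -- ./workload

  # Synthesize branch samples; adding 'A' reports approximate IPC on every
  # change of timestamp instead of only when a CYC packet correlates exactly
  perf script --itrace=beA -F +ipc

  # Pair with the bundled dlfilter for higher granularity cycle information
  perf script --itrace=beA --dlfilter dlfilter-show-cycles.so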