Diffstat (limited to 'tools/perf/arch/x86/util/intel-pt.c')
-rw-r--r-- | tools/perf/arch/x86/util/intel-pt.c | 45
1 file changed, 36 insertions, 9 deletions
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index d199619df3ab..add33cb5d1da 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -19,6 +19,7 @@
 #include "../../../util/evlist.h"
 #include "../../../util/evsel.h"
 #include "../../../util/evsel_config.h"
+#include "../../../util/config.h"
 #include "../../../util/cpumap.h"
 #include "../../../util/mmap.h"
 #include <subcmd/parse-options.h>
@@ -32,6 +33,7 @@
 #include "../../../util/tsc.h"
 #include <internal/lib.h> // page_size
 #include "../../../util/intel-pt.h"
+#include <api/fs/fs.h>
 
 #define KiB(x) ((x) * 1024)
 #define MiB(x) ((x) * 1024 * 1024)
@@ -51,6 +53,7 @@ struct intel_pt_recording {
 	struct perf_pmu			*intel_pt_pmu;
 	int				have_sched_switch;
 	struct evlist			*evlist;
+	bool				all_switch_events;
 	bool				snapshot_mode;
 	bool				snapshot_init_done;
 	size_t				snapshot_size;
@@ -74,7 +77,8 @@ static int intel_pt_parse_terms_with_default(const struct perf_pmu *pmu,
 		goto out_free;
 
 	attr.config = *config;
-	err = perf_pmu__config_terms(pmu, &attr, &terms, /*zero=*/true, /*err=*/NULL);
+	err = perf_pmu__config_terms(pmu, &attr, &terms, /*zero=*/true, /*apply_hardcoded=*/false,
+				     /*err=*/NULL);
 	if (err)
 		goto out_free;
 
@@ -369,7 +373,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
 			ui__warning("Intel Processor Trace: TSC not available\n");
 	}
 
-	per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);
+	per_cpu_mmaps = !perf_cpu_map__is_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);
 
 	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
 	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -428,6 +432,16 @@ static int intel_pt_track_switches(struct evlist *evlist)
 }
 #endif
 
+static bool intel_pt_exclude_guest(void)
+{
+	int pt_mode;
+
+	if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode))
+		pt_mode = 0;
+
+	return pt_mode == 1;
+}
+
 static void intel_pt_valid_str(char *str, size_t len, u64 valid)
 {
 	unsigned int val, last = 0, state = 1;
@@ -620,6 +634,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 			}
 			evsel->core.attr.freq = 0;
 			evsel->core.attr.sample_period = 1;
+			evsel->core.attr.exclude_guest = intel_pt_exclude_guest();
 			evsel->no_aux_samples = true;
 			evsel->needs_auxtrace_mmap = true;
 			intel_pt_evsel = evsel;
@@ -758,7 +773,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	}
 
 	if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
-		u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4;
+		size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4;
+		u32 aux_watermark = aw > UINT_MAX ? UINT_MAX : aw;
 
 		intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
 	}
@@ -774,13 +790,13 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * Per-cpu recording needs sched_switch events to distinguish different
	 * threads.
 	 */
-	if (have_timing_info && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
+	if (have_timing_info && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
 	    !record_opts__no_switch_events(opts)) {
 		if (perf_can_record_switch_events()) {
 			bool cpu_wide = !target__none(&opts->target) &&
 					!target__has_task(&opts->target);
 
-			if (!cpu_wide && perf_can_record_cpu_wide()) {
+			if (ptr->all_switch_events && !cpu_wide && perf_can_record_cpu_wide()) {
 				struct evsel *switch_evsel;
 
 				switch_evsel = evlist__add_dummy_on_all_cpus(evlist);
@@ -832,7 +848,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 		 * In the case of per-cpu mmaps, we need the CPU on the
 		 * AUX event.
 		 */
-		if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
+		if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
 			evsel__set_sample_bit(intel_pt_evsel, CPU);
 	}
 
@@ -858,7 +874,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 			tracking_evsel->immediate = true;
 
 		/* In per-cpu case, always need the time of mmap events etc */
-		if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
+		if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
 			evsel__set_sample_bit(tracking_evsel, TIME);
 			/* And the CPU for switch events */
 			evsel__set_sample_bit(tracking_evsel, CPU);
@@ -870,7 +886,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * Warn the user when we do not have enough information to decode i.e.
 	 * per-cpu with no sched_switch (except workload-only).
 	 */
-	if (!ptr->have_sched_switch && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
+	if (!ptr->have_sched_switch && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
 	    !target__none(&opts->target) &&
 	    !intel_pt_evsel->core.attr.exclude_user)
 		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
@@ -1164,6 +1180,16 @@ static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
 	return rdtsc();
 }
 
+static int intel_pt_perf_config(const char *var, const char *value, void *data)
+{
+	struct intel_pt_recording *ptr = data;
+
+	if (!strcmp(var, "intel-pt.all-switch-events"))
+		ptr->all_switch_events = perf_config_bool(var, value);
+
+	return 0;
+}
+
 struct auxtrace_record *intel_pt_recording_init(int *err)
 {
 	struct perf_pmu *intel_pt_pmu = perf_pmus__find(INTEL_PT_PMU_NAME);
@@ -1183,8 +1209,9 @@ struct auxtrace_record *intel_pt_recording_init(int *err)
 		return NULL;
 	}
 
+	perf_config(intel_pt_perf_config, ptr);
+
 	ptr->intel_pt_pmu = intel_pt_pmu;
-	ptr->itr.pmu = intel_pt_pmu;
 	ptr->itr.recording_options = intel_pt_recording_options;
 	ptr->itr.info_priv_size = intel_pt_info_priv_size;
 	ptr->itr.info_fill = intel_pt_info_fill;
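
The new intel_pt_exclude_guest() helper keys off the kvm_intel module's pt_mode parameter: when pt_mode is 1 (host/guest mode) PT is virtualized for the guest, so the host tracer sets exclude_guest on the intel_pt event. Below is a minimal standalone sketch (not part of the patch) of the same probe, written with plain stdio instead of the perf tree's sysfs__read_int() and assuming sysfs is mounted at /sys; a missing file (kvm_intel not loaded, or an older kernel) falls back to pt_mode == 0.

/*
 * Standalone sketch of the pt_mode check done by intel_pt_exclude_guest().
 * Assumes sysfs is mounted at /sys; any read failure is treated as pt_mode 0,
 * i.e. guest tracing is not excluded.
 */
#include <stdio.h>
#include <stdbool.h>

static bool pt_mode_is_host_guest(void)
{
	FILE *f = fopen("/sys/module/kvm_intel/parameters/pt_mode", "r");
	int pt_mode = 0;

	if (f) {
		if (fscanf(f, "%d", &pt_mode) != 1)
			pt_mode = 0;
		fclose(f);
	}

	/* pt_mode == 1: PT is switched between host and guest by KVM */
	return pt_mode == 1;
}

int main(void)
{
	printf("exclude_guest would be set to %d\n", pt_mode_is_host_guest());
	return 0;
}

On the configuration side, whether the recording code adds the system-wide dummy switch event for a single-task trace now also depends on the intel-pt.all-switch-events boolean that intel_pt_perf_config() parses with perf_config_bool(); it is an ordinary perfconfig variable, so it would typically be set in the [intel-pt] section of perfconfig.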