path: root/tools/perf/util/evsel.c
Diffstat (limited to 'tools/perf/util/evsel.c')
-rw-r--r--  tools/perf/util/evsel.c | 335
1 file changed, 260 insertions(+), 75 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1974395492d7..d264c143b592 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -48,6 +48,7 @@
#include "record.h"
#include "debug.h"
#include "trace-event.h"
+#include "session.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
@@ -56,8 +57,10 @@
#include "off_cpu.h"
#include "pmu.h"
#include "pmus.h"
+#include "drm_pmu.h"
#include "hwmon_pmu.h"
#include "tool_pmu.h"
+#include "tp_pmu.h"
#include "rlimit.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
@@ -487,7 +490,7 @@ struct evsel *evsel__clone(struct evsel *dest, struct evsel *orig)
return NULL;
evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
- evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
+ evsel->core.pmu_cpus = perf_cpu_map__get(orig->core.pmu_cpus);
evsel->core.threads = perf_thread_map__get(orig->core.threads);
evsel->core.nr_members = orig->core.nr_members;
evsel->core.system_wide = orig->core.system_wide;
@@ -552,11 +555,11 @@ struct evsel *evsel__clone(struct evsel *dest, struct evsel *orig)
evsel->exclude_GH = orig->exclude_GH;
evsel->sample_read = orig->sample_read;
- evsel->auto_merge_stats = orig->auto_merge_stats;
evsel->collect_stat = orig->collect_stat;
evsel->weak_group = orig->weak_group;
evsel->use_config_name = orig->use_config_name;
evsel->pmu = orig->pmu;
+ evsel->first_wildcard_match = orig->first_wildcard_match;
if (evsel__copy_config_terms(evsel, orig) < 0)
goto out_err;
@@ -570,24 +573,6 @@ out_err:
return NULL;
}
-static int trace_event__id(const char *sys, const char *name)
-{
- char *tp_dir = get_events_file(sys);
- char path[PATH_MAX];
- int id, err;
-
- if (!tp_dir)
- return -1;
-
- scnprintf(path, PATH_MAX, "%s/%s/id", tp_dir, name);
- put_events_file(tp_dir);
- err = filename__read_int(path, &id);
- if (err)
- return err;
-
- return id;
-}
-
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
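The helper removed above did nothing more than read the tracepoint's numeric id out of tracefs; its replacement, tp_pmu__id() from the newly included tp_pmu.h, is expected to perform the equivalent lookup. A minimal standalone sketch of that lookup, assuming the usual /sys/kernel/tracing mount point (the removed code resolved the mount via get_events_file() rather than hard-coding it):

#include <stdio.h>

/* Illustrative only: resolve a tracepoint id the way the removed
 * helper did, e.g. tracepoint_id("sched", "sched_switch"). */
static int tracepoint_id(const char *sys, const char *name)
{
	char path[4096];
	FILE *f;
	int id;

	snprintf(path, sizeof(path),
		 "/sys/kernel/tracing/events/%s/%s/id", sys, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &id) != 1)
		id = -1;
	fclose(f);
	return id;
}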
@@ -621,7 +606,7 @@ struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool
event_attr_init(&attr);
if (format) {
- id = trace_event__id(sys, name);
+ id = tp_pmu__id(sys, name);
if (id < 0) {
err = id;
goto out_free;
@@ -1275,9 +1260,10 @@ static void evsel__set_default_freq_period(struct record_opts *opts,
}
}
-static bool evsel__is_offcpu_event(struct evsel *evsel)
+bool evsel__is_offcpu_event(struct evsel *evsel)
{
- return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT);
+ return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT) &&
+ evsel->core.attr.sample_type & PERF_SAMPLE_RAW;
}
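The added PERF_SAMPLE_RAW test tightens the predicate, presumably to tell the sampling off-CPU event apart from an off-CPU accounting event that shares the OFFCPU_EVENT name but carries no raw data. With evsel__is_bpf_output() expanded per its definition, the full condition is now equivalent to this sketch:

/* Sketch of the predicate after this change: */
bool is_offcpu =
	evsel->core.attr.type   == PERF_TYPE_SOFTWARE &&
	evsel->core.attr.config == PERF_COUNT_SW_BPF_OUTPUT &&
	evsel__name_is(evsel, OFFCPU_EVENT) &&
	(evsel->core.attr.sample_type & PERF_SAMPLE_RAW);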
/*
@@ -1425,7 +1411,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
evsel__set_sample_bit(evsel, CPU);
}
- if (opts->sample_address)
+ if (opts->sample_data_src)
evsel__set_sample_bit(evsel, DATA_SRC);
if (opts->sample_phys_addr)
@@ -1440,9 +1426,10 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
attr->branch_sample_type = opts->branch_stack;
}
- if (opts->sample_weight)
+ if (opts->sample_weight || evsel->retire_lat) {
arch_evsel__set_sample_weight(evsel);
-
+ evsel->retire_lat = false;
+ }
attr->task = track;
attr->mmap = track;
attr->mmap2 = track && !perf_missing_features.mmap2;
@@ -1524,7 +1511,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
attr->exclude_user = 1;
}
- if (evsel->core.own_cpus || evsel->unit)
+ if (evsel->core.pmu_cpus || evsel->unit)
evsel->core.attr.read_format |= PERF_FORMAT_ID;
/*
@@ -1554,8 +1541,10 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
if (evsel__is_dummy_event(evsel))
evsel__reset_sample_bit(evsel, BRANCH_STACK);
- if (evsel__is_offcpu_event(evsel))
+ if (evsel__is_offcpu_event(evsel)) {
evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;
+ attr->inherit = 0;
+ }
arch__post_evsel_config(evsel, attr);
}
@@ -1652,10 +1641,21 @@ static void evsel__free_config_terms(struct evsel *evsel)
free_config_terms(&evsel->config_terms);
}
+static void (*evsel__priv_destructor)(void *priv);
+
+void evsel__set_priv_destructor(void (*destructor)(void *priv))
+{
+ assert(evsel__priv_destructor == NULL);
+
+ evsel__priv_destructor = destructor;
+}
+
void evsel__exit(struct evsel *evsel)
{
assert(list_empty(&evsel->core.node));
assert(evsel->evlist == NULL);
+ if (evsel__is_retire_lat(evsel))
+ evsel__tpebs_close(evsel);
bpf_counter__destroy(evsel);
perf_bpf_filter__destroy(evsel);
evsel__free_counts(evsel);
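The new hook gives users of evsel->priv a single place to release their data when an evsel is destroyed; note the assert, which allows it to be registered only once. A hypothetical user, with all names invented for illustration:

/* Hypothetical tool-private payload hung off evsel->priv. */
struct my_tool_priv {
	char *label;
};

static void my_tool_priv__free(void *priv)
{
	struct my_tool_priv *p = priv;

	if (p == NULL)
		return;
	free(p->label);
	free(p);
}

/* Registered once during tool startup, before any evsel carrying
 * tool-private data can reach evsel__exit(). */
evsel__set_priv_destructor(my_tool_priv__free);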
@@ -1663,9 +1663,7 @@ void evsel__exit(struct evsel *evsel)
perf_evsel__free_id(&evsel->core);
evsel__free_config_terms(evsel);
cgroup__put(evsel->cgrp);
- perf_cpu_map__put(evsel->core.cpus);
- perf_cpu_map__put(evsel->core.own_cpus);
- perf_thread_map__put(evsel->core.threads);
+ perf_evsel__exit(&evsel->core);
zfree(&evsel->group_name);
zfree(&evsel->name);
#ifdef HAVE_LIBTRACEEVENT
@@ -1680,6 +1678,8 @@ void evsel__exit(struct evsel *evsel)
hashmap__free(evsel->per_pkg_mask);
evsel->per_pkg_mask = NULL;
zfree(&evsel->metric_events);
+ if (evsel__priv_destructor)
+ evsel__priv_destructor(evsel->priv);
perf_evsel__object.fini(evsel);
if (evsel__tool_event(evsel) == TOOL_PMU__EVENT_SYSTEM_TIME ||
evsel__tool_event(evsel) == TOOL_PMU__EVENT_USER_TIME)
@@ -1718,11 +1718,6 @@ static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
}
-static int evsel__read_retire_lat(struct evsel *evsel, int cpu_map_idx, int thread)
-{
- return tpebs_set_evsel(evsel, cpu_map_idx, thread);
-}
-
static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
u64 val, u64 ena, u64 run, u64 lost)
{
@@ -1730,8 +1725,8 @@ static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
count = perf_counts(counter->counts, cpu_map_idx, thread);
- if (counter->retire_lat) {
- evsel__read_retire_lat(counter, cpu_map_idx, thread);
+ if (evsel__is_retire_lat(counter)) {
+ evsel__tpebs_read(counter, cpu_map_idx, thread);
perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
return;
}
@@ -1888,8 +1883,11 @@ int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
if (evsel__is_hwmon(evsel))
return evsel__hwmon_pmu_read(evsel, cpu_map_idx, thread);
+ if (evsel__is_drm(evsel))
+ return evsel__drm_pmu_read(evsel, cpu_map_idx, thread);
+
if (evsel__is_retire_lat(evsel))
- return evsel__read_retire_lat(evsel, cpu_map_idx, thread);
+ return evsel__tpebs_read(evsel, cpu_map_idx, thread);
if (evsel->core.attr.read_format & PERF_FORMAT_GROUP)
return evsel__read_group(evsel, cpu_map_idx, thread);
@@ -2566,25 +2564,6 @@ check:
return false;
}
-static bool evsel__handle_error_quirks(struct evsel *evsel, int error)
-{
- /*
- * AMD core PMU tries to forward events with precise_ip to IBS PMU
- * implicitly. But IBS PMU has more restrictions so it can fail with
- * supported event attributes. Let's forward it back to the core PMU
- * by clearing precise_ip only if it's from precise_max (:P).
- */
- if ((error == -EINVAL || error == -ENOENT) && x86__is_amd_cpu() &&
- evsel->core.attr.precise_ip && evsel->precise_max) {
- evsel->core.attr.precise_ip = 0;
- pr_debug2_peo("removing precise_ip on AMD\n");
- display_attr(&evsel->core.attr);
- return true;
- }
-
- return false;
-}
-
static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads,
int start_cpu_map_idx, int end_cpu_map_idx)
@@ -2595,7 +2574,7 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_cpu cpu;
if (evsel__is_retire_lat(evsel))
- return tpebs_start(evsel->evlist);
+ return evsel__tpebs_open(evsel);
err = __evsel__prepare_open(evsel, cpus, threads);
if (err)
@@ -2628,6 +2607,11 @@ fallback_missing_features:
start_cpu_map_idx,
end_cpu_map_idx);
}
+ if (evsel__is_drm(evsel)) {
+ return evsel__drm_pmu_open(evsel, threads,
+ start_cpu_map_idx,
+ end_cpu_map_idx);
+ }
for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
cpu = perf_cpu_map__cpu(cpus, idx);
@@ -2730,9 +2714,6 @@ try_fallback:
if (evsel__precise_ip_fallback(evsel))
goto retry_open;
- if (evsel__handle_error_quirks(evsel, err))
- goto retry_open;
-
out_close:
if (err)
threads->err_thread = thread;
@@ -2759,22 +2740,37 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
void evsel__close(struct evsel *evsel)
{
if (evsel__is_retire_lat(evsel))
- tpebs_delete();
+ evsel__tpebs_close(evsel);
perf_evsel__close(&evsel->core);
perf_evsel__free_id(&evsel->core);
}
-int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
+int evsel__open_per_cpu_and_thread(struct evsel *evsel,
+ struct perf_cpu_map *cpus, int cpu_map_idx,
+ struct perf_thread_map *threads)
{
if (cpu_map_idx == -1)
- return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
+ return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
+
+ return evsel__open_cpu(evsel, cpus, threads, cpu_map_idx, cpu_map_idx + 1);
+}
- return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
+int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
+{
+ struct perf_thread_map *threads = thread_map__new_by_tid(-1);
+ int ret = evsel__open_per_cpu_and_thread(evsel, cpus, cpu_map_idx, threads);
+
+ perf_thread_map__put(threads);
+ return ret;
}
int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
{
- return evsel__open(evsel, NULL, threads);
+ struct perf_cpu_map *cpus = perf_cpu_map__new_any_cpu();
+ int ret = evsel__open_per_cpu_and_thread(evsel, cpus, -1, threads);
+
+ perf_cpu_map__put(cpus);
+ return ret;
}
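Both wrappers now funnel into the one helper: the per-CPU path pairs the CPU map with a single-entry dummy thread map (tid -1), while the per-thread path pairs the thread map with the "any CPU" map. A hedged usage sketch, assuming an already-configured evsel and abbreviating error handling:

struct perf_cpu_map *cpus = perf_cpu_map__new("0-3");
struct perf_thread_map *threads = thread_map__new_by_tid(-1);
int err;

/* cpu_map_idx == -1 opens the event on every CPU in the map. */
err = evsel__open_per_cpu_and_thread(evsel, cpus, -1, threads);
if (err)
	pr_debug("open failed: %d\n", err);

perf_thread_map__put(threads);
perf_cpu_map__put(cpus);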
static int perf_evsel__parse_id_sample(const struct evsel *evsel,
@@ -2867,11 +2863,18 @@ perf_event__check_size(union perf_event *event, unsigned int sample_size)
return 0;
}
-void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
- const __u64 *array,
- u64 type __maybe_unused)
+static void perf_parse_sample_weight(struct perf_sample *data, const __u64 *array, u64 type)
{
- data->weight = *array;
+ union perf_sample_weight weight;
+
+ weight.full = *array;
+ if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
+ data->weight = weight.var1_dw;
+ data->ins_lat = weight.var2_w;
+ data->weight3 = weight.var3_w;
+ } else {
+ data->weight = weight.full;
+ }
}
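For reference, PERF_SAMPLE_WEIGHT_STRUCT packs three fields into the same 64 bits that plain PERF_SAMPLE_WEIGHT treats as a single value, which is what the var1_dw/var2_w/var3_w accesses above decode. The UAPI union from include/uapi/linux/perf_event.h looks roughly like this (little-endian variant shown; the field meanings are PMU-specific):

union perf_sample_weight {
	__u64	full;
	struct {
		__u32	var1_dw;	/* e.g. load latency        */
		__u16	var2_w;		/* e.g. instruction latency */
		__u16	var3_w;		/* e.g. retirement latency  */
	};
};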
u64 evsel__bitfield_swap_branch_flags(u64 value)
@@ -2945,6 +2948,35 @@ static inline bool evsel__has_branch_counters(const struct evsel *evsel)
return false;
}
+static int __set_offcpu_sample(struct perf_sample *data)
+{
+ u64 *array = data->raw_data;
+ u32 max_size = data->raw_size, *p32;
+ const void *endp = (void *)array + max_size;
+
+ if (array == NULL)
+ return -EFAULT;
+
+ OVERFLOW_CHECK_u64(array);
+ p32 = (void *)array++;
+ data->pid = p32[0];
+ data->tid = p32[1];
+
+ OVERFLOW_CHECK_u64(array);
+ data->period = *array++;
+
+ OVERFLOW_CHECK_u64(array);
+ data->callchain = (struct ip_callchain *)array++;
+ OVERFLOW_CHECK(array, data->callchain->nr * sizeof(u64), max_size);
+ data->ip = data->callchain->ips[1];
+ array += data->callchain->nr;
+
+ OVERFLOW_CHECK_u64(array);
+ data->cgroup = *array;
+
+ return 0;
+}
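Read back from the parser above, the raw payload emitted by the off-CPU BPF program appears to carry, in order: pid and tid packed into one u64, the off-CPU period, a callchain (length followed by entries, with ips[1] taken as the sample ip, presumably past a leading PERF_CONTEXT marker), and a trailing cgroup id. A descriptive sketch of that layout (illustrative, not a UAPI definition; field names invented):

/* Layout implied by __set_offcpu_sample(). */
struct offcpu_raw_sample {
	u32 pid, tid;	/* two u32 halves of the first u64 slot */
	u64 period;	/* time spent off-CPU                   */
	u64 nr;		/* callchain length, followed by...     */
	u64 ips[];	/* ...nr entries, then a u64 cgroup id  */
};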
+
int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
@@ -3228,7 +3260,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
if (type & PERF_SAMPLE_WEIGHT_TYPE) {
OVERFLOW_CHECK_u64(array);
- arch_perf_parse_sample_weight(data, array, type);
+ perf_parse_sample_weight(data, array, type);
array++;
}
@@ -3299,6 +3331,9 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
array = (void *)array + sz;
}
+ if (evsel__is_offcpu_event(evsel))
+ return __set_offcpu_sample(data);
+
return 0;
}
@@ -3774,6 +3809,10 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
return scnprintf(msg, size, "%s",
"No hardware sampling interrupt available.\n");
#endif
+ if (!target__has_cpu(target))
+ return scnprintf(msg, size,
+ "Unsupported event (%s) in per-thread mode, enable system wide with '-a'.",
+ evsel__name(evsel));
break;
case EBUSY:
if (find_process("oprofiled"))
@@ -3824,11 +3863,16 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
}
+struct perf_session *evsel__session(struct evsel *evsel)
+{
+ return evsel && evsel->evlist ? evsel->evlist->session : NULL;
+}
+
struct perf_env *evsel__env(struct evsel *evsel)
{
- if (evsel && evsel->evlist && evsel->evlist->env)
- return evsel->evlist->env;
- return &perf_env;
+ struct perf_session *session = evsel__session(evsel);
+
+ return session ? perf_session__env(session) : NULL;
}
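Note the behavioral change: evsel__env() previously fell back to the global perf_env and never returned NULL, whereas it now returns NULL whenever no session is attached. A caller-side sketch of the check this implies:

struct perf_env *env = evsel__env(evsel);

/* evsel__env() no longer falls back to a global environment. */
if (env == NULL)
	return;	/* e.g. evsel not attached to a session */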
static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
@@ -3939,3 +3983,144 @@ void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader)
leader->core.nr_members--;
}
}
+
+bool evsel__set_needs_uniquify(struct evsel *counter, const struct perf_stat_config *config)
+{
+ struct evsel *evsel;
+
+ if (counter->needs_uniquify) {
+ /* Already set. */
+ return true;
+ }
+
+ if (counter->use_config_name || counter->is_libpfm_event) {
+ /* Original name will be used. */
+ return false;
+ }
+
+ if (!config->hybrid_merge && evsel__is_hybrid(counter)) {
+ /* Unique hybrid counters necessary. */
+ counter->needs_uniquify = true;
+ return true;
+ }
+
+ if (counter->core.attr.type < PERF_TYPE_MAX && counter->core.attr.type != PERF_TYPE_RAW) {
+ /* Legacy event, don't uniquify. */
+ return false;
+ }
+
+ if (counter->pmu && counter->pmu->is_core &&
+ counter->alternate_hw_config != PERF_COUNT_HW_MAX) {
+ /* A sysfs or json event replacing a legacy event, don't uniquify. */
+ return false;
+ }
+
+ if (config->aggr_mode == AGGR_NONE) {
+ /* Always unique with no aggregation. */
+ counter->needs_uniquify = true;
+ return true;
+ }
+
+ if (counter->first_wildcard_match != NULL) {
+ /*
+ * If stats are merged then only the first_wildcard_match is
+ * displayed, there is no need to uniquify this evsel as the
+ * name won't be shown.
+ */
+ return false;
+ }
+
+ /*
+ * Do other non-merged events in the evlist have the same name? If so
+ * uniquify is necessary.
+ */
+ evlist__for_each_entry(counter->evlist, evsel) {
+ if (evsel == counter || evsel->first_wildcard_match || evsel->pmu == counter->pmu)
+ continue;
+
+ if (evsel__name_is(counter, evsel__name(evsel))) {
+ counter->needs_uniquify = true;
+ return true;
+ }
+ }
+ return false;
+}
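The final loop is what catches wildcard-expanded events that keep the same name across PMU instances; a hypothetical illustration:

/* Hypothetical evlist after expanding 'uncore_imc/cas_count_read/':
 *
 *   evsel A: name "cas_count_read", pmu "uncore_imc_0"
 *   evsel B: name "cas_count_read", pmu "uncore_imc_1"
 *
 * The names collide across distinct PMUs, so (absent stat merging,
 * where only first_wildcard_match would be displayed) each counter
 * gets needs_uniquify = true and is later renamed by
 * evsel__uniquify_counter().
 */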
+
+void evsel__uniquify_counter(struct evsel *counter)
+{
+ const char *name, *pmu_name;
+ char *new_name, *config;
+ int ret;
+
+ /* No uniquification necessary. */
+ if (!counter->needs_uniquify)
+ return;
+
+ /* The evsel was already uniquified. */
+ if (counter->uniquified_name)
+ return;
+
+ /* Avoid checking to uniquify twice. */
+ counter->uniquified_name = true;
+
+ name = evsel__name(counter);
+ pmu_name = counter->pmu->name;
+ /* Already prefixed by the PMU name. */
+ if (!strncmp(name, pmu_name, strlen(pmu_name)))
+ return;
+
+ config = strchr(name, '/');
+ if (config) {
+ int len = config - name;
+
+ if (config[1] == '/') {
+ /* case: event// */
+ ret = asprintf(&new_name, "%s/%.*s/%s", pmu_name, len, name, config + 2);
+ } else {
+ /* case: event/.../ */
+ ret = asprintf(&new_name, "%s/%.*s,%s", pmu_name, len, name, config + 1);
+ }
+ } else {
+ config = strchr(name, ':');
+ if (config) {
+ /* case: event:.. */
+ int len = config - name;
+
+ ret = asprintf(&new_name, "%s/%.*s/%s", pmu_name, len, name, config + 1);
+ } else {
+ /* case: event */
+ ret = asprintf(&new_name, "%s/%s/", pmu_name, name);
+ }
+ }
+ if (ret > 0) {
+ free(counter->name);
+ counter->name = new_name;
+ } else {
+ /* ENOMEM from asprintf. */
+ counter->uniquified_name = false;
+ }
+}
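Concretely, the branches map the existing spellings onto PMU-qualified ones. Assuming a counter on a hypothetical PMU named cpu_core:

/* Illustrative inputs -> outputs for evsel__uniquify_counter():
 *
 *   "inst_retired.any"      -> "cpu_core/inst_retired.any/"
 *   "inst_retired.any:u"    -> "cpu_core/inst_retired.any/u"
 *   "inst_retired.any//"    -> "cpu_core/inst_retired.any/"
 *   "inst_retired.any/period=1000000/"
 *                           -> "cpu_core/inst_retired.any,period=1000000/"
 */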
+
+void evsel__warn_user_requested_cpus(struct evsel *evsel, struct perf_cpu_map *user_requested_cpus)
+{
+ struct perf_cpu_map *intersect, *online = NULL;
+ const struct perf_pmu *pmu = evsel__find_pmu(evsel);
+
+ if (pmu && pmu->is_core) {
+ intersect = perf_cpu_map__intersect(pmu->cpus, user_requested_cpus);
+ } else {
+ online = cpu_map__online();
+ intersect = perf_cpu_map__intersect(online, user_requested_cpus);
+ }
+ if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
+ char buf1[128];
+ char buf2[128];
+
+ cpu_map__snprint(user_requested_cpus, buf1, sizeof(buf1));
+ cpu_map__snprint(online ?: pmu->cpus, buf2, sizeof(buf2));
+ pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
+ buf1, pmu ? pmu->name : "cpu", buf2, evsel__name(evsel));
+ }
+ perf_cpu_map__put(intersect);
+ perf_cpu_map__put(online);
+}
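On a hybrid system this fires when, for example, the user requests an e-core PMU event on p-core CPUs. A hypothetical instance of the resulting message (PMU name and CPU ranges invented):

WARNING: A requested CPU in '0-7' is not supported by PMU 'cpu_atom' (CPUs 8-15) for event 'cpu_atom/instructions/'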