Diffstat (limited to 'tools/perf/util/evlist.c')
 -rw-r--r--  tools/perf/util/evlist.c | 354 ++++++++++++++++++++++-----------
 1 file changed, 248 insertions(+), 106 deletions(-)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 7ef43f72098e..03674d2cbd01 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -33,6 +33,10 @@
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
+#include "util/env.h"
+#include "util/intel-tpebs.h"
+#include "util/metricgroup.h"
+#include "util/strbuf.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
@@ -46,6 +50,7 @@
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/timerfd.h>
+#include <sys/wait.h>
#include <linux/bitops.h>
#include <linux/hash.h>
@@ -78,6 +83,9 @@ void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
evlist->ctl_fd.fd = -1;
evlist->ctl_fd.ack = -1;
evlist->ctl_fd.pos = -1;
+ evlist->nr_br_cntr = -1;
+ metricgroup__rblist_init(&evlist->metric_events);
+ INIT_LIST_HEAD(&evlist->deferred_samples);
}
struct evlist *evlist__new(void)
@@ -94,16 +102,31 @@ struct evlist *evlist__new_default(void)
{
struct evlist *evlist = evlist__new();
bool can_profile_kernel;
- int err;
+ struct perf_pmu *pmu = NULL;
if (!evlist)
return NULL;
can_profile_kernel = perf_event_paranoid_check(1);
- err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
- if (err) {
- evlist__delete(evlist);
- evlist = NULL;
+
+ while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ char buf[256];
+ int err;
+
+ snprintf(buf, sizeof(buf), "%s/cycles/%s", pmu->name,
+ can_profile_kernel ? "P" : "Pu");
+ err = parse_event(evlist, buf);
+ if (err) {
+ evlist__delete(evlist);
+ return NULL;
+ }
+ }
+
+ if (evlist->core.nr_entries > 1) {
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel)
+ evsel__set_sample_id(evsel, /*can_sample_identifier=*/false);
}
return evlist;
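A hedged standalone sketch of what the new loop produces; the PMU names below are a hypothetical stand-in for perf_pmus__scan_core(). On a hybrid machine each core PMU contributes its own cycles event, so the default evlist ends up with one entry per PMU:

	#include <stdio.h>

	/*
	 * Standalone sketch of the event-string construction above. The
	 * PMU list is assumed, not queried; on such a hybrid system the
	 * default evlist gets one cycles event per core PMU.
	 */
	int main(void)
	{
		const char *core_pmus[] = { "cpu_core", "cpu_atom" };	/* assumed */
		int can_profile_kernel = 0;	/* as if perf_event_paranoid_check(1) failed */
		char buf[256];

		for (size_t i = 0; i < sizeof(core_pmus) / sizeof(core_pmus[0]); i++) {
			snprintf(buf, sizeof(buf), "%s/cycles/%s", core_pmus[i],
				 can_profile_kernel ? "P" : "Pu");
			puts(buf);	/* cpu_core/cycles/Pu, then cpu_atom/cycles/Pu */
		}
		return 0;
	}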
@@ -161,6 +184,7 @@ static void evlist__purge(struct evlist *evlist)
void evlist__exit(struct evlist *evlist)
{
+ metricgroup__rblist_exit(&evlist->metric_events);
event_enable_timer__exit(&evlist->eet);
zfree(&evlist->mmap);
zfree(&evlist->overwrite_mmap);
@@ -251,6 +275,9 @@ static struct evsel *evlist__dummy_event(struct evlist *evlist)
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
+ /* Avoid frequency mode for dummy events; it only creates unneeded timers. */
+ .freq = 0,
+ .sample_period = 1,
};
return evsel__new_idx(&attr, evlist->core.nr_entries);
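Why freq = 0 matters: in frequency mode the kernel arms a timer per event to retune the sampling period, which a dummy event never needs. A minimal sketch, outside perf's internal API, opening the same kind of dummy event directly via the raw syscall:

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/*
	 * Minimal sketch (not perf's internal API): open a software dummy
	 * event with the same attr settings as above. With .freq = 0 and
	 * .sample_period = 1 the kernel never arms the period-adjustment
	 * timer that frequency mode would require.
	 */
	static int open_dummy_event(pid_t pid, int cpu)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_DUMMY;
		attr.size = sizeof(attr);	/* to capture ABI version */
		attr.freq = 0;
		attr.sample_period = 1;

		return syscall(SYS_perf_event_open, &attr, pid, cpu,
			       /*group_fd=*/-1, /*flags=*/0UL);
	}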
@@ -277,8 +304,6 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
evsel->core.attr.exclude_kernel = 1;
evsel->core.attr.exclude_guest = 1;
evsel->core.attr.exclude_hv = 1;
- evsel->core.attr.freq = 0;
- evsel->core.attr.sample_period = 1;
evsel->core.system_wide = system_wide;
evsel->no_aux_samples = true;
evsel->name = strdup("dummy:u");
@@ -290,7 +315,8 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
#ifdef HAVE_LIBTRACEEVENT
struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide)
{
- struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0);
+ struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0,
+ /*format=*/true);
if (IS_ERR(evsel))
return evsel;
@@ -306,62 +332,6 @@ struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide)
}
#endif
-int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
-{
- struct evsel *evsel, *n;
- LIST_HEAD(head);
- size_t i;
-
- for (i = 0; i < nr_attrs; i++) {
- evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
- if (evsel == NULL)
- goto out_delete_partial_list;
- list_add_tail(&evsel->core.node, &head);
- }
-
- evlist__splice_list_tail(evlist, &head);
-
- return 0;
-
-out_delete_partial_list:
- __evlist__for_each_entry_safe(&head, n, evsel)
- evsel__delete(evsel);
- return -1;
-}
-
-int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
-{
- size_t i;
-
- for (i = 0; i < nr_attrs; i++)
- event_attr_init(attrs + i);
-
- return evlist__add_attrs(evlist, attrs, nr_attrs);
-}
-
-__weak int arch_evlist__add_default_attrs(struct evlist *evlist,
- struct perf_event_attr *attrs,
- size_t nr_attrs)
-{
- if (!nr_attrs)
- return 0;
-
- return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
-}
-
-struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
-{
- struct evsel *evsel;
-
- evlist__for_each_entry(evlist, evsel) {
- if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
- (int)evsel->core.attr.config == id)
- return evsel;
- }
-
- return NULL;
-}
-
struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
{
struct evsel *evsel;
@@ -1048,14 +1018,13 @@ int evlist__create_maps(struct evlist *evlist, struct target *target)
* per-thread data. thread_map__new_str will call
* thread_map__new_all_cpus to enumerate all threads.
*/
- threads = thread_map__new_str(target->pid, target->tid, target->uid,
- all_threads);
+ threads = thread_map__new_str(target->pid, target->tid, all_threads);
if (!threads)
return -1;
- if (target__uses_dummy_map(target))
- cpus = perf_cpu_map__dummy_new();
+ if (target__uses_dummy_map(target) && !evlist__has_bpf_output(evlist))
+ cpus = perf_cpu_map__new_any_cpu();
else
cpus = perf_cpu_map__new(target->cpu_list);
@@ -1077,7 +1046,8 @@ out_delete_threads:
return -1;
}
-int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
+int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
+ struct target *target)
{
struct evsel *evsel;
int err = 0;
@@ -1099,7 +1069,7 @@ int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
* non-tracepoint events can have BPF filters.
*/
if (!list_empty(&evsel->bpf_filters)) {
- err = perf_bpf_filter__prepare(evsel);
+ err = perf_bpf_filter__prepare(evsel, target);
if (err) {
*err_evsel = evsel;
break;
@@ -1185,11 +1155,6 @@ int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
return ret;
}
-int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
-{
- return evlist__set_tp_filter_pids(evlist, 1, &pid);
-}
-
int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
char *filter = asprintf__tp_filter_pids(npids, pids);
@@ -1252,6 +1217,72 @@ u64 evlist__combined_branch_type(struct evlist *evlist)
return branch_type;
}
+static struct evsel *
+evlist__find_dup_event_from_prev(struct evlist *evlist, struct evsel *event)
+{
+ struct evsel *pos;
+
+ evlist__for_each_entry(evlist, pos) {
+ if (event == pos)
+ break;
+ if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
+ !strcmp(pos->name, event->name))
+ return pos;
+ }
+ return NULL;
+}
+
+#define MAX_NR_ABBR_NAME (26 * 11)
+
+/*
+ * The abbr name runs from A to Z9. If the number of events
+ * that require a branch counter exceeds MAX_NR_ABBR_NAME,
+ * return "NA".
+ */
+static void evlist__new_abbr_name(char *name)
+{
+ static int idx;
+ int i = idx / 26;
+
+ if (idx >= MAX_NR_ABBR_NAME) {
+ name[0] = 'N';
+ name[1] = 'A';
+ name[2] = '\0';
+ return;
+ }
+
+ name[0] = 'A' + (idx % 26);
+
+ if (!i)
+ name[1] = '\0';
+ else {
+ name[1] = '0' + i - 1;
+ name[2] = '\0';
+ }
+
+ idx++;
+}
+
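A standalone re-run of the naming scheme (same logic as above, lifted out of perf for demonstration) showing the sequence it yields: A..Z for the first 26 events, then A0..Z0 up through A9..Z9, and "NA" once all 286 names are used:

	#include <stdio.h>

	#define MAX_NR_ABBR_NAME (26 * 11)

	/* Same logic as evlist__new_abbr_name(), extracted for demonstration. */
	static void new_abbr_name(char *name)
	{
		static int idx;
		int i = idx / 26;

		if (idx >= MAX_NR_ABBR_NAME) {
			name[0] = 'N';
			name[1] = 'A';
			name[2] = '\0';
			return;
		}
		name[0] = 'A' + (idx % 26);
		if (!i) {
			name[1] = '\0';
		} else {
			name[1] = '0' + i - 1;
			name[2] = '\0';
		}
		idx++;
	}

	int main(void)
	{
		char name[3];

		/* Prints A .. Z, A0 .. Z0, ..., A9 .. Z9, then NA NA NA NA. */
		for (int n = 0; n < MAX_NR_ABBR_NAME + 4; n++) {
			new_abbr_name(name);
			printf("%s ", name);
		}
		putchar('\n');
		return 0;
	}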
+void evlist__update_br_cntr(struct evlist *evlist)
+{
+ struct evsel *evsel, *dup;
+ int i = 0;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) {
+ evsel->br_cntr_idx = i++;
+ evsel__leader(evsel)->br_cntr_nr++;
+
+ dup = evlist__find_dup_event_from_prev(evlist, evsel);
+ if (dup)
+ memcpy(evsel->abbr_name, dup->abbr_name, 3 * sizeof(char));
+ else
+ evlist__new_abbr_name(evsel->abbr_name);
+ }
+ }
+ evlist->nr_br_cntr = i;
+}
+
bool evlist__valid_read_format(struct evlist *evlist)
{
struct evsel *first = evlist__first(evlist), *pos = first;
@@ -1351,21 +1382,20 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
* error, and we may not want to do that fallback to a
* default cpu identity map :-\
*/
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus)
- goto out;
+ return -ENOMEM;
threads = perf_thread_map__new_dummy();
- if (!threads)
- goto out_put;
+ if (!threads) {
+ perf_cpu_map__put(cpus);
+ return -ENOMEM;
+ }
perf_evlist__set_maps(&evlist->core, cpus, threads);
-
perf_thread_map__put(threads);
-out_put:
perf_cpu_map__put(cpus);
-out:
- return -ENOMEM;
+ return 0;
}
int evlist__open(struct evlist *evlist)
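Beyond dropping the gotos, the restructuring above fixes the return value: the old fall-through reached `return -ENOMEM` even when both maps were created successfully. A generic sketch of the resulting early-return pattern, with hypothetical resources standing in for the CPU and thread maps:

	#include <errno.h>
	#include <stdlib.h>

	/* Hypothetical resources standing in for the CPU and thread maps. */
	static int create_both(void **a, void **b)
	{
		*a = malloc(16);
		if (!*a)
			return -ENOMEM;

		*b = malloc(16);
		if (!*b) {
			free(*a);	/* release what we already own, then bail */
			*a = NULL;
			return -ENOMEM;
		}
		return 0;		/* the success path is now explicit */
	}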
@@ -1404,6 +1434,8 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const
int child_ready_pipe[2], go_pipe[2];
char bf;
+ evlist->workload.cork_fd = -1;
+
if (pipe(child_ready_pipe) < 0) {
perror("failed to create 'ready' pipe");
return -1;
@@ -1456,7 +1488,7 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const
* For cancelling the workload without actually running it,
* the parent will just close workload.cork_fd, without writing
* anything, i.e. read will return zero and we just exit()
- * here.
+ * here (See evlist__cancel_workload()).
*/
if (ret != 1) {
if (ret == -1)
@@ -1520,7 +1552,7 @@ out_close_ready_pipe:
int evlist__start_workload(struct evlist *evlist)
{
- if (evlist->workload.cork_fd > 0) {
+ if (evlist->workload.cork_fd >= 0) {
char bf = 0;
int ret;
/*
@@ -1531,12 +1563,24 @@ int evlist__start_workload(struct evlist *evlist)
perror("unable to write to pipe");
close(evlist->workload.cork_fd);
+ evlist->workload.cork_fd = -1;
return ret;
}
return 0;
}
+void evlist__cancel_workload(struct evlist *evlist)
+{
+ int status;
+
+ if (evlist->workload.cork_fd >= 0) {
+ close(evlist->workload.cork_fd);
+ evlist->workload.cork_fd = -1;
+ waitpid(evlist->workload.pid, &status, WNOHANG);
+ }
+}
+
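A standalone sketch of the cork-pipe handshake used by evlist__prepare_workload(), evlist__start_workload() and evlist__cancel_workload(), simplified: no exec setup and no 'ready' pipe. The child blocks in read() until the parent either writes one byte (start) or just closes its end (cancel: read() returns 0 and the child exits without ever running the workload):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		int go_pipe[2];
		char bf;

		if (pipe(go_pipe) < 0) {
			perror("pipe");
			return 1;
		}

		if (fork() == 0) {		/* child: the workload-to-be */
			close(go_pipe[1]);
			if (read(go_pipe[0], &bf, 1) != 1)
				_exit(1);	/* cancelled before starting */
			execlp("true", "true", (char *)NULL);
			_exit(127);
		}

		close(go_pipe[0]);
		close(go_pipe[1]);	/* cancel; write(go_pipe[1], &bf, 1) would start */
		wait(NULL);		/* reap the child (perf uses waitpid(..., WNOHANG)) */
		return 0;
	}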
int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
struct evsel *evsel = evlist__event2evsel(evlist, event);
@@ -1694,6 +1738,24 @@ void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_ev
tracking_evsel->tracking = true;
}
+struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide)
+{
+ struct evsel *evsel;
+
+ evsel = evlist__get_tracking_event(evlist);
+ if (!evsel__is_dummy_event(evsel)) {
+ evsel = evlist__add_aux_dummy(evlist, system_wide);
+ if (!evsel)
+ return NULL;
+
+ evlist__set_tracking_event(evlist, evsel);
+ } else if (system_wide) {
+ perf_evlist__go_system_wide(&evlist->core, &evsel->core);
+ }
+
+ return evsel;
+}
+
struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
struct evsel *evsel;
@@ -2417,23 +2479,36 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
return NULL;
}
-int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
+void evlist__format_evsels(struct evlist *evlist, struct strbuf *sb, size_t max_length)
{
- struct evsel *evsel;
- int printed = 0;
+ struct evsel *evsel, *leader = NULL;
+ bool first = true;
evlist__for_each_entry(evlist, evsel) {
+ struct evsel *new_leader = evsel__leader(evsel);
+
if (evsel__is_dummy_event(evsel))
continue;
- if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
- } else {
- printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
- break;
+
+ if (leader != new_leader && leader && leader->core.nr_members > 1)
+ strbuf_addch(sb, '}');
+
+ if (!first)
+ strbuf_addch(sb, ',');
+
+ if (sb->len > max_length) {
+ strbuf_addstr(sb, "...");
+ return;
}
- }
+ if (leader != new_leader && new_leader->core.nr_members > 1)
+ strbuf_addch(sb, '{');

- return printed;
+ strbuf_addstr(sb, evsel__name(evsel));
+ first = false;
+ leader = new_leader;
+ }
+ if (leader && leader->core.nr_members > 1)
+ strbuf_addch(sb, '}');
}
void evlist__check_mem_load_aux(struct evlist *evlist)
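For illustration, the brace/comma layout the new formatter produces, mimicked with plain snprintf on a hypothetical event list (a two-member group plus a standalone event):

	#include <stdio.h>

	/*
	 * Illustration only: plain snprintf stands in for perf's strbuf. A
	 * hypothetical evlist with one two-member group and one standalone
	 * event formats as "{cycles,instructions},branch-misses"; past
	 * max_length, evlist__format_evsels() appends "..." and stops.
	 */
	int main(void)
	{
		const char *group[] = { "cycles", "instructions" };
		char buf[128];
		int len = 0;

		len += snprintf(buf + len, sizeof(buf) - len, "{");
		for (int i = 0; i < 2; i++)
			len += snprintf(buf + len, sizeof(buf) - len, "%s%s",
					i ? "," : "", group[i]);
		len += snprintf(buf + len, sizeof(buf) - len, "},branch-misses");
		puts(buf);	/* {cycles,instructions},branch-misses */
		return 0;
	}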
@@ -2483,19 +2558,86 @@ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_lis
return;
evlist__for_each_entry(evlist, pos) {
- struct perf_cpu_map *intersect, *to_test;
- const struct perf_pmu *pmu = evsel__find_pmu(pos);
+ evsel__warn_user_requested_cpus(pos, user_requested_cpus);
+ }
+ perf_cpu_map__put(user_requested_cpus);
+}

- to_test = pmu && pmu->is_core ? pmu->cpus : cpu_map__online();
- intersect = perf_cpu_map__intersect(to_test, user_requested_cpus);
- if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
- char buf[128];
+/* Should uniquify be disabled for the evlist? */
+static bool evlist__disable_uniquify(const struct evlist *evlist)
+{
+ struct evsel *counter;
+ struct perf_pmu *last_pmu = NULL;
+ bool first = true;

- cpu_map__snprint(to_test, buf, sizeof(buf));
- pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
- cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos));
+ evlist__for_each_entry(evlist, counter) {
+ /* If PMUs vary then uniquify can be useful. */
+ if (!first && counter->pmu != last_pmu)
+ return false;
+ first = false;
+ if (counter->pmu) {
+ /* Allow uniquify for uncore PMUs. */
+ if (!counter->pmu->is_core)
+ return false;
+ /* Keep hybrid event names uniquified for clarity. */
+ if (perf_pmus__num_core_pmus() > 1)
+ return false;
}
- perf_cpu_map__put(intersect);
+ last_pmu = counter->pmu;
}
- perf_cpu_map__put(user_requested_cpus);
+ return true;
+}
+
+static bool evlist__set_needs_uniquify(struct evlist *evlist, const struct perf_stat_config *config)
+{
+ struct evsel *counter;
+ bool needs_uniquify = false;
+
+ if (evlist__disable_uniquify(evlist)) {
+ evlist__for_each_entry(evlist, counter)
+ counter->uniquified_name = true;
+ return false;
+ }
+
+ evlist__for_each_entry(evlist, counter) {
+ if (evsel__set_needs_uniquify(counter, config))
+ needs_uniquify = true;
+ }
+ return needs_uniquify;
+}
+
+void evlist__uniquify_evsel_names(struct evlist *evlist, const struct perf_stat_config *config)
+{
+ if (evlist__set_needs_uniquify(evlist, config)) {
+ struct evsel *pos;
+
+ evlist__for_each_entry(evlist, pos)
+ evsel__uniquify_counter(pos);
+ }
+}
+
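The point of uniquification, illustrated. The exact output format comes from evsel__uniquify_counter(), which is outside this diff; the pmu/event/ style below is an assumption. Identical event names opened on different uncore PMUs stay distinguishable in `perf stat` output:

	#include <stdio.h>

	/* Hypothetical helper, not perf's evsel__uniquify_counter(). */
	static void uniquify_name(char *dst, size_t sz, const char *pmu,
				  const char *event)
	{
		snprintf(dst, sz, "%s/%s/", pmu, event);
	}

	int main(void)
	{
		char buf[64];

		uniquify_name(buf, sizeof(buf), "uncore_imc_0", "cas_count_read");
		puts(buf);	/* uncore_imc_0/cas_count_read/ */
		uniquify_name(buf, sizeof(buf), "uncore_imc_1", "cas_count_read");
		puts(buf);	/* uncore_imc_1/cas_count_read/ */
		return 0;
	}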
+bool evlist__has_bpf_output(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_bpf_output(evsel))
+ return true;
+ }
+
+ return false;
+}
+
+bool evlist__needs_bpf_sb_event(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_dummy_event(evsel))
+ continue;
+ if (!evsel->core.attr.exclude_kernel)
+ return true;
+ }
+
+ return false;
}