Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r--	tools/perf/util/evlist.c	860
1 file changed, 654 insertions(+), 206 deletions(-)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index eaad04e1672a..03674d2cbd01 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -15,6 +15,7 @@
 #include "target.h"
 #include "evlist.h"
 #include "evsel.h"
+#include "record.h"
 #include "debug.h"
 #include "units.h"
 #include "bpf_counter.h"
@@ -23,11 +24,19 @@
 #include "../perf.h"
 #include "asm/bug.h"
 #include "bpf-event.h"
+#include "util/event.h"
 #include "util/string2.h"
 #include "util/perf_api_probe.h"
 #include "util/evsel_fprintf.h"
-#include "util/evlist-hybrid.h"
 #include "util/pmu.h"
+#include "util/sample.h"
+#include "util/bpf-filter.h"
+#include "util/stat.h"
+#include "util/util.h"
+#include "util/env.h"
+#include "util/intel-tpebs.h"
+#include "util/metricgroup.h"
+#include "util/strbuf.h"
 #include <signal.h>
 #include <unistd.h>
 #include <sched.h>
@@ -40,12 +49,15 @@
 #include <sys/ioctl.h>
 #include <sys/mman.h>
 #include <sys/prctl.h>
+#include <sys/timerfd.h>
+#include <sys/wait.h>
 
 #include <linux/bitops.h>
 #include <linux/hash.h>
 #include <linux/log2.h>
 #include <linux/err.h>
 #include <linux/string.h>
+#include <linux/time64.h>
 #include <linux/zalloc.h>
 #include <perf/evlist.h>
 #include <perf/evsel.h>
@@ -71,6 +83,9 @@ void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
 	evlist->ctl_fd.fd = -1;
 	evlist->ctl_fd.ack = -1;
 	evlist->ctl_fd.pos = -1;
+	evlist->nr_br_cntr = -1;
+	metricgroup__rblist_init(&evlist->metric_events);
+	INIT_LIST_HEAD(&evlist->deferred_samples);
 }
 
 struct evlist *evlist__new(void)
@@ -86,10 +101,32 @@ struct evlist *evlist__new(void)
 
 struct evlist *evlist__new_default(void)
 {
 	struct evlist *evlist = evlist__new();
+	bool can_profile_kernel;
+	struct perf_pmu *pmu = NULL;
 
-	if (evlist && evlist__add_default(evlist)) {
-		evlist__delete(evlist);
-		evlist = NULL;
+	if (!evlist)
+		return NULL;
+
+	can_profile_kernel = perf_event_paranoid_check(1);
+
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+		char buf[256];
+		int err;
+
+		snprintf(buf, sizeof(buf), "%s/cycles/%s", pmu->name,
+			 can_profile_kernel ? "P" : "Pu");
+		err = parse_event(evlist, buf);
+		if (err) {
+			evlist__delete(evlist);
+			return NULL;
+		}
+	}
+
+	if (evlist->core.nr_entries > 1) {
+		struct evsel *evsel;
+
+		evlist__for_each_entry(evlist, evsel)
+			evsel__set_sample_id(evsel, /*can_sample_identifier=*/false);
 	}
 
 	return evlist;
@@ -147,6 +184,8 @@ static void evlist__purge(struct evlist *evlist)
 
 void evlist__exit(struct evlist *evlist)
 {
+	metricgroup__rblist_exit(&evlist->metric_events);
+	event_enable_timer__exit(&evlist->eet);
 	zfree(&evlist->mmap);
 	zfree(&evlist->overwrite_mmap);
 	perf_evlist__exit(&evlist->core);
@@ -157,6 +196,7 @@ void evlist__delete(struct evlist *evlist)
 	if (evlist == NULL)
 		return;
 
+	evlist__free_stats(evlist);
 	evlist__munmap(evlist);
 	evlist__close(evlist);
 	evlist__purge(evlist);
@@ -224,32 +264,28 @@ out:
 	return err;
 }
 
-void evlist__set_leader(struct evlist *evlist)
+static void evlist__set_leader(struct evlist *evlist)
 {
 	perf_evlist__set_leader(&evlist->core);
 }
 
-int __evlist__add_default(struct evlist *evlist, bool precise)
-{
-	struct evsel *evsel;
-
-	evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
-				  PERF_COUNT_HW_CPU_CYCLES);
-	if (evsel == NULL)
-		return -ENOMEM;
-
-	evlist__add(evlist, evsel);
-	return 0;
-}
-
-int evlist__add_dummy(struct evlist *evlist)
+static struct evsel *evlist__dummy_event(struct evlist *evlist)
 {
 	struct perf_event_attr attr = {
 		.type	= PERF_TYPE_SOFTWARE,
 		.config = PERF_COUNT_SW_DUMMY,
 		.size	= sizeof(attr), /* to capture ABI version */
+		/* Avoid frequency mode for dummy events to avoid associated timers. */
+		.freq = 0,
+		.sample_period = 1,
 	};
-	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);
+
+	return evsel__new_idx(&attr, evlist->core.nr_entries);
+}
+
+int evlist__add_dummy(struct evlist *evlist)
+{
+	struct evsel *evsel = evlist__dummy_event(evlist);
 
 	if (evsel == NULL)
 		return -ENOMEM;
@@ -258,56 +294,43 @@ int evlist__add_dummy(struct evlist *evlist)
 	return 0;
 }
 
-static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
+struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
 {
-	struct evsel *evsel, *n;
-	LIST_HEAD(head);
-	size_t i;
-
-	for (i = 0; i < nr_attrs; i++) {
-		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
-		if (evsel == NULL)
-			goto out_delete_partial_list;
-		list_add_tail(&evsel->core.node, &head);
-	}
+	struct evsel *evsel = evlist__dummy_event(evlist);
 
-	evlist__splice_list_tail(evlist, &head);
+	if (!evsel)
+		return NULL;
 
-	return 0;
+	evsel->core.attr.exclude_kernel = 1;
+	evsel->core.attr.exclude_guest = 1;
+	evsel->core.attr.exclude_hv = 1;
+	evsel->core.system_wide = system_wide;
+	evsel->no_aux_samples = true;
+	evsel->name = strdup("dummy:u");
 
-out_delete_partial_list:
-	__evlist__for_each_entry_safe(&head, n, evsel)
-		evsel__delete(evsel);
-	return -1;
+	evlist__add(evlist, evsel);
+	return evsel;
 }
 
-int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
+#ifdef HAVE_LIBTRACEEVENT
+struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide)
 {
-	size_t i;
-
-	for (i = 0; i < nr_attrs; i++)
-		event_attr_init(attrs + i);
+	struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0,
+					       /*format=*/true);
 
-	return evlist__add_attrs(evlist, attrs, nr_attrs);
-}
-
-__weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused)
-{
-	return 0;
-}
+	if (IS_ERR(evsel))
+		return evsel;
 
-struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
-{
-	struct evsel *evsel;
+	evsel__set_sample_bit(evsel, CPU);
+	evsel__set_sample_bit(evsel, TIME);
 
-	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
-		    (int)evsel->core.attr.config == id)
-			return evsel;
-	}
+	evsel->core.system_wide = system_wide;
+	evsel->no_aux_samples = true;
 
-	return NULL;
+	evlist__add(evlist, evsel);
+	return evsel;
 }
+#endif
 
 struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
 {
@@ -322,6 +345,7 @@ struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char
 	return NULL;
 }
 
+#ifdef HAVE_LIBTRACEEVENT
 int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
 {
 	struct evsel *evsel = evsel__newtp(sys, name);
@@ -333,20 +357,13 @@ int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name,
 	evlist__add(evlist, evsel);
 	return 0;
 }
+#endif
 
-static int evlist__nr_threads(struct evlist *evlist, struct evsel *evsel)
-{
-	if (evsel->core.system_wide)
-		return 1;
-	else
-		return perf_thread_map__nr(evlist->core.threads);
-}
-
 struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
 {
 	struct evlist_cpu_iterator itr = {
 		.container = evlist,
-		.evsel = evlist__first(evlist),
+		.evsel = NULL,
 		.cpu_map_idx = 0,
 		.evlist_cpu_map_idx = 0,
 		.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
@@ -354,16 +371,22 @@ struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affin
 		.affinity = affinity,
 	};
 
-	if (itr.affinity) {
-		itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
-		affinity__set(itr.affinity, itr.cpu.cpu);
-		itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
-		/*
-		 * If this CPU isn't in the evsel's cpu map then advance through
-		 * the list.
-		 */
-		if (itr.cpu_map_idx == -1)
-			evlist_cpu_iterator__next(&itr);
+	if (evlist__empty(evlist)) {
+		/* Ensure the empty list doesn't iterate. */
+		itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr;
+	} else {
+		itr.evsel = evlist__first(evlist);
+		if (itr.affinity) {
+			itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
+			affinity__set(itr.affinity, itr.cpu.cpu);
+			itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
+			/*
+			 * If this CPU isn't in the evsel's cpu map then advance
+			 * through the list.
+			 */
+			if (itr.cpu_map_idx == -1)
+				evlist_cpu_iterator__next(&itr);
+		}
 	}
 	return itr;
 }
@@ -409,7 +432,7 @@ static int evsel__strcmp(struct evsel *pos, char *evsel_name)
 		return 0;
 	if (evsel__is_dummy_event(pos))
 		return 1;
-	return strcmp(pos->name, evsel_name);
+	return !evsel__name_is(pos, evsel_name);
 }
 
 static int evlist__is_enabled(struct evlist *evlist)
@@ -426,7 +449,7 @@ static int evlist__is_enabled(struct evlist *evlist)
 	return false;
 }
 
-static void __evlist__disable(struct evlist *evlist, char *evsel_name)
+static void __evlist__disable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
 {
 	struct evsel *pos;
 	struct evlist_cpu_iterator evlist_cpu_itr;
@@ -434,7 +457,7 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 	bool has_imm = false;
 
 	// See explanation in evlist__close()
-	if (!cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		if (affinity__setup(&saved_affinity) < 0)
 			return;
 		affinity = &saved_affinity;
@@ -448,6 +471,8 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 				continue;
 			if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
 				continue;
+			if (excl_dummy && evsel__is_dummy_event(pos))
+				continue;
 			if (pos->immediate)
 				has_imm = true;
 			if (pos->immediate != imm)
@@ -464,6 +489,8 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 			continue;
 		if (!evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
+		if (excl_dummy && evsel__is_dummy_event(pos))
+			continue;
 		pos->disabled = true;
 	}
 
@@ -479,22 +506,27 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 
 void evlist__disable(struct evlist *evlist)
 {
-	__evlist__disable(evlist, NULL);
+	__evlist__disable(evlist, NULL, false);
+}
+
+void evlist__disable_non_dummy(struct evlist *evlist)
+{
+	__evlist__disable(evlist, NULL, true);
 }
 
 void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
 {
-	__evlist__disable(evlist, evsel_name);
+	__evlist__disable(evlist, evsel_name, false);
 }
 
-static void __evlist__enable(struct evlist *evlist, char *evsel_name)
+static void __evlist__enable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
 {
 	struct evsel *pos;
 	struct evlist_cpu_iterator evlist_cpu_itr;
 	struct affinity saved_affinity, *affinity = NULL;
 
 	// See explanation in evlist__close()
-	if (!cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		if (affinity__setup(&saved_affinity) < 0)
 			return;
 		affinity = &saved_affinity;
@@ -506,6 +538,8 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
 			continue;
 		if (!evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
+		if (excl_dummy && evsel__is_dummy_event(pos))
+			continue;
 		evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
 	}
 	affinity__cleanup(affinity);
@@ -514,6 +548,8 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
 			continue;
 		if (!evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
+		if (excl_dummy && evsel__is_dummy_event(pos))
+			continue;
 		pos->disabled = false;
 	}
 
@@ -527,59 +563,22 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
 
 void evlist__enable(struct evlist *evlist)
 {
-	__evlist__enable(evlist, NULL);
+	__evlist__enable(evlist, NULL, false);
 }
 
-void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
+void evlist__enable_non_dummy(struct evlist *evlist)
 {
-	__evlist__enable(evlist, evsel_name);
-}
-
-void evlist__toggle_enable(struct evlist *evlist)
-{
-	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
-}
-
-static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel, int cpu)
-{
-	int thread;
-	int nr_threads = evlist__nr_threads(evlist, evsel);
-
-	if (!evsel->core.fd)
-		return -EINVAL;
-
-	for (thread = 0; thread < nr_threads; thread++) {
-		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
-		if (err)
-			return err;
-	}
-	return 0;
+	__evlist__enable(evlist, NULL, true);
 }
 
-static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
+void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
 {
-	int cpu;
-	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
-
-	if (!evsel->core.fd)
-		return -EINVAL;
-
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
-		if (err)
-			return err;
-	}
-	return 0;
+	__evlist__enable(evlist, evsel_name, false);
 }
 
-int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
+void evlist__toggle_enable(struct evlist *evlist)
 {
-	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
-
-	if (per_cpu_mmaps)
-		return evlist__enable_event_cpu(evlist, evsel, idx);
-
-	return evlist__enable_event_thread(evlist, evsel, idx);
+	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
 }
 
 int evlist__add_pollfd(struct evlist *evlist, int fd)
@@ -596,7 +595,8 @@ int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
 int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
 {
 	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
-				       fdarray_flag__nonfilterable);
+				       fdarray_flag__nonfilterable |
+				       fdarray_flag__non_perf_event);
 }
 #endif
 
@@ -791,13 +791,15 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
 }
 
 static void perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
+				     struct perf_evsel *_evsel,
 				     struct perf_mmap_param *_mp,
-				     int idx, bool per_cpu)
+				     int idx)
 {
 	struct evlist *evlist = container_of(_evlist, struct evlist, core);
 	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
+	struct evsel *evsel = container_of(_evsel, struct evsel, core);
 
-	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
+	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
 }
 
 static struct perf_mmap*
@@ -1016,21 +1018,20 @@ int evlist__create_maps(struct evlist *evlist, struct target *target)
 	 * per-thread data. thread_map__new_str will call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
-	threads = thread_map__new_str(target->pid, target->tid, target->uid,
-				      all_threads);
+	threads = thread_map__new_str(target->pid, target->tid, all_threads);
 
 	if (!threads)
 		return -1;
 
-	if (target__uses_dummy_map(target))
-		cpus = perf_cpu_map__dummy_new();
+	if (target__uses_dummy_map(target) && !evlist__has_bpf_output(evlist))
+		cpus = perf_cpu_map__new_any_cpu();
 	else
 		cpus = perf_cpu_map__new(target->cpu_list);
 
 	if (!cpus)
 		goto out_delete_threads;
 
-	evlist->core.has_user_cpus = !!target->cpu_list && !target->hybrid;
+	evlist->core.has_user_cpus = !!target->cpu_list;
 
 	perf_evlist__set_maps(&evlist->core, cpus, threads);
 
@@ -1045,23 +1046,34 @@ out_delete_threads:
 	return -1;
 }
 
-int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
+int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
+			  struct target *target)
 {
 	struct evsel *evsel;
 	int err = 0;
 
 	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->filter == NULL)
-			continue;
-
 		/*
 		 * filters only work for tracepoint event, which doesn't have cpu limit.
 		 * So evlist and evsel should always be same.
 		 */
-		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
-		if (err) {
-			*err_evsel = evsel;
-			break;
+		if (evsel->filter) {
+			err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
+			if (err) {
+				*err_evsel = evsel;
+				break;
+			}
+		}
+
+		/*
+		 * non-tracepoint events can have BPF filters.
+		 */
+		if (!list_empty(&evsel->bpf_filters)) {
+			err = perf_bpf_filter__prepare(evsel, target);
+			if (err) {
+				*err_evsel = evsel;
+				break;
+			}
 		}
 	}
 
@@ -1143,11 +1155,6 @@ int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids
 	return ret;
 }
 
-int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
-{
-	return evlist__set_tp_filter_pids(evlist, 1, &pid);
-}
-
 int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
 {
 	char *filter = asprintf__tp_filter_pids(npids, pids);
@@ -1210,6 +1217,72 @@ u64 evlist__combined_branch_type(struct evlist *evlist)
 	return branch_type;
 }
 
+static struct evsel *
+evlist__find_dup_event_from_prev(struct evlist *evlist, struct evsel *event)
+{
+	struct evsel *pos;
+
+	evlist__for_each_entry(evlist, pos) {
+		if (event == pos)
+			break;
+		if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
+		    !strcmp(pos->name, event->name))
+			return pos;
+	}
+	return NULL;
+}
+
+#define MAX_NR_ABBR_NAME	(26 * 11)
+
+/*
+ * The abbr name is from A to Z9. If the number of event
+ * which requires the branch counter > MAX_NR_ABBR_NAME,
+ * return NA.
+ */
+static void evlist__new_abbr_name(char *name)
+{
+	static int idx;
+	int i = idx / 26;
+
+	if (idx >= MAX_NR_ABBR_NAME) {
+		name[0] = 'N';
+		name[1] = 'A';
+		name[2] = '\0';
+		return;
+	}
+
+	name[0] = 'A' + (idx % 26);
+
+	if (!i)
+		name[1] = '\0';
+	else {
+		name[1] = '0' + i - 1;
+		name[2] = '\0';
+	}
+
+	idx++;
+}
+
+void evlist__update_br_cntr(struct evlist *evlist)
+{
+	struct evsel *evsel, *dup;
+	int i = 0;
+
+	evlist__for_each_entry(evlist, evsel) {
+		if (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) {
+			evsel->br_cntr_idx = i++;
+			evsel__leader(evsel)->br_cntr_nr++;
+
+			dup = evlist__find_dup_event_from_prev(evlist, evsel);
+			if (dup)
+				memcpy(evsel->abbr_name, dup->abbr_name, 3 * sizeof(char));
+			else
+				evlist__new_abbr_name(evsel->abbr_name);
+		}
+	}
+	evlist->nr_br_cntr = i;
+}
+
 bool evlist__valid_read_format(struct evlist *evlist)
 {
 	struct evsel *first = evlist__first(evlist), *pos = first;
@@ -1235,34 +1308,8 @@ bool evlist__valid_read_format(struct evlist *evlist)
 u16 evlist__id_hdr_size(struct evlist *evlist)
 {
 	struct evsel *first = evlist__first(evlist);
-	struct perf_sample *data;
-	u64 sample_type;
-	u16 size = 0;
-
-	if (!first->core.attr.sample_id_all)
-		goto out;
-
-	sample_type = first->core.attr.sample_type;
-
-	if (sample_type & PERF_SAMPLE_TID)
-		size += sizeof(data->tid) * 2;
-
-	if (sample_type & PERF_SAMPLE_TIME)
-		size += sizeof(data->time);
-
-	if (sample_type & PERF_SAMPLE_ID)
-		size += sizeof(data->id);
-
-	if (sample_type & PERF_SAMPLE_STREAM_ID)
-		size += sizeof(data->stream_id);
-
-	if (sample_type & PERF_SAMPLE_CPU)
-		size += sizeof(data->cpu) * 2;
-
-	if (sample_type & PERF_SAMPLE_IDENTIFIER)
-		size += sizeof(data->id);
-out:
-	return size;
+
+	return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0;
 }
 
 bool evlist__valid_sample_id_all(struct evlist *evlist)
@@ -1295,10 +1342,11 @@ void evlist__close(struct evlist *evlist)
 	struct affinity affinity;
 
 	/*
-	 * With perf record core.cpus is usually NULL.
+	 * With perf record core.user_requested_cpus is usually NULL.
 	 * Use the old method to handle this for now.
	 */
-	if (!evlist->core.cpus || cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!evlist->core.user_requested_cpus ||
+	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		evlist__for_each_entry_reverse(evlist, evsel)
 			evsel__close(evsel);
 		return;
@@ -1324,7 +1372,6 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
 {
 	struct perf_cpu_map *cpus;
 	struct perf_thread_map *threads;
-	int err = -ENOMEM;
 
 	/*
 	 * Try reading /sys/devices/system/cpu/online to get
@@ -1335,21 +1382,20 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
 	 * error, and we may not want to do that fallback to a
 	 * default cpu identity map :-\
 	 */
-	cpus = perf_cpu_map__new(NULL);
+	cpus = perf_cpu_map__new_online_cpus();
 	if (!cpus)
-		goto out;
+		return -ENOMEM;
 
 	threads = perf_thread_map__new_dummy();
-	if (!threads)
-		goto out_put;
+	if (!threads) {
+		perf_cpu_map__put(cpus);
+		return -ENOMEM;
+	}
 
 	perf_evlist__set_maps(&evlist->core, cpus, threads);
-
 	perf_thread_map__put(threads);
-out_put:
 	perf_cpu_map__put(cpus);
-out:
-	return err;
+	return 0;
 }
 
 int evlist__open(struct evlist *evlist)
@@ -1361,7 +1407,7 @@ int evlist__open(struct evlist *evlist)
 	 * Default: one fd per CPU, all threads, aka systemwide
 	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
-	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
+	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
 		err = evlist__create_syswide_maps(evlist);
 		if (err < 0)
 			goto out_err;
@@ -1388,6 +1434,8 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const
 	int child_ready_pipe[2], go_pipe[2];
 	char bf;
 
+	evlist->workload.cork_fd = -1;
+
 	if (pipe(child_ready_pipe) < 0) {
 		perror("failed to create 'ready' pipe");
 		return -1;
@@ -1440,7 +1488,7 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const
 		 * For cancelling the workload without actually running it,
 		 * the parent will just close workload.cork_fd, without writing
 		 * anything, i.e. read will return zero and we just exit()
-		 * here.
+		 * here (See evlist__cancel_workload()).
		 */
 		if (ret != 1) {
 			if (ret == -1)
@@ -1504,7 +1552,7 @@ out_close_ready_pipe:
 
 int evlist__start_workload(struct evlist *evlist)
 {
-	if (evlist->workload.cork_fd > 0) {
+	if (evlist->workload.cork_fd >= 0) {
 		char bf = 0;
 		int ret;
 		/*
@@ -1515,19 +1563,43 @@ int evlist__start_workload(struct evlist *evlist)
 			perror("unable to write to pipe");
 
 		close(evlist->workload.cork_fd);
+		evlist->workload.cork_fd = -1;
 		return ret;
 	}
 
 	return 0;
 }
 
+void evlist__cancel_workload(struct evlist *evlist)
+{
+	int status;
+
+	if (evlist->workload.cork_fd >= 0) {
+		close(evlist->workload.cork_fd);
+		evlist->workload.cork_fd = -1;
+		waitpid(evlist->workload.pid, &status, WNOHANG);
+	}
+}
+
 int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
 {
 	struct evsel *evsel = evlist__event2evsel(evlist, event);
+	int ret;
 
 	if (!evsel)
 		return -EFAULT;
-	return evsel__parse_sample(evsel, event, sample);
+	ret = evsel__parse_sample(evsel, event, sample);
+	if (ret)
+		return ret;
+	if (perf_guest && sample->id) {
+		struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id);
+
+		if (sid) {
+			sample->machine_pid = sid->machine_pid;
+			sample->vcpu = sid->vcpu.cpu;
+		}
+	}
+	return 0;
 }
 
 int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
@@ -1666,6 +1738,24 @@ void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_ev
 	tracking_evsel->tracking = true;
 }
 
+struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide)
+{
+	struct evsel *evsel;
+
+	evsel = evlist__get_tracking_event(evlist);
+	if (!evsel__is_dummy_event(evsel)) {
+		evsel = evlist__add_aux_dummy(evlist, system_wide);
+		if (!evsel)
+			return NULL;
+
+		evlist__set_tracking_event(evlist, evsel);
+	} else if (system_wide) {
+		perf_evlist__go_system_wide(&evlist->core, &evsel->core);
+	}
+
+	return evsel;
+}
+
 struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
 {
 	struct evsel *evsel;
@@ -1673,7 +1763,7 @@ struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
 	evlist__for_each_entry(evlist, evsel) {
 		if (!evsel->name)
 			continue;
-		if (strcmp(str, evsel->name) == 0)
+		if (evsel__name_is(evsel, str))
 			return evsel;
 	}
 
@@ -1756,7 +1846,7 @@ bool evlist__exclude_kernel(struct evlist *evlist)
  */
 void evlist__force_leader(struct evlist *evlist)
 {
-	if (!evlist->core.nr_groups) {
+	if (evlist__nr_groups(evlist) == 0) {
 		struct evsel *leader = evlist__first(evlist);
 
 		evlist__set_leader(evlist);
@@ -1784,8 +1874,13 @@ struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *
 		if (evsel__has_leader(c2, leader)) {
 			if (is_open && close)
 				perf_evsel__close(&c2->core);
-			evsel__set_leader(c2, c2);
-			c2->core.nr_members = 0;
+			/*
+			 * We want to close all members of the group and reopen
+			 * them. Some events, like Intel topdown, require being
+			 * in a group and so keep these in the group.
			 */
+			evsel__remove_from_group(c2, leader);
+
 			/*
 			 * Set this for all former members of the group
 			 * to indicate they get reopened.
@@ -1793,6 +1888,9 @@ struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *
 			c2->reset_group = true;
 		}
 	}
+	/* Reset the leader count if all entries were removed. */
+	if (leader->core.nr_members == 1)
+		leader->core.nr_members = 0;
 	return leader;
 }
 
@@ -1889,7 +1987,8 @@ int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
 	}
 
 	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
-						     fdarray_flag__nonfilterable);
+						     fdarray_flag__nonfilterable |
+						     fdarray_flag__non_perf_event);
 	if (evlist->ctl_fd.pos < 0) {
 		evlist->ctl_fd.pos = -1;
 		pr_err("Failed to add ctl fd entry: %m\n");
@@ -2139,6 +2238,236 @@ int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
 	return err;
 }
 
+/**
+ * struct event_enable_time - perf record -D/--delay single time range.
+ * @start: start of time range to enable events in milliseconds
+ * @end: end of time range to enable events in milliseconds
+ *
+ * N.B. this structure is also accessed as an array of int.
+ */
+struct event_enable_time {
+	int	start;
+	int	end;
+};
+
+static int parse_event_enable_time(const char *str, struct event_enable_time *range, bool first)
+{
+	const char *fmt = first ? "%u - %u %n" : " , %u - %u %n";
+	int ret, start, end, n;
+
+	ret = sscanf(str, fmt, &start, &end, &n);
+	if (ret != 2 || end <= start)
+		return -EINVAL;
+	if (range) {
+		range->start = start;
+		range->end = end;
+	}
+	return n;
+}
+
+static ssize_t parse_event_enable_times(const char *str, struct event_enable_time *range)
+{
+	int incr = !!range;
+	bool first = true;
+	ssize_t ret, cnt;
+
+	for (cnt = 0; *str; cnt++) {
+		ret = parse_event_enable_time(str, range, first);
+		if (ret < 0)
+			return ret;
+		/* Check no overlap */
+		if (!first && range && range->start <= range[-1].end)
+			return -EINVAL;
+		str += ret;
+		range += incr;
+		first = false;
+	}
+	return cnt;
+}
+
+/**
+ * struct event_enable_timer - control structure for perf record -D/--delay.
+ * @evlist: event list
+ * @times: time ranges that events are enabled (N.B. this is also accessed as an
+ *         array of int)
+ * @times_cnt: number of time ranges
+ * @timerfd: timer file descriptor
+ * @pollfd_pos: position in @evlist array of file descriptors to poll (fdarray)
+ * @times_step: current position in (int *)@times)[],
+ *              refer event_enable_timer__process()
+ *
+ * Note, this structure is only used when there are time ranges, not when there
+ * is only an initial delay.
+ */
+struct event_enable_timer {
+	struct evlist *evlist;
+	struct event_enable_time *times;
+	size_t	times_cnt;
+	int	timerfd;
+	int	pollfd_pos;
+	size_t	times_step;
+};
+
+static int str_to_delay(const char *str)
+{
+	char *endptr;
+	long d;
+
+	d = strtol(str, &endptr, 10);
+	if (*endptr || d > INT_MAX || d < -1)
+		return 0;
+	return d;
+}
+
+int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *opts,
+				    const char *str, int unset)
+{
+	enum fdarray_flags flags = fdarray_flag__nonfilterable | fdarray_flag__non_perf_event;
+	struct event_enable_timer *eet;
+	ssize_t times_cnt;
+	ssize_t ret;
+	int err;
+
+	if (unset)
+		return 0;
+
+	opts->target.initial_delay = str_to_delay(str);
+	if (opts->target.initial_delay)
+		return 0;
+
+	ret = parse_event_enable_times(str, NULL);
+	if (ret < 0)
+		return ret;
+
+	times_cnt = ret;
+	if (times_cnt == 0)
+		return -EINVAL;
+
+	eet = zalloc(sizeof(*eet));
+	if (!eet)
+		return -ENOMEM;
+
+	eet->times = calloc(times_cnt, sizeof(*eet->times));
+	if (!eet->times) {
+		err = -ENOMEM;
+		goto free_eet;
+	}
+
+	if (parse_event_enable_times(str, eet->times) != times_cnt) {
+		err = -EINVAL;
+		goto free_eet_times;
+	}
+
+	eet->times_cnt = times_cnt;
+
+	eet->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
+	if (eet->timerfd == -1) {
+		err = -errno;
+		pr_err("timerfd_create failed: %s\n", strerror(errno));
+		goto free_eet_times;
+	}
+
+	eet->pollfd_pos = perf_evlist__add_pollfd(&evlist->core, eet->timerfd, NULL, POLLIN, flags);
+	if (eet->pollfd_pos < 0) {
+		err = eet->pollfd_pos;
+		goto close_timerfd;
+	}
+
+	eet->evlist = evlist;
+	evlist->eet = eet;
+	opts->target.initial_delay = eet->times[0].start;
+
+	return 0;
+
+close_timerfd:
+	close(eet->timerfd);
+free_eet_times:
+	zfree(&eet->times);
+free_eet:
+	free(eet);
+	return err;
+}
+
+static int event_enable_timer__set_timer(struct event_enable_timer *eet, int ms)
+{
+	struct itimerspec its = {
+		.it_value.tv_sec = ms / MSEC_PER_SEC,
+		.it_value.tv_nsec = (ms % MSEC_PER_SEC) * NSEC_PER_MSEC,
+	};
+	int err = 0;
+
+	if (timerfd_settime(eet->timerfd, 0, &its, NULL) < 0) {
+		err = -errno;
+		pr_err("timerfd_settime failed: %s\n", strerror(errno));
+	}
+	return err;
+}
+
+int event_enable_timer__start(struct event_enable_timer *eet)
+{
+	int ms;
+
+	if (!eet)
+		return 0;
+
+	ms = eet->times[0].end - eet->times[0].start;
+	eet->times_step = 1;
+
+	return event_enable_timer__set_timer(eet, ms);
+}
+
+int event_enable_timer__process(struct event_enable_timer *eet)
+{
+	struct pollfd *entries;
+	short revents;
+
+	if (!eet)
+		return 0;
+
+	entries = eet->evlist->core.pollfd.entries;
+	revents = entries[eet->pollfd_pos].revents;
+	entries[eet->pollfd_pos].revents = 0;
+
+	if (revents & POLLIN) {
+		size_t step = eet->times_step;
+		size_t pos = step / 2;
+
+		if (step & 1) {
+			evlist__disable_non_dummy(eet->evlist);
+			pr_info(EVLIST_DISABLED_MSG);
+			if (pos >= eet->times_cnt - 1) {
+				/* Disarm timer */
+				event_enable_timer__set_timer(eet, 0);
+				return 1; /* Stop */
+			}
+		} else {
+			evlist__enable_non_dummy(eet->evlist);
+			pr_info(EVLIST_ENABLED_MSG);
+		}
+
+		step += 1;
+		pos = step / 2;
+
+		if (pos < eet->times_cnt) {
+			int *times = (int *)eet->times; /* Accessing 'times' as array of int */
+			int ms = times[step] - times[step - 1];
+
+			eet->times_step = step;
+			return event_enable_timer__set_timer(eet, ms);
+		}
+	}
+
+	return 0;
+}
+
+void event_enable_timer__exit(struct event_enable_timer **ep)
+{
+	if (!ep || !*ep)
+		return;
+	zfree(&(*ep)->times);
+	zfree(ep);
+}
+
 struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
 {
 	struct evsel *evsel;
@@ -2150,23 +2479,36 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
 	return NULL;
 }
 
-int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
+void evlist__format_evsels(struct evlist *evlist, struct strbuf *sb, size_t max_length)
 {
-	struct evsel *evsel;
-	int printed = 0;
+	struct evsel *evsel, *leader = NULL;
+	bool first = true;
 
 	evlist__for_each_entry(evlist, evsel) {
+		struct evsel *new_leader = evsel__leader(evsel);
+
 		if (evsel__is_dummy_event(evsel))
 			continue;
-		if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
-			printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
-		} else {
-			printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
-			break;
+
+		if (leader != new_leader && leader && leader->core.nr_members > 1)
+			strbuf_addch(sb, '}');
+
+		if (!first)
+			strbuf_addch(sb, ',');
+
+		if (sb->len > max_length) {
+			strbuf_addstr(sb, "...");
+			return;
 		}
-	}
 
-	return printed;
+		if (leader != new_leader && new_leader->core.nr_members > 1)
+			strbuf_addch(sb, '{');
+
+		strbuf_addstr(sb, evsel__name(evsel));
+		first = false;
+		leader = new_leader;
+	}
+	if (leader && leader->core.nr_members > 1)
+		strbuf_addch(sb, '}');
 }
 
 void evlist__check_mem_load_aux(struct evlist *evlist)
@@ -2193,3 +2535,109 @@ void evlist__check_mem_load_aux(struct evlist *evlist)
 		}
 	}
 }
+
+/**
+ * evlist__warn_user_requested_cpus() - Check each evsel against requested CPUs
+ * and warn if the user CPU list is inapplicable for the event's PMU's
+ * CPUs. Not core PMUs list a CPU in sysfs, but this may be overwritten by a
+ * user requested CPU and so any online CPU is applicable. Core PMUs handle
+ * events on the CPUs in their list and otherwise the event isn't supported.
+ * @evlist: The list of events being checked.
+ * @cpu_list: The user provided list of CPUs.
+ */
+void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list)
+{
+	struct perf_cpu_map *user_requested_cpus;
+	struct evsel *pos;
+
+	if (!cpu_list)
+		return;
+
+	user_requested_cpus = perf_cpu_map__new(cpu_list);
+	if (!user_requested_cpus)
+		return;
+
+	evlist__for_each_entry(evlist, pos) {
+		evsel__warn_user_requested_cpus(pos, user_requested_cpus);
+	}
+	perf_cpu_map__put(user_requested_cpus);
+}
+
+/* Should uniquify be disabled for the evlist? */
+static bool evlist__disable_uniquify(const struct evlist *evlist)
+{
+	struct evsel *counter;
+	struct perf_pmu *last_pmu = NULL;
+	bool first = true;
+
+	evlist__for_each_entry(evlist, counter) {
+		/* If PMUs vary then uniquify can be useful. */
+		if (!first && counter->pmu != last_pmu)
+			return false;
+		first = false;
+		if (counter->pmu) {
+			/* Allow uniquify for uncore PMUs. */
+			if (!counter->pmu->is_core)
+				return false;
+			/* Keep hybrid event names uniquified for clarity. */
+			if (perf_pmus__num_core_pmus() > 1)
+				return false;
+		}
+		last_pmu = counter->pmu;
+	}
+	return true;
+}
+
+static bool evlist__set_needs_uniquify(struct evlist *evlist, const struct perf_stat_config *config)
+{
+	struct evsel *counter;
+	bool needs_uniquify = false;
+
+	if (evlist__disable_uniquify(evlist)) {
+		evlist__for_each_entry(evlist, counter)
+			counter->uniquified_name = true;
+		return false;
+	}
+
+	evlist__for_each_entry(evlist, counter) {
+		if (evsel__set_needs_uniquify(counter, config))
+			needs_uniquify = true;
+	}
+	return needs_uniquify;
+}
+
+void evlist__uniquify_evsel_names(struct evlist *evlist, const struct perf_stat_config *config)
+{
+	if (evlist__set_needs_uniquify(evlist, config)) {
+		struct evsel *pos;
+
+		evlist__for_each_entry(evlist, pos)
+			evsel__uniquify_counter(pos);
+	}
+}
+
+bool evlist__has_bpf_output(struct evlist *evlist)
+{
+	struct evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel) {
+		if (evsel__is_bpf_output(evsel))
+			return true;
+	}
+
+	return false;
+}
+
+bool evlist__needs_bpf_sb_event(struct evlist *evlist)
+{
+	struct evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel) {
+		if (evsel__is_dummy_event(evsel))
+			continue;
+		if (!evsel->core.attr.exclude_kernel)
+			return true;
+	}
+
+	return false;
+}
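The new evlist__dummy_event() above pins dummy events to .freq = 0 and .sample_period = 1 because a dummy event counts nothing, so frequency mode would only add the kernel timers used to adjust the period. A minimal standalone sketch of opening such an event directly via the perf_event_open syscall (Linux-only and illustrative; the pid/cpu choices here are assumptions, not part of the patch):

/* Open a PERF_COUNT_SW_DUMMY event on the current thread. */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.size = sizeof(attr);	/* captures the ABI version */
	/* A dummy event counts nothing, so frequency mode only adds
	 * timer overhead; use a fixed period of 1 instead. */
	attr.freq = 0;
	attr.sample_period = 1;

	fd = syscall(SYS_perf_event_open, &attr, /*pid=*/0, /*cpu=*/-1,
		     /*group_fd=*/-1, /*flags=*/0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	printf("dummy event opened: fd=%d\n", fd);
	close(fd);
	return 0;
}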
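evlist__new_abbr_name() hands out branch-counter abbreviations in the order A..Z, then A0..Z0, A1..Z1, up to Z9, and falls back to "NA" once 26 * 11 names are exhausted. A self-contained sketch of the same scheme (new_abbr_name() and main() are illustrative rewrites, not the patch code):

#include <stdio.h>

#define MAX_NR_ABBR_NAME (26 * 11)

static void new_abbr_name(char *name)
{
	static int idx;
	int i = idx / 26;

	if (idx >= MAX_NR_ABBR_NAME) {	/* names exhausted: "NA" */
		name[0] = 'N';
		name[1] = 'A';
		name[2] = '\0';
		return;
	}
	name[0] = 'A' + (idx % 26);	/* letter cycles A..Z */
	if (!i)
		name[1] = '\0';		/* first 26 names: "A".."Z" */
	else {
		name[1] = '0' + i - 1;	/* then "A0".."Z0", "A1".."Z9" */
		name[2] = '\0';
	}
	idx++;
}

int main(void)
{
	char buf[3];

	for (int n = 0; n < 30; n++) {
		new_abbr_name(buf);
		printf("%s ", buf);	/* prints: A B ... Z A0 B0 C0 D0 */
	}
	printf("\n");
	return 0;
}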
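parse_event_enable_times() accepts the 'perf record -D' range syntax, e.g. '10-20,30-40' to enable events between 10-20 ms and 30-40 ms after startup, rejecting overlapping or descending ranges. A standalone sketch of the same sscanf-based parsing, assuming a simplified parse_one() stand-in for the patch's helper:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct event_enable_time {
	int start;	/* ms */
	int end;	/* ms */
};

static int parse_one(const char *str, struct event_enable_time *range, bool first)
{
	/* %n records how many characters were consumed */
	const char *fmt = first ? "%u - %u %n" : " , %u - %u %n";
	int ret, start, end, n;

	ret = sscanf(str, fmt, &start, &end, &n);
	if (ret != 2 || end <= start)
		return -EINVAL;
	if (range) {
		range->start = start;
		range->end = end;
	}
	return n;
}

int main(void)
{
	const char *str = "10-20,30-40";
	struct event_enable_time times[8];
	int cnt = 0, ret;
	bool first = true;

	while (*str) {
		ret = parse_one(str, &times[cnt], first);
		if (ret < 0)
			return 1;
		/* ranges must not overlap and must ascend */
		if (!first && times[cnt].start <= times[cnt - 1].end)
			return 1;
		str += ret;
		cnt++;
		first = false;
	}
	for (int i = 0; i < cnt; i++)
		printf("enable %d..%d ms\n", times[i].start, times[i].end);
	return 0;
}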
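event_enable_timer__process() relies on struct event_enable_time being laid out as consecutive ints, so the flattened array alternates enable/disable boundary times and times[step] - times[step - 1] is always the length of the next interval to arm the timer with. A small sketch of that indexing (the values are made up for illustration):

#include <stdio.h>

struct range { int start, end; };

int main(void)
{
	struct range times[] = { {10, 20}, {30, 40} };
	int *flat = (int *)times;	/* [10, 20, 30, 40] */
	int nr = 2 * 2;			/* 2 ints per range */

	for (int step = 1; step < nr; step++) {
		int ms = flat[step] - flat[step - 1];

		/* an odd step ends an enabled range, an even step ends
		 * the disabled gap between ranges */
		printf("%s for %d ms\n", (step & 1) ? "enabled" : "disabled", ms);
	}
	return 0;
}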
