Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r--	tools/perf/util/evlist.c	123
1 file changed, 67 insertions, 56 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 52ea004ba01e..48af7d379d82 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -242,14 +242,20 @@ int __evlist__add_default(struct evlist *evlist, bool precise)
 	return 0;
 }
 
-int evlist__add_dummy(struct evlist *evlist)
+static struct evsel *evlist__dummy_event(struct evlist *evlist)
 {
 	struct perf_event_attr attr = {
 		.type	= PERF_TYPE_SOFTWARE,
 		.config = PERF_COUNT_SW_DUMMY,
 		.size	= sizeof(attr), /* to capture ABI version */
 	};
-	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);
+
+	return evsel__new_idx(&attr, evlist->core.nr_entries);
+}
+
+int evlist__add_dummy(struct evlist *evlist)
+{
+	struct evsel *evsel = evlist__dummy_event(evlist);
 
 	if (evsel == NULL)
 		return -ENOMEM;
@@ -258,6 +264,51 @@ int evlist__add_dummy(struct evlist *evlist)
 	return 0;
 }
 
+static void evlist__add_on_all_cpus(struct evlist *evlist, struct evsel *evsel)
+{
+	evsel->core.system_wide = true;
+
+	/*
+	 * All CPUs.
+	 *
+	 * Note perf_event_open() does not accept CPUs that are not online, so
+	 * in fact this CPU list will include only all online CPUs.
+	 */
+	perf_cpu_map__put(evsel->core.own_cpus);
+	evsel->core.own_cpus = perf_cpu_map__new(NULL);
+	perf_cpu_map__put(evsel->core.cpus);
+	evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);
+
+	/* No threads */
+	perf_thread_map__put(evsel->core.threads);
+	evsel->core.threads = perf_thread_map__new_dummy();
+
+	evlist__add(evlist, evsel);
+}
+
+struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
+{
+	struct evsel *evsel = evlist__dummy_event(evlist);
+
+	if (!evsel)
+		return NULL;
+
+	evsel->core.attr.exclude_kernel = 1;
+	evsel->core.attr.exclude_guest = 1;
+	evsel->core.attr.exclude_hv = 1;
+	evsel->core.attr.freq = 0;
+	evsel->core.attr.sample_period = 1;
+	evsel->no_aux_samples = true;
+	evsel->name = strdup("dummy:u");
+
+	if (system_wide)
+		evlist__add_on_all_cpus(evlist, evsel);
+	else
+		evlist__add(evlist, evsel);
+
+	return evsel;
+}
+
 static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
 {
 	struct evsel *evsel, *n;
@@ -334,14 +385,6 @@ int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name,
 	return 0;
 }
 
-static int evlist__nr_threads(struct evlist *evlist, struct evsel *evsel)
-{
-	if (evsel->core.system_wide)
-		return 1;
-	else
-		return perf_thread_map__nr(evlist->core.threads);
-}
-
 struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
 {
 	struct evlist_cpu_iterator itr = {
@@ -546,48 +589,6 @@ void evlist__toggle_enable(struct evlist *evlist)
 	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
 }
 
-static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel, int cpu)
-{
-	int thread;
-	int nr_threads = evlist__nr_threads(evlist, evsel);
-
-	if (!evsel->core.fd)
-		return -EINVAL;
-
-	for (thread = 0; thread < nr_threads; thread++) {
-		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
-{
-	int cpu;
-	int nr_cpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
-
-	if (!evsel->core.fd)
-		return -EINVAL;
-
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
-{
-	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
-
-	if (per_cpu_mmaps)
-		return evlist__enable_event_cpu(evlist, evsel, idx);
-
-	return evlist__enable_event_thread(evlist, evsel, idx);
-}
-
 int evlist__add_pollfd(struct evlist *evlist, int fd)
 {
 	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
@@ -797,13 +798,15 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
 
 static void
 perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
+			 struct perf_evsel *_evsel,
 			 struct perf_mmap_param *_mp,
-			 int idx, bool per_cpu)
+			 int idx)
 {
 	struct evlist *evlist = container_of(_evlist, struct evlist, core);
 	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
+	struct evsel *evsel = container_of(_evsel, struct evsel, core);
 
-	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
+	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
 }
 
 static struct perf_mmap*
@@ -1790,8 +1793,13 @@ struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *
 		if (evsel__has_leader(c2, leader)) {
 			if (is_open && close)
 				perf_evsel__close(&c2->core);
-			evsel__set_leader(c2, c2);
-			c2->core.nr_members = 0;
+			/*
+			 * We want to close all members of the group and reopen
+			 * them. Some events, like Intel topdown, require being
+			 * in a group and so keep these in the group.
+			 */
+			evsel__remove_from_group(c2, leader);
+
 			/*
 			 * Set this for all former members of the group
 			 * to indicate they get reopened.
@@ -1799,6 +1807,9 @@ struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *
 			 */
 			c2->reset_group = true;
 		}
 	}
+	/* Reset the leader count if all entries were removed. */
+	if (leader->core.nr_members == 1)
+		leader->core.nr_members = 0;
 	return leader;
 }
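The new entry point is evlist__add_aux_dummy(). For orientation, here is a hedged sketch of how a caller inside the perf tree might use it; the function name setup_tracking_event and its error path are illustrative assumptions, not code from this commit:

/* Hypothetical caller, e.g. during aux-area tracing setup: add a
 * user-space-only dummy event so that sideband records (mmap, comm,
 * task exit) are still captured, on all online CPUs when tracing
 * system-wide. Sketch only, not part of this diff. */
static int setup_tracking_event(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel;

	evsel = evlist__add_aux_dummy(evlist, system_wide);
	if (!evsel)
		return -ENOMEM;

	/* evlist__add_aux_dummy() already named the event "dummy:u" and
	 * set freq = 0, sample_period = 1, so it is ready to be opened. */
	return 0;
}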

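The comment added in evlist__add_on_all_cpus() notes that perf_event_open() rejects CPUs that are not online, which is why an "all CPUs" map really means all online CPUs. A minimal standalone sketch (an illustration under that assumption, not part of this commit) that opens the same PERF_COUNT_SW_DUMMY event per configured CPU and reports the failure for any CPU that is offline:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper: glibc provides no perf_event_open() symbol. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long nr_cpus = sysconf(_SC_NPROCESSORS_CONF); /* configured, not just online */
	int cpu;

	/* Same event type evlist__dummy_event() sets up. */
	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.size = sizeof(attr);
	attr.exclude_kernel = 1; /* user space only, as in "dummy:u" */

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		/* pid == -1 with cpu >= 0: per-CPU, all threads; the open
		 * fails if this CPU is configured but currently offline. */
		int fd = (int)perf_event_open(&attr, -1, cpu, -1, 0);

		if (fd < 0) {
			fprintf(stderr, "cpu %d: ", cpu);
			perror("perf_event_open");
		} else {
			close(fd);
		}
	}
	return 0;
}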