Diffstat (limited to 'tools/perf/tests/perf-record.c')
-rw-r--r--	tools/perf/tests/perf-record.c	136
 1 file changed, 93 insertions(+), 43 deletions(-)
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index d37cd9588cc0..efbd9cd60c63 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -1,23 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
-/* For the CLR_() macros */
-#include <pthread.h>
+#include <linux/string.h>
#include <sched.h>
+#include <perf/mmap.h>
+#include "event.h"
#include "evlist.h"
#include "evsel.h"
-#include "perf.h"
#include "debug.h"
+#include "record.h"
#include "tests.h"
+#include "util/mmap.h"
+#include "util/sample.h"
+#include "util/cpumap.h"
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
- int i, cpu = -1, nrcpus = 1024;
+ int i, cpu = -1;
+ int nrcpus = cpu__max_cpu().cpu;
+ size_t size = CPU_ALLOC_SIZE(nrcpus);
+
realloc:
- CPU_ZERO(maskp);
+ CPU_ZERO_S(size, maskp);
- if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
- if (errno == EINVAL && nrcpus < (1024 << 8)) {
+ if (sched_getaffinity(pid, size, maskp) == -1) {
+ if (errno == EINVAL && nrcpus < (cpu__max_cpu().cpu << 8)) {
nrcpus = nrcpus << 2;
goto realloc;
}
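
For context: the old code's fixed-size CPU_ZERO()/CPU_ISSET() macros operate on a
1024-bit cpu_set_t, which is why it retried with a scaled nrcpus; the _S variants
take an explicit size from CPU_ALLOC_SIZE() and work for any CPU count. A minimal
standalone sketch of that glibc pattern (nrcpus is a made-up value, not from the patch):

/* Sketch of the dynamically sized cpu_set_t API; not part of the patch. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int nrcpus = 2048;                    /* assume > 1024 possible CPUs */
	cpu_set_t *mask = CPU_ALLOC(nrcpus);  /* heap-allocated, sized to fit */
	size_t size = CPU_ALLOC_SIZE(nrcpus); /* bytes actually in use */

	if (mask == NULL)
		return 1;

	CPU_ZERO_S(size, mask);
	/* Pass the real size, not sizeof(cpu_set_t), to the syscall wrapper. */
	if (sched_getaffinity(0, size, mask) == -1) {
		perror("sched_getaffinity");
		CPU_FREE(mask);
		return 1;
	}

	for (int i = 0; i < nrcpus; i++)
		if (CPU_ISSET_S(i, size, mask))
			printf("cpu %d usable\n", i);

	CPU_FREE(mask);
	return 0;
}
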
@@ -26,37 +34,38 @@ realloc:
}
for (i = 0; i < nrcpus; i++) {
- if (CPU_ISSET(i, maskp)) {
+ if (CPU_ISSET_S(i, size, maskp)) {
if (cpu == -1)
cpu = i;
else
- CPU_CLR(i, maskp);
+ CPU_CLR_S(i, size, maskp);
}
}
return cpu;
}
-int test__PERF_RECORD(int subtest __maybe_unused)
+static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct record_opts opts = {
.target = {
- .uid = UINT_MAX,
.uses_mmap = true,
},
.no_buffering = true,
.mmap_pages = 256,
};
- cpu_set_t cpu_mask;
- size_t cpu_mask_size = sizeof(cpu_mask);
- struct perf_evlist *evlist = perf_evlist__new_dummy();
- struct perf_evsel *evsel;
+ int nrcpus = cpu__max_cpu().cpu;
+ cpu_set_t *cpu_mask;
+ size_t cpu_mask_size;
+ struct evlist *evlist = evlist__new_dummy();
+ struct evsel *evsel;
struct perf_sample sample;
const char *cmd = "sleep";
const char *argv[] = { cmd, "1", NULL, };
char *bname, *mmap_filename;
u64 prev_time = 0;
bool found_cmd_mmap = false,
+ found_coreutils_mmap = false,
found_libc_mmap = false,
found_vdso_mmap = false,
found_ld_mmap = false;
@@ -65,21 +74,32 @@ int test__PERF_RECORD(int subtest __maybe_unused)
int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
char sbuf[STRERR_BUFSIZE];
+ cpu_mask = CPU_ALLOC(nrcpus);
+ if (!cpu_mask) {
+ pr_debug("failed to create cpumask\n");
+ goto out;
+ }
+
+ cpu_mask_size = CPU_ALLOC_SIZE(nrcpus);
+ CPU_ZERO_S(cpu_mask_size, cpu_mask);
+
+ perf_sample__init(&sample, /*all=*/false);
if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
- evlist = perf_evlist__new_default();
+ evlist = evlist__new_default();
if (evlist == NULL) {
pr_debug("Not enough memory to create evlist\n");
+ CPU_FREE(cpu_mask);
goto out;
}
/*
* Create maps of threads and cpus to monitor. In this case
* we start with all threads and cpus (-1, -1) but then in
- * perf_evlist__prepare_workload we'll fill in the only thread
+ * evlist__prepare_workload we'll fill in the only thread
* we're monitoring, the one forked there.
*/
- err = perf_evlist__create_maps(evlist, &opts.target);
+ err = evlist__create_maps(evlist, &opts.target);
if (err < 0) {
pr_debug("Not enough memory to create thread/cpu maps\n");
goto out_delete_evlist;
@@ -87,11 +107,11 @@ int test__PERF_RECORD(int subtest __maybe_unused)
/*
* Prepare the workload in argv[] to run, it'll fork it, and then wait
- * for perf_evlist__start_workload() to exec it. This is done this way
+ * for evlist__start_workload() to exec it. This is done this way
* so that we have time to open the evlist (calling sys_perf_event_open
* on all the fds) and then mmap them.
*/
- err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
+ err = evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
if (err < 0) {
pr_debug("Couldn't run the workload!\n");
goto out_delete_evlist;
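
evlist__prepare_workload() forks the child but parks it before exec, so the test
has time to open and mmap the events; evlist__start_workload() then releases it.
A simplified sketch of that fork-and-wait handshake, assuming the usual pipe-based
design (helper names here are hypothetical; the real code in tools/perf/util/evlist.c
additionally handles close-on-exec and error reporting):

/* Hypothetical, simplified handshake; not the actual perf implementation. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int go_pipe[2];

static pid_t workload__prepare(const char **argv)
{
	pid_t pid;

	if (pipe(go_pipe) < 0)
		return -1;

	pid = fork();
	if (pid == 0) {
		char bf;

		close(go_pipe[1]);
		/* Child parks here until the parent writes the "go" byte. */
		if (read(go_pipe[0], &bf, 1) == 1)
			execvp(argv[0], (char **)argv);
		exit(127);
	}
	close(go_pipe[0]);
	return pid;	/* parent opens/mmaps events against this pid first */
}

static void workload__start(void)
{
	/* Unblock the child: it returns from read() and execs the workload. */
	if (write(go_pipe[1], "", 1) != 1)
		perror("write");
	close(go_pipe[1]);
}
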
@@ -100,16 +120,17 @@ int test__PERF_RECORD(int subtest __maybe_unused)
/*
* Config the evsels, setting attr->comm on the first one, etc.
*/
- evsel = perf_evlist__first(evlist);
- perf_evsel__set_sample_bit(evsel, CPU);
- perf_evsel__set_sample_bit(evsel, TID);
- perf_evsel__set_sample_bit(evsel, TIME);
- perf_evlist__config(evlist, &opts, NULL);
+ evsel = evlist__first(evlist);
+ evsel__set_sample_bit(evsel, CPU);
+ evsel__set_sample_bit(evsel, TID);
+ evsel__set_sample_bit(evsel, TIME);
+ evlist__config(evlist, &opts, NULL);
- err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
+ err = sched__get_first_possible_cpu(evlist->workload.pid, cpu_mask);
if (err < 0) {
pr_debug("sched__get_first_possible_cpu: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
+ evlist__cancel_workload(evlist);
goto out_delete_evlist;
}
@@ -118,9 +139,10 @@ int test__PERF_RECORD(int subtest __maybe_unused)
/*
* So that we can check perf_sample.cpu on all the samples.
*/
- if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
+ if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
pr_debug("sched_setaffinity: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
+ evlist__cancel_workload(evlist);
goto out_delete_evlist;
}
@@ -128,10 +150,11 @@ int test__PERF_RECORD(int subtest __maybe_unused)
* Call sys_perf_event_open on all the fds on all the evsels,
* grouping them if asked to.
*/
- err = perf_evlist__open(evlist);
+ err = evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
+ evlist__cancel_workload(evlist);
goto out_delete_evlist;
}
@@ -140,10 +163,11 @@ int test__PERF_RECORD(int subtest __maybe_unused)
* fds in the same CPU to be injected in the same mmap ring buffer
* (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
*/
- err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
+ err = evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
- pr_debug("perf_evlist__mmap: %s\n",
+ pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
+ evlist__cancel_workload(evlist);
goto out_delete_evlist;
}
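
evlist__mmap() maps opts.mmap_pages data pages plus one metadata page per ring
buffer over the event fd (note the dropped third argument: the old 'overwrite'
flag is gone from the signature). At the raw syscall level the setup looks
roughly like this sketch; the attr choices mirror the test (PERF_COUNT_SW_DUMMY,
TID/TIME/CPU samples) but mmap_ring() itself is a made-up helper:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Returns MAP_FAILED on error; caller checks and closes *fdp. */
static void *mmap_ring(pid_t pid, int pages, int *fdp)
{
	struct perf_event_attr attr;
	size_t len;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.sample_period = 1;
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU;
	attr.disabled = 1;	/* enabled later, cf. evlist__enable() */

	*fdp = sys_perf_event_open(&attr, pid, -1, -1, 0);
	if (*fdp < 0)
		return MAP_FAILED;

	/* One metadata page + 2^n data pages, as with opts.mmap_pages = 256. */
	len = (size_t)(pages + 1) * sysconf(_SC_PAGESIZE);
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, *fdp, 0);
}
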
@@ -151,20 +175,25 @@ int test__PERF_RECORD(int subtest __maybe_unused)
* Now that all is properly set up, enable the events, they will
* count just on workload.pid, which will start...
*/
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
/*
* Now!
*/
- perf_evlist__start_workload(evlist);
+ evlist__start_workload(evlist);
while (1) {
int before = total_events;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
+ struct mmap *md;
+
+ md = &evlist->mmap[i];
+ if (perf_mmap__read_init(&md->core) < 0)
+ continue;
- while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
const u32 type = event->header.type;
const char *name = perf_event__name(type);
@@ -172,17 +201,17 @@ int test__PERF_RECORD(int subtest __maybe_unused)
if (type < PERF_RECORD_MAX)
nr_events[type]++;
- err = perf_evlist__parse_sample(evlist, event, &sample);
+ err = evlist__parse_sample(evlist, event, &sample);
if (err < 0) {
if (verbose > 0)
- perf_event__fprintf(event, stderr);
+ perf_event__fprintf(event, NULL, stderr);
pr_debug("Couldn't parse sample\n");
goto out_delete_evlist;
}
if (verbose > 0) {
pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
- perf_event__fprintf(event, stderr);
+ perf_event__fprintf(event, NULL, stderr);
}
if (prev_time > sample.time) {
@@ -248,6 +277,8 @@ int test__PERF_RECORD(int subtest __maybe_unused)
if (bname != NULL) {
if (!found_cmd_mmap)
found_cmd_mmap = !strcmp(bname + 1, cmd);
+ if (!found_coreutils_mmap)
+ found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
if (!found_libc_mmap)
found_libc_mmap = !strncmp(bname + 1, "libc", 4);
if (!found_ld_mmap)
@@ -265,8 +296,9 @@ int test__PERF_RECORD(int subtest __maybe_unused)
++errs;
}
- perf_evlist__mmap_consume(evlist, i);
+ perf_mmap__consume(&md->core);
}
+ perf_mmap__read_done(&md->core);
}
/*
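
Pieced together, the per-mmap read loop after this patch follows libperf's
init/read/consume/done bracket from <perf/mmap.h> (condensed from the hunks
above; the elided body is the event checking shown there):

for (i = 0; i < evlist->core.nr_mmaps; i++) {
	union perf_event *event;
	struct mmap *md = &evlist->mmap[i];

	if (perf_mmap__read_init(&md->core) < 0)	/* ring empty: skip */
		continue;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		/* ... parse the sample and update the counters ... */
		perf_mmap__consume(&md->core);	/* release this record */
	}
	perf_mmap__read_done(&md->core);	/* commit the new tail */
}
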
@@ -275,7 +307,7 @@ int test__PERF_RECORD(int subtest __maybe_unused)
* perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
*/
if (total_events == before && false)
- perf_evlist__poll(evlist, -1);
+ evlist__poll(evlist, -1);
sleep(1);
if (++wakeups > 5) {
@@ -285,7 +317,7 @@ int test__PERF_RECORD(int subtest __maybe_unused)
}
found_exit:
- if (nr_events[PERF_RECORD_COMM] > 1) {
+ if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
++errs;
}
@@ -295,7 +327,7 @@ found_exit:
++errs;
}
- if (!found_cmd_mmap) {
+ if (!found_cmd_mmap && !found_coreutils_mmap) {
pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
++errs;
}
@@ -315,7 +347,25 @@ found_exit:
++errs;
}
out_delete_evlist:
- perf_evlist__delete(evlist);
+ CPU_FREE(cpu_mask);
+ evlist__delete(evlist);
out:
- return (err < 0 || errs > 0) ? -1 : 0;
+ perf_sample__exit(&sample);
+ if (err == -EACCES)
+ return TEST_SKIP;
+ if (err < 0 || errs != 0)
+ return TEST_FAIL;
+ return TEST_OK;
}
+
+static struct test_case tests__PERF_RECORD[] = {
+ TEST_CASE_REASON("PERF_RECORD_* events & perf_sample fields",
+ PERF_RECORD,
+ "permissions"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__PERF_RECORD = {
+ .desc = "PERF_RECORD_* events & perf_sample fields",
+ .test_cases = tests__PERF_RECORD,
+};
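
For reference, the test_suite/test_case machinery used above comes from
tools/perf/tests/tests.h. A paraphrased sketch of the shape of that API (the
field layout is an assumption from memory of the header, not verbatim):

/* Paraphrased from tools/perf/tests/tests.h; layout is approximate. */
enum {
	TEST_OK   =  0,
	TEST_FAIL = -1,
	TEST_SKIP = -2,	/* e.g. the -EACCES path above */
};

struct test_suite;

struct test_case {
	const char *name;
	const char *desc;
	const char *skip_reason;	/* printed when a case returns TEST_SKIP */
	int (*run_case)(struct test_suite *test, int subtest);
};

struct test_suite {
	const char *desc;
	struct test_case *test_cases;	/* NULL-name terminated array */
	void *priv;
};

/*
 * TEST_CASE_REASON("PERF_RECORD_* events & perf_sample fields",
 *                  PERF_RECORD, "permissions")
 * builds roughly:
 *   { .name = "PERF_RECORD", .desc = "PERF_RECORD_* ...",
 *     .run_case = test__PERF_RECORD, .skip_reason = "permissions" }
 */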