Diffstat (limited to 'tools/perf/builtin-record.c')
-rw-r--r--  tools/perf/builtin-record.c  775
1 file changed, 467 insertions(+), 308 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 29dcd454b8e2..2584d0d8bc82 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -26,6 +26,7 @@
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
+#include "util/stat.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
@@ -37,8 +38,6 @@
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/perf_api_probe.h"
-#include "util/llvm-utils.h"
-#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
@@ -48,10 +47,12 @@
#include "util/bpf-event.h"
#include "util/util.h"
#include "util/pfm.h"
+#include "util/pmu.h"
+#include "util/pmus.h"
#include "util/clockid.h"
-#include "util/pmu-hybrid.h"
-#include "util/evlist-hybrid.h"
#include "util/off_cpu.h"
+#include "util/bpf-filter.h"
+#include "util/strbuf.h"
#include "asm/bug.h"
#include "perf.h"
#include "cputopo.h"
@@ -154,6 +155,7 @@ struct record {
struct perf_tool tool;
struct record_opts opts;
u64 bytes_written;
+ u64 thread_bytes_written;
struct perf_data data;
struct auxtrace_record *itr;
struct evlist *evlist;
@@ -161,6 +163,7 @@ struct record {
struct evlist *sb_evlist;
pthread_t thread_id;
int realtime_prio;
+ bool latency;
bool switch_output_event_set;
bool no_buildid;
bool no_buildid_set;
@@ -168,9 +171,12 @@ struct record {
bool no_buildid_cache_set;
bool buildid_all;
bool buildid_mmap;
+ bool buildid_mmap_set;
bool timestamp_filename;
bool timestamp_boundary;
bool off_cpu;
+ const char *filter_action;
+ const char *uid_str;
struct switch_output switch_output;
unsigned long long samples;
unsigned long output_max_size; /* = 0: unlimited */
@@ -193,6 +199,15 @@ static const char *affinity_tags[PERF_AFFINITY_MAX] = {
"SYS", "NODE", "CPU"
};
+static int build_id__process_mmap(const struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine);
+static int build_id__process_mmap2(const struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine);
+static int process_timestamp_boundary(const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+
#ifndef HAVE_GETTID
static inline pid_t gettid(void)
{
@@ -226,14 +241,7 @@ static bool switch_output_time(struct record *rec)
static u64 record__bytes_written(struct record *rec)
{
- int t;
- u64 bytes_written = rec->bytes_written;
- struct record_thread *thread_data = rec->thread_data;
-
- for (t = 0; t < rec->nr_threads; t++)
- bytes_written += thread_data[t].bytes_written;
-
- return bytes_written;
+ return rec->bytes_written + rec->thread_bytes_written;
}
static bool record__output_max_size_exceeded(struct record *rec)
@@ -255,10 +263,12 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused,
return -1;
}
- if (map && map->file)
+ if (map && map->file) {
thread->bytes_written += size;
- else
+ rec->thread_bytes_written += size;
+ } else {
rec->bytes_written += size;
+ }
if (record__output_max_size_exceeded(rec) && !done) {
fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
@@ -275,7 +285,7 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused,
static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
-static size_t zstd_compress(struct perf_session *session, struct mmap *map,
+static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
void *dst, size_t dst_size, void *src, size_t src_size);
#ifdef HAVE_AIO_SUPPORT
@@ -337,7 +347,7 @@ static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
} else {
/*
* aio write request may require restart with the
- * reminder if the kernel didn't write whole
+ * remainder if the kernel didn't write whole
* chunk at once.
*/
rem_off = cblock->aio_offset + written;
@@ -405,14 +415,18 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size
*
* Copying can be done in two steps in case the chunk of profiling data
* crosses the upper bound of the kernel buffer. In this case we first move
- * part of data from map->start till the upper bound and then the reminder
+ * part of data from map->start till the upper bound and then the remainder
* from the beginning of the kernel buffer till the end of the data chunk.
*/
if (record__comp_enabled(aio->rec)) {
- size = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
- mmap__mmap_len(map) - aio->size,
- buf, size);
+ ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
+ mmap__mmap_len(map) - aio->size,
+ buf, size);
+ if (compressed < 0)
+ return (int)compressed;
+
+ size = compressed;
} else {
memcpy(aio->data + aio->size, buf, size);
}
@@ -609,7 +623,7 @@ static int record__comp_enabled(struct record *rec)
return rec->opts.comp_level > 0;
}
-static int process_synthesized_event(struct perf_tool *tool,
+static int process_synthesized_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -620,7 +634,7 @@ static int process_synthesized_event(struct perf_tool *tool,
static struct mutex synth_lock;
-static int process_locked_synthesized_event(struct perf_tool *tool,
+static int process_locked_synthesized_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -638,8 +652,27 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
struct record *rec = to;
if (record__comp_enabled(rec)) {
- size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size);
- bf = map->data;
+ struct perf_record_compressed2 *event = map->data;
+ size_t padding = 0;
+ u8 pad[8] = {0};
+ ssize_t compressed = zstd_compress(rec->session, map, map->data,
+ mmap__mmap_len(map), bf, size);
+
+ if (compressed < 0)
+ return (int)compressed;
+
+ bf = event;
+ thread->samples++;
+
+ /*
+ * The record from `zstd_compress` is not 8-byte aligned, which would cause an
+ * asan error. We align it here.
+ */
+ event->data_size = compressed - sizeof(struct perf_record_compressed2);
+ event->header.size = PERF_ALIGN(compressed, sizeof(u64));
+ padding = event->header.size - compressed;
+ return record__write(rec, map, bf, compressed) ||
+ record__write(rec, map, &pad, padding);
}
thread->samples++;
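
The added path above pads each PERF_RECORD_COMPRESSED2 record to an 8-byte boundary before writing it out. A minimal standalone sketch of that alignment arithmetic, assuming PERF_ALIGN rounds a size up to the next multiple of a power-of-two alignment (the macro below is an illustration, not the definition from the perf tree):

    /* Round a compressed record size up to a u64 boundary and derive the
     * number of zero padding bytes, mirroring record__pushfn() above. */
    #include <stdint.h>
    #include <stdio.h>

    #define PERF_ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        uint64_t compressed = 4093;                /* hypothetical record size */
        uint64_t hdr_size = PERF_ALIGN(compressed, sizeof(uint64_t));
        uint64_t padding = hdr_size - compressed;  /* 0..7 zero bytes */

        printf("compressed=%llu header.size=%llu padding=%llu\n",
               (unsigned long long)compressed,
               (unsigned long long)hdr_size,
               (unsigned long long)padding);
        return 0;
    }
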
@@ -697,9 +730,7 @@ static void record__sig_exit(void)
raise(signr);
}
-#ifdef HAVE_AUXTRACE_SUPPORT
-
-static int record__process_auxtrace(struct perf_tool *tool,
+static int record__process_auxtrace(const struct perf_tool *tool,
struct mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2)
@@ -742,7 +773,9 @@ static int record__auxtrace_mmap_read(struct record *rec,
{
int ret;
- ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
+ ret = auxtrace_mmap__read(map, rec->itr,
+ perf_session__env(rec->session),
+ &rec->tool,
record__process_auxtrace);
if (ret < 0)
return ret;
@@ -758,7 +791,9 @@ static int record__auxtrace_mmap_read_snapshot(struct record *rec,
{
int ret;
- ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
+ ret = auxtrace_mmap__read_snapshot(map, rec->itr,
+ perf_session__env(rec->session),
+ &rec->tool,
record__process_auxtrace,
rec->opts.auxtrace_snapshot_size);
if (ret < 0)
@@ -845,45 +880,13 @@ static int record__auxtrace_init(struct record *rec)
if (err)
return err;
- auxtrace_regroup_aux_output(rec->evlist);
+ err = auxtrace_parse_aux_action(rec->evlist);
+ if (err)
+ return err;
return auxtrace_parse_filters(rec->evlist);
}
-#else
-
-static inline
-int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
- struct mmap *map __maybe_unused)
-{
- return 0;
-}
-
-static inline
-void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
- bool on_exit __maybe_unused)
-{
-}
-
-static inline
-int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
-{
- return 0;
-}
-
-static inline
-int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
-{
- return 0;
-}
-
-static int record__auxtrace_init(struct record *rec __maybe_unused)
-{
- return 0;
-}
-
-#endif
-
static int record__config_text_poke(struct evlist *evlist)
{
struct evsel *evsel;
@@ -911,6 +914,64 @@ static int record__config_off_cpu(struct record *rec)
return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
}
+static bool record__tracking_system_wide(struct record *rec)
+{
+ struct evlist *evlist = rec->evlist;
+ struct evsel *evsel;
+
+ /*
+ * If a non-dummy evsel exists, system-wide sideband data is needed to
+ * help parse sample information.
+ * For example, PERF_RECORD_MMAP events help parse symbols,
+ * and PERF_RECORD_COMM events help parse task executable names.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ if (!evsel__is_dummy_event(evsel))
+ return true;
+ }
+
+ return false;
+}
+
+static int record__config_tracking_events(struct record *rec)
+{
+ struct record_opts *opts = &rec->opts;
+ struct evlist *evlist = rec->evlist;
+ bool system_wide = false;
+ struct evsel *evsel;
+
+ /*
+ * For initial_delay, system wide or a hybrid system, we need to add a
+ * tracking event so that we can track PERF_RECORD_MMAP to cover the
+ * delay of waiting or event synthesis.
+ */
+ if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
+ perf_pmus__num_core_pmus() > 1) {
+ /*
+ * User space tasks can migrate between CPUs, so when tracing
+ * selected CPUs, sideband for all CPUs is still needed.
+ */
+ if (!!opts->target.cpu_list && record__tracking_system_wide(rec))
+ system_wide = true;
+
+ evsel = evlist__findnew_tracking_event(evlist, system_wide);
+ if (!evsel)
+ return -ENOMEM;
+
+ /*
+ * Enable the tracking event when the process is forked for
+ * initial_delay, immediately for system wide.
+ */
+ if (opts->target.initial_delay && !evsel->immediate &&
+ !target__has_cpu(&opts->target))
+ evsel->core.attr.enable_on_exec = 1;
+ else
+ evsel->immediate = 1;
+ }
+
+ return 0;
+}
+
static bool record__kcore_readable(struct machine *machine)
{
char kcore[PATH_MAX];
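
record__config_tracking_events() above adds a "tracking" event, i.e. a dummy event whose only job is to deliver sideband records. A rough sketch of what such an event amounts to at the perf_event_attr level; the flag choices here are illustrative, the real configuration is done by evsel__config():

    /* Illustrative dummy "tracking" event: it counts nothing, but still
     * delivers the PERF_RECORD_MMAP/COMM/FORK/EXIT sideband needed to
     * decode samples from the real events. */
    #include <linux/perf_event.h>
    #include <string.h>

    static void setup_tracking_attr(struct perf_event_attr *attr, int system_wide)
    {
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_SOFTWARE;
        attr->config = PERF_COUNT_SW_DUMMY;  /* no actual counting */
        attr->mmap = 1;                      /* PERF_RECORD_MMAP{,2} */
        attr->comm = 1;                      /* PERF_RECORD_COMM */
        attr->task = 1;                      /* fork/exit records */
        if (!system_wide)
            attr->enable_on_exec = 1;        /* arm when the workload execs */
    }

    int main(void)
    {
        struct perf_event_attr attr;

        setup_tracking_attr(&attr, /*system_wide=*/0);
        return attr.config == PERF_COUNT_SW_DUMMY ? 0 : 1;
    }
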
@@ -1290,40 +1351,28 @@ static int record__open(struct record *rec)
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
int rc = 0;
-
- /*
- * For initial_delay, system wide or a hybrid system, we need to add a
- * dummy event so that we can track PERF_RECORD_MMAP to cover the delay
- * of waiting or event synthesis.
- */
- if (opts->initial_delay || target__has_cpu(&opts->target) ||
- perf_pmu__has_hybrid()) {
- pos = evlist__get_tracking_event(evlist);
- if (!evsel__is_dummy_event(pos)) {
- /* Set up dummy event. */
- if (evlist__add_dummy(evlist))
- return -ENOMEM;
- pos = evlist__last(evlist);
- evlist__set_tracking_event(evlist, pos);
- }
-
- /*
- * Enable the dummy event when the process is forked for
- * initial_delay, immediately for system wide.
- */
- if (opts->initial_delay && !pos->immediate &&
- !target__has_cpu(&opts->target))
- pos->core.attr.enable_on_exec = 1;
- else
- pos->immediate = 1;
- }
-
- evlist__config(evlist, opts, &callchain_param);
+ bool skipped = false;
+ bool removed_tracking = false;
evlist__for_each_entry(evlist, pos) {
+ if (removed_tracking) {
+ /*
+ * Normally the head of the list has tracking enabled
+ * for sideband data like mmaps. If this event is
+ * removed, make sure to add tracking to the next
+ * processed event.
+ */
+ if (!pos->tracking) {
+ pos->tracking = true;
+ evsel__config(pos, opts, &callchain_param);
+ }
+ removed_tracking = false;
+ }
try_again:
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
- if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
+ bool report_error = true;
+
+ if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
@@ -1334,15 +1383,72 @@ try_again:
pos = evlist__reset_weak_group(evlist, pos, true);
goto try_again;
}
- rc = -errno;
- evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
- ui__error("%s\n", msg);
- goto out;
+#if defined(__aarch64__) || defined(__arm__)
+ if (strstr(evsel__name(pos), "cycles")) {
+ struct evsel *pos2;
+ /*
+ * Unfortunately ARM has many events named
+ * "cycles" on PMUs like the system-level (L3)
+ * cache which don't support sampling. Only
+ * display such failures to open when there is
+ * only 1 cycles event or verbose is enabled.
+ */
+ evlist__for_each_entry(evlist, pos2) {
+ if (pos2 == pos)
+ continue;
+ if (strstr(evsel__name(pos2), "cycles")) {
+ report_error = false;
+ break;
+ }
+ }
+ }
+#endif
+ if (report_error || verbose > 0) {
+ ui__error("Failure to open event '%s' on PMU '%s' which will be "
+ "removed.\n%s\n",
+ evsel__name(pos), evsel__pmu_name(pos), msg);
+ }
+ if (pos->tracking)
+ removed_tracking = true;
+ pos->skippable = true;
+ skipped = true;
}
-
- pos->supported = true;
}
+ if (skipped) {
+ struct evsel *tmp;
+ int idx = 0;
+ bool evlist_empty = true;
+
+ /* Remove evsels that failed to open and update indices. */
+ evlist__for_each_entry_safe(evlist, tmp, pos) {
+ if (pos->skippable) {
+ evlist__remove(evlist, pos);
+ continue;
+ }
+
+ /*
+ * Note, dummy events may be command line parsed or
+ * added by the tool. We care about supporting `perf
+ * record -e dummy` which may be used as a permission
+ * check. Dummy events that are added to the command
+ * line and opened along with other events that fail,
+ * will still fail as if the dummy events were tool
+ * added events for the sake of code simplicity.
+ */
+ if (!evsel__is_dummy_event(pos))
+ evlist_empty = false;
+ }
+ evlist__for_each_entry(evlist, pos) {
+ pos->core.idx = idx++;
+ }
+ /* If list is empty then fail. */
+ if (evlist_empty) {
+ ui__error("Failure to open any events for recording.\n");
+ rc = -1;
+ goto out;
+ }
+ }
if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
@@ -1354,9 +1460,9 @@ try_again:
"even with a suitable vmlinux or kallsyms file.\n\n");
}
- if (evlist__apply_filters(evlist, &pos)) {
+ if (evlist__apply_filters(evlist, &pos, &opts->target)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
- pos->filter, evsel__name(pos), errno,
+ pos->filter ?: "BPF", evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
rc = -1;
goto out;
@@ -1381,7 +1487,7 @@ static void set_timestamp_boundary(struct record *rec, u64 sample_time)
rec->evlist->last_sample_time = sample_time;
}
-static int process_sample_event(struct perf_tool *tool,
+static int process_sample_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -1423,7 +1529,7 @@ static int process_buildids(struct record *rec)
* first/last samples.
*/
if (rec->buildid_all && !rec->timestamp_boundary)
- rec->tool.sample = NULL;
+ rec->tool.sample = process_event_sample_stub;
return perf_session__process_events(session);
}
@@ -1486,7 +1592,7 @@ static void record__adjust_affinity(struct record *rec, struct mmap *map)
static size_t process_comp_header(void *record, size_t increment)
{
- struct perf_record_compressed *event = record;
+ struct perf_record_compressed2 *event = record;
size_t size = sizeof(*event);
if (increment) {
@@ -1494,17 +1600,17 @@ static size_t process_comp_header(void *record, size_t increment)
return increment;
}
- event->header.type = PERF_RECORD_COMPRESSED;
+ event->header.type = PERF_RECORD_COMPRESSED2;
event->header.size = size;
return size;
}
-static size_t zstd_compress(struct perf_session *session, struct mmap *map,
+static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
void *dst, size_t dst_size, void *src, size_t src_size)
{
- size_t compressed;
- size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
+ ssize_t compressed;
+ size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed2) - 1;
struct zstd_data *zstd_data = &session->zstd_data;
if (map && map->file)
@@ -1512,6 +1618,8 @@ static size_t zstd_compress(struct perf_session *session, struct mmap *map,
compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size,
max_record_size, process_comp_header);
+ if (compressed < 0)
+ return compressed;
if (map && map->file) {
thread->bytes_transferred += src_size;
@@ -1701,10 +1809,8 @@ static void record__init_features(struct record *rec)
if (rec->no_buildid)
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
-#ifdef HAVE_LIBTRACEEVENT
if (!have_tracepoints(&rec->evlist->core.entries))
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
-#endif
if (!rec->opts.branch_stack)
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
@@ -1734,8 +1840,11 @@ record__finish_output(struct record *rec)
struct perf_data *data = &rec->data;
int fd = perf_data__fd(data);
- if (data->is_pipe)
+ if (data->is_pipe) {
+ /* Just to display approx. size */
+ data->file.size = rec->bytes_written;
return;
+ }
rec->session->header.data_size += rec->bytes_written;
data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
@@ -1744,15 +1853,15 @@ record__finish_output(struct record *rec)
data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR);
}
- if (!rec->no_buildid) {
+ /* Buildid scanning disabled or build ID in kernel and synthesized map events. */
+ if (!rec->no_buildid || !rec->no_buildid_cache) {
process_buildids(rec);
if (rec->buildid_all)
- dsos__hit_all(rec->session);
+ perf_session__dsos_hit_all(rec->session);
}
perf_session__write_header(rec->session, rec->evlist, fd, true);
-
- return;
+ perf_session__cache_build_ids(rec->session);
}
static int record__synthesize_workload(struct record *rec, bool tail)
@@ -1791,8 +1900,8 @@ static int
record__switch_output(struct record *rec, bool at_exit)
{
struct perf_data *data = &rec->data;
+ char *new_filename = NULL;
int fd, err;
- char *new_filename;
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
@@ -1814,16 +1923,17 @@ record__switch_output(struct record *rec, bool at_exit)
}
fd = perf_data__switch(data, timestamp,
- rec->session->header.data_offset,
- at_exit, &new_filename);
+ rec->session->header.data_offset,
+ at_exit, &new_filename);
if (fd >= 0 && !at_exit) {
rec->bytes_written = 0;
rec->session->header.data_size = 0;
}
- if (!quiet)
+ if (!quiet) {
fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
data->path, timestamp);
+ }
if (rec->switch_output.num_files) {
int n = rec->switch_output.cur_file + 1;
@@ -1860,24 +1970,17 @@ record__switch_output(struct record *rec, bool at_exit)
return fd;
}
-static void __record__read_lost_samples(struct record *rec, struct evsel *evsel,
+static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
struct perf_record_lost_samples *lost,
- int cpu_idx, int thread_idx)
+ int cpu_idx, int thread_idx, u64 lost_count,
+ u16 misc_flag)
{
- struct perf_counts_values count;
struct perf_sample_id *sid;
- struct perf_sample sample = {};
+ struct perf_sample sample;
int id_hdr_size;
- if (perf_evsel__read(&evsel->core, cpu_idx, thread_idx, &count) < 0) {
- pr_err("read LOST count failed\n");
- return;
- }
-
- if (count.lost == 0)
- return;
-
- lost->lost = count.lost;
+ perf_sample__init(&sample, /*all=*/true);
+ lost->lost = lost_count;
if (evsel->core.ids) {
sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx);
sample.id = sid->id;
@@ -1886,29 +1989,24 @@ static void __record__read_lost_samples(struct record *rec, struct evsel *evsel,
id_hdr_size = perf_event__synthesize_id_sample((void *)(lost + 1),
evsel->core.attr.sample_type, &sample);
lost->header.size = sizeof(*lost) + id_hdr_size;
+ lost->header.misc = misc_flag;
record__write(rec, NULL, lost, lost->header.size);
+ perf_sample__exit(&sample);
}
static void record__read_lost_samples(struct record *rec)
{
struct perf_session *session = rec->session;
- struct perf_record_lost_samples *lost;
+ struct perf_record_lost_samples_and_ids lost;
struct evsel *evsel;
/* there was an error during record__open */
if (session->evlist == NULL)
return;
- lost = zalloc(PERF_SAMPLE_MAX_SIZE);
- if (lost == NULL) {
- pr_debug("Memory allocation failed\n");
- return;
- }
-
- lost->header.type = PERF_RECORD_LOST_SAMPLES;
-
evlist__for_each_entry(session->evlist, evsel) {
struct xyarray *xy = evsel->core.sample_id;
+ u64 lost_count;
if (xy == NULL || evsel->core.fd == NULL)
continue;
@@ -1920,12 +2018,30 @@ static void record__read_lost_samples(struct record *rec)
for (int x = 0; x < xyarray__max_x(xy); x++) {
for (int y = 0; y < xyarray__max_y(xy); y++) {
- __record__read_lost_samples(rec, evsel, lost, x, y);
+ struct perf_counts_values count;
+
+ if (perf_evsel__read(&evsel->core, x, y, &count) < 0) {
+ pr_debug("read LOST count failed\n");
+ return;
+ }
+
+ if (count.lost) {
+ memset(&lost, 0, sizeof(lost));
+ lost.lost.header.type = PERF_RECORD_LOST_SAMPLES;
+ __record__save_lost_samples(rec, evsel, &lost.lost,
+ x, y, count.lost, 0);
+ }
}
}
- }
- free(lost);
+ lost_count = perf_bpf_filter__lost_count(evsel);
+ if (lost_count) {
+ memset(&lost, 0, sizeof(lost));
+ lost.lost.header.type = PERF_RECORD_LOST_SAMPLES;
+ __record__save_lost_samples(rec, evsel, &lost.lost, 0, 0, lost_count,
+ PERF_RECORD_MISC_LOST_SAMPLES_BPF);
+ }
+ }
}
static volatile sig_atomic_t workload_exec_errno;
@@ -2088,6 +2204,14 @@ out:
return err;
}
+static void record__synthesize_final_bpf_metadata(struct record *rec __maybe_unused)
+{
+#ifdef HAVE_LIBBPF_SUPPORT
+ perf_event__synthesize_final_bpf_metadata(rec->session,
+ process_synthesized_event);
+#endif
+}
+
static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
{
struct record *rec = data;
@@ -2119,7 +2243,7 @@ static int record__setup_sb_evlist(struct record *rec)
}
}
- if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
+ if (evlist__add_bpf_sb_event(rec->sb_evlist, perf_session__env(rec->session))) {
pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
return -1;
}
@@ -2138,15 +2262,16 @@ static int record__init_clock(struct record *rec)
struct perf_session *session = rec->session;
struct timespec ref_clockid;
struct timeval ref_tod;
+ struct perf_env *env = perf_session__env(session);
u64 ref;
if (!rec->opts.use_clockid)
return 0;
if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
- session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
+ env->clock.clockid_res_ns = rec->opts.clockid_res_ns;
- session->header.env.clock.clockid = rec->opts.clockid;
+ env->clock.clockid = rec->opts.clockid;
if (gettimeofday(&ref_tod, NULL) != 0) {
pr_err("gettimeofday failed, cannot set reference time.\n");
@@ -2161,12 +2286,12 @@ static int record__init_clock(struct record *rec)
ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
(u64) ref_tod.tv_usec * NSEC_PER_USEC;
- session->header.env.clock.tod_ns = ref;
+ env->clock.tod_ns = ref;
ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
(u64) ref_clockid.tv_nsec;
- session->header.env.clock.clockid_ns = ref;
+ env->clock.clockid_ns = ref;
return 0;
}
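
The hunk above only swaps direct session->header.env accesses for the perf_session__env() accessor; the stored values remain a matched pair of reference timestamps, one from gettimeofday() and one from the selected clockid, both in nanoseconds. A small sketch of that pairing, using CLOCK_MONOTONIC as a stand-in clockid:

    /* Reference-time pairing as stored in the session env by
     * record__init_clock(): a TOD timestamp and a clockid timestamp captured
     * back to back, both converted to nanoseconds. */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/time.h>
    #include <time.h>

    int main(void)
    {
        struct timeval tod;
        struct timespec ref;
        uint64_t tod_ns, clockid_ns;

        if (gettimeofday(&tod, NULL) || clock_gettime(CLOCK_MONOTONIC, &ref))
            return 1;

        tod_ns = (uint64_t)tod.tv_sec * 1000000000ULL +
                 (uint64_t)tod.tv_usec * 1000ULL;
        clockid_ns = (uint64_t)ref.tv_sec * 1000000000ULL +
                     (uint64_t)ref.tv_nsec;

        printf("clock.tod_ns=%llu clock.clockid_ns=%llu\n",
               (unsigned long long)tod_ns, (unsigned long long)clockid_ns);
        return 0;
    }
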
@@ -2180,32 +2305,6 @@ static void hit_auxtrace_snapshot_trigger(struct record *rec)
}
}
-static void record__uniquify_name(struct record *rec)
-{
- struct evsel *pos;
- struct evlist *evlist = rec->evlist;
- char *new_name;
- int ret;
-
- if (!perf_pmu__has_hybrid())
- return;
-
- evlist__for_each_entry(evlist, pos) {
- if (!evsel__is_hybrid(pos))
- continue;
-
- if (strchr(pos->name, '/'))
- continue;
-
- ret = asprintf(&new_name, "%s/%s/",
- pos->pmu_name, pos->name);
- if (ret) {
- free(pos->name);
- pos->name = new_name;
- }
- }
-}
-
static int record__terminate_thread(struct record_thread *thread_data)
{
int err;
@@ -2338,6 +2437,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
int fd;
float ratio = 0;
enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
+ struct perf_env *env;
atexit(record__sig_exit);
signal(SIGCHLD, sig_handler);
@@ -2345,13 +2445,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
signal(SIGTERM, sig_handler);
signal(SIGSEGV, sigsegv_handler);
- if (rec->opts.record_namespaces)
- tool->namespace_events = true;
-
if (rec->opts.record_cgroup) {
-#ifdef HAVE_FILE_HANDLE
- tool->cgroup_events = true;
-#else
+#ifndef HAVE_FILE_HANDLE
pr_err("cgroup tracking is not supported\n");
return -1;
#endif
@@ -2367,12 +2462,24 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
signal(SIGUSR2, SIG_IGN);
}
+ perf_tool__init(tool, /*ordered_events=*/true);
+ tool->sample = process_sample_event;
+ tool->fork = perf_event__process_fork;
+ tool->exit = perf_event__process_exit;
+ tool->comm = perf_event__process_comm;
+ tool->namespaces = perf_event__process_namespaces;
+ tool->mmap = build_id__process_mmap;
+ tool->mmap2 = build_id__process_mmap2;
+ tool->itrace_start = process_timestamp_boundary;
+ tool->aux = process_timestamp_boundary;
+ tool->namespace_events = rec->opts.record_namespaces;
+ tool->cgroup_events = rec->opts.record_cgroup;
session = perf_session__new(data, tool);
if (IS_ERR(session)) {
pr_err("Perf session creation failed.\n");
return PTR_ERR(session);
}
-
+ env = perf_session__env(session);
if (record__threads_enabled(rec)) {
if (perf_data__is_pipe(&rec->data)) {
pr_err("Parallel trace streaming is not available in pipe mode.\n");
@@ -2406,8 +2513,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
#endif // HAVE_EVENTFD_SUPPORT
- session->header.env.comp_type = PERF_COMP_ZSTD;
- session->header.env.comp_level = rec->opts.comp_level;
+ env->comp_type = PERF_COMP_ZSTD;
+ env->comp_level = rec->opts.comp_level;
if (rec->opts.kcore &&
!record__kcore_readable(&session->machines.host)) {
@@ -2439,7 +2546,18 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (data->is_pipe && rec->evlist->core.nr_entries == 1)
rec->opts.sample_id = true;
- record__uniquify_name(rec);
+ if (rec->timestamp_filename && perf_data__is_pipe(data)) {
+ rec->timestamp_filename = false;
+ pr_warning("WARNING: --timestamp-filename option is not available in pipe mode.\n");
+ }
+
+ /*
+ * Use the global stat_config, which is zero-initialized, so aggr_mode is
+ * AGGR_NONE and hybrid_merge is false.
+ */
+ evlist__uniquify_evsel_names(rec->evlist, &stat_config);
+
+ evlist__config(rec->evlist, opts, &callchain_param);
/* Debug message used by test scripts */
pr_debug3("perf record opening and mmapping events\n");
@@ -2449,7 +2567,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
/* Debug message used by test scripts */
pr_debug3("perf record done opening and mmapping events\n");
- session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
+ env->comp_mmap_len = session->evlist->core.mmap_len;
if (rec->opts.kcore) {
err = record__kcore_copy(&session->machines.host, data);
@@ -2459,16 +2577,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
}
- err = bpf__apply_obj_config();
- if (err) {
- char errbuf[BUFSIZ];
-
- bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
- pr_err("ERROR: Apply config to BPF failed: %s\n",
- errbuf);
- goto out_free_threads;
- }
-
/*
* Normally perf_session__new would do this, but it doesn't have the
* evlist.
@@ -2478,7 +2586,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
rec->tool.ordered_events = false;
}
- if (!rec->evlist->core.nr_groups)
+ if (evlist__nr_groups(rec->evlist) == 0)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
if (data->is_pipe) {
@@ -2499,6 +2607,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_free_threads;
}
+ if (!evlist__needs_bpf_sb_event(rec->evlist))
+ opts->no_bpf_event = true;
+
err = record__setup_sb_evlist(rec);
if (err)
goto out_free_threads;
@@ -2526,10 +2637,17 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
* (apart from group members) have enable_on_exec=1 set,
* so don't spoil it by prematurely enabling them.
*/
- if (!target__none(&opts->target) && !opts->initial_delay)
+ if (!target__none(&opts->target) && !opts->target.initial_delay)
evlist__enable(rec->evlist);
/*
+ * offcpu-time does not call execve, so enable_on_exec wouldn't work
+ * when recording a workload; do it manually.
+ */
+ if (rec->off_cpu)
+ evlist__enable_evsel(rec->evlist, (char *)OFFCPU_EVENT);
+
+ /*
* Let the child rip
*/
if (forks) {
@@ -2578,10 +2696,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
evlist__start_workload(rec->evlist);
}
- if (opts->initial_delay) {
+ if (opts->target.initial_delay) {
pr_info(EVLIST_DISABLED_MSG);
- if (opts->initial_delay > 0) {
- usleep(opts->initial_delay * USEC_PER_MSEC);
+ if (opts->target.initial_delay > 0) {
+ usleep(opts->target.initial_delay * USEC_PER_MSEC);
evlist__enable(rec->evlist);
pr_info(EVLIST_ENABLED_MSG);
}
@@ -2741,17 +2859,21 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
trigger_off(&auxtrace_snapshot_trigger);
trigger_off(&switch_output_trigger);
+ record__synthesize_final_bpf_metadata(rec);
+
if (opts->auxtrace_snapshot_on_exit)
record__auxtrace_snapshot_exit(rec);
if (forks && workload_exec_errno) {
- char msg[STRERR_BUFSIZE], strevsels[2048];
+ char msg[STRERR_BUFSIZE];
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
+ struct strbuf sb = STRBUF_INIT;
- evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
+ evlist__format_evsels(rec->evlist, &sb, 2048);
pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
- strevsels, argv[0], emsg);
+ sb.buf, argv[0], emsg);
+ strbuf_release(&sb);
err = -1;
goto out_child;
}
@@ -2775,7 +2897,7 @@ out_free_threads:
if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
- session->header.env.comp_ratio = ratio + 0.5;
+ env->comp_ratio = ratio + 0.5;
}
if (forks) {
@@ -2799,11 +2921,11 @@ out_free_threads:
rec->bytes_written += off_cpu_write(rec->session);
record__read_lost_samples(rec);
- record__synthesize(rec, true);
/* this will be recalculated during process_buildids() */
rec->samples = 0;
if (!err) {
+ record__synthesize(rec, true);
if (!rec->timestamp_filename) {
record__finish_output(rec);
} else {
@@ -2849,10 +2971,10 @@ out_delete_session:
}
#endif
zstd_fini(&session->zstd_data);
- perf_session__delete(session);
-
if (!opts->no_bpf_event)
evlist__stop_sb_thread(rec->sb_evlist);
+
+ perf_session__delete(session);
return status;
}
@@ -2924,9 +3046,11 @@ static int perf_record_config(const char *var, const char *value, void *cb)
else if (!strcmp(value, "no-cache"))
rec->no_buildid_cache = true;
else if (!strcmp(value, "skip"))
- rec->no_buildid = true;
+ rec->no_buildid = rec->no_buildid_cache = true;
else if (!strcmp(value, "mmap"))
rec->buildid_mmap = true;
+ else if (!strcmp(value, "no-mmap"))
+ rec->buildid_mmap = false;
else
return -1;
return 0;
@@ -3116,6 +3240,28 @@ out_free:
return ret;
}
+static int record__parse_off_cpu_thresh(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ struct record_opts *opts = opt->value;
+ char *endptr;
+ u64 off_cpu_thresh_ms;
+
+ if (!str)
+ return -EINVAL;
+
+ off_cpu_thresh_ms = strtoull(str, &endptr, 10);
+
+ /* strtoull() returned 0 but the input wasn't the string "0": parsing failed */
+ if (*endptr || (off_cpu_thresh_ms == 0 && strcmp(str, "0")))
+ return -EINVAL;
+ else
+ opts->off_cpu_thresh_ns = off_cpu_thresh_ms * NSEC_PER_MSEC;
+
+ return 0;
+}
+
void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused)
{
}
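
A self-contained sketch of the validation pattern used by the new --off-cpu-thresh parser above, rejecting trailing junk and treating a parsed value of 0 as an error unless the input is literally "0"; parse_ms() is a hypothetical helper name for this sketch, not part of the perf tree:

    /* Parse a millisecond count and convert it to nanoseconds,
     * returning -EINVAL on any malformed input. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_ms(const char *str, unsigned long long *out_ns)
    {
        char *endptr;
        unsigned long long ms;

        if (!str)
            return -EINVAL;

        errno = 0;
        ms = strtoull(str, &endptr, 10);
        if (*endptr || errno || (ms == 0 && strcmp(str, "0")))
            return -EINVAL;

        *out_ns = ms * 1000000ULL;  /* NSEC_PER_MSEC */
        return 0;
    }

    int main(void)
    {
        unsigned long long ns = 0;
        int ret = parse_ms("500", &ns);

        printf("parse \"500\" -> %d, ns=%llu\n", ret, ns);
        printf("parse \"10x\" -> %d\n", parse_ms("10x", &ns));
        return 0;
    }
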
@@ -3166,7 +3312,7 @@ static int switch_output_setup(struct record *rec)
unsigned long val;
/*
- * If we're using --switch-output-events, then we imply its
+ * If we're using --switch-output-events, then we imply its
* --switch-output=signal, as we'll send a SIGUSR2 from the side band
* thread to its parent.
*/
@@ -3227,7 +3373,7 @@ static const char * const __record_usage[] = {
};
const char * const *record_usage = __record_usage;
-static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
+static int build_id__process_mmap(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine)
{
/*
@@ -3239,7 +3385,7 @@ static int build_id__process_mmap(struct perf_tool *tool, union perf_event *even
return perf_event__process_mmap(tool, event, sample, machine);
}
-static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
+static int build_id__process_mmap2(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine)
{
/*
@@ -3252,7 +3398,7 @@ static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *eve
return perf_event__process_mmap2(tool, event, sample, machine);
}
-static int process_timestamp_boundary(struct perf_tool *tool,
+static int process_timestamp_boundary(const struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
@@ -3309,19 +3455,9 @@ static struct record record = {
.ctl_fd = -1,
.ctl_fd_ack = -1,
.synth = PERF_SYNTH_ALL,
+ .off_cpu_thresh_ns = OFFCPU_THRESH,
},
- .tool = {
- .sample = process_sample_event,
- .fork = perf_event__process_fork,
- .exit = perf_event__process_exit,
- .comm = perf_event__process_comm,
- .namespaces = perf_event__process_namespaces,
- .mmap = build_id__process_mmap,
- .mmap2 = build_id__process_mmap2,
- .itrace_start = process_timestamp_boundary,
- .aux = process_timestamp_boundary,
- .ordered_events = true,
- },
+ .buildid_mmap = true,
};
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
@@ -3329,6 +3465,14 @@ const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
static bool dry_run;
+static struct parse_events_option_args parse_events_option_args = {
+ .evlistp = &record.evlist,
+};
+
+static struct parse_events_option_args switch_output_parse_events_option_args = {
+ .evlistp = &record.sb_evlist,
+};
+
/*
* XXX Will stay a global variable till we fix builtin-script.c to stop messing
* with it and switch to use the library functions in perf_evlist that came
@@ -3337,11 +3481,14 @@ static bool dry_run;
* using pipes, etc.
*/
static struct option __record_options[] = {
- OPT_CALLBACK('e', "event", &record.evlist, "event",
+ OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
OPT_CALLBACK(0, "filter", &record.evlist, "filter",
"event filter", parse_filter),
+ OPT_BOOLEAN(0, "latency", &record.latency,
+ "Enable data collection for latency profiling.\n"
+ "\t\t\t Use perf report --latency for latency-centric profile."),
OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
NULL, "don't record events from perf itself",
exclude_perf),
@@ -3398,6 +3545,8 @@ static struct option __record_options[] = {
"Record the sampled data address data page size"),
OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
"Record the sampled code address (ip) page size"),
+ OPT_BOOLEAN(0, "sample-mem-info", &record.opts.sample_data_src,
+ "Record the data source for memory operations"),
OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
"Record the sample identifier"),
@@ -3422,8 +3571,7 @@ static struct option __record_options[] = {
"or ranges of time to enable events e.g. '-D 10-20,30-40'",
record__parse_event_enable_time),
OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
- OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
- "user to profile"),
+ OPT_STRING('u', "uid", &record.uid_str, "user", "user to profile"),
OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
"branch any", "sample any taken branches",
@@ -3442,7 +3590,7 @@ static struct option __record_options[] = {
"sample selected machine registers on interrupt,"
" use '-I?' to list register names", parse_intr_regs),
OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
- "sample selected machine registers on interrupt,"
+ "sample selected machine registers in user space,"
" use '--user-regs=?' to list register names", parse_user_regs),
OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
"Record running/enabled time of read (:S) events"),
@@ -3472,16 +3620,12 @@ static struct option __record_options[] = {
"collect kernel callchains"),
OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
"collect user callchains"),
- OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
- "clang binary to use for compiling BPF scriptlets"),
- OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
- "options passed to clang when compiling BPF scriptlets"),
OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
"Record build-id of all DSOs regardless of hits"),
- OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
- "Record build-id in map events"),
+ OPT_BOOLEAN_SET(0, "buildid-mmap", &record.buildid_mmap, &record.buildid_mmap_set,
+ "Record build-id in mmap events and skip build-id processing."),
OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
"append timestamp to output filename"),
OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
@@ -3490,7 +3634,8 @@ static struct option __record_options[] = {
&record.switch_output.set, "signal or size[BKMG] or time[smhd]",
"Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
"signal"),
- OPT_CALLBACK_SET(0, "switch-output-event", &record.sb_evlist, &record.switch_output_event_set, "switch output event",
+ OPT_CALLBACK_SET(0, "switch-output-event", &switch_output_parse_events_option_args,
+ &record.switch_output_event_set, "switch output event",
"switch output event selector. use 'perf list' to list available events",
parse_events_option_new_evlist),
OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
@@ -3536,6 +3681,11 @@ static struct option __record_options[] = {
"write collected trace data into several data files using parallel threads",
record__parse_threads),
OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
+ OPT_STRING(0, "setup-filter", &record.filter_action, "pin|unpin",
+ "BPF filter action"),
+ OPT_CALLBACK(0, "off-cpu-thresh", &record.opts, "ms",
+ "Dump off-cpu samples if off-cpu time exceeds this threshold (in milliseconds). (Default: 500ms)",
+ record__parse_off_cpu_thresh),
OPT_END()
};
@@ -3549,9 +3699,7 @@ static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cp
if (cpu_map__is_dummy(cpus))
return 0;
- perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
- if (cpu.cpu == -1)
- continue;
+ perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus) {
/* Return ENODEV is input cpu is greater than max cpu */
if ((unsigned long)cpu.cpu > mask->nbits)
return -ENODEV;
@@ -3952,33 +4100,14 @@ int cmd_record(int argc, const char **argv)
setlocale(LC_ALL, "");
-#ifndef HAVE_LIBBPF_SUPPORT
-# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
- set_nobuild('\0', "clang-path", true);
- set_nobuild('\0', "clang-opt", true);
-# undef set_nobuild
-#endif
-
-#ifndef HAVE_BPF_PROLOGUE
-# if !defined (HAVE_DWARF_SUPPORT)
-# define REASON "NO_DWARF=1"
-# elif !defined (HAVE_LIBBPF_SUPPORT)
-# define REASON "NO_LIBBPF=1"
-# else
-# define REASON "this architecture doesn't support BPF prologue"
-# endif
-# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
- set_nobuild('\0', "vmlinux", true);
-# undef set_nobuild
-# undef REASON
-#endif
-
#ifndef HAVE_BPF_SKEL
# define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c)
set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
# undef set_nobuild
#endif
+ /* Disable eager loading of kernel symbols that adds overhead to perf record. */
+ symbol_conf.lazy_load_kernel_maps = true;
rec->opts.affinity = PERF_AFFINITY_SYS;
rec->evlist = evlist__new();
@@ -4010,19 +4139,41 @@ int cmd_record(int argc, const char **argv)
}
- if (rec->buildid_mmap) {
- if (!perf_can_record_build_id()) {
- pr_err("Failed: no support to record build id in mmap events, update your kernel.\n");
+ if (record.latency) {
+ /*
+ * There is no fundamental reason why latency profiling
+ * can't work for system-wide mode, but exact semantics
+ * and details are to be defined.
+ * See the following thread for details:
+ * https://lore.kernel.org/all/Z4XDJyvjiie3howF@google.com/
+ */
+ if (record.opts.target.system_wide) {
+ pr_err("Failed: latency profiling is not supported with system-wide collection.\n");
err = -EINVAL;
goto out_opts;
}
- pr_debug("Enabling build id in mmap2 events.\n");
- /* Enable mmap build id synthesizing. */
- symbol_conf.buildid_mmap2 = true;
+ record.opts.record_switch_events = true;
+ }
+
+ if (rec->buildid_mmap && !perf_can_record_build_id()) {
+ pr_warning("Missing support for build id in kernel mmap events.\n"
+ "Disable this warning with --no-buildid-mmap\n");
+ rec->buildid_mmap = false;
+ }
+
+ if (rec->buildid_mmap) {
/* Enable perf_event_attr::build_id bit. */
rec->opts.build_id = true;
- /* Disable build id cache. */
+ /* Disable build-ID table in the header. */
rec->no_buildid = true;
+ } else {
+ pr_debug("Disabling build id in synthesized mmap2 events.\n");
+ symbol_conf.no_buildid_mmap2 = true;
+ }
+
+ if (rec->no_buildid_set && rec->no_buildid) {
+ /* -B implies -N for historic reasons. */
+ rec->no_buildid_cache = true;
}
if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
@@ -4073,8 +4224,8 @@ int cmd_record(int argc, const char **argv)
}
if (rec->switch_output.num_files) {
- rec->switch_output.filenames = calloc(sizeof(char *),
- rec->switch_output.num_files);
+ rec->switch_output.filenames = calloc(rec->switch_output.num_files,
+ sizeof(char *));
if (!rec->switch_output.filenames) {
err = -EINVAL;
goto out_opts;
@@ -4086,6 +4237,22 @@ int cmd_record(int argc, const char **argv)
pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");
}
+ if (rec->filter_action) {
+ if (!strcmp(rec->filter_action, "pin"))
+ err = perf_bpf_filter__pin();
+ else if (!strcmp(rec->filter_action, "unpin"))
+ err = perf_bpf_filter__unpin();
+ else {
+ pr_warning("Unknown BPF filter action: %s\n", rec->filter_action);
+ err = -EINVAL;
+ }
+ goto out_opts;
+ }
+
+ /* For backward compatibility, -d implies --mem-info */
+ if (rec->opts.sample_address)
+ rec->opts.sample_data_src = true;
+
/*
* Allow aliases to facilitate the lookup of symbols for address
* filters. Refer to auxtrace_parse_filters().
@@ -4101,17 +4268,9 @@ int cmd_record(int argc, const char **argv)
if (dry_run)
goto out;
- err = bpf__setup_stdout(rec->evlist);
- if (err) {
- bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
- pr_err("ERROR: Setup BPF stdout failed: %s\n",
- errbuf);
- goto out;
- }
-
err = -ENOMEM;
- if (rec->no_buildid_cache || rec->no_buildid) {
+ if (rec->no_buildid_cache) {
disable_buildid_cache();
} else if (rec->switch_output.enabled) {
/*
@@ -4146,18 +4305,13 @@ int cmd_record(int argc, const char **argv)
record.opts.tail_synthesize = true;
if (rec->evlist->core.nr_entries == 0) {
- if (perf_pmu__has_hybrid()) {
- err = evlist__add_default_hybrid(rec->evlist,
- !record.opts.no_samples);
- } else {
- err = __evlist__add_default(rec->evlist,
- !record.opts.no_samples);
- }
+ struct evlist *def_evlist = evlist__new_default();
- if (err < 0) {
- pr_err("Not enough memory for event selector list\n");
+ if (!def_evlist)
goto out;
- }
+
+ evlist__splice_list_tail(rec->evlist, &def_evlist->core.entries);
+ evlist__delete(def_evlist);
}
if (rec->opts.target.tid && !rec->opts.no_inherit_set)
@@ -4169,27 +4323,26 @@ int cmd_record(int argc, const char **argv)
ui__warning("%s\n", errbuf);
}
- err = target__parse_uid(&rec->opts.target);
- if (err) {
- int saved_errno = errno;
+ if (rec->uid_str) {
+ uid_t uid = parse_uid(rec->uid_str);
- target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
- ui__error("%s", errbuf);
+ if (uid == UINT_MAX) {
+ ui__error("Invalid User: %s", rec->uid_str);
+ err = -EINVAL;
+ goto out;
+ }
+ err = parse_uid_filter(rec->evlist, uid);
+ if (err)
+ goto out;
- err = -saved_errno;
- goto out;
+ /* User ID filtering implies system wide. */
+ rec->opts.target.system_wide = true;
}
- /* Enable ignoring missing threads when -u/-p option is defined. */
- rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
-
- if (evlist__fix_hybrid_cpus(rec->evlist, rec->opts.target.cpu_list)) {
- pr_err("failed to use cpu list %s\n",
- rec->opts.target.cpu_list);
- goto out;
- }
+ /* Enable ignoring missing threads when -p option is defined. */
+ rec->opts.ignore_missing_thread = rec->opts.target.pid;
- rec->opts.target.hybrid = perf_pmu__has_hybrid();
+ evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
arch__add_leaf_frame_record_opts(&rec->opts);
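
With this change, -u resolves the user once and installs a uid filter on the evlist (implying system-wide collection) instead of scanning /proc for the user's threads. A hedged sketch of what a parse_uid()-style lookup amounts to, accepting a user name or numeric uid and returning UINT_MAX on failure; this is an illustration, not the perf implementation:

    /* Name-or-number uid lookup: return UINT_MAX on failure, matching the
     * check in cmd_record() above. */
    #include <limits.h>
    #include <pwd.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    static uid_t lookup_uid(const char *str)
    {
        struct passwd *pw = getpwnam(str);
        char *endptr;
        unsigned long v;

        if (pw)
            return pw->pw_uid;

        v = strtoul(str, &endptr, 10);  /* fall back to a numeric uid */
        if (endptr == str || *endptr)
            return UINT_MAX;
        return (uid_t)v;
    }

    int main(int argc, char **argv)
    {
        uid_t uid = lookup_uid(argc > 1 ? argv[1] : "root");

        if (uid == UINT_MAX) {
            fprintf(stderr, "invalid user\n");
            return 1;
        }
        printf("uid = %u\n", (unsigned int)uid);
        return 0;
    }
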
@@ -4238,6 +4391,12 @@ int cmd_record(int argc, const char **argv)
goto out;
}
+ err = record__config_tracking_events(rec);
+ if (err) {
+ pr_err("record__config_tracking_events failed, error %d\n", err);
+ goto out;
+ }
+
err = record__init_thread_masks(rec);
if (err) {
pr_err("Failed to initialize parallel data streaming masks\n");
@@ -4257,13 +4416,13 @@ int cmd_record(int argc, const char **argv)
err = __cmd_record(&record, argc, argv);
out:
- evlist__delete(rec->evlist);
+ record__free_thread_masks(rec, rec->nr_threads);
+ rec->nr_threads = 0;
symbol__exit();
auxtrace_record__free(rec->itr);
out_opts:
- record__free_thread_masks(rec, rec->nr_threads);
- rec->nr_threads = 0;
evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
+ evlist__delete(rec->evlist);
return err;
}