Diffstat (limited to 'tools/perf/util/session.c')
-rw-r--r--  tools/perf/util/session.c | 2969
1 file changed, 2157 insertions(+), 812 deletions(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index cf1fe01b7e89..4236503c8f6c 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1,309 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <errno.h> +#include <signal.h> +#include <inttypes.h> +#include <linux/err.h> #include <linux/kernel.h> +#include <linux/zalloc.h> +#include <api/fs/fs.h> #include <byteswap.h> #include <unistd.h> #include <sys/types.h> #include <sys/mman.h> +#include <perf/cpumap.h> +#include <perf/event.h> +#include "map_symbol.h" +#include "branch.h" +#include "debug.h" +#include "env.h" #include "evlist.h" #include "evsel.h" +#include "memswap.h" +#include "map.h" +#include "symbol.h" #include "session.h" #include "tool.h" -#include "sort.h" -#include "util.h" -#include "cpumap.h" -#include "event-parse.h" #include "perf_regs.h" -#include "vdso.h" - -static int perf_session__open(struct perf_session *self, bool force) -{ - struct stat input_stat; +#include "asm/bug.h" +#include "auxtrace.h" +#include "thread.h" +#include "thread-stack.h" +#include "sample-raw.h" +#include "stat.h" +#include "tsc.h" +#include "ui/progress.h" +#include "util.h" +#include "arch/common.h" +#include "units.h" +#include "annotate.h" +#include "perf.h" +#include <internal/lib.h> - if (!strcmp(self->filename, "-")) { - self->fd_pipe = true; - self->fd = STDIN_FILENO; +static int perf_session__deliver_event(struct perf_session *session, + union perf_event *event, + const struct perf_tool *tool, + u64 file_offset, + const char *file_path); - if (perf_session__read_header(self, self->fd) < 0) - pr_err("incompatible file format (rerun with -v to learn more)"); +static int perf_session__open(struct perf_session *session) +{ + struct perf_data *data = session->data; - return 0; + if (perf_session__read_header(session) < 0) { + pr_err("incompatible file format (rerun with -v to learn more)\n"); + return -1; } - self->fd = open(self->filename, O_RDONLY); - if (self->fd < 0) { - int err = errno; - - pr_err("failed to open %s: %s", self->filename, strerror(err)); - if (err == ENOENT && !strcmp(self->filename, "perf.data")) - pr_err(" (try 'perf record' first)"); - pr_err("\n"); - return -errno; + if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) { + /* Auxiliary events may reference exited threads, hold onto dead ones. 
*/ + symbol_conf.keep_exited_threads = true; } - if (fstat(self->fd, &input_stat) < 0) - goto out_close; - - if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { - pr_err("file %s not owned by current user or root\n", - self->filename); - goto out_close; - } + if (perf_data__is_pipe(data)) + return 0; - if (!input_stat.st_size) { - pr_info("zero-sized file (%s), nothing to do!\n", - self->filename); - goto out_close; - } + if (perf_header__has_feat(&session->header, HEADER_STAT)) + return 0; - if (perf_session__read_header(self, self->fd) < 0) { - pr_err("incompatible file format (rerun with -v to learn more)"); - goto out_close; + if (!evlist__valid_sample_type(session->evlist)) { + pr_err("non matching sample_type\n"); + return -1; } - if (!perf_evlist__valid_sample_type(self->evlist)) { - pr_err("non matching sample_type"); - goto out_close; + if (!evlist__valid_sample_id_all(session->evlist)) { + pr_err("non matching sample_id_all\n"); + return -1; } - if (!perf_evlist__valid_sample_id_all(self->evlist)) { - pr_err("non matching sample_id_all"); - goto out_close; + if (!evlist__valid_read_format(session->evlist)) { + pr_err("non matching read_format\n"); + return -1; } - self->size = input_stat.st_size; return 0; - -out_close: - close(self->fd); - self->fd = -1; - return -1; } void perf_session__set_id_hdr_size(struct perf_session *session) { - u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); + u16 id_hdr_size = evlist__id_hdr_size(session->evlist); machines__set_id_hdr_size(&session->machines, id_hdr_size); } -int perf_session__create_kernel_maps(struct perf_session *self) +int perf_session__create_kernel_maps(struct perf_session *session) { - int ret = machine__create_kernel_maps(&self->machines.host); + int ret = machine__create_kernel_maps(&session->machines.host); if (ret >= 0) - ret = machines__create_guest_kernel_maps(&self->machines); + ret = machines__create_guest_kernel_maps(&session->machines); return ret; } -static void perf_session__destroy_kernel_maps(struct perf_session *self) +static void perf_session__destroy_kernel_maps(struct perf_session *session) { - machines__destroy_kernel_maps(&self->machines); + machines__destroy_kernel_maps(&session->machines); } -struct perf_session *perf_session__new(const char *filename, int mode, - bool force, bool repipe, - struct perf_tool *tool) +static bool perf_session__has_comm_exec(struct perf_session *session) { - struct perf_session *self; - struct stat st; - size_t len; - - if (!filename || !strlen(filename)) { - if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) - filename = "-"; - else - filename = "perf.data"; - } - - len = strlen(filename); - self = zalloc(sizeof(*self) + len); - - if (self == NULL) - goto out; - - memcpy(self->filename, filename, len); - self->repipe = repipe; - INIT_LIST_HEAD(&self->ordered_samples.samples); - INIT_LIST_HEAD(&self->ordered_samples.sample_cache); - INIT_LIST_HEAD(&self->ordered_samples.to_free); - machines__init(&self->machines); + struct evsel *evsel; - if (mode == O_RDONLY) { - if (perf_session__open(self, force) < 0) - goto out_delete; - perf_session__set_id_hdr_size(self); - } else if (mode == O_WRONLY) { - /* - * In O_RDONLY mode this will be performed when reading the - * kernel MMAP event, in perf_event__process_mmap(). 
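
As an illustration (not part of this patch): read-mode callers reach this constructor through a perf_session__new() convenience wrapper; a minimal sketch of such a caller, assuming the struct perf_data fields, the two-argument wrapper, and the IS_ERR()/PTR_ERR() helpers used elsewhere in tools/perf:

	/*
	 * Minimal sketch of a read-mode caller (illustration only; the
	 * perf_data fields and the perf_session__new() wrapper are
	 * assumptions based on other tools/perf code, e.g. builtin-report).
	 */
	static int sketch__process_file(struct perf_tool *tool)
	{
		struct perf_data data = {
			.path = "perf.data",
			.mode = PERF_DATA_MODE_READ,
		};
		struct perf_session *session = perf_session__new(&data, tool);
		int err;

		if (IS_ERR(session))
			return PTR_ERR(session);

		err = perf_session__process_events(session);
		perf_session__delete(session);
		return err;
	}
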
- */ - if (perf_session__create_kernel_maps(self) < 0) - goto out_delete; + evlist__for_each_entry(session->evlist, evsel) { + if (evsel->core.attr.comm_exec) + return true; } - if (tool && tool->ordering_requires_timestamps && - tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) { - dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); - tool->ordered_samples = false; - } - -out: - return self; -out_delete: - perf_session__delete(self); - return NULL; + return false; } -static void perf_session__delete_dead_threads(struct perf_session *session) +static void perf_session__set_comm_exec(struct perf_session *session) { - machine__delete_dead_threads(&session->machines.host); -} + bool comm_exec = perf_session__has_comm_exec(session); -static void perf_session__delete_threads(struct perf_session *session) -{ - machine__delete_threads(&session->machines.host); + machines__set_comm_exec(&session->machines, comm_exec); } -static void perf_session_env__delete(struct perf_session_env *env) +static int ordered_events__deliver_event(struct ordered_events *oe, + struct ordered_event *event) { - free(env->hostname); - free(env->os_release); - free(env->version); - free(env->arch); - free(env->cpu_desc); - free(env->cpuid); + struct perf_session *session = container_of(oe, struct perf_session, + ordered_events); - free(env->cmdline); - free(env->sibling_cores); - free(env->sibling_threads); - free(env->numa_nodes); - free(env->pmu_mappings); + return perf_session__deliver_event(session, event->event, + session->tool, event->file_offset, + event->file_path); } -void perf_session__delete(struct perf_session *self) +struct perf_session *__perf_session__new(struct perf_data *data, + struct perf_tool *tool, + bool trace_event_repipe, + struct perf_env *host_env) { - perf_session__destroy_kernel_maps(self); - perf_session__delete_dead_threads(self); - perf_session__delete_threads(self); - perf_session_env__delete(&self->header.env); - machines__exit(&self->machines); - close(self->fd); - free(self); - vdso__exit(); -} + int ret = -ENOMEM; + struct perf_session *session = zalloc(sizeof(*session)); -static int process_event_synth_tracing_data_stub(union perf_event *event - __maybe_unused, - struct perf_session *session - __maybe_unused) -{ - dump_printf(": unhandled!\n"); - return 0; -} + if (!session) + goto out; -static int process_event_synth_attr_stub(union perf_event *event __maybe_unused, - struct perf_evlist **pevlist - __maybe_unused) -{ - dump_printf(": unhandled!\n"); - return 0; -} + session->trace_event_repipe = trace_event_repipe; + session->tool = tool; + session->decomp_data.zstd_decomp = &session->zstd_data; + session->active_decomp = &session->decomp_data; + INIT_LIST_HEAD(&session->auxtrace_index); + machines__init(&session->machines); + ordered_events__init(&session->ordered_events, + ordered_events__deliver_event, NULL); + + perf_env__init(&session->header.env); + if (data) { + ret = perf_data__open(data); + if (ret < 0) + goto out_delete; -static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, - union perf_event *event __maybe_unused, - struct perf_sample *sample __maybe_unused, - struct perf_evsel *evsel __maybe_unused, - struct machine *machine __maybe_unused) -{ - dump_printf(": unhandled!\n"); - return 0; -} + session->data = data; -static int process_event_stub(struct perf_tool *tool __maybe_unused, - union perf_event *event __maybe_unused, - struct perf_sample *sample __maybe_unused, - struct machine *machine 
__maybe_unused) -{ - dump_printf(": unhandled!\n"); - return 0; -} + if (perf_data__is_read(data)) { + ret = perf_session__open(session); + if (ret < 0) + goto out_delete; -static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, - union perf_event *event __maybe_unused, - struct perf_session *perf_session - __maybe_unused) -{ - dump_printf(": unhandled!\n"); - return 0; -} + /* + * set session attributes that are present in perf.data + * but not in pipe-mode. + */ + if (!data->is_pipe) { + perf_session__set_id_hdr_size(session); + perf_session__set_comm_exec(session); + } -static int process_event_type_stub(struct perf_tool *tool __maybe_unused, - union perf_event *event __maybe_unused) -{ - dump_printf(": unhandled!\n"); - return 0; -} + evlist__init_trace_event_sample_raw(session->evlist, &session->header.env); -static int process_finished_round(struct perf_tool *tool, - union perf_event *event, - struct perf_session *session); - -static void perf_tool__fill_defaults(struct perf_tool *tool) -{ - if (tool->sample == NULL) - tool->sample = process_event_sample_stub; - if (tool->mmap == NULL) - tool->mmap = process_event_stub; - if (tool->comm == NULL) - tool->comm = process_event_stub; - if (tool->fork == NULL) - tool->fork = process_event_stub; - if (tool->exit == NULL) - tool->exit = process_event_stub; - if (tool->lost == NULL) - tool->lost = perf_event__process_lost; - if (tool->read == NULL) - tool->read = process_event_sample_stub; - if (tool->throttle == NULL) - tool->throttle = process_event_stub; - if (tool->unthrottle == NULL) - tool->unthrottle = process_event_stub; - if (tool->attr == NULL) - tool->attr = process_event_synth_attr_stub; - if (tool->event_type == NULL) - tool->event_type = process_event_type_stub; - if (tool->tracing_data == NULL) - tool->tracing_data = process_event_synth_tracing_data_stub; - if (tool->build_id == NULL) - tool->build_id = process_finished_round_stub; - if (tool->finished_round == NULL) { - if (tool->ordered_samples) - tool->finished_round = process_finished_round; - else - tool->finished_round = process_finished_round_stub; + /* Open the directory data. */ + if (data->is_dir) { + ret = perf_data__open_dir(data); + if (ret) + goto out_delete; + } + + if (!symbol_conf.kallsyms_name && + !symbol_conf.vmlinux_name) + symbol_conf.kallsyms_name = perf_data__kallsyms_name(data); + } + } else { + assert(host_env != NULL); + session->machines.host.env = host_env; } -} - -void mem_bswap_32(void *src, int byte_size) -{ - u32 *m = src; - while (byte_size > 0) { - *m = bswap_32(*m); - byte_size -= sizeof(u32); - ++m; + if (session->evlist) + session->evlist->session = session; + + session->machines.host.single_address_space = + perf_env__single_address_space(session->machines.host.env); + + if (!data || perf_data__is_write(data)) { + /* + * In O_RDONLY mode this will be performed when reading the + * kernel MMAP event, in perf_event__process_mmap(). + */ + if (perf_session__create_kernel_maps(session) < 0) + pr_warning("Cannot read kernel map\n"); + } + + /* + * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is + * processed, so evlist__sample_id_all is not meaningful here. 
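
An aside on the pattern (not part of the patch): session-wide properties are derived by scanning each evsel's attr, as perf_session__has_comm_exec() does above; a hypothetical helper in the same shape, here testing sample_id_all across all events:

	/*
	 * Hypothetical helper (illustration only) following the same
	 * evlist__for_each_entry() pattern as perf_session__has_comm_exec().
	 */
	static bool sketch__all_have_sample_id_all(struct evlist *evlist)
	{
		struct evsel *evsel;

		evlist__for_each_entry(evlist, evsel) {
			if (!evsel->core.attr.sample_id_all)
				return false;
		}
		return true;
	}
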
+ */ + if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps && + tool->ordered_events && !evlist__sample_id_all(session->evlist)) { + dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); + tool->ordered_events = false; } + + return session; + + out_delete: + perf_session__delete(session); + out: + return ERR_PTR(ret); } -void mem_bswap_64(void *src, int byte_size) +static void perf_decomp__release_events(struct decomp *next) { - u64 *m = src; + struct decomp *decomp; + size_t mmap_len; - while (byte_size > 0) { - *m = bswap_64(*m); - byte_size -= sizeof(u64); - ++m; + do { + decomp = next; + if (decomp == NULL) + break; + next = decomp->next; + mmap_len = decomp->mmap_len; + munmap(decomp, mmap_len); + } while (1); +} + +void perf_session__delete(struct perf_session *session) +{ + if (session == NULL) + return; + auxtrace__free(session); + auxtrace_index__free(&session->auxtrace_index); + debuginfo_cache__delete(); + perf_session__destroy_kernel_maps(session); + perf_decomp__release_events(session->decomp_data.decomp); + perf_env__exit(&session->header.env); + machines__exit(&session->machines); + if (session->data) { + if (perf_data__is_read(session->data)) + evlist__delete(session->evlist); + perf_data__close(session->data); } +#ifdef HAVE_LIBTRACEEVENT + trace_event__cleanup(&session->tevent); +#endif + free(session); } static void swap_sample_id_all(union perf_event *event, void *data) @@ -352,6 +311,29 @@ static void perf_event__mmap_swap(union perf_event *event, } } +static void perf_event__mmap2_swap(union perf_event *event, + bool sample_id_all) +{ + event->mmap2.pid = bswap_32(event->mmap2.pid); + event->mmap2.tid = bswap_32(event->mmap2.tid); + event->mmap2.start = bswap_64(event->mmap2.start); + event->mmap2.len = bswap_64(event->mmap2.len); + event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); + + if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) { + event->mmap2.maj = bswap_32(event->mmap2.maj); + event->mmap2.min = bswap_32(event->mmap2.min); + event->mmap2.ino = bswap_64(event->mmap2.ino); + event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation); + } + + if (sample_id_all) { + void *data = &event->mmap2.filename; + + data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); + swap_sample_id_all(event, data); + } +} static void perf_event__task_swap(union perf_event *event, bool sample_id_all) { event->fork.pid = bswap_32(event->fork.pid); @@ -377,6 +359,100 @@ static void perf_event__read_swap(union perf_event *event, bool sample_id_all) swap_sample_id_all(event, &event->read + 1); } +static void perf_event__aux_swap(union perf_event *event, bool sample_id_all) +{ + event->aux.aux_offset = bswap_64(event->aux.aux_offset); + event->aux.aux_size = bswap_64(event->aux.aux_size); + event->aux.flags = bswap_64(event->aux.flags); + + if (sample_id_all) + swap_sample_id_all(event, &event->aux + 1); +} + +static void perf_event__itrace_start_swap(union perf_event *event, + bool sample_id_all) +{ + event->itrace_start.pid = bswap_32(event->itrace_start.pid); + event->itrace_start.tid = bswap_32(event->itrace_start.tid); + + if (sample_id_all) + swap_sample_id_all(event, &event->itrace_start + 1); +} + +static void perf_event__switch_swap(union perf_event *event, bool sample_id_all) +{ + if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { + event->context_switch.next_prev_pid = + bswap_32(event->context_switch.next_prev_pid); + event->context_switch.next_prev_tid = + 
bswap_32(event->context_switch.next_prev_tid); + } + + if (sample_id_all) + swap_sample_id_all(event, &event->context_switch + 1); +} + +static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all) +{ + event->text_poke.addr = bswap_64(event->text_poke.addr); + event->text_poke.old_len = bswap_16(event->text_poke.old_len); + event->text_poke.new_len = bswap_16(event->text_poke.new_len); + + if (sample_id_all) { + size_t len = sizeof(event->text_poke.old_len) + + sizeof(event->text_poke.new_len) + + event->text_poke.old_len + + event->text_poke.new_len; + void *data = &event->text_poke.old_len; + + data += PERF_ALIGN(len, sizeof(u64)); + swap_sample_id_all(event, data); + } +} + +static void perf_event__throttle_swap(union perf_event *event, + bool sample_id_all) +{ + event->throttle.time = bswap_64(event->throttle.time); + event->throttle.id = bswap_64(event->throttle.id); + event->throttle.stream_id = bswap_64(event->throttle.stream_id); + + if (sample_id_all) + swap_sample_id_all(event, &event->throttle + 1); +} + +static void perf_event__namespaces_swap(union perf_event *event, + bool sample_id_all) +{ + u64 i; + + event->namespaces.pid = bswap_32(event->namespaces.pid); + event->namespaces.tid = bswap_32(event->namespaces.tid); + event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces); + + for (i = 0; i < event->namespaces.nr_namespaces; i++) { + struct perf_ns_link_info *ns = &event->namespaces.link_info[i]; + + ns->dev = bswap_64(ns->dev); + ns->ino = bswap_64(ns->ino); + } + + if (sample_id_all) + swap_sample_id_all(event, &event->namespaces.link_info[i]); +} + +static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all) +{ + event->cgroup.id = bswap_64(event->cgroup.id); + + if (sample_id_all) { + void *data = &event->cgroup.path; + + data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); + swap_sample_id_all(event, data); + } +} + static u8 revbyte(u8 b) { int rev = (b >> 4) | ((b & 0xf) << 4); @@ -387,7 +463,7 @@ static u8 revbyte(u8 b) /* * XXX this is hack in attempt to carry flags bitfield - * throught endian village. ABI says: + * through endian village. 
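
A standalone illustration of the per-byte bit reversal used here (the tail of revbyte() falls outside this hunk; the two extra masking steps below are the conventional completion and should be checked against the full source):

	/*
	 * Standalone demo (illustration only): reversing the bit order of
	 * each byte moves bitfields allocated from the LSB to the positions
	 * a reader with the opposite allocation order expects.
	 */
	#include <stdio.h>

	static unsigned char demo_revbyte(unsigned char b)
	{
		int rev = (b >> 4) | ((b & 0xf) << 4);		 /* swap nibbles */
		rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); /* swap bit pairs */
		rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); /* swap bits */
		return (unsigned char)rev;
	}

	int main(void)
	{
		printf("%#x -> %#x\n", 0x05, demo_revbyte(0x05)); /* 0x5 -> 0xa0 */
		return 0;
	}
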
ABI says: * * Bit-fields are allocated from right to left (least to most significant) * on little-endian implementations and from left to right (most to least @@ -414,16 +490,45 @@ void perf_event__attr_swap(struct perf_event_attr *attr) { attr->type = bswap_32(attr->type); attr->size = bswap_32(attr->size); - attr->config = bswap_64(attr->config); - attr->sample_period = bswap_64(attr->sample_period); - attr->sample_type = bswap_64(attr->sample_type); - attr->read_format = bswap_64(attr->read_format); - attr->wakeup_events = bswap_32(attr->wakeup_events); - attr->bp_type = bswap_32(attr->bp_type); - attr->bp_addr = bswap_64(attr->bp_addr); - attr->bp_len = bswap_64(attr->bp_len); - swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); +#define bswap_safe(f, n) \ + (attr->size > (offsetof(struct perf_event_attr, f) + \ + sizeof(attr->f) * (n))) +#define bswap_field(f, sz) \ +do { \ + if (bswap_safe(f, 0)) \ + attr->f = bswap_##sz(attr->f); \ +} while(0) +#define bswap_field_16(f) bswap_field(f, 16) +#define bswap_field_32(f) bswap_field(f, 32) +#define bswap_field_64(f) bswap_field(f, 64) + + bswap_field_64(config); + bswap_field_64(sample_period); + bswap_field_64(sample_type); + bswap_field_64(read_format); + bswap_field_32(wakeup_events); + bswap_field_32(bp_type); + bswap_field_64(bp_addr); + bswap_field_64(bp_len); + bswap_field_64(branch_sample_type); + bswap_field_64(sample_regs_user); + bswap_field_32(sample_stack_user); + bswap_field_32(aux_watermark); + bswap_field_16(sample_max_stack); + bswap_field_32(aux_sample_size); + + /* + * After read_format are bitfields. Check read_format because + * we are unable to use offsetof on bitfield. + */ + if (bswap_safe(read_format, 1)) + swap_bitfield((u8 *) (&attr->read_format + 1), + sizeof(u64)); +#undef bswap_field_64 +#undef bswap_field_32 +#undef bswap_field +#undef bswap_safe } static void perf_event__hdr_attr_swap(union perf_event *event, @@ -434,8 +539,15 @@ static void perf_event__hdr_attr_swap(union perf_event *event, perf_event__attr_swap(&event->attr.attr); size = event->header.size; - size -= (void *)&event->attr.id - (void *)event; - mem_bswap_64(event->attr.id, size); + size -= perf_record_header_attr_id(event) - (void *)event; + mem_bswap_64(perf_record_header_attr_id(event), size); +} + +static void perf_event__event_update_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + event->event_update.type = bswap_64(event->event_update.type); + event->event_update.id = bswap_64(event->event_update.id); } static void perf_event__event_type_swap(union perf_event *event, @@ -451,101 +563,182 @@ static void perf_event__tracing_data_swap(union perf_event *event, event->tracing_data.size = bswap_32(event->tracing_data.size); } -typedef void (*perf_event__swap_op)(union perf_event *event, - bool sample_id_all); +static void perf_event__auxtrace_info_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + size_t size; -static perf_event__swap_op perf_event__swap_ops[] = { - [PERF_RECORD_MMAP] = perf_event__mmap_swap, - [PERF_RECORD_COMM] = perf_event__comm_swap, - [PERF_RECORD_FORK] = perf_event__task_swap, - [PERF_RECORD_EXIT] = perf_event__task_swap, - [PERF_RECORD_LOST] = perf_event__all64_swap, - [PERF_RECORD_READ] = perf_event__read_swap, - [PERF_RECORD_SAMPLE] = perf_event__all64_swap, - [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, - [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, - [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, - 
[PERF_RECORD_HEADER_BUILD_ID] = NULL, - [PERF_RECORD_HEADER_MAX] = NULL, -}; + event->auxtrace_info.type = bswap_32(event->auxtrace_info.type); -struct sample_queue { - u64 timestamp; - u64 file_offset; - union perf_event *event; - struct list_head list; -}; + size = event->header.size; + size -= (void *)&event->auxtrace_info.priv - (void *)event; + mem_bswap_64(event->auxtrace_info.priv, size); +} -static void perf_session_free_sample_buffers(struct perf_session *session) +static void perf_event__auxtrace_swap(union perf_event *event, + bool sample_id_all __maybe_unused) { - struct ordered_samples *os = &session->ordered_samples; - - while (!list_empty(&os->to_free)) { - struct sample_queue *sq; - - sq = list_entry(os->to_free.next, struct sample_queue, list); - list_del(&sq->list); - free(sq); + event->auxtrace.size = bswap_64(event->auxtrace.size); + event->auxtrace.offset = bswap_64(event->auxtrace.offset); + event->auxtrace.reference = bswap_64(event->auxtrace.reference); + event->auxtrace.idx = bswap_32(event->auxtrace.idx); + event->auxtrace.tid = bswap_32(event->auxtrace.tid); + event->auxtrace.cpu = bswap_32(event->auxtrace.cpu); +} + +static void perf_event__auxtrace_error_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + event->auxtrace_error.type = bswap_32(event->auxtrace_error.type); + event->auxtrace_error.code = bswap_32(event->auxtrace_error.code); + event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu); + event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid); + event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid); + event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt); + event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip); + if (event->auxtrace_error.fmt) + event->auxtrace_error.time = bswap_64(event->auxtrace_error.time); + if (event->auxtrace_error.fmt >= 2) { + event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid); + event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu); } } -static int perf_session_deliver_event(struct perf_session *session, - union perf_event *event, - struct perf_sample *sample, - struct perf_tool *tool, - u64 file_offset); +static void perf_event__thread_map_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + unsigned i; + + event->thread_map.nr = bswap_64(event->thread_map.nr); -static int flush_sample_queue(struct perf_session *s, - struct perf_tool *tool) + for (i = 0; i < event->thread_map.nr; i++) + event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid); +} + +static void perf_event__cpu_map_swap(union perf_event *event, + bool sample_id_all __maybe_unused) { - struct ordered_samples *os = &s->ordered_samples; - struct list_head *head = &os->samples; - struct sample_queue *tmp, *iter; - struct perf_sample sample; - u64 limit = os->next_flush; - u64 last_ts = os->last_sample ? 
os->last_sample->timestamp : 0ULL; - unsigned idx = 0, progress_next = os->nr_samples / 16; - int ret; + struct perf_record_cpu_map_data *data = &event->cpu_map.data; - if (!tool->ordered_samples || !limit) - return 0; + data->type = bswap_16(data->type); - list_for_each_entry_safe(iter, tmp, head, list) { - if (iter->timestamp > limit) - break; + switch (data->type) { + case PERF_CPU_MAP__CPUS: + data->cpus_data.nr = bswap_16(data->cpus_data.nr); - ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample); - if (ret) - pr_err("Can't parse sample, err = %d\n", ret); - else { - ret = perf_session_deliver_event(s, iter->event, &sample, tool, - iter->file_offset); - if (ret) - return ret; - } + for (unsigned i = 0; i < data->cpus_data.nr; i++) + data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]); + break; + case PERF_CPU_MAP__MASK: + data->mask32_data.long_size = bswap_16(data->mask32_data.long_size); - os->last_flush = iter->timestamp; - list_del(&iter->list); - list_add(&iter->list, &os->sample_cache); - if (++idx >= progress_next) { - progress_next += os->nr_samples / 16; - ui_progress__update(idx, os->nr_samples, - "Processing time ordered events..."); + switch (data->mask32_data.long_size) { + case 4: + data->mask32_data.nr = bswap_16(data->mask32_data.nr); + for (unsigned i = 0; i < data->mask32_data.nr; i++) + data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]); + break; + case 8: + data->mask64_data.nr = bswap_16(data->mask64_data.nr); + for (unsigned i = 0; i < data->mask64_data.nr; i++) + data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]); + break; + default: + pr_err("cpu_map swap: unsupported long size\n"); } + break; + case PERF_CPU_MAP__RANGE_CPUS: + data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu); + data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu); + break; + default: + break; } +} - if (list_empty(head)) { - os->last_sample = NULL; - } else if (last_ts <= limit) { - os->last_sample = - list_entry(head->prev, struct sample_queue, list); - } +static void perf_event__stat_config_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + u64 size; - os->nr_samples = 0; + size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]); + size += 1; /* nr item itself */ + mem_bswap_64(&event->stat_config.nr, size); +} - return 0; +static void perf_event__stat_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + event->stat.id = bswap_64(event->stat.id); + event->stat.thread = bswap_32(event->stat.thread); + event->stat.cpu = bswap_32(event->stat.cpu); + event->stat.val = bswap_64(event->stat.val); + event->stat.ena = bswap_64(event->stat.ena); + event->stat.run = bswap_64(event->stat.run); +} + +static void perf_event__stat_round_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + event->stat_round.type = bswap_64(event->stat_round.type); + event->stat_round.time = bswap_64(event->stat_round.time); +} + +static void perf_event__time_conv_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + event->time_conv.time_shift = bswap_64(event->time_conv.time_shift); + event->time_conv.time_mult = bswap_64(event->time_conv.time_mult); + event->time_conv.time_zero = bswap_64(event->time_conv.time_zero); + + if (event_contains(event->time_conv, time_cycles)) { + event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles); + event->time_conv.time_mask = bswap_64(event->time_conv.time_mask); + } } +typedef void 
(*perf_event__swap_op)(union perf_event *event, + bool sample_id_all); + +static perf_event__swap_op perf_event__swap_ops[] = { + [PERF_RECORD_MMAP] = perf_event__mmap_swap, + [PERF_RECORD_MMAP2] = perf_event__mmap2_swap, + [PERF_RECORD_COMM] = perf_event__comm_swap, + [PERF_RECORD_FORK] = perf_event__task_swap, + [PERF_RECORD_EXIT] = perf_event__task_swap, + [PERF_RECORD_LOST] = perf_event__all64_swap, + [PERF_RECORD_READ] = perf_event__read_swap, + [PERF_RECORD_THROTTLE] = perf_event__throttle_swap, + [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap, + [PERF_RECORD_SAMPLE] = perf_event__all64_swap, + [PERF_RECORD_AUX] = perf_event__aux_swap, + [PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap, + [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap, + [PERF_RECORD_SWITCH] = perf_event__switch_swap, + [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, + [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap, + [PERF_RECORD_CGROUP] = perf_event__cgroup_swap, + [PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap, + [PERF_RECORD_AUX_OUTPUT_HW_ID] = perf_event__all64_swap, + [PERF_RECORD_CALLCHAIN_DEFERRED] = perf_event__all64_swap, + [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, + [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, + [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, + [PERF_RECORD_HEADER_BUILD_ID] = NULL, + [PERF_RECORD_ID_INDEX] = perf_event__all64_swap, + [PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap, + [PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap, + [PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap, + [PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap, + [PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap, + [PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap, + [PERF_RECORD_STAT] = perf_event__stat_swap, + [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap, + [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap, + [PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap, + [PERF_RECORD_HEADER_MAX] = NULL, +}; + /* * When perf record finishes a pass on every buffers, it records this pseudo * event. @@ -585,149 +778,209 @@ static int flush_sample_queue(struct perf_session *s, * Flush every events below timestamp 7 * etc... */ -static int process_finished_round(struct perf_tool *tool, - union perf_event *event __maybe_unused, - struct perf_session *session) +int perf_event__process_finished_round(const struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct ordered_events *oe) { - int ret = flush_sample_queue(session, tool); - if (!ret) - session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; - - return ret; + if (dump_trace) + fprintf(stdout, "\n"); + return ordered_events__flush(oe, OE_FLUSH__ROUND); } -/* The queue is ordered by time */ -static void __queue_event(struct sample_queue *new, struct perf_session *s) +int perf_session__queue_event(struct perf_session *s, union perf_event *event, + u64 timestamp, u64 file_offset, const char *file_path) { - struct ordered_samples *os = &s->ordered_samples; - struct sample_queue *sample = os->last_sample; - u64 timestamp = new->timestamp; - struct list_head *p; - - ++os->nr_samples; - os->last_sample = new; - - if (!sample) { - list_add(&new->list, &os->samples); - os->max_timestamp = timestamp; - return; - } - - /* - * last_sample might point to some random place in the list as it's - * the last queued event. We expect that the new event is close to - * this. 
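
Distilled form of the hinted insertion being removed here (illustration only; generic stand-in types, the kernel-style linux/list.h API and u64 assumed): start from the most recently queued node rather than the list head, then walk forward or backward to the correct slot.

	/* Illustration of the hinted time-ordered insert. */
	struct tnode {
		u64 ts;
		struct list_head list;
	};

	static void sketch__queue_hinted(struct list_head *head,
					 struct tnode *hint, struct tnode *new)
	{
		struct tnode *pos = hint;
		struct list_head *p;

		if (pos->ts <= new->ts) {
			while (pos->ts <= new->ts) {
				p = pos->list.next;
				if (p == head) {	/* ran past the tail */
					list_add_tail(&new->list, head);
					return;
				}
				pos = list_entry(p, struct tnode, list);
			}
			/* pos is the first later node: insert before it */
			list_add_tail(&new->list, &pos->list);
		} else {
			while (pos->ts > new->ts) {
				p = pos->list.prev;
				if (p == head) {	/* ran past the front */
					list_add(&new->list, head);
					return;
				}
				pos = list_entry(p, struct tnode, list);
			}
			/* pos is the last earlier node: insert after it */
			list_add(&new->list, &pos->list);
		}
	}
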
- */ - if (sample->timestamp <= timestamp) { - while (sample->timestamp <= timestamp) { - p = sample->list.next; - if (p == &os->samples) { - list_add_tail(&new->list, &os->samples); - os->max_timestamp = timestamp; - return; - } - sample = list_entry(p, struct sample_queue, list); - } - list_add_tail(&new->list, &sample->list); - } else { - while (sample->timestamp > timestamp) { - p = sample->list.prev; - if (p == &os->samples) { - list_add(&new->list, &os->samples); - return; - } - sample = list_entry(p, struct sample_queue, list); - } - list_add(&new->list, &sample->list); - } + return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path); } -#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) - -static int perf_session_queue_event(struct perf_session *s, union perf_event *event, - struct perf_sample *sample, u64 file_offset) +static void callchain__lbr_callstack_printf(struct perf_sample *sample) { - struct ordered_samples *os = &s->ordered_samples; - struct list_head *sc = &os->sample_cache; - u64 timestamp = sample->time; - struct sample_queue *new; - - if (!timestamp || timestamp == ~0ULL) - return -ETIME; + struct ip_callchain *callchain = sample->callchain; + struct branch_stack *lbr_stack = sample->branch_stack; + struct branch_entry *entries = perf_sample__branch_entries(sample); + u64 kernel_callchain_nr = callchain->nr; + unsigned int i; - if (timestamp < s->ordered_samples.last_flush) { - printf("Warning: Timestamp below last timeslice flush\n"); - return -EINVAL; + for (i = 0; i < kernel_callchain_nr; i++) { + if (callchain->ips[i] == PERF_CONTEXT_USER) + break; } - if (!list_empty(sc)) { - new = list_entry(sc->next, struct sample_queue, list); - list_del(&new->list); - } else if (os->sample_buffer) { - new = os->sample_buffer + os->sample_buffer_idx; - if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER) - os->sample_buffer = NULL; - } else { - os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new)); - if (!os->sample_buffer) - return -ENOMEM; - list_add(&os->sample_buffer->list, &os->to_free); - os->sample_buffer_idx = 2; - new = os->sample_buffer + 1; - } + if ((i != kernel_callchain_nr) && lbr_stack->nr) { + u64 total_nr; + /* + * LBR callstack can only get user call chain, + * i is kernel call chain number, + * 1 is PERF_CONTEXT_USER. + * + * The user call chain is stored in LBR registers. + * LBR are pair registers. The caller is stored + * in "from" register, while the callee is stored + * in "to" register. + * For example, there is a call stack + * "A"->"B"->"C"->"D". + * The LBR registers will be recorded like + * "C"->"D", "B"->"C", "A"->"B". + * So only the first "to" register and all "from" + * registers are needed to construct the whole stack. + */ + total_nr = i + 1 + lbr_stack->nr + 1; + kernel_callchain_nr = i + 1; - new->timestamp = timestamp; - new->file_offset = file_offset; - new->event = event; + printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr); - __queue_event(new, s); + for (i = 0; i < kernel_callchain_nr; i++) + printf("..... %2d: %016" PRIx64 "\n", + i, callchain->ips[i]); - return 0; + printf("..... %2d: %016" PRIx64 "\n", + (int)(kernel_callchain_nr), entries[0].to); + for (i = 0; i < lbr_stack->nr; i++) + printf("..... 
%2d: %016" PRIx64 "\n", + (int)(i + kernel_callchain_nr + 1), entries[i].from); + } } -static void callchain__printf(struct perf_sample *sample) +static void callchain__printf(struct evsel *evsel, + struct perf_sample *sample) { unsigned int i; + struct ip_callchain *callchain = sample->callchain; + + if (evsel__has_branch_callstack(evsel)) + callchain__lbr_callstack_printf(sample); - printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); + printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr); - for (i = 0; i < sample->callchain->nr; i++) + for (i = 0; i < callchain->nr; i++) printf("..... %2d: %016" PRIx64 "\n", - i, sample->callchain->ips[i]); + i, callchain->ips[i]); + + if (sample->deferred_callchain) + printf("...... (deferred)\n"); } -static void branch_stack__printf(struct perf_sample *sample) +static void branch_stack__printf(struct perf_sample *sample, + struct evsel *evsel) { + struct branch_entry *entries = perf_sample__branch_entries(sample); + bool callstack = evsel__has_branch_callstack(evsel); + u64 *branch_stack_cntr = sample->branch_stack_cntr; uint64_t i; - printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); + if (!callstack) { + printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr); + } else { + /* the reason of adding 1 to nr is because after expanding + * branch stack it generates nr + 1 callstack records. e.g., + * B()->C() + * A()->B() + * the final callstack should be: + * C() + * B() + * A() + */ + printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr+1); + } + + for (i = 0; i < sample->branch_stack->nr; i++) { + struct branch_entry *e = &entries[i]; + + if (!callstack) { + printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s %s\n", + i, e->from, e->to, + (unsigned short)e->flags.cycles, + e->flags.mispred ? "M" : " ", + e->flags.predicted ? "P" : " ", + e->flags.abort ? "A" : " ", + e->flags.in_tx ? "T" : " ", + (unsigned)e->flags.reserved, + get_branch_type(e), + e->flags.spec ? branch_spec_desc(e->flags.spec) : ""); + } else { + if (i == 0) { + printf("..... %2"PRIu64": %016" PRIx64 "\n" + "..... %2"PRIu64": %016" PRIx64 "\n", + i, e->to, i+1, e->from); + } else { + printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from); + } + } + } - for (i = 0; i < sample->branch_stack->nr; i++) - printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", - i, sample->branch_stack->entries[i].from, - sample->branch_stack->entries[i].to); + if (branch_stack_cntr) { + unsigned int br_cntr_width, br_cntr_nr; + + perf_env__find_br_cntr_info(evsel__env(evsel), &br_cntr_nr, &br_cntr_width); + printf("... branch stack counters: nr:%" PRIu64 " (counter width: %u max counter nr:%u)\n", + sample->branch_stack->nr, br_cntr_width, br_cntr_nr); + for (i = 0; i < sample->branch_stack->nr; i++) + printf("..... %2"PRIu64": %016" PRIx64 "\n", i, branch_stack_cntr[i]); + } } -static void regs_dump__printf(u64 mask, u64 *regs) +static void regs_dump__printf(u64 mask, u64 *regs, const char *arch) { unsigned rid, i = 0; for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { u64 val = regs[i++]; - printf(".... %-5s 0x%" PRIx64 "\n", - perf_reg_name(rid), val); + printf(".... 
%-5s 0x%016" PRIx64 "\n", + perf_reg_name(rid, arch), val); } } -static void regs_user__printf(struct perf_sample *sample, u64 mask) +static const char *regs_abi[] = { + [PERF_SAMPLE_REGS_ABI_NONE] = "none", + [PERF_SAMPLE_REGS_ABI_32] = "32-bit", + [PERF_SAMPLE_REGS_ABI_64] = "64-bit", +}; + +static inline const char *regs_dump_abi(struct regs_dump *d) { - struct regs_dump *user_regs = &sample->user_regs; + if (d->abi > PERF_SAMPLE_REGS_ABI_64) + return "unknown"; - if (user_regs->regs) { - printf("... user regs: mask 0x%" PRIx64 "\n", mask); - regs_dump__printf(mask, user_regs->regs); - } + return regs_abi[d->abi]; +} + +static void regs__printf(const char *type, struct regs_dump *regs, const char *arch) +{ + u64 mask = regs->mask; + + printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n", + type, + mask, + regs_dump_abi(regs)); + + regs_dump__printf(mask, regs->regs, arch); +} + +static void regs_user__printf(struct perf_sample *sample, const char *arch) +{ + struct regs_dump *user_regs; + + if (!sample->user_regs) + return; + + user_regs = perf_sample__user_regs(sample); + + if (user_regs->regs) + regs__printf("user", user_regs, arch); +} + +static void regs_intr__printf(struct perf_sample *sample, const char *arch) +{ + struct regs_dump *intr_regs; + + if (!sample->intr_regs) + return; + + intr_regs = perf_sample__intr_regs(sample); + + if (intr_regs->regs) + regs__printf("intr", intr_regs, arch); } static void stack_user__printf(struct stack_dump *dump) @@ -736,14 +989,12 @@ static void stack_user__printf(struct stack_dump *dump) dump->size, dump->offset); } -static void perf_session__print_tstamp(struct perf_session *session, - union perf_event *event, - struct perf_sample *sample) +static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample) { - u64 sample_type = perf_evlist__sample_type(session->evlist); + u64 sample_type = __evlist__combined_sample_type(evlist); if (event->header.type != PERF_RECORD_SAMPLE && - !perf_evlist__sample_id_all(session->evlist)) { + !evlist__sample_id_all(evlist)) { fputs("-1 -1 ", stdout); return; } @@ -755,189 +1006,681 @@ static void perf_session__print_tstamp(struct perf_session *session, printf("%" PRIu64 " ", sample->time); } -static void dump_event(struct perf_session *session, union perf_event *event, - u64 file_offset, struct perf_sample *sample) +static void sample_read__printf(struct perf_sample *sample, u64 read_format) +{ + printf("... sample_read:\n"); + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + printf("...... time enabled %016" PRIx64 "\n", + sample->read.time_enabled); + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + printf("...... time running %016" PRIx64 "\n", + sample->read.time_running); + + if (read_format & PERF_FORMAT_GROUP) { + struct sample_read_value *value = sample->read.group.values; + + printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); + + sample_read_group__for_each(value, sample->read.group.nr, read_format) { + printf("..... id %016" PRIx64 + ", value %016" PRIx64, + value->id, value->value); + if (read_format & PERF_FORMAT_LOST) + printf(", lost %" PRIu64, value->lost); + printf("\n"); + } + } else { + printf("..... 
id %016" PRIx64 ", value %016" PRIx64, + sample->read.one.id, sample->read.one.value); + if (read_format & PERF_FORMAT_LOST) + printf(", lost %" PRIu64, sample->read.one.lost); + printf("\n"); + } +} + +static void dump_event(struct evlist *evlist, union perf_event *event, + u64 file_offset, struct perf_sample *sample, + const char *file_path) { if (!dump_trace) return; - printf("\n%#" PRIx64 " [%#x]: event: %d\n", - file_offset, event->header.size, event->header.type); + printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n", + file_offset, file_path, event->header.size, event->header.type); trace_event(event); + if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw) + evlist->trace_event_sample_raw(evlist, event, sample); if (sample) - perf_session__print_tstamp(session, event, sample); + evlist__print_tstamp(evlist, event, sample); printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, event->header.size, perf_event__name(event->header.type)); } -static void dump_sample(struct perf_evsel *evsel, union perf_event *event, - struct perf_sample *sample) +char *get_page_size_name(u64 size, char *str) +{ + if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size)) + snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A"); + + return str; +} + +static void dump_sample(struct evsel *evsel, union perf_event *event, + struct perf_sample *sample, const char *arch) { u64 sample_type; + char str[PAGE_SIZE_NAME_LEN]; if (!dump_trace) return; - printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", + printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", event->header.misc, sample->pid, sample->tid, sample->ip, sample->period, sample->addr); - sample_type = evsel->attr.sample_type; + sample_type = evsel->core.attr.sample_type; - if (sample_type & PERF_SAMPLE_CALLCHAIN) - callchain__printf(sample); + if (evsel__has_callchain(evsel)) + callchain__printf(evsel, sample); - if (sample_type & PERF_SAMPLE_BRANCH_STACK) - branch_stack__printf(sample); + if (evsel__has_br_stack(evsel)) + branch_stack__printf(sample, evsel); if (sample_type & PERF_SAMPLE_REGS_USER) - regs_user__printf(sample, evsel->attr.sample_regs_user); + regs_user__printf(sample, arch); + + if (sample_type & PERF_SAMPLE_REGS_INTR) + regs_intr__printf(sample, arch); if (sample_type & PERF_SAMPLE_STACK_USER) stack_user__printf(&sample->user_stack); - if (sample_type & PERF_SAMPLE_WEIGHT) - printf("... weight: %" PRIu64 "\n", sample->weight); + if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) { + printf("... weight: %" PRIu64 "", sample->weight); + if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) { + printf(",0x%"PRIx16"", sample->ins_lat); + printf(",0x%"PRIx16"", sample->weight3); + } + printf("\n"); + } if (sample_type & PERF_SAMPLE_DATA_SRC) printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); + + if (sample_type & PERF_SAMPLE_PHYS_ADDR) + printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr); + + if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) + printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str)); + + if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) + printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str)); + + if (sample_type & PERF_SAMPLE_TRANSACTION) + printf("... 
transaction: %" PRIx64 "\n", sample->transaction); + + if (sample_type & PERF_SAMPLE_READ) + sample_read__printf(sample, evsel->core.attr.read_format); } -static struct machine * - perf_session__find_machine_for_cpumode(struct perf_session *session, - union perf_event *event) +static void dump_deferred_callchain(struct evsel *evsel, union perf_event *event, + struct perf_sample *sample) { - const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + if (!dump_trace) + return; + + printf("(IP, 0x%x): %d/%d: %#" PRIx64 "\n", + event->header.misc, sample->pid, sample->tid, sample->deferred_cookie); + + if (evsel__has_callchain(evsel)) + callchain__printf(evsel, sample); +} + +static void dump_read(struct evsel *evsel, union perf_event *event) +{ + struct perf_record_read *read_event = &event->read; + u64 read_format; + + if (!dump_trace) + return; + + printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid, + evsel__name(evsel), event->read.value); + + if (!evsel) + return; + + read_format = evsel->core.attr.read_format; + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled); + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + printf("... time running : %" PRI_lu64 "\n", read_event->time_running); + if (read_format & PERF_FORMAT_ID) + printf("... id : %" PRI_lu64 "\n", read_event->id); + + if (read_format & PERF_FORMAT_LOST) + printf("... lost : %" PRI_lu64 "\n", read_event->lost); +} + +static struct machine *machines__find_for_cpumode(struct machines *machines, + union perf_event *event, + struct perf_sample *sample) +{ if (perf_guest && - ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || - (cpumode == PERF_RECORD_MISC_GUEST_USER))) { + ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || + (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) { u32 pid; - if (event->header.type == PERF_RECORD_MMAP) + if (sample->machine_pid) + pid = sample->machine_pid; + else if (event->header.type == PERF_RECORD_MMAP + || event->header.type == PERF_RECORD_MMAP2) pid = event->mmap.pid; else - pid = event->ip.pid; + pid = sample->pid; + + /* + * Guest code machine is created as needed and does not use + * DEFAULT_GUEST_KERNEL_ID. + */ + if (symbol_conf.guest_code) + return machines__findnew(machines, pid); - return perf_session__findnew_machine(session, pid); + return machines__find_guest(machines, pid); } - return &session->machines.host; + return &machines->host; } -static int perf_session_deliver_event(struct perf_session *session, - union perf_event *event, - struct perf_sample *sample, - struct perf_tool *tool, - u64 file_offset) +static int deliver_sample_value(struct evlist *evlist, + const struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct sample_read_value *v, + struct machine *machine, + bool per_thread) { - struct perf_evsel *evsel; - struct machine *machine; + struct perf_sample_id *sid = evlist__id2sid(evlist, v->id); + struct evsel *evsel; + u64 *storage = NULL; - dump_event(session, event, file_offset, sample); + if (sid) { + storage = perf_sample_id__get_period_storage(sid, sample->tid, per_thread); + } - evsel = perf_evlist__id2evsel(session->evlist, sample->id); - if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) { - /* - * XXX We're leaving PERF_RECORD_SAMPLE unnacounted here - * because the tools right now may apply filters, discarding - * some of the samples. 
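
Related illustration (not part of the patch): the PERF_FORMAT_* flags tested by dump_read() above gate optional u64 fields in the kernel's read() record; a hedged sketch of the resulting record size in the non-group case (PERF_FORMAT_LOST needs a recent linux/perf_event.h):

	/*
	 * Illustration: u64 words in one non-group read_format record,
	 * mirroring the flags dump_read() prints. Sketch only; see
	 * linux/perf_event.h for the authoritative layout.
	 */
	#include <linux/perf_event.h>
	#include <linux/types.h>
	#include <stddef.h>

	static size_t sketch__read_format_words(__u64 read_format)
	{
		size_t n = 1;					/* value */

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			n++;					/* time_enabled */
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			n++;					/* time_running */
		if (read_format & PERF_FORMAT_ID)
			n++;					/* id */
		if (read_format & PERF_FORMAT_LOST)
			n++;					/* lost */
		return n;
	}
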
For consistency, in the future we - * should have something like nr_filtered_samples and remove - * the sample->period from total_sample_period, etc, KISS for - * now tho. - * - * Also testing against NULL allows us to handle files without - * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the - * future probably it'll be a good idea to restrict event - * processing via perf_session to files with both set. - */ - hists__inc_nr_events(&evsel->hists, event->header.type); + if (storage) { + sample->id = v->id; + sample->period = v->value - *storage; + *storage = v->value; + } + + if (!storage || sid->evsel == NULL) { + ++evlist->stats.nr_unknown_id; + return 0; + } + + /* + * There's no reason to deliver sample + * for zero period, bail out. + */ + if (!sample->period) + return 0; + + evsel = container_of(sid->evsel, struct evsel, core); + return tool->sample(tool, event, sample, evsel, machine); +} + +static int deliver_sample_group(struct evlist *evlist, + const struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine, + u64 read_format, + bool per_thread) +{ + int ret = -EINVAL; + struct sample_read_value *v = sample->read.group.values; + + if (tool->dont_split_sample_group) + return deliver_sample_value(evlist, tool, event, sample, v, machine, + per_thread); + + sample_read_group__for_each(v, sample->read.group.nr, read_format) { + ret = deliver_sample_value(evlist, tool, event, sample, v, + machine, per_thread); + if (ret) + break; } - machine = perf_session__find_machine_for_cpumode(session, event); + return ret; +} + +static int evlist__deliver_sample(struct evlist *evlist, const struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample, + struct evsel *evsel, struct machine *machine) +{ + /* We know evsel != NULL. */ + u64 sample_type = evsel->core.attr.sample_type; + u64 read_format = evsel->core.attr.read_format; + bool per_thread = perf_evsel__attr_has_per_thread_sample_period(&evsel->core); + + /* Standard sample delivery. */ + if (!(sample_type & PERF_SAMPLE_READ)) + return tool->sample(tool, event, sample, evsel, machine); + + /* For PERF_SAMPLE_READ we have either single or group mode. */ + if (read_format & PERF_FORMAT_GROUP) + return deliver_sample_group(evlist, tool, event, sample, + machine, read_format, per_thread); + else + return deliver_sample_value(evlist, tool, event, sample, + &sample->read.one, machine, + per_thread); +} + +/* + * Samples with deferred callchains should wait for the next matching + * PERF_RECORD_CALLCHAIN_RECORD entries. Keep the events in a list and + * deliver them once it finds the callchains. + */ +struct deferred_event { + struct list_head list; + union perf_event *event; +}; + +/* + * This is called when a deferred callchain record comes up. Find all matching + * samples, merge the callchains and process them. 
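
A toy model of the hand-off this comment describes (illustration only; the fixed-size buffer and names are stand-ins): a sample parks with its cookie until a deferred-callchain record carrying the same cookie supplies the user part of the chain.

	/*
	 * Toy model of deferred-callchain merging (illustration only).
	 * Real code parses perf events; here the chains are plain arrays.
	 */
	#include <stdint.h>
	#include <string.h>

	struct sketch_pending {
		uint64_t cookie;	/* matches the deferred record */
		uint64_t ips[64];	/* kernel part now, user part later */
		size_t nr;
	};

	static int sketch__try_merge(struct sketch_pending *ps, uint64_t cookie,
				     const uint64_t *user_ips, size_t user_nr)
	{
		if (ps->cookie != cookie)
			return 0;		/* different sample, keep waiting */
		if (ps->nr + user_nr > 64)
			return -1;		/* would overflow the toy buffer */
		memcpy(&ps->ips[ps->nr], user_ips, user_nr * sizeof(*user_ips));
		ps->nr += user_nr;
		return 1;			/* chain complete: deliver */
	}
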
+ */ +static int evlist__deliver_deferred_callchain(struct evlist *evlist, + const struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + struct deferred_event *de, *tmp; + struct evsel *evsel; + int ret = 0; + + if (!tool->merge_deferred_callchains) { + evsel = evlist__id2evsel(evlist, sample->id); + return tool->callchain_deferred(tool, event, sample, + evsel, machine); + } + + list_for_each_entry_safe(de, tmp, &evlist->deferred_samples, list) { + struct perf_sample orig_sample; + + ret = evlist__parse_sample(evlist, de->event, &orig_sample); + if (ret < 0) { + pr_err("failed to parse original sample\n"); + break; + } + + if (sample->tid != orig_sample.tid) + continue; + + if (event->callchain_deferred.cookie == orig_sample.deferred_cookie) + sample__merge_deferred_callchain(&orig_sample, sample); + else + orig_sample.deferred_callchain = false; + + evsel = evlist__id2evsel(evlist, orig_sample.id); + ret = evlist__deliver_sample(evlist, tool, de->event, + &orig_sample, evsel, machine); + + if (orig_sample.deferred_callchain) + free(orig_sample.callchain); + + list_del(&de->list); + free(de->event); + free(de); + + if (ret) + break; + } + return ret; +} + +/* + * This is called at the end of the data processing for the session. Flush the + * remaining samples as there's no hope for matching deferred callchains. + */ +static int session__flush_deferred_samples(struct perf_session *session, + const struct perf_tool *tool) +{ + struct evlist *evlist = session->evlist; + struct machine *machine = &session->machines.host; + struct deferred_event *de, *tmp; + struct evsel *evsel; + int ret = 0; + + list_for_each_entry_safe(de, tmp, &evlist->deferred_samples, list) { + struct perf_sample sample; + + ret = evlist__parse_sample(evlist, de->event, &sample); + if (ret < 0) { + pr_err("failed to parse original sample\n"); + break; + } + + evsel = evlist__id2evsel(evlist, sample.id); + ret = evlist__deliver_sample(evlist, tool, de->event, + &sample, evsel, machine); + + list_del(&de->list); + free(de->event); + free(de); + + if (ret) + break; + } + return ret; +} + +static int machines__deliver_event(struct machines *machines, + struct evlist *evlist, + union perf_event *event, + struct perf_sample *sample, + const struct perf_tool *tool, u64 file_offset, + const char *file_path) +{ + struct evsel *evsel; + struct machine *machine; + + dump_event(evlist, event, file_offset, sample, file_path); + + evsel = evlist__id2evsel(evlist, sample->id); + + machine = machines__find_for_cpumode(machines, event, sample); switch (event->header.type) { case PERF_RECORD_SAMPLE: - dump_sample(evsel, event, sample); if (evsel == NULL) { - ++session->stats.nr_unknown_id; + ++evlist->stats.nr_unknown_id; return 0; } if (machine == NULL) { - ++session->stats.nr_unprocessable_samples; + ++evlist->stats.nr_unprocessable_samples; + dump_sample(evsel, event, sample, perf_env__arch(NULL)); return 0; } - return tool->sample(tool, event, sample, evsel, machine); + dump_sample(evsel, event, sample, perf_env__arch(machine->env)); + if (sample->deferred_callchain && tool->merge_deferred_callchains) { + struct deferred_event *de = malloc(sizeof(*de)); + size_t sz = event->header.size; + + if (de == NULL) + return -ENOMEM; + + de->event = malloc(sz); + if (de->event == NULL) { + free(de); + return -ENOMEM; + } + memcpy(de->event, event, sz); + list_add_tail(&de->list, &evlist->deferred_samples); + return 0; + } + return evlist__deliver_sample(evlist, tool, event, sample, 
evsel, machine); case PERF_RECORD_MMAP: return tool->mmap(tool, event, sample, machine); + case PERF_RECORD_MMAP2: + if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT) + ++evlist->stats.nr_proc_map_timeout; + return tool->mmap2(tool, event, sample, machine); case PERF_RECORD_COMM: return tool->comm(tool, event, sample, machine); + case PERF_RECORD_NAMESPACES: + return tool->namespaces(tool, event, sample, machine); + case PERF_RECORD_CGROUP: + return tool->cgroup(tool, event, sample, machine); case PERF_RECORD_FORK: return tool->fork(tool, event, sample, machine); case PERF_RECORD_EXIT: return tool->exit(tool, event, sample, machine); case PERF_RECORD_LOST: if (tool->lost == perf_event__process_lost) - session->stats.total_lost += event->lost.lost; + evlist->stats.total_lost += event->lost.lost; return tool->lost(tool, event, sample, machine); + case PERF_RECORD_LOST_SAMPLES: + if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF) + evlist->stats.total_dropped_samples += event->lost_samples.lost; + else if (tool->lost_samples == perf_event__process_lost_samples) + evlist->stats.total_lost_samples += event->lost_samples.lost; + return tool->lost_samples(tool, event, sample, machine); case PERF_RECORD_READ: + dump_read(evsel, event); return tool->read(tool, event, sample, evsel, machine); case PERF_RECORD_THROTTLE: return tool->throttle(tool, event, sample, machine); case PERF_RECORD_UNTHROTTLE: return tool->unthrottle(tool, event, sample, machine); + case PERF_RECORD_AUX: + if (tool->aux == perf_event__process_aux) { + if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) + evlist->stats.total_aux_lost += 1; + if (event->aux.flags & PERF_AUX_FLAG_PARTIAL) + evlist->stats.total_aux_partial += 1; + if (event->aux.flags & PERF_AUX_FLAG_COLLISION) + evlist->stats.total_aux_collision += 1; + } + return tool->aux(tool, event, sample, machine); + case PERF_RECORD_ITRACE_START: + return tool->itrace_start(tool, event, sample, machine); + case PERF_RECORD_SWITCH: + case PERF_RECORD_SWITCH_CPU_WIDE: + return tool->context_switch(tool, event, sample, machine); + case PERF_RECORD_KSYMBOL: + return tool->ksymbol(tool, event, sample, machine); + case PERF_RECORD_BPF_EVENT: + return tool->bpf(tool, event, sample, machine); + case PERF_RECORD_TEXT_POKE: + return tool->text_poke(tool, event, sample, machine); + case PERF_RECORD_AUX_OUTPUT_HW_ID: + return tool->aux_output_hw_id(tool, event, sample, machine); + case PERF_RECORD_CALLCHAIN_DEFERRED: + dump_deferred_callchain(evsel, event, sample); + return evlist__deliver_deferred_callchain(evlist, tool, event, + sample, machine); default: - ++session->stats.nr_unknown_events; + ++evlist->stats.nr_unknown_events; return -1; } } -static int perf_session__preprocess_sample(struct perf_session *session, - union perf_event *event, struct perf_sample *sample) +static int perf_session__deliver_event(struct perf_session *session, + union perf_event *event, + const struct perf_tool *tool, + u64 file_offset, + const char *file_path) { - if (event->header.type != PERF_RECORD_SAMPLE || - !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN)) - return 0; + struct perf_sample sample; + int ret; - if (!ip_callchain__valid(sample->callchain, event)) { - pr_debug("call-chain problem with event, skipping it.\n"); - ++session->stats.nr_invalid_chains; - session->stats.total_invalid_chains += sample->period; - return -EINVAL; + perf_sample__init(&sample, /*all=*/false); + ret = evlist__parse_sample(session->evlist, event, &sample); + if (ret) { + 
pr_err("Can't parse sample, err = %d\n", ret); + goto out; } - return 0; + + ret = auxtrace__process_event(session, event, &sample, tool); + if (ret < 0) + goto out; + if (ret > 0) { + ret = 0; + goto out; + } + + ret = machines__deliver_event(&session->machines, session->evlist, + event, &sample, tool, file_offset, file_path); + + if (dump_trace && sample.aux_sample.size) + auxtrace__dump_auxtrace_sample(session, &sample); +out: + perf_sample__exit(&sample); + return ret; } -static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, - struct perf_tool *tool, u64 file_offset) +static s64 perf_session__process_user_event(struct perf_session *session, + union perf_event *event, + u64 file_offset, + const char *file_path) { - int err; + struct ordered_events *oe = &session->ordered_events; + const struct perf_tool *tool = session->tool; + struct perf_sample sample; + int fd = perf_data__fd(session->data); + s64 err; - dump_event(session, event, file_offset, NULL); + perf_sample__init(&sample, /*all=*/true); + if ((event->header.type != PERF_RECORD_COMPRESSED && + event->header.type != PERF_RECORD_COMPRESSED2) || + perf_tool__compressed_is_stub(tool)) + dump_event(session->evlist, event, file_offset, &sample, file_path); /* These events are processed right away */ switch (event->header.type) { case PERF_RECORD_HEADER_ATTR: - err = tool->attr(event, &session->evlist); - if (err == 0) + err = tool->attr(tool, event, &session->evlist); + if (err == 0) { perf_session__set_id_hdr_size(session); - return err; + perf_session__set_comm_exec(session); + } + break; + case PERF_RECORD_EVENT_UPDATE: + err = tool->event_update(tool, event, &session->evlist); + break; case PERF_RECORD_HEADER_EVENT_TYPE: - return tool->event_type(tool, event); + /* + * Deprecated, but we need to handle it for sake + * of old data files create in pipe mode. + */ + err = 0; + break; case PERF_RECORD_HEADER_TRACING_DATA: - /* setup for reading amidst mmap */ - lseek(session->fd, file_offset, SEEK_SET); - return tool->tracing_data(event, session); + /* + * Setup for reading amidst mmap, but only when we + * are in 'file' mode. The 'pipe' fd is in proper + * place already. + */ + if (!perf_data__is_pipe(session->data)) + lseek(fd, file_offset, SEEK_SET); + err = tool->tracing_data(tool, session, event); + break; case PERF_RECORD_HEADER_BUILD_ID: - return tool->build_id(tool, event, session); + err = tool->build_id(tool, session, event); + break; case PERF_RECORD_FINISHED_ROUND: - return tool->finished_round(tool, event, session); + err = tool->finished_round(tool, event, oe); + break; + case PERF_RECORD_ID_INDEX: + err = tool->id_index(tool, session, event); + break; + case PERF_RECORD_AUXTRACE_INFO: + err = tool->auxtrace_info(tool, session, event); + break; + case PERF_RECORD_AUXTRACE: + /* + * Setup for reading amidst mmap, but only when we + * are in 'file' mode. The 'pipe' fd is in proper + * place already. 
+ */ + if (!perf_data__is_pipe(session->data)) + lseek(fd, file_offset + event->header.size, SEEK_SET); + err = tool->auxtrace(tool, session, event); + break; + case PERF_RECORD_AUXTRACE_ERROR: + perf_session__auxtrace_error_inc(session, event); + err = tool->auxtrace_error(tool, session, event); + break; + case PERF_RECORD_THREAD_MAP: + err = tool->thread_map(tool, session, event); + break; + case PERF_RECORD_CPU_MAP: + err = tool->cpu_map(tool, session, event); + break; + case PERF_RECORD_STAT_CONFIG: + err = tool->stat_config(tool, session, event); + break; + case PERF_RECORD_STAT: + err = tool->stat(tool, session, event); + break; + case PERF_RECORD_STAT_ROUND: + err = tool->stat_round(tool, session, event); + break; + case PERF_RECORD_TIME_CONV: + session->time_conv = event->time_conv; + err = tool->time_conv(tool, session, event); + break; + case PERF_RECORD_HEADER_FEATURE: + err = tool->feature(tool, session, event); + break; + case PERF_RECORD_COMPRESSED: + case PERF_RECORD_COMPRESSED2: + err = tool->compressed(tool, session, event, file_offset, file_path); + if (err) + dump_event(session->evlist, event, file_offset, &sample, file_path); + break; + case PERF_RECORD_FINISHED_INIT: + err = tool->finished_init(tool, session, event); + break; + case PERF_RECORD_BPF_METADATA: + err = tool->bpf_metadata(tool, session, event); + break; default: + err = -EINVAL; + break; + } + perf_sample__exit(&sample); + return err; +} + +int perf_session__deliver_synth_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample) +{ + struct evlist *evlist = session->evlist; + const struct perf_tool *tool = session->tool; + + events_stats__inc(&evlist->stats, event->header.type); + + if (event->header.type >= PERF_RECORD_USER_TYPE_START) + return perf_session__process_user_event(session, event, 0, NULL); + + return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL); +} + +int perf_session__deliver_synth_attr_event(struct perf_session *session, + const struct perf_event_attr *attr, + u64 id) +{ + union { + struct { + struct perf_record_header_attr attr; + u64 ids[1]; + } attr_id; + union perf_event ev; + } ev = { + .attr_id.attr.header.type = PERF_RECORD_HEADER_ATTR, + .attr_id.attr.header.size = sizeof(ev.attr_id), + .attr_id.ids[0] = id, + }; + + if (attr->size != sizeof(ev.attr_id.attr.attr)) { + pr_debug("Unexpected perf_event_attr size\n"); return -EINVAL; } + ev.attr_id.attr.attr = *attr; + return perf_session__deliver_synth_event(session, &ev.ev, NULL); } static void event_swap(union perf_event *event, bool sample_id_all) @@ -949,126 +1692,312 @@ static void event_swap(union perf_event *event, bool sample_id_all) swap(event, sample_id_all); } -static int perf_session__process_event(struct perf_session *session, - union perf_event *event, - struct perf_tool *tool, - u64 file_offset) +int perf_session__peek_event(struct perf_session *session, off_t file_offset, + void *buf, size_t buf_sz, + union perf_event **event_ptr, + struct perf_sample *sample) { - struct perf_sample sample; - int ret; + union perf_event *event; + size_t hdr_sz, rest; + int fd; + + if (session->one_mmap && !session->header.needs_swap) { + event = file_offset - session->one_mmap_offset + + session->one_mmap_addr; + goto out_parse_sample; + } + + if (perf_data__is_pipe(session->data)) + return -1; + + fd = perf_data__fd(session->data); + hdr_sz = sizeof(struct perf_event_header); + + if (buf_sz < hdr_sz) + return -1; + + if (lseek(fd, file_offset, SEEK_SET) == 
(off_t)-1 || + readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz) + return -1; + + event = (union perf_event *)buf; if (session->header.needs_swap) - event_swap(event, perf_evlist__sample_id_all(session->evlist)); + perf_event_header__bswap(&event->header); - if (event->header.type >= PERF_RECORD_HEADER_MAX) - return -EINVAL; + if (event->header.size < hdr_sz || event->header.size > buf_sz) + return -1; + + buf += hdr_sz; + rest = event->header.size - hdr_sz; + + if (readn(fd, buf, rest) != (ssize_t)rest) + return -1; + + if (session->header.needs_swap) + event_swap(event, evlist__sample_id_all(session->evlist)); + +out_parse_sample: + + if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && + evlist__parse_sample(session->evlist, event, sample)) + return -1; + + *event_ptr = event; + + return 0; +} - events_stats__inc(&session->stats, event->header.type); +int perf_session__peek_events(struct perf_session *session, u64 offset, + u64 size, peek_events_cb_t cb, void *data) +{ + u64 max_offset = offset + size; + char buf[PERF_SAMPLE_MAX_SIZE]; + union perf_event *event; + int err; + + do { + err = perf_session__peek_event(session, offset, buf, + PERF_SAMPLE_MAX_SIZE, &event, + NULL); + if (err) + return err; + + err = cb(session, event, offset, data); + if (err) + return err; + + offset += event->header.size; + if (event->header.type == PERF_RECORD_AUXTRACE) + offset += event->auxtrace.size; + + } while (offset < max_offset); + + return err; +} + +static s64 perf_session__process_event(struct perf_session *session, + union perf_event *event, u64 file_offset, + const char *file_path) +{ + struct evlist *evlist = session->evlist; + const struct perf_tool *tool = session->tool; + int ret; + + if (session->header.needs_swap) + event_swap(event, evlist__sample_id_all(evlist)); + + if (event->header.type >= PERF_RECORD_HEADER_MAX) { + /* perf should not support unaligned event, stop here. */ + if (event->header.size % sizeof(u64)) + return -EINVAL; + + /* This perf is outdated and does not support the latest event type. */ + ui__warning("Unsupported header type %u, please consider updating perf.\n", + event->header.type); + /* Skip unsupported event by returning its size. 
*/ + return event->header.size; + } + + events_stats__inc(&evlist->stats, event->header.type); if (event->header.type >= PERF_RECORD_USER_TYPE_START) - return perf_session__process_user_event(session, event, tool, file_offset); + return perf_session__process_user_event(session, event, file_offset, file_path); - /* - * For all kernel events we get the sample data - */ - ret = perf_evlist__parse_sample(session->evlist, event, &sample); - if (ret) - return ret; + if (tool->ordered_events) { + u64 timestamp = -1ULL; - /* Preprocess sample records - precheck callchains */ - if (perf_session__preprocess_sample(session, event, &sample)) - return 0; + ret = evlist__parse_sample_timestamp(evlist, event, ×tamp); + if (ret && ret != -1) + return ret; - if (tool->ordered_samples) { - ret = perf_session_queue_event(session, event, &sample, - file_offset); + ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path); if (ret != -ETIME) return ret; } - return perf_session_deliver_event(session, event, &sample, tool, - file_offset); + return perf_session__deliver_event(session, event, tool, file_offset, file_path); } -void perf_event_header__bswap(struct perf_event_header *self) +void perf_event_header__bswap(struct perf_event_header *hdr) { - self->type = bswap_32(self->type); - self->misc = bswap_16(self->misc); - self->size = bswap_16(self->size); + hdr->type = bswap_32(hdr->type); + hdr->misc = bswap_16(hdr->misc); + hdr->size = bswap_16(hdr->size); } struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) { - return machine__findnew_thread(&session->machines.host, pid); + return machine__findnew_thread(&session->machines.host, -1, pid); +} + +int perf_session__register_idle_thread(struct perf_session *session) +{ + struct thread *thread = machine__idle_thread(&session->machines.host); + + /* machine__idle_thread() got the thread, so put it */ + thread__put(thread); + return thread ? 
0 : -1; } -static struct thread *perf_session__register_idle_thread(struct perf_session *self) +static void +perf_session__warn_order(const struct perf_session *session) { - struct thread *thread = perf_session__findnew(self, 0); + const struct ordered_events *oe = &session->ordered_events; + struct evsel *evsel; + bool should_warn = true; - if (thread == NULL || thread__set_comm(thread, "swapper")) { - pr_err("problem inserting idle task.\n"); - thread = NULL; + evlist__for_each_entry(session->evlist, evsel) { + if (evsel->core.attr.write_backward) + should_warn = false; } - return thread; + if (!should_warn) + return; + if (oe->nr_unordered_events != 0) + ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events); } -static void perf_session__warn_about_errors(const struct perf_session *session, - const struct perf_tool *tool) +static void perf_session__warn_about_errors(const struct perf_session *session) { - if (tool->lost == perf_event__process_lost && - session->stats.nr_events[PERF_RECORD_LOST] != 0) { + const struct events_stats *stats = &session->evlist->stats; + + if (session->tool->lost == perf_event__process_lost && + stats->nr_events[PERF_RECORD_LOST] != 0) { ui__warning("Processed %d events and lost %d chunks!\n\n" "Check IO/CPU overload!\n\n", - session->stats.nr_events[0], - session->stats.nr_events[PERF_RECORD_LOST]); + stats->nr_events[0], + stats->nr_events[PERF_RECORD_LOST]); } - if (session->stats.nr_unknown_events != 0) { + if (session->tool->lost_samples == perf_event__process_lost_samples) { + double drop_rate; + + drop_rate = (double)stats->total_lost_samples / + (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples); + if (drop_rate > 0.05) { + ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n", + stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples, + drop_rate * 100.0); + } + } + + if (session->tool->aux == perf_event__process_aux && + stats->total_aux_lost != 0) { + ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n", + stats->total_aux_lost, + stats->nr_events[PERF_RECORD_AUX]); + } + + if (session->tool->aux == perf_event__process_aux && + stats->total_aux_partial != 0) { + bool vmm_exclusive = false; + + (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive", + &vmm_exclusive); + + ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n" + "Are you running a KVM guest in the background?%s\n\n", + stats->total_aux_partial, + stats->nr_events[PERF_RECORD_AUX], + vmm_exclusive ? + "\nReloading kvm_intel module with vmm_exclusive=0\n" + "will reduce the gaps to only guest's timeslices." 
: + ""); + } + + if (session->tool->aux == perf_event__process_aux && + stats->total_aux_collision != 0) { + ui__warning("AUX data detected collision %" PRIu64 " times out of %u!\n\n", + stats->total_aux_collision, + stats->nr_events[PERF_RECORD_AUX]); + } + + if (stats->nr_unknown_events != 0) { ui__warning("Found %u unknown events!\n\n" "Is this an older tool processing a perf.data " "file generated by a more recent tool?\n\n" "If that is not the case, consider " "reporting to linux-kernel@vger.kernel.org.\n\n", - session->stats.nr_unknown_events); + stats->nr_unknown_events); } - if (session->stats.nr_unknown_id != 0) { + if (stats->nr_unknown_id != 0) { ui__warning("%u samples with id not present in the header\n", - session->stats.nr_unknown_id); + stats->nr_unknown_id); } - if (session->stats.nr_invalid_chains != 0) { - ui__warning("Found invalid callchains!\n\n" - "%u out of %u events were discarded for this reason.\n\n" - "Consider reporting to linux-kernel@vger.kernel.org.\n\n", - session->stats.nr_invalid_chains, - session->stats.nr_events[PERF_RECORD_SAMPLE]); - } + if (stats->nr_invalid_chains != 0) { + ui__warning("Found invalid callchains!\n\n" + "%u out of %u events were discarded for this reason.\n\n" + "Consider reporting to linux-kernel@vger.kernel.org.\n\n", + stats->nr_invalid_chains, + stats->nr_events[PERF_RECORD_SAMPLE]); + } - if (session->stats.nr_unprocessable_samples != 0) { + if (stats->nr_unprocessable_samples != 0) { ui__warning("%u unprocessable samples recorded.\n" "Do you have a KVM guest running and not using 'perf kvm'?\n", - session->stats.nr_unprocessable_samples); + stats->nr_unprocessable_samples); + } + + perf_session__warn_order(session); + + events_stats__auxtrace_error_warn(stats); + + if (stats->nr_proc_map_timeout != 0) { + ui__warning("%d map information files for pre-existing threads were\n" + "not processed, if there are samples for addresses they\n" + "will not be resolved, you may find out which are these\n" + "threads by running with -v and redirecting the output\n" + "to a file.\n" + "The time limit to process proc map is too short?\n" + "Increase it by --proc-map-timeout\n", + stats->nr_proc_map_timeout); } } -#define session_done() (*(volatile int *)(&session_done)) -volatile int session_done; +static int perf_session__flush_thread_stack(struct thread *thread, + void *p __maybe_unused) +{ + return thread_stack__flush(thread); +} -static int __perf_session__process_pipe_events(struct perf_session *self, - struct perf_tool *tool) +static int perf_session__flush_thread_stacks(struct perf_session *session) { + return machines__for_each_thread(&session->machines, + perf_session__flush_thread_stack, + NULL); +} + +volatile sig_atomic_t session_done; + +static int __perf_session__process_decomp_events(struct perf_session *session); + +static int __perf_session__process_pipe_events(struct perf_session *session) +{ + struct ordered_events *oe = &session->ordered_events; + const struct perf_tool *tool = session->tool; + struct ui_progress prog; union perf_event *event; uint32_t size, cur_size = 0; void *buf = NULL; - int skip = 0; + s64 skip = 0; u64 head; - int err; + ssize_t err; void *p; + bool update_prog = false; - perf_tool__fill_defaults(tool); + /* + * If it's from a file saving pipe data (by redirection), it would have + * a file name other than "-". Then we can get the total size and show + * the progress. 
+ */ + if (strcmp(session->data->path, "-") && session->data->file.size) { + ui_progress__init_size(&prog, session->data->file.size, + "Processing events..."); + update_prog = true; + } head = 0; cur_size = sizeof(union perf_event); @@ -1076,9 +2005,11 @@ static int __perf_session__process_pipe_events(struct perf_session *self, buf = malloc(cur_size); if (!buf) return -errno; + ordered_events__set_copy_on_queue(oe, true); more: event = buf; - err = readn(self->fd, event, sizeof(struct perf_event_header)); + err = perf_data__read(session->data, event, + sizeof(struct perf_event_header)); if (err <= 0) { if (err == 0) goto done; @@ -1087,12 +2018,14 @@ more: goto out_err; } - if (self->header.needs_swap) + if (session->header.needs_swap) perf_event_header__bswap(&event->header); size = event->header.size; - if (size == 0) - size = 8; + if (size < sizeof(struct perf_event_header)) { + pr_err("bad event header size\n"); + goto out_err; + } if (size > cur_size) { void *new = realloc(buf, size); @@ -1108,7 +2041,8 @@ more: p += sizeof(struct perf_event_header); if (size - sizeof(struct perf_event_header)) { - err = readn(self->fd, p, size - sizeof(struct perf_event_header)); + err = perf_data__read(session->data, p, + size - sizeof(struct perf_event_header)); if (err <= 0) { if (err == 0) { pr_err("unexpected end of event stream\n"); @@ -1120,7 +2054,7 @@ more: } } - if ((skip = perf_session__process_event(self, event, tool, head)) < 0) { + if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) { pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", head, event->header.size, event->header.type); err = -EINVAL; @@ -1132,22 +2066,44 @@ more: if (skip > 0) head += skip; + err = __perf_session__process_decomp_events(session); + if (err) + goto out_err; + + if (update_prog) + ui_progress__update(&prog, size); + if (!session_done()) goto more; done: - err = 0; + /* do the final flush for ordered samples */ + err = ordered_events__flush(oe, OE_FLUSH__FINAL); + if (err) + goto out_err; + err = session__flush_deferred_samples(session, tool); + if (err) + goto out_err; + err = auxtrace__flush_events(session, tool); + if (err) + goto out_err; + err = perf_session__flush_thread_stacks(session); out_err: free(buf); - perf_session__warn_about_errors(self, tool); - perf_session_free_sample_buffers(self); + if (update_prog) + ui_progress__finish(); + if (!tool->no_warn) + perf_session__warn_about_errors(session); + ordered_events__free(&session->ordered_events); + auxtrace__free_events(session); return err; } static union perf_event * -fetch_mmaped_event(struct perf_session *session, - u64 head, size_t mmap_size, char *buf) +prefetch_event(char *buf, u64 head, size_t mmap_size, + bool needs_swap, union perf_event *error) { union perf_event *event; + u16 event_size; /* * Ensure we have enough space remaining to read @@ -1157,14 +2113,75 @@ fetch_mmaped_event(struct perf_session *session, return NULL; event = (union perf_event *)(buf + head); + if (needs_swap) + perf_event_header__bswap(&event->header); - if (session->header.needs_swap) + event_size = event->header.size; + if (head + event_size <= mmap_size) + return event; + + /* We're not fetching the event so swap back again */ + if (needs_swap) perf_event_header__bswap(&event->header); - if (head + event->header.size > mmap_size) + /* Check if the event fits into the next mmapped buf. */ + if (event_size <= mmap_size - head % page_size) { + /* Remap buf and fetch again. */ return NULL; + } + + /* Invalid input. 
Event size should never exceed mmap_size. */ + pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:" + " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size); + + return error; +} - return event; +static union perf_event * +fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap) +{ + return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL)); +} + +static union perf_event * +fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap) +{ + return prefetch_event(buf, head, mmap_size, needs_swap, NULL); +} + +static int __perf_session__process_decomp_events(struct perf_session *session) +{ + s64 skip; + u64 size; + struct decomp *decomp = session->active_decomp->decomp_last; + + if (!decomp) + return 0; + + while (decomp->head < decomp->size && !session_done()) { + union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data, + session->header.needs_swap); + + if (!event) + break; + + size = event->header.size; + + if (size < sizeof(struct perf_event_header) || + (skip = perf_session__process_event(session, event, decomp->file_pos, + decomp->file_path)) < 0) { + pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", + decomp->file_pos + decomp->head, event->header.size, event->header.type); + return -EINVAL; + } + + if (skip) + size += skip; + + decomp->head += size; + } + + return 0; } /* @@ -1179,136 +2196,433 @@ fetch_mmaped_event(struct perf_session *session, #define NUM_MMAPS 128 #endif -int __perf_session__process_events(struct perf_session *session, - u64 data_offset, u64 data_size, - u64 file_size, struct perf_tool *tool) +struct reader; + +typedef s64 (*reader_cb_t)(struct perf_session *session, + union perf_event *event, + u64 file_offset, + const char *file_path); + +struct reader { + int fd; + const char *path; + u64 data_size; + u64 data_offset; + reader_cb_t process; + bool in_place_update; + char *mmaps[NUM_MMAPS]; + size_t mmap_size; + int mmap_idx; + char *mmap_cur; + u64 file_pos; + u64 file_offset; + u64 head; + u64 size; + bool done; + struct zstd_data zstd_data; + struct decomp_data decomp_data; +}; + +static int +reader__init(struct reader *rd, bool *one_mmap) { - u64 head, page_offset, file_offset, file_pos, progress_next; - int err, mmap_prot, mmap_flags, map_idx = 0; - size_t mmap_size; - char *buf, *mmaps[NUM_MMAPS]; - union perf_event *event; - uint32_t size; + u64 data_size = rd->data_size; + char **mmaps = rd->mmaps; - perf_tool__fill_defaults(tool); + rd->head = rd->data_offset; + data_size += rd->data_offset; - page_offset = page_size * (data_offset / page_size); - file_offset = page_offset; - head = data_offset - page_offset; + rd->mmap_size = MMAP_SIZE; + if (rd->mmap_size > data_size) { + rd->mmap_size = data_size; + if (one_mmap) + *one_mmap = true; + } - if (data_offset + data_size < file_size) - file_size = data_offset + data_size; + memset(mmaps, 0, sizeof(rd->mmaps)); - progress_next = file_size / 16; + if (zstd_init(&rd->zstd_data, 0)) + return -1; + rd->decomp_data.zstd_decomp = &rd->zstd_data; - mmap_size = MMAP_SIZE; - if (mmap_size > file_size) - mmap_size = file_size; + return 0; +} - memset(mmaps, 0, sizeof(mmaps)); +static void +reader__release_decomp(struct reader *rd) +{ + perf_decomp__release_events(rd->decomp_data.decomp); + zstd_fini(&rd->zstd_data); +} + +static int +reader__mmap(struct reader *rd, struct perf_session *session) +{ + int mmap_prot, mmap_flags; + char *buf, **mmaps = rd->mmaps; + u64 page_offset; mmap_prot = 
PROT_READ; mmap_flags = MAP_SHARED; - if (session->header.needs_swap) { + if (rd->in_place_update) { + mmap_prot |= PROT_WRITE; + } else if (session->header.needs_swap) { mmap_prot |= PROT_WRITE; mmap_flags = MAP_PRIVATE; } -remap: - buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd, - file_offset); + + if (mmaps[rd->mmap_idx]) { + munmap(mmaps[rd->mmap_idx], rd->mmap_size); + mmaps[rd->mmap_idx] = NULL; + } + + page_offset = page_size * (rd->head / page_size); + rd->file_offset += page_offset; + rd->head -= page_offset; + + buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd, + rd->file_offset); if (buf == MAP_FAILED) { pr_err("failed to mmap file\n"); - err = -errno; - goto out_err; + return -errno; + } + mmaps[rd->mmap_idx] = rd->mmap_cur = buf; + rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1); + rd->file_pos = rd->file_offset + rd->head; + if (session->one_mmap) { + session->one_mmap_addr = buf; + session->one_mmap_offset = rd->file_offset; } - mmaps[map_idx] = buf; - map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); - file_pos = file_offset + head; -more: - event = fetch_mmaped_event(session, head, mmap_size, buf); - if (!event) { - if (mmaps[map_idx]) { - munmap(mmaps[map_idx], mmap_size); - mmaps[map_idx] = NULL; - } + return 0; +} - page_offset = page_size * (head / page_size); - file_offset += page_offset; - head -= page_offset; - goto remap; - } +enum { + READER_OK, + READER_NODATA, +}; + +static int +reader__read_event(struct reader *rd, struct perf_session *session, + struct ui_progress *prog) +{ + u64 size; + int err = READER_OK; + union perf_event *event; + s64 skip; + + event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur, + session->header.needs_swap); + if (IS_ERR(event)) + return PTR_ERR(event); + + if (!event) + return READER_NODATA; size = event->header.size; - if (size == 0 || - perf_session__process_event(session, event, tool, file_pos) < 0) { - pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", - file_offset + head, event->header.size, - event->header.type); - err = -EINVAL; - goto out_err; + skip = -EINVAL; + + if (size < sizeof(struct perf_event_header) || + (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) { + pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n", + rd->file_offset + rd->head, event->header.size, + event->header.type, strerror(-skip)); + err = skip; + goto out; } - head += size; - file_pos += size; + if (skip) + size += skip; - if (file_pos >= progress_next) { - progress_next += file_size / 16; - ui_progress__update(file_pos, file_size, - "Processing events..."); - } + rd->size += size; + rd->head += size; + rd->file_pos += size; + + err = __perf_session__process_decomp_events(session); + if (err) + goto out; + + ui_progress__update(prog, size); + +out: + return err; +} + +static inline bool +reader__eof(struct reader *rd) +{ + return (rd->file_pos >= rd->data_size + rd->data_offset); +} + +static int +reader__process_events(struct reader *rd, struct perf_session *session, + struct ui_progress *prog) +{ + int err; + + err = reader__init(rd, &session->one_mmap); + if (err) + goto out; + + session->active_decomp = &rd->decomp_data; + +remap: + err = reader__mmap(rd, session); + if (err) + goto out; + +more: + err = reader__read_event(rd, session, prog); + if (err < 0) + goto out; + else if (err == READER_NODATA) + goto remap; + + if (session_done()) + goto out; - if (file_pos < file_size) + if (!reader__eof(rd)) goto more; - err = 0; +out: + session->active_decomp 
= &session->decomp_data;
+	return err;
+}
+
+static s64 process_simple(struct perf_session *session,
+			  union perf_event *event,
+			  u64 file_offset,
+			  const char *file_path)
+{
+	return perf_session__process_event(session, event, file_offset, file_path);
+}
+
+static int __perf_session__process_events(struct perf_session *session)
+{
+	struct reader rd = {
+		.fd = perf_data__fd(session->data),
+		.path = session->data->file.path,
+		.data_size = session->header.data_size,
+		.data_offset = session->header.data_offset,
+		.process = process_simple,
+		.in_place_update = session->data->in_place_update,
+	};
+	struct ordered_events *oe = &session->ordered_events;
+	const struct perf_tool *tool = session->tool;
+	struct ui_progress prog;
+	int err;
+
+	if (rd.data_size == 0)
+		return -1;
+
+	ui_progress__init_size(&prog, rd.data_size, "Processing events...");
+
+	err = reader__process_events(&rd, session, &prog);
+	if (err)
+		goto out_err;
 	/* do the final flush for ordered samples */
-	session->ordered_samples.next_flush = ULLONG_MAX;
-	err = flush_sample_queue(session, tool);
+	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
+	if (err)
+		goto out_err;
+	err = auxtrace__flush_events(session, tool);
+	if (err)
+		goto out_err;
+	err = session__flush_deferred_samples(session, tool);
+	if (err)
+		goto out_err;
+	err = perf_session__flush_thread_stacks(session);
 out_err:
 	ui_progress__finish();
-	perf_session__warn_about_errors(session, tool);
-	perf_session_free_sample_buffers(session);
+	if (!tool->no_warn)
+		perf_session__warn_about_errors(session);
+	/*
+	 * We may be switching perf.data output, so make ordered_events
+	 * reusable.
+	 */
+	ordered_events__reinit(&session->ordered_events);
+	auxtrace__free_events(session);
+	reader__release_decomp(&rd);
+	session->one_mmap = false;
 	return err;
 }
 
-int perf_session__process_events(struct perf_session *self,
-				 struct perf_tool *tool)
+/*
+ * Processing 2 MB of data from each reader in sequence,
+ * because that's the way the ordered events sorting works
+ * most efficiently.
+ */
+#define READER_MAX_SIZE (2 * 1024 * 1024)
+
+/*
+ * This function reads, merges and processes directory data.
+ * It assumes version 1 of the directory data, where each
+ * data file holds per-cpu data, already sorted by the kernel.
+ */
+static int __perf_session__process_dir_events(struct perf_session *session)
 {
-	int err;
+	struct perf_data *data = session->data;
+	const struct perf_tool *tool = session->tool;
+	int i, ret, readers, nr_readers;
+	struct ui_progress prog;
+	u64 total_size = perf_data__size(session->data);
+	struct reader *rd;
+
+	ui_progress__init_size(&prog, total_size, "Processing events...");
 
-	if (perf_session__register_idle_thread(self) == NULL)
+	nr_readers = 1;
+	for (i = 0; i < data->dir.nr; i++) {
+		if (data->dir.files[i].size)
+			nr_readers++;
+	}
+
+	rd = zalloc(nr_readers * sizeof(struct reader));
+	if (!rd)
 		return -ENOMEM;
 
-	if (!self->fd_pipe)
-		err = __perf_session__process_events(self,
-						     self->header.data_offset,
-						     self->header.data_size,
-						     self->size, tool);
-	else
-		err = __perf_session__process_pipe_events(self, tool);
+	rd[0] = (struct reader) {
+		.fd = perf_data__fd(session->data),
+		.path = session->data->file.path,
+		.data_size = session->header.data_size,
+		.data_offset = session->header.data_offset,
+		.process = process_simple,
+		.in_place_update = session->data->in_place_update,
+	};
+	ret = reader__init(&rd[0], NULL);
+	if (ret)
+		goto out_err;
+	ret = reader__mmap(&rd[0], session);
+	if (ret)
+		goto out_err;
+	readers = 1;
 
-	return err;
+	for (i = 0; i < data->dir.nr; i++) {
+		if (!data->dir.files[i].size)
+			continue;
+		rd[readers] = (struct reader) {
+			.fd = data->dir.files[i].fd,
+			.path = data->dir.files[i].path,
+			.data_size = data->dir.files[i].size,
+			.data_offset = 0,
+			.process = process_simple,
+			.in_place_update = session->data->in_place_update,
+		};
+		ret = reader__init(&rd[readers], NULL);
+		if (ret)
+			goto out_err;
+		ret = reader__mmap(&rd[readers], session);
+		if (ret)
+			goto out_err;
+		readers++;
+	}
+
+	i = 0;
+	while (readers) {
+		if (session_done())
+			break;
+
+		if (rd[i].done) {
+			i = (i + 1) % nr_readers;
+			continue;
+		}
+		if (reader__eof(&rd[i])) {
+			rd[i].done = true;
+			readers--;
+			continue;
+		}
+
+		session->active_decomp = &rd[i].decomp_data;
+		ret = reader__read_event(&rd[i], session, &prog);
+		if (ret < 0) {
+			goto out_err;
+		} else if (ret == READER_NODATA) {
+			ret = reader__mmap(&rd[i], session);
+			if (ret)
+				goto out_err;
+		}
+
+		if (rd[i].size >= READER_MAX_SIZE) {
+			rd[i].size = 0;
+			i = (i + 1) % nr_readers;
+		}
+	}
+
+	ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL);
+	if (ret)
+		goto out_err;
+
+	ret = session__flush_deferred_samples(session, tool);
+	if (ret)
+		goto out_err;
+
+	ret = perf_session__flush_thread_stacks(session);
+out_err:
+	ui_progress__finish();
+
+	if (!tool->no_warn)
+		perf_session__warn_about_errors(session);
+
+	/*
+	 * We may be switching perf.data output, so make ordered_events
+	 * reusable.
+ */ + ordered_events__reinit(&session->ordered_events); + + session->one_mmap = false; + + session->active_decomp = &session->decomp_data; + for (i = 0; i < nr_readers; i++) + reader__release_decomp(&rd[i]); + zfree(&rd); + + return ret; +} + +int perf_session__process_events(struct perf_session *session) +{ + if (perf_session__register_idle_thread(session) < 0) + return -ENOMEM; + + if (perf_data__is_pipe(session->data)) + return __perf_session__process_pipe_events(session); + + if (perf_data__is_dir(session->data) && session->data->dir.nr) + return __perf_session__process_dir_events(session); + + return __perf_session__process_events(session); } bool perf_session__has_traces(struct perf_session *session, const char *msg) { - if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) { - pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); - return false; + struct evsel *evsel; + + evlist__for_each_entry(session->evlist, evsel) { + if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) + return true; } - return true; + pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); + return false; } -int maps__set_kallsyms_ref_reloc_sym(struct map **maps, - const char *symbol_name, u64 addr) +bool perf_session__has_switch_events(struct perf_session *session) +{ + struct evsel *evsel; + + evlist__for_each_entry(session->evlist, evsel) { + if (evsel->core.attr.context_switch) + return true; + } + + return false; +} + +int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr) { char *bracket; - enum map_type i; struct ref_reloc_sym *ref; + struct kmap *kmap; ref = zalloc(sizeof(struct ref_reloc_sym)); if (ref == NULL) @@ -1326,37 +2640,35 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps, ref->addr = addr; - for (i = 0; i < MAP__NR_TYPES; ++i) { - struct kmap *kmap = map__kmap(maps[i]); + kmap = map__kmap(map); + if (kmap) kmap->ref_reloc_sym = ref; - } return 0; } -size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) +size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp) { - return machines__fprintf_dsos(&self->machines, fp); + return machines__fprintf_dsos(&session->machines, fp); } -size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, +size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm) { - return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm); + return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm); } size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) { - struct perf_evsel *pos; - size_t ret = fprintf(fp, "Aggregated stats:\n"); + size_t ret; + const char *msg = ""; - ret += events_stats__fprintf(&session->stats, fp); + if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) + msg = " (excludes AUX area (e.g. 
instruction trace) decoded / synthesized events)"; - list_for_each_entry(pos, &session->evlist->entries, node) { - ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); - ret += events_stats__fprintf(&pos->hists.stats, fp); - } + ret = fprintf(fp, "\nAggregated stats:%s\n", msg); + ret += events_stats__fprintf(&session->evlist->stats, fp); return ret; } @@ -1369,193 +2681,226 @@ size_t perf_session__fprintf(struct perf_session *session, FILE *fp) return machine__fprintf(&session->machines.host, fp); } -struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, - unsigned int type) +void perf_session__dump_kmaps(struct perf_session *session) { - struct perf_evsel *pos; + int save_verbose = verbose; - list_for_each_entry(pos, &session->evlist->entries, node) { - if (pos->attr.type == type) - return pos; - } - return NULL; + fflush(stdout); + fprintf(stderr, "Kernel and module maps:\n"); + verbose = 0; /* Suppress verbose to print a summary only */ + maps__fprintf(machine__kernel_maps(&session->machines.host), stderr); + verbose = save_verbose; } -void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, - struct perf_sample *sample, struct machine *machine, - int print_sym, int print_dso, int print_symoffset) +struct evsel *perf_session__find_first_evtype(struct perf_session *session, + unsigned int type) { - struct addr_location al; - struct callchain_cursor_node *node; - - if (perf_event__preprocess_sample(event, machine, &al, sample, - NULL) < 0) { - error("problem processing %d event, skipping it.\n", - event->header.type); - return; - } - - if (symbol_conf.use_callchain && sample->callchain) { - - - if (machine__resolve_callchain(machine, evsel, al.thread, - sample, NULL) != 0) { - if (verbose) - error("Failed to resolve callchain. Skipping\n"); - return; - } - callchain_cursor_commit(&callchain_cursor); + struct evsel *pos; - while (1) { - node = callchain_cursor_current(&callchain_cursor); - if (!node) - break; - - printf("\t%16" PRIx64, node->ip); - if (print_sym) { - printf(" "); - symbol__fprintf_symname(node->sym, stdout); - } - if (print_dso) { - printf(" ("); - map__fprintf_dsoname(node->map, stdout); - printf(")"); - } - printf("\n"); - - callchain_cursor_advance(&callchain_cursor); - } - - } else { - printf("%16" PRIx64, sample->ip); - if (print_sym) { - printf(" "); - if (print_symoffset) - symbol__fprintf_symname_offs(al.sym, &al, - stdout); - else - symbol__fprintf_symname(al.sym, stdout); - } - - if (print_dso) { - printf(" ("); - map__fprintf_dsoname(al.map, stdout); - printf(")"); - } + evlist__for_each_entry(session->evlist, pos) { + if (pos->core.attr.type == type) + return pos; } + return NULL; } int perf_session__cpu_bitmap(struct perf_session *session, const char *cpu_list, unsigned long *cpu_bitmap) { - int i; - struct cpu_map *map; + int i, err = -1; + struct perf_cpu_map *map; + int nr_cpus = min(perf_session__env(session)->nr_cpus_avail, MAX_NR_CPUS); + struct perf_cpu cpu; for (i = 0; i < PERF_TYPE_MAX; ++i) { - struct perf_evsel *evsel; + struct evsel *evsel; evsel = perf_session__find_first_evtype(session, i); if (!evsel) continue; - if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) { + if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) { pr_err("File does not contain CPU events. 
" - "Remove -c option to proceed.\n"); + "Remove -C option to proceed.\n"); return -1; } } - map = cpu_map__new(cpu_list); + map = perf_cpu_map__new(cpu_list); if (map == NULL) { pr_err("Invalid cpu_list\n"); return -1; } - for (i = 0; i < map->nr; i++) { - int cpu = map->map[i]; - - if (cpu >= MAX_NR_CPUS) { + perf_cpu_map__for_each_cpu(cpu, i, map) { + if (cpu.cpu >= nr_cpus) { pr_err("Requested CPU %d too large. " - "Consider raising MAX_NR_CPUS\n", cpu); - return -1; + "Consider raising MAX_NR_CPUS\n", cpu.cpu); + goto out_delete_map; } - set_bit(cpu, cpu_bitmap); + __set_bit(cpu.cpu, cpu_bitmap); } - return 0; + err = 0; + +out_delete_map: + perf_cpu_map__put(map); + return err; } void perf_session__fprintf_info(struct perf_session *session, FILE *fp, bool full) { - struct stat st; - int ret; - if (session == NULL || fp == NULL) return; - ret = fstat(session->fd, &st); - if (ret == -1) - return; - fprintf(fp, "# ========\n"); - fprintf(fp, "# captured on: %s", ctime(&st.st_ctime)); perf_header__fprintf_info(session, fp, full); fprintf(fp, "# ========\n#\n"); } +static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid) +{ + struct machine *machine = machines__findnew(&session->machines, machine_pid); + struct thread *thread; + + if (!machine) + return -ENOMEM; -int __perf_session__set_tracepoints_handlers(struct perf_session *session, - const struct perf_evsel_str_handler *assocs, - size_t nr_assocs) + machine->single_address_space = session->machines.host.single_address_space; + + thread = machine__idle_thread(machine); + if (!thread) + return -ENOMEM; + thread__put(thread); + + machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid); + + return 0; +} + +static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid, + pid_t tid, int guest_cpu) { - struct perf_evlist *evlist = session->evlist; - struct event_format *format; - struct perf_evsel *evsel; - char *tracepoint, *name; - size_t i; - int err; + struct machine *machine = &session->machines.host; + struct thread *thread = machine__findnew_thread(machine, pid, tid); - for (i = 0; i < nr_assocs; i++) { - err = -ENOMEM; - tracepoint = strdup(assocs[i].name); - if (tracepoint == NULL) - goto out; - - err = -ENOENT; - name = strchr(tracepoint, ':'); - if (name == NULL) - goto out_free; - - *name++ = '\0'; - format = pevent_find_event_by_name(session->pevent, - tracepoint, name); - if (format == NULL) { - /* - * Adding a handler for an event not in the session, - * just ignore it. 
- */ - goto next; + if (!thread) + return -ENOMEM; + thread__set_guest_cpu(thread, guest_cpu); + thread__put(thread); + + return 0; +} + +int perf_event__process_id_index(const struct perf_tool *tool __maybe_unused, + struct perf_session *session, + union perf_event *event) +{ + struct evlist *evlist = session->evlist; + struct perf_record_id_index *ie = &event->id_index; + size_t sz = ie->header.size - sizeof(*ie); + size_t i, nr, max_nr; + size_t e1_sz = sizeof(struct id_index_entry); + size_t e2_sz = sizeof(struct id_index_entry_2); + size_t etot_sz = e1_sz + e2_sz; + struct id_index_entry_2 *e2; + pid_t last_pid = 0; + + max_nr = sz / e1_sz; + nr = ie->nr; + if (nr > max_nr) { + printf("Too big: nr %zu max_nr %zu\n", nr, max_nr); + return -EINVAL; + } + + if (sz >= nr * etot_sz) { + max_nr = sz / etot_sz; + if (nr > max_nr) { + printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr); + return -EINVAL; } + e2 = (void *)ie + sizeof(*ie) + nr * e1_sz; + } else { + e2 = NULL; + } - evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id); - if (evsel == NULL) - goto next; + if (dump_trace) + fprintf(stdout, " nr: %zu\n", nr); + + for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) { + struct id_index_entry *e = &ie->entries[i]; + struct perf_sample_id *sid; + int ret; + + if (dump_trace) { + fprintf(stdout, " ... id: %"PRI_lu64, e->id); + fprintf(stdout, " idx: %"PRI_lu64, e->idx); + fprintf(stdout, " cpu: %"PRI_ld64, e->cpu); + fprintf(stdout, " tid: %"PRI_ld64, e->tid); + if (e2) { + fprintf(stdout, " machine_pid: %"PRI_ld64, e2->machine_pid); + fprintf(stdout, " vcpu: %"PRI_lu64"\n", e2->vcpu); + } else { + fprintf(stdout, "\n"); + } + } + + sid = evlist__id2sid(evlist, e->id); + if (!sid) + return -ENOENT; + + sid->idx = e->idx; + sid->cpu.cpu = e->cpu; + sid->tid = e->tid; + + if (!e2) + continue; - err = -EEXIST; - if (evsel->handler.func != NULL) - goto out_free; - evsel->handler.func = assocs[i].handler; -next: - free(tracepoint); + sid->machine_pid = e2->machine_pid; + sid->vcpu.cpu = e2->vcpu; + + if (!sid->machine_pid) + continue; + + if (sid->machine_pid != last_pid) { + ret = perf_session__register_guest(session, sid->machine_pid); + if (ret) + return ret; + last_pid = sid->machine_pid; + perf_guest = true; + } + + ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu); + if (ret) + return ret; } + return 0; +} - err = 0; -out: - return err; +int perf_session__dsos_hit_all(struct perf_session *session) +{ + struct rb_node *nd; + int err; + + err = machine__hit_all_dsos(&session->machines.host); + if (err) + return err; + + for (nd = rb_first_cached(&session->machines.guests); nd; + nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); -out_free: - free(tracepoint); - goto out; + err = machine__hit_all_dsos(pos); + if (err) + return err; + } + + return 0; +} + +struct perf_env *perf_session__env(struct perf_session *session) +{ + return &session->header.env; } |
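
The deferred-callchain delivery above holds PERF_RECORD_SAMPLE events on a list until a matching PERF_RECORD_CALLCHAIN_DEFERRED record arrives, pairing them by thread id and by the kernel-written cookie. Below is a minimal stand-alone sketch of just that pairing discipline; every type and name in it is a simplified stand-in, not the perf structures:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct pending_sample {
	struct pending_sample *next;
	int32_t tid;
	uint64_t cookie;	/* written by the kernel at sample time */
	const char *payload;	/* stands in for the copied event bytes */
};

static struct pending_sample *pending;

static int queue_sample(int32_t tid, uint64_t cookie, const char *payload)
{
	struct pending_sample *ps = malloc(sizeof(*ps));

	if (!ps)
		return -1;
	ps->tid = tid;
	ps->cookie = cookie;
	ps->payload = payload;
	ps->next = pending;
	pending = ps;
	return 0;
}

/*
 * Deliver every queued sample of @tid: merge the late callchain when the
 * cookie matches, otherwise deliver the sample without a chain. Either
 * way each queued sample is delivered exactly once and then freed.
 */
static void deliver_deferred(int32_t tid, uint64_t cookie)
{
	struct pending_sample **pp = &pending;

	while (*pp) {
		struct pending_sample *ps = *pp;

		if (ps->tid != tid) {
			pp = &ps->next;
			continue;
		}
		printf("%s: %s\n", ps->payload,
		       ps->cookie == cookie ? "merged callchain"
					    : "cookie mismatch, no chain");
		*pp = ps->next;
		free(ps);
	}
}

int main(void)
{
	queue_sample(100, 0xabcd, "sample A");
	queue_sample(200, 0x1234, "sample B");
	deliver_deferred(100, 0xabcd);	/* A gets its chain */
	deliver_deferred(200, 0xffff);	/* B is flushed chain-less */
	return 0;
}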
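
perf_session__deliver_synth_attr_event() above builds a one-id PERF_RECORD_HEADER_ATTR record on the stack: on disk the record is the header, the attr, then the id array, so a struct ending in ids[1] whose header.size is sizeof(the whole struct) is already a complete event. A stand-in version of that construction; the field names and attr layout here are illustrative, not the real perf ABI:

#include <stdint.h>
#include <stdio.h>

struct rec_header { uint32_t type; uint16_t misc; uint16_t size; };
struct fake_attr  { uint32_t type; uint32_t size; uint64_t config; };

#define REC_HEADER_ATTR 64	/* stand-in record type */

struct attr_event {
	struct rec_header header;
	struct fake_attr attr;
	uint64_t ids[1];	/* exactly one sample id in this event */
};

static int synth_attr(const struct fake_attr *attr, uint64_t id)
{
	struct attr_event ev = {
		.header = {
			.type = REC_HEADER_ATTR,
			.size = sizeof(ev),
		},
		.ids[0] = id,
	};

	/* refuse attrs whose layout doesn't match what we embed */
	if (attr->size != sizeof(ev.attr))
		return -1;
	ev.attr = *attr;
	printf("synthesized attr event: %u bytes, id %llu\n",
	       (unsigned)ev.header.size, (unsigned long long)ev.ids[0]);
	return 0;	/* ...would be handed to the event pipeline here */
}

int main(void)
{
	struct fake_attr attr = { .type = 0, .size = sizeof(attr), .config = 0 };

	return synth_attr(&attr, 42);
}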
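
perf_session__peek_event() above fetches one record at an absolute file offset: read the fixed header, validate the declared size against the caller's buffer, then read the remainder. The same shape using pread(2), which additionally leaves the descriptor's offset alone; the framing struct is a stand-in:

#include <stdint.h>
#include <unistd.h>

struct rec_header { uint32_t type; uint16_t misc; uint16_t size; };

static int peek_record(int fd, off_t off, void *buf, size_t buf_sz)
{
	struct rec_header *h = buf;
	const size_t hdr = sizeof(*h);
	size_t rest;

	if (buf_sz < hdr || pread(fd, buf, hdr, off) != (ssize_t)hdr)
		return -1;
	if (h->size < hdr || h->size > buf_sz)
		return -1;		/* declared size can't be trusted */
	rest = h->size - hdr;
	if (pread(fd, (char *)buf + hdr, rest, off + hdr) != (ssize_t)rest)
		return -1;
	return 0;			/* buf now holds one whole record */
}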
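
perf_session__process_event() above steps over record types newer than this perf by returning event->header.size, but only when that size is still u64-aligned; an unaligned size means the stream can no longer be trusted. A sketch of that policy over a flat byte buffer, with a stand-in header layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec_header {		/* stand-in for struct perf_event_header */
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

#define RECORD_TYPE_MAX 32	/* stand-in for PERF_RECORD_HEADER_MAX */

/* Returns bytes consumed, or -1 when the stream can no longer be trusted. */
static int process_one(const unsigned char *p)
{
	struct rec_header h;

	memcpy(&h, p, sizeof(h));
	if (h.size < sizeof(h))
		return -1;			/* malformed header */
	if (h.type >= RECORD_TYPE_MAX) {
		if (h.size % sizeof(uint64_t))
			return -1;		/* unaligned: give up */
		fprintf(stderr, "skipping unknown type %u (%u bytes)\n",
			h.type, (unsigned)h.size);
		return h.size;			/* aligned: step over it */
	}
	printf("handled type %u\n", h.type);
	return h.size;
}

int main(void)
{
	/* one known record (type 1), then one unknown-but-aligned (type 40) */
	unsigned char buf[64] = {0};
	struct rec_header a = { .type = 1, .size = 16 };
	struct rec_header b = { .type = 40, .size = 24 };
	int off = 0, n;

	memcpy(buf, &a, sizeof(a));
	memcpy(buf + 16, &b, sizeof(b));
	while (off < 40 && (n = process_one(buf + off)) > 0)
		off += n;
	return 0;
}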
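
The lost-samples warning above only fires when the drop rate crosses 5%, and the denominator is everything the kernel tried to record, collected plus lost. The same arithmetic in isolation (a sketch, not the tool's code):

#include <stdio.h>

static void warn_if_lossy(unsigned long long samples, unsigned long long lost)
{
	double drop_rate;

	if (!lost)
		return;
	drop_rate = (double)lost / (double)(samples + lost);
	if (drop_rate > 0.05)
		fprintf(stderr, "Processed %llu samples and lost %3.2f%%!\n",
			samples + lost, drop_rate * 100.0);
}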
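
__perf_session__process_pipe_events() above cannot seek, so it reads one header at a time and grows a single realloc'd buffer to the largest record seen so far. A reduced sketch of that loop, with a stand-in header and a short-read-safe readn():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct rec_header { uint32_t type; uint16_t misc; uint16_t size; };

/* read exactly @n bytes; <= 0 on EOF or error (partial reads collapse to 0) */
static ssize_t readn(int fd, void *buf, size_t n)
{
	size_t left = n;

	while (left) {
		ssize_t r = read(fd, (char *)buf + (n - left), left);

		if (r <= 0)
			return r;
		left -= r;
	}
	return n;
}

/* returns 1 with the record in *bufp (growing it as needed), 0 on EOF, -1 on error */
static int next_record(int fd, void **bufp, size_t *capp)
{
	struct rec_header h;
	ssize_t r = readn(fd, &h, sizeof(h));

	if (r <= 0)
		return r;
	if (h.size < sizeof(h))
		return -1;		/* bad event header size */
	if (h.size > *capp) {
		void *n = realloc(*bufp, h.size);

		if (!n)
			return -1;
		*bufp = n;
		*capp = h.size;
	}
	memcpy(*bufp, &h, sizeof(h));
	if (h.size > sizeof(h) &&
	    readn(fd, (char *)*bufp + sizeof(h), h.size - sizeof(h)) <= 0)
		return -1;		/* unexpected end of event stream */
	return 1;
}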
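
prefetch_event() above byteswaps the header in place just to learn the record size, then swaps it back whenever the record will not be consumed from the current window, so a later remap sees the raw bytes again. That restore trick in miniature, with a stand-in header and glibc's byteswap.h:

#include <byteswap.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct rec_header { uint32_t type; uint16_t misc; uint16_t size; };

static void header_bswap(struct rec_header *h)
{
	h->type = bswap_32(h->type);
	h->misc = bswap_16(h->misc);
	h->size = bswap_16(h->size);
}

/* NULL means: remap and try again; the header bytes are left untouched. */
static struct rec_header *try_fetch(char *buf, uint64_t head, size_t win_size,
				    bool needs_swap)
{
	struct rec_header *h;

	if (head + sizeof(*h) > win_size)
		return NULL;
	h = (struct rec_header *)(buf + head);
	if (needs_swap)
		header_bswap(h);
	if (head + h->size <= win_size)
		return h;
	if (needs_swap)	/* not consuming it: restore the raw bytes */
		header_bswap(h);
	return NULL;
}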
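
The reader above walks the data file through a fixed-size mmap window, and when a record straddles the window's end it remaps starting at the page boundary just below the partial record. A compact sketch of that windowing, assuming, as the real code enforces, that any single record fits inside one window below a page boundary; the framing is again a stand-in:

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

struct rec_header { uint16_t size; };	/* stand-in record framing */

static int walk(int fd, uint64_t data_size, size_t win_size)
{
	uint64_t page = sysconf(_SC_PAGESIZE);
	uint64_t file_offset = 0, head = 0, pos = 0;

	while (pos < data_size) {
		char *buf = mmap(NULL, win_size, PROT_READ, MAP_SHARED,
				 fd, file_offset);
		uint64_t adj, start = pos;

		if (buf == MAP_FAILED)
			return -1;

		while (pos < data_size) {
			struct rec_header *h = (void *)(buf + head);

			if (head + sizeof(*h) > win_size)
				break;		/* header straddles the end */
			if (h->size < sizeof(*h)) {
				munmap(buf, win_size);
				return -1;	/* malformed record */
			}
			if (head + h->size > win_size)
				break;		/* body straddles: remap */
			/* ... deliver the record at buf + head here ... */
			head += h->size;
			pos += h->size;
		}
		munmap(buf, win_size);

		/* restart the window at the page holding the partial record */
		adj = page * (head / page);
		if (!adj && pos == start)
			return -1;		/* record larger than the window */
		file_offset += adj;
		head -= adj;
	}
	return 0;
}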
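
The directory-mode loop above drains about READER_MAX_SIZE (2 MB) from one per-cpu file before rotating to the next, so ordered-events sorting sees all CPUs in overlapping time windows rather than one whole file at a time. The rotation skeleton reduced to counters, with made-up file sizes:

#include <stdbool.h>
#include <stdio.h>

#define NR_READERS	4
#define BATCH_MAX	(2 * 1024 * 1024)	/* READER_MAX_SIZE */
#define EVENT_SZ	64			/* pretend fixed-size events */

struct rdr {
	long remaining;		/* bytes left in this per-cpu file */
	long batch;		/* bytes taken since the last rotation */
	bool done;
};

int main(void)
{
	struct rdr rd[NR_READERS] = {
		{ .remaining = 5 << 20 }, { .remaining = 1 << 20 },
		{ .remaining = 8 << 20 }, { .remaining = 3 << 20 },
	};
	int live = NR_READERS, i = 0;

	while (live) {
		struct rdr *r = &rd[i];

		if (r->done) {			/* skip exhausted files */
			i = (i + 1) % NR_READERS;
			continue;
		}
		if (r->remaining == 0) {	/* this file just hit EOF */
			r->done = true;
			live--;
			continue;
		}

		r->remaining -= EVENT_SZ;	/* "process" one event */
		r->batch += EVENT_SZ;

		if (r->batch >= BATCH_MAX) {	/* rotate after ~2 MB */
			printf("reader %d: batch done, rotating\n", i);
			r->batch = 0;
			i = (i + 1) % NR_READERS;
		}
	}
	return 0;
}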
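
perf_event__process_id_index() above accepts a record carrying nr fixed-size v1 entries, optionally followed by nr v2 entries; the second table is present iff the payload leaves room for nr * (v1 + v2) bytes, and every count is checked by division so the multiplications cannot be fooled by overflow. The bounds discipline in isolation (both entry layouts are stand-ins):

#include <stddef.h>
#include <stdint.h>

struct entry_v1 { uint64_t id, idx; int64_t cpu, tid; };
struct entry_v2 { int64_t machine_pid; uint64_t vcpu; };

/*
 * @payload/@sz: the bytes after the record header; @nr: claimed entry count.
 * On success *e2p points at the v2 table, or is NULL when only v1 is present.
 */
static int locate_tables(void *payload, size_t sz, size_t nr,
			 struct entry_v1 **e1p, struct entry_v2 **e2p)
{
	size_t e1_sz = sizeof(struct entry_v1);
	size_t e2_sz = sizeof(struct entry_v2);

	if (nr > sz / e1_sz)
		return -1;		/* nr lies about the v1 table */

	*e1p = payload;
	*e2p = NULL;
	if (sz >= nr * (e1_sz + e2_sz)) {
		/* division also catches nr * (e1_sz + e2_sz) overflowing */
		if (nr > sz / (e1_sz + e2_sz))
			return -1;
		*e2p = (struct entry_v2 *)((char *)payload + nr * e1_sz);
	}
	return 0;
}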
