Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/annotate.c        | 163
-rw-r--r--  tools/perf/util/annotate.h        |   1
-rw-r--r--  tools/perf/util/bpf-event.c       | 425
-rw-r--r--  tools/perf/util/bpf-event.h       |  42
-rw-r--r--  tools/perf/util/build-id.c        |   1
-rw-r--r--  tools/perf/util/config.c          |   3
-rw-r--r--  tools/perf/util/data.c            |  11
-rw-r--r--  tools/perf/util/data.h            |   2
-rw-r--r--  tools/perf/util/dso.c             |  43
-rw-r--r--  tools/perf/util/dso.h             |   8
-rw-r--r--  tools/perf/util/env.c             | 155
-rw-r--r--  tools/perf/util/env.h             |  24
-rw-r--r--  tools/perf/util/evlist.c          | 119
-rw-r--r--  tools/perf/util/evlist.h          |  12
-rw-r--r--  tools/perf/util/evsel.c           |   8
-rw-r--r--  tools/perf/util/evsel.h           |   6
-rw-r--r--  tools/perf/util/header.c          | 253
-rw-r--r--  tools/perf/util/header.h          |   2
-rw-r--r--  tools/perf/util/hist.c            |   4
-rw-r--r--  tools/perf/util/map.c             |  18
-rw-r--r--  tools/perf/util/ordered-events.c  |   2
-rw-r--r--  tools/perf/util/parse-events.c    |   2
-rw-r--r--  tools/perf/util/session.c         |   1
-rw-r--r--  tools/perf/util/sort.c            |  52
-rw-r--r--  tools/perf/util/sort.h            |   2
-rw-r--r--  tools/perf/util/stat.c            |  12
-rw-r--r--  tools/perf/util/symbol.c          |   1
27 files changed, 1223 insertions, 149 deletions
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 5f6dbbf5d749..c8b01176c9e1 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -10,6 +10,10 @@
#include <errno.h>
#include <inttypes.h>
#include <libgen.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <linux/btf.h>
#include "util.h"
#include "ui/ui.h"
#include "sort.h"
@@ -24,6 +28,7 @@
#include "annotate.h"
#include "evsel.h"
#include "evlist.h"
+#include "bpf-event.h"
#include "block-range.h"
#include "string2.h"
#include "arch/common.h"
@@ -31,6 +36,7 @@
#include <pthread.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <bpf/libbpf.h>
/* FIXME: For the HE_COLORSET */
#include "ui/browser.h"
@@ -1615,6 +1621,9 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
" --vmlinux vmlinux\n", build_id_msg ?: "");
}
break;
+ case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
+ scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
+ break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
@@ -1674,6 +1683,156 @@ fallback:
return 0;
}
+#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+#define PACKAGE "perf"
+#include <bfd.h>
+#include <dis-asm.h>
+
+static int symbol__disassemble_bpf(struct symbol *sym,
+ struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct annotation_options *opts = args->options;
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_linfo *prog_linfo = NULL;
+ struct bpf_prog_info_node *info_node;
+ int len = sym->end - sym->start;
+ disassembler_ftype disassemble;
+ struct map *map = args->ms.map;
+ struct disassemble_info info;
+ struct dso *dso = map->dso;
+ int pc = 0, count, sub_id;
+ struct btf *btf = NULL;
+ char tpath[PATH_MAX];
+ size_t buf_size;
+ int nr_skip = 0;
+ int ret = -1;
+ char *buf;
+ bfd *bfdf;
+ FILE *s;
+
+ if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
+ return -1;
+
+ pr_debug("%s: handling sym %s addr %lx len %lx\n", __func__,
+ sym->name, sym->start, sym->end - sym->start);
+
+ memset(tpath, 0, sizeof(tpath));
+ perf_exe(tpath, sizeof(tpath));
+
+ bfdf = bfd_openr(tpath, NULL);
+ assert(bfdf);
+ assert(bfd_check_format(bfdf, bfd_object));
+
+ s = open_memstream(&buf, &buf_size);
+ if (!s)
+ goto out;
+ init_disassemble_info(&info, s,
+ (fprintf_ftype) fprintf);
+
+ info.arch = bfd_get_arch(bfdf);
+ info.mach = bfd_get_mach(bfdf);
+
+ info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
+ dso->bpf_prog.id);
+ if (!info_node)
+ goto out;
+ info_linear = info_node->info_linear;
+ sub_id = dso->bpf_prog.sub_id;
+
+ info.buffer = (void *)(info_linear->info.jited_prog_insns);
+ info.buffer_length = info_linear->info.jited_prog_len;
+
+ if (info_linear->info.nr_line_info)
+ prog_linfo = bpf_prog_linfo__new(&info_linear->info);
+
+ if (info_linear->info.btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(dso->bpf_prog.env,
+ info_linear->info.btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+ disassemble_init_for_target(&info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
+ disassemble = disassembler(bfdf);
+#endif
+ assert(disassemble);
+
+ fflush(s);
+ do {
+ const struct bpf_line_info *linfo = NULL;
+ struct disasm_line *dl;
+ size_t prev_buf_size;
+ const char *srcline;
+ u64 addr;
+
+ addr = pc + ((u64 *)(info_linear->info.jited_ksyms))[sub_id];
+ count = disassemble(pc, &info);
+
+ if (prog_linfo)
+ linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+ addr, sub_id,
+ nr_skip);
+
+ if (linfo && btf) {
+ srcline = btf__name_by_offset(btf, linfo->line_off);
+ nr_skip++;
+ } else
+ srcline = NULL;
+
+ fprintf(s, "\n");
+ prev_buf_size = buf_size;
+ fflush(s);
+
+ if (!opts->hide_src_code && srcline) {
+ args->offset = -1;
+ args->line = strdup(srcline);
+ args->line_nr = 0;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl) {
+ annotation_line__add(&dl->al,
+ &notes->src->source);
+ }
+ }
+
+ args->offset = pc;
+ args->line = buf + prev_buf_size;
+ args->line_nr = 0;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl)
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ pc += count;
+ } while (count > 0 && pc < len);
+
+ ret = 0;
+out:
+ free(prog_linfo);
+ free(btf);
+ fclose(s);
+ bfd_close(bfdf);
+ return ret;
+}
+#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
+ struct annotate_args *args __maybe_unused)
+{
+ return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
+}
+#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
struct annotation_options *opts = args->options;
@@ -1701,7 +1860,9 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
pr_debug("annotating [%p] %30s : [%p] %30s\n",
dso, dso->long_name, sym, sym->name);
- if (dso__is_kcore(dso)) {
+ if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
+ return symbol__disassemble_bpf(sym, args);
+ } else if (dso__is_kcore(dso)) {
kce.kcore_filename = symfs_filename;
kce.addr = map__rip_2objdump(map, sym->start);
kce.offs = sym->start;
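
symbol__disassemble_bpf() above drives binutils' libopcodes over the JITed image that perf stashed in bpf_prog_info, instead of spawning objdump. For readers unfamiliar with that API, here is a rough, self-contained sketch of the same pattern (raw buffer in, text out through open_memstream); it assumes binutils' <dis-asm.h> and glosses over the newer styled-printer init_disassemble_info() variants.

#define PACKAGE "example"	/* bfd.h refuses to build without PACKAGE defined */
#include <bfd.h>
#include <dis-asm.h>
#include <stdio.h>
#include <stdlib.h>

static int dump_jited_buffer(const void *code, size_t len, const char *self_exe)
{
	struct disassemble_info info;
	disassembler_ftype disasm;
	size_t buf_size, pc = 0;
	char *buf;
	bfd *abfd;
	FILE *s;

	/* borrow arch/mach from our own executable, as the patch does via perf_exe() */
	abfd = bfd_openr(self_exe, NULL);
	if (!abfd || !bfd_check_format(abfd, bfd_object))
		return -1;

	s = open_memstream(&buf, &buf_size);
	if (!s) {
		bfd_close(abfd);
		return -1;
	}

	init_disassemble_info(&info, s, (fprintf_ftype) fprintf);
	info.arch = bfd_get_arch(abfd);
	info.mach = bfd_get_mach(abfd);
	info.buffer = (bfd_byte *) code;
	info.buffer_length = len;
	disassemble_init_for_target(&info);

#ifdef DISASM_FOUR_ARGS_SIGNATURE
	disasm = disassembler(info.arch, bfd_big_endian(abfd), info.mach, abfd);
#else
	disasm = disassembler(abfd);
#endif

	while (pc < len) {
		int count = disasm(pc, &info);	/* appends one instruction's text to 's' */

		fprintf(s, "\n");
		if (count <= 0)
			break;
		pc += count;
	}

	fflush(s);
	fwrite(buf, 1, buf_size, stdout);

	fclose(s);
	free(buf);
	bfd_close(abfd);
	return 0;
}
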
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index df34fe483164..5bc0cf655d37 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -369,6 +369,7 @@ enum symbol_disassemble_errno {
__SYMBOL_ANNOTATE_ERRNO__START = -10000,
SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START,
+ SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
__SYMBOL_ANNOTATE_ERRNO__END,
};
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 028c8ec1f62a..2a4a0da35632 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -3,11 +3,17 @@
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
+#include <bpf/libbpf.h>
#include <linux/btf.h>
+#include <linux/err.h>
#include "bpf-event.h"
#include "debug.h"
#include "symbol.h"
#include "machine.h"
+#include "env.h"
+#include "session.h"
+#include "map.h"
+#include "evlist.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
@@ -21,15 +27,122 @@ static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
return ret;
}
+static int machine__process_bpf_event_load(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct perf_env *env = machine->env;
+ int id = event->bpf_event.id;
+ unsigned int i;
+
+ /* perf-record, no need to handle bpf-event */
+ if (env == NULL)
+ return 0;
+
+ info_node = perf_env__find_bpf_prog_info(env, id);
+ if (!info_node)
+ return 0;
+ info_linear = info_node->info_linear;
+
+ for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
+ u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
+ u64 addr = addrs[i];
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, addr);
+
+ if (map) {
+ map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
+ map->dso->bpf_prog.id = id;
+ map->dso->bpf_prog.sub_id = i;
+ map->dso->bpf_prog.env = env;
+ }
+ }
+ return 0;
+}
+
int machine__process_bpf_event(struct machine *machine __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_bpf_event(event, stdout);
+
+ switch (event->bpf_event.type) {
+ case PERF_BPF_EVENT_PROG_LOAD:
+ return machine__process_bpf_event_load(machine, event, sample);
+
+ case PERF_BPF_EVENT_PROG_UNLOAD:
+ /*
+ * Do not free bpf_prog_info and btf of the program here,
+ * as annotation still need them. They will be freed at
+ * the end of the session.
+ */
+ break;
+ default:
+ pr_debug("unexpected bpf_event type of %d\n",
+ event->bpf_event.type);
+ break;
+ }
return 0;
}
+static int perf_env__fetch_btf(struct perf_env *env,
+ u32 btf_id,
+ struct btf *btf)
+{
+ struct btf_node *node;
+ u32 data_size;
+ const void *data;
+
+ data = btf__get_raw_data(btf, &data_size);
+
+ node = malloc(data_size + sizeof(struct btf_node));
+ if (!node)
+ return -1;
+
+ node->id = btf_id;
+ node->data_size = data_size;
+ memcpy(node->data, data, data_size);
+
+ perf_env__insert_btf(env, node);
+ return 0;
+}
+
+static int synthesize_bpf_prog_name(char *buf, int size,
+ struct bpf_prog_info *info,
+ struct btf *btf,
+ u32 sub_id)
+{
+ u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
+ void *func_infos = (void *)(uintptr_t)(info->func_info);
+ u32 sub_prog_cnt = info->nr_jited_ksyms;
+ const struct bpf_func_info *finfo;
+ const char *short_name = NULL;
+ const struct btf_type *t;
+ int name_len;
+
+ name_len = snprintf(buf, size, "bpf_prog_");
+ name_len += snprintf_hex(buf + name_len, size - name_len,
+ prog_tags[sub_id], BPF_TAG_SIZE);
+ if (btf) {
+ finfo = func_infos + sub_id * info->func_info_rec_size;
+ t = btf__type_by_id(btf, finfo->type_id);
+ short_name = btf__name_by_offset(btf, t->name_off);
+ } else if (sub_id == 0 && sub_prog_cnt == 1) {
+ /* no subprog */
+ if (info->name[0])
+ short_name = info->name;
+ } else
+ short_name = "F";
+ if (short_name)
+ name_len += snprintf(buf + name_len, size - name_len,
+ "_%s", short_name);
+ return name_len;
+}
+
/*
* Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
* program. One PERF_RECORD_BPF_EVENT is generated for the program. And
@@ -40,7 +153,7 @@ int machine__process_bpf_event(struct machine *machine __maybe_unused,
* -1 for failures;
* -2 for lack of kernel support.
*/
-static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
+static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
int fd,
@@ -49,102 +162,71 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
{
struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
struct bpf_event *bpf_event = &event->bpf_event;
- u32 sub_prog_cnt, i, func_info_rec_size = 0;
- u8 (*prog_tags)[BPF_TAG_SIZE] = NULL;
- struct bpf_prog_info info = { .type = 0, };
- u32 info_len = sizeof(info);
- void *func_infos = NULL;
- u64 *prog_addrs = NULL;
+ struct bpf_prog_info_linear *info_linear;
+ struct perf_tool *tool = session->tool;
+ struct bpf_prog_info_node *info_node;
+ struct bpf_prog_info *info;
struct btf *btf = NULL;
- u32 *prog_lens = NULL;
- bool has_btf = false;
- char errbuf[512];
+ struct perf_env *env;
+ u32 sub_prog_cnt, i;
int err = 0;
+ u64 arrays;
+
+ /*
+ * for perf-record and perf-report use header.env;
+ * otherwise, use global perf_env.
+ */
+ env = session->data ? &session->header.env : &perf_env;
- /* Call bpf_obj_get_info_by_fd() to get sizes of arrays */
- err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+ arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
- if (err) {
- pr_debug("%s: failed to get BPF program info: %s, aborting\n",
- __func__, str_error_r(errno, errbuf, sizeof(errbuf)));
+ info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ info_linear = NULL;
+ pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
return -1;
}
- if (info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+
+ if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
pr_debug("%s: the kernel is too old, aborting\n", __func__);
return -2;
}
+ info = &info_linear->info;
+
/* number of ksyms, func_lengths, and tags should match */
- sub_prog_cnt = info.nr_jited_ksyms;
- if (sub_prog_cnt != info.nr_prog_tags ||
- sub_prog_cnt != info.nr_jited_func_lens)
+ sub_prog_cnt = info->nr_jited_ksyms;
+ if (sub_prog_cnt != info->nr_prog_tags ||
+ sub_prog_cnt != info->nr_jited_func_lens)
return -1;
/* check BTF func info support */
- if (info.btf_id && info.nr_func_info && info.func_info_rec_size) {
+ if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
/* btf func info number should be same as sub_prog_cnt */
- if (sub_prog_cnt != info.nr_func_info) {
+ if (sub_prog_cnt != info->nr_func_info) {
pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
- return -1;
- }
- if (btf__get_from_id(info.btf_id, &btf)) {
- pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
- return -1;
+ err = -1;
+ goto out;
}
- func_info_rec_size = info.func_info_rec_size;
- func_infos = calloc(sub_prog_cnt, func_info_rec_size);
- if (!func_infos) {
- pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__);
- return -1;
+ if (btf__get_from_id(info->btf_id, &btf)) {
+ pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
+ err = -1;
+ btf = NULL;
+ goto out;
}
- has_btf = true;
- }
-
- /*
- * We need address, length, and tag for each sub program.
- * Allocate memory and call bpf_obj_get_info_by_fd() again
- */
- prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
- if (!prog_addrs) {
- pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
- goto out;
- }
- prog_lens = calloc(sub_prog_cnt, sizeof(u32));
- if (!prog_lens) {
- pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
- goto out;
- }
- prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
- if (!prog_tags) {
- pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
- goto out;
- }
-
- memset(&info, 0, sizeof(info));
- info.nr_jited_ksyms = sub_prog_cnt;
- info.nr_jited_func_lens = sub_prog_cnt;
- info.nr_prog_tags = sub_prog_cnt;
- info.jited_ksyms = ptr_to_u64(prog_addrs);
- info.jited_func_lens = ptr_to_u64(prog_lens);
- info.prog_tags = ptr_to_u64(prog_tags);
- info_len = sizeof(info);
- if (has_btf) {
- info.nr_func_info = sub_prog_cnt;
- info.func_info_rec_size = func_info_rec_size;
- info.func_info = ptr_to_u64(func_infos);
- }
-
- err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
- if (err) {
- pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
- goto out;
+ perf_env__fetch_btf(env, info->btf_id, btf);
}
/* Synthesize PERF_RECORD_KSYMBOL */
for (i = 0; i < sub_prog_cnt; i++) {
- const struct bpf_func_info *finfo;
- const char *short_name = NULL;
- const struct btf_type *t;
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
int name_len;
*ksymbol_event = (struct ksymbol_event){
@@ -157,26 +239,9 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
.flags = 0,
};
- name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
- "bpf_prog_");
- name_len += snprintf_hex(ksymbol_event->name + name_len,
- KSYM_NAME_LEN - name_len,
- prog_tags[i], BPF_TAG_SIZE);
- if (has_btf) {
- finfo = func_infos + i * info.func_info_rec_size;
- t = btf__type_by_id(btf, finfo->type_id);
- short_name = btf__name_by_offset(btf, t->name_off);
- } else if (i == 0 && sub_prog_cnt == 1) {
- /* no subprog */
- if (info.name[0])
- short_name = info.name;
- } else
- short_name = "F";
- if (short_name)
- name_len += snprintf(ksymbol_event->name + name_len,
- KSYM_NAME_LEN - name_len,
- "_%s", short_name);
+ name_len = synthesize_bpf_prog_name(ksymbol_event->name,
+ KSYM_NAME_LEN, info, btf, i);
ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
sizeof(u64));
@@ -186,8 +251,8 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
machine, process);
}
- /* Synthesize PERF_RECORD_BPF_EVENT */
- if (opts->bpf_event) {
+ if (!opts->no_bpf_event) {
+ /* Synthesize PERF_RECORD_BPF_EVENT */
*bpf_event = (struct bpf_event){
.header = {
.type = PERF_RECORD_BPF_EVENT,
@@ -195,25 +260,38 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
},
.type = PERF_BPF_EVENT_PROG_LOAD,
.flags = 0,
- .id = info.id,
+ .id = info->id,
};
- memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE);
+ memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
memset((void *)event + event->header.size, 0, machine->id_hdr_size);
event->header.size += machine->id_hdr_size;
+
+ /* save bpf_prog_info to env */
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (!info_node) {
+ err = -1;
+ goto out;
+ }
+
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ info_linear = NULL;
+
+ /*
+ * process after saving bpf_prog_info to env, so that
+ * required information is ready for look up
+ */
err = perf_tool__process_synth_event(tool, event,
machine, process);
}
out:
- free(prog_tags);
- free(prog_lens);
- free(prog_addrs);
- free(func_infos);
+ free(info_linear);
free(btf);
return err ? -1 : 0;
}
-int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+int perf_event__synthesize_bpf_events(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
struct record_opts *opts)
@@ -247,7 +325,7 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
continue;
}
- err = perf_event__synthesize_one_bpf_prog(tool, process,
+ err = perf_event__synthesize_one_bpf_prog(session, process,
machine, fd,
event, opts);
close(fd);
@@ -261,3 +339,142 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
free(event);
return err;
}
+
+static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct btf *btf = NULL;
+ u64 arrays;
+ u32 btf_id;
+ int fd;
+
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0)
+ return;
+
+ arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+ arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
+
+ info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
+ goto out;
+ }
+
+ btf_id = info_linear->info.btf_id;
+
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (info_node) {
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ } else
+ free(info_linear);
+
+ if (btf_id == 0)
+ goto out;
+
+ if (btf__get_from_id(btf_id, &btf)) {
+ pr_debug("%s: failed to get BTF of id %u, aborting\n",
+ __func__, btf_id);
+ goto out;
+ }
+ perf_env__fetch_btf(env, btf_id, btf);
+
+out:
+ free(btf);
+ close(fd);
+}
+
+static int bpf_event__sb_cb(union perf_event *event, void *data)
+{
+ struct perf_env *env = data;
+
+ if (event->header.type != PERF_RECORD_BPF_EVENT)
+ return -1;
+
+ switch (event->bpf_event.type) {
+ case PERF_BPF_EVENT_PROG_LOAD:
+ perf_env__add_bpf_info(env, event->bpf_event.id);
+
+ case PERF_BPF_EVENT_PROG_UNLOAD:
+ /*
+ * Do not free bpf_prog_info and btf of the program here,
+ * as annotation still need them. They will be freed at
+ * the end of the session.
+ */
+ break;
+ default:
+ pr_debug("unexpected bpf_event type of %d\n",
+ event->bpf_event.type);
+ break;
+ }
+
+ return 0;
+}
+
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+ struct perf_env *env)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ .sample_id_all = 1,
+ .watermark = 1,
+ .bpf_event = 1,
+ .size = sizeof(attr), /* to capture ABI version */
+ };
+
+ /*
+ * Older gcc versions don't support designated initializers, like above,
+ * for unnamed union members, such as the following:
+ */
+ attr.wakeup_watermark = 1;
+
+ return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
+}
+
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp)
+{
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+ char name[KSYM_NAME_LEN];
+ struct btf *btf = NULL;
+ u32 sub_prog_cnt, i;
+
+ sub_prog_cnt = info->nr_jited_ksyms;
+ if (sub_prog_cnt != info->nr_prog_tags ||
+ sub_prog_cnt != info->nr_jited_func_lens)
+ return;
+
+ if (info->btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(env, info->btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+ if (sub_prog_cnt == 1) {
+ synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
+ fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
+ info->id, name, prog_addrs[0], prog_lens[0]);
+ return;
+ }
+
+ fprintf(fp, "# bpf_prog_info %u:\n", info->id);
+ for (i = 0; i < sub_prog_cnt; i++) {
+ synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);
+
+ fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
+ i, name, prog_addrs[i], prog_lens[i]);
+ }
+}
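
Both perf_event__synthesize_one_bpf_prog() and perf_env__add_bpf_info() above lean on bpf_program__get_prog_info_linear(), which fetches struct bpf_prog_info plus the variable-length arrays named in the bitmask in one contiguous allocation. A minimal, hedged sketch of that helper in isolation (the program id and the single-array mask are illustrative; IS_ERR_OR_NULL comes from the tools tree's linux/err.h):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <linux/err.h>

static void show_jited_ksyms(__u32 prog_id)
{
	struct bpf_prog_info_linear *il;
	__u64 *ksyms;
	__u32 i;
	int fd;

	fd = bpf_prog_get_fd_by_id(prog_id);
	if (fd < 0)
		return;

	/* only ask for the jited_ksyms array; the patch above requests seven arrays */
	il = bpf_program__get_prog_info_linear(fd, 1UL << BPF_PROG_INFO_JITED_KSYMS);
	if (IS_ERR_OR_NULL(il))
		goto out_close;

	ksyms = (__u64 *)(uintptr_t) il->info.jited_ksyms;
	for (i = 0; i < il->info.nr_jited_ksyms; i++)
		printf("sub prog %u starts at %#llx\n", i, (unsigned long long) ksyms[i]);

	free(il);
out_close:
	close(fd);
}
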
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index 7890067e1a37..04c33b3bfe28 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -3,22 +3,45 @@
#define __PERF_BPF_EVENT_H
#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <pthread.h>
+#include <api/fd/array.h>
#include "event.h"
+#include <stdio.h>
struct machine;
union perf_event;
+struct perf_env;
struct perf_sample;
-struct perf_tool;
struct record_opts;
+struct evlist;
+struct target;
+
+struct bpf_prog_info_node {
+ struct bpf_prog_info_linear *info_linear;
+ struct rb_node rb_node;
+};
+
+struct btf_node {
+ struct rb_node rb_node;
+ u32 id;
+ u32 data_size;
+ char data[];
+};
#ifdef HAVE_LIBBPF_SUPPORT
int machine__process_bpf_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
-int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+int perf_event__synthesize_bpf_events(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
struct record_opts *opts);
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+ struct perf_env *env);
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp);
#else
static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
union perf_event *event __maybe_unused,
@@ -27,12 +50,25 @@ static inline int machine__process_bpf_event(struct machine *machine __maybe_unu
return 0;
}
-static inline int perf_event__synthesize_bpf_events(struct perf_tool *tool __maybe_unused,
+static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
perf_event__handler_t process __maybe_unused,
struct machine *machine __maybe_unused,
struct record_opts *opts __maybe_unused)
{
return 0;
}
+
+static inline int bpf_event__add_sb_event(struct perf_evlist **evlist __maybe_unused,
+ struct perf_env *env __maybe_unused)
+{
+ return 0;
+}
+
+static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+ struct perf_env *env __maybe_unused,
+ FILE *fp __maybe_unused)
+{
+
+}
#endif // HAVE_LIBBPF_SUPPORT
#endif
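
Taken together, the declarations above are meant to be driven from a tool's setup path: synthesize the programs that are already loaded once at startup, then let a side-band dummy event follow later loads. A hedged sketch of that wiring (function and variable names are illustrative; the real call sites live in the perf builtins, outside this diff):

static int setup_bpf_tracking(struct perf_session *session,
			      struct machine *machine,
			      struct record_opts *opts,
			      perf_event__handler_t process,
			      struct perf_evlist **sb_evlist)
{
	int err;

	/* one-shot snapshot of programs already loaded in the kernel */
	err = perf_event__synthesize_bpf_events(session, process, machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize BPF events.\n");

	if (opts->no_bpf_event)
		return 0;

	/* follow programs loaded from now on via the side-band thread */
	err = bpf_event__add_sb_event(sb_evlist, &session->header.env);
	if (err)
		return err;

	/* pair with perf_evlist__stop_sb_thread(*sb_evlist) at teardown */
	return perf_evlist__start_sb_thread(*sb_evlist, &opts->target);
}
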
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index bff0d17920ed..0c5517a8d0b7 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -185,6 +185,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
return bf;
}
+/* The caller is responsible to free the returned buffer. */
char *build_id_cache__origname(const char *sbuild_id)
{
char *linkname;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index fa092511c52b..7e3c1b60120c 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -633,11 +633,10 @@ static int collect_config(const char *var, const char *value,
}
ret = set_value(item, value);
- return ret;
out_free:
free(key);
- return -1;
+ return ret;
}
int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index c6b67efea11a..6a64f713710d 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -361,9 +361,9 @@ ssize_t perf_data__write(struct perf_data *data,
int perf_data__switch(struct perf_data *data,
const char *postfix,
- size_t pos, bool at_exit)
+ size_t pos, bool at_exit,
+ char **new_filepath)
{
- char *new_filepath;
int ret;
if (check_pipe(data))
@@ -371,15 +371,15 @@ int perf_data__switch(struct perf_data *data,
if (perf_data__is_read(data))
return -EINVAL;
- if (asprintf(&new_filepath, "%s.%s", data->path, postfix) < 0)
+ if (asprintf(new_filepath, "%s.%s", data->path, postfix) < 0)
return -ENOMEM;
/*
* Only fire a warning, don't return error, continue fill
* original file.
*/
- if (rename(data->path, new_filepath))
- pr_warning("Failed to rename %s to %s\n", data->path, new_filepath);
+ if (rename(data->path, *new_filepath))
+ pr_warning("Failed to rename %s to %s\n", data->path, *new_filepath);
if (!at_exit) {
close(data->file.fd);
@@ -396,7 +396,6 @@ int perf_data__switch(struct perf_data *data,
}
ret = data->file.fd;
out:
- free(new_filepath);
return ret;
}
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 6aef8746469f..259868a39019 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -70,7 +70,7 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
*/
int perf_data__switch(struct perf_data *data,
const char *postfix,
- size_t pos, bool at_exit);
+ size_t pos, bool at_exit, char **new_filepath);
int perf_data__create_dir(struct perf_data *data, int nr);
int perf_data__open_dir(struct perf_data *data);
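
With the extra out-parameter, perf_data__switch() now hands the freshly renamed path back to the caller, which owns the string. A small hedged caller-side sketch (names are illustrative):

static int my_switch_output(struct perf_data *data, const char *timestamp,
			    size_t header_size, bool at_exit)
{
	char *new_filename = NULL;
	int fd;

	fd = perf_data__switch(data, timestamp, header_size, at_exit, &new_filename);
	if (fd >= 0)
		pr_debug("switched output to %s\n", new_filename);

	free(new_filename);	/* the caller owns the string now */
	return fd;
}
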
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index ba58ba603b69..e059976d9d93 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -184,6 +184,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__JAVA_JIT:
+ case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__NOT_FOUND:
ret = -1;
break;
@@ -1141,28 +1142,34 @@ void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
static void dso__set_basename(struct dso *dso)
{
- /*
- * basename() may modify path buffer, so we must pass
- * a copy.
- */
- char *base, *lname = strdup(dso->long_name);
+ char *base, *lname;
+ int tid;
- if (!lname)
- return;
-
- /*
- * basename() may return a pointer to internal
- * storage which is reused in subsequent calls
- * so copy the result.
- */
- base = strdup(basename(lname));
+ if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
+ if (asprintf(&base, "[JIT] tid %d", tid) < 0)
+ return;
+ } else {
+ /*
+ * basename() may modify path buffer, so we must pass
+ * a copy.
+ */
+ lname = strdup(dso->long_name);
+ if (!lname)
+ return;
- free(lname);
+ /*
+ * basename() may return a pointer to internal
+ * storage which is reused in subsequent calls
+ * so copy the result.
+ */
+ base = strdup(basename(lname));
- if (!base)
- return;
+ free(lname);
- dso__set_short_name(dso, base, true);
+ if (!base)
+ return;
+ }
+ dso__set_short_name(dso, base, true);
}
int dso__name_len(const struct dso *dso)
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index bb417c54c25a..6e3f63781e51 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -14,6 +14,7 @@
struct machine;
struct map;
+struct perf_env;
enum dso_binary_type {
DSO_BINARY_TYPE__KALLSYMS = 0,
@@ -35,6 +36,7 @@ enum dso_binary_type {
DSO_BINARY_TYPE__KCORE,
DSO_BINARY_TYPE__GUEST_KCORE,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__BPF_PROG_INFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -189,6 +191,12 @@ struct dso {
u64 debug_frame_offset;
u64 eh_frame_hdr_offset;
} data;
+ /* bpf prog information */
+ struct {
+ u32 id;
+ u32 sub_id;
+ struct perf_env *env;
+ } bpf_prog;
union { /* Tool specific area */
void *priv;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 4c23779e271a..c6351b557bb0 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -3,15 +3,163 @@
#include "env.h"
#include "sane_ctype.h"
#include "util.h"
+#include "bpf-event.h"
#include <errno.h>
#include <sys/utsname.h>
+#include <bpf/libbpf.h>
struct perf_env perf_env;
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node)
+{
+ __u32 prog_id = info_node->info_linear->info.id;
+ struct bpf_prog_info_node *node;
+ struct rb_node *parent = NULL;
+ struct rb_node **p;
+
+ down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.infos.rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
+ if (prog_id < node->info_linear->info.id) {
+ p = &(*p)->rb_left;
+ } else if (prog_id > node->info_linear->info.id) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated bpf prog info %u\n", prog_id);
+ goto out;
+ }
+ }
+
+ rb_link_node(&info_node->rb_node, parent, p);
+ rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+ env->bpf_progs.infos_cnt++;
+out:
+ up_write(&env->bpf_progs.lock);
+}
+
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id)
+{
+ struct bpf_prog_info_node *node = NULL;
+ struct rb_node *n;
+
+ down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.infos.rb_node;
+
+ while (n) {
+ node = rb_entry(n, struct bpf_prog_info_node, rb_node);
+ if (prog_id < node->info_linear->info.id)
+ n = n->rb_left;
+ else if (prog_id > node->info_linear->info.id)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ up_read(&env->bpf_progs.lock);
+ return node;
+}
+
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+ struct rb_node *parent = NULL;
+ __u32 btf_id = btf_node->id;
+ struct btf_node *node;
+ struct rb_node **p;
+
+ down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.btfs.rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ node = rb_entry(parent, struct btf_node, rb_node);
+ if (btf_id < node->id) {
+ p = &(*p)->rb_left;
+ } else if (btf_id > node->id) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated btf %u\n", btf_id);
+ goto out;
+ }
+ }
+
+ rb_link_node(&btf_node->rb_node, parent, p);
+ rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+ env->bpf_progs.btfs_cnt++;
+out:
+ up_write(&env->bpf_progs.lock);
+}
+
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
+ struct btf_node *node = NULL;
+ struct rb_node *n;
+
+ down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.btfs.rb_node;
+
+ while (n) {
+ node = rb_entry(n, struct btf_node, rb_node);
+ if (btf_id < node->id)
+ n = n->rb_left;
+ else if (btf_id > node->id)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ up_read(&env->bpf_progs.lock);
+ return node;
+}
+
+/* purge data in bpf_progs.infos tree */
+static void perf_env__purge_bpf(struct perf_env *env)
+{
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_write(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+
+ while (next) {
+ struct bpf_prog_info_node *node;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+ rb_erase(&node->rb_node, root);
+ free(node);
+ }
+
+ env->bpf_progs.infos_cnt = 0;
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ rb_erase(&node->rb_node, root);
+ free(node);
+ }
+
+ env->bpf_progs.btfs_cnt = 0;
+
+ up_write(&env->bpf_progs.lock);
+}
+
void perf_env__exit(struct perf_env *env)
{
int i;
+ perf_env__purge_bpf(env);
zfree(&env->hostname);
zfree(&env->os_release);
zfree(&env->version);
@@ -38,6 +186,13 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->memory_nodes);
}
+void perf_env__init(struct perf_env *env)
+{
+ env->bpf_progs.infos = RB_ROOT;
+ env->bpf_progs.btfs = RB_ROOT;
+ init_rwsem(&env->bpf_progs.lock);
+}
+
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
int i;
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d01b8355f4ca..4f8e2b485c01 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -3,7 +3,9 @@
#define __PERF_ENV_H
#include <linux/types.h>
+#include <linux/rbtree.h>
#include "cpumap.h"
+#include "rwsem.h"
struct cpu_topology_map {
int socket_id;
@@ -64,8 +66,23 @@ struct perf_env {
struct memory_node *memory_nodes;
unsigned long long memory_bsize;
u64 clockid_res_ns;
+
+ /*
+ * bpf_info_lock protects bpf rbtrees. This is needed because the
+ * trees are accessed by different threads in perf-top
+ */
+ struct {
+ struct rw_semaphore lock;
+ struct rb_root infos;
+ u32 infos_cnt;
+ struct rb_root btfs;
+ u32 btfs_cnt;
+ } bpf_progs;
};
+struct bpf_prog_info_node;
+struct btf_node;
+
extern struct perf_env perf_env;
void perf_env__exit(struct perf_env *env);
@@ -80,4 +97,11 @@ const char *perf_env__arch(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
+void perf_env__init(struct perf_env *env);
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id);
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
#endif /* __PERF_ENV_H */
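
The new perf_env helpers split into a producer side (insert) and a consumer side (find), with the rw_semaphore making lookups from another thread (e.g. perf-top's display thread) safe. A hedged usage sketch, assuming 'info_linear' was already fetched for 'prog_id':

static void remember_and_lookup(struct perf_env *env,
				struct bpf_prog_info_linear *info_linear,
				__u32 prog_id)
{
	struct bpf_prog_info_node *node = malloc(sizeof(*node));

	if (node) {
		node->info_linear = info_linear;	/* env takes ownership */
		perf_env__insert_bpf_prog_info(env, node);
	}

	/* lookups may come from another thread; bpf_progs.lock covers them */
	node = perf_env__find_bpf_prog_info(env, prog_id);
	if (node)
		pr_debug("prog %u: %u sub program(s)\n",
			 prog_id, node->info_linear->info.nr_jited_ksyms);
}
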
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index ed20f4379956..ec78e93085de 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -19,6 +19,7 @@
#include "debug.h"
#include "units.h"
#include "asm/bug.h"
+#include "bpf-event.h"
#include <signal.h>
#include <unistd.h>
@@ -1856,3 +1857,121 @@ struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
}
return leader;
}
+
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+ struct perf_event_attr *attr,
+ perf_evsel__sb_cb_t cb,
+ void *data)
+{
+ struct perf_evsel *evsel;
+ bool new_evlist = (*evlist) == NULL;
+
+ if (*evlist == NULL)
+ *evlist = perf_evlist__new();
+ if (*evlist == NULL)
+ return -1;
+
+ if (!attr->sample_id_all) {
+ pr_warning("enabling sample_id_all for all side band events\n");
+ attr->sample_id_all = 1;
+ }
+
+ evsel = perf_evsel__new_idx(attr, (*evlist)->nr_entries);
+ if (!evsel)
+ goto out_err;
+
+ evsel->side_band.cb = cb;
+ evsel->side_band.data = data;
+ perf_evlist__add(*evlist, evsel);
+ return 0;
+
+out_err:
+ if (new_evlist) {
+ perf_evlist__delete(*evlist);
+ *evlist = NULL;
+ }
+ return -1;
+}
+
+static void *perf_evlist__poll_thread(void *arg)
+{
+ struct perf_evlist *evlist = arg;
+ bool draining = false;
+ int i;
+
+ while (draining || !(evlist->thread.done)) {
+ if (draining)
+ draining = false;
+ else if (evlist->thread.done)
+ draining = true;
+
+ if (!draining)
+ perf_evlist__poll(evlist, 1000);
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ struct perf_mmap *map = &evlist->mmap[i];
+ union perf_event *event;
+
+ if (perf_mmap__read_init(map))
+ continue;
+ while ((event = perf_mmap__read_event(map)) != NULL) {
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (evsel && evsel->side_band.cb)
+ evsel->side_band.cb(event, evsel->side_band.data);
+ else
+ pr_warning("cannot locate proper evsel for the side band event\n");
+
+ perf_mmap__consume(map);
+ }
+ perf_mmap__read_done(map);
+ }
+ }
+ return NULL;
+}
+
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+ struct target *target)
+{
+ struct perf_evsel *counter;
+
+ if (!evlist)
+ return 0;
+
+ if (perf_evlist__create_maps(evlist, target))
+ goto out_delete_evlist;
+
+ evlist__for_each_entry(evlist, counter) {
+ if (perf_evsel__open(counter, evlist->cpus,
+ evlist->threads) < 0)
+ goto out_delete_evlist;
+ }
+
+ if (perf_evlist__mmap(evlist, UINT_MAX))
+ goto out_delete_evlist;
+
+ evlist__for_each_entry(evlist, counter) {
+ if (perf_evsel__enable(counter))
+ goto out_delete_evlist;
+ }
+
+ evlist->thread.done = 0;
+ if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
+ goto out_delete_evlist;
+
+ return 0;
+
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ return -1;
+}
+
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist)
+{
+ if (!evlist)
+ return;
+ evlist->thread.done = 1;
+ pthread_join(evlist->thread.th, NULL);
+ perf_evlist__delete(evlist);
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 744906dd4887..dcb68f34d2cd 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -54,6 +54,10 @@ struct perf_evlist {
struct perf_sample *sample);
u64 first_sample_time;
u64 last_sample_time;
+ struct {
+ pthread_t th;
+ volatile int done;
+ } thread;
};
struct perf_evsel_str_handler {
@@ -87,6 +91,14 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
int perf_evlist__add_dummy(struct perf_evlist *evlist);
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+ struct perf_event_attr *attr,
+ perf_evsel__sb_cb_t cb,
+ void *data);
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+ struct target *target);
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist);
+
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 3bbf73e979c0..7835e05f0c0a 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1036,7 +1036,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
attr->ksymbol = track && !perf_missing_features.ksymbol;
- attr->bpf_event = track && opts->bpf_event &&
+ attr->bpf_event = track && !opts->no_bpf_event &&
!perf_missing_features.bpf_event;
if (opts->record_namespaces)
@@ -1292,6 +1292,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
{
assert(list_empty(&evsel->node));
assert(evsel->evlist == NULL);
+ perf_evsel__free_counts(evsel);
perf_evsel__free_fd(evsel);
perf_evsel__free_id(evsel);
perf_evsel__free_config_terms(evsel);
@@ -1342,10 +1343,9 @@ void perf_counts_values__scale(struct perf_counts_values *count,
count->val = 0;
} else if (count->run < count->ena) {
scaled = 1;
- count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
+ count->val = (u64)((double) count->val * count->ena / count->run);
}
- } else
- count->ena = count->run = 0;
+ }
if (pscaled)
*pscaled = scaled;
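
For reference, the scaling above extrapolates multiplexed counts as val * ena / run: with count->val = 101, count->ena = 1000000 and count->run = 300000, the extrapolated value is roughly 336.7 and is stored as 336 now that the '+ 0.5' rounding has been dropped (it would previously have been 337).
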
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index cc578e02e08f..0f2c6c93d721 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -73,6 +73,8 @@ struct perf_evsel_config_term {
struct perf_stat_evsel;
+typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);
+
/** struct perf_evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.
@@ -151,6 +153,10 @@ struct perf_evsel {
bool collect_stat;
bool weak_group;
const char *pmu_name;
+ struct {
+ perf_evsel__sb_cb_t *cb;
+ void *data;
+ } side_band;
};
union u64_swap {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b0683bf4d9f3..b9e693825873 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -18,6 +18,7 @@
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
+#include <bpf/libbpf.h>
#include "evlist.h"
#include "evsel.h"
@@ -40,6 +41,7 @@
#include "time-utils.h"
#include "units.h"
#include "cputopo.h"
+#include "bpf-event.h"
#include "sane_ctype.h"
@@ -876,6 +878,89 @@ static int write_dir_format(struct feat_fd *ff,
return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}
+#ifdef HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+ int ret;
+
+ down_read(&env->bpf_progs.lock);
+
+ ret = do_write(ff, &env->bpf_progs.infos_cnt,
+ sizeof(env->bpf_progs.infos_cnt));
+ if (ret < 0)
+ goto out;
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+ while (next) {
+ struct bpf_prog_info_node *node;
+ size_t len;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+ len = sizeof(struct bpf_prog_info_linear) +
+ node->info_linear->data_len;
+
+ /* before writing to file, translate address to offset */
+ bpf_program__bpil_addr_to_offs(node->info_linear);
+ ret = do_write(ff, node->info_linear, len);
+ /*
+ * translate back to address even when do_write() fails,
+ * so that this function never changes the data.
+ */
+ bpf_program__bpil_offs_to_addr(node->info_linear);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ up_read(&env->bpf_progs.lock);
+ return ret;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int write_bpf_btf(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+ int ret;
+
+ down_read(&env->bpf_progs.lock);
+
+ ret = do_write(ff, &env->bpf_progs.btfs_cnt,
+ sizeof(env->bpf_progs.btfs_cnt));
+
+ if (ret < 0)
+ goto out;
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ ret = do_write(ff, &node->id,
+ sizeof(u32) * 2 + node->data_size);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ up_read(&env->bpf_progs.lock);
+ return ret;
+}
+
static int cpu_cache_level__sort(const void *a, const void *b)
{
struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
@@ -1367,6 +1452,52 @@ static void print_dir_format(struct feat_fd *ff, FILE *fp)
fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}
+static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_read(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+
+ while (next) {
+ struct bpf_prog_info_node *node;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+
+ bpf_event__print_bpf_prog_info(&node->info_linear->info,
+ env, fp);
+ }
+
+ up_read(&env->bpf_progs.lock);
+}
+
+static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_read(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ fprintf(fp, "# btf info of id %u\n", node->id);
+ }
+
+ up_read(&env->bpf_progs.lock);
+}
+
static void free_event_desc(struct perf_evsel *events)
{
struct perf_evsel *evsel;
@@ -2414,6 +2545,124 @@ static int process_dir_format(struct feat_fd *ff,
return do_read_u64(ff, &data->dir.version);
}
+#ifdef HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct perf_env *env = &ff->ph->env;
+ u32 count, i;
+ int err = -1;
+
+ if (ff->ph->needs_swap) {
+ pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
+ return 0;
+ }
+
+ if (do_read_u32(ff, &count))
+ return -1;
+
+ down_write(&env->bpf_progs.lock);
+
+ for (i = 0; i < count; ++i) {
+ u32 info_len, data_len;
+
+ info_linear = NULL;
+ info_node = NULL;
+ if (do_read_u32(ff, &info_len))
+ goto out;
+ if (do_read_u32(ff, &data_len))
+ goto out;
+
+ if (info_len > sizeof(struct bpf_prog_info)) {
+ pr_warning("detected invalid bpf_prog_info\n");
+ goto out;
+ }
+
+ info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
+ data_len);
+ if (!info_linear)
+ goto out;
+ info_linear->info_len = sizeof(struct bpf_prog_info);
+ info_linear->data_len = data_len;
+ if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
+ goto out;
+ if (__do_read(ff, &info_linear->info, info_len))
+ goto out;
+ if (info_len < sizeof(struct bpf_prog_info))
+ memset(((void *)(&info_linear->info)) + info_len, 0,
+ sizeof(struct bpf_prog_info) - info_len);
+
+ if (__do_read(ff, info_linear->data, data_len))
+ goto out;
+
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (!info_node)
+ goto out;
+
+ /* after reading from file, translate offset to address */
+ bpf_program__bpil_offs_to_addr(info_linear);
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ }
+
+ return 0;
+out:
+ free(info_linear);
+ free(info_node);
+ up_write(&env->bpf_progs.lock);
+ return err;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ u32 count, i;
+
+ if (ff->ph->needs_swap) {
+ pr_warning("interpreting btf from systems with endianity is not yet supported\n");
+ return 0;
+ }
+
+ if (do_read_u32(ff, &count))
+ return -1;
+
+ down_write(&env->bpf_progs.lock);
+
+ for (i = 0; i < count; ++i) {
+ struct btf_node *node;
+ u32 id, data_size;
+
+ if (do_read_u32(ff, &id))
+ return -1;
+ if (do_read_u32(ff, &data_size))
+ return -1;
+
+ node = malloc(sizeof(struct btf_node) + data_size);
+ if (!node)
+ return -1;
+
+ node->id = id;
+ node->data_size = data_size;
+
+ if (__do_read(ff, node->data, data_size)) {
+ free(node);
+ return -1;
+ }
+
+ perf_env__insert_btf(env, node);
+ }
+
+ up_write(&env->bpf_progs.lock);
+ return 0;
+}
+
struct feature_ops {
int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2474,7 +2723,9 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(SAMPLE_TIME, sample_time, false),
FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
FEAT_OPR(CLOCKID, clockid, false),
- FEAT_OPN(DIR_FORMAT, dir_format, false)
+ FEAT_OPN(DIR_FORMAT, dir_format, false),
+ FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
+ FEAT_OPR(BPF_BTF, bpf_btf, false),
};
struct header_print_data {
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 6a231340238d..386da49e1bfa 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -40,6 +40,8 @@ enum {
HEADER_MEM_TOPOLOGY,
HEADER_CLOCKID,
HEADER_DIR_FORMAT,
+ HEADER_BPF_PROG_INFO,
+ HEADER_BPF_BTF,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
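
The write_bpf_prog_info()/process_bpf_prog_info() and write_bpf_btf()/process_bpf_btf() pairs in header.c imply the following on-disk layout for the two new feature sections (informal sketch, host endianness only, which is why needs_swap bails out):

  HEADER_BPF_PROG_INFO:
	u32 count;
	then 'count' records of:
		u32 info_len;			/* size of the bpf_prog_info that follows */
		u32 data_len;			/* size of the trailing arrays blob */
		u64 arrays;			/* bitmask of which arrays were dumped */
		struct bpf_prog_info info;	/* pointer fields stored as offsets into data[] */
		u8  data[data_len];

  HEADER_BPF_BTF:
	u32 count;
	then 'count' records of:
		u32 id;
		u32 data_size;
		u8  data[data_size];		/* raw BTF blob */
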
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 1f230285d78a..7ace7a10054d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1111,8 +1111,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
iter->evsel, al, max_stack_depth);
- if (err)
+ if (err) {
+ map__put(alm);
return err;
+ }
err = iter->ops->prepare_entry(iter, al);
if (err)
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index fbeb0c6efaa6..e32628cd20a7 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -577,10 +577,25 @@ static void __maps__purge(struct maps *maps)
}
}
+static void __maps__purge_names(struct maps *maps)
+{
+ struct rb_root *root = &maps->names;
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node_name);
+
+ next = rb_next(&pos->rb_node_name);
+ rb_erase_init(&pos->rb_node_name, root);
+ map__put(pos);
+ }
+}
+
static void maps__exit(struct maps *maps)
{
down_write(&maps->lock);
__maps__purge(maps);
+ __maps__purge_names(maps);
up_write(&maps->lock);
}
@@ -917,6 +932,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
{
rb_erase_init(&map->rb_node, &maps->entries);
map__put(map);
+
+ rb_erase_init(&map->rb_node_name, &maps->names);
+ map__put(map);
}
void maps__remove(struct maps *maps, struct map *map)
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index ea523d3b248f..989fed6f43b5 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
"FINAL",
"ROUND",
"HALF ",
+ "TOP ",
+ "TIME ",
};
int err;
bool show_progress = false;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4dcc01b2532c..5ef4939408f2 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2271,6 +2271,7 @@ static bool is_event_supported(u8 type, unsigned config)
perf_evsel__delete(evsel);
}
+ thread_map__put(tmap);
return ret;
}
@@ -2341,6 +2342,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
printf(" %-50s [%s]\n", buf, "SDT event");
free(buf);
}
+ free(path);
} else
printf(" %-50s [%s]\n", nd->s, "SDT event");
if (nd2) {
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0ec34227bd60..b17f1c9bc965 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -132,6 +132,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
ordered_events__init(&session->ordered_events,
ordered_events__deliver_event, NULL);
+ perf_env__init(&session->header.env);
if (data) {
if (perf_data__open(data))
goto out_delete;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index bdd30cab51cb..5d2518e89fc4 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -13,6 +13,7 @@
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
+#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
@@ -3107,3 +3108,54 @@ void reset_output_field(void)
reset_dimensions();
perf_hpp__reset_output_field(&perf_hpp_list);
}
+
+#define INDENT (3*8 + 1)
+
+static void add_key(struct strbuf *sb, const char *str, int *llen)
+{
+ if (*llen >= 75) {
+ strbuf_addstr(sb, "\n\t\t\t ");
+ *llen = INDENT;
+ }
+ strbuf_addf(sb, " %s", str);
+ *llen += strlen(str) + 1;
+}
+
+static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
+ int *llen)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ add_key(sb, s[i].name, llen);
+}
+
+static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
+ int *llen)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ add_key(sb, s[i].name, llen);
+}
+
+const char *sort_help(const char *prefix)
+{
+ struct strbuf sb;
+ char *s;
+ int len = strlen(prefix) + INDENT;
+
+ strbuf_init(&sb, 300);
+ strbuf_addstr(&sb, prefix);
+ add_hpp_sort_string(&sb, hpp_sort_dimensions,
+ ARRAY_SIZE(hpp_sort_dimensions), &len);
+ add_sort_string(&sb, common_sort_dimensions,
+ ARRAY_SIZE(common_sort_dimensions), &len);
+ add_sort_string(&sb, bstack_sort_dimensions,
+ ARRAY_SIZE(bstack_sort_dimensions), &len);
+ add_sort_string(&sb, memory_sort_dimensions,
+ ARRAY_SIZE(memory_sort_dimensions), &len);
+ s = strbuf_detach(&sb, NULL);
+ strbuf_release(&sb);
+ return s;
+}
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index bb9442ab7a0c..ce376a73f964 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -296,6 +296,8 @@ void reset_output_field(void);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);
+const char *sort_help(const char *prefix);
+
int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
bool is_strict_order(const char *order);
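
sort_help() builds the --sort option help text from the dimension tables at runtime instead of a hard-coded string. A hedged sketch of the intended caller (command name, option table and usage strings are illustrative; the returned string is detached from the strbuf, so the caller owns it):

#include <subcmd/parse-options.h>

static int cmd_example(int argc, const char **argv)
{
	const char * const usage[] = { "perf example [<options>]", NULL };
	const char *sort_order = NULL;
	const struct option options[] = {
		OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
			   sort_help("sort by key(s):")),
		OPT_END()
	};

	return parse_options(argc, argv, options, usage, 0);
}
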
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 4d40515307b8..2856cc9d5a31 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -291,10 +291,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
break;
case AGGR_GLOBAL:
aggr->val += count->val;
- if (config->scale) {
- aggr->ena += count->ena;
- aggr->run += count->run;
- }
+ aggr->ena += count->ena;
+ aggr->run += count->run;
case AGGR_UNSET:
default:
break;
@@ -442,10 +440,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel,
struct perf_event_attr *attr = &evsel->attr;
struct perf_evsel *leader = evsel->leader;
- if (config->scale) {
- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
- PERF_FORMAT_TOTAL_TIME_RUNNING;
- }
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
/*
* The event is part of non trivial group, let's enable
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 58442ca5e3c4..5cbad55cd99d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1455,6 +1455,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
return true;
+ case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__NOT_FOUND:
default:
return false;