Diffstat (limited to 'tools/perf/util/bpf_ftrace.c')
 tools/perf/util/bpf_ftrace.c | 114 +++++++++++++++++++++++++++++++-----------
 1 file changed, 87 insertions(+), 27 deletions(-)
diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c
index 7a4297d8fd2c..c456d24efa30 100644
--- a/tools/perf/util/bpf_ftrace.c
+++ b/tools/perf/util/bpf_ftrace.c
@@ -1,8 +1,10 @@
-#include <stdio.h>
+#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
+#include <stdio.h>
#include <stdlib.h>

+#include <bpf/bpf.h>
#include <linux/err.h>

#include "util/ftrace.h"
@@ -11,6 +13,7 @@
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"
+#include "util/stat.h"
#include "util/bpf_skel/func_latency.skel.h"
@@ -20,15 +23,26 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
int fd, err;
int i, ncpus = 1, ntasks = 1;
- struct filter_entry *func;
+ struct filter_entry *func = NULL;
- if (!list_is_singular(&ftrace->filters)) {
- pr_err("ERROR: %s target function(s).\n",
- list_empty(&ftrace->filters) ? "No" : "Too many");
- return -1;
- }
+ if (!list_empty(&ftrace->filters)) {
+ if (!list_is_singular(&ftrace->filters)) {
+ pr_err("ERROR: Too many target functions.\n");
+ return -1;
+ }
+ func = list_first_entry(&ftrace->filters, struct filter_entry, list);
+ } else {
+ int count = 0;
+ struct list_head *pos;

- func = list_first_entry(&ftrace->filters, struct filter_entry, list);
+ list_for_each(pos, &ftrace->event_pair)
+ count++;
+
+ if (count != 2) {
+ pr_err("ERROR: Needs two target events.\n");
+ return -1;
+ }
+ }
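
The new else branch supports measuring latency between two different events rather than a single function's entry and exit: it walks ftrace->event_pair with the kernel-style list API and requires exactly two entries. For reference, a standalone sketch of that counting idiom, using simplified stand-ins for the <linux/list.h> types (illustrative only, not part of the patch):

    #include <stdio.h>

    /* Simplified stand-ins for the <linux/list.h> API used above. */
    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define list_for_each(pos, head) \
            for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

    static void list_add_tail(struct list_head *node, struct list_head *head)
    {
            /* insert 'node' just before 'head', i.e. at the tail */
            node->prev = head->prev;
            node->next = head;
            head->prev->next = node;
            head->prev = node;
    }

    int main(void)
    {
            struct list_head event_pair = LIST_HEAD_INIT(event_pair);
            struct list_head a, b;
            struct list_head *pos;
            int count = 0;

            list_add_tail(&a, &event_pair);
            list_add_tail(&b, &event_pair);

            list_for_each(pos, &event_pair)
                    count++;

            /* mirrors the patch: exactly two events are required */
            printf("count=%d -> %s\n", count, count == 2 ? "ok" : "error");
            return 0;
    }
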
skel = func_latency_bpf__open();
if (!skel) {
@@ -36,17 +50,28 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
return -1;
}
+ skel->rodata->bucket_range = ftrace->bucket_range;
+ skel->rodata->min_latency = ftrace->min_latency;
+ skel->rodata->bucket_num = ftrace->bucket_num;
+ if (ftrace->bucket_range && ftrace->bucket_num) {
+ bpf_map__set_max_entries(skel->maps.latency, ftrace->bucket_num);
+ }
+
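
bucket_range, min_latency and bucket_num are new read-only parameters for the latency histogram, and the latency map is resized when an explicit range and bucket count are given. The BPF side lives in util/bpf_skel/func_latency.bpf.c and is not part of this diff; the constants suggest a linear bucketing scheme along the lines of the following sketch (an assumption for illustration, not the patch's code):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical linear bucketing implied by the three constants:
     * slot 0 catches samples below min_latency, the last slot catches
     * overflow, and everything in between is bucket_range wide. */
    static uint32_t latency_to_bucket(uint64_t delta, uint64_t min_latency,
                                      uint64_t bucket_range, uint32_t bucket_num)
    {
            uint64_t idx;

            if (delta < min_latency)
                    return 0;
            idx = (delta - min_latency) / bucket_range + 1;
            return idx >= bucket_num ? bucket_num - 1 : (uint32_t)idx;
    }

    int main(void)
    {
            /* ten buckets, 100us wide, starting at 500us */
            printf("%u %u %u\n",
                   latency_to_bucket(450, 500, 100, 10),    /* 0: underflow */
                   latency_to_bucket(620, 500, 100, 10),    /* 2 */
                   latency_to_bucket(9999, 500, 100, 10));  /* 9: overflow */
            return 0;
    }
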
/* don't need to set cpu filter for system-wide mode */
if (ftrace->target.cpu_list) {
ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
+ skel->rodata->has_cpu = 1;
}
if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+ skel->rodata->has_task = 1;
}
+ skel->rodata->use_nsec = ftrace->use_nsec;
+
set_max_rlimit();
err = func_latency_bpf__load(skel);
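
Ordering matters here: has_cpu, has_task and use_nsec used to be plain .bss variables set after load (see the removals below), but as .rodata constants they must be written between open and load, after which libbpf freezes the read-only data and the verifier can dead-code-eliminate the disabled filter paths. The general skeleton pattern, with a hypothetical skeleton name since the generated header is not shown here:

    /* Hypothetical skeleton "example_bpf" generated by bpftool; this
     * fragment only illustrates the open/set/load ordering the patch
     * relies on and is not runnable without the generated header. */
    struct example_bpf *skel = example_bpf__open();

    skel->rodata->has_cpu = 1;          /* OK: open but not yet loaded */

    if (example_bpf__load(skel) < 0)    /* .rodata is frozen from here on */
            return -1;

    /* writing skel->rodata->has_cpu here would no longer affect the program */
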
@@ -59,7 +84,6 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
u32 cpu;
u8 val = 1;
- skel->bss->has_cpu = 1;
fd = bpf_map__fd(skel->maps.cpu_filter);
for (i = 0; i < ncpus; i++) {
@@ -72,7 +96,6 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
u32 pid;
u8 val = 1;
- skel->bss->has_task = 1;
fd = bpf_map__fd(skel->maps.task_filter);
for (i = 0; i < ntasks; i++) {
@@ -81,22 +104,46 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
}
}
- skel->bss->use_nsec = ftrace->use_nsec;
+ skel->bss->min = INT64_MAX;
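
Initializing min from user space is what makes a running minimum work: the BPF program keeps a sample only if it compares lower, so starting at INT64_MAX guarantees the first sample wins. A runnable toy of the per-sample aggregation the BPF side presumably performs (an assumption; the real code is in func_latency.bpf.c):

    #include <stdint.h>
    #include <stdio.h>

    /* total/count/max start at zero; min must start at INT64_MAX */
    static int64_t total, count, max, min = INT64_MAX;

    static void update(int64_t delta)
    {
            total += delta;
            count++;
            if (delta > max)
                    max = delta;
            if (delta < min)
                    min = delta;
    }

    int main(void)
    {
            update(1500);
            update(300);
            update(900);
            printf("n=%lld avg=%lld min=%lld max=%lld\n",
                   (long long)count, (long long)(total / count),
                   (long long)min, (long long)max);
            return 0;
    }
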
- skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
- false, func->name);
- if (IS_ERR(skel->links.func_begin)) {
- pr_err("Failed to attach fentry program\n");
- err = PTR_ERR(skel->links.func_begin);
- goto out;
- }
+ if (func) {
+ skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
+ false, func->name);
+ if (IS_ERR(skel->links.func_begin)) {
+ pr_err("Failed to attach fentry program\n");
+ err = PTR_ERR(skel->links.func_begin);
+ goto out;
+ }
- skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
- true, func->name);
- if (IS_ERR(skel->links.func_end)) {
- pr_err("Failed to attach fexit program\n");
- err = PTR_ERR(skel->links.func_end);
- goto out;
+ skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
+ true, func->name);
+ if (IS_ERR(skel->links.func_end)) {
+ pr_err("Failed to attach fexit program\n");
+ err = PTR_ERR(skel->links.func_end);
+ goto out;
+ }
+ } else {
+ struct filter_entry *event;
+
+ event = list_first_entry(&ftrace->event_pair, struct filter_entry, list);
+
+ skel->links.event_begin = bpf_program__attach_raw_tracepoint(skel->progs.event_begin,
+ event->name);
+ if (IS_ERR(skel->links.event_begin)) {
+ pr_err("Failed to attach first tracepoint program\n");
+ err = PTR_ERR(skel->links.event_begin);
+ goto out;
+ }
+
+ event = list_next_entry(event, list);
+
+ skel->links.event_end = bpf_program__attach_raw_tracepoint(skel->progs.event_end,
+ event->name);
+ if (IS_ERR(skel->links.event_end)) {
+ pr_err("Failed to attach second tracepoint program\n");
+ err = PTR_ERR(skel->links.event_end);
+ goto out;
+ }
}
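
The attach step now has two shapes: a kprobe/kretprobe pair on one function name, or two raw tracepoints on the two event names. A condensed sketch of that branching with a hypothetical helper (note that with libbpf >= 1.0 a failed attach returns NULL and sets errno; perf's build reports errors via ERR_PTR values, hence the IS_ERR() checks in the patch itself):

    #include <stdbool.h>
    #include <bpf/libbpf.h>

    /* Hypothetical helper; 'begin' and 'end' come from an already-loaded
     * BPF object. In kprobe mode both probes target the same function
     * ('first'); in tracepoint mode each program gets its own event. */
    static int attach_pair(struct bpf_program *begin, struct bpf_program *end,
                           const char *first, const char *second, bool use_tp)
    {
            struct bpf_link *lb, *le;

            if (use_tp) {
                    lb = bpf_program__attach_raw_tracepoint(begin, first);
                    le = bpf_program__attach_raw_tracepoint(end, second);
            } else {
                    lb = bpf_program__attach_kprobe(begin, false, first);
                    le = bpf_program__attach_kprobe(end, true, first);
            }
            return (lb && le) ? 0 : -1;
    }
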
/* XXX: we don't actually use this fd - just for poll() */
@@ -118,8 +165,8 @@ int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
return 0;
}
-int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
- int buckets[])
+int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
+ int buckets[], struct stats *stats)
{
int i, fd, err;
u32 idx;
@@ -132,7 +179,7 @@ int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
if (hist == NULL)
return -ENOMEM;
- for (idx = 0; idx < NUM_BUCKET; idx++) {
+ for (idx = 0; idx < skel->rodata->bucket_num; idx++) {
err = bpf_map_lookup_elem(fd, &idx, hist);
if (err) {
buckets[idx] = 0;
@@ -143,6 +190,19 @@ int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
buckets[idx] += hist[i];
}
+ if (skel->bss->count) {
+ stats->mean = skel->bss->total / skel->bss->count;
+ stats->n = skel->bss->count;
+ stats->max = skel->bss->max;
+ stats->min = skel->bss->min;
+
+ if (!ftrace->use_nsec) {
+ stats->mean /= 1000;
+ stats->max /= 1000;
+ stats->min /= 1000;
+ }
+ }
+
free(hist);
return 0;
}
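
With the new signature, callers pass a struct stats to receive the aggregate alongside the histogram buckets; mean, min and max come back in microseconds unless use_nsec is set. A toy caller-side view of that conversion, using a stand-in for perf's struct stats and made-up values:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the 'struct stats' fields the patch fills in */
    struct stats { double mean; uint64_t n, max, min; };

    int main(void)
    {
            /* pretend these were read from the BPF object's .bss */
            uint64_t total = 4200000, count = 3, max = 2100000, min = 300000;
            struct stats st = { .mean = (double)total / count,
                                .n = count, .max = max, .min = min };
            int use_nsec = 0;

            if (!use_nsec) {        /* nsec -> usec, as in the patch */
                    st.mean /= 1000;
                    st.max /= 1000;
                    st.min /= 1000;
            }
            printf("n=%llu mean=%.1f min=%llu max=%llu (us)\n",
                   (unsigned long long)st.n, st.mean,
                   (unsigned long long)st.min, (unsigned long long)st.max);
            return 0;
    }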