Diffstat (limited to 'tools/perf/util/bpf_counter_cgroup.c')
-rw-r--r--	tools/perf/util/bpf_counter_cgroup.c	149
1 file changed, 86 insertions(+), 63 deletions(-)
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 631e34a0b66f..17d7196c6589 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -4,6 +4,7 @@
/* Copyright (c) 2021 Google */
#include <assert.h>
+#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
@@ -13,6 +14,7 @@
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
+#include <bpf/bpf.h>
#include <perf/bpf_perf.h>
#include "affinity.h"
@@ -26,6 +28,7 @@
#include "cpumap.h"
#include "thread_map.h"
+#include "bpf_skel/bperf_cgroup.h"
#include "bpf_skel/bperf_cgroup.skel.h"
static struct perf_event_attr cgrp_switch_attr = {
@@ -41,40 +44,75 @@ static struct bperf_cgroup_bpf *skel;
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))
+static void setup_rodata(struct bperf_cgroup_bpf *sk, int evlist_size)
+{
+ int map_size, total_cpus = cpu__max_cpu().cpu;
+
+ sk->rodata->num_cpus = total_cpus;
+ sk->rodata->num_events = evlist_size / nr_cgroups;
+
+ if (cgroup_is_v2("perf_event") > 0)
+ sk->rodata->use_cgroup_v2 = 1;
+
+ BUG_ON(evlist_size % nr_cgroups != 0);
+
+ /* we need one copy of events per cpu for reading */
+ map_size = total_cpus * evlist_size / nr_cgroups;
+ bpf_map__set_max_entries(sk->maps.events, map_size);
+ bpf_map__set_max_entries(sk->maps.cgrp_idx, nr_cgroups);
+ /* previous result is saved in a per-cpu array */
+ map_size = evlist_size / nr_cgroups;
+ bpf_map__set_max_entries(sk->maps.prev_readings, map_size);
+ /* cgroup result needs all events (per-cpu) */
+ map_size = evlist_size;
+ bpf_map__set_max_entries(sk->maps.cgrp_readings, map_size);
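+ /*
+ * Hypothetical sizing example (editor's illustration, not part of the
+ * patch): with 8 possible CPUs, 2 cgroups and 6 evsels on the evlist,
+ * num_events is 3, the events map holds 8 * 3 = 24 perf_event fds,
+ * prev_readings holds 3 per-cpu entries and cgrp_readings holds all
+ * 6 per-cpu entries.
+ */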
+}
+
+static void test_max_events_program_load(void)
+{
+#ifndef NDEBUG
+ /*
+ * Test that the program passes the BPF verifier with the maximum
+ * number of events. If this test fails, perf unfortunately needs to be
+ * recompiled with a lower BPERF_CGROUP__MAX_EVENTS to avoid BPF
+ * verifier issues.
+ */
+ int err, max_events = BPERF_CGROUP__MAX_EVENTS * nr_cgroups;
+ struct bperf_cgroup_bpf *test_skel = bperf_cgroup_bpf__open();
+
+ if (!test_skel) {
+ pr_err("Failed to open cgroup skeleton\n");
+ return;
+ }
+ setup_rodata(test_skel, max_events);
+ err = bperf_cgroup_bpf__load(test_skel);
+ if (err) {
+ pr_err("Failed to load cgroup skeleton with max events %d.\n",
+ BPERF_CGROUP__MAX_EVENTS);
+ }
+ bperf_cgroup_bpf__destroy(test_skel);
+#endif
+}
+
static int bperf_load_program(struct evlist *evlist)
{
struct bpf_link *link;
struct evsel *evsel;
struct cgroup *cgrp, *leader_cgrp;
- __u32 i, cpu;
- __u32 nr_cpus = evlist->core.all_cpus->nr;
+ int i, j;
+ struct perf_cpu cpu;
int total_cpus = cpu__max_cpu().cpu;
- int map_size, map_fd;
- int prog_fd, err;
+ int map_fd, prog_fd, err;
+
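+ /*
+ * Editor's note: raising the memlock rlimit up front (rather than just
+ * before bperf_cgroup_bpf__load() as before) presumably ensures the
+ * limit is already in place when the debug-only verifier check below
+ * creates its own maps.
+ */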
+ set_max_rlimit();
+
+ test_max_events_program_load();
skel = bperf_cgroup_bpf__open();
if (!skel) {
pr_err("Failed to open cgroup skeleton\n");
return -1;
}
-
- skel->rodata->num_cpus = total_cpus;
- skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;
-
- BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
-
- /* we need one copy of events per cpu for reading */
- map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
- bpf_map__set_max_entries(skel->maps.events, map_size);
- bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
- /* previous result is saved in a per-cpu array */
- map_size = evlist->core.nr_entries / nr_cgroups;
- bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
- /* cgroup result needs all events (per-cpu) */
- map_size = evlist->core.nr_entries;
- bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
-
- set_max_rlimit();
+ setup_rodata(skel, evlist->core.nr_entries);
err = bperf_cgroup_bpf__load(skel);
if (err) {
@@ -82,9 +120,6 @@ static int bperf_load_program(struct evlist *evlist)
goto out;
}
- if (cgroup_is_v2("perf_event") > 0)
- skel->bss->use_cgroup_v2 = 1;
-
err = -1;
cgrp_switch = evsel__new(&cgrp_switch_attr);
@@ -93,7 +128,7 @@ static int bperf_load_program(struct evlist *evlist)
goto out;
}
- for (i = 0; i < nr_cpus; i++) {
+ perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
FD(cgrp_switch, i));
if (IS_ERR(link)) {
@@ -115,29 +150,20 @@ static int bperf_load_program(struct evlist *evlist)
evsel->cgrp = NULL;
/* open single copy of the events w/o cgroup */
- err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
- if (err) {
- pr_err("Failed to open first cgroup events\n");
- goto out;
- }
+ err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
+ if (err == 0)
+ evsel->supported = true;
map_fd = bpf_map__fd(skel->maps.events);
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- int fd = FD(evsel, cpu);
- __u32 idx = evsel->core.idx * total_cpus +
- evlist->core.all_cpus->map[cpu].cpu;
-
- err = bpf_map_update_elem(map_fd, &idx, &fd,
- BPF_ANY);
- if (err < 0) {
- pr_err("Failed to update perf_event fd\n");
- goto out;
- }
+ perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
+ int fd = FD(evsel, j);
+ __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
+
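+ /*
+ * Editor's illustration: the events map is a flat [event][cpu]
+ * array keyed by evsel index times total_cpus plus the logical CPU
+ * number, e.g. with total_cpus == 8, evsel 2 on CPU 3 maps to
+ * idx 2 * 8 + 3 == 19. Note that cpu.cpu is the logical CPU id,
+ * not the cpu-map index j.
+ */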
+ bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY);
}
evsel->cgrp = leader_cgrp;
}
- evsel->supported = true;
if (evsel->cgrp == cgrp)
continue;
@@ -145,9 +171,8 @@ static int bperf_load_program(struct evlist *evlist)
cgrp = evsel->cgrp;
if (read_cgroup_id(cgrp) < 0) {
- pr_err("Failed to get cgroup id\n");
- err = -1;
- goto out;
+ pr_debug("Failed to get cgroup id for %s\n", cgrp->name);
+ cgrp->id = 0;
}
map_fd = bpf_map__fd(skel->maps.cgrp_idx);
@@ -195,7 +220,8 @@ static int bperf_cgrp__load(struct evsel *evsel,
}
static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
- int cpu __maybe_unused, int fd __maybe_unused)
+ int cpu_map_idx __maybe_unused,
+ int fd __maybe_unused)
{
/* nothing to do */
return 0;
@@ -207,14 +233,12 @@ static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
*/
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
- int i, cpu;
- int nr_cpus = evlist->core.all_cpus->nr;
+ struct perf_cpu cpu;
+ int idx;
int prog_fd = bpf_program__fd(skel->progs.trigger_read);
- for (i = 0; i < nr_cpus; i++) {
- cpu = evlist->core.all_cpus->map[i].cpu;
- bperf_trigger_reading(prog_fd, cpu);
- }
+ perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
+ bperf_trigger_reading(prog_fd, cpu.cpu);
return 0;
}
@@ -244,12 +268,10 @@ static int bperf_cgrp__disable(struct evsel *evsel)
static int bperf_cgrp__read(struct evsel *evsel)
{
struct evlist *evlist = evsel->evlist;
- int i, cpu, nr_cpus = evlist->core.all_cpus->nr;
int total_cpus = cpu__max_cpu().cpu;
struct perf_counts_values *counts;
struct bpf_perf_event_value *values;
int reading_map_fd, err = 0;
- __u32 idx;
if (evsel->core.idx)
return 0;
@@ -263,21 +285,22 @@ static int bperf_cgrp__read(struct evsel *evsel)
reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);
evlist__for_each_entry(evlist, evsel) {
- idx = evsel->core.idx;
+ __u32 idx = evsel->core.idx;
+ int i;
+ struct perf_cpu cpu;
+
err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
if (err) {
- pr_err("bpf map lookup falied: idx=%u, event=%s, cgrp=%s\n",
+ pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
idx, evsel__name(evsel), evsel->cgrp->name);
goto out;
}
- for (i = 0; i < nr_cpus; i++) {
- cpu = evlist->core.all_cpus->map[i].cpu;
-
+ perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
counts = perf_counts(evsel->counts, i, 0);
- counts->val = values[cpu].counter;
- counts->ena = values[cpu].enabled;
- counts->run = values[cpu].running;
+ counts->val = values[cpu.cpu].counter;
+ counts->ena = values[cpu.cpu].enabled;
+ counts->run = values[cpu.cpu].running;
}
}