Diffstat (limited to 'tools/perf/util/bpf_skel/bperf_cgroup.bpf.c')
-rw-r--r--  tools/perf/util/bpf_skel/bperf_cgroup.bpf.c | 62
1 file changed, 48 insertions(+), 14 deletions(-)
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
index 292c430768b5..c2298a2decc9 100644
--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -1,14 +1,12 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Facebook
// Copyright (c) 2021 Google
+#include "bperf_cgroup.h"
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
-#define MAX_LEVELS 10 // max cgroup hierarchy level: arbitrary
-#define MAX_EVENTS 32 // max events per cgroup: arbitrary
-
// NOTE: many of map and global data will be modified before loading
// from the userspace (perf tool) using the skeleton helpers.
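
The NOTE above refers to the libbpf skeleton workflow: the perf tool opens the skeleton, patches the read-only globals (which after this patch include use_cgroup_v2) and sizes the maps, and only then loads the object. A rough sketch of that userspace sequence, not taken from the perf source, assuming the generated skeleton name bperf_cgroup_bpf and illustrative parameter values:

#include <stdbool.h>
#include "bperf_cgroup.skel.h"

/* sketch: set .rodata globals before loading the BPF object */
static struct bperf_cgroup_bpf *open_and_load_sketch(int ncpus, int nevents,
						     bool cgrpv2)
{
	struct bperf_cgroup_bpf *skel = bperf_cgroup_bpf__open();

	if (!skel)
		return NULL;

	/* const volatile globals live in .rodata and must be set before load */
	skel->rodata->num_cpus = ncpus;
	skel->rodata->num_events = nevents;
	skel->rodata->use_cgroup_v2 = cgrpv2 ? 1 : 0;

	if (bperf_cgroup_bpf__load(skel)) {
		bperf_cgroup_bpf__destroy(skel);
		return NULL;
	}
	return skel;
}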
@@ -43,11 +41,39 @@ struct {
__uint(value_size, sizeof(struct bpf_perf_event_value));
} cgrp_readings SEC(".maps");
+/* new kernel cgroup definition */
+struct cgroup___new {
+ int level;
+ struct cgroup *ancestors[];
+} __attribute__((preserve_access_index));
+
+/* old kernel cgroup definition */
+struct cgroup___old {
+ int level;
+ u64 ancestor_ids[];
+} __attribute__((preserve_access_index));
+
const volatile __u32 num_events = 1;
const volatile __u32 num_cpus = 1;
+const volatile int use_cgroup_v2 = 0;
int enabled = 0;
-int use_cgroup_v2 = 0;
+int perf_subsys_id = -1;
+
+static inline __u64 get_cgroup_v1_ancestor_id(struct cgroup *cgrp, int level)
+{
+ /* recast pointer to capture new type for compiler */
+ struct cgroup___new *cgrp_new = (void *)cgrp;
+
+ if (bpf_core_field_exists(cgrp_new->ancestors)) {
+ return BPF_CORE_READ(cgrp_new, ancestors[level], kn, id);
+ } else {
+ /* recast pointer to capture old type for compiler */
+ struct cgroup___old *cgrp_old = (void *)cgrp;
+
+ return BPF_CORE_READ(cgrp_old, ancestor_ids[level]);
+ }
+}
static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
{
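
The cgroup___new / cgroup___old pair above uses the CO-RE "struct flavor" convention: libbpf strips everything from the triple underscore onward when matching a local definition against the kernel BTF, so both flavors relocate against struct cgroup, and bpf_core_field_exists() selects the matching branch when the program is loaded. The same pattern works for any field that changed across kernel versions; a minimal, generic sketch with a hypothetical field name (not from this patch):

/* sketch: hypothetical flavor for a field renamed between kernel versions */
struct task_struct___pre_rename {
	int old_field;				/* hypothetical field name */
} __attribute__((preserve_access_index));

static inline int read_compat_field(struct task_struct *p)
{
	struct task_struct___pre_rename *p_old = (void *)p;

	/* resolved at load time against the running kernel's BTF */
	if (bpf_core_field_exists(p_old->old_field))
		return BPF_CORE_READ(p_old, old_field);
	return 0;	/* field absent on this kernel */
}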
@@ -58,17 +84,25 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
int level;
int cnt;
- cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_event_cgrp_id], cgroup);
+ if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+ perf_event_cgrp_id);
+#else
+ perf_subsys_id = perf_event_cgrp_id;
+#endif
+ }
+ cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);
level = BPF_CORE_READ(cgrp, level);
- for (cnt = 0; i < MAX_LEVELS; i++) {
+ for (cnt = 0; i < BPERF_CGROUP__MAX_LEVELS; i++) {
__u64 cgrp_id;
if (i > level)
break;
// convert cgroup-id to a map index
- cgrp_id = BPF_CORE_READ(cgrp, ancestor_ids[i]);
+ cgrp_id = get_cgroup_v1_ancestor_id(cgrp, i);
elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);
if (!elem)
continue;
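
The cached perf_subsys_id lookup uses the __builtin_preserve_enum_value relocation so the index into css_set->subsys matches the running kernel even if cgroup_subsys_id enumerators differ from the vmlinux.h used at build time; older compilers fall back to the build-time constant. A more defensive variant could also confirm the enumerator exists on the target kernel before trusting the relocation; a sketch only, not part of the patch:

/* sketch: resolve the perf_event cgroup subsystem id defensively */
static inline int resolve_perf_subsys_id(void)
{
#if __has_builtin(__builtin_preserve_enum_value)
	if (bpf_core_enum_value_exists(enum cgroup_subsys_id, perf_event_cgrp_id))
		return bpf_core_enum_value(enum cgroup_subsys_id, perf_event_cgrp_id);
	return -1;	/* enumerator unknown on this kernel */
#else
	return perf_event_cgrp_id;	/* trust the build-time vmlinux.h value */
#endif
}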
@@ -87,7 +121,7 @@ static inline int get_cgroup_v2_idx(__u32 *cgrps, int size)
__u32 *elem;
int cnt;
- for (cnt = 0; i < MAX_LEVELS; i++) {
+ for (cnt = 0; i < BPERF_CGROUP__MAX_LEVELS; i++) {
__u64 cgrp_id = bpf_get_current_ancestor_cgroup_id(i);
if (cgrp_id == 0)
@@ -112,17 +146,17 @@ static int bperf_cgroup_count(void)
register int c = 0;
struct bpf_perf_event_value val, delta, *prev_val, *cgrp_val;
__u32 cpu = bpf_get_smp_processor_id();
- __u32 cgrp_idx[MAX_LEVELS];
+ __u32 cgrp_idx[BPERF_CGROUP__MAX_LEVELS];
int cgrp_cnt;
__u32 key, cgrp;
long err;
if (use_cgroup_v2)
- cgrp_cnt = get_cgroup_v2_idx(cgrp_idx, MAX_LEVELS);
+ cgrp_cnt = get_cgroup_v2_idx(cgrp_idx, BPERF_CGROUP__MAX_LEVELS);
else
- cgrp_cnt = get_cgroup_v1_idx(cgrp_idx, MAX_LEVELS);
+ cgrp_cnt = get_cgroup_v1_idx(cgrp_idx, BPERF_CGROUP__MAX_LEVELS);
- for ( ; idx < MAX_EVENTS; idx++) {
+ for ( ; idx < BPERF_CGROUP__MAX_EVENTS; idx++) {
if (idx == num_events)
break;
@@ -150,7 +184,7 @@ static int bperf_cgroup_count(void)
delta.enabled = val.enabled - prev_val->enabled;
delta.running = val.running - prev_val->running;
- for (c = 0; c < MAX_LEVELS; c++) {
+ for (c = 0; c < BPERF_CGROUP__MAX_LEVELS; c++) {
if (c == cgrp_cnt)
break;
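
The loop above accumulates each event's delta into one cgrp_readings slot per matching cgroup level. On the userspace side the totals can be pulled back out with the usual libbpf map accessors; a rough sketch, assuming a key layout of cgrp_index * num_events + event_index and a per-CPU map (neither detail is visible in this hunk):

#include <stdlib.h>
#include <linux/bpf.h>	/* struct bpf_perf_event_value */
#include <bpf/bpf.h>

/* sketch: sum one (cgroup, event) slot of cgrp_readings across all CPUs */
static int read_cgroup_counter(int map_fd, __u32 cgrp_index, __u32 event_index,
			       __u32 num_events, int ncpus,
			       struct bpf_perf_event_value *sum)
{
	/* assumed key layout: one slot per (cgroup index, event index) pair */
	__u32 key = cgrp_index * num_events + event_index;
	struct bpf_perf_event_value *vals;
	int i, err;

	/* per-CPU maps return one value per possible CPU for a single key */
	vals = calloc(ncpus, sizeof(*vals));
	if (!vals)
		return -1;

	err = bpf_map_lookup_elem(map_fd, &key, vals);
	if (!err) {
		sum->counter = sum->enabled = sum->running = 0;
		for (i = 0; i < ncpus; i++) {
			sum->counter += vals[i].counter;
			sum->enabled += vals[i].enabled;
			sum->running += vals[i].running;
		}
	}
	free(vals);
	return err;
}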
@@ -176,7 +210,7 @@ static int bperf_cgroup_count(void)
}
// This will be attached to cgroup-switches event for each cpu
-SEC("perf_events")
+SEC("perf_event")
int BPF_PROG(on_cgrp_switch)
{
return bperf_cgroup_count();
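
The corrected SEC("perf_event") name is what lets libbpf treat on_cgrp_switch as a perf-event-attachable program. Per the comment, the tool opens a cgroup-switches software event on every CPU and attaches the program to each; a rough sketch of that attach loop (error handling trimmed, skeleton name assumed as in the first sketch):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <bpf/libbpf.h>

/* sketch: attach on_cgrp_switch to a cgroup-switches event on each CPU */
static int attach_cgroup_switches(struct bperf_cgroup_bpf *skel, int ncpus)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.size = sizeof(attr),
		/* requires a kernel that exposes this software event */
		.config = PERF_COUNT_SW_CGROUP_SWITCHES,
	};
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		int fd = syscall(SYS_perf_event_open, &attr, -1 /* any task */,
				 cpu, -1 /* no group */, 0);

		if (fd < 0)
			return -1;
		if (!bpf_program__attach_perf_event(skel->progs.on_cgrp_switch, fd))
			return -1;
	}
	return 0;
}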