#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>

#include <bpf/libbpf.h>
#include <internal/xyarray.h>

#include "util/debug.h"
#include "util/evlist.h"
#include "util/parse-events.h"
#include "util/trace_augment.h"

#include "bpf_skel/augmented_raw_syscalls.skel.h"

/* Handle to the augmented_raw_syscalls BPF skeleton, set by prepare(). */
static struct augmented_raw_syscalls_bpf *skel;
/* The "__augmented_syscalls__" bpf-output event, set by create_bpf_output(). */
static struct evsel *bpf_output;
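
/*
 * Open and load the augmented_raw_syscalls skeleton, keeping auto-attach
 * enabled only for the sys_enter and sys_exit entry points, then attach
 * it.  Returns 0 on success or a negative error from open/load.
 */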
int augmented_syscalls__prepare(void)
{
	struct bpf_program *prog;
	char buf[128];
	int err;

	skel = augmented_raw_syscalls_bpf__open();
	if (!skel) {
		pr_debug("Failed to open augmented syscalls BPF skeleton\n");
		return -errno;
	}

	/*
	 * Disable auto-attach for all BPF programs except sys_enter and
	 * sys_exit: the others are only reached via tail calls from those
	 * two entry points as necessary.
	 */
	bpf_object__for_each_program(prog, skel->obj) {
		if (prog != skel->progs.sys_enter && prog != skel->progs.sys_exit)
			bpf_program__set_autoattach(prog, /*autoattach=*/false);
	}

	err = augmented_raw_syscalls_bpf__load(skel);
	if (err < 0) {
		libbpf_strerror(err, buf, sizeof(buf));
		pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", buf);
		return err;
	}

	augmented_raw_syscalls_bpf__attach(skel);
	return 0;
}
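
/*
 * Add a dedicated "bpf-output" event named "__augmented_syscalls__" to the
 * evlist and remember it, so setup_bpf_output() can later wire its per-CPU
 * file descriptors into the BPF map of the same name.
 */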
int augmented_syscalls__create_bpf_output(struct evlist *evlist)
{
	int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");

	if (err) {
		pr_err("ERROR: Setup BPF output event failed: %d\n", err);
		return err;
	}

	bpf_output = evlist__last(evlist);
	assert(evsel__name_is(bpf_output, "__augmented_syscalls__"));

	return 0;
}
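
/*
 * Point the __augmented_syscalls__ BPF map at the bpf-output event: one
 * entry per CPU, holding that CPU's event file descriptor.  A no-op when
 * create_bpf_output() has not run.
 */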
void augmented_syscalls__setup_bpf_output(void)
{
	struct perf_cpu cpu;
	int i;

	if (bpf_output == NULL)
		return;

	/*
	 * Set up the __augmented_syscalls__ BPF map to hold for each
	 * CPU the bpf-output event's file descriptor.
	 */
	perf_cpu_map__for_each_cpu(cpu, i, bpf_output->core.cpus) {
		int mycpu = cpu.cpu;

		bpf_map__update_elem(skel->maps.__augmented_syscalls__,
				     &mycpu, sizeof(mycpu),
				     xyarray__entry(bpf_output->core.fd, mycpu, 0),
				     sizeof(__u32), BPF_ANY);
	}
}
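
/*
 * Mark the given PIDs in the pids_filtered map so the BPF programs skip
 * events for them (presumably the tracer's own threads).  Returns the
 * first map-update error, or 0, also when no skeleton is loaded.
 */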
int augmented_syscalls__set_filter_pids(unsigned int nr, pid_t *pids)
{
	bool value = true;
	int err = 0;

	if (skel == NULL)
		return 0;

	for (size_t i = 0; i < nr; ++i) {
		err = bpf_map__update_elem(skel->maps.pids_filtered, &pids[i],
					   sizeof(*pids), &value, sizeof(value),
					   BPF_ANY);
		if (err)
			break;
	}

	return err;
}
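
/*
 * Expose the fds of the syscalls_sys_enter, syscalls_sys_exit and
 * beauty_map_enter maps so callers can populate them.  Returns -1 if the
 * skeleton is not loaded or any of the fds is invalid.
 */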
int augmented_syscalls__get_map_fds(int *enter_fd, int *exit_fd, int *beauty_fd)
{
	if (skel == NULL)
		return -1;

	*enter_fd = bpf_map__fd(skel->maps.syscalls_sys_enter);
	*exit_fd = bpf_map__fd(skel->maps.syscalls_sys_exit);
	*beauty_fd = bpf_map__fd(skel->maps.beauty_map_enter);

	if (*enter_fd < 0 || *exit_fd < 0 || *beauty_fd < 0) {
		pr_err("Error: failed to get syscall or beauty map fd\n");
		return -1;
	}

	return 0;
}
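
/*
 * Return the fallback program used for syscalls that have no dedicated
 * augmenter.  Note that skel is dereferenced unconditionally: only call
 * this after a successful augmented_syscalls__prepare().
 */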
struct bpf_program *augmented_syscalls__unaugmented(void)
{
	return skel->progs.syscall_unaugmented;
}
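
/*
 * Find a BPF program in the skeleton by its ELF section name, or return
 * NULL when there is no match.
 */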
struct bpf_program *augmented_syscalls__find_by_title(const char *name)
{
	struct bpf_program *pos;
	const char *sec_name;

	if (skel->obj == NULL)
		return NULL;

	bpf_object__for_each_program(pos, skel->obj) {
		sec_name = bpf_program__section_name(pos);
		if (sec_name && !strcmp(sec_name, name))
			return pos;
	}

	return NULL;
}
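
/*
 * Tear down the skeleton.  The generated __destroy() helper tolerates a
 * NULL pointer, so this is safe even if prepare() never ran.
 */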
void augmented_syscalls__cleanup(void)
{
	augmented_raw_syscalls_bpf__destroy(skel);
}
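
/*
 * Typical call sequence, as a sketch inferred from the helpers above; the
 * real call sites live in perf trace and may order things differently.
 * "evlist", "nr_pids" and "pids" are hypothetical locals of the caller:
 *
 *	if (augmented_syscalls__prepare() == 0) {
 *		augmented_syscalls__create_bpf_output(evlist);
 *		... open the evlist so the event gets its per-CPU fds ...
 *		augmented_syscalls__setup_bpf_output();
 *		augmented_syscalls__set_filter_pids(nr_pids, pids);
 *	}
 *	... run the tracing session ...
 *	augmented_syscalls__cleanup();
 */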