Diffstat (limited to 'tools/lib/bpf/bpf.c')
-rw-r--r--	tools/lib/bpf/bpf.c	296
1 file changed, 246 insertions(+), 50 deletions(-)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index ed86b37d8024..b66f5fbfbbb2 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -103,9 +103,9 @@ int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
* [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
* [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
*/
-int probe_memcg_account(void)
+int probe_memcg_account(int token_fd)
{
- const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
struct bpf_insn insns[] = {
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
BPF_EXIT_INSN(),
@@ -120,6 +120,9 @@ int probe_memcg_account(void)
attr.insns = ptr_to_u64(insns);
attr.insn_cnt = insn_cnt;
attr.license = ptr_to_u64("GPL");
+ attr.prog_token_fd = token_fd;
+ if (token_fd)
+ attr.prog_flags |= BPF_F_TOKEN_FD;
prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
if (prog_fd >= 0) {
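The probe encodes the kernel-side contract for tokens: prog_token_fd is only consumed when BPF_F_TOKEN_FD is set in prog_flags, so fd 0 simply means "no token". A minimal caller-side sketch of the same contract (token_fd is a hypothetical descriptor, e.g. from bpf_token_create() further down in this diff):

    union bpf_attr attr = {};
    /* insns, insn_cnt, license set up as in the probe above */
    attr.prog_token_fd = token_fd;             /* ignored by the kernel unless... */
    if (token_fd)
            attr.prog_flags |= BPF_F_TOKEN_FD; /* ...this flag requests token use */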
@@ -146,12 +149,12 @@ int bump_rlimit_memlock(void)
struct rlimit rlim;
/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
- if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
+ if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
return 0;
memlock_bumped = true;
- /* zero memlock_rlim_max disables auto-bumping RLIMIT_MEMLOCK */
+ /* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
if (memlock_rlim == 0)
return 0;
@@ -169,7 +172,7 @@ int bpf_map_create(enum bpf_map_type map_type,
__u32 max_entries,
const struct bpf_map_create_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
+ const size_t attr_sz = offsetofend(union bpf_attr, excl_prog_hash_size);
union bpf_attr attr;
int fd;
@@ -181,7 +184,7 @@ int bpf_map_create(enum bpf_map_type map_type,
return libbpf_err(-EINVAL);
attr.map_type = map_type;
- if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.key_size = key_size;
attr.value_size = value_size;
@@ -191,6 +194,7 @@ int bpf_map_create(enum bpf_map_type map_type,
attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
+ attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);
attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
attr.map_flags = OPTS_GET(opts, map_flags, 0);
@@ -198,6 +202,10 @@ int bpf_map_create(enum bpf_map_type map_type,
attr.numa_node = OPTS_GET(opts, numa_node, 0);
attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
+ attr.map_token_fd = OPTS_GET(opts, token_fd, 0);
+ attr.excl_prog_hash = ptr_to_u64(OPTS_GET(opts, excl_prog_hash, NULL));
+ attr.excl_prog_hash_size = OPTS_GET(opts, excl_prog_hash_size, 0);
+
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
return libbpf_err_errno(fd);
}
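A minimal usage sketch of the extended map-creation opts; token_fd is hypothetical, and BPF_F_TOKEN_FD in map_flags is what makes the kernel consume map_token_fd (the exclusive-program hash fields can stay zeroed when unused):

    LIBBPF_OPTS(bpf_map_create_opts, opts,
            .map_flags = BPF_F_TOKEN_FD,
            .token_fd  = token_fd,
    );
    int map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "demo_map",
                                sizeof(__u32), sizeof(__u64), 128, &opts);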
@@ -232,7 +240,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
+ const size_t attr_sz = offsetofend(union bpf_attr, keyring_id);
void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info;
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -261,8 +269,9 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
attr.kern_version = OPTS_GET(opts, kern_version, 0);
+ attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);
- if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
attr.license = ptr_to_u64(license);
@@ -304,6 +313,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);
attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
+ attr.fd_array_cnt = OPTS_GET(opts, fd_array_cnt, 0);
if (log_level) {
attr.log_buf = ptr_to_u64(log_buf);
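Sketch of a token-scoped load through the public wrapper, using a trivial "return 0" program built by hand so only <linux/bpf.h> definitions are needed (token_fd hypothetical):

    struct bpf_insn insns[] = {
            { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
            { .code = BPF_JMP | BPF_EXIT },
    };
    LIBBPF_OPTS(bpf_prog_load_opts, opts,
            .prog_flags = BPF_F_TOKEN_FD,
            .token_fd   = token_fd,
    );
    int prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "demo", "GPL",
                                insns, sizeof(insns) / sizeof(insns[0]), &opts);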
@@ -629,55 +639,89 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
}
-int bpf_prog_attach_opts(int prog_fd, int target_fd,
- enum bpf_attach_type type,
- const struct bpf_prog_attach_opts *opts)
+int bpf_prog_attach_opts(int prog_fd, int target, enum bpf_attach_type type,
+ const struct bpf_prog_attach_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
+ const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
+ __u32 relative_id, flags;
+ int ret, relative_fd;
union bpf_attr attr;
- int ret;
if (!OPTS_VALID(opts, bpf_prog_attach_opts))
return libbpf_err(-EINVAL);
+ relative_id = OPTS_GET(opts, relative_id, 0);
+ relative_fd = OPTS_GET(opts, relative_fd, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ /* validate we don't have unexpected combinations of non-zero fields */
+ if (relative_fd && relative_id)
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, attr_sz);
- attr.target_fd = target_fd;
- attr.attach_bpf_fd = prog_fd;
- attr.attach_type = type;
- attr.attach_flags = OPTS_GET(opts, flags, 0);
- attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);
+ attr.target_fd = target;
+ attr.attach_bpf_fd = prog_fd;
+ attr.attach_type = type;
+ attr.replace_bpf_fd = OPTS_GET(opts, replace_fd, 0);
+ attr.expected_revision = OPTS_GET(opts, expected_revision, 0);
+
+ if (relative_id) {
+ attr.attach_flags = flags | BPF_F_ID;
+ attr.relative_id = relative_id;
+ } else {
+ attr.attach_flags = flags;
+ attr.relative_fd = relative_fd;
+ }
ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
return libbpf_err_errno(ret);
}
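The new fields follow the multi-prog (mprog) convention: at most one of relative_fd/relative_id may be set, the id form implies BPF_F_ID, and ordering is requested through flags. A hedged sketch of an ordered tcx attachment (prog_fd, other_prog_fd and ifindex are hypothetical):

    LIBBPF_OPTS(bpf_prog_attach_opts, opts,
            .flags       = BPF_F_BEFORE,    /* insert ahead of... */
            .relative_fd = other_prog_fd,   /* ...this already-attached program */
    );
    int err = bpf_prog_attach_opts(prog_fd, ifindex, BPF_TCX_INGRESS, &opts);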
-int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
+int bpf_prog_detach_opts(int prog_fd, int target, enum bpf_attach_type type,
+ const struct bpf_prog_detach_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
+ const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
+ __u32 relative_id, flags;
+ int ret, relative_fd;
union bpf_attr attr;
- int ret;
+
+ if (!OPTS_VALID(opts, bpf_prog_detach_opts))
+ return libbpf_err(-EINVAL);
+
+ relative_id = OPTS_GET(opts, relative_id, 0);
+ relative_fd = OPTS_GET(opts, relative_fd, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ /* validate we don't have unexpected combinations of non-zero fields */
+ if (relative_fd && relative_id)
+ return libbpf_err(-EINVAL);
memset(&attr, 0, attr_sz);
- attr.target_fd = target_fd;
- attr.attach_type = type;
+ attr.target_fd = target;
+ attr.attach_bpf_fd = prog_fd;
+ attr.attach_type = type;
+ attr.expected_revision = OPTS_GET(opts, expected_revision, 0);
+
+ if (relative_id) {
+ attr.attach_flags = flags | BPF_F_ID;
+ attr.relative_id = relative_id;
+ } else {
+ attr.attach_flags = flags;
+ attr.relative_fd = relative_fd;
+ }
ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
return libbpf_err_errno(ret);
}
-int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
+int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
- const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
- union bpf_attr attr;
- int ret;
-
- memset(&attr, 0, attr_sz);
- attr.target_fd = target_fd;
- attr.attach_bpf_fd = prog_fd;
- attr.attach_type = type;
+ return bpf_prog_detach_opts(0, target_fd, type, NULL);
+}
- ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
- return libbpf_err_errno(ret);
+int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
+{
+ return bpf_prog_detach_opts(prog_fd, target_fd, type, NULL);
}
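Detach gains the same opts, so a caller can pin the attachment-list revision it observed and have the detach fail instead of racing a concurrent updater (values hypothetical; the revision would typically come from bpf_prog_query_opts() below):

    LIBBPF_OPTS(bpf_prog_detach_opts, opts,
            .expected_revision = revision,
    );
    int err = bpf_prog_detach_opts(prog_fd, ifindex, BPF_TCX_INGRESS, &opts);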
int bpf_link_create(int prog_fd, int target_fd,
@@ -685,9 +729,9 @@ int bpf_link_create(int prog_fd, int target_fd,
enum bpf_attach_type attach_type,
const struct bpf_link_create_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, link_create);
- __u32 target_btf_id, iter_info_len;
+ __u32 target_btf_id, iter_info_len, relative_id;
+ int fd, err, relative_fd;
union bpf_attr attr;
- int fd, err;
if (!OPTS_VALID(opts, bpf_link_create_opts))
return libbpf_err(-EINVAL);
@@ -725,6 +769,7 @@ int bpf_link_create(int prog_fd, int target_fd,
return libbpf_err(-EINVAL);
break;
case BPF_TRACE_KPROBE_MULTI:
+ case BPF_TRACE_KPROBE_SESSION:
attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
@@ -733,6 +778,19 @@ int bpf_link_create(int prog_fd, int target_fd,
if (!OPTS_ZEROED(opts, kprobe_multi))
return libbpf_err(-EINVAL);
break;
+ case BPF_TRACE_UPROBE_MULTI:
+ case BPF_TRACE_UPROBE_SESSION:
+ attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
+ attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
+ attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
+ attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0));
+ attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0));
+ attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0));
+ attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0);
+ if (!OPTS_ZEROED(opts, uprobe_multi))
+ return libbpf_err(-EINVAL);
+ break;
+ case BPF_TRACE_RAW_TP:
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
case BPF_MODIFY_RETURN:
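Sketch of creating a multi-uprobe link through the new opts (binary path and offsets are hypothetical; a session program would pass BPF_TRACE_UPROBE_SESSION instead):

    unsigned long offsets[] = { 0x1234, 0x5678 };   /* hypothetical ELF offsets */
    LIBBPF_OPTS(bpf_link_create_opts, opts,
            .uprobe_multi.path    = "/usr/bin/app", /* hypothetical binary */
            .uprobe_multi.offsets = offsets,
            .uprobe_multi.cnt     = 2,
    );
    int link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);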
@@ -741,6 +799,90 @@ int bpf_link_create(int prog_fd, int target_fd,
if (!OPTS_ZEROED(opts, tracing))
return libbpf_err(-EINVAL);
break;
+ case BPF_NETFILTER:
+ attr.link_create.netfilter.pf = OPTS_GET(opts, netfilter.pf, 0);
+ attr.link_create.netfilter.hooknum = OPTS_GET(opts, netfilter.hooknum, 0);
+ attr.link_create.netfilter.priority = OPTS_GET(opts, netfilter.priority, 0);
+ attr.link_create.netfilter.flags = OPTS_GET(opts, netfilter.flags, 0);
+ if (!OPTS_ZEROED(opts, netfilter))
+ return libbpf_err(-EINVAL);
+ break;
+ case BPF_TCX_INGRESS:
+ case BPF_TCX_EGRESS:
+ relative_fd = OPTS_GET(opts, tcx.relative_fd, 0);
+ relative_id = OPTS_GET(opts, tcx.relative_id, 0);
+ if (relative_fd && relative_id)
+ return libbpf_err(-EINVAL);
+ if (relative_id) {
+ attr.link_create.tcx.relative_id = relative_id;
+ attr.link_create.flags |= BPF_F_ID;
+ } else {
+ attr.link_create.tcx.relative_fd = relative_fd;
+ }
+ attr.link_create.tcx.expected_revision = OPTS_GET(opts, tcx.expected_revision, 0);
+ if (!OPTS_ZEROED(opts, tcx))
+ return libbpf_err(-EINVAL);
+ break;
+ case BPF_NETKIT_PRIMARY:
+ case BPF_NETKIT_PEER:
+ relative_fd = OPTS_GET(opts, netkit.relative_fd, 0);
+ relative_id = OPTS_GET(opts, netkit.relative_id, 0);
+ if (relative_fd && relative_id)
+ return libbpf_err(-EINVAL);
+ if (relative_id) {
+ attr.link_create.netkit.relative_id = relative_id;
+ attr.link_create.flags |= BPF_F_ID;
+ } else {
+ attr.link_create.netkit.relative_fd = relative_fd;
+ }
+ attr.link_create.netkit.expected_revision = OPTS_GET(opts, netkit.expected_revision, 0);
+ if (!OPTS_ZEROED(opts, netkit))
+ return libbpf_err(-EINVAL);
+ break;
+ case BPF_CGROUP_INET_INGRESS:
+ case BPF_CGROUP_INET_EGRESS:
+ case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET_SOCK_RELEASE:
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
+ case BPF_CGROUP_UNIX_CONNECT:
+ case BPF_CGROUP_INET4_GETPEERNAME:
+ case BPF_CGROUP_INET6_GETPEERNAME:
+ case BPF_CGROUP_UNIX_GETPEERNAME:
+ case BPF_CGROUP_INET4_GETSOCKNAME:
+ case BPF_CGROUP_INET6_GETSOCKNAME:
+ case BPF_CGROUP_UNIX_GETSOCKNAME:
+ case BPF_CGROUP_UDP4_SENDMSG:
+ case BPF_CGROUP_UDP6_SENDMSG:
+ case BPF_CGROUP_UNIX_SENDMSG:
+ case BPF_CGROUP_UDP4_RECVMSG:
+ case BPF_CGROUP_UDP6_RECVMSG:
+ case BPF_CGROUP_UNIX_RECVMSG:
+ case BPF_CGROUP_SOCK_OPS:
+ case BPF_CGROUP_DEVICE:
+ case BPF_CGROUP_SYSCTL:
+ case BPF_CGROUP_GETSOCKOPT:
+ case BPF_CGROUP_SETSOCKOPT:
+ case BPF_LSM_CGROUP:
+ relative_fd = OPTS_GET(opts, cgroup.relative_fd, 0);
+ relative_id = OPTS_GET(opts, cgroup.relative_id, 0);
+ if (relative_fd && relative_id)
+ return libbpf_err(-EINVAL);
+ if (relative_id) {
+ attr.link_create.cgroup.relative_id = relative_id;
+ attr.link_create.flags |= BPF_F_ID;
+ } else {
+ attr.link_create.cgroup.relative_fd = relative_fd;
+ }
+ attr.link_create.cgroup.expected_revision =
+ OPTS_GET(opts, cgroup.expected_revision, 0);
+ if (!OPTS_ZEROED(opts, cgroup))
+ return libbpf_err(-EINVAL);
+ break;
default:
if (!OPTS_ZEROED(opts, flags))
return libbpf_err(-EINVAL);
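All three new link families (tcx, netkit, cgroup) share the same mprog pattern: at most one of relative_fd/relative_id, BPF_F_ID implied for the id form, plus an optional expected_revision. For example, an ordered cgroup link (all fds hypothetical):

    LIBBPF_OPTS(bpf_link_create_opts, opts,
            .flags              = BPF_F_BEFORE, /* mprog ordering flag */
            .cgroup.relative_fd = other_prog_fd,
    );
    int link_fd = bpf_link_create(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, &opts);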
@@ -833,8 +975,7 @@ int bpf_iter_create(int link_fd)
return libbpf_err_errno(fd);
}
-int bpf_prog_query_opts(int target_fd,
- enum bpf_attach_type type,
+int bpf_prog_query_opts(int target, enum bpf_attach_type type,
struct bpf_prog_query_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, query);
@@ -845,18 +986,20 @@ int bpf_prog_query_opts(int target_fd,
return libbpf_err(-EINVAL);
memset(&attr, 0, attr_sz);
-
- attr.query.target_fd = target_fd;
- attr.query.attach_type = type;
- attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
- attr.query.prog_cnt = OPTS_GET(opts, prog_cnt, 0);
- attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
- attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
+ attr.query.target_fd = target;
+ attr.query.attach_type = type;
+ attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
+ attr.query.count = OPTS_GET(opts, count, 0);
+ attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
+ attr.query.link_ids = ptr_to_u64(OPTS_GET(opts, link_ids, NULL));
+ attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
+ attr.query.link_attach_flags = ptr_to_u64(OPTS_GET(opts, link_attach_flags, NULL));
ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);
OPTS_SET(opts, attach_flags, attr.query.attach_flags);
- OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);
+ OPTS_SET(opts, revision, attr.query.revision);
+ OPTS_SET(opts, count, attr.query.count);
return libbpf_err_errno(ret);
}
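count is in/out: capacity on entry, number of filled entries on return; the reported revision can feed the expected_revision fields used above. A query sketch (target hypothetical):

    __u32 prog_ids[16];
    LIBBPF_OPTS(bpf_prog_query_opts, opts,
            .prog_ids = prog_ids,
            .count    = 16,
    );
    int err = bpf_prog_query_opts(ifindex, BPF_TCX_INGRESS, &opts);
    /* on success: opts.count ids are valid, opts.revision reflects the list */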
@@ -1000,7 +1143,7 @@ int bpf_map_get_fd_by_id(__u32 id)
int bpf_btf_get_fd_by_id_opts(__u32 id,
const struct bpf_get_fd_by_id_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
+ const size_t attr_sz = offsetofend(union bpf_attr, fd_by_id_token_fd);
union bpf_attr attr;
int fd;
@@ -1010,6 +1153,7 @@ int bpf_btf_get_fd_by_id_opts(__u32 id,
memset(&attr, 0, attr_sz);
attr.btf_id = id;
attr.open_flags = OPTS_GET(opts, open_flags, 0);
+ attr.fd_by_id_token_fd = OPTS_GET(opts, token_fd, 0);
fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
return libbpf_err_errno(fd);
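Usage sketch for the token-aware lookup; token_fd is hypothetical and 0 preserves the old behavior:

    LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts,
            .token_fd = token_fd,
    );
    int btf_fd = bpf_btf_get_fd_by_id_opts(btf_id, &opts);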
@@ -1080,23 +1224,34 @@ int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info
return bpf_obj_get_info_by_fd(link_fd, info, info_len);
}
-int bpf_raw_tracepoint_open(const char *name, int prog_fd)
+int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
union bpf_attr attr;
int fd;
+ if (!OPTS_VALID(opts, bpf_raw_tp_opts))
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, attr_sz);
- attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;
+ attr.raw_tracepoint.name = ptr_to_u64(OPTS_GET(opts, tp_name, NULL));
+ attr.raw_tracepoint.cookie = OPTS_GET(opts, cookie, 0);
fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
return libbpf_err_errno(fd);
}
+int bpf_raw_tracepoint_open(const char *name, int prog_fd)
+{
+ LIBBPF_OPTS(bpf_raw_tp_opts, opts, .tp_name = name);
+
+ return bpf_raw_tracepoint_open_opts(prog_fd, &opts);
+}
+
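Sketch of the opts variant with a cookie attached (cookie value hypothetical; a program can read it back via the bpf_get_attach_cookie() helper):

    LIBBPF_OPTS(bpf_raw_tp_opts, opts,
            .tp_name = "sched_switch",
            .cookie  = 0xcafe,
    );
    int fd = bpf_raw_tracepoint_open_opts(prog_fd, &opts);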
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
+ const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
union bpf_attr attr;
char *log_buf;
size_t log_size;
@@ -1121,6 +1276,10 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts
attr.btf = ptr_to_u64(btf_data);
attr.btf_size = btf_size;
+
+ attr.btf_flags = OPTS_GET(opts, btf_flags, 0);
+ attr.btf_token_fd = OPTS_GET(opts, token_fd, 0);
+
/* log_level == 0 and log_buf != NULL means "try loading without
* log_buf, but retry with log_buf and log_level=1 on error", which is
* consistent across low-level and high-level BTF and program loading
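As with maps and programs, the token only takes effect when BPF_F_TOKEN_FD is set, here in btf_flags. Sketch (raw BTF buffer and token_fd hypothetical):

    LIBBPF_OPTS(bpf_btf_load_opts, opts,
            .btf_flags = BPF_F_TOKEN_FD,
            .token_fd  = token_fd,
    );
    int btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);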
@@ -1201,3 +1360,40 @@ int bpf_prog_bind_map(int prog_fd, int map_fd,
ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
return libbpf_err_errno(ret);
}
+
+int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, token_create);
+ union bpf_attr attr;
+ int fd;
+
+ if (!OPTS_VALID(opts, bpf_token_create_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+ attr.token_create.bpffs_fd = bpffs_fd;
+ attr.token_create.flags = OPTS_GET(opts, flags, 0);
+
+ fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
+ return libbpf_err_errno(fd);
+}
+
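Usage sketch: bpffs_fd must reference the root of a bpffs instance mounted with delegation options (delegate_cmds and friends); NULL opts are accepted. The path and mount setup are assumptions:

    int bpffs_fd = open("/sys/fs/bpf", O_RDONLY); /* assumed delegating mount */
    int token_fd = bpf_token_create(bpffs_fd, NULL);
    if (token_fd < 0)
            ; /* old kernel, or no delegate_* options on the mount */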
+int bpf_prog_stream_read(int prog_fd, __u32 stream_id, void *buf, __u32 buf_len,
+ struct bpf_prog_stream_read_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_stream_read);
+ union bpf_attr attr;
+ int err;
+
+ if (!OPTS_VALID(opts, bpf_prog_stream_read_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+ attr.prog_stream_read.stream_buf = ptr_to_u64(buf);
+ attr.prog_stream_read.stream_buf_len = buf_len;
+ attr.prog_stream_read.stream_id = stream_id;
+ attr.prog_stream_read.prog_fd = prog_fd;
+
+ err = sys_bpf(BPF_PROG_STREAM_READ_BY_FD, &attr, attr_sz);
+ return libbpf_err_errno(err);
+}
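Draining a program's output stream; the stream id constant (BPF_STDERR here) is assumed from the kernel UAPI that accompanies BPF_PROG_STREAM_READ_BY_FD, and prog_fd is hypothetical:

    char buf[4096];
    int n = bpf_prog_stream_read(prog_fd, BPF_STDERR, buf, sizeof(buf), NULL);
    if (n > 0)
            fwrite(buf, 1, n, stderr); /* forward whatever the program emitted */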