author		Toke Høiland-Jørgensen <toke@redhat.com>	2020-03-29 15:22:52 +0200
committer	Daniel Borkmann <daniel@iogearbox.net>		2020-03-30 01:17:05 +0200
commit		e2842be53d4f31962a9992eab39391cdf637fa2e (patch)
tree		9c448a2ad2a0e7dc392015e97a0fbd247dd6a7b6 /tools/lib/bpf/libbpf.c
parent		5a95cbb80ef8d8f2db29ab10777cd4742e6fc8ec (diff)
libbpf: Add setter for initial value for internal maps
For internal maps (most notably the maps backing global variables), libbpf
uses an internal mmaped area to store the data after opening the object.
This data is subsequently copied into the kernel map when the object is
loaded.

This adds a function to set a new value for that data, which can be used
to change the data before the object is loaded into the kernel. This is
especially relevant for RODATA maps, since those are frozen on load.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20200329132253.232541-1-toke@redhat.com
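For context, a minimal userspace sketch of how the new setter might be used
(not part of the commit; the object path, the map name "my_obj.rodata", and
the struct layout are assumptions for illustration):

#include <errno.h>
#include <bpf/libbpf.h>

/* Must match the BPF object's .rodata layout exactly: the setter
 * rejects any size mismatch (size != value_size). */
struct my_rodata {
	int max_pkt_len;	/* hypothetical const global in the object */
};

int override_rodata(const char *path)
{
	struct my_rodata data = { .max_pkt_len = 128 };
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	obj = bpf_object__open(path);
	err = libbpf_get_error(obj);
	if (err)
		return err;

	/* Internal maps are named "<object>.rodata"; the exact name used
	 * here is assumed. */
	map = bpf_object__find_map_by_name(obj, "my_obj.rodata");
	if (!map) {
		err = -ENOENT;
		goto out;
	}

	/* Only valid after open and before load: once the map has a
	 * kernel fd (map->fd >= 0), the setter returns -EINVAL. */
	err = bpf_map__set_initial_value(map, &data, sizeof(data));
	if (err)
		goto out;

	err = bpf_object__load(obj);
out:
	bpf_object__close(obj);
	return err;
}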
Diffstat (limited to 'tools/lib/bpf/libbpf.c')
-rw-r--r--	tools/lib/bpf/libbpf.c	11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 62903302935e..7deab98720ee 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -6758,6 +6758,17 @@ void *bpf_map__priv(const struct bpf_map *map)
 	return map ? map->priv : ERR_PTR(-EINVAL);
 }
 
+int bpf_map__set_initial_value(struct bpf_map *map,
+			       const void *data, size_t size)
+{
+	if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
+	    size != map->def.value_size || map->fd >= 0)
+		return -EINVAL;
+
+	memcpy(map->mmaped, data, size);
+	return 0;
+}
+
 bool bpf_map__is_offload_neutral(const struct bpf_map *map)
 {
 	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
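
For reference, a sketch of the BPF-program side this setter targets (an
assumed example, not part of the patch, pairing with the userspace sketch
above): a const global is placed in the object's .rodata internal map,
which is exactly the kind of map whose initial value can now be replaced
between open and load.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Lives in the object's .rodata map; frozen once the object is loaded,
 * so it can only be changed between bpf_object__open() and
 * bpf_object__load(), e.g. via bpf_map__set_initial_value(). */
const volatile int max_pkt_len = 1500;

SEC("xdp")
int drop_long_packets(struct xdp_md *ctx)
{
	int pkt_len = ctx->data_end - ctx->data;

	return pkt_len > max_pkt_len ? XDP_DROP : XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";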