From 55b244221c3f17eb2ed51c8e39e4a01c523e4eee Mon Sep 17 00:00:00 2001
From: Jean-Philippe Brucker
Date: Fri, 10 Jul 2020 17:04:40 +0200
Subject: selftests/bpf: Fix cgroup sockopt verifier test

Since the BPF_PROG_TYPE_CGROUP_SOCKOPT verifier test does not set an
attach type, bpf_prog_load_check_attach() disallows loading the program
and the test is always skipped:

  #434/p perfevent for cgroup sockopt SKIP (unsupported program type 25)

Fix the issue by setting a valid attach type.

Fixes: 0456ea170cd6 ("bpf: Enable more helpers for BPF_PROG_TYPE_CGROUP_{DEVICE,SYSCTL,SOCKOPT}")
Signed-off-by: Jean-Philippe Brucker
Signed-off-by: Daniel Borkmann
Reviewed-by: Jakub Sitnicki
Link: https://lore.kernel.org/bpf/20200710150439.126627-1-jean-philippe@linaro.org
---
 tools/testing/selftests/bpf/verifier/event_output.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tools/testing/selftests/bpf/verifier/event_output.c b/tools/testing/selftests/bpf/verifier/event_output.c
index 99f8f582c02b..c5e805980409 100644
--- a/tools/testing/selftests/bpf/verifier/event_output.c
+++ b/tools/testing/selftests/bpf/verifier/event_output.c
@@ -112,6 +112,7 @@
 	"perfevent for cgroup sockopt",
 	.insns = { __PERF_EVENT_INSNS__ },
 	.prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT,
+	.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
 	.fixup_map_event_output = { 4 },
 	.result = ACCEPT,
 	.retval = 1,
-- cgit

From 5b801dfb7feb2738975d80223efc2fc193e55573 Mon Sep 17 00:00:00 2001
From: Peilin Ye
Date: Tue, 14 Jul 2020 14:09:04 -0400
Subject: bpf: Fix NULL pointer dereference in __btf_resolve_helper_id()

Prevent __btf_resolve_helper_id() from dereferencing `btf_vmlinux`
as NULL. This patch fixes the following syzbot bug:

https://syzkaller.appspot.com/bug?id=f823224ada908fa5c207902a5a62065e53ca0fcc

Reported-by: syzbot+ee09bda7017345f1fbe6@syzkaller.appspotmail.com
Signed-off-by: Peilin Ye
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/20200714180904.277512-1-yepeilin.cs@gmail.com
---
 kernel/bpf/btf.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 9a1a98dd9e97..0443600146dc 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -4058,6 +4058,11 @@ static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn,
 	const char *tname, *sym;
 	u32 btf_id, i;
 
+	if (!btf_vmlinux) {
+		bpf_log(log, "btf_vmlinux doesn't exist\n");
+		return -EINVAL;
+	}
+
 	if (IS_ERR(btf_vmlinux)) {
 		bpf_log(log, "btf_vmlinux is malformed\n");
 		return -EINVAL;
-- cgit
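A note on why the existing IS_ERR() check did not catch this: under the kernel's err.h convention, IS_ERR() only flags pointers that encode an errno value in the top page of the address space, so a NULL btf_vmlinux (vmlinux BTF never loaded) passes it untouched, and the explicit !btf_vmlinux check has to come first. The following is a minimal userspace sketch of that convention, written for illustration only (the constants mirror include/linux/err.h):

#include <stdio.h>

/* Userspace mimic of include/linux/err.h, for illustration only. */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static int IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

int main(void)
{
	void *null_ptr = NULL;
	void *err_ptr = (void *)(long)-22;	/* ERR_PTR(-EINVAL) */

	/* NULL is not in the [-MAX_ERRNO, -1] range, so IS_ERR() misses it. */
	printf("IS_ERR(NULL)    = %d\n", IS_ERR(null_ptr));	/* prints 0 */
	printf("IS_ERR(-EINVAL) = %d\n", IS_ERR(err_ptr));	/* prints 1 */
	return 0;
}

(The kernel's IS_ERR_OR_NULL() would cover both cases in one test, but the patch keeps the checks separate so each failure mode gets its own log message.)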
From 1d4e1eab456e1ee92a94987499b211db05f900ea Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Tue, 28 Jul 2020 21:09:12 -0700
Subject: bpf: Fix map leak in HASH_OF_MAPS map

Fix HASH_OF_MAPS bug of not putting inner map pointer on
bpf_map_elem_update() operation. This is due to per-cpu extra_elems
optimization, which bypassed free_htab_elem() logic doing proper
clean ups.

Make sure that inner map is put properly in optimized case as well.

Fixes: 8c290e60fa2a ("bpf: fix hashmap extra_elems logic")
Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Acked-by: Song Liu
Link: https://lore.kernel.org/bpf/20200729040913.2815687-1-andriin@fb.com
---
 kernel/bpf/hashtab.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index b4b288a3c3c9..b32cc8ce8ff6 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -779,15 +779,20 @@ static void htab_elem_free_rcu(struct rcu_head *head)
 	htab_elem_free(htab, l);
 }
 
-static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
 {
 	struct bpf_map *map = &htab->map;
+	void *ptr;
 
 	if (map->ops->map_fd_put_ptr) {
-		void *ptr = fd_htab_map_get_ptr(map, l);
-
+		ptr = fd_htab_map_get_ptr(map, l);
 		map->ops->map_fd_put_ptr(ptr);
 	}
+}
+
+static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+{
+	htab_put_fd_value(htab, l);
 
 	if (htab_is_prealloc(htab)) {
 		__pcpu_freelist_push(&htab->freelist, &l->fnode);
@@ -839,6 +844,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 		 */
 		pl_new = this_cpu_ptr(htab->extra_elems);
 		l_new = *pl_new;
+		htab_put_fd_value(htab, old_elem);
 		*pl_new = old_elem;
 	} else {
 		struct pcpu_freelist_node *l;
-- cgit
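To make the leak concrete from the userspace side: with a preallocated HASH_OF_MAPS, the first update of a key inserts a fresh element, and every later update of the same key swaps the old element out through the per-cpu extra_elems fast path, which used to skip the map_fd_put_ptr() call and so kept a reference on the replaced inner map forever. Below is a hypothetical minimal reproducer sketch, not the one from the original report; it reuses the same era libbpf helpers (bpf_create_map()/bpf_create_map_in_map()) that the selftest following this patch also uses, and needs CAP_SYS_ADMIN to run:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	int inner_fd, outer_fd, key = 0, i;

	/* single-slot ARRAY (4-byte key/value) used as the inner map */
	inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
	if (inner_fd < 0)
		return 1;

	/* preallocated HASH_OF_MAPS with one int-keyed slot */
	outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_HASH_OF_MAPS, NULL,
					 sizeof(int), inner_fd, 1, 0);
	if (outer_fd < 0) {
		close(inner_fd);
		return 1;
	}

	/* iteration 0 inserts; iterations 1..4 replace the element via
	 * extra_elems and, before this fix, each one leaked a reference
	 * to the inner map */
	for (i = 0; i < 5; i++) {
		if (bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY)) {
			fprintf(stderr, "update #%d failed: %d\n", i, -errno);
			break;
		}
	}

	close(inner_fd);
	close(outer_fd);
	return 0;
}

After the fix, closing both FDs releases the inner map as well; before it, the inner map stayed alive (still resolvable by ID) even with all FDs closed, which is exactly what the extended selftest below detects.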
From 0ba58348414eb10249480635545758b40b3c33b6 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Tue, 28 Jul 2020 21:09:13 -0700
Subject: selftests/bpf: Extend map-in-map selftest to detect memory leaks

Add test validating that all inner maps are released properly after
skeleton is destroyed. To ensure determinism, trigger kernel-side
synchronize_rcu() before checking map existence by their IDs.

Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Acked-by: Song Liu
Link: https://lore.kernel.org/bpf/20200729040913.2815687-2-andriin@fb.com
---
 .../selftests/bpf/prog_tests/btf_map_in_map.c | 124 ++++++++++++++++++---
 1 file changed, 110 insertions(+), 14 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
index f7ee8fa377ad..6ccecbd39476 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
@@ -5,10 +5,60 @@
 
 #include "test_btf_map_in_map.skel.h"
 
+static int duration;
+
+static __u32 bpf_map_id(struct bpf_map *map)
+{
+	struct bpf_map_info info;
+	__u32 info_len = sizeof(info);
+	int err;
+
+	memset(&info, 0, info_len);
+	err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
+	if (err)
+		return 0;
+	return info.id;
+}
+
+/* Trigger synchronize_rcu() in kernel.
+ *
+ * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger
+ * synchronize_rcu() if looking up an existing non-NULL element or
+ * updating the map with a valid inner map FD. Use this fact to trigger
+ * synchronize_rcu(): create map-in-map, create a trivial ARRAY map,
+ * update map-in-map with ARRAY inner map. Then cleanup. At the end, at
+ * least one synchronize_rcu() would be called.
+ */
+static int kern_sync_rcu(void)
+{
+	int inner_map_fd, outer_map_fd, err, zero = 0;
+
+	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
+	if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
+		return -1;
+
+	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
+					     sizeof(int), inner_map_fd, 1, 0);
+	if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
+		close(inner_map_fd);
+		return -1;
+	}
+
+	err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
+	if (err)
+		err = -errno;
+	CHECK(err, "outer_map_update", "failed %d\n", err);
+	close(inner_map_fd);
+	close(outer_map_fd);
+	return err;
+}
+
 void test_btf_map_in_map(void)
 {
-	int duration = 0, err, key = 0, val;
-	struct test_btf_map_in_map* skel;
+	int err, key = 0, val, i;
+	struct test_btf_map_in_map *skel;
+	int outer_arr_fd, outer_hash_fd;
+	int fd, map1_fd, map2_fd, map1_id, map2_id;
 
 	skel = test_btf_map_in_map__open_and_load();
 	if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
@@ -18,32 +68,78 @@ void test_btf_map_in_map(void)
 	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
 		goto cleanup;
 
+	map1_fd = bpf_map__fd(skel->maps.inner_map1);
+	map2_fd = bpf_map__fd(skel->maps.inner_map2);
+	outer_arr_fd = bpf_map__fd(skel->maps.outer_arr);
+	outer_hash_fd = bpf_map__fd(skel->maps.outer_hash);
+
 	/* inner1 = input, inner2 = input + 1 */
-	val = bpf_map__fd(skel->maps.inner_map1);
-	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
-	val = bpf_map__fd(skel->maps.inner_map2);
-	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
+	map1_fd = bpf_map__fd(skel->maps.inner_map1);
+	bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0);
+	map2_fd = bpf_map__fd(skel->maps.inner_map2);
+	bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0);
 
 	skel->bss->input = 1;
 	usleep(1);
 
-	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
+	bpf_map_lookup_elem(map1_fd, &key, &val);
 	CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1);
-	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
+	bpf_map_lookup_elem(map2_fd, &key, &val);
 	CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2);
 
 	/* inner1 = input + 1, inner2 = input */
-	val = bpf_map__fd(skel->maps.inner_map2);
-	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
-	val = bpf_map__fd(skel->maps.inner_map1);
-	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
+	bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0);
+	bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0);
 
 	skel->bss->input = 3;
 	usleep(1);
 
-	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
+	bpf_map_lookup_elem(map1_fd, &key, &val);
 	CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
-	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
+	bpf_map_lookup_elem(map2_fd, &key, &val);
 	CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
 
+	for (i = 0; i < 5; i++) {
+		val = i % 2 ? map1_fd : map2_fd;
+		err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0);
+		if (CHECK_FAIL(err)) {
+			printf("failed to update hash_of_maps on iter #%d\n", i);
+			goto cleanup;
+		}
+		err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0);
+		if (CHECK_FAIL(err)) {
+			printf("failed to update array_of_maps on iter #%d\n", i);
+			goto cleanup;
+		}
+	}
+
+	map1_id = bpf_map_id(skel->maps.inner_map1);
+	map2_id = bpf_map_id(skel->maps.inner_map2);
+	CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
+	CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
+
+	test_btf_map_in_map__destroy(skel);
+	skel = NULL;
+
+	/* we need to either wait for or force synchronize_rcu(), before
+	 * checking for "still exists" condition, otherwise map could still be
+	 * resolvable by ID, causing false positives.
+	 *
+	 * Older kernels (5.8 and earlier) freed map only after two
+	 * synchronize_rcu()s, so trigger two, to be entirely sure.
+	 */
+	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+
+	fd = bpf_map_get_fd_by_id(map1_id);
+	if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
+		close(fd);
+		goto cleanup;
+	}
+	fd = bpf_map_get_fd_by_id(map2_id);
+	if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
+		close(fd);
+		goto cleanup;
+	}
+
 cleanup:
 	test_btf_map_in_map__destroy(skel);
 }
-- cgit

From 4f010246b4087ab931b060481014ec110e6a8a46 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 30 Jul 2020 18:09:00 +0200
Subject: net/bpfilter: Initialize pos in __bpfilter_process_sockopt

__bpfilter_process_sockopt never initialized the pos variable passed to
the pipe write. This has been mostly harmless in the past as pipes
ignore the offset, but the switch to kernel_write now verifies the
position, which can lead to a failure depending on the exact stack
initialization pattern. Initialize the variable to zero to make
rw_verify_area happy.

Fixes: 6955a76fbcd5 ("bpfilter: switch to kernel_write")
Reported-by: Christian Brauner
Reported-by: Rodrigo Madera
Signed-off-by: Christoph Hellwig
Signed-off-by: Daniel Borkmann
Tested-by: Rodrigo Madera
Tested-by: Christian Brauner
Reviewed-by: Christian Brauner
Link: https://lore.kernel.org/bpf/20200730160900.187157-1-hch@lst.de
---
 net/bpfilter/bpfilter_kern.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index c0f0990f30b6..cfb27166bfd7 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -39,7 +39,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
 {
 	struct mbox_request req;
 	struct mbox_reply reply;
-	loff_t pos;
+	loff_t pos = 0;
 	ssize_t n;
 	int ret = -EFAULT;
-- cgit
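A closing note on why the uninitialized pos only failed intermittently: rw_verify_area() rejects a negative offset with -EINVAL before the write is attempted, so the outcome depended entirely on the garbage left in the pos stack slot. Here is a simplified userspace sketch of that check, loosely modeled on fs/read_write.c and not the actual kernel code:

#include <stdio.h>

/* Loosely modeled on rw_verify_area() in fs/read_write.c: a negative
 * file offset is rejected with -EINVAL before the write happens. */
static int verify_pos(long long pos)
{
	if (pos < 0)
		return -22;	/* -EINVAL */
	return 0;
}

int main(void)
{
	/* stand-in for stack leftovers: any value with the top bit set
	 * reads as a negative loff_t and fails verification */
	long long garbage = (long long)0xdeadbeefdeadbeefULL;
	long long zeroed = 0;

	printf("uninitialized pos -> %d\n", verify_pos(garbage));	/* -22 */
	printf("pos = 0           -> %d\n", verify_pos(zeroed));	/* 0 */
	return 0;
}

With pos initialized to zero, the check always passes and the pipe write proceeds regardless of what the stack happened to contain.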