From c2e42ddf26cad03ea92400c88b024e8ce1601dff Mon Sep 17 00:00:00 2001
From: Hou Tao
Date: Wed, 16 Aug 2023 12:59:58 +0800
Subject: bpf, cpumap: Clean up bpf_cpu_map_entry directly in cpu_map_free
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

After synchronize_rcu(), both the detached XDP programs and any pending
xdp_do_flush() operations have completed, and the only remaining user of
bpf_cpu_map_entry will be cpu_map_kthread_run(), so instead of calling
__cpu_map_entry_replace() to stop the kthread and clean up the entry
after an RCU grace period, do these things directly.

Signed-off-by: Hou Tao
Reviewed-by: Toke Høiland-Jørgensen
Link: https://lore.kernel.org/r/20230816045959.358059-3-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/cpumap.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 93fbd3e5079e..e42a1bdb7f53 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -566,16 +566,15 @@ static void cpu_map_free(struct bpf_map *map)
 	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
 	 * so the bpf programs (can be more than one that used this map) were
 	 * disconnected from events. Wait for outstanding critical sections in
-	 * these programs to complete. The rcu critical section only guarantees
-	 * no further "XDP/bpf-side" reads against bpf_cpu_map->cpu_map.
-	 * It does __not__ ensure pending flush operations (if any) are
-	 * complete.
+	 * these programs to complete. synchronize_rcu() below not only
+	 * guarantees no further "XDP/bpf-side" reads against
+	 * bpf_cpu_map->cpu_map, but also ensures pending flush operations
+	 * (if any) are completed.
 	 */
-
 	synchronize_rcu();
 
-	/* For cpu_map the remote CPUs can still be using the entries
-	 * (struct bpf_cpu_map_entry).
+	/* The only possible user of bpf_cpu_map_entry is
+	 * cpu_map_kthread_run().
 	 */
 	for (i = 0; i < cmap->map.max_entries; i++) {
 		struct bpf_cpu_map_entry *rcpu;
@@ -584,8 +583,8 @@ static void cpu_map_free(struct bpf_map *map)
 		if (!rcpu)
 			continue;
 
-		/* bq flush and cleanup happens after RCU grace-period */
-		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
+		/* Stop the kthread and clean up the entry directly */
+		__cpu_map_entry_free(&rcpu->free_work.work);
 	}
 	bpf_map_area_free(cmap->cpu_map);
 	bpf_map_area_free(cmap);
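
For readers who want to experiment with the synchronization pattern this
patch relies on, here is a minimal userspace sketch built on liburcu, not
kernel code: once no new reader can obtain a reference and a single
synchronize_rcu() has returned, the teardown path may free entries inline
instead of deferring the free through call_rcu(). All identifiers below
(struct entry, map, reader(), map_free(), MAX_ENTRIES) are illustrative
assumptions, not names from the kernel patch.

/* Sketch of the pattern in cpu_map_free(): no new readers ->
 * synchronize_rcu() -> free directly, with no call_rcu() detour.
 * Assumes liburcu; build with: gcc rcu_free_demo.c -lurcu -lpthread
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <urcu.h>

#define MAX_ENTRIES 4

struct entry {
	int cpu;			/* stand-in for per-entry state */
};

static struct entry *map[MAX_ENTRIES];	/* read under rcu_read_lock() */

/* Reader: conceptually what the XDP fast path does against cpu_map. */
static void *reader(void *arg)
{
	rcu_register_thread();
	rcu_read_lock();
	for (int i = 0; i < MAX_ENTRIES; i++) {
		struct entry *e = rcu_dereference(map[i]);

		if (e)
			printf("redirect to cpu %d\n", e->cpu);
	}
	rcu_read_unlock();
	rcu_unregister_thread();
	return NULL;
}

/* Teardown: by the time this runs, the map is unreachable for new
 * readers (the kernel precondition is map->refcnt == 0), so one grace
 * period lets in-flight readers drain; afterwards each entry can be
 * torn down inline, mirroring the direct __cpu_map_entry_free() call.
 */
static void map_free(void)
{
	synchronize_rcu();		/* wait for outstanding readers */

	for (int i = 0; i < MAX_ENTRIES; i++) {
		struct entry *e = map[i];

		if (!e)
			continue;
		free(e);		/* direct cleanup, no call_rcu() */
		map[i] = NULL;
	}
}

int main(void)
{
	pthread_t tid;

	rcu_register_thread();
	for (int i = 0; i < MAX_ENTRIES; i++) {
		struct entry *e = malloc(sizeof(*e));

		e->cpu = i;
		rcu_assign_pointer(map[i], e);	/* publish */
	}

	pthread_create(&tid, NULL, reader, NULL);
	pthread_join(tid, NULL);	/* "detach": no readers after this */

	map_free();
	rcu_unregister_thread();
	return 0;
}

The point of the patch is visible in map_free() above: when detaching
already guarantees no new readers, one synchronize_rcu() makes inline
cleanup safe, which is simpler than scheduling a deferred RCU callback
per entry as the old __cpu_map_entry_replace() path did.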