author | Martin KaFai Lau <martin.lau@kernel.org> | 2023-03-07 22:59:21 -0800
committer | Alexei Starovoitov <ast@kernel.org> | 2023-03-10 11:05:28 -0800
commit | 2ffcb6fc50174d1efc8f98633eb2647d84483c68 (patch)
tree | 2fe829723048d435cd02f816209afb366e5e4b7c /kernel/bpf/bpf_local_storage.c
parent | 4cbd23cc92c49173e402753cab62b8a7754ed18f (diff)
bpf: Refactor codes into bpf_local_storage_destroy
This patch first renames bpf_local_storage_unlink_nolock() to
bpf_local_storage_destroy(). The new name better reflects that it is
only used when the storage's owner (sk/task/cgrp/inode) is being
kfree()'d. All of bpf_local_storage_destroy()'s callers take the spin
lock and then free the storage, so this patch also moves these two
steps into bpf_local_storage_destroy() itself.
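For context, each owner's free path previously open-coded this
lock/unlink/free sequence. Below is a minimal sketch of the caller-side
simplification, modeled on the sk storage free path; the caller updates
live outside kernel/bpf/bpf_local_storage.c and are not part of the
diffstat below, so the code shown here is illustrative rather than
quoted from the patch.

```c
/* Before: the caller took the lock, unlinked, and freed by itself.
 * (Sketch based on bpf_sk_storage_free(); details may differ.)
 */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage *sk_storage;
	bool free_sk_storage = false;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	raw_spin_lock_bh(&sk_storage->lock);
	free_sk_storage = bpf_local_storage_unlink_nolock(sk_storage);
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

/* After: lock, unlink, and free all live in bpf_local_storage_destroy(). */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage *sk_storage;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (sk_storage)
		bpf_local_storage_destroy(sk_storage);
	rcu_read_unlock();
}
```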
This is preparation work for a later patch that uses
bpf_mem_cache_alloc/free in bpf_local_storage.
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20230308065936.1550103-3-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/bpf_local_storage.c')
-rw-r--r-- | kernel/bpf/bpf_local_storage.c | 8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 1904a4245ebe..e19f9f50a60d 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -652,11 +652,12 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
 	return 0;
 }
 
-bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage)
+void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
 {
 	struct bpf_local_storage_elem *selem;
 	bool free_storage = false;
 	struct hlist_node *n;
+	unsigned long flags;
 
 	/* Neither the bpf_prog nor the bpf_map's syscall
 	 * could be modifying the local_storage->list now.
@@ -667,6 +668,7 @@ bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage)
 	 * when unlinking elem from the local_storage->list and
 	 * the map's bucket->list.
 	 */
+	raw_spin_lock_irqsave(&local_storage->lock, flags);
 	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
 		/* Always unlink from map before unlinking from
 		 * local_storage.
@@ -681,8 +683,10 @@ bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage)
 		free_storage = bpf_selem_unlink_storage_nolock(
 			local_storage, selem, false, false);
 	}
+	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
 
-	return free_storage;
+	if (free_storage)
+		kfree_rcu(local_storage, rcu);
 }
 
 u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
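One note on the result: the element list is emptied under
local_storage->lock, but the storage object itself is still released
with kfree_rcu(), so RCU readers that already hold a pointer to it can
keep using it until a grace period elapses. For reference, an
abbreviated sketch of the fields involved follows; the real struct
bpf_local_storage in include/linux/bpf_local_storage.h has additional
fields (e.g. the owner pointer and the cache array) that are omitted
here.

```c
/* Abbreviated sketch of struct bpf_local_storage; not the full definition. */
struct bpf_local_storage {
	struct hlist_head list; /* selems; walked under rcu_read_lock() by lookups */
	struct rcu_head rcu;    /* anchor for kfree_rcu(local_storage, rcu) */
	raw_spinlock_t lock;    /* serializes list updates; taken irqsave above */
};
```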