path: root/kernel/bpf/hashtab.c
author    Mauro Carvalho Chehab <mchehab+huawei@kernel.org>  2020-10-04 12:19:12 +0200
committer Mauro Carvalho Chehab <mchehab+huawei@kernel.org>  2020-10-04 12:19:12 +0200
commit    463c43fcd97e493d8a17242f4f000c86fe642ed6 (patch)
tree      95deff935b99d5ef6c87bd67b7f7be608a120951 /kernel/bpf/hashtab.c
parent    7c9df3ec493e353166527f56ad1540d052b92cd4 (diff)
parent    a1b8638ba1320e6684aa98233c15255eb803fac7 (diff)
Merge tag 'v5.9-rc7' into patchwork
Linux 5.9-rc7

* tag 'v5.9-rc7': (683 commits)
  Linux 5.9-rc7
  mm/thp: Split huge pmds/puds if they're pinned when fork()
  mm: Do early cow for pinned pages during fork() for ptes
  mm/fork: Pass new vma pointer into copy_page_range()
  mm: Introduce mm_struct.has_pinned
  mm: validate pmd after splitting
  mm: don't rely on system state to detect hot-plug operations
  mm: replace memmap_context by meminit_context
  arch/x86/lib/usercopy_64.c: fix __copy_user_flushcache() cache writeback
  lib/memregion.c: include memregion.h
  lib/string.c: implement stpcpy
  mm/migrate: correct thp migration stats
  mm/gup: fix gup_fast with dynamic page table folding
  mm: memcontrol: fix missing suffix of workingset_restore
  mm, THP, swap: fix allocating cluster for swapfile by mistake
  mm: slab: fix potential double free in ___cache_free
  Documentation/llvm: Fix clang target examples
  io_uring: ensure async buffered read-retry is setup properly
  KVM: SVM: Add a dedicated INVD intercept routine
  io_uring: don't unconditionally set plug->nowait = true
  ...
Diffstat (limited to 'kernel/bpf/hashtab.c')
-rw-r--r--  kernel/bpf/hashtab.c  15
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 78dfff6a501b..7df28a45c66b 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1622,7 +1622,6 @@ struct bpf_iter_seq_hash_map_info {
 	struct bpf_map *map;
 	struct bpf_htab *htab;
 	void *percpu_value_buf; // non-zero means percpu hash
-	unsigned long flags;
 	u32 bucket_id;
 	u32 skip_elems;
 };
@@ -1632,7 +1631,6 @@ bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
 					   struct htab_elem *prev_elem)
 {
 	const struct bpf_htab *htab = info->htab;
-	unsigned long flags = info->flags;
 	u32 skip_elems = info->skip_elems;
 	u32 bucket_id = info->bucket_id;
 	struct hlist_nulls_head *head;
@@ -1656,19 +1654,18 @@ bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
 
 		/* not found, unlock and go to the next bucket */
 		b = &htab->buckets[bucket_id++];
-		htab_unlock_bucket(htab, b, flags);
+		rcu_read_unlock();
 		skip_elems = 0;
 	}
 
 	for (i = bucket_id; i < htab->n_buckets; i++) {
 		b = &htab->buckets[i];
-		flags = htab_lock_bucket(htab, b);
+		rcu_read_lock();
 
 		count = 0;
 		head = &b->head;
 		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
 			if (count >= skip_elems) {
-				info->flags = flags;
 				info->bucket_id = i;
 				info->skip_elems = count;
 				return elem;
@@ -1676,7 +1673,7 @@ bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
 			count++;
 		}
 
-		htab_unlock_bucket(htab, b, flags);
+		rcu_read_unlock();
 		skip_elems = 0;
 	}
 
@@ -1754,14 +1751,10 @@ static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
 
 static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
 {
-	struct bpf_iter_seq_hash_map_info *info = seq->private;
-
 	if (!v)
 		(void)__bpf_hash_map_seq_show(seq, NULL);
 	else
-		htab_unlock_bucket(info->htab,
-				   &info->htab->buckets[info->bucket_id],
-				   info->flags);
+		rcu_read_unlock();
 }
 
 static int bpf_iter_init_hash_map(void *priv_data,
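
Taken together, the hunks above drop the per-bucket spinlock from the hashmap iterator: bpf_hash_map_seq_find_next() now enters an RCU read-side critical section when it settles on a bucket and returns an element under it, and bpf_hash_map_seq_stop() releases it, which is why the saved lock state (the "flags" field) could be deleted from struct bpf_iter_seq_hash_map_info. Below is a condensed sketch of the resulting pattern, reusing the names visible in the diff but eliding the skip_elems bookkeeping and the prev_elem fast path; it is an illustration, not the verbatim kernel code.

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/rculist_nulls.h>

/* Sketch: walk buckets under rcu_read_lock() instead of the bucket
 * spinlock. A successful return deliberately leaves the RCU read-side
 * critical section open; the seq_file ->stop() callback closes it.
 */
static struct htab_elem *
find_next_sketch(struct bpf_iter_seq_hash_map_info *info)
{
	const struct bpf_htab *htab = info->htab;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	u32 i;

	for (i = info->bucket_id; i < htab->n_buckets; i++) {
		rcu_read_lock();		/* pin this bucket's list */
		head = &htab->buckets[i].head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			info->bucket_id = i;	/* resume point for the iterator */
			return elem;		/* still under rcu_read_lock() */
		}
		rcu_read_unlock();		/* bucket empty: drop it, move on */
	}
	return NULL;				/* iteration done, no lock held */
}

The asymmetry is the point of the patch: the read-side critical section spans the gap between the seq_file callbacks until ->stop() runs, which is exactly the unlock that the last hunk moves into bpf_hash_map_seq_stop().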