author     Eric Dumazet <edumazet@google.com>    2024-01-22 11:26:03 +0000
committer  Paolo Abeni <pabeni@redhat.com>       2024-01-23 15:13:55 +0100
commit     622a08e8de9f9ad576fd56e3a2e97a84370a8100 (patch)
tree       9fda1455c3639361d11162d535305baba2f05adf /net/ipv4/inet_diag.c
parent     f44e64990beb41167bd7c313d90bcf7e290c3582 (diff)
inet_diag: skip over empty buckets
After the removal of inet_diag_table_mutex, sock_diag_table_mutex and
sock_diag_mutex, I was able to see spinlock contention from
inet_diag_dump_icsk() when running 100 parallel invocations.

It is time to skip over empty buckets.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Guillaume Nault <gnault@redhat.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
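For illustration only (not part of the patch): a minimal userspace sketch of the pattern the commit applies, assuming a toy hash table in which pthread_mutex_t and a plain singly linked list stand in for the kernel's per-bucket spinlocks and hlist_nulls lists. The names here (struct bucket, dump_table) are invented for the sketch; the point is that a lockless emptiness check lets a dumper skip a bucket without ever contending on its lock.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	int value;
	struct node *next;
};

struct bucket {
	pthread_mutex_t lock;
	struct node *head;	/* NULL means the bucket is empty */
};

/* Walk every bucket, taking a bucket lock only when the bucket has entries. */
static void dump_table(struct bucket *tbl, size_t nbuckets)
{
	for (size_t i = 0; i < nbuckets; i++) {
		struct bucket *b = &tbl[i];

		/*
		 * Same idea as the hlist_nulls_empty()/hlist_empty() checks
		 * in the patch: an empty bucket is skipped without touching
		 * its lock, so parallel dumpers no longer serialize on locks
		 * that protect nothing.
		 */
		if (!b->head)
			continue;

		pthread_mutex_lock(&b->lock);
		for (struct node *n = b->head; n; n = n->next)
			printf("bucket %zu: %d\n", i, n->value);
		pthread_mutex_unlock(&b->lock);
	}
}

int main(void)
{
	struct node a = { .value = 42, .next = NULL };
	struct bucket tbl[4] = {
		{ PTHREAD_MUTEX_INITIALIZER, NULL },
		{ PTHREAD_MUTEX_INITIALIZER, &a },	/* only bucket 1 is populated */
		{ PTHREAD_MUTEX_INITIALIZER, NULL },
		{ PTHREAD_MUTEX_INITIALIZER, NULL },
	};

	dump_table(tbl, 4);
	return 0;
}

As in the patch, the unlocked emptiness check can race with a concurrent insertion; that is acceptable for a diagnostic dump, which only needs a best-effort snapshot of the table.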
Diffstat (limited to 'net/ipv4/inet_diag.c')
-rw-r--r--	net/ipv4/inet_diag.c	8
1 file changed, 8 insertions, 0 deletions
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2c2d8b9dd8e9..7adace541fe2 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -1045,6 +1045,10 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
 		num = 0;
 		ilb = &hashinfo->lhash2[i];
 
+		if (hlist_nulls_empty(&ilb->nulls_head)) {
+			s_num = 0;
+			continue;
+		}
 		spin_lock(&ilb->lock);
 		sk_nulls_for_each(sk, node, &ilb->nulls_head) {
 			struct inet_sock *inet = inet_sk(sk);
@@ -1109,6 +1113,10 @@ resume_bind_walk:
 		accum = 0;
 		ibb = &hashinfo->bhash2[i];
 
+		if (hlist_empty(&ibb->chain)) {
+			s_num = 0;
+			continue;
+		}
 		spin_lock_bh(&ibb->lock);
 		inet_bind_bucket_for_each(tb2, &ibb->chain) {
 			if (!net_eq(ib2_net(tb2), net))