author    Eric Dumazet <edumazet@google.com>  2025-09-28 08:49:32 +0000
committer Paolo Abeni <pabeni@redhat.com>     2025-09-30 15:45:52 +0200
commit    9c94ae6bb0b2895024b6e29fcc1cbec968b4776a (patch)
tree      efd18d0b0c67ebbe5785cddf16d14a00a2f48463
parent    2c0592bd5cadfcd5337eafa07e3145a097cfd880 (diff)
net: make softnet_data.defer_count an atomic
This is preparation work to remove the softnet_data.defer_lock, as it
is contended on hosts with a large number of cores.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jason Xing <kerneljasonxing@gmail.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250928084934.3266948-2-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
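Why the conversion helps: with a plain int, every producer has to take
defer_lock just to read or bump the counter; once the field is an
atomic_t, later patches can update and test it locklessly. A minimal
userspace sketch of the resulting pattern, using C11 <stdatomic.h>
rather than the kernel's atomic_t (defer_one() and the constants here
are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int defer_count;

/* Returns -1 when the queue is full, 1 exactly once when the
 * pre-increment count crosses half capacity (the "kick" in the
 * patch below), 0 otherwise. No lock is needed for the counter. */
static int defer_one(int defer_max)
{
	if (atomic_load(&defer_count) >= defer_max)
		return -1;		/* full: caller frees immediately */
	/* atomic_fetch_add() returns the old value, matching the
	 * kernel's atomic_inc_return(...) - 1. */
	return atomic_fetch_add(&defer_count, 1) == (defer_max >> 1);
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("slot %d -> %d\n", i, defer_one(8));
	return 0;
}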
-rw-r--r--  include/linux/netdevice.h | 2 +-
-rw-r--r--  net/core/dev.c            | 2 +-
-rw-r--r--  net/core/skbuff.c         | 6 ++----
3 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1b85454116f6..27e3fa69253f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3538,7 +3538,7 @@ struct softnet_data {
/* Another possibly contended cache line */
spinlock_t defer_lock ____cacheline_aligned_in_smp;
- int defer_count;
+ atomic_t defer_count;
int defer_ipi_scheduled;
struct sk_buff *defer_list;
call_single_data_t defer_csd;
diff --git a/net/core/dev.c b/net/core/dev.c
index 8b54fdf0289a..8566678d8344 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6726,7 +6726,7 @@ static void skb_defer_free_flush(struct softnet_data *sd)
spin_lock(&sd->defer_lock);
skb = sd->defer_list;
sd->defer_list = NULL;
- sd->defer_count = 0;
+ atomic_set(&sd->defer_count, 0);
spin_unlock(&sd->defer_lock);
while (skb != NULL) {
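The flush side above uses a common lock-shortening pattern: detach the
whole deferred list and zero the count while holding defer_lock, then
walk and free the entries with the lock dropped. A hedged userspace
analogue (pthread mutex standing in for the kernel spinlock; the node
type and function names are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static pthread_mutex_t defer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *defer_list;

static void defer_free_flush(void)
{
	struct node *n;

	/* Grab the whole list under the lock... */
	pthread_mutex_lock(&defer_lock);
	n = defer_list;
	defer_list = NULL;
	pthread_mutex_unlock(&defer_lock);

	/* ...then free it without holding the lock. */
	while (n != NULL) {
		struct node *next = n->next;

		free(n);
		n = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->next = defer_list;
		defer_list = n;
	}
	defer_free_flush();
	return 0;
}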
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 618afd59afff..16cd357d62a6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -7202,14 +7202,12 @@ nodefer: kfree_skb_napi_cache(skb);
sd = &per_cpu(softnet_data, cpu);
defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max);
- if (READ_ONCE(sd->defer_count) >= defer_max)
+ if (atomic_read(&sd->defer_count) >= defer_max)
goto nodefer;
spin_lock_bh(&sd->defer_lock);
/* Send an IPI every time queue reaches half capacity. */
- kick = sd->defer_count == (defer_max >> 1);
- /* Paired with the READ_ONCE() few lines above */
- WRITE_ONCE(sd->defer_count, sd->defer_count + 1);
+ kick = (atomic_inc_return(&sd->defer_count) - 1) == (defer_max >> 1);
skb->next = sd->defer_list;
/* Paired with READ_ONCE() in skb_defer_free_flush() */
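One subtlety in the new kick test: atomic_inc_return() returns the
post-increment value, so subtracting 1 recovers the old count, and the
comparison fires exactly once per fill cycle, preserving the semantics
of the removed "sd->defer_count == (defer_max >> 1)" check under the
lock. A minimal check of that arithmetic (plain C, values
illustrative):

#include <assert.h>

/* Userspace stand-in for atomic_inc_return(): increment and
 * return the new value. */
static int inc_return(int *v)
{
	return ++*v;
}

int main(void)
{
	int defer_count = 0, defer_max = 6, kicks = 0;

	while (defer_count < defer_max)
		kicks += (inc_return(&defer_count) - 1) == (defer_max >> 1);

	assert(kicks == 1);	/* the IPI kick fires exactly once */
	return 0;
}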