path: root/net/core/dev.h
author	Jakub Kicinski <kuba@kernel.org>	2025-02-07 17:21:05 -0800
committer	Jakub Kicinski <kuba@kernel.org>	2025-02-07 17:21:06 -0800
commit	acdefab0dcbc3833b5a734ab80d792bb778517a0 (patch)
tree	173c9371158b93ff7588f9bc2000299abda2a83d /net/core/dev.h
parent	6a0ca73e5144a5d1c1f84cbfd96f4bc656c2ae6c (diff)
parent	285b3f78eabd951e59e98f01f86abaaa6c76cd44 (diff)
Merge branch 'net-improve-core-queue-api-handling-while-device-is-down'
Jakub Kicinski says:

====================
net: improve core queue API handling while device is down

The core netdev_rx_queue_restart() doesn't currently take into account
that the device may be down. The current and proposed queue API
implementations deal with this by rejecting queue API calls while the
device is down. We can do better: in theory we can still allow devmem
binding when the device is down; we shouldn't stop and start the
queues, just try to allocate the memory. The reason we allocate the
memory is that memory provider binding checks if any compatible page
pool has been created (page_pool_check_memory_provider()).

Alternatively we could reject installing an MP while the device is
down, but the MP assignment survives ifdown (so presumably the MP
doesn't cease to exist while down), and in general we allow
configuration while down.

Previously I thought we needed this as a fix, but gve rejects page pool
calls while down, and so did Saeed in the patches he posted. So this
series just makes the core act more sensibly, but in practice it should
be a noop for now.

v1: https://lore.kernel.org/20250205190131.564456-1-kuba@kernel.org
====================

Link: https://patch.msgid.link/20250206225638.1387810-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
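To make the direction of the series concrete, here is a rough sketch of a
restart path that tolerates a downed device. This is an illustration, not
the merged diff: the queue-op callback names (ndo_queue_mem_alloc/free,
ndo_queue_mem_size) are written from memory of the queue management API,
and the cleanup details are assumptions.

	/* Sketch: restart an rx queue, tolerating a downed device.
	 * Illustration only -- op names and cleanup are assumptions,
	 * not the merged patch.
	 */
	static int rx_queue_restart_sketch(struct net_device *dev,
					   unsigned int rxq_idx)
	{
		const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
		void *new_mem;
		int err;

		new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
		if (!new_mem)
			return -ENOMEM;

		/* Allocate even while down: this is what lets memory
		 * provider binding find a compatible page pool
		 * (page_pool_check_memory_provider()).
		 */
		err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
		if (err)
			goto out_free;

		if (netif_running(dev)) {
			/* Only a running device has live queues to cycle;
			 * the stop/start calls are elided in this sketch.
			 */
		} else {
			/* Down: no queue to stop or start, just release
			 * the memory again.
			 */
			qops->ndo_queue_mem_free(dev, new_mem);
		}

	out_free:
		kvfree(new_mem);
		return err;
	}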
Diffstat (limited to 'net/core/dev.h')
-rw-r--r--	net/core/dev.h	12
1 file changed, 12 insertions, 0 deletions
diff --git a/net/core/dev.h b/net/core/dev.h
index a5b166bbd169..caa13e431a6b 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -299,6 +299,18 @@ void xdp_do_check_flushed(struct napi_struct *napi);
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif
+/* Best effort check that NAPI is not idle (can't be scheduled to run) */
+static inline void napi_assert_will_not_race(const struct napi_struct *napi)
+{
+	/* uninitialized instance, can't race */
+	if (!napi->poll_list.next)
+		return;
+
+	/* SCHED bit is set on disabled instances */
+	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
+	WARN_ON(READ_ONCE(napi->list_owner) != -1);
+}
+
+
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
#define XMIT_RECURSION_LIMIT 8
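The new helper documents a precondition rather than enforcing one: a NAPI
instance that was never initialized has a NULL poll_list.next, and a
disabled instance keeps NAPI_STATE_SCHED set with list_owner of -1, so in
either state no CPU can be polling it. A hedged example of where such an
assertion could sit follows; the surrounding function and the rx queue's
NAPI pointer are illustrative, not taken from the merged series.

	/* Hypothetical call site -- the function and the rxq->napi
	 * pointer are illustrative assumptions.
	 */
	static void rx_queue_mem_swap_sketch(struct netdev_rx_queue *rxq)
	{
		/* The instance must be unable to run: either never
		 * initialized, or disabled (SCHED bit held,
		 * list_owner == -1).
		 */
		if (rxq->napi)
			napi_assert_will_not_race(rxq->napi);

		/* ... now safe to free or swap the queue's memory without
		 * racing against a concurrent napi poll ...
		 */
	}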