Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c        36
-rw-r--r--  net/core/dev.h         8
-rw-r--r--  net/core/devmem.c      6
-rw-r--r--  net/core/devmem.h      7
-rw-r--r--  net/core/netpoll.c     7
-rw-r--r--  net/core/page_pool.c  29
6 files changed, 67 insertions(+), 26 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index b28ce68830b2..5a3c0f40a93f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6978,6 +6978,12 @@ int napi_set_threaded(struct napi_struct *napi,
if (napi->config)
napi->config->threaded = threaded;
+ /* Setting/unsetting threaded mode on a napi might not immediately
+ * take effect, if the current napi instance is actively being
+ * polled. In this case, the switch between threaded mode and
+ * softirq mode will happen in the next round of napi_schedule().
+ * This should not cause hiccups/stalls to the live traffic.
+ */
if (!threaded && napi->thread) {
napi_stop_kthread(napi);
} else {
@@ -6993,7 +6999,7 @@ int netif_set_threaded(struct net_device *dev,
enum netdev_napi_threaded threaded)
{
struct napi_struct *napi;
- int err = 0;
+ int i, err = 0;
netdev_assert_locked_or_invisible(dev);
@@ -7011,23 +7017,13 @@ int netif_set_threaded(struct net_device *dev,
WRITE_ONCE(dev->threaded, threaded);
- /* Make sure kthread is created before THREADED bit
- * is set.
- */
- smp_mb__before_atomic();
+ /* The error should not occur as the kthreads are already created. */
+ list_for_each_entry(napi, &dev->napi_list, dev_list)
+ WARN_ON_ONCE(napi_set_threaded(napi, threaded));
- /* Setting/unsetting threaded mode on a napi might not immediately
- * take effect, if the current napi instance is actively being
- * polled. In this case, the switch between threaded mode and
- * softirq mode will happen in the next round of napi_schedule().
- * This should not cause hiccups/stalls to the live traffic.
- */
- list_for_each_entry(napi, &dev->napi_list, dev_list) {
- if (!threaded && napi->thread)
- napi_stop_kthread(napi);
- else
- assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
- }
+ /* Override the config for all NAPIs even if currently not listed */
+ for (i = 0; i < dev->num_napi_configs; i++)
+ dev->napi_config[i].threaded = threaded;
return err;
}
@@ -7361,8 +7357,9 @@ void netif_napi_add_weight_locked(struct net_device *dev,
* Clear dev->threaded if kthread creation failed so that
* threaded mode will not be enabled in napi_enable().
*/
- if (dev->threaded && napi_kthread_create(napi))
- dev->threaded = NETDEV_NAPI_THREADED_DISABLED;
+ if (napi_get_threaded_config(dev, napi))
+ if (napi_kthread_create(napi))
+ dev->threaded = NETDEV_NAPI_THREADED_DISABLED;
netif_napi_set_irq_locked(napi, -1);
}
EXPORT_SYMBOL(netif_napi_add_weight_locked);
@@ -11881,6 +11878,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
goto free_all;
dev->cfg_pending = dev->cfg;
+ dev->num_napi_configs = maxqs;
napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config));
dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT);
if (!dev->napi_config)
diff --git a/net/core/dev.h b/net/core/dev.h
index ab69edc0c3e3..d6b08d435479 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -323,6 +323,14 @@ static inline enum netdev_napi_threaded napi_get_threaded(struct napi_struct *n)
return NETDEV_NAPI_THREADED_DISABLED;
}
+static inline enum netdev_napi_threaded
+napi_get_threaded_config(struct net_device *dev, struct napi_struct *n)
+{
+ if (n->config)
+ return n->config->threaded;
+ return dev->threaded;
+}
+
int napi_set_threaded(struct napi_struct *n,
enum netdev_napi_threaded threaded);
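A hedged sketch of what the dev.c/dev.h changes buy a driver: because netif_set_threaded() now also writes the setting into every dev->napi_config[] slot, and napi_get_threaded_config() prefers the per-NAPI config over dev->threaded, a NAPI deleted and re-added with the same config index keeps its threaded mode. The example_* names below are hypothetical and assume the driver uses the per-index config API (netif_napi_add_config()); they are not part of this patch.

/* Hedged sketch, hypothetical driver code. */
struct example_queue {
	struct napi_struct napi;
	int idx;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	/* A real driver would process up to @budget packets here. */
	napi_complete_done(napi, 0);
	return 0;
}

static void example_recreate_queue_napi(struct net_device *dev,
					struct example_queue *q)
{
	netif_napi_del(&q->napi);
	/* dev->napi_config[q->idx].threaded still reflects the last
	 * netif_set_threaded() call, so the re-added NAPI picks the
	 * mode back up (and its kthread is re-created at add time).
	 */
	netif_napi_add_config(dev, &q->napi, example_poll, q->idx);
}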
diff --git a/net/core/devmem.c b/net/core/devmem.c
index b3a62ca0df65..24c591ab38ae 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -70,14 +70,13 @@ void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
gen_pool_destroy(binding->chunk_pool);
dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
- DMA_FROM_DEVICE);
+ binding->direction);
dma_buf_detach(binding->dmabuf, binding->attachment);
dma_buf_put(binding->dmabuf);
xa_destroy(&binding->bound_rxqs);
kvfree(binding->tx_vec);
kfree(binding);
}
-EXPORT_SYMBOL(__net_devmem_dmabuf_binding_free);
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
@@ -208,6 +207,7 @@ net_devmem_bind_dmabuf(struct net_device *dev,
mutex_init(&binding->lock);
binding->dmabuf = dmabuf;
+ binding->direction = direction;
binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
if (IS_ERR(binding->attachment)) {
@@ -312,7 +312,7 @@ err_tx_vec:
kvfree(binding->tx_vec);
err_unmap:
dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
- DMA_FROM_DEVICE);
+ direction);
err_detach:
dma_buf_detach(dmabuf, binding->attachment);
err_free_binding:
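To make the direction change concrete: the unmap direction must match the one used at map time, which is why the binding now remembers it instead of hard-coding DMA_FROM_DEVICE (wrong for Tx bindings). A minimal sketch with hypothetical example_* helpers, not from this patch:

#include <linux/dma-buf.h>

static struct sg_table *example_map(struct dma_buf_attachment *attach,
				    enum dma_data_direction dir)
{
	/* @dir is FROM_DEVICE for an Rx binding, TO_DEVICE for Tx. */
	return dma_buf_map_attachment_unlocked(attach, dir);
}

static void example_unmap(struct dma_buf_attachment *attach,
			  struct sg_table *sgt,
			  enum dma_data_direction dir)
{
	/* @dir must be the same direction passed to example_map(). */
	dma_buf_unmap_attachment_unlocked(attach, sgt, dir);
}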
diff --git a/net/core/devmem.h b/net/core/devmem.h
index 0a3b28ba5c13..41cd6e1c9141 100644
--- a/net/core/devmem.h
+++ b/net/core/devmem.h
@@ -56,6 +56,9 @@ struct net_devmem_dmabuf_binding {
*/
u32 id;
+ /* DMA direction, FROM_DEVICE for Rx binding, TO_DEVICE for Tx. */
+ enum dma_data_direction direction;
+
/* Array of net_iov pointers for this binding, sorted by virtual
* address. This array is convenient to map the virtual addresses to
* net_iovs in the TX path.
@@ -165,10 +168,6 @@ static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}
-static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
-{
-}
-
static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
enum dma_data_direction direction,
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a1da97b5b30b..5f65b62346d4 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -768,6 +768,13 @@ int netpoll_setup(struct netpoll *np)
if (err)
goto flush;
rtnl_unlock();
+
+ /* Make sure all NAPI polls which started before dev->npinfo
+ * was visible have exited before we start calling NAPI poll.
+ * NAPI skips locking if dev->npinfo is NULL.
+ */
+ synchronize_rcu();
+
return 0;
flush:
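The ordering the new synchronize_rcu() enforces can be sketched as a classic RCU publish/wait pair. The example_* helpers below are hypothetical, but dev->npinfo really is an __rcu pointer that the NAPI poll path checks locklessly:

#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>

/* Reader side (e.g. the NAPI poll path): locking is skipped while
 * npinfo is still NULL.
 */
static bool example_need_netpoll_lock(struct net_device *dev)
{
	return rcu_access_pointer(dev->npinfo) != NULL;
}

/* Writer side (e.g. netpoll_setup()): publish npinfo, then wait for
 * any poller that may still have observed NULL before netpoll starts
 * polling the device itself.
 */
static void example_publish_npinfo(struct net_device *dev,
				   struct netpoll_info *npinfo)
{
	rcu_assign_pointer(dev->npinfo, npinfo);
	synchronize_rcu();
}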
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 05e2e22a8f7c..343a6cac21e3 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -1201,6 +1201,35 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
pool->xdp_mem_id = mem->id;
}
+/**
+ * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI
+ * @pool: page pool to modify
+ * @napi: NAPI instance to associate the page pool with
+ *
+ * Associate a page pool with a NAPI instance for lockless page recycling.
+ * This is useful when a new page pool has to be added to a NAPI instance
+ * without disabling that NAPI instance, to mark the point at which control
+ * path "hands over" the page pool to the NAPI instance. In most cases driver
+ * can simply set the @napi field in struct page_pool_params, and does not
+ * have to call this helper.
+ *
+ * The function is idempotent, but does not implement any refcounting.
+ * Single page_pool_disable_direct_recycling() will disable recycling,
+ * no matter how many times enable was called.
+ */
+void page_pool_enable_direct_recycling(struct page_pool *pool,
+ struct napi_struct *napi)
+{
+ if (READ_ONCE(pool->p.napi) == napi)
+ return;
+ WARN_ON(!napi || pool->p.napi);
+
+ mutex_lock(&page_pools_lock);
+ WRITE_ONCE(pool->p.napi, napi);
+ mutex_unlock(&page_pools_lock);
+}
+EXPORT_SYMBOL(page_pool_enable_direct_recycling);
+
void page_pool_disable_direct_recycling(struct page_pool *pool)
{
/* Disable direct recycling based on pool->cpuid.
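A hedged usage sketch for the new helper (hypothetical driver function and parameters, not from this patch): the pool is created from the control path with no NAPI owner, configured, and only then handed over for lockless direct recycling while the NAPI keeps running.

#include <net/page_pool/helpers.h>

static struct page_pool *example_swap_in_pool(struct napi_struct *napi,
					      struct device *dma_dev)
{
	struct page_pool_params pp = {
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.flags		= PP_FLAG_DMA_MAP,
		/* .napi intentionally left NULL during setup */
	};
	struct page_pool *pool;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return pool;

	/* Control path is done; hand the pool over to the live NAPI. */
	page_pool_enable_direct_recycling(pool, napi);
	return pool;
}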