Diffstat (limited to 'net/core/gro_cells.c')
-rw-r--r--	net/core/gro_cells.c	52
1 file changed, 46 insertions(+), 6 deletions(-)
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index e095fb871d91..a725d21159a6 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -3,15 +3,18 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <net/gro_cells.h>
+#include <net/hotdata.h>
 
 struct gro_cell {
 	struct sk_buff_head	napi_skbs;
 	struct napi_struct	napi;
+	local_lock_t		bh_lock;
 };
 
 int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
+	bool have_bh_lock = false;
 	struct gro_cell *cell;
 	int res;
 
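The new bh_lock member is a local_lock_t, the per-CPU lock type behind the nested-BH locking in the hunks below: on !PREEMPT_RT kernels it compiles down to little more than lockdep annotations, since running with bottom halves disabled already serializes access to per-CPU data, while on PREEMPT_RT, where softirqs run in preemptible threads, it becomes a real lock. A hedged sketch of the pattern, with illustrative names (my_cache, my_enqueue) that are not part of this commit:

/* Minimal sketch of the nested-BH local_lock pattern: per-CPU state
 * touched from BH context. Illustrative names only.
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

struct my_cache {
	struct sk_buff_head	queue;
	local_lock_t		lock;
};

static DEFINE_PER_CPU(struct my_cache, my_cache) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static int __init my_cache_init(void)
{
	int cpu;

	/* sk_buff_head needs runtime init; the lock was statically set up. */
	for_each_possible_cpu(cpu)
		__skb_queue_head_init(&per_cpu(my_cache, cpu).queue);
	return 0;
}

static void my_enqueue(struct sk_buff *skb)
{
	struct my_cache *c;

	/* Real lock on PREEMPT_RT, (mostly) annotation otherwise. */
	local_lock_nested_bh(&my_cache.lock);
	c = this_cpu_ptr(&my_cache);
	__skb_queue_tail(&c->queue, skb);
	local_unlock_nested_bh(&my_cache.lock);
}

Note the static INIT_LOCAL_LOCK() initializer: gro_cells cannot use it because its cells come from alloc_percpu(), which is why gro_cells_init() below calls local_lock_init() at runtime instead.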
@@ -24,11 +27,13 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 		goto unlock;
 	}
 
+	local_lock_nested_bh(&gcells->cells->bh_lock);
+	have_bh_lock = true;
 	cell = this_cpu_ptr(gcells->cells);
 
-	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+	if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
 drop:
-		atomic_long_inc(&dev->rx_dropped);
+		dev_core_stats_rx_dropped_inc(dev);
 		kfree_skb(skb);
 		res = NET_RX_DROP;
 		goto unlock;
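Two independent cleanups meet in this hunk: the backlog limit moved from the old netdev_max_backlog global into net_hotdata (read with READ_ONCE() because the net.core.netdev_max_backlog sysctl may rewrite it concurrently), and the drop counter moved from an atomic_long_t on the device into per-CPU core stats via dev_core_stats_rx_dropped_inc(). A sketch of the per-CPU counter idea, with illustrative names (my_stats and friends) rather than the real netdevice.h helpers:

/* Per-CPU drop accounting: the increment touches only CPU-local
 * memory and readers sum over all CPUs, so the hot path never
 * bounces a shared cacheline the way atomic_long_inc() does.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct my_stats {
	unsigned long rx_dropped;
};

static DEFINE_PER_CPU(struct my_stats, my_stats);

static inline void my_rx_dropped_inc(void)
{
	this_cpu_inc(my_stats.rx_dropped);
}

static unsigned long my_rx_dropped_read(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(my_stats.rx_dropped, cpu);
	return sum;
}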
@@ -41,6 +46,8 @@ drop:
 	res = NET_RX_SUCCESS;
 
 unlock:
+	if (have_bh_lock)
+		local_unlock_nested_bh(&gcells->cells->bh_lock);
 	rcu_read_unlock();
 	return res;
 }
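For context, the usual caller of gro_cells_receive() is a tunnel driver's receive path, which wants GRO batching for decapsulated packets without owning a NAPI instance of its own. A hedged sketch modelled loosely on ip_tunnel.c; struct my_tunnel and my_tunnel_rx are hypothetical:

/* Hypothetical driver private area embedding struct gro_cells. */
struct my_tunnel {
	struct net_device	*dev;
	struct gro_cells	gro_cells;
};

static int my_tunnel_rx(struct my_tunnel *t, struct sk_buff *skb)
{
	/* Attribute the inner packet to the tunnel netdev, then let
	 * gro_cells queue it on a per-CPU cell and schedule that
	 * cell's NAPI instance.
	 */
	skb->dev = t->dev;
	return gro_cells_receive(&t->gro_cells, skb);
}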
@@ -54,7 +61,9 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
 	int work_done = 0;
 
 	while (work_done < budget) {
+		__local_lock_nested_bh(&cell->bh_lock);
 		skb = __skb_dequeue(&cell->napi_skbs);
+		__local_unlock_nested_bh(&cell->bh_lock);
 		if (!skb)
 			break;
 		napi_gro_receive(napi, skb);
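The hunk cuts off mid-loop; for readability, the unchanged tail of gro_cell_poll() (context from the surrounding file, not part of this diff) is the standard NAPI completion:

		work_done++;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}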
@@ -78,19 +87,34 @@ int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
 		__skb_queue_head_init(&cell->napi_skbs);
+		local_lock_init(&cell->bh_lock);
 
 		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
 
-		netif_napi_add(dev, &cell->napi, gro_cell_poll,
-			       NAPI_POLL_WEIGHT);
+		netif_napi_add(dev, &cell->napi, gro_cell_poll);
 		napi_enable(&cell->napi);
 	}
 	return 0;
 }
 EXPORT_SYMBOL(gro_cells_init);
 
+struct percpu_free_defer {
+	struct rcu_head rcu;
+	void __percpu	*ptr;
+};
+
+static void percpu_free_defer_callback(struct rcu_head *head)
+{
+	struct percpu_free_defer *defer;
+
+	defer = container_of(head, struct percpu_free_defer, rcu);
+	free_percpu(defer->ptr);
+	kfree(defer);
+}
+
 void gro_cells_destroy(struct gro_cells *gcells)
 {
+	struct percpu_free_defer *defer;
 	int i;
 
 	if (!gcells->cells)
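On the driver side, gro_cells_init() is typically called from the netdev's ndo_init hook; compare ip_tunnel_init(). A minimal sketch reusing the hypothetical my_tunnel from above:

static int my_tunnel_dev_init(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	t->dev = dev;
	/* Allocates the per-CPU cells and registers one NAPI per CPU. */
	return gro_cells_init(&t->gro_cells, dev);
}

The percpu_free_defer wrapper added above exists because kfree_rcu() can only hand memory to kfree(), while ->cells must be released with free_percpu(); hence the small, separately kmalloc()ed rcu_head carrier.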
@@ -99,10 +123,26 @@ void gro_cells_destroy(struct gro_cells *gcells)
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
 		napi_disable(&cell->napi);
-		netif_napi_del(&cell->napi);
+		__netif_napi_del(&cell->napi);
 		__skb_queue_purge(&cell->napi_skbs);
 	}
-	free_percpu(gcells->cells);
+	/* We need to observe an rcu grace period before freeing ->cells,
+	 * because netpoll could access dev->napi_list under rcu protection.
+	 * Try hard using call_rcu() instead of synchronize_rcu(),
+	 * because we might be called from cleanup_net(), and we
+	 * definitely do not want to block this critical task.
+	 */
+	defer = kmalloc(sizeof(*defer), GFP_KERNEL | __GFP_NOWARN);
+	if (likely(defer)) {
+		defer->ptr = gcells->cells;
+		call_rcu(&defer->rcu, percpu_free_defer_callback);
+	} else {
+		/* We do not hold RTNL at this point, synchronize_net()
+		 * would not be able to expedite this sync.
+		 */
+		synchronize_rcu_expedited();
+		free_percpu(gcells->cells);
+	}
 	gcells->cells = NULL;
 }
 EXPORT_SYMBOL(gro_cells_destroy);
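The matching teardown runs from ndo_uninit; again a hedged sketch with the hypothetical my_tunnel:

static void my_tunnel_dev_uninit(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	/* May be reached from cleanup_net() when a namespace dies,
	 * which is exactly why gro_cells_destroy() prefers call_rcu()
	 * over blocking in synchronize_rcu().
	 */
	gro_cells_destroy(&t->gro_cells);
}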