author	Xie He <xie.he.0141@gmail.com>	2021-04-02 02:30:00 -0700
committer	David S. Miller <davem@davemloft.net>	2021-04-05 11:42:12 -0700
commit	514e1150da9cd8d7978d990a353636cf1a7a87c2 (patch)
tree	64715b8f2fbe5cb42256ba74c1974f6db3f008e0 /drivers/net/wan
parent	7d42e84eb99daf9b7feef37e8f2ea1eaf975346b (diff)
net: x25: Queue received packets in the drivers instead of per-CPU queues
X.25 Layer 3 (the Packet Layer) expects layer 2 to provide a reliable datalink service such that no packets are reordered or dropped. And X.25 Layer 2 (the LAPB layer) is indeed designed to provide such a service.

However, this reliability is not preserved when a driver calls "netif_rx" to deliver the received packets to layer 3, because "netif_rx" will put the packets into per-CPU queues before they are delivered to layer 3. If there are multiple CPUs, the order of the packets may not be preserved. The per-CPU queues may also drop packets if there are too many.

Therefore, we should not call "netif_rx" to let it queue the packets. Instead, we should use our own queue that won't reorder or drop packets.

This patch changes all X.25 drivers to use their own queues instead of calling "netif_rx". The patch also documents this requirement in the "x25-iface" documentation.

Cc: Martin Schiller <ms@dev.tdt.de>
Signed-off-by: Xie He <xie.he.0141@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
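[Editor's note] For readers skimming the diff below, this is the receive pattern the patch establishes in hdlc_x25.c, condensed into a minimal sketch (struct my_priv and the helper names are hypothetical; the real fields are added to struct x25_state): the driver owns an sk_buff_head, the LAPB callbacks only enqueue and schedule a tasklet, and the tasklet drains the queue in FIFO order on a single CPU via netif_receive_skb_core(), so nothing is reordered or dropped on the way to layer 3.

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical private struct; the patch adds the same two fields
	 * to struct x25_state (and, with NAPI, to struct lapbethdev). */
	struct my_priv {
		struct sk_buff_head rx_queue;
		struct tasklet_struct rx_tasklet;
	};

	/* Tasklet body: drain the queue in FIFO order in softirq context,
	 * so layer 3 sees packets exactly as layer 2 delivered them. */
	static void my_rx_drain(struct tasklet_struct *t)
	{
		struct my_priv *priv = from_tasklet(priv, t, rx_tasklet);
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&priv->rx_queue)))
			netif_receive_skb_core(skb);
	}

	/* Enqueue path, used where the driver previously called netif_rx();
	 * netif_rx() feeds per-CPU backlog queues that may drop or reorder. */
	static void my_deliver(struct my_priv *priv, struct sk_buff *skb)
	{
		skb_queue_tail(&priv->rx_queue, skb);
		tasklet_schedule(&priv->rx_tasklet);
	}

	/* One-time setup; at close time the driver must also run
	 * tasklet_kill(&priv->rx_tasklet), as x25_close() does below. */
	static void my_rx_init(struct my_priv *priv)
	{
		skb_queue_head_init(&priv->rx_queue);
		tasklet_setup(&priv->rx_tasklet, my_rx_drain);
	}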
Diffstat (limited to 'drivers/net/wan')
-rw-r--r--	drivers/net/wan/hdlc_x25.c	30
-rw-r--r--	drivers/net/wan/lapbether.c	49
2 files changed, 71 insertions, 8 deletions
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 5a6a945f6c81..ba8c36c7ea91 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -25,6 +25,8 @@ struct x25_state {
x25_hdlc_proto settings;
bool up;
spinlock_t up_lock; /* Protects "up" */
+ struct sk_buff_head rx_queue;
+ struct tasklet_struct rx_tasklet;
};
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
@@ -34,14 +36,27 @@ static struct x25_state *state(hdlc_device *hdlc)
return hdlc->state;
}
+static void x25_rx_queue_kick(struct tasklet_struct *t)
+{
+ struct x25_state *x25st = from_tasklet(x25st, t, rx_tasklet);
+ struct sk_buff *skb = skb_dequeue(&x25st->rx_queue);
+
+ while (skb) {
+ netif_receive_skb_core(skb);
+ skb = skb_dequeue(&x25st->rx_queue);
+ }
+}
+
/* These functions are callbacks called by LAPB layer */
static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
{
+ struct x25_state *x25st = state(dev_to_hdlc(dev));
struct sk_buff *skb;
unsigned char *ptr;
- if ((skb = dev_alloc_skb(1)) == NULL) {
+ skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
+ if (!skb) {
netdev_err(dev, "out of memory\n");
return;
}
@@ -50,7 +65,9 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
*ptr = code;
skb->protocol = x25_type_trans(skb, dev);
- netif_rx(skb);
+
+ skb_queue_tail(&x25st->rx_queue, skb);
+ tasklet_schedule(&x25st->rx_tasklet);
}
@@ -71,6 +88,7 @@ static void x25_disconnected(struct net_device *dev, int reason)
static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
+ struct x25_state *x25st = state(dev_to_hdlc(dev));
unsigned char *ptr;
if (skb_cow(skb, 1)) {
@@ -84,7 +102,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
*ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
- return netif_rx(skb);
+
+ skb_queue_tail(&x25st->rx_queue, skb);
+ tasklet_schedule(&x25st->rx_tasklet);
+ return NET_RX_SUCCESS;
}
@@ -223,6 +244,7 @@ static void x25_close(struct net_device *dev)
spin_unlock_bh(&x25st->up_lock);
lapb_unregister(dev);
+ tasklet_kill(&x25st->rx_tasklet);
}
@@ -338,6 +360,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
memcpy(&state(hdlc)->settings, &new_settings, size);
state(hdlc)->up = false;
spin_lock_init(&state(hdlc)->up_lock);
+ skb_queue_head_init(&state(hdlc)->rx_queue);
+ tasklet_setup(&state(hdlc)->rx_tasklet, x25_rx_queue_kick);
/* There's no header_ops so hard_header_len should be 0. */
dev->hard_header_len = 0;
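[Editor's note] A side note on the allocation change in x25_connect_disconnect() above (my reading; the commit message does not spell this out): the one-byte status skb is built in LAPB callback context, so the allocation must remain atomic, and __GFP_NOMEMALLOC keeps this non-critical status frame from dipping into the emergency memory reserves. A sketch with a hypothetical helper name:

	#include <linux/gfp.h>
	#include <linux/skbuff.h>

	/* Hypothetical helper showing the flags used for the one-byte
	 * X25_IFACE_* status frames (a sketch, not part of the patch). */
	static struct sk_buff *x25_status_alloc(void)
	{
		/* GFP_ATOMIC: may run in softirq/timer context, cannot sleep.
		 * __GFP_NOMEMALLOC: never touch emergency reserves for a
		 * best-effort status indication. */
		return __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
	}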
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 45d74285265a..59646865a3a4 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -53,6 +53,8 @@ struct lapbethdev {
struct net_device *axdev; /* lapbeth device (lapb#) */
bool up;
spinlock_t up_lock; /* Protects "up" */
+ struct sk_buff_head rx_queue;
+ struct napi_struct napi;
};
static LIST_HEAD(lapbeth_devices);
@@ -83,6 +85,26 @@ static __inline__ int dev_is_ethdev(struct net_device *dev)
/* ------------------------------------------------------------------------ */
+static int lapbeth_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct lapbethdev *lapbeth = container_of(napi, struct lapbethdev,
+ napi);
+ struct sk_buff *skb;
+ int processed = 0;
+
+ for (; processed < budget; ++processed) {
+ skb = skb_dequeue(&lapbeth->rx_queue);
+ if (!skb)
+ break;
+ netif_receive_skb_core(skb);
+ }
+
+ if (processed < budget)
+ napi_complete(napi);
+
+ return processed;
+}
+
/*
* Receive a LAPB frame via an ethernet interface.
*/
@@ -135,6 +157,7 @@ drop:
static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
unsigned char *ptr;
if (skb_cow(skb, 1)) {
@@ -148,7 +171,10 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
*ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
- return netif_rx(skb);
+
+ skb_queue_tail(&lapbeth->rx_queue, skb);
+ napi_schedule(&lapbeth->napi);
+ return NET_RX_SUCCESS;
}
/*
@@ -233,8 +259,9 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
static void lapbeth_connected(struct net_device *dev, int reason)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
unsigned char *ptr;
- struct sk_buff *skb = dev_alloc_skb(1);
+ struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
if (!skb) {
pr_err("out of memory\n");
@@ -245,13 +272,16 @@ static void lapbeth_connected(struct net_device *dev, int reason)
*ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, dev);
- netif_rx(skb);
+
+ skb_queue_tail(&lapbeth->rx_queue, skb);
+ napi_schedule(&lapbeth->napi);
}
static void lapbeth_disconnected(struct net_device *dev, int reason)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
unsigned char *ptr;
- struct sk_buff *skb = dev_alloc_skb(1);
+ struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
if (!skb) {
pr_err("out of memory\n");
@@ -262,7 +292,9 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
*ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, dev);
- netif_rx(skb);
+
+ skb_queue_tail(&lapbeth->rx_queue, skb);
+ napi_schedule(&lapbeth->napi);
}
/*
@@ -293,6 +325,8 @@ static int lapbeth_open(struct net_device *dev)
struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
+ napi_enable(&lapbeth->napi);
+
if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
pr_err("lapb_register error: %d\n", err);
return -ENODEV;
@@ -317,6 +351,8 @@ static int lapbeth_close(struct net_device *dev)
if ((err = lapb_unregister(dev)) != LAPB_OK)
pr_err("lapb_unregister error: %d\n", err);
+ napi_disable(&lapbeth->napi);
+
return 0;
}
@@ -374,6 +410,9 @@ static int lapbeth_new_device(struct net_device *dev)
lapbeth->up = false;
spin_lock_init(&lapbeth->up_lock);
+ skb_queue_head_init(&lapbeth->rx_queue);
+ netif_napi_add(ndev, &lapbeth->napi, lapbeth_napi_poll, 16);
+
rc = -EIO;
if (register_netdevice(ndev))
goto fail;
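[Editor's note] To summarize the lapbether side: instead of a tasklet, it drives the same driver-owned queue through NAPI (presumably a natural fit since lapbeth sits on top of an Ethernet device). The poll callback follows the standard NAPI contract: consume at most budget packets, return the count processed, and call napi_complete() only when the queue drained before the budget ran out, so the core keeps polling whenever work remains. A condensed sketch with hypothetical names, mirroring the changes above:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct my_priv {
		struct sk_buff_head rx_queue;
		struct napi_struct napi;
	};

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		struct sk_buff *skb;
		int done = 0;

		/* Returning 'budget' (queue not yet empty) tells the core
		 * to call us again; staying under it means we may stop. */
		while (done < budget && (skb = skb_dequeue(&priv->rx_queue))) {
			netif_receive_skb_core(skb);
			done++;
		}

		if (done < budget)
			napi_complete(napi);

		return done;
	}

	/* Wiring at device creation, as in lapbeth_new_device() above; the
	 * enqueue side pairs skb_queue_tail() with napi_schedule(). */
	static void my_rx_init(struct net_device *ndev, struct my_priv *priv)
	{
		skb_queue_head_init(&priv->rx_queue);
		netif_napi_add(ndev, &priv->napi, my_poll, 16);
	}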