author		John Fastabend <john.fastabend@gmail.com>	2017-12-07 09:56:42 -0800
committer	David S. Miller <davem@davemloft.net>		2017-12-08 13:32:26 -0500
commit		fd8e8d1a775d82f04215f4b884a1962774805346 (patch)
tree		09d5e3661eff4c23fe41b43a8a8fc423570624d6 /net/sched/sch_generic.c
parent		70e57d5e3f8ec7c482b92ef43e543d87134689ab (diff)
net: sched: check for frozen queue before skb_bad_txq check
I cannot think of any reason to pull the bad txq skb off the qdisc if the txq we plan to send this on is still frozen. So check for a frozen queue first and abort before dequeuing either the skb_bad_txq skb or the normal qdisc dequeue() skb.

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
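For background (not part of the patch): the "frozen or stopped" test being moved is the generic txq state check from include/linux/netdevice.h, which in kernels of this era reads approximately as follows.

	/* True when the driver has stopped the queue (XOFF) or the stack has
	 * frozen it (e.g. while holding the tx lock across a reset); an skb
	 * dequeued in either state cannot be transmitted and would only end
	 * up requeued on gso_skb or skb_bad_txq.
	 */
	static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
	{
		return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
	}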
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 84cef0570862..5ff93c2b5b99 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -204,7 +204,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 				 int *packets)
 {
 	const struct netdev_queue *txq = q->dev_queue;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 
 	*packets = 1;
 	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
@@ -248,12 +248,15 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
}
validate:
*validate = true;
+
+ if ((q->flags & TCQ_F_ONETXQUEUE) &&
+ netif_xmit_frozen_or_stopped(txq))
+ return skb;
+
skb = qdisc_dequeue_skb_bad_txq(q);
if (unlikely(skb))
goto bulk;
- if (!(q->flags & TCQ_F_ONETXQUEUE) ||
- !netif_xmit_frozen_or_stopped(txq))
- skb = q->dequeue(q);
+ skb = q->dequeue(q);
if (skb) {
bulk:
if (qdisc_may_bulk(q))
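Pieced together from the two hunks above, the tail of dequeue_skb() after this patch reads roughly as follows (surrounding context elided; comments added here for explanation, not taken from the source):

validate:
	*validate = true;

	/* New in this patch: when the single txq this qdisc feeds is frozen
	 * or stopped, abort before pulling an skb from either the bad-txq
	 * list or the qdisc itself. skb was initialized to NULL above (the
	 * other half of the patch), so the early return hands back "no
	 * packet" rather than a stale pointer.
	 */
	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq))
		return skb;

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb))
		goto bulk;
	skb = q->dequeue(q);	/* now unconditional; the inline frozen check moved up */
	...			/* bulk dequeue below this point is unchanged */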