Diffstat (limited to 'drivers/net/usb')
 drivers/net/usb/r8152.c | 85 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 63 insertions(+), 22 deletions(-)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0c13d9950cd8..1231bf365796 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2449,7 +2449,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
}
}
- if (list_empty(&tp->rx_done))
+ if (list_empty(&tp->rx_done) || work_done >= budget)
goto out1;
clear_bit(RX_EPROTO, &tp->flags);
@@ -2465,6 +2465,15 @@ static int rx_bottom(struct r8152 *tp, int budget)
struct urb *urb;
u8 *rx_data;
+ /* A USB bulk transfer may contain many packets, so the
+ * total number of packets may exceed the budget. Deal with
+ * all packets in the current bulk transfer, and don't handle
+ * the next bulk transfer until the next schedule, if the
+ * budget is exhausted.
+ */
+ if (work_done >= budget)
+ break;
+
list_del_init(cursor);
agg = list_entry(cursor, struct rx_agg, list);
@@ -2481,12 +2490,11 @@ static int rx_bottom(struct r8152 *tp, int budget)
while (urb->actual_length > len_used) {
struct net_device *netdev = tp->netdev;
struct net_device_stats *stats = &netdev->stats;
- unsigned int pkt_len, rx_frag_head_sz;
+ unsigned int pkt_len, rx_frag_head_sz, len;
struct sk_buff *skb;
+ bool use_frags;
- /* limit the skb numbers for rx_queue */
- if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
- break;
+ WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);
pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
if (pkt_len < ETH_ZLEN)
@@ -2497,45 +2505,77 @@ static int rx_bottom(struct r8152 *tp, int budget)
break;
pkt_len -= ETH_FCS_LEN;
+ len = pkt_len;
rx_data += sizeof(struct rx_desc);
- if (!agg_free || tp->rx_copybreak > pkt_len)
- rx_frag_head_sz = pkt_len;
+ if (!agg_free || tp->rx_copybreak > len)
+ use_frags = false;
else
- rx_frag_head_sz = tp->rx_copybreak;
+ use_frags = true;
+
+ if (use_frags) {
+ /* If the budget is exhausted, the packet
+ * is queued in the driver instead, so
+ * napi_gro_frags() won't be called and
+ * napi_get_frags() can't be used here.
+ */
+ if (work_done >= budget) {
+ rx_frag_head_sz = tp->rx_copybreak;
+ skb = napi_alloc_skb(napi,
+ rx_frag_head_sz);
+ } else {
+ rx_frag_head_sz = 0;
+ skb = napi_get_frags(napi);
+ }
+ } else {
+ rx_frag_head_sz = 0;
+ skb = napi_alloc_skb(napi, len);
+ }
- skb = napi_alloc_skb(napi, rx_frag_head_sz);
if (!skb) {
stats->rx_dropped++;
goto find_next_rx;
}
skb->ip_summed = r8152_rx_csum(tp, rx_desc);
- memcpy(skb->data, rx_data, rx_frag_head_sz);
- skb_put(skb, rx_frag_head_sz);
- pkt_len -= rx_frag_head_sz;
- rx_data += rx_frag_head_sz;
- if (pkt_len) {
+ rtl_rx_vlan_tag(rx_desc, skb);
+
+ if (use_frags) {
+ if (rx_frag_head_sz) {
+ memcpy(skb->data, rx_data,
+ rx_frag_head_sz);
+ skb_put(skb, rx_frag_head_sz);
+ len -= rx_frag_head_sz;
+ rx_data += rx_frag_head_sz;
+ skb->protocol = eth_type_trans(skb,
+ netdev);
+ }
+
skb_add_rx_frag(skb, 0, agg->page,
agg_offset(agg, rx_data),
- pkt_len,
- SKB_DATA_ALIGN(pkt_len));
+ len, SKB_DATA_ALIGN(len));
get_page(agg->page);
+ } else {
+ memcpy(skb->data, rx_data, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
}
- skb->protocol = eth_type_trans(skb, netdev);
- rtl_rx_vlan_tag(rx_desc, skb);
if (work_done < budget) {
+ if (use_frags)
+ napi_gro_frags(napi);
+ else
+ napi_gro_receive(napi, skb);
+
work_done++;
stats->rx_packets++;
- stats->rx_bytes += skb->len;
- napi_gro_receive(napi, skb);
+ stats->rx_bytes += pkt_len;
} else {
__skb_queue_tail(&tp->rx_queue, skb);
}
find_next_rx:
- rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN);
+ rx_data = rx_agg_align(rx_data + len + ETH_FCS_LEN);
rx_desc = (struct rx_desc *)rx_data;
len_used = agg_offset(agg, rx_data);
len_used += sizeof(struct rx_desc);
@@ -2564,9 +2604,10 @@ submit:
}
}
+ /* Splice the remaining list back to rx_done for the next schedule */
if (!list_empty(&rx_queue)) {
spin_lock_irqsave(&tp->rx_lock, flags);
- list_splice_tail(&rx_queue, &tp->rx_done);
+ list_splice(&rx_queue, &tp->rx_done);
spin_unlock_irqrestore(&tp->rx_lock, flags);
}
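
For readers following the control flow, here is a minimal userspace sketch (plain C, not kernel code) of the budget handling this patch introduces: each aggregated buffer from a bulk transfer can carry several packets, so the poll loop stops dequeuing further buffers once the budget is spent, but still walks the current buffer to completion and queues its overflow packets for the next NAPI schedule. The types and helpers (struct agg_buf, deliver_pkt, queue_pkt, rx_poll_sketch) are illustrative stand-ins, not symbols from r8152.c.

/* Sketch of the budget-limited receive loop; helper names are hypothetical. */
#include <stdio.h>

struct agg_buf {
	int npkts;		/* packets carried by this bulk transfer */
};

static void deliver_pkt(void)	{ printf("delivered\n"); }
static void queue_pkt(void)	{ printf("queued for next poll\n"); }

static int rx_poll_sketch(struct agg_buf *aggs, int naggs, int budget)
{
	int work_done = 0;

	for (int i = 0; i < naggs; i++) {
		/* Stop dequeuing new bulk transfers once the budget is gone. */
		if (work_done >= budget)
			break;

		for (int p = 0; p < aggs[i].npkts; p++) {
			/* Finish the current bulk transfer even if the budget
			 * runs out in the middle of it: overflow packets are
			 * queued rather than dropped.
			 */
			if (work_done < budget) {
				deliver_pkt();
				work_done++;
			} else {
				queue_pkt();
			}
		}
	}
	return work_done;
}

int main(void)
{
	struct agg_buf aggs[] = { { .npkts = 3 }, { .npkts = 4 }, { .npkts = 2 } };

	/* With budget 5, the second buffer is still finished (its last two
	 * packets are queued) and the third buffer is left for the next
	 * schedule, mirroring the list_splice() back onto rx_done above.
	 */
	printf("work_done = %d\n", rx_poll_sketch(aggs, 3, 5));
	return 0;
}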