author	Tom Herbert <therbert@google.com>	2014-01-15 08:58:06 -0800
committer	David S. Miller <davem@davemloft.net>	2014-01-16 16:22:54 -0800
commit	0b4cec8c2e872a6bc9146a001d7532f31023aed5 (patch)
tree	4f8553bbaac648dffb40b33c4a1422a6be066724 /net/core
parent	57bdf7f42be05640f8080b06844c94367ad1884b (diff)
net: Check skb->rxhash in gro_receive
When initializing a gro_list for a packet, first check the rxhash of the incoming skb against that of the skbs in the list. This should be a very strong indicator of whether the flow is going to be matched, and potentially allows a lot of other checks to be short-circuited. Use skb_get_hash_raw() so that we don't force the hash to be calculated.

Tested by running netperf 200 TCP_STREAMs between two machines with GRO, HW rxhash, and 1G. Saw no performance degradation and a slight reduction of time spent in dev_gro_receive.

Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
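For readers outside the kernel tree, the short-circuit pattern the commit describes can be sketched in plain C. This is a minimal, illustrative example only: struct flow_key and same_flow() are made-up names for this sketch, not kernel API; the underlying idea of comparing the cheap precomputed hash before the expensive field-by-field comparison is the one implemented in gro_list_prepare() below.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Illustrative flow key: a precomputed hash plus the fields that the
 * expensive comparison would otherwise inspect one by one. */
struct flow_key {
	uint32_t hash;      /* e.g. a hardware rxhash; 0 if not set */
	uint16_t vlan_tci;
	uint8_t  mac[12];   /* destination + source MAC addresses */
};

/* Compare the cheap hash first; only when the hashes agree (or are both
 * zero, meaning no hash was provided) fall back to the byte-wise
 * comparison, mirroring the early "continue" taken in gro_list_prepare()
 * when the hashes differ. */
static bool same_flow(const struct flow_key *a, const struct flow_key *b)
{
	if (a->hash != b->hash)
		return false;   /* hashes differ: cannot be the same flow */
	if (a->vlan_tci != b->vlan_tci)
		return false;
	return memcmp(a->mac, b->mac, sizeof(a->mac)) == 0;
}

The same reasoning applies in the patch itself: skb_get_hash_raw() just reads the stored rxhash without forcing a software hash computation, so the pre-check costs a single integer compare per list entry.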
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	9
1 file changed, 8 insertions, 1 deletion
diff --git a/net/core/dev.c b/net/core/dev.c
index 995755733997..b2c1869b04e3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3821,10 +3821,18 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
 	unsigned int maclen = skb->dev->hard_header_len;
+	u32 hash = skb_get_hash_raw(skb);
 
 	for (p = napi->gro_list; p; p = p->next) {
 		unsigned long diffs;
 
+		NAPI_GRO_CB(p)->flush = 0;
+
+		if (hash != skb_get_hash_raw(p)) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+
 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
 		diffs |= p->vlan_tci ^ skb->vlan_tci;
 		if (maclen == ETH_HLEN)
@@ -3835,7 +3843,6 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
 				       skb_gro_mac_header(skb),
 				       maclen);
 		NAPI_GRO_CB(p)->same_flow = !diffs;
-		NAPI_GRO_CB(p)->flush = 0;
 	}
 }