authorJakub Kicinski <kuba@kernel.org>2024-05-13 14:44:13 -0700
committerJakub Kicinski <kuba@kernel.org>2024-05-13 14:44:13 -0700
commite6e43570fd98ac609c903bc91d6db163ba2e82d0 (patch)
tree1a1e029a9f9feb22836769c4ff8a4b715fd39fd6 /include
parent9af9b891fc6b44bb336933d63be526ca5cc6ee25 (diff)
parentbc21faefbe58feff0ae7cae8b52c4145073e2208 (diff)
Merge branch 'net-gro-remove-network_header-use-move-p-flush-flush_id-calculations-to-l4'
Richard Gobert says:

====================
net: gro: remove network_header use, move p->{flush/flush_id} calculations to L4

The cb fields network_offset and inner_network_offset are used instead of
skb->network_header throughout GRO.

These fields are then leveraged in the next commit to remove flush_id state
from napi_gro_cb, and stateful code in {ipv6,inet}_gro_receive which may be
unnecessarily complicated due to encapsulation support in GRO. These fields
are checked in L4 instead.

3rd patch adds tests for different flush_id flows in GRO.
====================

Link: https://lore.kernel.org/r/20240509190819.2985-1-richardbgobert@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
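The helpers this diff adds read L3 offsets that the receive handlers record ahead of time. Since this page is limited to include/, the producer and consumer hunks live elsewhere in the series; roughly (a paraphrased sketch, not the literal patch), the two sides cooperate like this:

	/* Sketch: inet_gro_receive()/ipv6_gro_receive() record the network
	 * header offset for the current encapsulation level.  encap_mark is
	 * 0 for the outer header and becomes 1 once a tunnel gro_receive has
	 * run, so it naturally selects the outer vs. inner slot.
	 */
	NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] =
		skb_gro_offset(skb);

	/* Sketch: an L4 handler such as tcp_gro_receive() then folds the L3
	 * header comparison into its own flush decision, instead of relying
	 * on flush_id state precomputed at L3.
	 */
	flush |= gro_receive_network_flush(th, th2, p);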
Diffstat (limited to 'include')
-rw-r--r--  include/net/gro.h | 85
1 file changed, 75 insertions(+), 10 deletions(-)
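The hunks below index an L3 offsets union in napi_gro_cb that is only partially visible in the context lines. Reconstructed from the helpers that use it (network_offsets[encap_mark] on the receive side, network_offset/inner_network_offset in gro_receive_network_flush()), it is expected to look like:

	/* L3 offsets: the same storage, addressable by name or by encap level */
	union {
		struct {
			u16 network_offset;
			u16 inner_network_offset;
		};
		u16 network_offsets[2];
	};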
diff --git a/include/net/gro.h b/include/net/gro.h
index 5df8bf318197..f13634b1f4c1 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -36,15 +36,15 @@ struct napi_gro_cb {
/* This is non-zero if the packet cannot be merged with the new skb. */
u16 flush;
- /* Save the IP ID here and check when we get to the transport layer */
- u16 flush_id;
-
/* Number of segments aggregated. */
u16 count;
/* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */
u16 proto;
+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ __wsum csum;
+
/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE 1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
@@ -75,8 +75,8 @@ struct napi_gro_cb {
/* Used in GRE, set in fou/gue_gro_receive */
u8 is_fou:1;
- /* Used to determine if flush_id can be ignored */
- u8 is_atomic:1;
+ /* Used to determine if ipid_offset can be ignored */
+ u8 ip_fixedid:1;
/* Number of gro_receive callbacks this packet already went through */
u8 recursion_counter:4;
@@ -85,9 +85,6 @@ struct napi_gro_cb {
u8 is_flist:1;
);
- /* used to support CHECKSUM_COMPLETE for tunneling protocols */
- __wsum csum;
-
/* L3 offsets */
union {
struct {
@@ -181,12 +178,17 @@ static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
return ptr;
}
+static inline int skb_gro_receive_network_offset(const struct sk_buff *skb)
+{
+ return NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark];
+}
+
static inline void *skb_gro_network_header(const struct sk_buff *skb)
{
if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
- return skb_gro_header_fast(skb, skb_network_offset(skb));
+ return skb_gro_header_fast(skb, skb_gro_receive_network_offset(skb));
- return skb_network_header(skb);
+ return skb->data + skb_gro_receive_network_offset(skb);
}
static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
@@ -437,6 +439,69 @@ static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
skb_gro_len(skb), proto, 0));
}
+static inline int inet_gro_flush(const struct iphdr *iph, const struct iphdr *iph2,
+ struct sk_buff *p, bool outer)
+{
+ const u32 id = ntohl(*(__be32 *)&iph->id);
+ const u32 id2 = ntohl(*(__be32 *)&iph2->id);
+ const u16 ipid_offset = (id >> 16) - (id2 >> 16);
+ const u16 count = NAPI_GRO_CB(p)->count;
+ const u32 df = id & IP_DF;
+ int flush;
+
+ /* All fields must match except length and checksum. */
+ flush = (iph->ttl ^ iph2->ttl) | (iph->tos ^ iph2->tos) | (df ^ (id2 & IP_DF));
+
+ if (flush | (outer && df))
+ return flush;
+
+ /* When we receive our second frame we can make a decision on if we
+ * continue this flow as an atomic flow with a fixed ID or if we use
+ * an incrementing ID.
+ */
+ if (count == 1 && df && !ipid_offset)
+ NAPI_GRO_CB(p)->ip_fixedid = true;
+
+ return ipid_offset ^ (count * !NAPI_GRO_CB(p)->ip_fixedid);
+}
+
+static inline int ipv6_gro_flush(const struct ipv6hdr *iph, const struct ipv6hdr *iph2)
+{
+ /* <Version:4><Traffic_Class:8><Flow_Label:20> */
+ __be32 first_word = *(__be32 *)iph ^ *(__be32 *)iph2;
+
+ /* Flush if Traffic Class or Hop Limit fields are different. */
+ return !!((first_word & htonl(0x0FF00000)) |
+ (__force __be32)(iph->hop_limit ^ iph2->hop_limit));
+}
+
+static inline int __gro_receive_network_flush(const void *th, const void *th2,
+ struct sk_buff *p, const u16 diff,
+ bool outer)
+{
+ const void *nh = th - diff;
+ const void *nh2 = th2 - diff;
+
+ if (((struct iphdr *)nh)->version == 6)
+ return ipv6_gro_flush(nh, nh2);
+ else
+ return inet_gro_flush(nh, nh2, p, outer);
+}
+
+static inline int gro_receive_network_flush(const void *th, const void *th2,
+ struct sk_buff *p)
+{
+ const bool encap_mark = NAPI_GRO_CB(p)->encap_mark;
+ int off = skb_transport_offset(p);
+ int flush;
+
+ flush = __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->network_offset, encap_mark);
+ if (encap_mark)
+ flush |= __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->inner_network_offset, false);
+
+ return flush;
+}
+
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
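To make the new ID rule concrete, the following stand-alone user-space program (an illustration only; struct hdr, ipid_flush and the sample IDs are made up here, not kernel code) reproduces just the ipid_offset/ip_fixedid arithmetic from inet_gro_flush() above: a DF flow whose ID does not move on the second segment is latched as fixed-ID, and any later ID jump, or an increment that does not match the aggregated segment count, yields a nonzero flush value.

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>
	#include <arpa/inet.h>

	#define IP_DF 0x4000	/* don't-fragment bit, tested in host order */

	struct hdr {		/* stand-in for the id/frag_off words of struct iphdr */
		uint16_t id;	/* network order */
		uint16_t frag_off;
	};

	/* Mirrors only the ipid_offset/ip_fixedid part of inet_gro_flush();
	 * the ttl/tos/DF equality checks are omitted for brevity.
	 */
	static int ipid_flush(const struct hdr *h, const struct hdr *h2,
			      uint16_t count, bool *fixedid)
	{
		uint16_t ipid_offset = ntohs(h->id) - ntohs(h2->id);
		bool df = ntohs(h->frag_off) & IP_DF;

		/* 2nd segment of a DF flow with an unchanged ID: fixed-ID flow */
		if (count == 1 && df && !ipid_offset)
			*fixedid = true;

		/* flush unless the ID stayed fixed or advanced by exactly count */
		return ipid_offset ^ (count * !*fixedid);
	}

	int main(void)
	{
		bool fixedid = false;
		struct hdr held = { htons(100), htons(IP_DF) };
		struct hdr incoming = held;	/* same ID, DF set */

		/* DF + unchanged ID on the 2nd segment: latched as fixed-ID */
		printf("fixed-ID, count=1:     flush=%d\n",
		       ipid_flush(&incoming, &held, 1, &fixedid));

		/* an ID jump of 3 in a fixed-ID flow: nonzero, so flush */
		incoming.id = htons(103);
		printf("fixed-ID, ID jump:     flush=%d\n",
		       ipid_flush(&incoming, &held, 5, &fixedid));

		/* incrementing flow: ID advanced by exactly count, no flush */
		bool fixedid2 = false;
		struct hdr held2 = { htons(200), 0 };
		struct hdr inc2 = { htons(205), 0 };
		printf("incrementing, count=5: flush=%d\n",
		       ipid_flush(&inc2, &held2, 5, &fixedid2));
		return 0;
	}

Built with plain `cc`, this should print flush=0 for the two well-behaved cases and a nonzero value for the ID jump, matching the decision the real helper feeds back to the L4 flush logic.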