author     Steffen Klassert <steffen.klassert@secunet.com>  2017-12-20 10:41:31 +0100
committer  Steffen Klassert <steffen.klassert@secunet.com>  2017-12-20 10:41:31 +0100
commit     3dca3f38cfb8efb8571040568cac7d0025fa5bb1 (patch)
tree       e1227751f873bae8a696222da5d9d3d1a1e070b9 /net/xfrm
parent     f39a5c01c3d24f2f61ec9d8c7d7e81f9aca506ce (diff)
xfrm: Separate ESP handling from segmentation for GRO packets.
We change the ESP GSO handlers to only segment the packets. The ESP handling and encryption are deferred to validate_xmit_xfrm(), where this is done for non-GRO packets too. This makes the code more robust and prepares for asynchronous crypto handling.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
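As an illustration of the deferred per-segment handling described above, the following is a minimal, self-contained userspace C sketch, not kernel code: it models how the segment walk in validate_xmit_xfrm() hands out ESP sequence numbers (one per plain segment, gso_segs per segment that is still GSO) and marks software fallback when the device offers no NETIF_F_HW_ESP. The struct seg type and assign_seq() helper are hypothetical stand-ins for the sk_buff list and its xfrm_offload metadata.

/*
 * Userspace model of the per-segment sequence numbering done in the new
 * validate_xmit_xfrm().  All names below are illustrative, not kernel APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct seg {
        struct seg *next;
        unsigned int gso_segs;     /* 0: plain segment, >0: still GSO */
        uint32_t seq_low;          /* assigned ESP sequence number */
        bool crypto_fallback;      /* software crypto requested */
};

static void assign_seq(struct seg *head, uint32_t seq, bool hw_esp)
{
        struct seg *s = head;

        do {
                struct seg *next = s->next;

                s->seq_low = seq;
                s->crypto_fallback = !hw_esp;

                /* A segment that is still GSO will be split further by the
                 * device/driver, so it consumes gso_segs sequence numbers. */
                if (s->gso_segs)
                        seq += s->gso_segs;
                else
                        seq++;

                s = next;
        } while (s);
}

int main(void)
{
        struct seg c = { NULL, 0, 0, false };
        struct seg b = { &c, 3, 0, false };   /* still GSO, 3 inner segments */
        struct seg a = { &b, 0, 0, false };

        assign_seq(&a, 1000, false);          /* no hardware ESP offload */

        for (struct seg *s = &a; s; s = s->next)
                printf("seq=%u fallback=%d\n",
                       (unsigned int)s->seq_low, s->crypto_fallback);

        return 0;   /* prints seq 1000, 1001, 1004, all with fallback=1 */
}

Because each segment carries its own sequence number and flags in its offload metadata, the actual encryption can later complete asynchronously per segment, which is the preparation the commit message refers to.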
Diffstat (limited to 'net/xfrm')
-rw-r--r--  net/xfrm/xfrm_device.c | 87
1 file changed, 77 insertions(+), 10 deletions(-)
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 00641b611aed..a5a7a716c465 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -23,32 +23,99 @@
 #include <linux/notifier.h>
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
         int err;
+        __u32 seq;
         struct xfrm_state *x;
+        struct sk_buff *skb2;
+        netdev_features_t esp_features = features;
         struct xfrm_offload *xo = xfrm_offload(skb);
-        if (skb_is_gso(skb))
-                return 0;
+        if (!xo)
+                return skb;
-        if (xo) {
-                x = skb->sp->xvec[skb->sp->len - 1];
-                if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
-                        return 0;
+        if (!(features & NETIF_F_HW_ESP))
+                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+        x = skb->sp->xvec[skb->sp->len - 1];
+        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+                return skb;
+
+        if (skb_is_gso(skb)) {
+                struct net_device *dev = skb->dev;
+
+                if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+                        struct sk_buff *segs;
+
+                        /* Packet got rerouted, fixup features and segment it. */
+                        esp_features = esp_features & ~(NETIF_F_HW_ESP
+                                                        | NETIF_F_GSO_ESP);
+                        segs = skb_gso_segment(skb, esp_features);
+                        if (IS_ERR(segs)) {
+                                XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                                kfree_skb(skb);
+                                return NULL;
+                        } else {
+                                consume_skb(skb);
+                                skb = segs;
+                        }
+                } else {
+                        return skb;
+                }
+        }
+
+        if (!skb->next) {
                 x->outer_mode->xmit(x, skb);
-                err = x->type_offload->xmit(x, skb, features);
+                err = x->type_offload->xmit(x, skb, esp_features);
                 if (err) {
                         XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
-                        return err;
+                        kfree_skb(skb);
+                        return NULL;
                 }
                 skb_push(skb, skb->data - skb_mac_header(skb));
+
+                return skb;
         }
-        return 0;
+        skb2 = skb;
+        seq = xo->seq.low;
+
+        do {
+                struct sk_buff *nskb = skb2->next;
+
+                xo = xfrm_offload(skb2);
+                xo->flags |= XFRM_GSO_SEGMENT;
+                xo->seq.low = seq;
+                xo->seq.hi = xfrm_replay_seqhi(x, seq);
+
+                if (!(features & NETIF_F_HW_ESP))
+                        xo->flags |= CRYPTO_FALLBACK;
+
+                x->outer_mode->xmit(x, skb2);
+
+                err = x->type_offload->xmit(x, skb2, esp_features);
+                if (err) {
+                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                        skb2->next = nskb;
+                        kfree_skb_list(skb2);
+                        return NULL;
+                }
+
+                if (!skb_is_gso(skb2))
+                        seq++;
+                else
+                        seq += skb_shinfo(skb2)->gso_segs;
+
+                skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+                skb2 = nskb;
+        } while (skb2);
+
+        return skb;
 }
 EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
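Note on the interface change visible in this hunk: validate_xmit_xfrm() now returns a struct sk_buff * (the original packet, a freshly segmented list, or NULL after dropping everything itself) rather than an int error code. Below is a hedged userspace sketch of the calling convention this implies; struct pkt and model_validate_xmit_xfrm() are illustrative stand-ins, and the real call site in the core transmit path is not part of this patch.

/* Userspace model of the new calling convention only; not kernel code. */
#include <stddef.h>
#include <stdio.h>

struct pkt {
        struct pkt *next;
        int len;
};

/* Stand-in for validate_xmit_xfrm(): may return the packet unchanged,
 * a longer list of segments, or NULL after dropping (and freeing) it. */
static struct pkt *model_validate_xmit_xfrm(struct pkt *p)
{
        return p;
}

int main(void)
{
        struct pkt b = { NULL, 1400 };
        struct pkt a = { &b, 1400 };
        struct pkt *p = model_validate_xmit_xfrm(&a);

        if (!p)
                return 1;               /* list was already freed inside */

        for (; p; p = p->next)          /* hand each segment to the driver */
                printf("xmit %d bytes\n", p->len);

        return 0;
}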