author    Jakub Kicinski <jakub.kicinski@netronome.com>  2019-04-25 17:35:09 -0700
committer David S. Miller <davem@davemloft.net>          2019-04-27 20:17:19 -0400
commit    97e1caa517e22d62a283b876fb8aa5f4672c83dd
tree      49c0a9a8eaf6ec2c1dc548f0e68fcc2c2f8ed91b
parent    b2a20fd0725e8b259c528820033e29fdb3724549
net/tls: don't copy negative amounts of data in reencrypt
There is no guarantee the record starts before the skb frags. If we
don't check for this condition, the copy amount goes negative, leading
to reads and writes to random memory locations. Familiar hilarity
ensues.

Fixes: 4799ac81e52a ("tls: Add rx inline crypto offload")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
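For illustration, the following is a minimal userspace C sketch of the
hazard, not kernel code; the values of pagelen, offset, and full_len are
hypothetical stand-ins for skb_pagelen(skb), the record offset, and
rxm->full_len minus the AES-GCM tag size. It shows the unchecked
subtraction going negative and the guard this patch adds:

#include <stdio.h>

/* min_t() as in the kernel: cast both sides to the type, then compare. */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	/* Hypothetical case: the TLS record begins after the skb's
	 * page data, so there is nothing to copy from the pages. */
	int pagelen  = 100;	/* stands in for skb_pagelen(skb) */
	int offset   = 140;	/* record start, past the page data */
	int full_len = 64;	/* rxm->full_len minus the tag size */

	/* Old code: no range check, so copy goes negative (-40 here). */
	int copy = min_t(int, pagelen - offset, full_len);
	printf("unchecked copy = %d\n", copy);

	/* Patched code: copy only when page data overlaps the record. */
	if (pagelen > offset) {
		copy = min_t(int, pagelen - offset, full_len);
		printf("guarded copy = %d\n", copy);
	}
	return 0;
}

With a negative copy, the store into the skb reads and writes out of
range, and the subsequent offset += copy / buf += copy move both
positions backwards, compounding the corruption.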
Diffstat (limited to 'net')
-rw-r--r--  net/tls/tls_device.c | 14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index cc0256939eb6..96357060addc 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -628,14 +628,16 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 	else
 		err = 0;
 
-	copy = min_t(int, skb_pagelen(skb) - offset,
-		     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+	if (skb_pagelen(skb) > offset) {
+		copy = min_t(int, skb_pagelen(skb) - offset,
+			     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
 
-	if (skb->decrypted)
-		skb_store_bits(skb, offset, buf, copy);
+		if (skb->decrypted)
+			skb_store_bits(skb, offset, buf, copy);
 
-	offset += copy;
-	buf += copy;
+		offset += copy;
+		buf += copy;
+	}
 
 	skb_walk_frags(skb, skb_iter) {
 		copy = min_t(int, skb_iter->len,