author     Jason Wang <jasowang@redhat.com>       2013-11-13 14:00:39 +0800
committer  David S. Miller <davem@davemloft.net>  2013-11-14 16:05:27 -0500
commit     96f8d9ecf227638c89f98ccdcdd50b569891976c (patch)
tree       8c6c163cd3914426873843c0fdb1c850dd5fe053 /drivers/net
parent     6115c11fe1a5a636ac99fc823b00df4ff3c0674e (diff)
tuntap: limit head length of skb allocated
We currently use hdr_len as a hint of the head length, which is advertised by the guest. But when the guest advertises a very big value, it can lead to a 64K+ kmalloc() allocation, which has a very high chance of failing when host memory is fragmented or under heavy stress. A huge hdr_len also reduces the effectiveness of zerocopy, or even disables it when a GSO skb has been linearized in the guest.

To solve those issues, this patch introduces an upper limit (PAGE_SIZE) on the head, which guarantees an order-0 allocation each time.

Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
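As a quick illustration of the idea (this is not the kernel code), here is a minimal standalone sketch of the clamp; PAGE_SIZE_GUESS, SKB_OVERHEAD_GUESS, good_linear() and clamp_linear() are made-up stand-ins for the kernel's PAGE_SIZE, skb_shared_info overhead, SKB_MAX_HEAD() and the clamp added to tun_get_user() in the diff below.

/*
 * Standalone sketch only: shows how a guest-supplied hdr_len gets
 * bounded so the linear head always fits in a single page.
 * The constants and helpers are illustrative stand-ins, not kernel APIs.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE_GUESS     4096u  /* assumption: 4K pages */
#define SKB_OVERHEAD_GUESS   320u  /* assumption: room for skb_shared_info etc. */

/* Rough analogue of SKB_MAX_HEAD(align): head bytes that still fit in one page. */
static size_t good_linear(size_t align)
{
	return PAGE_SIZE_GUESS - SKB_OVERHEAD_GUESS - align;
}

/* Clamp a guest-supplied hdr_len the way the patched tun_get_user() does. */
static size_t clamp_linear(size_t hdr_len, size_t align)
{
	size_t limit = good_linear(align);

	return hdr_len > limit ? limit : hdr_len;
}

int main(void)
{
	/* A guest advertising a 64K header no longer forces a 64K head allocation. */
	printf("hdr_len 65536 -> linear %zu\n", clamp_linear(65536, 32));
	printf("hdr_len   128 -> linear %zu\n", clamp_linear(128, 32));
	return 0;
}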
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/tun.c  10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7cb105c103fe..782e38bfc1ee 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -981,6 +981,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
struct sk_buff *skb;
size_t len = total_len, align = NET_SKB_PAD, linear;
struct virtio_net_hdr gso = { 0 };
+ int good_linear;
int offset = 0;
int copylen;
bool zerocopy = false;
@@ -1021,12 +1022,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EINVAL;
}
+ good_linear = SKB_MAX_HEAD(align);
+
if (msg_control) {
/* There are 256 bytes to be copied in skb, so there is
* enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace.
*/
copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+ if (copylen > good_linear)
+ copylen = good_linear;
linear = copylen;
if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
zerocopy = true;
@@ -1034,7 +1039,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (!zerocopy) {
copylen = len;
- linear = gso.hdr_len;
+ if (gso.hdr_len > good_linear)
+ linear = good_linear;
+ else
+ linear = gso.hdr_len;
}
skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
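Note: SKB_MAX_HEAD(align) works out to roughly one page minus the skb_shared_info overhead and the requested headroom, so bounding both copylen and linear by it keeps the linear area passed to tun_alloc_skb() within a single page (an order-0 allocation); the remaining payload goes into paged fragments, or is mapped from userspace in the zerocopy case.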