path: root/net/ipv6/mcast.c
author	Taehee Yoo <ap420073@gmail.com>	2021-06-13 14:43:44 +0000
committer	David S. Miller <davem@davemloft.net>	2021-06-14 12:46:00 -0700
commit	ffa85b73c3c4143a8e8087c0930f6c5a6ead8e9f (patch)
tree	aee0d324858e9d967861a85b6d1033731de624df /net/ipv6/mcast.c
parent	b84b53ee8337ca69512d25295961571fa08a219d (diff)
mld: avoid unnecessary high order page allocation in mld_newpack()
If the link MTU is too big, mld_newpack() allocates a high-order page.
But most MLD packets don't need a high-order page, so this may waste
pages unnecessarily. To avoid this, make mld_newpack() try to allocate
an order-0 page instead.

Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
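To see why the clamp matters, here is a minimal user-space sketch of the
sizing arithmetic (not kernel code; the 9000-byte jumbo-frame MTU, the
hlen/tlen values, and the order_for() helper, a stand-in for the kernel's
get_order(), are all illustrative assumptions):

/* Hypothetical sketch: allocation order before vs. after the patch,
 * assuming 4 KiB pages and example headroom/tailroom values.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* smallest order such that PAGE_SIZE << order >= size */
static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long mtu = 9000, hlen = 16, tlen = 16; /* example values */
	unsigned long clamped = mtu < PAGE_SIZE / 2 ? mtu : PAGE_SIZE / 2;

	/* before: the linear buffer scales with the link MTU */
	printf("old size %lu -> order %u\n",
	       mtu + hlen + tlen, order_for(mtu + hlen + tlen));

	/* after: the linear part is capped at PAGE_SIZE / 2 */
	printf("new size %lu -> order %u\n",
	       clamped + hlen + tlen, order_for(clamped + hlen + tlen));
	return 0;
}

With these numbers the old sizing needs an order-2 (16 KiB) contiguous
allocation, while the clamped size fits an order-0 page. Capping at
PAGE_SIZE / 2 rather than PAGE_SIZE presumably leaves headroom for the
skb's own overhead so the whole allocation still fits in one page;
reports that outgrow the smaller buffer are split across multiple
packets, as the MLD code already does when a report fills one packet.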
Diffstat (limited to 'net/ipv6/mcast.c')
-rw-r--r--	net/ipv6/mcast.c	25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index d36ef9d25e73..54ec163fbafa 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1729,22 +1729,25 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
 
 static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
 {
+	u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
+		     2, 0, 0, IPV6_TLV_PADN, 0 };
 	struct net_device *dev = idev->dev;
-	struct net *net = dev_net(dev);
-	struct sock *sk = net->ipv6.igmp_sk;
-	struct sk_buff *skb;
-	struct mld2_report *pmr;
-	struct in6_addr addr_buf;
-	const struct in6_addr *saddr;
 	int hlen = LL_RESERVED_SPACE(dev);
 	int tlen = dev->needed_tailroom;
-	unsigned int size = mtu + hlen + tlen;
+	struct net *net = dev_net(dev);
+	const struct in6_addr *saddr;
+	struct in6_addr addr_buf;
+	struct mld2_report *pmr;
+	struct sk_buff *skb;
+	unsigned int size;
+	struct sock *sk;
 	int err;
-	u8 ra[8] = { IPPROTO_ICMPV6, 0,
-		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
-		     IPV6_TLV_PADN, 0 };
 
-	/* we assume size > sizeof(ra) here */
+	sk = net->ipv6.igmp_sk;
+	/* we assume size > sizeof(ra) here
+	 * Also try to not allocate high-order pages for big MTU
+	 */
+	size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
 	skb = sock_alloc_send_skb(sk, size, 1, &err);
 	if (!skb)
 		return NULL;