author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-12 07:54:15 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-12 07:54:15 -0800
commit    | ce38aa9cbed3d109355b0169b520362c409c0541 (patch)
tree      | 621511c34edd22ac30ca12f78f0d478245b4ccd7 /net/core/skbuff.c
parent    | 69973b830859bc6529a7a0468ba0d80ee5117826 (diff)
parent    | d84701ecbcd6ad63faa7a9c18ad670d1c4d561c0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) Platform regulatory domain support for ath10k, from Bartosz
Markowski.
2) Centralize min/max MTU checking, thus removing tons of duplicated
code all over the various drivers. From Jarod Wilson.
3) Support ingress actions in act_mirred, from Shmulik Ladkani.
4) Improve device adjacency tracking, from David Ahern.
5) Add support for LED triggers on PHY link state changes, from Zach
Brown.
6) Improve UDP socket memory accounting, from Paolo Abeni.
7) Set SK_MEM_QUANTUM to a fixed size of 4096, instead of PAGE_SIZE.
From Eric Dumazet.
8) Collapse TCP SKBs at retransmit time even if the right side SKB has
frags. Also from Eric Dumazet.
9) Add IP_RECVFRAGSIZE and IPV6_RECVFRAGSIZE cmsgs, from Willem de
Bruijn.
10) Support routing by UID, from Lorenzo Colitti.
11) Handle L3 domain binding (i.e. VRF) for RAW sockets, from David
Ahern.
12) tcp_get_info() can run lockless, from Eric Dumazet.
13) 4-tuple UDP hashing in SFC driver, from Edward Cree.
14) Avoid reorders in GRO code, from Eric Dumazet.
15) IPV6 Segment Routing support, from David Lebrun.
16) Support MPLS push and pop for L3 packets in openvswitch, from Jiri
Benc.
17) Add LRU data structure support for BPF, from Martin KaFai Lau.
18) VF support in liquidio driver, from Raghu Vatsavayi.
19) Multiqueue support in alx driver, from Tobias Regnery.
20) Networking cgroup BPF support, from Daniel Mack.
21) TCP chronograph measurements, from Francis Yan.
22) XDP support for qed driver, from Yuval Mintz.
23) BPF based lwtunnels, from Thomas Graf.
24) Consistent FIB dumping to offloading drivers, from Ido Schimmel.
25) Many optimizations for UDP under high load, from Eric Dumazet.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits)
netfilter: nft_counter: rework atomic dump and reset
e1000: use disable_hardirq() for e1000_netpoll()
i40e: don't truncate match_method assignment
net: ethernet: ti: netcp: add support of cpts
net: phy: phy drivers should not set SUPPORTED_[Asym_]Pause
net: l2tp: ppp: change PPPOL2TP_MSG_* => L2TP_MSG_*
net: l2tp: deprecate PPPOL2TP_MSG_* in favour of L2TP_MSG_*
net: l2tp: export debug flags to UAPI
net: ethernet: stmmac: remove private tx queue lock
net: ethernet: sxgbe: remove private tx queue lock
net: bridge: shorten ageing time on topology change
net: bridge: add helper to set topology change
net: bridge: add helper to offload ageing time
net: nicvf: use new api ethtool_{get|set}_link_ksettings
net: ethernet: ti: cpsw: sync rates for channels in dual emac mode
net: ethernet: ti: cpsw: re-split res only when speed is changed
net: ethernet: ti: cpsw: combine budget and weight split and check
net: ethernet: ti: cpsw: don't start queue twice
net: ethernet: ti: cpsw: use same macros to get active slave
net: mvneta: select GENERIC_ALLOCATOR
...
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r-- | net/core/skbuff.c | 70
1 file changed, 60 insertions, 10 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1e3e0087245b..65a74e13c45b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -354,7 +354,7 @@ EXPORT_SYMBOL(build_skb);
 
 struct napi_alloc_cache {
         struct page_frag_cache page;
-        size_t skb_count;
+        unsigned int skb_count;
         void *skb_cache[NAPI_SKB_CACHE_SIZE];
 };
 
@@ -2656,7 +2656,9 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
         struct skb_frag_struct *fragfrom, *fragto;
 
         BUG_ON(shiftlen > skb->len);
-        BUG_ON(skb_headlen(skb));        /* Would corrupt stream */
+
+        if (skb_headlen(skb))
+                return 0;
 
         todo = shiftlen;
         from = 0;
@@ -3712,21 +3714,29 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
 
+static bool is_icmp_err_skb(const struct sk_buff *skb)
+{
+        return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+                       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
+}
+
 struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 {
         struct sk_buff_head *q = &sk->sk_error_queue;
-        struct sk_buff *skb, *skb_next;
+        struct sk_buff *skb, *skb_next = NULL;
+        bool icmp_next = false;
         unsigned long flags;
-        int err = 0;
 
         spin_lock_irqsave(&q->lock, flags);
         skb = __skb_dequeue(q);
         if (skb && (skb_next = skb_peek(q)))
-                err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
+                icmp_next = is_icmp_err_skb(skb_next);
         spin_unlock_irqrestore(&q->lock, flags);
 
-        sk->sk_err = err;
-        if (err)
+        if (is_icmp_err_skb(skb) && !icmp_next)
+                sk->sk_err = 0;
+
+        if (skb_next)
                 sk->sk_error_report(sk);
 
         return skb;
@@ -3838,10 +3848,18 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
         if (!skb_may_tx_timestamp(sk, tsonly))
                 return;
 
-        if (tsonly)
-                skb = alloc_skb(0, GFP_ATOMIC);
-        else
+        if (tsonly) {
+#ifdef CONFIG_INET
+                if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
+                    sk->sk_protocol == IPPROTO_TCP &&
+                    sk->sk_type == SOCK_STREAM)
+                        skb = tcp_get_timestamping_opt_stats(sk);
+                else
+#endif
+                        skb = alloc_skb(0, GFP_ATOMIC);
+        } else {
                 skb = skb_clone(orig_skb, GFP_ATOMIC);
+        }
         if (!skb)
                 return;
 
@@ -4913,3 +4931,35 @@ struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
 
         return clone;
 }
 EXPORT_SYMBOL(pskb_extract);
+
+/**
+ * skb_condense - try to get rid of fragments/frag_list if possible
+ * @skb: buffer
+ *
+ * Can be used to save memory before skb is added to a busy queue.
+ * If packet has bytes in frags and enough tail room in skb->head,
+ * pull all of them, so that we can free the frags right now and adjust
+ * truesize.
+ * Notes:
+ *        We do not reallocate skb->head thus can not fail.
+ *        Caller must re-evaluate skb->truesize if needed.
+ */
+void skb_condense(struct sk_buff *skb)
+{
+        if (skb->data_len) {
+                if (skb->data_len > skb->end - skb->tail ||
+                    skb_cloned(skb))
+                        return;
+
+                /* Nice, we can free page frag(s) right now */
+                __pskb_pull_tail(skb, skb->data_len);
+        }
+        /* At this point, skb->truesize might be over estimated,
+         * because skb had a fragment, and fragments do not tell
+         * their truesize.
+         * When we pulled its content into skb->head, fragment
+         * was freed, but __pskb_pull_tail() could not possibly
+         * adjust skb->truesize, not knowing the frag truesize.
+         */
+        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+}
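For readers skimming the new skb_condense() helper at the bottom of the diff, here is a minimal, hedged sketch of how a queueing path might call it before adding an skb to a potentially long queue. The example_enqueue() function and its use of the socket receive queue are hypothetical illustrations, not part of this merge; only skb_condense(), skb_set_owner_r() and skb_queue_tail() are existing kernel APIs.

/* Hypothetical caller, not part of this commit: condense an skb before
 * charging it to a socket receive queue, so that page frags can be
 * freed early and the re-estimated truesize is what gets accounted.
 */
#include <linux/skbuff.h>
#include <net/sock.h>

static void example_enqueue(struct sock *sk, struct sk_buff *skb)
{
        /* If there is enough tail room, pull frag data into skb->head;
         * skb_condense() may also re-estimate skb->truesize.
         */
        skb_condense(skb);

        /* Charge the (possibly smaller) truesize to the socket. */
        skb_set_owner_r(skb, sk);
        skb_queue_tail(&sk->sk_receive_queue, skb);
}

The point of calling skb_condense() before accounting is that an skb sitting on a busy queue no longer pins its page fragments, and the queue is charged for the linear buffer's truesize rather than an over-estimate.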