author    | David S. Miller <davem@davemloft.net> | 2021-11-16 13:10:35 +0000
committer | David S. Miller <davem@davemloft.net> | 2021-11-16 13:10:35 +0000
commit    | 6fcc06205c15bf1bb90896efdf5967028c154aba
tree      | 9485fbb54c44a7933e5e7904aaebaedbe0f82907 /net/ipv4/tcp_input.c
parent    | 3ad4b7c81a992463c29ae130332c217607fe4452
parent    | 43f51df4172955971ef5498f09308a9dc0291766
Merge branch 'tcp-optimizations'
Eric Dumazet says:
====================
tcp: optimizations for linux-5.17
Mostly small improvements in this series.
The notable change is "defer skb freeing after
socket lock is released" in recvmsg() (and RX zerocopy).
The idea is to defer skb freeing to the BH handler
whenever possible, or at least to perform the freeing
outside of the socket lock section, for much improved
performance. This idea can probably be extended
to other protocols (a sketch of the pattern follows
the sign-offs below).
Tests on a 100Gbit NIC
Max throughput for one TCP_STREAM flow, over 10 runs.
MTU : 1500 (1428 bytes of TCP payload per MSS)
Before: 55 Gbit
After: 66 Gbit
MTU : 4096+ (4096 bytes of TCP payload, plus TCP/IPv6 headers)
Before: 82 Gbit
After: 95 Gbit
====================
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
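
The deferral idea described in the commit message can be illustrated
outside the kernel. Below is a minimal sketch, assuming a toy
socket-like structure with a pthread mutex standing in for the socket
lock; all names here (struct buf, recv_and_collect(), free_collected())
are hypothetical and only show the pattern: consumed buffers are
collected under the lock and freed after it is released, which is what
keeps the critical section short. It is not the kernel implementation,
which batches skbs and can leave the freeing to BH context.

#include <pthread.h>
#include <stdlib.h>

struct buf {
	struct buf *next;
	/* payload would live here */
};

struct sock_ctx {
	pthread_mutex_t lock;	/* stands in for the socket lock */
	struct buf *rx_queue;	/* received buffers awaiting the reader */
};

/* Copy data out under the lock, but only *collect* the consumed
 * buffers instead of freeing them one by one while the lock is held. */
static struct buf *recv_and_collect(struct sock_ctx *sk)
{
	struct buf *done = NULL;

	pthread_mutex_lock(&sk->lock);
	while (sk->rx_queue) {
		struct buf *b = sk->rx_queue;

		sk->rx_queue = b->next;
		/* ... copy b's payload to the caller here ... */
		b->next = done;		/* defer the free: just chain it */
		done = b;
	}
	pthread_mutex_unlock(&sk->lock);
	return done;
}

/* The actual freeing happens only after the lock is released; in the
 * kernel series the analogous work can also be handed to BH context. */
static void free_collected(struct buf *done)
{
	while (done) {
		struct buf *next = done->next;

		free(done);
		done = next;
	}
}

A reader would call recv_and_collect() and then free_collected(), so
other threads contending on the lock never wait behind the free loop.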
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 246ab7b5e857..3658b9c3dd2b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5591,7 +5591,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
 		}
 	}
 
-	tp->urg_data = TCP_URG_NOTYET;
+	WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET);
 	WRITE_ONCE(tp->urg_seq, ptr);
 
 	/* Disable header prediction. */
@@ -5604,11 +5604,11 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Check if we get a new urgent pointer - normally not. */
-	if (th->urg)
+	if (unlikely(th->urg))
 		tcp_check_urg(sk, th);
 
 	/* Do we wait for any urgent data? - normally not... */
-	if (tp->urg_data == TCP_URG_NOTYET) {
+	if (unlikely(tp->urg_data == TCP_URG_NOTYET)) {
 		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
 			  th->syn;
 
@@ -5617,7 +5617,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
 			u8 tmp;
 			if (skb_copy_bits(skb, ptr, &tmp, 1))
 				BUG();
-			tp->urg_data = TCP_URG_VALID | tmp;
+			WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp);
 			if (!sock_flag(sk, SOCK_DEAD))
 				sk->sk_data_ready(sk);
 		}
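
The hunks above follow two recurring kernel patterns. WRITE_ONCE()
pairs the stores to tp->urg_data with readers that run without the
socket lock, and unlikely() tells the compiler the urgent-data path is
cold. Below is a simplified, self-contained sketch of the
WRITE_ONCE()/READ_ONCE() idea, with the macros reduced to plain
volatile accesses (the in-tree versions do more); shared_state is a
hypothetical stand-in for tp->urg_data.

/* Simplified stand-ins for the kernel macros: force a single, untorn
 * access so a lockless reader never sees a half-written value, and
 * document the intentional data race for tools such as KCSAN. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

static int shared_state;	/* stands in for tp->urg_data */

static void writer(int v)
{
	/* One store the compiler may not tear, split, or elide. */
	WRITE_ONCE(shared_state, v);
}

static int lockless_reader(void)
{
	/* One load that will not be silently cached or re-fetched. */
	return READ_ONCE(shared_state);
}

unlikely() itself is only a __builtin_expect() layout hint for the
branch predictor and the optimizer; it changes code placement, not
correctness, which is why it can be added to these branches without
any locking consequences.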