author    | Paolo Abeni <pabeni@redhat.com> | 2025-02-11 13:08:02 +0100
committer | Paolo Abeni <pabeni@redhat.com> | 2025-02-11 13:08:02 +0100
commit    | ae9b3c0e79bcc154f80f6e862d3085de31bcb3ce (patch)
tree      | a30101f515f68abb751a449a79570c0458059f11 /net/ipv4/tcp_input.c
parent    | 812122783ae8b347633b7359a71cfba28ffdb66b (diff)
parent    | 1280c26228bd7eb14bdecd67dedbdd871f8fdda5 (diff)
Merge branch 'tcp-allow-to-reduce-max-rto'
Eric Dumazet says:
====================
tcp: allow to reduce max RTO
This is a follow-up to a discussion started six months ago
by Jason Xing.
Some applications want to lower the time between
retransmit attempts.
The TCP_KEEPINTVL and TCP_KEEPCNT socket options do not
work around the issue.
This series adds:
- a new TCP level socket option (TCP_RTO_MAX_MS)
- a new sysctl (/proc/sys/net/ipv4/tcp_rto_max_ms)
Admins and/or applications can now change the max RTO value
at their own risk.
Link: https://lore.kernel.org/netdev/20240715033118.32322-1-kerneljasonxing@gmail.com/T/
====================
Link: https://patch.msgid.link/20250207152830.2527578-1-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
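
For readers who want to try the knobs described in the cover letter, here is a
minimal usage sketch. It assumes a kernel with this series applied; the
fallback define of 44 mirrors the value the series adds to
include/uapi/linux/tcp.h, since older installed headers may not have it yet.

/* Per-netns default, from the cover letter:
 *   echo 10000 > /proc/sys/net/ipv4/tcp_rto_max_ms
 * Per-socket override via the new TCP_RTO_MAX_MS option:
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_RTO_MAX_MS
#define TCP_RTO_MAX_MS 44	/* assumed fallback; added by this series */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int rto_max_ms = 10000;	/* cap retransmit backoff at 10 seconds */

	/* The kernel validates the value; out-of-range requests fail. */
	if (fd < 0 || setsockopt(fd, IPPROTO_TCP, TCP_RTO_MAX_MS,
				 &rto_max_ms, sizeof(rto_max_ms)) < 0)
		perror("TCP_RTO_MAX_MS");
	return 0;
}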
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 19
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 286f15e4994a..4686783b70de 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2252,8 +2252,7 @@ static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
 		unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
 					  msecs_to_jiffies(10));
 
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-					  delay, TCP_RTO_MAX);
+		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, false);
 		*ack_flag &= ~FLAG_SET_XMIT_TIMER;
 		return true;
 	}
@@ -3282,8 +3281,7 @@ void tcp_rearm_rto(struct sock *sk)
 			 */
 			rto = usecs_to_jiffies(max_t(int, delta_us, 1));
 		}
-		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
-				     TCP_RTO_MAX);
+		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, true);
 	}
 }
 
@@ -3560,10 +3558,10 @@ static void tcp_ack_probe(struct sock *sk)
 		 * This function is not for random using!
 		 */
 	} else {
-		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
+		unsigned long when = tcp_probe0_when(sk, tcp_rto_max(sk));
 
 		when = tcp_clamp_probe0_to_user_timeout(sk, when);
-		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
+		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, true);
 	}
 }
 
@@ -6469,9 +6467,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		    after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
 			/* Previous FIN/ACK or RST/ACK might be ignored. */
 			if (icsk->icsk_retransmits == 0)
-				inet_csk_reset_xmit_timer(sk,
-						ICSK_TIME_RETRANS,
-						TCP_TIMEOUT_MIN, TCP_RTO_MAX);
+				tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+						     TCP_TIMEOUT_MIN, false);
 			SKB_DR_SET(reason, TCP_INVALID_ACK_SEQUENCE);
 			goto reset_and_undo;
 		}
@@ -6586,8 +6583,8 @@ consume:
 		 */
 		inet_csk_schedule_ack(sk);
 		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-					  TCP_DELACK_MAX, TCP_RTO_MAX);
+		tcp_reset_xmit_timer(sk, ICSK_TIME_DACK,
+				     TCP_DELACK_MAX, false);
 		goto consume;
 	}
 	tcp_send_ack(sk);
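
All five call sites in this file now funnel through a single helper. Reading
the diff, the new bool argument distinguishes callers that want the pacing
delay folded into the timeout (tcp_rearm_rto(), tcp_ack_probe()) from those
that do not, while the clamp changes from the compile-time TCP_RTO_MAX
constant to the per-socket tcp_rto_max(sk). A sketch of the helper's likely
shape after this series, inferred from the call sites above rather than
quoted from include/net/tcp.h:

/* Sketch only: the bool replaces the old max_when argument, and the
 * clamp now comes from the per-socket limit instead of TCP_RTO_MAX.
 */
static inline void tcp_reset_xmit_timer(struct sock *sk, const int what,
					unsigned long when, bool pace_delay)
{
	if (pace_delay)
		when += tcp_pacing_delay(sk);
	inet_csk_reset_xmit_timer(sk, what, when, tcp_rto_max(sk));
}

Routing every caller through one helper is what lets a single per-socket
field, settable via TCP_RTO_MAX_MS or the sysctl, bound all retransmit,
probe, and delayed-ACK timers consistently.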