author      Paolo Abeni <pabeni@redhat.com>    2025-02-11 13:08:02 +0100
committer   Paolo Abeni <pabeni@redhat.com>    2025-02-11 13:08:02 +0100
commit      ae9b3c0e79bcc154f80f6e862d3085de31bcb3ce (patch)
tree        a30101f515f68abb751a449a79570c0458059f11 /include/net/tcp.h
parent      812122783ae8b347633b7359a71cfba28ffdb66b (diff)
parent      1280c26228bd7eb14bdecd67dedbdd871f8fdda5 (diff)
Merge branch 'tcp-allow-to-reduce-max-rto'
Eric Dumazet says:
====================
tcp: allow to reduce max RTO
This is a follow-up to a discussion started 6 months ago
by Jason Xing.
Some applications want to lower the time between
retransmit attempts.
The TCP_KEEPINTVL and TCP_KEEPCNT socket options
don't work around the issue.
This series adds:
- a new TCP level socket option (TCP_RTO_MAX_MS)
- a new sysctl (/proc/sys/net/ipv4/tcp_rto_max_ms)
Admins and/or applications can now change the max RTO value
at their own risk; a usage sketch follows below.
Link: https://lore.kernel.org/netdev/20240715033118.32322-1-kerneljasonxing@gmail.com/T/
====================
Link: https://patch.msgid.link/20250207152830.2527578-1-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
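For context, a minimal usage sketch, assuming a kernel that carries this series: the program below caps one socket's retransmit backoff at 10 seconds with the new option. The fallback define mirrors the TCP_RTO_MAX_MS value from include/uapi/linux/tcp.h in this series, since older userspace headers will not have it yet.

/*
 * Sketch: cap this socket's retransmit backoff at 10 seconds using the
 * TCP_RTO_MAX_MS option added by this series. Assumes a kernel with
 * the series applied.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_RTO_MAX_MS
#define TCP_RTO_MAX_MS 44	/* from include/uapi/linux/tcp.h in this series */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int rto_max_ms = 10 * 1000;	/* value is in milliseconds */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_TCP, TCP_RTO_MAX_MS,
		       &rto_max_ms, sizeof(rto_max_ms)) < 0)
		perror("setsockopt(TCP_RTO_MAX_MS)");
	return 0;
}

On a kernel without the option, the setsockopt() call should fail with ENOPROTOOPT and the historical 120-second cap keeps applying; the new /proc/sys/net/ipv4/tcp_rto_max_ms sysctl sets the default used by newly created TCP sockets.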
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--   include/net/tcp.h   23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index bb7edf0e72aa..7fd2d7fa4532 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -143,8 +143,9 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
 #define TCP_DELACK_MIN	4U
 #define TCP_ATO_MIN	4U
 #endif
-#define TCP_RTO_MAX	((unsigned)(120*HZ))
-#define TCP_RTO_MIN	((unsigned)(HZ/5))
+#define TCP_RTO_MAX_SEC	120
+#define TCP_RTO_MAX	((unsigned)(TCP_RTO_MAX_SEC * HZ))
+#define TCP_RTO_MIN	((unsigned)(HZ / 5))
 #define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
 
 #define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
@@ -740,10 +741,14 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 int tcp_mss_to_mtu(struct sock *sk, int mss);
 void tcp_mtup_init(struct sock *sk);
 
+static inline unsigned int tcp_rto_max(const struct sock *sk)
+{
+	return READ_ONCE(inet_csk(sk)->icsk_rto_max);
+}
+
 static inline void tcp_bound_rto(struct sock *sk)
 {
-	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
-		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+	inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
 }
 
 static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
@@ -1424,10 +1429,12 @@ static inline unsigned long tcp_pacing_delay(const struct sock *sk)
 
 static inline void tcp_reset_xmit_timer(struct sock *sk, const int what,
 					unsigned long when,
-					const unsigned long max_when)
+					bool pace_delay)
 {
-	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
-				  max_when);
+	if (pace_delay)
+		when += tcp_pacing_delay(sk);
+	inet_csk_reset_xmit_timer(sk, what, when,
+				  tcp_rto_max(sk));
 }
 
 /* Something is really bad, we could not queue an additional packet,
@@ -1456,7 +1463,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
 {
 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-				     tcp_probe0_base(sk), TCP_RTO_MAX);
+				     tcp_probe0_base(sk), true);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
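To make the change concrete, here is a small userspace model, an illustrative sketch with stand-in types rather than kernel code: after this series, tcp_bound_rto() and tcp_reset_xmit_timer() clamp against the per-socket icsk_rto_max (read via the new tcp_rto_max() helper) instead of the compile-time TCP_RTO_MAX constant.

/*
 * Illustrative userspace model of the clamping introduced above.
 * The struct and helpers are stand-ins, not the kernel's real types:
 * the point is that the per-socket icsk_rto_max (settable via
 * TCP_RTO_MAX_MS) now bounds the timers where TCP_RTO_MAX used to.
 */
#include <stdio.h>

#define HZ		1000U	/* assume 1000 jiffies per second */
#define TCP_RTO_MAX_SEC	120
#define TCP_RTO_MAX	((unsigned)(TCP_RTO_MAX_SEC * HZ))

struct fake_sock {
	unsigned int icsk_rto;		/* current RTO, in jiffies */
	unsigned int icsk_rto_max;	/* per-socket cap, in jiffies */
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Mirrors tcp_bound_rto(): clamp to the per-socket max, not TCP_RTO_MAX. */
static void bound_rto(struct fake_sock *sk)
{
	sk->icsk_rto = min_u(sk->icsk_rto, sk->icsk_rto_max);
}

int main(void)
{
	struct fake_sock sk = {
		.icsk_rto	= TCP_RTO_MAX,	/* backed off to the old 120s cap */
		.icsk_rto_max	= 10 * HZ,	/* 10s max, as set via TCP_RTO_MAX_MS */
	};

	bound_rto(&sk);
	printf("rto clamped to %u jiffies (%u s)\n",
	       sk.icsk_rto, sk.icsk_rto / HZ);	/* prints 10000 jiffies (10 s) */
	return 0;
}

The other visible change is the tcp_reset_xmit_timer() signature: callers now pass a pace_delay flag saying whether the pacing delay should be added, and the old max_when argument is gone because the cap is always tcp_rto_max(sk).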