summaryrefslogtreecommitdiff
path: root/net/rxrpc
diff options
context:
space:
mode:
author: David Howells <dhowells@redhat.com>  2024-12-04 07:46:57 +0000
committer: Jakub Kicinski <kuba@kernel.org>  2024-12-09 13:48:30 -0800
commit: 5c0ceba23bb47085d6c9c53bff08a29634ee4e7e (patch)
tree: e0c6028976e4ba3a1140fe9648e6967dac74c8cc /net/rxrpc
parent: a3d7f46d983fb2ed528b9cceb457c067fe4277a2 (diff)
rxrpc: Fix the calculation and use of RTO
Make the following changes to the calculation and use of RTO:

 (1) Fix rxrpc_resend() to use the backed-off RTO value obtained by
     calling rxrpc_get_rto_backoff() rather than extracting the value
     itself.  Without this, it may retransmit packets too early.

 (2) The RTO value being similar to the RTT causes a lot of extraneous
     resends because the RTT doesn't end up taking account of clearing out
     of the receive queue on the server.  Worse, responses to PING-ACKs are
     made as fast as possible and so are less than the DATA-requested-ACK
     RTT and so skew the RTT down.

     Fix this by putting a lower bound on the RTO by adding 100ms to it and
     limiting the lower end to 200ms.

Fixes: c410bf01933e ("rxrpc: Fix the excessive initial retransmission timeout")
Fixes: 37473e416234 ("rxrpc: Clean up the resend algorithm")
Signed-off-by: David Howells <dhowells@redhat.com>
Suggested-by: Simon Wilkinson <sxw@auristor.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net/rxrpc')
-rw-r--r--  net/rxrpc/call_event.c  3
-rw-r--r--  net/rxrpc/rtt.c         2
2 files changed, 3 insertions, 2 deletions
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index f71773b18e22..4390c97e3ba6 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -103,7 +103,8 @@ void rxrpc_resend(struct rxrpc_call *call, rxrpc_serial_t ack_serial, bool ping_
.now = ktime_get_real(),
};
struct rxrpc_txqueue *tq = call->tx_queue;
- ktime_t lowest_xmit_ts = KTIME_MAX, rto = ns_to_ktime(call->peer->rto_us * NSEC_PER_USEC);
+ ktime_t lowest_xmit_ts = KTIME_MAX;
+ ktime_t rto = rxrpc_get_rto_backoff(call->peer, false);
bool unacked = false;
_enter("{%d,%d}", call->tx_bottom, call->tx_top);
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
index e0b7d99854b4..3f1ec8e420a6 100644
--- a/net/rxrpc/rtt.c
+++ b/net/rxrpc/rtt.c
@@ -27,7 +27,7 @@ static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
static u32 rxrpc_bound_rto(u32 rto)
{
- return umin(rto, RXRPC_RTO_MAX);
+ return clamp(200000, rto + 100000, RXRPC_RTO_MAX);
}
/*