-rw-r--r--  net/ipv4/tcp_input.c |  2
-rw-r--r--  net/ipv4/tcp_ipv4.c  |  6
-rw-r--r--  net/ipv4/tcp_timer.c |  6
-rw-r--r--  net/ipv6/tcp_ipv6.c  | 11
4 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cc072d2cfcd8..8b5b6ca6617d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3874,7 +3874,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
- sk->sk_err_soft = 0;
+ WRITE_ONCE(sk->sk_err_soft, 0);
icsk->icsk_probes_out = 0;
tp->rcv_tstamp = tcp_jiffies32;
if (!prior_packets)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ea370afa70ed..4f6894469b62 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -361,7 +361,7 @@ void tcp_v4_mtu_reduced(struct sock *sk)
* for the case, if this connection will not able to recover.
*/
if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
- sk->sk_err_soft = EMSGSIZE;
+ WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
mtu = dst_mtu(dst);
@@ -602,7 +602,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
tcp_done(sk);
} else {
- sk->sk_err_soft = err;
+ WRITE_ONCE(sk->sk_err_soft, err);
}
goto out;
}
@@ -628,7 +628,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
sk->sk_err = err;
sk_error_report(sk);
} else { /* Only an error on timeout */
- sk->sk_err_soft = err;
+ WRITE_ONCE(sk->sk_err_soft, err);
}
out:
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index cb79127f45c3..8823e2182713 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -67,7 +67,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
static void tcp_write_err(struct sock *sk)
{
- sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
+ sk->sk_err = READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT;
sk_error_report(sk);
tcp_write_queue_purge(sk);
@@ -110,7 +110,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
shift++;
/* If some dubious ICMP arrived, penalize even more. */
- if (sk->sk_err_soft)
+ if (READ_ONCE(sk->sk_err_soft))
shift++;
if (tcp_check_oom(sk, shift)) {
@@ -146,7 +146,7 @@ static int tcp_orphan_retries(struct sock *sk, bool alive)
int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
/* We know from an ICMP that something is wrong. */
- if (sk->sk_err_soft && !alive)
+ if (READ_ONCE(sk->sk_err_soft) && !alive)
retries = 0;
/* However, if socket sent something recently, select some safe
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1bf93b61aa06..dc963eebc668 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -497,8 +497,9 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
tcp_done(sk);
- } else
- sk->sk_err_soft = err;
+ } else {
+ WRITE_ONCE(sk->sk_err_soft, err);
+ }
goto out;
case TCP_LISTEN:
break;
@@ -514,9 +515,9 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!sock_owned_by_user(sk) && np->recverr) {
sk->sk_err = err;
sk_error_report(sk);
- } else
- sk->sk_err_soft = err;
-
+ } else {
+ WRITE_ONCE(sk->sk_err_soft, err);
+ }
out:
bh_unlock_sock(sk);
sock_put(sk);
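
Note: the pattern the patch applies is WRITE_ONCE() on the paths that store sk->sk_err_soft without holding the lock the readers rely on, and READ_ONCE() on the lockless readers in the timer code. The sketch below is an illustration only: the WRITE_ONCE()/READ_ONCE() definitions are simplified userspace stand-ins for the kernel macros, and toy_sock, record_soft_error() and pick_write_error() are hypothetical names, not kernel code.

/*
 * Illustrative sketch only. These WRITE_ONCE()/READ_ONCE() definitions are
 * simplified userspace stand-ins for the kernel macros, and toy_sock is a
 * placeholder, not the kernel's struct sock.
 */
#include <errno.h>
#include <stdio.h>

/* Force a single, untorn access through a volatile-qualified pointer so the
 * compiler cannot merge, re-read, or cache the value across the data race.
 */
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))

struct toy_sock {
	int sk_err_soft;	/* stored from ICMP/ACK paths, read by timers */
};

/* Writer side, in the spirit of tcp_v4_err(): record a soft error that
 * another context may read without taking the same lock.
 */
static void record_soft_error(struct toy_sock *sk, int err)
{
	WRITE_ONCE(sk->sk_err_soft, err);
}

/* Reader side, in the spirit of tcp_write_err(): report the recorded soft
 * error, or ETIMEDOUT if none was seen.
 */
static int pick_write_error(struct toy_sock *sk)
{
	int soft = READ_ONCE(sk->sk_err_soft);

	return soft ? soft : ETIMEDOUT;
}

int main(void)
{
	struct toy_sock sk = { .sk_err_soft = 0 };

	printf("no soft error -> %d\n", pick_write_error(&sk));
	record_soft_error(&sk, EMSGSIZE);
	printf("after ICMP    -> %d\n", pick_write_error(&sk));
	return 0;
}

The ternary in pick_write_error() spells out the GNU `?:` shorthand used in tcp_write_err(). Without such annotations, plain loads and stores racing like this are flagged by KCSAN, and the compiler is otherwise free to tear, duplicate, or re-read them.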