author     Eric Dumazet <edumazet@google.com>   2023-03-15 20:57:46 +0000
committer  David S. Miller <davem@davemloft.net>   2023-03-17 08:25:05 +0000
commit     cc04410af7de348234ac36a5f50c4ce416efdb4b (patch)
tree       75841af050a1b03bf75faad8c2bc9e5806d3512b /net/unix
parent     9ae8e5ad99b8ebcd3d3dd46075f3825e6f08f063 (diff)
af_unix: annotate lockless accesses to sk->sk_err
unix_poll() and unix_dgram_poll() read sk->sk_err without any lock held.

Add relevant READ_ONCE()/WRITE_ONCE() annotations.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/unix')
-rw-r--r--  net/unix/af_unix.c  9
1 file changed, 5 insertions(+), 4 deletions(-)
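The annotations the patch adds follow the kernel's usual convention for data that one thread stores while another reads it without holding a lock: make each access a single volatile load or store so the compiler cannot tear, fuse, or silently re-read it. The userspace sketch below is illustrative only; the macro definitions are simplified stand-ins for the kernel's real READ_ONCE()/WRITE_ONCE(), and the demo_* struct and function names are hypothetical, loosely mirroring the writer (unix_dgram_disconnected()) and the lockless reader (unix_poll()) touched by the diff that follows.

/*
 * Illustrative userspace sketch only -- not kernel code. The macros below
 * are simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE()
 * (a plain volatile access), and the demo_* names are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

struct demo_sock {
	int sk_err;	/* set by one thread, polled locklessly by another */
};

/* Writer side: record an error, as unix_dgram_disconnected() now does. */
static void demo_set_error(struct demo_sock *sk, int err)
{
	WRITE_ONCE(sk->sk_err, err);	/* single, untorn store */
}

/* Reader side: lockless check, as unix_poll() now does. */
static int demo_has_error(struct demo_sock *sk)
{
	return READ_ONCE(sk->sk_err) != 0;	/* single, untorn load */
}

int main(void)
{
	struct demo_sock s = { .sk_err = 0 };

	demo_set_error(&s, ECONNRESET);
	printf("error pending: %d\n", demo_has_error(&s));
	return 0;
}

Without such annotations the compiler may split or duplicate a plain int access, which is harmless in this single-threaded demo but constitutes a data race in the kernel's poll path.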
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0b0f18ecce44..fb31e8a4409e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -557,7 +557,7 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
* when peer was not connected to us.
*/
if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
- other->sk_err = ECONNRESET;
+ WRITE_ONCE(other->sk_err, ECONNRESET);
sk_error_report(other);
}
}
@@ -630,7 +630,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
/* No more writes */
skpair->sk_shutdown = SHUTDOWN_MASK;
if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
- skpair->sk_err = ECONNRESET;
+ WRITE_ONCE(skpair->sk_err, ECONNRESET);
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
@@ -3165,7 +3165,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
mask = 0;
/* exceptional events? */
- if (sk->sk_err)
+ if (READ_ONCE(sk->sk_err))
mask |= EPOLLERR;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= EPOLLHUP;
@@ -3208,7 +3208,8 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
mask = 0;
/* exceptional events? */
- if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ if (READ_ONCE(sk->sk_err) ||
+ !skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);