Diffstat (limited to 'net/unix')
-rw-r--r--  net/unix/Kconfig            |    4
-rw-r--r--  net/unix/af_unix.c          | 1054
-rw-r--r--  net/unix/af_unix.h          |   72
-rw-r--r--  net/unix/diag.c             |   77
-rw-r--r--  net/unix/garbage.c          |  639
-rw-r--r--  net/unix/sysctl_net_unix.c  |    9
-rw-r--r--  net/unix/unix_bpf.c         |    8
7 files changed, 1140 insertions, 723 deletions
diff --git a/net/unix/Kconfig b/net/unix/Kconfig
index 8b5d04210d7c..6f1783c1659b 100644
--- a/net/unix/Kconfig
+++ b/net/unix/Kconfig
@@ -17,9 +17,11 @@ config UNIX
Say Y unless you know what you are doing.
config AF_UNIX_OOB
- bool
+ bool "UNIX: out-of-bound messages"
depends on UNIX
default y
+ help
+ Support for MSG_OOB in UNIX domain sockets. If unsure, say Y.
config UNIX_DIAG
tristate "UNIX: socket monitoring interface"
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9a6ad5974dff..52b155123985 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -77,46 +77,40 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched/signal.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/stat.h>
+#include <linux/bpf-cgroup.h>
+#include <linux/btf_ids.h>
#include <linux/dcache.h>
-#include <linux/namei.h>
-#include <linux/socket.h>
-#include <linux/un.h>
+#include <linux/errno.h>
#include <linux/fcntl.h>
+#include <linux/file.h>
#include <linux/filter.h>
-#include <linux/termios.h>
-#include <linux/sockios.h>
-#include <linux/net.h>
-#include <linux/in.h>
#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/net_namespace.h>
-#include <net/sock.h>
-#include <net/tcp_states.h>
-#include <net/af_unix.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <net/scm.h>
+#include <linux/fs_struct.h>
#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/rtnetlink.h>
+#include <linux/kernel.h>
#include <linux/mount.h>
-#include <net/checksum.h>
+#include <linux/namei.h>
+#include <linux/net.h>
+#include <linux/pidfs.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/sched/signal.h>
#include <linux/security.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
#include <linux/splice.h>
-#include <linux/freezer.h>
-#include <linux/file.h>
-#include <linux/btf_ids.h>
-#include <linux/bpf-cgroup.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <net/af_unix.h>
+#include <net/net_namespace.h>
+#include <net/scm.h>
+#include <net/tcp_states.h>
+#include <uapi/linux/sockios.h>
+#include <uapi/linux/termios.h>
+
+#include "af_unix.h"
static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
@@ -126,6 +120,81 @@ static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
* hash table is protected with spinlock.
* each socket state is protected by separate spinlock.
*/
+#ifdef CONFIG_PROVE_LOCKING
+#define cmp_ptr(l, r) (((l) > (r)) - ((l) < (r)))
+
+static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
+ const struct lockdep_map *b)
+{
+ return cmp_ptr(a, b);
+}
+
+static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
+ const struct lockdep_map *_b)
+{
+ const struct unix_sock *a, *b;
+
+ a = container_of(_a, struct unix_sock, lock.dep_map);
+ b = container_of(_b, struct unix_sock, lock.dep_map);
+
+ if (a->sk.sk_state == TCP_LISTEN) {
+ /* unix_stream_connect(): Before the 2nd unix_state_lock(),
+ *
+ * 1. a is TCP_LISTEN.
+ * 2. b is not a.
+ * 3. concurrent connect(b -> a) must fail.
+ *
+ * Except for 2. & 3., the b's state can be any possible
+ * value due to concurrent connect() or listen().
+ *
+ * 2. is detected in debug_spin_lock_before(), and 3. cannot
+ * be expressed as lock_cmp_fn.
+ */
+ switch (b->sk.sk_state) {
+ case TCP_CLOSE:
+ case TCP_ESTABLISHED:
+ case TCP_LISTEN:
+ return -1;
+ default:
+ /* Invalid case. */
+ return 0;
+ }
+ }
+
+ /* Should never happen. Just to be symmetric. */
+ if (b->sk.sk_state == TCP_LISTEN) {
+ switch (b->sk.sk_state) {
+ case TCP_CLOSE:
+ case TCP_ESTABLISHED:
+ return 1;
+ default:
+ return 0;
+ }
+ }
+
+ /* unix_state_double_lock(): ascending address order. */
+ return cmp_ptr(a, b);
+}
+
+static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a,
+ const struct lockdep_map *_b)
+{
+ const struct sock *a, *b;
+
+ a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);
+ b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);
+
+ /* unix_collect_skb(): listener -> embryo order. */
+ if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a)
+ return -1;
+
+ /* Should never happen. Just to be symmetric. */
+ if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b)
+ return 1;
+
+ return 0;
+}
+#endif
static unsigned int unix_unbound_hash(struct sock *sk)
{
@@ -168,7 +237,7 @@ static void unix_table_double_lock(struct net *net,
swap(hash1, hash2);
spin_lock(&net->unx.table.locks[hash1]);
- spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
+ spin_lock(&net->unx.table.locks[hash2]);
}
static void unix_table_double_unlock(struct net *net,
@@ -211,25 +280,14 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
}
#endif /* CONFIG_SECURITY_NETWORK */
-static inline int unix_our_peer(struct sock *sk, struct sock *osk)
-{
- return unix_peer(osk) == sk;
-}
-
static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
- return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
-}
-
-static inline int unix_recvq_full(const struct sock *sk)
-{
- return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+ return !unix_peer(osk) || unix_peer(osk) == sk;
}
static inline int unix_recvq_full_lockless(const struct sock *sk)
{
- return skb_queue_len_lockless(&sk->sk_receive_queue) >
- READ_ONCE(sk->sk_max_ack_backlog);
+ return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
struct sock *unix_peer_get(struct sock *s)
@@ -530,10 +588,10 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
return 0;
}
-static int unix_writable(const struct sock *sk)
+static int unix_writable(const struct sock *sk, unsigned char state)
{
- return sk->sk_state != TCP_LISTEN &&
- (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+ return state != TCP_LISTEN &&
+ (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
}
static void unix_write_space(struct sock *sk)
@@ -541,12 +599,12 @@ static void unix_write_space(struct sock *sk)
struct socket_wq *wq;
rcu_read_lock();
- if (unix_writable(sk)) {
+ if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait,
EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
@@ -558,7 +616,9 @@ static void unix_write_space(struct sock *sk)
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
if (!skb_queue_empty(&sk->sk_receive_queue)) {
- skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge_reason(&sk->sk_receive_queue,
+ SKB_DROP_REASON_UNIX_DISCONNECT);
+
wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
/* If one link of bidirectional dgram pipe is disconnected,
@@ -570,14 +630,13 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
sk_error_report(other);
}
}
- other->sk_state = TCP_CLOSE;
}
static void unix_sock_destructor(struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
- skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge_reason(&sk->sk_receive_queue, SKB_DROP_REASON_SOCKET_CLOSE);
DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
@@ -587,6 +646,9 @@ static void unix_sock_destructor(struct sock *sk)
return;
}
+ if (sk->sk_peer_pid)
+ pidfs_put_pid(sk->sk_peer_pid);
+
if (u->addr)
unix_release_addr(u->addr);
@@ -598,6 +660,11 @@ static void unix_sock_destructor(struct sock *sk)
#endif
}
+static unsigned int unix_skb_len(const struct sk_buff *skb)
+{
+ return skb->len - UNIXCB(skb).consumed;
+}
+
static void unix_release_sock(struct sock *sk, int embrion)
{
struct unix_sock *u = unix_sk(sk);
@@ -617,7 +684,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
u->path.dentry = NULL;
u->path.mnt = NULL;
state = sk->sk_state;
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
skpair = unix_peer(sk);
unix_peer(sk) = NULL;
@@ -625,20 +692,23 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_state_unlock(sk);
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
- if (u->oob_skb) {
- kfree_skb(u->oob_skb);
- u->oob_skb = NULL;
- }
+ u->oob_skb = NULL;
#endif
wake_up_interruptible_all(&u->peer_wait);
if (skpair != NULL) {
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ if (skb && !unix_skb_len(skb))
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+#endif
unix_state_lock(skpair);
/* No more writes */
WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
- if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+ if (skb || embrion)
WRITE_ONCE(skpair->sk_err, ECONNRESET);
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
@@ -654,9 +724,9 @@ static void unix_release_sock(struct sock *sk, int embrion)
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (state == TCP_LISTEN)
unix_release_sock(skb->sk, 1);
- /* passed fds are erased in the kfree_skb hook */
- UNIXCB(skb).consumed = skb->len;
- kfree_skb(skb);
+
+ /* passed fds are erased in the kfree_skb hook */
+ kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
}
if (path.dentry)
@@ -681,7 +751,48 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_gc(); /* Garbage collect fds */
}
-static void init_peercred(struct sock *sk)
+struct unix_peercred {
+ struct pid *peer_pid;
+ const struct cred *peer_cred;
+};
+
+static inline int prepare_peercred(struct unix_peercred *peercred)
+{
+ struct pid *pid;
+ int err;
+
+ pid = task_tgid(current);
+ err = pidfs_register_pid(pid);
+ if (likely(!err)) {
+ peercred->peer_pid = get_pid(pid);
+ peercred->peer_cred = get_current_cred();
+ }
+ return err;
+}
+
+static void drop_peercred(struct unix_peercred *peercred)
+{
+ const struct cred *cred = NULL;
+ struct pid *pid = NULL;
+
+ might_sleep();
+
+ swap(peercred->peer_pid, pid);
+ swap(peercred->peer_cred, cred);
+
+ pidfs_put_pid(pid);
+ put_pid(pid);
+ put_cred(cred);
+}
+
+static inline void init_peercred(struct sock *sk,
+ const struct unix_peercred *peercred)
+{
+ sk->sk_peer_pid = peercred->peer_pid;
+ sk->sk_peer_cred = peercred->peer_cred;
+}
+
+static void update_peercred(struct sock *sk, struct unix_peercred *peercred)
{
const struct cred *old_cred;
struct pid *old_pid;
@@ -689,36 +800,27 @@ static void init_peercred(struct sock *sk)
spin_lock(&sk->sk_peer_lock);
old_pid = sk->sk_peer_pid;
old_cred = sk->sk_peer_cred;
- sk->sk_peer_pid = get_pid(task_tgid(current));
- sk->sk_peer_cred = get_current_cred();
+ init_peercred(sk, peercred);
spin_unlock(&sk->sk_peer_lock);
- put_pid(old_pid);
- put_cred(old_cred);
+ peercred->peer_pid = old_pid;
+ peercred->peer_cred = old_cred;
}
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
- const struct cred *old_cred;
- struct pid *old_pid;
+ lockdep_assert_held(&unix_sk(peersk)->lock);
- if (sk < peersk) {
- spin_lock(&sk->sk_peer_lock);
- spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
- } else {
- spin_lock(&peersk->sk_peer_lock);
- spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
- }
- old_pid = sk->sk_peer_pid;
- old_cred = sk->sk_peer_cred;
- sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ spin_lock(&sk->sk_peer_lock);
+ sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ pidfs_get_pid(sk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
-
spin_unlock(&sk->sk_peer_lock);
- spin_unlock(&peersk->sk_peer_lock);
+}
- put_pid(old_pid);
- put_cred(old_cred);
+static bool unix_may_passcred(const struct sock *sk)
+{
+ return sk->sk_scm_credentials || sk->sk_scm_pidfd;
}
static int unix_listen(struct socket *sock, int backlog)
@@ -726,26 +828,32 @@ static int unix_listen(struct socket *sock, int backlog)
int err;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
+ struct unix_peercred peercred = {};
err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out; /* Only stream/seqpacket sockets accept */
err = -EINVAL;
- if (!u->addr)
+ if (!READ_ONCE(u->addr))
goto out; /* No listens on an unbound socket */
+ err = prepare_peercred(&peercred);
+ if (err)
+ goto out;
unix_state_lock(sk);
if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
goto out_unlock;
if (backlog > sk->sk_max_ack_backlog)
wake_up_interruptible_all(&u->peer_wait);
sk->sk_max_ack_backlog = backlog;
- sk->sk_state = TCP_LISTEN;
+ WRITE_ONCE(sk->sk_state, TCP_LISTEN);
+
/* set credentials so connect can copy them */
- init_peercred(sk);
+ update_peercred(sk, &peercred);
err = 0;
out_unlock:
unix_state_unlock(sk);
+ drop_peercred(&peercred);
out:
return err;
}
@@ -755,7 +863,7 @@ static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
-static int unix_accept(struct socket *, struct socket *, int, bool);
+static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
@@ -905,13 +1013,6 @@ static void unix_close(struct sock *sk, long timeout)
*/
}
-static void unix_unhash(struct sock *sk)
-{
- /* Nothing to do here, unix socket does not need a ->unhash().
- * This is merely for sockmap.
- */
-}
-
static bool unix_bpf_bypass_getsockopt(int level, int optname)
{
if (level == SOL_SOCKET) {
@@ -942,7 +1043,6 @@ struct proto unix_stream_proto = {
.owner = THIS_MODULE,
.obj_size = sizeof(struct unix_sock),
.close = unix_close,
- .unhash = unix_unhash,
.bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = unix_stream_bpf_update_proto,
@@ -973,17 +1073,21 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sock_init_data(sock, sk);
+ sk->sk_scm_rights = 1;
sk->sk_hash = unix_unbound_hash(sk);
sk->sk_allocation = GFP_KERNEL_ACCOUNT;
sk->sk_write_space = unix_write_space;
- sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
+ sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
sk->sk_destruct = unix_sock_destructor;
+ lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL);
+
u = unix_sk(sk);
- u->inflight = 0;
+ u->listener = NULL;
+ u->vertex = NULL;
u->path.dentry = NULL;
u->path.mnt = NULL;
spin_lock_init(&u->lock);
- INIT_LIST_HEAD(&u->link);
+ lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
mutex_init(&u->iolock); /* single task reading lock */
mutex_init(&u->bindlock); /* single task binding lock */
init_waitqueue_head(&u->peer_wait);
@@ -1053,7 +1157,7 @@ static int unix_release(struct socket *sock)
}
static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
- int type)
+ int type, int flags)
{
struct inode *inode;
struct path path;
@@ -1061,13 +1165,39 @@ static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
int err;
unix_mkname_bsd(sunaddr, addr_len);
- err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
- if (err)
- goto fail;
- err = path_permission(&path, MAY_WRITE);
- if (err)
- goto path_put;
+ if (flags & SOCK_COREDUMP) {
+ const struct cred *cred;
+ struct cred *kcred;
+ struct path root;
+
+ kcred = prepare_kernel_cred(&init_task);
+ if (!kcred) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ task_lock(&init_task);
+ get_fs_root(init_task.fs, &root);
+ task_unlock(&init_task);
+
+ cred = override_creds(kcred);
+ err = vfs_path_lookup(root.dentry, root.mnt, sunaddr->sun_path,
+ LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS |
+ LOOKUP_NO_MAGICLINKS, &path);
+ put_cred(revert_creds(cred));
+ path_put(&root);
+ if (err)
+ goto fail;
+ } else {
+ err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
+ if (err)
+ goto fail;
+
+ err = path_permission(&path, MAY_WRITE);
+ if (err)
+ goto path_put;
+ }
err = -ECONNREFUSED;
inode = d_backing_inode(path.dentry);
@@ -1117,12 +1247,12 @@ static struct sock *unix_find_abstract(struct net *net,
static struct sock *unix_find_other(struct net *net,
struct sockaddr_un *sunaddr,
- int addr_len, int type)
+ int addr_len, int type, int flags)
{
struct sock *sk;
if (sunaddr->sun_path[0])
- sk = unix_find_bsd(sunaddr, addr_len, type);
+ sk = unix_find_bsd(sunaddr, addr_len, type, flags);
else
sk = unix_find_abstract(net, sunaddr, addr_len, type);
@@ -1131,8 +1261,8 @@ static struct sock *unix_find_other(struct net *net,
static int unix_autobind(struct sock *sk)
{
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct unix_address *addr;
u32 lastnum, ordernum;
@@ -1155,6 +1285,7 @@ static int unix_autobind(struct sock *sk)
addr->name->sun_family = AF_UNIX;
refcount_set(&addr->refcnt, 1);
+ old_hash = sk->sk_hash;
ordernum = get_random_u32();
lastnum = ordernum & 0xFFFFF;
retry:
@@ -1195,8 +1326,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
{
umode_t mode = S_IFSOCK |
(SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct mnt_idmap *idmap;
struct unix_address *addr;
@@ -1234,6 +1365,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
if (u->addr)
goto out_unlock;
+ old_hash = sk->sk_hash;
new_hash = unix_bsd_hash(d_backing_inode(dentry));
unix_table_double_lock(net, old_hash, new_hash);
u->path.mnt = mntget(parent.mnt);
@@ -1261,8 +1393,8 @@ out:
static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
int addr_len)
{
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct unix_address *addr;
int err;
@@ -1280,6 +1412,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
goto out_mutex;
}
+ old_hash = sk->sk_hash;
new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
unix_table_double_lock(net, old_hash, new_hash);
@@ -1329,11 +1462,12 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
unix_state_lock(sk1);
return;
}
+
if (sk1 > sk2)
swap(sk1, sk2);
unix_state_lock(sk1);
- unix_state_lock_nested(sk2, U_LOCK_SECOND);
+ unix_state_lock(sk2);
}
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
@@ -1367,16 +1501,14 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
if (err)
goto out;
- if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
- !unix_sk(sk)->addr) {
+ if (unix_may_passcred(sk) && !READ_ONCE(unix_sk(sk)->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
}
restart:
- other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
+ other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type, 0);
if (IS_ERR(other)) {
err = PTR_ERR(other);
goto out;
@@ -1399,7 +1531,8 @@ restart:
if (err)
goto out_unlock;
- sk->sk_state = other->sk_state = TCP_ESTABLISHED;
+ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+ WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
} else {
/*
* 1003.1g breaking connected state with AF_UNSPEC
@@ -1416,13 +1549,20 @@ restart:
unix_peer(sk) = other;
if (!other)
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
unix_state_double_unlock(sk, other);
- if (other != old_peer)
+ if (other != old_peer) {
unix_dgram_disconnected(sk, old_peer);
+
+ unix_state_lock(old_peer);
+ if (!unix_peer(old_peer))
+ WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
+ unix_state_unlock(old_peer);
+ }
+
sock_put(old_peer);
} else {
unix_peer(sk) = other;
@@ -1439,7 +1579,6 @@ out:
}
static long unix_wait_for_peer(struct sock *other, long timeo)
- __releases(&unix_sk(other)->lock)
{
struct unix_sock *u = unix_sk(other);
int sched;
@@ -1466,11 +1605,12 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
struct unix_sock *u = unix_sk(sk), *newu, *otheru;
+ struct unix_peercred peercred = {};
struct net *net = sock_net(sk);
struct sk_buff *skb = NULL;
+ unsigned char state;
long timeo;
int err;
- int st;
err = unix_validate_addr(sunaddr, addr_len);
if (err)
@@ -1480,8 +1620,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
if (err)
goto out;
- if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+ if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
@@ -1490,35 +1629,36 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
/* First of all allocate resources.
- If we will make it after state is locked,
- we will have to recheck all again in any case.
+ * If we will make it after state is locked,
+ * we will have to recheck all again in any case.
*/
/* create new sock for complete connection */
newsk = unix_create1(net, NULL, 0, sock->type);
if (IS_ERR(newsk)) {
err = PTR_ERR(newsk);
- newsk = NULL;
goto out;
}
- err = -ENOMEM;
+ err = prepare_peercred(&peercred);
+ if (err)
+ goto out;
/* Allocate skb for sending to listening sock */
skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
- if (skb == NULL)
- goto out;
+ if (!skb) {
+ err = -ENOMEM;
+ goto out_free_sk;
+ }
restart:
/* Find listening sock. */
- other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
+ other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, flags);
if (IS_ERR(other)) {
err = PTR_ERR(other);
- other = NULL;
- goto out;
+ goto out_free_skb;
}
- /* Latch state of peer */
unix_state_lock(other);
/* Apparently VFS overslept socket death. Retry. */
@@ -1528,59 +1668,43 @@ restart:
goto restart;
}
- err = -ECONNREFUSED;
- if (other->sk_state != TCP_LISTEN)
- goto out_unlock;
- if (other->sk_shutdown & RCV_SHUTDOWN)
+ if (other->sk_state != TCP_LISTEN ||
+ other->sk_shutdown & RCV_SHUTDOWN) {
+ err = -ECONNREFUSED;
goto out_unlock;
+ }
- if (unix_recvq_full(other)) {
- err = -EAGAIN;
- if (!timeo)
+ if (unix_recvq_full_lockless(other)) {
+ if (!timeo) {
+ err = -EAGAIN;
goto out_unlock;
+ }
timeo = unix_wait_for_peer(other, timeo);
+ sock_put(other);
err = sock_intr_errno(timeo);
if (signal_pending(current))
- goto out;
- sock_put(other);
+ goto out_free_skb;
+
goto restart;
}
- /* Latch our state.
-
- It is tricky place. We need to grab our state lock and cannot
- drop lock on peer. It is dangerous because deadlock is
- possible. Connect to self case and simultaneous
- attempt to connect are eliminated by checking socket
- state. other is TCP_LISTEN, if sk is TCP_LISTEN we
- check this before attempt to grab lock.
-
- Well, and we have to recheck the state after socket locked.
+ /* self connect and simultaneous connect are eliminated
+ * by rejecting TCP_LISTEN socket to avoid deadlock.
*/
- st = sk->sk_state;
-
- switch (st) {
- case TCP_CLOSE:
- /* This is ok... continue with connect */
- break;
- case TCP_ESTABLISHED:
- /* Socket is already connected */
- err = -EISCONN;
- goto out_unlock;
- default:
- err = -EINVAL;
+ state = READ_ONCE(sk->sk_state);
+ if (unlikely(state != TCP_CLOSE)) {
+ err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
goto out_unlock;
}
- unix_state_lock_nested(sk, U_LOCK_SECOND);
+ unix_state_lock(sk);
- if (sk->sk_state != st) {
+ if (unlikely(sk->sk_state != TCP_CLOSE)) {
+ err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
unix_state_unlock(sk);
- unix_state_unlock(other);
- sock_put(other);
- goto restart;
+ goto out_unlock;
}
err = security_unix_stream_connect(sk, other, newsk);
@@ -1592,11 +1716,14 @@ restart:
/* The way is open! Fastly set all the necessary fields... */
sock_hold(sk);
- unix_peer(newsk) = sk;
- newsk->sk_state = TCP_ESTABLISHED;
- newsk->sk_type = sk->sk_type;
- init_peercred(newsk);
+ unix_peer(newsk) = sk;
+ newsk->sk_state = TCP_ESTABLISHED;
+ newsk->sk_type = sk->sk_type;
+ newsk->sk_scm_recv_flags = other->sk_scm_recv_flags;
+ init_peercred(newsk, &peercred);
+
newu = unix_sk(newsk);
+ newu->listener = other;
RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
otheru = unix_sk(other);
@@ -1628,7 +1755,7 @@ restart:
copy_peercred(sk, other);
sock->state = SS_CONNECTED;
- sk->sk_state = TCP_ESTABLISHED;
+ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
sock_hold(newsk);
smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
@@ -1646,29 +1773,40 @@ restart:
return 0;
out_unlock:
- if (other)
- unix_state_unlock(other);
-
+ unix_state_unlock(other);
+ sock_put(other);
+out_free_skb:
+ consume_skb(skb);
+out_free_sk:
+ unix_release_sock(newsk, 0);
out:
- kfree_skb(skb);
- if (newsk)
- unix_release_sock(newsk, 0);
- if (other)
- sock_put(other);
+ drop_peercred(&peercred);
return err;
}
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
+ struct unix_peercred ska_peercred = {}, skb_peercred = {};
struct sock *ska = socka->sk, *skb = sockb->sk;
+ int err;
+
+ err = prepare_peercred(&ska_peercred);
+ if (err)
+ return err;
+
+ err = prepare_peercred(&skb_peercred);
+ if (err) {
+ drop_peercred(&ska_peercred);
+ return err;
+ }
/* Join our sockets back to back */
sock_hold(ska);
sock_hold(skb);
unix_peer(ska) = skb;
unix_peer(skb) = ska;
- init_peercred(ska);
- init_peercred(skb);
+ init_peercred(ska, &ska_peercred);
+ init_peercred(skb, &skb_peercred);
ska->sk_state = TCP_ESTABLISHED;
skb->sk_state = TCP_ESTABLISHED;
@@ -1677,43 +1815,31 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
return 0;
}
-static void unix_sock_inherit_flags(const struct socket *old,
- struct socket *new)
-{
- if (test_bit(SOCK_PASSCRED, &old->flags))
- set_bit(SOCK_PASSCRED, &new->flags);
- if (test_bit(SOCK_PASSPIDFD, &old->flags))
- set_bit(SOCK_PASSPIDFD, &new->flags);
- if (test_bit(SOCK_PASSSEC, &old->flags))
- set_bit(SOCK_PASSSEC, &new->flags);
-}
-
-static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int unix_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sock *sk = sock->sk;
- struct sock *tsk;
struct sk_buff *skb;
- int err;
+ struct sock *tsk;
- err = -EOPNOTSUPP;
+ arg->err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out;
- err = -EINVAL;
- if (sk->sk_state != TCP_LISTEN)
+ arg->err = -EINVAL;
+ if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
goto out;
/* If socket state is TCP_LISTEN it cannot change (for now...),
* so that no locks are necessary.
*/
- skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
- &err);
+ skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
+ &arg->err);
if (!skb) {
/* This means receive shutdown. */
- if (err == 0)
- err = -EINVAL;
+ if (arg->err == 0)
+ arg->err = -EINVAL;
goto out;
}
@@ -1723,14 +1849,14 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
/* attach accepted sock to socket */
unix_state_lock(tsk);
+ unix_update_edges(unix_sk(tsk));
newsock->state = SS_CONNECTED;
- unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
unix_state_unlock(tsk);
return 0;
out:
- return err;
+ return arg->err;
}
@@ -1789,81 +1915,29 @@ static inline bool too_many_unix_fds(struct task_struct *p)
static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
- int i;
-
if (too_many_unix_fds(current))
return -ETOOMANYREFS;
- /* Need to duplicate file references for the sake of garbage
- * collection. Otherwise a socket in the fps might become a
- * candidate for GC while the skb is not yet queued.
- */
- UNIXCB(skb).fp = scm_fp_dup(scm->fp);
- if (!UNIXCB(skb).fp)
- return -ENOMEM;
+ UNIXCB(skb).fp = scm->fp;
+ scm->fp = NULL;
- for (i = scm->fp->count - 1; i >= 0; i--)
- unix_inflight(scm->fp->user, scm->fp->fp[i]);
+ if (unix_prepare_fpl(UNIXCB(skb).fp))
+ return -ENOMEM;
return 0;
}
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
- int i;
-
scm->fp = UNIXCB(skb).fp;
UNIXCB(skb).fp = NULL;
- for (i = scm->fp->count - 1; i >= 0; i--)
- unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+ unix_destroy_fpl(scm->fp);
}
static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
scm->fp = scm_fp_dup(UNIXCB(skb).fp);
-
- /*
- * Garbage collection of unix sockets starts by selecting a set of
- * candidate sockets which have reference only from being in flight
- * (total_refs == inflight_refs). This condition is checked once during
- * the candidate collection phase, and candidates are marked as such, so
- * that non-candidates can later be ignored. While inflight_refs is
- * protected by unix_gc_lock, total_refs (file count) is not, hence this
- * is an instantaneous decision.
- *
- * Once a candidate, however, the socket must not be reinstalled into a
- * file descriptor while the garbage collection is in progress.
- *
- * If the above conditions are met, then the directed graph of
- * candidates (*) does not change while unix_gc_lock is held.
- *
- * Any operations that changes the file count through file descriptors
- * (dup, close, sendmsg) does not change the graph since candidates are
- * not installed in fds.
- *
- * Dequeing a candidate via recvmsg would install it into an fd, but
- * that takes unix_gc_lock to decrement the inflight count, so it's
- * serialized with garbage collection.
- *
- * MSG_PEEK is special in that it does not change the inflight count,
- * yet does install the socket into an fd. The following lock/unlock
- * pair is to ensure serialization with garbage collection. It must be
- * done between incrementing the file count and installing the file into
- * an fd.
- *
- * If garbage collection starts after the barrier provided by the
- * lock/unlock, then it will see the elevated refcount and not mark this
- * as a candidate. If a garbage collection is already in progress
- * before the file count was incremented, then the lock/unlock pair will
- * ensure that garbage collection is finished before progressing to
- * installing the fd.
- *
- * (*) A -> B where B is on the queue of A or B is on the queue of C
- * which is on the queue of listening socket A.
- */
- spin_lock(&unix_gc_lock);
- spin_unlock(&unix_gc_lock);
}
static void unix_destruct_scm(struct sk_buff *skb)
@@ -1885,7 +1959,7 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
{
int err = 0;
- UNIXCB(skb).pid = get_pid(scm->pid);
+ UNIXCB(skb).pid = get_pid(scm->pid);
UNIXCB(skb).uid = scm->creds.uid;
UNIXCB(skb).gid = scm->creds.gid;
UNIXCB(skb).fp = NULL;
@@ -1897,28 +1971,20 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
return err;
}
-static bool unix_passcred_enabled(const struct socket *sock,
- const struct sock *other)
-{
- return test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags) ||
- !other->sk_socket ||
- test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
- test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
-}
-
/*
* Some apps rely on write() giving SCM_CREDENTIALS
* We include credentials if source or destination socket
* asserted SOCK_PASSCRED.
*/
-static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
- const struct sock *other)
+static void unix_maybe_add_creds(struct sk_buff *skb, const struct sock *sk,
+ const struct sock *other)
{
if (UNIXCB(skb).pid)
return;
- if (unix_passcred_enabled(sock, other)) {
- UNIXCB(skb).pid = get_pid(task_tgid(current));
+
+ if (unix_may_passcred(sk) || unix_may_passcred(other) ||
+ !other->sk_socket) {
+ UNIXCB(skb).pid = get_pid(task_tgid(current));
current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
}
}
@@ -1937,8 +2003,10 @@ static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
struct scm_fp_list *fp = UNIXCB(skb).fp;
struct unix_sock *u = unix_sk(sk);
- if (unlikely(fp && fp->count))
+ if (unlikely(fp && fp->count)) {
atomic_add(fp->count, &u->scm_stat.nr_fds);
+ unix_add_edges(fp, u);
+ }
}
static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
@@ -1946,8 +2014,10 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
struct scm_fp_list *fp = UNIXCB(skb).fp;
struct unix_sock *u = unix_sk(sk);
- if (unlikely(fp && fp->count))
+ if (unlikely(fp && fp->count)) {
atomic_sub(fp->count, &u->scm_stat.nr_fds);
+ unix_del_edges(fp);
+ }
}
/*
@@ -1957,7 +2027,6 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
- DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
struct sock *sk = sock->sk, *other = NULL;
struct unix_sock *u = unix_sk(sk);
struct scm_cookie scm;
@@ -1973,12 +2042,13 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
wait_for_unix_gc(scm.fp);
- err = -EOPNOTSUPP;
- if (msg->msg_flags&MSG_OOB)
+ if (msg->msg_flags & MSG_OOB) {
+ err = -EOPNOTSUPP;
goto out;
+ }
if (msg->msg_namelen) {
- err = unix_validate_addr(sunaddr, msg->msg_namelen);
+ err = unix_validate_addr(msg->msg_name, msg->msg_namelen);
if (err)
goto out;
@@ -1988,24 +2058,18 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
NULL);
if (err)
goto out;
- } else {
- sunaddr = NULL;
- err = -ENOTCONN;
- other = unix_peer_get(sk);
- if (!other)
- goto out;
}
- if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+ if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
}
- err = -EMSGSIZE;
- if (len > sk->sk_sndbuf - 32)
+ if (len > READ_ONCE(sk->sk_sndbuf) - 32) {
+ err = -EMSGSIZE;
goto out;
+ }
if (len > SKB_MAX_ALLOC) {
data_len = min_t(size_t,
@@ -2019,7 +2083,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
msg->msg_flags & MSG_DONTWAIT, &err,
PAGE_ALLOC_COSTLY_ORDER);
- if (skb == NULL)
+ if (!skb)
goto out;
err = unix_scm_to_skb(&scm, skb, true);
@@ -2035,17 +2099,18 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
-restart:
- if (!other) {
- err = -ECONNRESET;
- if (sunaddr == NULL)
- goto out_free;
-
- other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
- sk->sk_type);
+ if (msg->msg_namelen) {
+lookup:
+ other = unix_find_other(sock_net(sk), msg->msg_name,
+ msg->msg_namelen, sk->sk_type, 0);
if (IS_ERR(other)) {
err = PTR_ERR(other);
- other = NULL;
+ goto out_free;
+ }
+ } else {
+ other = unix_peer_get(sk);
+ if (!other) {
+ err = -ENOTCONN;
goto out_free;
}
}
@@ -2053,58 +2118,69 @@ restart:
if (sk_filter(other, skb) < 0) {
/* Toss the packet but do not return any error to the sender */
err = len;
- goto out_free;
+ goto out_sock_put;
}
+restart:
sk_locked = 0;
unix_state_lock(other);
restart_locked:
- err = -EPERM;
- if (!unix_may_send(sk, other))
+
+ if (!unix_may_send(sk, other)) {
+ err = -EPERM;
goto out_unlock;
+ }
if (unlikely(sock_flag(other, SOCK_DEAD))) {
- /*
- * Check with 1003.1g - what should
- * datagram error
- */
- unix_state_unlock(other);
- sock_put(other);
+ /* Check with 1003.1g - what should datagram error */
- if (!sk_locked)
- unix_state_lock(sk);
+ unix_state_unlock(other);
- err = 0;
if (sk->sk_type == SOCK_SEQPACKET) {
/* We are here only when racing with unix_release_sock()
* is clearing @other. Never change state to TCP_CLOSE
* unlike SOCK_DGRAM wants.
*/
- unix_state_unlock(sk);
err = -EPIPE;
- } else if (unix_peer(sk) == other) {
+ goto out_sock_put;
+ }
+
+ if (!sk_locked)
+ unix_state_lock(sk);
+
+ if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
- } else {
- unix_state_unlock(sk);
+ goto out_sock_put;
}
- other = NULL;
- if (err)
- goto out_free;
- goto restart;
+ unix_state_unlock(sk);
+
+ if (!msg->msg_namelen) {
+ err = -ECONNRESET;
+ goto out_sock_put;
+ }
+
+ sock_put(other);
+ goto lookup;
}
- err = -EPIPE;
- if (other->sk_shutdown & RCV_SHUTDOWN)
+ if (other->sk_shutdown & RCV_SHUTDOWN) {
+ err = -EPIPE;
+ goto out_unlock;
+ }
+
+ if (UNIXCB(skb).fp && !other->sk_scm_rights) {
+ err = -EPERM;
goto out_unlock;
+ }
if (sk->sk_type != SOCK_SEQPACKET) {
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
@@ -2124,7 +2200,7 @@ restart_locked:
err = sock_intr_errno(timeo);
if (signal_pending(current))
- goto out_free;
+ goto out_sock_put;
goto restart;
}
@@ -2152,7 +2228,8 @@ restart_locked:
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
- maybe_add_creds(skb, sock, other);
+
+ unix_maybe_add_creds(skb, sk, other);
scm_stat_add(other, skb);
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
@@ -2165,11 +2242,11 @@ out_unlock:
if (sk_locked)
unix_state_unlock(sk);
unix_state_unlock(other);
+out_sock_put:
+ sock_put(other);
out_free:
- kfree_skb(skb);
+ consume_skb(skb);
out:
- if (other)
- sock_put(other);
scm_destroy(&scm);
return err;
}
@@ -2180,54 +2257,58 @@ out:
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
-static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
+static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other,
struct scm_cookie *scm, bool fds_sent)
{
struct unix_sock *ousk = unix_sk(other);
struct sk_buff *skb;
- int err = 0;
+ int err;
- skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
+ skb = sock_alloc_send_skb(sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
err = unix_scm_to_skb(scm, skb, !fds_sent);
- if (err < 0) {
- kfree_skb(skb);
- return err;
- }
+ if (err < 0)
+ goto out;
+
skb_put(skb, 1);
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
- if (err) {
- kfree_skb(skb);
- return err;
- }
+ if (err)
+ goto out;
unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN)) {
- unix_state_unlock(other);
- kfree_skb(skb);
- return -EPIPE;
+ err = -EPIPE;
+ goto out_unlock;
}
- maybe_add_creds(skb, sock, other);
- skb_get(skb);
+ if (UNIXCB(skb).fp && !other->sk_scm_rights) {
+ err = -EPERM;
+ goto out_unlock;
+ }
- if (ousk->oob_skb)
- consume_skb(ousk->oob_skb);
+ unix_maybe_add_creds(skb, sk, other);
+ scm_stat_add(other, skb);
+ spin_lock(&other->sk_receive_queue.lock);
WRITE_ONCE(ousk->oob_skb, skb);
+ __skb_queue_tail(&other->sk_receive_queue, skb);
+ spin_unlock(&other->sk_receive_queue.lock);
- scm_stat_add(other, skb);
- skb_queue_tail(&other->sk_receive_queue, skb);
sk_send_sigurg(other);
unix_state_unlock(other);
other->sk_data_ready(other);
+ return 0;
+out_unlock:
+ unix_state_unlock(other);
+out:
+ consume_skb(skb);
return err;
}
#endif
@@ -2236,13 +2317,11 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
+ struct sk_buff *skb = NULL;
struct sock *other = NULL;
- int err, size;
- struct sk_buff *skb;
- int sent = 0;
struct scm_cookie scm;
bool fds_sent = false;
- int data_len;
+ int err, sent = 0;
err = scm_send(sock, msg, &scm, false);
if (err < 0)
@@ -2250,8 +2329,8 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
wait_for_unix_gc(scm.fp);
- err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) {
+ err = -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (len)
len--;
@@ -2261,20 +2340,22 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
}
if (msg->msg_namelen) {
- err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+ err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
- err = -ENOTCONN;
other = unix_peer(sk);
- if (!other)
+ if (!other) {
+ err = -ENOTCONN;
goto out_err;
+ }
}
- if (sk->sk_shutdown & SEND_SHUTDOWN)
- goto pipe_err;
+ if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
+ goto out_pipe;
while (sent < len) {
- size = len - sent;
+ int size = len - sent;
+ int data_len;
if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
skb = sock_alloc_send_pskb(sk, 0, 0,
@@ -2282,7 +2363,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
&err, 0);
} else {
/* Keep two messages in the pipe so it schedules better */
- size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
+ size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
/* allow fallback to order-0 allocations */
size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
@@ -2300,19 +2381,18 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
/* Only send the fds in the first buffer */
err = unix_scm_to_skb(&scm, skb, !fds_sent);
- if (err < 0) {
- kfree_skb(skb);
- goto out_err;
- }
+ if (err < 0)
+ goto out_free;
+
fds_sent = true;
if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
err = skb_splice_from_iter(skb, &msg->msg_iter, size,
sk->sk_allocation);
- if (err < 0) {
- kfree_skb(skb);
- goto out_err;
- }
+ if (err < 0)
+ goto out_free;
+
size = err;
refcount_add(size, &sk->sk_wmem_alloc);
} else {
@@ -2320,19 +2400,23 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
skb->data_len = data_len;
skb->len = size;
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
- if (err) {
- kfree_skb(skb);
- goto out_err;
- }
+ if (err)
+ goto out_free;
}
unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
- goto pipe_err_free;
+ goto out_pipe_unlock;
- maybe_add_creds(skb, sock, other);
+ if (UNIXCB(skb).fp && !other->sk_scm_rights) {
+ unix_state_unlock(other);
+ err = -EPERM;
+ goto out_free;
+ }
+
+ unix_maybe_add_creds(skb, sk, other);
scm_stat_add(other, skb);
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
@@ -2342,7 +2426,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (msg->msg_flags & MSG_OOB) {
- err = queue_oob(sock, msg, other, &scm, fds_sent);
+ err = queue_oob(sk, msg, other, &scm, fds_sent);
if (err)
goto out_err;
sent++;
@@ -2353,13 +2437,14 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
return sent;
-pipe_err_free:
+out_pipe_unlock:
unix_state_unlock(other);
- kfree_skb(skb);
-pipe_err:
- if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
+out_pipe:
+ if (!sent && !(msg->msg_flags & MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
+out_free:
+ consume_skb(skb);
out_err:
scm_destroy(&scm);
return sent ? : err;
@@ -2375,7 +2460,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
if (err)
return err;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
return -ENOTCONN;
if (msg->msg_namelen)
@@ -2389,7 +2474,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
return -ENOTCONN;
return unix_dgram_recvmsg(sock, msg, size, flags);
@@ -2587,11 +2672,6 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
return timeo;
}
-static unsigned int unix_skb_len(const struct sk_buff *skb)
-{
- return skb->len - UNIXCB(skb).consumed;
-}
-
struct unix_stream_read_state {
int (*recv_actor)(struct sk_buff *, int, int,
struct unix_stream_read_state *);
@@ -2606,16 +2686,18 @@ struct unix_stream_read_state {
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
static int unix_stream_recv_urg(struct unix_stream_read_state *state)
{
+ struct sk_buff *oob_skb, *read_skb = NULL;
struct socket *sock = state->socket;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
int chunk = 1;
- struct sk_buff *oob_skb;
mutex_lock(&u->iolock);
unix_state_lock(sk);
+ spin_lock(&sk->sk_receive_queue.lock);
if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
+ spin_unlock(&sk->sk_receive_queue.lock);
unix_state_unlock(sk);
mutex_unlock(&u->iolock);
return -EINVAL;
@@ -2623,10 +2705,17 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
oob_skb = u->oob_skb;
- if (!(state->flags & MSG_PEEK))
+ if (!(state->flags & MSG_PEEK)) {
WRITE_ONCE(u->oob_skb, NULL);
- else
- skb_get(oob_skb);
+
+ if (oob_skb->prev != (struct sk_buff *)&sk->sk_receive_queue &&
+ !unix_skb_len(oob_skb->prev)) {
+ read_skb = oob_skb->prev;
+ __skb_unlink(read_skb, &sk->sk_receive_queue);
+ }
+ }
+
+ spin_unlock(&sk->sk_receive_queue.lock);
unix_state_unlock(sk);
chunk = state->recv_actor(oob_skb, 0, chunk, state);
@@ -2634,10 +2723,10 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
if (!(state->flags & MSG_PEEK))
UNIXCB(oob_skb).consumed += 1;
- consume_skb(oob_skb);
-
mutex_unlock(&u->iolock);
+ consume_skb(read_skb);
+
if (chunk < 0)
return -EFAULT;
@@ -2648,42 +2737,100 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
int flags, int copied)
{
+ struct sk_buff *read_skb = NULL, *unread_skb = NULL;
struct unix_sock *u = unix_sk(sk);
- if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
- skb_unlink(skb, &sk->sk_receive_queue);
- consume_skb(skb);
+ if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
+ return skb;
+
+ spin_lock(&sk->sk_receive_queue.lock);
+
+ if (!unix_skb_len(skb)) {
+ if (copied && (!u->oob_skb || skb == u->oob_skb)) {
+ skb = NULL;
+ } else if (flags & MSG_PEEK) {
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ } else {
+ read_skb = skb;
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ __skb_unlink(read_skb, &sk->sk_receive_queue);
+ }
+
+ if (!skb)
+ goto unlock;
+ }
+
+ if (skb != u->oob_skb)
+ goto unlock;
+
+ if (copied) {
skb = NULL;
- } else {
- if (skb == u->oob_skb) {
- if (copied) {
- skb = NULL;
- } else if (sock_flag(sk, SOCK_URGINLINE)) {
- if (!(flags & MSG_PEEK)) {
- WRITE_ONCE(u->oob_skb, NULL);
- consume_skb(skb);
- }
- } else if (flags & MSG_PEEK) {
- skb = NULL;
- } else {
- skb_unlink(skb, &sk->sk_receive_queue);
- WRITE_ONCE(u->oob_skb, NULL);
- if (!WARN_ON_ONCE(skb_unref(skb)))
- kfree_skb(skb);
- skb = skb_peek(&sk->sk_receive_queue);
- }
+ } else if (!(flags & MSG_PEEK)) {
+ WRITE_ONCE(u->oob_skb, NULL);
+
+ if (!sock_flag(sk, SOCK_URGINLINE)) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ unread_skb = skb;
+ skb = skb_peek(&sk->sk_receive_queue);
}
+ } else if (!sock_flag(sk, SOCK_URGINLINE)) {
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
}
+
+unlock:
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ consume_skb(read_skb);
+ kfree_skb_reason(unread_skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
+
return skb;
}
#endif
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
- if (unlikely(sk->sk_state != TCP_ESTABLISHED))
+ struct unix_sock *u = unix_sk(sk);
+ struct sk_buff *skb;
+ int err;
+
+ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
return -ENOTCONN;
- return unix_read_skb(sk, recv_actor);
+ mutex_lock(&u->iolock);
+ skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
+ mutex_unlock(&u->iolock);
+ if (!skb)
+ return err;
+
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ if (unlikely(skb == READ_ONCE(u->oob_skb))) {
+ bool drop = false;
+
+ unix_state_lock(sk);
+
+ if (sock_flag(sk, SOCK_DEAD)) {
+ unix_state_unlock(sk);
+ kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
+ return -ECONNRESET;
+ }
+
+ spin_lock(&sk->sk_receive_queue.lock);
+ if (likely(skb == u->oob_skb)) {
+ WRITE_ONCE(u->oob_skb, NULL);
+ drop = true;
+ }
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ unix_state_unlock(sk);
+
+ if (drop) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
+ return -EAGAIN;
+ }
+ }
+#endif
+
+ return recv_actor(sk, skb);
}
static int unix_stream_read_generic(struct unix_stream_read_state *state,
@@ -2704,7 +2851,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
size_t size = state->size;
unsigned int last_len;
- if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
err = -EINVAL;
goto out;
}
@@ -2730,9 +2877,8 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
skip = max(sk_peek_offset(sk, flags), 0);
do {
- int chunk;
- bool drop_skb;
struct sk_buff *skb, *last;
+ int chunk;
redo:
unix_state_lock(sk);
@@ -2806,8 +2952,7 @@ unlock:
/* Never glue messages from different writers */
if (!unix_skb_scm_eq(skb, &scm))
break;
- } else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) {
+ } else if (unix_may_passcred(sk)) {
/* Copy credentials */
scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
unix_set_secdata(&scm, skb);
@@ -2828,11 +2973,7 @@ unlock:
}
chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
- skb_get(skb);
chunk = state->recv_actor(skb, skip, chunk, state);
- drop_skb = !unix_skb_len(skb);
- /* skb is only safe to use if !drop_skb */
- consume_skb(skb);
if (chunk < 0) {
if (copied == 0)
copied = -EFAULT;
@@ -2841,18 +2982,6 @@ unlock:
copied += chunk;
size -= chunk;
- if (drop_skb) {
- /* the skb was touched by a concurrent reader;
- * we should not expect anything from this skb
- * anymore and assume it invalid - we can be
- * sure it was dropped from the socket queue
- *
- * let's report a short read
- */
- err = 0;
- break;
- }
-
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
UNIXCB(skb).consumed += chunk;
@@ -3035,7 +3164,7 @@ long unix_inq_len(struct sock *sk)
struct sk_buff *skb;
long amount = 0;
- if (sk->sk_state == TCP_LISTEN)
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
return -EINVAL;
spin_lock(&sk->sk_receive_queue.lock);
@@ -3120,12 +3249,27 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
case SIOCATMARK:
{
+ struct unix_sock *u = unix_sk(sk);
struct sk_buff *skb;
int answ = 0;
+ mutex_lock(&u->iolock);
+
skb = skb_peek(&sk->sk_receive_queue);
- if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
- answ = 1;
+ if (skb) {
+ struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
+ struct sk_buff *next_skb;
+
+ next_skb = skb_peek_next(skb, &sk->sk_receive_queue);
+
+ if (skb == oob_skb ||
+ (!unix_skb_len(skb) &&
+ (!oob_skb || next_skb == oob_skb)))
+ answ = 1;
+ }
+
+ mutex_unlock(&u->iolock);
+
err = put_user(answ, (int __user *)arg);
}
break;
@@ -3147,12 +3291,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
+ unsigned char state;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
+ state = READ_ONCE(sk->sk_state);
/* exceptional events? */
if (READ_ONCE(sk->sk_err))
@@ -3174,14 +3320,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
/* Connection-based need to check for termination and startup */
if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
- sk->sk_state == TCP_CLOSE)
+ state == TCP_CLOSE)
mask |= EPOLLHUP;
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
*/
- if (unix_writable(sk))
+ if (unix_writable(sk, state))
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
@@ -3192,12 +3338,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
{
struct sock *sk = sock->sk, *other;
unsigned int writable;
+ unsigned char state;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
+ state = READ_ONCE(sk->sk_state);
/* exceptional events? */
if (READ_ONCE(sk->sk_err) ||
@@ -3217,19 +3365,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
- if (sk->sk_type == SOCK_SEQPACKET) {
- if (sk->sk_state == TCP_CLOSE)
- mask |= EPOLLHUP;
- /* connection hasn't started yet? */
- if (sk->sk_state == TCP_SYN_SENT)
- return mask;
- }
+ if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
+ mask |= EPOLLHUP;
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
return mask;
- writable = unix_writable(sk);
+ writable = unix_writable(sk, state);
if (writable) {
unix_state_lock(sk);
@@ -3623,6 +3766,7 @@ static int __net_init unix_net_init(struct net *net)
for (i = 0; i < UNIX_HASH_SIZE; i++) {
spin_lock_init(&net->unx.table.locks[i]);
+ lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
}
diff --git a/net/unix/af_unix.h b/net/unix/af_unix.h
new file mode 100644
index 000000000000..59db179df9bb
--- /dev/null
+++ b/net/unix/af_unix.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __AF_UNIX_H
+#define __AF_UNIX_H
+
+#include <linux/uidgid.h>
+
+#define UNIX_HASH_MOD (256 - 1)
+#define UNIX_HASH_SIZE (256 * 2)
+#define UNIX_HASH_BITS 8
+
+struct sock *unix_peer_get(struct sock *sk);
+
+struct unix_skb_parms {
+ struct pid *pid; /* skb credentials */
+ kuid_t uid;
+ kgid_t gid;
+ struct scm_fp_list *fp; /* Passed files */
+#ifdef CONFIG_SECURITY_NETWORK
+ u32 secid; /* Security ID */
+#endif
+ u32 consumed;
+} __randomize_layout;
+
+#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
+
+/* GC for SCM_RIGHTS */
+extern unsigned int unix_tot_inflight;
+void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver);
+void unix_del_edges(struct scm_fp_list *fpl);
+void unix_update_edges(struct unix_sock *receiver);
+int unix_prepare_fpl(struct scm_fp_list *fpl);
+void unix_destroy_fpl(struct scm_fp_list *fpl);
+void unix_gc(void);
+void wait_for_unix_gc(struct scm_fp_list *fpl);
+
+/* SOCK_DIAG */
+long unix_inq_len(struct sock *sk);
+long unix_outq_len(struct sock *sk);
+
+/* sysctl */
+#ifdef CONFIG_SYSCTL
+int unix_sysctl_register(struct net *net);
+void unix_sysctl_unregister(struct net *net);
+#else
+static inline int unix_sysctl_register(struct net *net)
+{
+ return 0;
+}
+
+static inline void unix_sysctl_unregister(struct net *net)
+{
+}
+#endif
+
+/* BPF SOCKMAP */
+int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, int flags);
+int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, int flags);
+
+#ifdef CONFIG_BPF_SYSCALL
+extern struct proto unix_dgram_proto;
+extern struct proto unix_stream_proto;
+
+int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+void __init unix_bpf_build_proto(void);
+#else
+static inline void __init unix_bpf_build_proto(void)
+{
+}
+#endif
+
+#endif
diff --git a/net/unix/diag.c b/net/unix/diag.c
index ae39538c5042..79b182d0e62a 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -1,15 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/sock_diag.h>
-#include <linux/unix_diag.h>
-#include <linux/skbuff.h>
+
+#include <linux/dcache.h>
#include <linux/module.h>
-#include <linux/uidgid.h>
-#include <net/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/sock_diag.h>
+#include <linux/types.h>
+#include <linux/user_namespace.h>
#include <net/af_unix.h>
+#include <net/netlink.h>
#include <net/tcp_states.h>
-#include <net/sock.h>
+#include <uapi/linux/unix_diag.h>
+
+#include "af_unix.h"
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
@@ -47,9 +49,7 @@ static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
peer = unix_peer_get(sk);
if (peer) {
- unix_state_lock(peer);
ino = sock_i_ino(peer);
- unix_state_unlock(peer);
sock_put(peer);
return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
u32 *buf;
int i;
- if (sk->sk_state == TCP_LISTEN) {
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
spin_lock(&sk->sk_receive_queue.lock);
attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
@@ -75,20 +75,9 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
buf = nla_data(attr);
i = 0;
- skb_queue_walk(&sk->sk_receive_queue, skb) {
- struct sock *req, *peer;
-
- req = skb->sk;
- /*
- * The state lock is outer for the same sk's
- * queue lock. With the other's queue locked it's
- * OK to lock the state.
- */
- unix_state_lock_nested(req, U_LOCK_DIAG);
- peer = unix_sk(req)->peer;
- buf[i++] = (peer ? sock_i_ino(peer) : 0);
- unix_state_unlock(req);
- }
+ skb_queue_walk(&sk->sk_receive_queue, skb)
+ buf[i++] = sock_i_ino(unix_peer(skb->sk));
+
spin_unlock(&sk->sk_receive_queue.lock);
}
@@ -103,8 +92,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
struct unix_diag_rqlen rql;
- if (sk->sk_state == TCP_LISTEN) {
- rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+ rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
rql.udiag_wqueue = sk->sk_max_ack_backlog;
} else {
rql.udiag_rqueue = (u32) unix_inq_len(sk);
@@ -136,7 +125,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
rep = nlmsg_data(nlh);
rep->udiag_family = AF_UNIX;
rep->udiag_type = sk->sk_type;
- rep->udiag_state = sk->sk_state;
+ rep->udiag_state = READ_ONCE(sk->sk_state);
rep->pad = 0;
rep->udiag_ino = sk_ino;
sock_diag_save_cookie(sk, rep->udiag_cookie);
@@ -165,7 +154,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
goto out_nlmsg_trim;
- if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
+ if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_UID) &&
@@ -180,22 +169,6 @@ out_nlmsg_trim:
return -EMSGSIZE;
}
-static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
- struct user_namespace *user_ns,
- u32 portid, u32 seq, u32 flags)
-{
- int sk_ino;
-
- unix_state_lock(sk);
- sk_ino = sock_i_ino(sk);
- unix_state_unlock(sk);
-
- if (!sk_ino)
- return 0;
-
- return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
-}
-
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -213,14 +186,22 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
num = 0;
spin_lock(&net->unx.table.locks[slot]);
sk_for_each(sk, &net->unx.table.buckets[slot]) {
+ int sk_ino;
+
if (num < s_num)
goto next;
- if (!(req->udiag_states & (1 << sk->sk_state)))
+
+ if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
+ goto next;
+
+ sk_ino = sock_i_ino(sk);
+ if (!sk_ino)
goto next;
- if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
+
+ if (sk_diag_fill(sk, skb, req, sk_user_ns(skb->sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- NLM_F_MULTI) < 0) {
+ NLM_F_MULTI, sk_ino) < 0) {
spin_unlock(&net->unx.table.locks[slot]);
goto done;
}
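Several reads in the dump path above (sk_state, sk_shutdown, the listener's queue length) now happen without taking the peer or state locks, relying on READ_ONCE()/WRITE_ONCE() pairing with the writers. A minimal sketch of that pattern with an illustrative helper name; a slightly stale value is acceptable for diagnostics:

static u32 diag_rqueue_lockless(struct sock *sk)
{
	/* Paired with WRITE_ONCE() where writers update sk_state under
	 * the socket locks; no lock is taken here.
	 */
	if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
		return skb_queue_len_lockless(&sk->sk_receive_queue);

	return (u32)unix_inq_len(sk);
}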
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6433a414acf8..01e2b9452c75 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -63,24 +63,33 @@
* wrt receive and holding up unrelated socket operations.
*/
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/un.h>
-#include <linux/net.h>
#include <linux/fs.h>
+#include <linux/list.h>
#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/file.h>
-#include <linux/proc_fs.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-
-#include <net/sock.h>
+#include <linux/socket.h>
+#include <linux/workqueue.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>
+#include "af_unix.h"
+
+struct unix_vertex {
+ struct list_head edges;
+ struct list_head entry;
+ struct list_head scc_entry;
+ unsigned long out_degree;
+ unsigned long index;
+ unsigned long scc_index;
+};
+
+struct unix_edge {
+ struct unix_sock *predecessor;
+ struct unix_sock *successor;
+ struct list_head vertex_entry;
+ struct list_head stack_entry;
+};
+
struct unix_sock *unix_get_socket(struct file *filp)
{
struct inode *inode = file_inode(filp);
@@ -101,277 +110,481 @@ struct unix_sock *unix_get_socket(struct file *filp)
return NULL;
}
-DEFINE_SPINLOCK(unix_gc_lock);
+static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
+{
+ /* If an embryo socket has a fd,
+ * the listener indirectly holds the fd's refcnt.
+ */
+ if (edge->successor->listener)
+ return unix_sk(edge->successor->listener)->vertex;
+
+ return edge->successor->vertex;
+}
+
+static bool unix_graph_maybe_cyclic;
+static bool unix_graph_grouped;
+
+static void unix_update_graph(struct unix_vertex *vertex)
+{
+ /* If the receiver socket is not inflight, no cyclic
+ * reference could be formed.
+ */
+ if (!vertex)
+ return;
+
+ unix_graph_maybe_cyclic = true;
+ unix_graph_grouped = false;
+}
+
+static LIST_HEAD(unix_unvisited_vertices);
+
+enum unix_vertex_index {
+ UNIX_VERTEX_INDEX_MARK1,
+ UNIX_VERTEX_INDEX_MARK2,
+ UNIX_VERTEX_INDEX_START,
+};
+
+static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;
+
+static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
+{
+ struct unix_vertex *vertex = edge->predecessor->vertex;
+
+ if (!vertex) {
+ vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
+ vertex->index = unix_vertex_unvisited_index;
+ vertex->out_degree = 0;
+ INIT_LIST_HEAD(&vertex->edges);
+ INIT_LIST_HEAD(&vertex->scc_entry);
+
+ list_move_tail(&vertex->entry, &unix_unvisited_vertices);
+ edge->predecessor->vertex = vertex;
+ }
+
+ vertex->out_degree++;
+ list_add_tail(&edge->vertex_entry, &vertex->edges);
+
+ unix_update_graph(unix_edge_successor(edge));
+}
+
+static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
+{
+ struct unix_vertex *vertex = edge->predecessor->vertex;
+
+ if (!fpl->dead)
+ unix_update_graph(unix_edge_successor(edge));
+
+ list_del(&edge->vertex_entry);
+ vertex->out_degree--;
+
+ if (!vertex->out_degree) {
+ edge->predecessor->vertex = NULL;
+ list_move_tail(&vertex->entry, &fpl->vertices);
+ }
+}
+
+static void unix_free_vertices(struct scm_fp_list *fpl)
+{
+ struct unix_vertex *vertex, *next_vertex;
+
+ list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
+ list_del(&vertex->entry);
+ kfree(vertex);
+ }
+}
+
+static DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;
-static LIST_HEAD(gc_candidates);
-static LIST_HEAD(gc_inflight_list);
-/* Keep the number of times in flight count for the file
- * descriptor if it is for an AF_UNIX socket.
- */
-void unix_inflight(struct user_struct *user, struct file *filp)
+void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
- struct unix_sock *u = unix_get_socket(filp);
+ int i = 0, j = 0;
spin_lock(&unix_gc_lock);
- if (u) {
- if (!u->inflight) {
- WARN_ON_ONCE(!list_empty(&u->link));
- list_add_tail(&u->link, &gc_inflight_list);
- } else {
- WARN_ON_ONCE(list_empty(&u->link));
- }
- u->inflight++;
+ if (!fpl->count_unix)
+ goto out;
- /* Paired with READ_ONCE() in wait_for_unix_gc() */
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
- }
+ do {
+ struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
+ struct unix_edge *edge;
- WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
+ if (!inflight)
+ continue;
+
+ edge = fpl->edges + i++;
+ edge->predecessor = inflight;
+ edge->successor = receiver;
+
+ unix_add_edge(fpl, edge);
+ } while (i < fpl->count_unix);
+
+ receiver->scm_stat.nr_unix_fds += fpl->count_unix;
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
+out:
+ WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
spin_unlock(&unix_gc_lock);
+
+ fpl->inflight = true;
+
+ unix_free_vertices(fpl);
}
-void unix_notinflight(struct user_struct *user, struct file *filp)
+void unix_del_edges(struct scm_fp_list *fpl)
{
- struct unix_sock *u = unix_get_socket(filp);
+ struct unix_sock *receiver;
+ int i = 0;
spin_lock(&unix_gc_lock);
- if (u) {
- WARN_ON_ONCE(!u->inflight);
- WARN_ON_ONCE(list_empty(&u->link));
+ if (!fpl->count_unix)
+ goto out;
- u->inflight--;
- if (!u->inflight)
- list_del_init(&u->link);
+ do {
+ struct unix_edge *edge = fpl->edges + i++;
- /* Paired with READ_ONCE() in wait_for_unix_gc() */
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
- }
+ unix_del_edge(fpl, edge);
+ } while (i < fpl->count_unix);
- WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
+ if (!fpl->dead) {
+ receiver = fpl->edges[0].successor;
+ receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
+ }
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
+out:
+ WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);
spin_unlock(&unix_gc_lock);
+
+ fpl->inflight = false;
}
-static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
- struct sk_buff_head *hitlist)
+void unix_update_edges(struct unix_sock *receiver)
{
- struct sk_buff *skb;
- struct sk_buff *next;
-
- spin_lock(&x->sk_receive_queue.lock);
- skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
- /* Do we have file descriptors ? */
- if (UNIXCB(skb).fp) {
- bool hit = false;
- /* Process the descriptors of this socket */
- int nfd = UNIXCB(skb).fp->count;
- struct file **fp = UNIXCB(skb).fp->fp;
-
- while (nfd--) {
- /* Get the socket the fd matches if it indeed does so */
- struct unix_sock *u = unix_get_socket(*fp++);
-
- /* Ignore non-candidates, they could have been added
- * to the queues after starting the garbage collection
- */
- if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
- hit = true;
-
- func(u);
- }
- }
- if (hit && hitlist != NULL) {
- __skb_unlink(skb, &x->sk_receive_queue);
- __skb_queue_tail(hitlist, skb);
- }
- }
+ /* nr_unix_fds is only updated under unix_state_lock().
+ * If it's 0 here, the embryo socket is not part of the
+ * inflight graph, and GC will not see it, so no lock needed.
+ */
+ if (!receiver->scm_stat.nr_unix_fds) {
+ receiver->listener = NULL;
+ } else {
+ spin_lock(&unix_gc_lock);
+ unix_update_graph(unix_sk(receiver->listener)->vertex);
+ receiver->listener = NULL;
+ spin_unlock(&unix_gc_lock);
}
- spin_unlock(&x->sk_receive_queue.lock);
}
-static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
- struct sk_buff_head *hitlist)
+int unix_prepare_fpl(struct scm_fp_list *fpl)
{
- if (x->sk_state != TCP_LISTEN) {
- scan_inflight(x, func, hitlist);
- } else {
- struct sk_buff *skb;
- struct sk_buff *next;
- struct unix_sock *u;
- LIST_HEAD(embryos);
+ struct unix_vertex *vertex;
+ int i;
- /* For a listening socket collect the queued embryos
- * and perform a scan on them as well.
- */
- spin_lock(&x->sk_receive_queue.lock);
- skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
- u = unix_sk(skb->sk);
+ if (!fpl->count_unix)
+ return 0;
- /* An embryo cannot be in-flight, so it's safe
- * to use the list link.
- */
- WARN_ON_ONCE(!list_empty(&u->link));
- list_add_tail(&u->link, &embryos);
- }
- spin_unlock(&x->sk_receive_queue.lock);
+ for (i = 0; i < fpl->count_unix; i++) {
+ vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
+ if (!vertex)
+ goto err;
- while (!list_empty(&embryos)) {
- u = list_entry(embryos.next, struct unix_sock, link);
- scan_inflight(&u->sk, func, hitlist);
- list_del_init(&u->link);
- }
+ list_add(&vertex->entry, &fpl->vertices);
}
+
+ fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
+ GFP_KERNEL_ACCOUNT);
+ if (!fpl->edges)
+ goto err;
+
+ return 0;
+
+err:
+ unix_free_vertices(fpl);
+ return -ENOMEM;
}
-static void dec_inflight(struct unix_sock *usk)
+void unix_destroy_fpl(struct scm_fp_list *fpl)
{
- usk->inflight--;
+ if (fpl->inflight)
+ unix_del_edges(fpl);
+
+ kvfree(fpl->edges);
+ unix_free_vertices(fpl);
}
-static void inc_inflight(struct unix_sock *usk)
+static bool unix_vertex_dead(struct unix_vertex *vertex)
{
- usk->inflight++;
+ struct unix_edge *edge;
+ struct unix_sock *u;
+ long total_ref;
+
+ list_for_each_entry(edge, &vertex->edges, vertex_entry) {
+ struct unix_vertex *next_vertex = unix_edge_successor(edge);
+
+ /* The vertex's fd can be received by a non-inflight socket. */
+ if (!next_vertex)
+ return false;
+
+ /* The vertex's fd can be received by an inflight socket in
+ * another SCC.
+ */
+ if (next_vertex->scc_index != vertex->scc_index)
+ return false;
+ }
+
+ /* No receiver exists out of the same SCC. */
+
+ edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
+ u = edge->predecessor;
+ total_ref = file_count(u->sk.sk_socket->file);
+
+ /* If not close()d, total_ref > out_degree. */
+ if (total_ref != vertex->out_degree)
+ return false;
+
+ return true;
}
-static void inc_inflight_move_tail(struct unix_sock *u)
+static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
{
- u->inflight++;
+ struct unix_vertex *vertex;
- /* If this still might be part of a cycle, move it to the end
- * of the list, so that it's checked even if it was already
- * passed over
- */
- if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
- list_move_tail(&u->link, &gc_candidates);
-}
+ list_for_each_entry_reverse(vertex, scc, scc_entry) {
+ struct sk_buff_head *queue;
+ struct unix_edge *edge;
+ struct unix_sock *u;
-static bool gc_in_progress;
+ edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
+ u = edge->predecessor;
+ queue = &u->sk.sk_receive_queue;
-static void __unix_gc(struct work_struct *work)
-{
- struct sk_buff_head hitlist;
- struct unix_sock *u, *next;
- LIST_HEAD(not_cycle_list);
- struct list_head cursor;
+ spin_lock(&queue->lock);
- spin_lock(&unix_gc_lock);
+ if (u->sk.sk_state == TCP_LISTEN) {
+ struct sk_buff *skb;
- /* First, select candidates for garbage collection. Only
- * in-flight sockets are considered, and from those only ones
- * which don't have any external reference.
- *
- * Holding unix_gc_lock will protect these candidates from
- * being detached, and hence from gaining an external
- * reference. Since there are no possible receivers, all
- * buffers currently on the candidates' queues stay there
- * during the garbage collection.
- *
- * We also know that no new candidate can be added onto the
- * receive queues. Other, non candidate sockets _can_ be
- * added to queue, so we must make sure only to touch
- * candidates.
- *
- * Embryos, though never candidates themselves, affect which
- * candidates are reachable by the garbage collector. Before
- * being added to a listener's queue, an embryo may already
- * receive data carrying SCM_RIGHTS, potentially making the
- * passed socket a candidate that is not yet reachable by the
- * collector. It becomes reachable once the embryo is
- * enqueued. Therefore, we must ensure that no SCM-laden
- * embryo appears in a (candidate) listener's queue between
- * consecutive scan_children() calls.
- */
- list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
- struct sock *sk = &u->sk;
- long total_refs;
-
- total_refs = file_count(sk->sk_socket->file);
-
- WARN_ON_ONCE(!u->inflight);
- WARN_ON_ONCE(total_refs < u->inflight);
- if (total_refs == u->inflight) {
- list_move_tail(&u->link, &gc_candidates);
- __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
- __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
-
- if (sk->sk_state == TCP_LISTEN) {
- unix_state_lock(sk);
- unix_state_unlock(sk);
+ skb_queue_walk(queue, skb) {
+ struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;
+
+ spin_lock(&embryo_queue->lock);
+ skb_queue_splice_init(embryo_queue, hitlist);
+ spin_unlock(&embryo_queue->lock);
}
+ } else {
+ skb_queue_splice_init(queue, hitlist);
}
+
+ spin_unlock(&queue->lock);
}
+}
- /* Now remove all internal in-flight reference to children of
- * the candidates.
- */
- list_for_each_entry(u, &gc_candidates, link)
- scan_children(&u->sk, dec_inflight, NULL);
+static bool unix_scc_cyclic(struct list_head *scc)
+{
+ struct unix_vertex *vertex;
+ struct unix_edge *edge;
- /* Restore the references for children of all candidates,
- * which have remaining references. Do this recursively, so
- * only those remain, which form cyclic references.
- *
- * Use a "cursor" link, to make the list traversal safe, even
- * though elements might be moved about.
+ /* SCC containing multiple vertices ? */
+ if (!list_is_singular(scc))
+ return true;
+
+ vertex = list_first_entry(scc, typeof(*vertex), scc_entry);
+
+ /* Self-reference or an embryo-listener circle ? */
+ list_for_each_entry(edge, &vertex->edges, vertex_entry) {
+ if (unix_edge_successor(edge) == vertex)
+ return true;
+ }
+
+ return false;
+}
+
+static LIST_HEAD(unix_visited_vertices);
+static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
+
+static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
+ struct sk_buff_head *hitlist)
+{
+ LIST_HEAD(vertex_stack);
+ struct unix_edge *edge;
+ LIST_HEAD(edge_stack);
+
+next_vertex:
+ /* Push vertex to vertex_stack and mark it as on-stack
+ * (index >= UNIX_VERTEX_INDEX_START).
+ * The vertex will be popped when finalising SCC later.
*/
- list_add(&cursor, &gc_candidates);
- while (cursor.next != &gc_candidates) {
- u = list_entry(cursor.next, struct unix_sock, link);
+ list_add(&vertex->scc_entry, &vertex_stack);
+
+ vertex->index = *last_index;
+ vertex->scc_index = *last_index;
+ (*last_index)++;
+
+ /* Explore neighbour vertices (receivers of the current vertex's fd). */
+ list_for_each_entry(edge, &vertex->edges, vertex_entry) {
+ struct unix_vertex *next_vertex = unix_edge_successor(edge);
+
+ if (!next_vertex)
+ continue;
+
+ if (next_vertex->index == unix_vertex_unvisited_index) {
+ /* Iterative deepening depth first search
+ *
+ * 1. Push a forward edge to edge_stack and set
+ * the successor to vertex for the next iteration.
+ */
+ list_add(&edge->stack_entry, &edge_stack);
+
+ vertex = next_vertex;
+ goto next_vertex;
- /* Move cursor to after the current position. */
- list_move(&cursor, &u->link);
+ /* 2. Pop the edge directed to the current vertex
+ * and restore the ancestor for backtracking.
+ */
+prev_vertex:
+ edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
+ list_del_init(&edge->stack_entry);
- if (u->inflight) {
- list_move_tail(&u->link, &not_cycle_list);
- __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
- scan_children(&u->sk, inc_inflight_move_tail, NULL);
+ next_vertex = vertex;
+ vertex = edge->predecessor->vertex;
+
+ /* If the successor has a smaller scc_index, two vertices
+ * are in the same SCC, so propagate the smaller scc_index
+ * to skip SCC finalisation.
+ */
+ vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
+ } else if (next_vertex->index != unix_vertex_grouped_index) {
+ /* Loop detected by a back/cross edge.
+ *
+ * The successor is on vertex_stack, so two vertices are in
+ * the same SCC. If the successor has a smaller *scc_index*,
+ * propagate it to skip SCC finalisation.
+ */
+ vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
+ } else {
+ /* The successor was already grouped as another SCC */
}
}
- list_del(&cursor);
- /* Now gc_candidates contains only garbage. Restore original
- * inflight counters for these as well, and remove the skbuffs
- * which are creating the cycle(s).
- */
- skb_queue_head_init(&hitlist);
- list_for_each_entry(u, &gc_candidates, link) {
- scan_children(&u->sk, inc_inflight, &hitlist);
-
-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
- if (u->oob_skb) {
- kfree_skb(u->oob_skb);
- u->oob_skb = NULL;
+ if (vertex->index == vertex->scc_index) {
+ struct unix_vertex *v;
+ struct list_head scc;
+ bool scc_dead = true;
+
+ /* SCC finalised.
+ *
+ * If the scc_index was not updated, all the vertices above on
+ * vertex_stack are in the same SCC. Group them using scc_entry.
+ */
+ __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
+
+ list_for_each_entry_reverse(v, &scc, scc_entry) {
+ /* Don't restart DFS from this vertex in unix_walk_scc(). */
+ list_move_tail(&v->entry, &unix_visited_vertices);
+
+ /* Mark vertex as off-stack. */
+ v->index = unix_vertex_grouped_index;
+
+ if (scc_dead)
+ scc_dead = unix_vertex_dead(v);
}
-#endif
+
+ if (scc_dead)
+ unix_collect_skb(&scc, hitlist);
+ else if (!unix_graph_maybe_cyclic)
+ unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+
+ list_del(&scc);
}
- /* not_cycle_list contains those sockets which do not make up a
- * cycle. Restore these to the inflight list.
+ /* Need backtracking ? */
+ if (!list_empty(&edge_stack))
+ goto prev_vertex;
+}
+
+static void unix_walk_scc(struct sk_buff_head *hitlist)
+{
+ unsigned long last_index = UNIX_VERTEX_INDEX_START;
+
+ unix_graph_maybe_cyclic = false;
+
+ /* Visit every vertex exactly once.
+ * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
*/
- while (!list_empty(&not_cycle_list)) {
- u = list_entry(not_cycle_list.next, struct unix_sock, link);
- __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
- list_move_tail(&u->link, &gc_inflight_list);
+ while (!list_empty(&unix_unvisited_vertices)) {
+ struct unix_vertex *vertex;
+
+ vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
+ __unix_walk_scc(vertex, &last_index, hitlist);
}
- spin_unlock(&unix_gc_lock);
+ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
+ swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
+
+ unix_graph_grouped = true;
+}
+
+static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
+{
+ unix_graph_maybe_cyclic = false;
+
+ while (!list_empty(&unix_unvisited_vertices)) {
+ struct unix_vertex *vertex;
+ struct list_head scc;
+ bool scc_dead = true;
+
+ vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
+ list_add(&scc, &vertex->scc_entry);
- /* Here we are. Hitlist is filled. Die. */
- __skb_queue_purge(&hitlist);
+ list_for_each_entry_reverse(vertex, &scc, scc_entry) {
+ list_move_tail(&vertex->entry, &unix_visited_vertices);
+
+ if (scc_dead)
+ scc_dead = unix_vertex_dead(vertex);
+ }
+
+ if (scc_dead)
+ unix_collect_skb(&scc, hitlist);
+ else if (!unix_graph_maybe_cyclic)
+ unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+
+ list_del(&scc);
+ }
+
+ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
+}
+
+static bool gc_in_progress;
+
+static void __unix_gc(struct work_struct *work)
+{
+ struct sk_buff_head hitlist;
+ struct sk_buff *skb;
spin_lock(&unix_gc_lock);
- /* All candidates should have been detached by now. */
- WARN_ON_ONCE(!list_empty(&gc_candidates));
+ if (!unix_graph_maybe_cyclic) {
+ spin_unlock(&unix_gc_lock);
+ goto skip_gc;
+ }
- /* Paired with READ_ONCE() in wait_for_unix_gc(). */
- WRITE_ONCE(gc_in_progress, false);
+ __skb_queue_head_init(&hitlist);
+
+ if (unix_graph_grouped)
+ unix_walk_scc_fast(&hitlist);
+ else
+ unix_walk_scc(&hitlist);
spin_unlock(&unix_gc_lock);
+
+ skb_queue_walk(&hitlist, skb) {
+ if (UNIXCB(skb).fp)
+ UNIXCB(skb).fp->dead = true;
+ }
+
+ __skb_queue_purge_reason(&hitlist, SKB_DROP_REASON_SOCKET_CLOSE);
+skip_gc:
+ WRITE_ONCE(gc_in_progress, false);
}
static DECLARE_WORK(unix_gc_work, __unix_gc);
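The reworked GC above builds a graph of in-flight AF_UNIX sockets (vertices) and SCM_RIGHTS references (edges), groups it into strongly connected components, and purges the skbs of any SCC whose sockets are referenced only from inside that SCC (file_count() equal to out_degree). A userspace sketch of the smallest such cycle, error handling omitted; it is illustrative only and not part of the patch:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

static void make_self_cycle(void)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	int s[2];

	socketpair(AF_UNIX, SOCK_DGRAM, 0, s);

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &s[0], sizeof(int));

	/* The skb carrying s[0]'s fd lands in s[0]'s own receive queue
	 * (s[0] is s[1]'s peer), so s[0] now holds an in-flight
	 * reference to itself.
	 */
	sendmsg(s[1], &msg, 0);

	/* After both close()s, the only reference to s[0]'s file is the
	 * one queued on s[0] itself: a single-vertex SCC with a self
	 * edge, dead by the criterion above and reclaimed by unix_gc().
	 */
	close(s[0]);
	close(s[1]);
}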
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 3e84b31c355a..e02ed6e3955c 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -5,11 +5,13 @@
* Authors: Mike Shaver.
*/
-#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/sysctl.h>
-
#include <net/af_unix.h>
+#include <net/net_namespace.h>
+
+#include "af_unix.h"
static struct ctl_table unix_table[] = {
{
@@ -19,7 +21,6 @@ static struct ctl_table unix_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
- { }
};
int __net_init unix_sysctl_register(struct net *net)
@@ -52,7 +53,7 @@ err_alloc:
void unix_sysctl_unregister(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
table = net->unx.ctl->ctl_table_arg;
unregister_net_sysctl_table(net->unx.ctl);
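With the sentinel entry and the mutable table pointer gone, this file keeps following the usual per-netns sysctl pattern. A hedged sketch of that pattern, assuming register_net_sysctl_sz() (which takes the table size explicitly) and trimming the error paths; details here are approximate and not a copy of the real function:

int __net_init example_sysctl_register(struct net *net)
{
	struct ctl_table *table;

	/* Duplicate the template so each netns gets its own .data target. */
	table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	table[0].data = &net->unx.sysctl_max_dgram_qlen;

	net->unx.ctl = register_net_sysctl_sz(net, "net/unix", table,
					      ARRAY_SIZE(unix_table));
	if (!net->unx.ctl) {
		kfree(table);
		return -ENOMEM;
	}

	return 0;
}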
diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
index bd84785bf8d6..e0d30d6d22ac 100644
--- a/net/unix/unix_bpf.c
+++ b/net/unix/unix_bpf.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Cong Wang <cong.wang@bytedance.com> */
-#include <linux/skmsg.h>
#include <linux/bpf.h>
-#include <net/sock.h>
+#include <linux/skmsg.h>
#include <net/af_unix.h>
+#include "af_unix.h"
+
#define unix_sk_has_data(__sk, __psock) \
({ !skb_queue_empty(&__sk->sk_receive_queue) || \
!skb_queue_empty(&__psock->ingress_skb) || \
@@ -54,6 +55,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
struct sk_psock *psock;
int copied;
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
if (!len)
return 0;